Mirror of https://github.com/serai-dex/serai.git
Allow scheduler's creation of transactions to be async and error
I don't love this, but it's the only way to select decoys without using a local database. While the prior commit added such a database, its performance presumably wasn't viable, and while TODOs marked the needed improvements, it was still messy, with an immense scope re: any auditing. The relevant scheduler functions now take `&self` (intentional, as all mutations should be via the `&mut impl DbTxn` passed in). The calls to `&self` are expected to be completely deterministic (as usual).
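For context, the core pattern this commit introduces is sketched below: trait methods which previously returned values directly now return `impl Send + Future<Output = Result<_, Self::EphemeralError>>`, with implementations supplying the body as an `async move` block. This is a minimal, self-contained sketch, not the repository's actual API; `Planned`, `RpcPlanner`, the `String` error, and the `Vec<u64>` inputs are hypothetical stand-ins for the repository's `PlannedTransaction`, planner implementations, `EphemeralError`, and `OutputFor<S>` types.

use core::{fmt::Debug, future::Future};

// Stand-in for PlannedTransaction<S, ST, A>
struct Planned;

trait TransactionPlanner: 'static + Send + Sync {
  // An ephemeral (retryable) error, mirroring `Self::EphemeralError` in the diff
  type EphemeralError: Debug;

  // Async, fallible planning without boxing: the caller gets a Send future
  fn plan(
    &self,
    inputs: Vec<u64>,
  ) -> impl Send + Future<Output = Result<Planned, Self::EphemeralError>>;
}

// Hypothetical planner which would select decoys over RPC, hence async + fallible
struct RpcPlanner;

impl TransactionPlanner for RpcPlanner {
  type EphemeralError = String;

  fn plan(
    &self,
    inputs: Vec<u64>,
  ) -> impl Send + Future<Output = Result<Planned, Self::EphemeralError>> {
    async move {
      // A real implementation would perform network calls here; retrying any
      // ephemeral error is expected to eventually succeed
      if inputs.is_empty() {
        Err("no inputs".to_string())
      } else {
        Ok(Planned)
      }
    }
  }
}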
@@ -2,6 +2,8 @@
|
||||
#![doc = include_str!("../README.md")]
|
||||
#![deny(missing_docs)]
|
||||
|
||||
use core::{fmt::Debug, future::Future};
|
||||
|
||||
use serai_primitives::{Coin, Amount};
|
||||
|
||||
use primitives::{ReceivedOutput, Payment};
|
||||
@@ -40,8 +42,14 @@ pub struct AmortizePlannedTransaction<S: ScannerFeed, ST: SignableTransaction, A
|
||||
|
||||
/// An object able to plan a transaction.
|
||||
pub trait TransactionPlanner<S: ScannerFeed, A>: 'static + Send + Sync {
|
||||
/// An error encountered when planning transactions.
|
||||
///
|
||||
/// This MUST be an ephemeral error. Retrying planning transactions MUST eventually resolve
|
||||
/// without manual intervention/changing the arguments.
|
||||
type EphemeralError: Debug;
|
||||
|
||||
/// The type representing a fee rate to use for transactions.
|
||||
type FeeRate: Clone + Copy;
|
||||
type FeeRate: Send + Clone + Copy;
|
||||
|
||||
/// The type representing a signable transaction.
|
||||
type SignableTransaction: SignableTransaction;
|
||||
@@ -82,11 +90,15 @@ pub trait TransactionPlanner<S: ScannerFeed, A>: 'static + Send + Sync {
|
||||
/// `change` will always be an address belonging to the Serai network. If it is `Some`, a change
|
||||
/// output must be created.
|
||||
fn plan(
|
||||
&self,
|
||||
fee_rate: Self::FeeRate,
|
||||
inputs: Vec<OutputFor<S>>,
|
||||
payments: Vec<Payment<AddressFor<S>>>,
|
||||
change: Option<KeyFor<S>>,
|
||||
) -> PlannedTransaction<S, Self::SignableTransaction, A>;
|
||||
) -> impl Send
|
||||
+ Future<
|
||||
Output = Result<PlannedTransaction<S, Self::SignableTransaction, A>, Self::EphemeralError>,
|
||||
>;
|
||||
|
||||
/// Obtain a PlannedTransaction via amortizing the fee over the payments.
|
||||
///
|
||||
@@ -98,132 +110,142 @@ pub trait TransactionPlanner<S: ScannerFeed, A>: 'static + Send + Sync {
|
||||
/// Returns `None` if the fee exceeded the inputs, or `Some` otherwise.
|
||||
// TODO: Enum for Change of None, Some, Mandatory
|
||||
fn plan_transaction_with_fee_amortization(
|
||||
&self,
|
||||
operating_costs: &mut u64,
|
||||
fee_rate: Self::FeeRate,
|
||||
inputs: Vec<OutputFor<S>>,
|
||||
mut payments: Vec<Payment<AddressFor<S>>>,
|
||||
mut change: Option<KeyFor<S>>,
|
||||
) -> Option<AmortizePlannedTransaction<S, Self::SignableTransaction, A>> {
|
||||
// If there's no change output, we can't recoup any operating costs we would amortize
|
||||
// We also don't have any losses if the inputs are written off/the change output is reduced
|
||||
let mut operating_costs_if_no_change = 0;
|
||||
let operating_costs_in_effect =
|
||||
if change.is_none() { &mut operating_costs_if_no_change } else { operating_costs };
|
||||
) -> impl Send
|
||||
+ Future<
|
||||
Output = Result<
|
||||
Option<AmortizePlannedTransaction<S, Self::SignableTransaction, A>>,
|
||||
Self::EphemeralError,
|
||||
>,
|
||||
> {
|
||||
async move {
|
||||
// If there's no change output, we can't recoup any operating costs we would amortize
|
||||
// We also don't have any losses if the inputs are written off/the change output is reduced
|
||||
let mut operating_costs_if_no_change = 0;
|
||||
let operating_costs_in_effect =
|
||||
if change.is_none() { &mut operating_costs_if_no_change } else { operating_costs };
|
||||
|
||||
// Sanity checks
|
||||
{
|
||||
assert!(!inputs.is_empty());
|
||||
assert!((!payments.is_empty()) || change.is_some());
|
||||
let coin = inputs.first().unwrap().balance().coin;
|
||||
for input in &inputs {
|
||||
assert_eq!(coin, input.balance().coin);
|
||||
}
|
||||
for payment in &payments {
|
||||
assert_eq!(coin, payment.balance().coin);
|
||||
}
|
||||
assert!(
|
||||
(inputs.iter().map(|input| input.balance().amount.0).sum::<u64>() +
|
||||
*operating_costs_in_effect) >=
|
||||
payments.iter().map(|payment| payment.balance().amount.0).sum::<u64>(),
|
||||
"attempted to fulfill payments without a sufficient input set"
|
||||
);
|
||||
}
|
||||
|
||||
// Sanity checks
|
||||
{
|
||||
assert!(!inputs.is_empty());
|
||||
assert!((!payments.is_empty()) || change.is_some());
|
||||
let coin = inputs.first().unwrap().balance().coin;
|
||||
for input in &inputs {
|
||||
assert_eq!(coin, input.balance().coin);
|
||||
|
||||
// Amortization
|
||||
{
|
||||
// Sort payments from high amount to low amount
|
||||
payments.sort_by(|a, b| a.balance().amount.0.cmp(&b.balance().amount.0).reverse());
|
||||
|
||||
let mut fee = Self::calculate_fee(fee_rate, inputs.clone(), payments.clone(), change).0;
|
||||
let mut amortized = 0;
|
||||
while !payments.is_empty() {
|
||||
// We need to pay the fee, and any accrued operating costs, minus what we've already
|
||||
// amortized
|
||||
let adjusted_fee = (*operating_costs_in_effect + fee).saturating_sub(amortized);
|
||||
|
||||
/*
|
||||
Ideally, we wouldn't use a ceil div yet would be accurate about it. Any remainder could
|
||||
be amortized over the largest outputs, which wouldn't be relevant here as we only work
|
||||
with the smallest output. The issue is the theoretical edge case where all outputs have
|
||||
the same value and are of the minimum value. In that case, none would be able to have
|
||||
the remainder amortized as it'd cause them to need to be dropped. Using a ceil div
|
||||
avoids this.
|
||||
*/
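// Worked example (hypothetical numbers): with an adjusted fee of 10 and 3 remaining
// payments, div_ceil yields a per-payment fee of 4, whereas a floor division would leave a
// remainder of 1 with no payment able to absorb it in the all-minimum-value edge case above.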
|
||||
let per_payment_fee = adjusted_fee.div_ceil(u64::try_from(payments.len()).unwrap());
|
||||
// Pop the last payment if it can't pay the fee, remaining above the dust limit as it does
|
||||
if payments.last().unwrap().balance().amount.0 <= (per_payment_fee + S::dust(coin).0) {
|
||||
amortized += payments.pop().unwrap().balance().amount.0;
|
||||
// Recalculate the fee and try again
|
||||
fee = Self::calculate_fee(fee_rate, inputs.clone(), payments.clone(), change).0;
|
||||
continue;
|
||||
}
|
||||
// Break since all of these payments shouldn't be dropped
|
||||
break;
|
||||
}
|
||||
|
||||
// If we couldn't amortize the fee over the payments, check if we even have enough to pay it
|
||||
if payments.is_empty() {
|
||||
// If we don't have a change output, we simply return here
|
||||
// We no longer have anything to do here, nor any expectations
|
||||
if change.is_none() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let inputs = inputs.iter().map(|input| input.balance().amount.0).sum::<u64>();
|
||||
// Checks not just if we can pay for it, yet that the would-be change output is at least
|
||||
// dust
|
||||
if inputs < (fee + S::dust(coin).0) {
|
||||
// Write off these inputs
|
||||
*operating_costs_in_effect += inputs;
|
||||
// Yet also claw back the payments we dropped, as we only lost the change
|
||||
// The dropped payments will be worth less than the inputs + operating_costs we started
|
||||
// with, so this shouldn't use `saturating_sub`
|
||||
*operating_costs_in_effect -= amortized;
|
||||
return Ok(None);
|
||||
}
|
||||
} else {
|
||||
// Since we have payments which can pay the fee we ended up with, amortize it
|
||||
let adjusted_fee = (*operating_costs_in_effect + fee).saturating_sub(amortized);
|
||||
let per_payment_base_fee = adjusted_fee / u64::try_from(payments.len()).unwrap();
|
||||
let payments_paying_one_atomic_unit_more =
|
||||
usize::try_from(adjusted_fee % u64::try_from(payments.len()).unwrap()).unwrap();
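// Worked example (hypothetical numbers): with an adjusted fee of 10 and 3 payments, the
// base fee is 3 and 10 % 3 = 1, so the first (largest) payment pays 4 and the other two
// pay 3, amortizing exactly 10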
|
||||
|
||||
for (i, payment) in payments.iter_mut().enumerate() {
|
||||
let per_payment_fee =
|
||||
per_payment_base_fee + u64::from(u8::from(i < payments_paying_one_atomic_unit_more));
|
||||
payment.balance().amount.0 -= per_payment_fee;
|
||||
amortized += per_payment_fee;
|
||||
}
|
||||
assert!(amortized >= (*operating_costs_in_effect + fee));
|
||||
|
||||
// If the change is less than the dust, drop it
|
||||
let would_be_change = inputs.iter().map(|input| input.balance().amount.0).sum::<u64>() -
|
||||
payments.iter().map(|payment| payment.balance().amount.0).sum::<u64>() -
|
||||
fee;
|
||||
if would_be_change < S::dust(coin).0 {
|
||||
change = None;
|
||||
*operating_costs_in_effect += would_be_change;
|
||||
}
|
||||
}
|
||||
|
||||
// Update the amount of operating costs
|
||||
*operating_costs_in_effect = (*operating_costs_in_effect + fee).saturating_sub(amortized);
|
||||
}
|
||||
for payment in &payments {
|
||||
assert_eq!(coin, payment.balance().coin);
|
||||
}
|
||||
assert!(
|
||||
(inputs.iter().map(|input| input.balance().amount.0).sum::<u64>() +
|
||||
*operating_costs_in_effect) >=
|
||||
payments.iter().map(|payment| payment.balance().amount.0).sum::<u64>(),
|
||||
"attempted to fulfill payments without a sufficient input set"
|
||||
);
|
||||
|
||||
// Because we amortized, or accrued as operating costs, the fee, make the transaction
|
||||
let effected_payments = payments.iter().map(|payment| payment.balance().amount).collect();
|
||||
let has_change = change.is_some();
|
||||
|
||||
let PlannedTransaction { signable, eventuality, auxilliary } =
|
||||
self.plan(fee_rate, inputs, payments, change).await?;
|
||||
Ok(Some(AmortizePlannedTransaction {
|
||||
effected_payments,
|
||||
has_change,
|
||||
signable,
|
||||
eventuality,
|
||||
auxilliary,
|
||||
}))
|
||||
}
|
||||
|
||||
let coin = inputs.first().unwrap().balance().coin;
|
||||
|
||||
// Amortization
|
||||
{
|
||||
// Sort payments from high amount to low amount
|
||||
payments.sort_by(|a, b| a.balance().amount.0.cmp(&b.balance().amount.0).reverse());
|
||||
|
||||
let mut fee = Self::calculate_fee(fee_rate, inputs.clone(), payments.clone(), change).0;
|
||||
let mut amortized = 0;
|
||||
while !payments.is_empty() {
|
||||
// We need to pay the fee, and any accrued operating costs, minus what we've already
|
||||
// amortized
|
||||
let adjusted_fee = (*operating_costs_in_effect + fee).saturating_sub(amortized);
|
||||
|
||||
/*
|
||||
Ideally, we wouldn't use a ceil div yet would be accurate about it. Any remainder could
|
||||
be amortized over the largest outputs, which wouldn't be relevant here as we only work
|
||||
with the smallest output. The issue is the theoretical edge case where all outputs have
|
||||
the same value and are of the minimum value. In that case, none would be able to have the
|
||||
remainder amortized as it'd cause them to need to be dropped. Using a ceil div avoids
|
||||
this.
|
||||
*/
|
||||
let per_payment_fee = adjusted_fee.div_ceil(u64::try_from(payments.len()).unwrap());
|
||||
// Pop the last payment if it can't pay the fee, remaining above the dust limit as it does
|
||||
if payments.last().unwrap().balance().amount.0 <= (per_payment_fee + S::dust(coin).0) {
|
||||
amortized += payments.pop().unwrap().balance().amount.0;
|
||||
// Recalculate the fee and try again
|
||||
fee = Self::calculate_fee(fee_rate, inputs.clone(), payments.clone(), change).0;
|
||||
continue;
|
||||
}
|
||||
// Break since all of these payments shouldn't be dropped
|
||||
break;
|
||||
}
|
||||
|
||||
// If we couldn't amortize the fee over the payments, check if we even have enough to pay it
|
||||
if payments.is_empty() {
|
||||
// If we don't have a change output, we simply return here
|
||||
// We no longer have anything to do here, nor any expectations
|
||||
if change.is_none() {
|
||||
None?;
|
||||
}
|
||||
|
||||
let inputs = inputs.iter().map(|input| input.balance().amount.0).sum::<u64>();
|
||||
// Checks not just if we can pay for it, yet that the would-be change output is at least
|
||||
// dust
|
||||
if inputs < (fee + S::dust(coin).0) {
|
||||
// Write off these inputs
|
||||
*operating_costs_in_effect += inputs;
|
||||
// Yet also claw back the payments we dropped, as we only lost the change
|
||||
// The dropped payments will be worth less than the inputs + operating_costs we started
|
||||
// with, so this shouldn't use `saturating_sub`
|
||||
*operating_costs_in_effect -= amortized;
|
||||
None?;
|
||||
}
|
||||
} else {
|
||||
// Since we have payments which can pay the fee we ended up with, amortize it
|
||||
let adjusted_fee = (*operating_costs_in_effect + fee).saturating_sub(amortized);
|
||||
let per_payment_base_fee = adjusted_fee / u64::try_from(payments.len()).unwrap();
|
||||
let payments_paying_one_atomic_unit_more =
|
||||
usize::try_from(adjusted_fee % u64::try_from(payments.len()).unwrap()).unwrap();
|
||||
|
||||
for (i, payment) in payments.iter_mut().enumerate() {
|
||||
let per_payment_fee =
|
||||
per_payment_base_fee + u64::from(u8::from(i < payments_paying_one_atomic_unit_more));
|
||||
payment.balance().amount.0 -= per_payment_fee;
|
||||
amortized += per_payment_fee;
|
||||
}
|
||||
assert!(amortized >= (*operating_costs_in_effect + fee));
|
||||
|
||||
// If the change is less than the dust, drop it
|
||||
let would_be_change = inputs.iter().map(|input| input.balance().amount.0).sum::<u64>() -
|
||||
payments.iter().map(|payment| payment.balance().amount.0).sum::<u64>() -
|
||||
fee;
|
||||
if would_be_change < S::dust(coin).0 {
|
||||
change = None;
|
||||
*operating_costs_in_effect += would_be_change;
|
||||
}
|
||||
}
|
||||
|
||||
// Update the amount of operating costs
|
||||
*operating_costs_in_effect = (*operating_costs_in_effect + fee).saturating_sub(amortized);
|
||||
}
|
||||
|
||||
// Because we amortized, or accrued as operating costs, the fee, make the transaction
|
||||
let effected_payments = payments.iter().map(|payment| payment.balance().amount).collect();
|
||||
let has_change = change.is_some();
|
||||
let PlannedTransaction { signable, eventuality, auxilliary } =
|
||||
Self::plan(fee_rate, inputs, payments, change);
|
||||
Some(AmortizePlannedTransaction {
|
||||
effected_payments,
|
||||
has_change,
|
||||
signable,
|
||||
eventuality,
|
||||
auxilliary,
|
||||
})
|
||||
}
|
||||
|
||||
/// Create a tree to fulfill a set of payments.
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
#![doc = include_str!("../README.md")]
|
||||
#![deny(missing_docs)]
|
||||
|
||||
use core::marker::PhantomData;
|
||||
use core::{marker::PhantomData, future::Future};
|
||||
use std::collections::HashMap;
|
||||
|
||||
use group::GroupEncoding;
|
||||
@@ -14,7 +14,7 @@ use serai_db::DbTxn;
|
||||
use primitives::{ReceivedOutput, Payment};
|
||||
use scanner::{
|
||||
LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, BlockFor,
|
||||
SchedulerUpdate, Scheduler as SchedulerTrait,
|
||||
SchedulerUpdate, KeyScopedEventualities, Scheduler as SchedulerTrait,
|
||||
};
|
||||
use scheduler_primitives::*;
|
||||
use utxo_scheduler_primitives::*;
|
||||
@@ -23,16 +23,27 @@ mod db;
|
||||
use db::Db;
|
||||
|
||||
/// A scheduler of transactions for networks premised on the UTXO model.
|
||||
pub struct Scheduler<S: ScannerFeed, P: TransactionPlanner<S, ()>>(PhantomData<S>, PhantomData<P>);
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Clone)]
|
||||
pub struct Scheduler<S: ScannerFeed, P: TransactionPlanner<S, ()>> {
|
||||
planner: P,
|
||||
_S: PhantomData<S>,
|
||||
}
|
||||
|
||||
impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> Scheduler<S, P> {
|
||||
fn aggregate_inputs(
|
||||
/// Create a new scheduler.
|
||||
pub fn new(planner: P) -> Self {
|
||||
Self { planner, _S: PhantomData }
|
||||
}
|
||||
|
||||
async fn aggregate_inputs(
|
||||
&self,
|
||||
txn: &mut impl DbTxn,
|
||||
block: &BlockFor<S>,
|
||||
key_for_change: KeyFor<S>,
|
||||
key: KeyFor<S>,
|
||||
coin: Coin,
|
||||
) -> Vec<EventualityFor<S>> {
|
||||
) -> Result<Vec<EventualityFor<S>>, <Self as SchedulerTrait<S>>::EphemeralError> {
|
||||
let mut eventualities = vec![];
|
||||
|
||||
let mut operating_costs = Db::<S>::operating_costs(txn, coin).0;
|
||||
@@ -41,13 +52,17 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> Scheduler<S, P> {
|
||||
while outputs.len() > P::MAX_INPUTS {
|
||||
let to_aggregate = outputs.drain(.. P::MAX_INPUTS).collect::<Vec<_>>();
|
||||
|
||||
let Some(planned) = P::plan_transaction_with_fee_amortization(
|
||||
&mut operating_costs,
|
||||
P::fee_rate(block, coin),
|
||||
to_aggregate,
|
||||
vec![],
|
||||
Some(key_for_change),
|
||||
) else {
|
||||
let Some(planned) = self
|
||||
.planner
|
||||
.plan_transaction_with_fee_amortization(
|
||||
&mut operating_costs,
|
||||
P::fee_rate(block, coin),
|
||||
to_aggregate,
|
||||
vec![],
|
||||
Some(key_for_change),
|
||||
)
|
||||
.await?
|
||||
else {
|
||||
continue;
|
||||
};
|
||||
|
||||
@@ -57,7 +72,7 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> Scheduler<S, P> {
|
||||
|
||||
Db::<S>::set_outputs(txn, key, coin, &outputs);
|
||||
Db::<S>::set_operating_costs(txn, coin, Amount(operating_costs));
|
||||
eventualities
|
||||
Ok(eventualities)
|
||||
}
|
||||
|
||||
fn fulfillable_payments(
|
||||
@@ -140,31 +155,36 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> Scheduler<S, P> {
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_branch(
|
||||
async fn handle_branch(
|
||||
&self,
|
||||
txn: &mut impl DbTxn,
|
||||
block: &BlockFor<S>,
|
||||
eventualities: &mut Vec<EventualityFor<S>>,
|
||||
output: OutputFor<S>,
|
||||
tx: TreeTransaction<AddressFor<S>>,
|
||||
) -> bool {
|
||||
) -> Result<bool, <Self as SchedulerTrait<S>>::EphemeralError> {
|
||||
let key = output.key();
|
||||
let coin = output.balance().coin;
|
||||
let Some(payments) = tx.payments::<S>(coin, &P::branch_address(key), output.balance().amount.0)
|
||||
else {
|
||||
// If this output has become too small to satisfy this branch, drop it
|
||||
return false;
|
||||
return Ok(false);
|
||||
};
|
||||
|
||||
let Some(planned) = P::plan_transaction_with_fee_amortization(
|
||||
// Uses 0 as there's no operating costs to incur/amortize here
|
||||
&mut 0,
|
||||
P::fee_rate(block, coin),
|
||||
vec![output],
|
||||
payments,
|
||||
None,
|
||||
) else {
|
||||
let Some(planned) = self
|
||||
.planner
|
||||
.plan_transaction_with_fee_amortization(
|
||||
// Uses 0 as there's no operating costs to incur/amortize here
|
||||
&mut 0,
|
||||
P::fee_rate(block, coin),
|
||||
vec![output],
|
||||
payments,
|
||||
None,
|
||||
)
|
||||
.await?
|
||||
else {
|
||||
// This Branch isn't viable, so drop it (and its children)
|
||||
return false;
|
||||
return Ok(false);
|
||||
};
|
||||
|
||||
TransactionsToSign::<P::SignableTransaction>::send(txn, &key, &planned.signable);
|
||||
@@ -172,15 +192,16 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> Scheduler<S, P> {
|
||||
|
||||
Self::queue_branches(txn, key, coin, planned.effected_payments, tx);
|
||||
|
||||
true
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
fn step(
|
||||
async fn step(
|
||||
&self,
|
||||
txn: &mut impl DbTxn,
|
||||
active_keys: &[(KeyFor<S>, LifetimeStage)],
|
||||
block: &BlockFor<S>,
|
||||
key: KeyFor<S>,
|
||||
) -> Vec<EventualityFor<S>> {
|
||||
) -> Result<Vec<EventualityFor<S>>, <Self as SchedulerTrait<S>>::EphemeralError> {
|
||||
let mut eventualities = vec![];
|
||||
|
||||
let key_for_change = match active_keys[0].1 {
|
||||
@@ -198,7 +219,8 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> Scheduler<S, P> {
|
||||
let coin = *coin;
|
||||
|
||||
// Perform any input aggregation we should
|
||||
eventualities.append(&mut Self::aggregate_inputs(txn, block, key_for_change, key, coin));
|
||||
eventualities
|
||||
.append(&mut self.aggregate_inputs(txn, block, key_for_change, key, coin).await?);
|
||||
|
||||
// Fetch the operating costs/outputs
|
||||
let mut operating_costs = Db::<S>::operating_costs(txn, coin).0;
|
||||
@@ -228,15 +250,19 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> Scheduler<S, P> {
|
||||
// scanner API)
|
||||
let mut planned_outer = None;
|
||||
for i in 0 .. 2 {
|
||||
let Some(planned) = P::plan_transaction_with_fee_amortization(
|
||||
&mut operating_costs,
|
||||
P::fee_rate(block, coin),
|
||||
outputs.clone(),
|
||||
tree[0]
|
||||
.payments::<S>(coin, &branch_address, tree[0].value())
|
||||
.expect("payments were dropped despite providing an input of the needed value"),
|
||||
Some(key_for_change),
|
||||
) else {
|
||||
let Some(planned) = self
|
||||
.planner
|
||||
.plan_transaction_with_fee_amortization(
|
||||
&mut operating_costs,
|
||||
P::fee_rate(block, coin),
|
||||
outputs.clone(),
|
||||
tree[0]
|
||||
.payments::<S>(coin, &branch_address, tree[0].value())
|
||||
.expect("payments were dropped despite providing an input of the needed value"),
|
||||
Some(key_for_change),
|
||||
)
|
||||
.await?
|
||||
else {
|
||||
// This should trip on the first iteration or not at all
|
||||
assert_eq!(i, 0);
|
||||
// This doesn't have inputs even worth aggregating so drop the entire tree
|
||||
@@ -272,46 +298,53 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> Scheduler<S, P> {
|
||||
Self::queue_branches(txn, key, coin, planned.effected_payments, tree.remove(0));
|
||||
}
|
||||
|
||||
eventualities
|
||||
Ok(eventualities)
|
||||
}
|
||||
|
||||
fn flush_outputs(
|
||||
async fn flush_outputs(
|
||||
&self,
|
||||
txn: &mut impl DbTxn,
|
||||
eventualities: &mut HashMap<Vec<u8>, Vec<EventualityFor<S>>>,
|
||||
eventualities: &mut KeyScopedEventualities<S>,
|
||||
block: &BlockFor<S>,
|
||||
from: KeyFor<S>,
|
||||
to: KeyFor<S>,
|
||||
coin: Coin,
|
||||
) {
|
||||
) -> Result<(), <Self as SchedulerTrait<S>>::EphemeralError> {
|
||||
let from_bytes = from.to_bytes().as_ref().to_vec();
|
||||
// Ensure our inputs are aggregated
|
||||
eventualities
|
||||
.entry(from_bytes.clone())
|
||||
.or_insert(vec![])
|
||||
.append(&mut Self::aggregate_inputs(txn, block, to, from, coin));
|
||||
.append(&mut self.aggregate_inputs(txn, block, to, from, coin).await?);
|
||||
|
||||
// Now that our inputs are aggregated, transfer all of them to the new key
|
||||
let mut operating_costs = Db::<S>::operating_costs(txn, coin).0;
|
||||
let outputs = Db::<S>::outputs(txn, from, coin).unwrap();
|
||||
if outputs.is_empty() {
|
||||
return;
|
||||
return Ok(());
|
||||
}
|
||||
let planned = P::plan_transaction_with_fee_amortization(
|
||||
&mut operating_costs,
|
||||
P::fee_rate(block, coin),
|
||||
outputs,
|
||||
vec![],
|
||||
Some(to),
|
||||
);
|
||||
let planned = self
|
||||
.planner
|
||||
.plan_transaction_with_fee_amortization(
|
||||
&mut operating_costs,
|
||||
P::fee_rate(block, coin),
|
||||
outputs,
|
||||
vec![],
|
||||
Some(to),
|
||||
)
|
||||
.await?;
|
||||
Db::<S>::set_operating_costs(txn, coin, Amount(operating_costs));
|
||||
let Some(planned) = planned else { return };
|
||||
let Some(planned) = planned else { return Ok(()) };
|
||||
|
||||
TransactionsToSign::<P::SignableTransaction>::send(txn, &from, &planned.signable);
|
||||
eventualities.get_mut(&from_bytes).unwrap().push(planned.eventuality);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> SchedulerTrait<S> for Scheduler<S, P> {
|
||||
type EphemeralError = P::EphemeralError;
|
||||
type SignableTransaction = P::SignableTransaction;
|
||||
|
||||
fn activate_key(txn: &mut impl DbTxn, key: KeyFor<S>) {
|
||||
@@ -324,29 +357,32 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> SchedulerTrait<S> for Schedul
|
||||
}
|
||||
|
||||
fn flush_key(
|
||||
&self,
|
||||
txn: &mut impl DbTxn,
|
||||
block: &BlockFor<S>,
|
||||
retiring_key: KeyFor<S>,
|
||||
new_key: KeyFor<S>,
|
||||
) -> HashMap<Vec<u8>, Vec<EventualityFor<S>>> {
|
||||
let mut eventualities = HashMap::new();
|
||||
for coin in S::NETWORK.coins() {
|
||||
// Move the payments to the new key
|
||||
{
|
||||
let still_queued = Db::<S>::queued_payments(txn, retiring_key, *coin).unwrap();
|
||||
let mut new_queued = Db::<S>::queued_payments(txn, new_key, *coin).unwrap();
|
||||
) -> impl Send + Future<Output = Result<KeyScopedEventualities<S>, Self::EphemeralError>> {
|
||||
async move {
|
||||
let mut eventualities = HashMap::new();
|
||||
for coin in S::NETWORK.coins() {
|
||||
// Move the payments to the new key
|
||||
{
|
||||
let still_queued = Db::<S>::queued_payments(txn, retiring_key, *coin).unwrap();
|
||||
let mut new_queued = Db::<S>::queued_payments(txn, new_key, *coin).unwrap();
|
||||
|
||||
let mut queued = still_queued;
|
||||
queued.append(&mut new_queued);
|
||||
let mut queued = still_queued;
|
||||
queued.append(&mut new_queued);
|
||||
|
||||
Db::<S>::set_queued_payments(txn, retiring_key, *coin, &[]);
|
||||
Db::<S>::set_queued_payments(txn, new_key, *coin, &queued);
|
||||
Db::<S>::set_queued_payments(txn, retiring_key, *coin, &[]);
|
||||
Db::<S>::set_queued_payments(txn, new_key, *coin, &queued);
|
||||
}
|
||||
|
||||
// Move the outputs to the new key
|
||||
self.flush_outputs(txn, &mut eventualities, block, retiring_key, new_key, *coin).await?;
|
||||
}
|
||||
|
||||
// Move the outputs to the new key
|
||||
Self::flush_outputs(txn, &mut eventualities, block, retiring_key, new_key, *coin);
|
||||
Ok(eventualities)
|
||||
}
|
||||
eventualities
|
||||
}
|
||||
|
||||
fn retire_key(txn: &mut impl DbTxn, key: KeyFor<S>) {
|
||||
@@ -359,155 +395,174 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, ()>> SchedulerTrait<S> for Schedul
|
||||
}
|
||||
|
||||
fn update(
|
||||
&self,
|
||||
txn: &mut impl DbTxn,
|
||||
block: &BlockFor<S>,
|
||||
active_keys: &[(KeyFor<S>, LifetimeStage)],
|
||||
update: SchedulerUpdate<S>,
|
||||
) -> HashMap<Vec<u8>, Vec<EventualityFor<S>>> {
|
||||
let mut eventualities = HashMap::new();
|
||||
) -> impl Send + Future<Output = Result<KeyScopedEventualities<S>, Self::EphemeralError>> {
|
||||
async move {
|
||||
let mut eventualities = HashMap::new();
|
||||
|
||||
// Accumulate the new outputs
|
||||
{
|
||||
let mut outputs_by_key = HashMap::new();
|
||||
for output in update.outputs() {
|
||||
// If this aligns for a branch, handle it
|
||||
if let Some(branch) = Db::<S>::take_pending_branch(txn, output.key(), output.balance()) {
|
||||
if Self::handle_branch(
|
||||
txn,
|
||||
block,
|
||||
eventualities.entry(output.key().to_bytes().as_ref().to_vec()).or_insert(vec![]),
|
||||
output.clone(),
|
||||
branch,
|
||||
) {
|
||||
// If we could use it for a branch, we do and move on
|
||||
// Else, we let it be accumulated by the standard accumulation code
|
||||
continue;
|
||||
// Accumulate the new outputs
|
||||
{
|
||||
let mut outputs_by_key = HashMap::new();
|
||||
for output in update.outputs() {
|
||||
// If this aligns for a branch, handle it
|
||||
if let Some(branch) = Db::<S>::take_pending_branch(txn, output.key(), output.balance()) {
|
||||
if self
|
||||
.handle_branch(
|
||||
txn,
|
||||
block,
|
||||
eventualities.entry(output.key().to_bytes().as_ref().to_vec()).or_insert(vec![]),
|
||||
output.clone(),
|
||||
branch,
|
||||
)
|
||||
.await?
|
||||
{
|
||||
// If we could use it for a branch, we do and move on
|
||||
// Else, we let it be accumulated by the standard accumulation code
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
let coin = output.balance().coin;
|
||||
outputs_by_key
|
||||
// Index by key and coin
|
||||
.entry((output.key().to_bytes().as_ref().to_vec(), coin))
|
||||
// If we haven't accumulated here prior, read the outputs from the database
|
||||
.or_insert_with(|| (output.key(), Db::<S>::outputs(txn, output.key(), coin).unwrap()))
|
||||
.1
|
||||
.push(output.clone());
|
||||
}
|
||||
// Write the outputs back to the database
|
||||
for ((_key_vec, coin), (key, outputs)) in outputs_by_key {
|
||||
Db::<S>::set_outputs(txn, key, coin, &outputs);
|
||||
}
|
||||
}
|
||||
|
||||
// Fulfill the payments we prior couldn't
|
||||
for (key, _stage) in active_keys {
|
||||
eventualities
|
||||
.entry(key.to_bytes().as_ref().to_vec())
|
||||
.or_insert(vec![])
|
||||
.append(&mut self.step(txn, active_keys, block, *key).await?);
|
||||
}
|
||||
|
||||
// If this key has been flushed, forward all outputs
|
||||
match active_keys[0].1 {
|
||||
LifetimeStage::ActiveYetNotReporting |
|
||||
LifetimeStage::Active |
|
||||
LifetimeStage::UsingNewForChange => {}
|
||||
LifetimeStage::Forwarding | LifetimeStage::Finishing => {
|
||||
for coin in S::NETWORK.coins() {
|
||||
self
|
||||
.flush_outputs(
|
||||
txn,
|
||||
&mut eventualities,
|
||||
block,
|
||||
active_keys[0].0,
|
||||
active_keys[1].0,
|
||||
*coin,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
let coin = output.balance().coin;
|
||||
outputs_by_key
|
||||
// Index by key and coin
|
||||
.entry((output.key().to_bytes().as_ref().to_vec(), coin))
|
||||
// If we haven't accumulated here prior, read the outputs from the database
|
||||
.or_insert_with(|| (output.key(), Db::<S>::outputs(txn, output.key(), coin).unwrap()))
|
||||
.1
|
||||
.push(output.clone());
|
||||
}
|
||||
// Write the outputs back to the database
|
||||
for ((_key_vec, coin), (key, outputs)) in outputs_by_key {
|
||||
Db::<S>::set_outputs(txn, key, coin, &outputs);
|
||||
}
|
||||
}
|
||||
|
||||
// Fulfill the payments we prior couldn't
|
||||
for (key, _stage) in active_keys {
|
||||
eventualities
|
||||
.entry(key.to_bytes().as_ref().to_vec())
|
||||
.or_insert(vec![])
|
||||
.append(&mut Self::step(txn, active_keys, block, *key));
|
||||
}
|
||||
// Create the transactions for the forwards/burns
|
||||
{
|
||||
let mut planned_txs = vec![];
|
||||
for forward in update.forwards() {
|
||||
let key = forward.key();
|
||||
|
||||
// If this key has been flushed, forward all outputs
|
||||
match active_keys[0].1 {
|
||||
LifetimeStage::ActiveYetNotReporting |
|
||||
LifetimeStage::Active |
|
||||
LifetimeStage::UsingNewForChange => {}
|
||||
LifetimeStage::Forwarding | LifetimeStage::Finishing => {
|
||||
for coin in S::NETWORK.coins() {
|
||||
Self::flush_outputs(
|
||||
txn,
|
||||
&mut eventualities,
|
||||
block,
|
||||
active_keys[0].0,
|
||||
active_keys[1].0,
|
||||
*coin,
|
||||
);
|
||||
assert_eq!(active_keys.len(), 2);
|
||||
assert_eq!(active_keys[0].1, LifetimeStage::Forwarding);
|
||||
assert_eq!(active_keys[1].1, LifetimeStage::Active);
|
||||
let forward_to_key = active_keys[1].0;
|
||||
|
||||
let Some(plan) = self
|
||||
.planner
|
||||
.plan_transaction_with_fee_amortization(
|
||||
// This uses 0 for the operating costs as we don't incur any here
|
||||
// If the output can't pay for itself to be forwarded, we simply drop it
|
||||
&mut 0,
|
||||
P::fee_rate(block, forward.balance().coin),
|
||||
vec![forward.clone()],
|
||||
vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance(), None)],
|
||||
None,
|
||||
)
|
||||
.await?
|
||||
else {
|
||||
continue;
|
||||
};
|
||||
planned_txs.push((key, plan));
|
||||
}
|
||||
for to_return in update.returns() {
|
||||
let key = to_return.output().key();
|
||||
let out_instruction =
|
||||
Payment::new(to_return.address().clone(), to_return.output().balance(), None);
|
||||
let Some(plan) = self
|
||||
.planner
|
||||
.plan_transaction_with_fee_amortization(
|
||||
// This uses 0 for the operating costs as we don't incur any here
|
||||
// If the output can't pay for itself to be returned, we simply drop it
|
||||
&mut 0,
|
||||
P::fee_rate(block, out_instruction.balance().coin),
|
||||
vec![to_return.output().clone()],
|
||||
vec![out_instruction],
|
||||
None,
|
||||
)
|
||||
.await?
|
||||
else {
|
||||
continue;
|
||||
};
|
||||
planned_txs.push((key, plan));
|
||||
}
|
||||
|
||||
for (key, planned_tx) in planned_txs {
|
||||
// Send the transactions off for signing
|
||||
TransactionsToSign::<P::SignableTransaction>::send(txn, &key, &planned_tx.signable);
|
||||
|
||||
// Insert the Eventualities into the result
|
||||
eventualities.get_mut(key.to_bytes().as_ref()).unwrap().push(planned_tx.eventuality);
|
||||
}
|
||||
|
||||
Ok(eventualities)
|
||||
}
|
||||
}
|
||||
|
||||
// Create the transactions for the forwards/burns
|
||||
{
|
||||
let mut planned_txs = vec![];
|
||||
for forward in update.forwards() {
|
||||
let key = forward.key();
|
||||
|
||||
assert_eq!(active_keys.len(), 2);
|
||||
assert_eq!(active_keys[0].1, LifetimeStage::Forwarding);
|
||||
assert_eq!(active_keys[1].1, LifetimeStage::Active);
|
||||
let forward_to_key = active_keys[1].0;
|
||||
|
||||
let Some(plan) = P::plan_transaction_with_fee_amortization(
|
||||
// This uses 0 for the operating costs as we don't incur any here
|
||||
// If the output can't pay for itself to be forwarded, we simply drop it
|
||||
&mut 0,
|
||||
P::fee_rate(block, forward.balance().coin),
|
||||
vec![forward.clone()],
|
||||
vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance(), None)],
|
||||
None,
|
||||
) else {
|
||||
continue;
|
||||
};
|
||||
planned_txs.push((key, plan));
|
||||
}
|
||||
for to_return in update.returns() {
|
||||
let key = to_return.output().key();
|
||||
let out_instruction =
|
||||
Payment::new(to_return.address().clone(), to_return.output().balance(), None);
|
||||
let Some(plan) = P::plan_transaction_with_fee_amortization(
|
||||
// This uses 0 for the operating costs as we don't incur any here
|
||||
// If the output can't pay for itself to be returned, we simply drop it
|
||||
&mut 0,
|
||||
P::fee_rate(block, out_instruction.balance().coin),
|
||||
vec![to_return.output().clone()],
|
||||
vec![out_instruction],
|
||||
None,
|
||||
) else {
|
||||
continue;
|
||||
};
|
||||
planned_txs.push((key, plan));
|
||||
}
|
||||
|
||||
for (key, planned_tx) in planned_txs {
|
||||
// Send the transactions off for signing
|
||||
TransactionsToSign::<P::SignableTransaction>::send(txn, &key, &planned_tx.signable);
|
||||
|
||||
// Insert the Eventualities into the result
|
||||
eventualities.get_mut(key.to_bytes().as_ref()).unwrap().push(planned_tx.eventuality);
|
||||
}
|
||||
|
||||
eventualities
|
||||
}
|
||||
}
|
||||
|
||||
fn fulfill(
|
||||
&self,
|
||||
txn: &mut impl DbTxn,
|
||||
block: &BlockFor<S>,
|
||||
active_keys: &[(KeyFor<S>, LifetimeStage)],
|
||||
payments: Vec<Payment<AddressFor<S>>>,
|
||||
) -> HashMap<Vec<u8>, Vec<EventualityFor<S>>> {
|
||||
// Find the key to fulfill these payments with
|
||||
let fulfillment_key = match active_keys[0].1 {
|
||||
LifetimeStage::ActiveYetNotReporting => {
|
||||
panic!("expected to fulfill payments despite not reporting for the oldest key")
|
||||
) -> impl Send + Future<Output = Result<KeyScopedEventualities<S>, Self::EphemeralError>> {
|
||||
async move {
|
||||
// Find the key to fulfill these payments with
|
||||
let fulfillment_key = match active_keys[0].1 {
|
||||
LifetimeStage::ActiveYetNotReporting => {
|
||||
panic!("expected to fulfill payments despite not reporting for the oldest key")
|
||||
}
|
||||
LifetimeStage::Active | LifetimeStage::UsingNewForChange => active_keys[0].0,
|
||||
LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0,
|
||||
};
|
||||
|
||||
// Queue the payments for this key
|
||||
for coin in S::NETWORK.coins() {
|
||||
let mut queued_payments = Db::<S>::queued_payments(txn, fulfillment_key, *coin).unwrap();
|
||||
queued_payments
|
||||
.extend(payments.iter().filter(|payment| payment.balance().coin == *coin).cloned());
|
||||
Db::<S>::set_queued_payments(txn, fulfillment_key, *coin, &queued_payments);
|
||||
}
|
||||
LifetimeStage::Active | LifetimeStage::UsingNewForChange => active_keys[0].0,
|
||||
LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0,
|
||||
};
|
||||
|
||||
// Queue the payments for this key
|
||||
for coin in S::NETWORK.coins() {
|
||||
let mut queued_payments = Db::<S>::queued_payments(txn, fulfillment_key, *coin).unwrap();
|
||||
queued_payments
|
||||
.extend(payments.iter().filter(|payment| payment.balance().coin == *coin).cloned());
|
||||
Db::<S>::set_queued_payments(txn, fulfillment_key, *coin, &queued_payments);
|
||||
// Handle the queued payments
|
||||
Ok(HashMap::from([(
|
||||
fulfillment_key.to_bytes().as_ref().to_vec(),
|
||||
self.step(txn, active_keys, block, fulfillment_key).await?,
|
||||
)]))
|
||||
}
|
||||
|
||||
// Handle the queued payments
|
||||
HashMap::from([(
|
||||
fulfillment_key.to_bytes().as_ref().to_vec(),
|
||||
Self::step(txn, active_keys, block, fulfillment_key),
|
||||
)])
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
#![doc = include_str!("../README.md")]
|
||||
#![deny(missing_docs)]
|
||||
|
||||
use core::marker::PhantomData;
|
||||
use core::{marker::PhantomData, future::Future};
|
||||
use std::collections::HashMap;
|
||||
|
||||
use group::GroupEncoding;
|
||||
@@ -14,7 +14,7 @@ use serai_db::DbTxn;
|
||||
use primitives::{OutputType, ReceivedOutput, Payment};
|
||||
use scanner::{
|
||||
LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, BlockFor,
|
||||
SchedulerUpdate, Scheduler as SchedulerTrait,
|
||||
SchedulerUpdate, KeyScopedEventualities, Scheduler as SchedulerTrait,
|
||||
};
|
||||
use scheduler_primitives::*;
|
||||
use utxo_scheduler_primitives::*;
|
||||
@@ -27,12 +27,19 @@ pub struct EffectedReceivedOutputs<S: ScannerFeed>(pub Vec<OutputFor<S>>);
|
||||
|
||||
/// A scheduler of transactions for networks premised on the UTXO model which support
|
||||
/// transaction chaining.
|
||||
pub struct Scheduler<S: ScannerFeed, P: TransactionPlanner<S, EffectedReceivedOutputs<S>>>(
|
||||
PhantomData<S>,
|
||||
PhantomData<P>,
|
||||
);
|
||||
#[allow(non_snake_case)]
|
||||
#[derive(Clone)]
|
||||
pub struct Scheduler<S: ScannerFeed, P: TransactionPlanner<S, EffectedReceivedOutputs<S>>> {
|
||||
planner: P,
|
||||
_S: PhantomData<S>,
|
||||
}
|
||||
|
||||
impl<S: ScannerFeed, P: TransactionPlanner<S, EffectedReceivedOutputs<S>>> Scheduler<S, P> {
|
||||
/// Create a new scheduler.
|
||||
pub fn new(planner: P) -> Self {
|
||||
Self { planner, _S: PhantomData }
|
||||
}
|
||||
|
||||
fn accumulate_outputs(txn: &mut impl DbTxn, outputs: Vec<OutputFor<S>>, from_scanner: bool) {
|
||||
let mut outputs_by_key = HashMap::new();
|
||||
for output in outputs {
|
||||
@@ -59,13 +66,14 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, EffectedReceivedOutputs<S>>> Sched
|
||||
}
|
||||
}
|
||||
|
||||
fn aggregate_inputs(
|
||||
async fn aggregate_inputs(
|
||||
&self,
|
||||
txn: &mut impl DbTxn,
|
||||
block: &BlockFor<S>,
|
||||
key_for_change: KeyFor<S>,
|
||||
key: KeyFor<S>,
|
||||
coin: Coin,
|
||||
) -> Vec<EventualityFor<S>> {
|
||||
) -> Result<Vec<EventualityFor<S>>, <Self as SchedulerTrait<S>>::EphemeralError> {
|
||||
let mut eventualities = vec![];
|
||||
|
||||
let mut operating_costs = Db::<S>::operating_costs(txn, coin).0;
|
||||
@@ -74,13 +82,17 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, EffectedReceivedOutputs<S>>> Sched
|
||||
let to_aggregate = outputs.drain(.. P::MAX_INPUTS).collect::<Vec<_>>();
|
||||
Db::<S>::set_outputs(txn, key, coin, &outputs);
|
||||
|
||||
let Some(planned) = P::plan_transaction_with_fee_amortization(
|
||||
&mut operating_costs,
|
||||
P::fee_rate(block, coin),
|
||||
to_aggregate,
|
||||
vec![],
|
||||
Some(key_for_change),
|
||||
) else {
|
||||
let Some(planned) = self
|
||||
.planner
|
||||
.plan_transaction_with_fee_amortization(
|
||||
&mut operating_costs,
|
||||
P::fee_rate(block, coin),
|
||||
to_aggregate,
|
||||
vec![],
|
||||
Some(key_for_change),
|
||||
)
|
||||
.await?
|
||||
else {
|
||||
continue;
|
||||
};
|
||||
|
||||
@@ -93,7 +105,7 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, EffectedReceivedOutputs<S>>> Sched
|
||||
}
|
||||
|
||||
Db::<S>::set_operating_costs(txn, coin, Amount(operating_costs));
|
||||
eventualities
|
||||
Ok(eventualities)
|
||||
}
|
||||
|
||||
fn fulfillable_payments(
|
||||
@@ -151,12 +163,13 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, EffectedReceivedOutputs<S>>> Sched
|
||||
}
|
||||
}
|
||||
|
||||
fn step(
|
||||
async fn step(
|
||||
&self,
|
||||
txn: &mut impl DbTxn,
|
||||
active_keys: &[(KeyFor<S>, LifetimeStage)],
|
||||
block: &BlockFor<S>,
|
||||
key: KeyFor<S>,
|
||||
) -> Vec<EventualityFor<S>> {
|
||||
) -> Result<Vec<EventualityFor<S>>, <Self as SchedulerTrait<S>>::EphemeralError> {
|
||||
let mut eventualities = vec![];
|
||||
|
||||
let key_for_change = match active_keys[0].1 {
|
||||
@@ -174,7 +187,8 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, EffectedReceivedOutputs<S>>> Sched
|
||||
let coin = *coin;
|
||||
|
||||
// Perform any input aggregation we should
|
||||
eventualities.append(&mut Self::aggregate_inputs(txn, block, key_for_change, key, coin));
|
||||
eventualities
|
||||
.append(&mut self.aggregate_inputs(txn, block, key_for_change, key, coin).await?);
|
||||
|
||||
// Fetch the operating costs/outputs
|
||||
let mut operating_costs = Db::<S>::operating_costs(txn, coin).0;
|
||||
@@ -211,15 +225,19 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, EffectedReceivedOutputs<S>>> Sched
|
||||
// scanner API)
|
||||
let mut planned_outer = None;
|
||||
for i in 0 .. 2 {
|
||||
let Some(planned) = P::plan_transaction_with_fee_amortization(
|
||||
&mut operating_costs,
|
||||
P::fee_rate(block, coin),
|
||||
outputs.clone(),
|
||||
tree[0]
|
||||
.payments::<S>(coin, &branch_address, tree[0].value())
|
||||
.expect("payments were dropped despite providing an input of the needed value"),
|
||||
Some(key_for_change),
|
||||
) else {
|
||||
let Some(planned) = self
|
||||
.planner
|
||||
.plan_transaction_with_fee_amortization(
|
||||
&mut operating_costs,
|
||||
P::fee_rate(block, coin),
|
||||
outputs.clone(),
|
||||
tree[0]
|
||||
.payments::<S>(coin, &branch_address, tree[0].value())
|
||||
.expect("payments were dropped despite providing an input of the needed value"),
|
||||
Some(key_for_change),
|
||||
)
|
||||
.await?
|
||||
else {
|
||||
// This should trip on the first iteration or not at all
|
||||
assert_eq!(i, 0);
|
||||
// This doesn't have inputs even worth aggregating so drop the entire tree
|
||||
@@ -300,14 +318,18 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, EffectedReceivedOutputs<S>>> Sched
|
||||
};
|
||||
|
||||
let branch_output_id = branch_output.id();
|
||||
let Some(mut planned) = P::plan_transaction_with_fee_amortization(
|
||||
// Uses 0 as there's no operating costs to incur/amortize here
|
||||
&mut 0,
|
||||
P::fee_rate(block, coin),
|
||||
vec![branch_output],
|
||||
payments,
|
||||
None,
|
||||
) else {
|
||||
let Some(mut planned) = self
|
||||
.planner
|
||||
.plan_transaction_with_fee_amortization(
|
||||
// Uses 0 as there's no operating costs to incur/amortize here
|
||||
&mut 0,
|
||||
P::fee_rate(block, coin),
|
||||
vec![branch_output],
|
||||
payments,
|
||||
None,
|
||||
)
|
||||
.await?
|
||||
else {
|
||||
// This Branch isn't viable, so drop it (and its children)
|
||||
continue;
|
||||
};
|
||||
@@ -328,49 +350,56 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, EffectedReceivedOutputs<S>>> Sched
|
||||
}
|
||||
}
|
||||
|
||||
eventualities
|
||||
Ok(eventualities)
|
||||
}
|
||||
|
||||
fn flush_outputs(
|
||||
async fn flush_outputs(
|
||||
&self,
|
||||
txn: &mut impl DbTxn,
|
||||
eventualities: &mut HashMap<Vec<u8>, Vec<EventualityFor<S>>>,
|
||||
eventualities: &mut KeyScopedEventualities<S>,
|
||||
block: &BlockFor<S>,
|
||||
from: KeyFor<S>,
|
||||
to: KeyFor<S>,
|
||||
coin: Coin,
|
||||
) {
|
||||
) -> Result<(), <Self as SchedulerTrait<S>>::EphemeralError> {
|
||||
let from_bytes = from.to_bytes().as_ref().to_vec();
|
||||
// Ensure our inputs are aggregated
|
||||
eventualities
|
||||
.entry(from_bytes.clone())
|
||||
.or_insert(vec![])
|
||||
.append(&mut Self::aggregate_inputs(txn, block, to, from, coin));
|
||||
.append(&mut self.aggregate_inputs(txn, block, to, from, coin).await?);
|
||||
|
||||
// Now that our inputs are aggregated, transfer all of them to the new key
|
||||
let mut operating_costs = Db::<S>::operating_costs(txn, coin).0;
|
||||
let outputs = Db::<S>::outputs(txn, from, coin).unwrap();
|
||||
if outputs.is_empty() {
|
||||
return;
|
||||
return Ok(());
|
||||
}
|
||||
let planned = P::plan_transaction_with_fee_amortization(
|
||||
&mut operating_costs,
|
||||
P::fee_rate(block, coin),
|
||||
outputs,
|
||||
vec![],
|
||||
Some(to),
|
||||
);
|
||||
let planned = self
|
||||
.planner
|
||||
.plan_transaction_with_fee_amortization(
|
||||
&mut operating_costs,
|
||||
P::fee_rate(block, coin),
|
||||
outputs,
|
||||
vec![],
|
||||
Some(to),
|
||||
)
|
||||
.await?;
|
||||
Db::<S>::set_operating_costs(txn, coin, Amount(operating_costs));
|
||||
let Some(planned) = planned else { return };
|
||||
let Some(planned) = planned else { return Ok(()) };
|
||||
|
||||
TransactionsToSign::<P::SignableTransaction>::send(txn, &from, &planned.signable);
|
||||
eventualities.get_mut(&from_bytes).unwrap().push(planned.eventuality);
|
||||
Self::accumulate_outputs(txn, planned.auxilliary.0, false);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: ScannerFeed, P: TransactionPlanner<S, EffectedReceivedOutputs<S>>> SchedulerTrait<S>
|
||||
for Scheduler<S, P>
|
||||
{
|
||||
type EphemeralError = P::EphemeralError;
|
||||
type SignableTransaction = P::SignableTransaction;
|
||||
|
||||
fn activate_key(txn: &mut impl DbTxn, key: KeyFor<S>) {
|
||||
@@ -383,29 +412,32 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, EffectedReceivedOutputs<S>>> Sched
|
||||
}
|
||||
|
||||
fn flush_key(
|
||||
&self,
|
||||
txn: &mut impl DbTxn,
|
||||
block: &BlockFor<S>,
|
||||
retiring_key: KeyFor<S>,
|
||||
new_key: KeyFor<S>,
|
||||
) -> HashMap<Vec<u8>, Vec<EventualityFor<S>>> {
|
||||
let mut eventualities = HashMap::new();
|
||||
for coin in S::NETWORK.coins() {
|
||||
// Move the payments to the new key
|
||||
{
|
||||
let still_queued = Db::<S>::queued_payments(txn, retiring_key, *coin).unwrap();
|
||||
let mut new_queued = Db::<S>::queued_payments(txn, new_key, *coin).unwrap();
|
||||
) -> impl Send + Future<Output = Result<KeyScopedEventualities<S>, Self::EphemeralError>> {
|
||||
async move {
|
||||
let mut eventualities = HashMap::new();
|
||||
for coin in S::NETWORK.coins() {
|
||||
// Move the payments to the new key
|
||||
{
|
||||
let still_queued = Db::<S>::queued_payments(txn, retiring_key, *coin).unwrap();
|
||||
let mut new_queued = Db::<S>::queued_payments(txn, new_key, *coin).unwrap();
|
||||
|
||||
let mut queued = still_queued;
|
||||
queued.append(&mut new_queued);
|
||||
let mut queued = still_queued;
|
||||
queued.append(&mut new_queued);
|
||||
|
||||
Db::<S>::set_queued_payments(txn, retiring_key, *coin, &[]);
|
||||
Db::<S>::set_queued_payments(txn, new_key, *coin, &queued);
|
||||
Db::<S>::set_queued_payments(txn, retiring_key, *coin, &[]);
|
||||
Db::<S>::set_queued_payments(txn, new_key, *coin, &queued);
|
||||
}
|
||||
|
||||
// Move the outputs to the new key
|
||||
self.flush_outputs(txn, &mut eventualities, block, retiring_key, new_key, *coin).await?;
|
||||
}
|
||||
|
||||
// Move the outputs to the new key
|
||||
Self::flush_outputs(txn, &mut eventualities, block, retiring_key, new_key, *coin);
|
||||
Ok(eventualities)
|
||||
}
|
||||
eventualities
|
||||
}
|
||||
|
||||
fn retire_key(txn: &mut impl DbTxn, key: KeyFor<S>) {
|
||||
@@ -418,121 +450,137 @@ impl<S: ScannerFeed, P: TransactionPlanner<S, EffectedReceivedOutputs<S>>> Sched
|
||||
}
|
||||
|
||||
fn update(
|
||||
&self,
|
||||
txn: &mut impl DbTxn,
|
||||
block: &BlockFor<S>,
|
||||
active_keys: &[(KeyFor<S>, LifetimeStage)],
|
||||
update: SchedulerUpdate<S>,
|
||||
) -> HashMap<Vec<u8>, Vec<EventualityFor<S>>> {
|
||||
Self::accumulate_outputs(txn, update.outputs().to_vec(), true);
|
||||
) -> impl Send + Future<Output = Result<KeyScopedEventualities<S>, Self::EphemeralError>> {
|
||||
async move {
|
||||
Self::accumulate_outputs(txn, update.outputs().to_vec(), true);
|
||||
|
||||
// Fulfill the payments we prior couldn't
|
||||
let mut eventualities = HashMap::new();
|
||||
for (key, _stage) in active_keys {
|
||||
assert!(eventualities
|
||||
.insert(key.to_bytes().as_ref().to_vec(), Self::step(txn, active_keys, block, *key))
|
||||
.is_none());
|
||||
}
|
||||
// Fulfill the payments we prior couldn't
|
||||
let mut eventualities = HashMap::new();
|
||||
for (key, _stage) in active_keys {
|
||||
assert!(eventualities
|
||||
.insert(key.to_bytes().as_ref().to_vec(), self.step(txn, active_keys, block, *key).await?)
|
||||
.is_none());
|
||||
}
|
||||
|
||||
// If this key has been flushed, forward all outputs
|
||||
match active_keys[0].1 {
|
||||
LifetimeStage::ActiveYetNotReporting |
|
||||
LifetimeStage::Active |
|
||||
LifetimeStage::UsingNewForChange => {}
|
||||
LifetimeStage::Forwarding | LifetimeStage::Finishing => {
|
||||
for coin in S::NETWORK.coins() {
|
||||
Self::flush_outputs(
|
||||
txn,
|
||||
&mut eventualities,
|
||||
block,
|
||||
active_keys[0].0,
|
||||
active_keys[1].0,
|
||||
*coin,
|
||||
);
|
||||
// If this key has been flushed, forward all outputs
|
||||
match active_keys[0].1 {
|
||||
LifetimeStage::ActiveYetNotReporting |
|
||||
LifetimeStage::Active |
|
||||
LifetimeStage::UsingNewForChange => {}
|
||||
LifetimeStage::Forwarding | LifetimeStage::Finishing => {
|
||||
for coin in S::NETWORK.coins() {
|
||||
self
|
||||
.flush_outputs(
|
||||
txn,
|
||||
&mut eventualities,
|
||||
block,
|
||||
active_keys[0].0,
|
||||
active_keys[1].0,
|
||||
*coin,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create the transactions for the forwards/burns
|
||||
{
|
||||
let mut planned_txs = vec![];
|
||||
for forward in update.forwards() {
|
||||
let key = forward.key();
|
||||
// Create the transactions for the forwards/burns
|
||||
{
|
||||
let mut planned_txs = vec![];
|
||||
for forward in update.forwards() {
|
||||
let key = forward.key();
|
||||
|
||||
assert_eq!(active_keys.len(), 2);
|
||||
assert_eq!(active_keys[0].1, LifetimeStage::Forwarding);
|
||||
assert_eq!(active_keys[1].1, LifetimeStage::Active);
|
||||
let forward_to_key = active_keys[1].0;
|
||||
assert_eq!(active_keys.len(), 2);
|
||||
assert_eq!(active_keys[0].1, LifetimeStage::Forwarding);
|
||||
assert_eq!(active_keys[1].1, LifetimeStage::Active);
|
||||
let forward_to_key = active_keys[1].0;
|
||||
|
||||
let Some(plan) = P::plan_transaction_with_fee_amortization(
|
||||
// This uses 0 for the operating costs as we don't incur any here
|
||||
// If the output can't pay for itself to be forwarded, we simply drop it
|
||||
&mut 0,
|
||||
P::fee_rate(block, forward.balance().coin),
|
||||
vec![forward.clone()],
|
||||
vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance(), None)],
|
||||
None,
|
||||
) else {
|
||||
continue;
|
||||
};
|
||||
planned_txs.push((key, plan));
|
||||
let Some(plan) = self
|
||||
.planner
|
||||
.plan_transaction_with_fee_amortization(
|
||||
// This uses 0 for the operating costs as we don't incur any here
|
||||
// If the output can't pay for itself to be forwarded, we simply drop it
|
||||
&mut 0,
|
||||
P::fee_rate(block, forward.balance().coin),
|
||||
vec![forward.clone()],
|
||||
vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance(), None)],
|
||||
None,
|
||||
)
|
||||
.await?
|
||||
else {
|
||||
continue;
|
||||
};
|
||||
planned_txs.push((key, plan));
|
||||
}
|
||||
for to_return in update.returns() {
|
||||
let key = to_return.output().key();
|
||||
let out_instruction =
|
||||
Payment::new(to_return.address().clone(), to_return.output().balance(), None);
|
||||
let Some(plan) = self
|
||||
.planner
|
||||
.plan_transaction_with_fee_amortization(
|
||||
// This uses 0 for the operating costs as we don't incur any here
|
||||
// If the output can't pay for itself to be returned, we simply drop it
|
||||
&mut 0,
|
||||
P::fee_rate(block, out_instruction.balance().coin),
|
||||
vec![to_return.output().clone()],
|
||||
vec![out_instruction],
|
||||
None,
|
||||
)
|
||||
.await?
|
||||
else {
|
||||
continue;
|
||||
};
|
||||
planned_txs.push((key, plan));
|
||||
}
|
||||
|
||||
for (key, planned_tx) in planned_txs {
|
||||
// Send the transactions off for signing
|
||||
TransactionsToSign::<P::SignableTransaction>::send(txn, &key, &planned_tx.signable);
|
||||
|
||||
// Insert the Eventualities into the result
|
||||
eventualities.get_mut(key.to_bytes().as_ref()).unwrap().push(planned_tx.eventuality);
|
||||
}
|
||||
|
||||
Ok(eventualities)
|
||||
}
|
||||
for to_return in update.returns() {
|
||||
let key = to_return.output().key();
|
||||
let out_instruction =
|
||||
Payment::new(to_return.address().clone(), to_return.output().balance(), None);
|
||||
let Some(plan) = P::plan_transaction_with_fee_amortization(
|
||||
// This uses 0 for the operating costs as we don't incur any here
|
||||
// If the output can't pay for itself to be returned, we simply drop it
|
||||
&mut 0,
|
||||
P::fee_rate(block, out_instruction.balance().coin),
|
||||
vec![to_return.output().clone()],
|
||||
vec![out_instruction],
|
||||
None,
|
||||
) else {
|
||||
continue;
|
||||
};
|
||||
planned_txs.push((key, plan));
|
||||
}
|
||||
|
||||
for (key, planned_tx) in planned_txs {
|
||||
// Send the transactions off for signing
|
||||
TransactionsToSign::<P::SignableTransaction>::send(txn, &key, &planned_tx.signable);
|
||||
|
||||
// Insert the Eventualities into the result
|
||||
eventualities.get_mut(key.to_bytes().as_ref()).unwrap().push(planned_tx.eventuality);
|
||||
}
|
||||
|
||||
eventualities
|
||||
}
|
||||
}
|
||||
|
||||
fn fulfill(
|
||||
&self,
|
||||
txn: &mut impl DbTxn,
|
||||
block: &BlockFor<S>,
|
||||
active_keys: &[(KeyFor<S>, LifetimeStage)],
|
||||
payments: Vec<Payment<AddressFor<S>>>,
|
||||
) -> HashMap<Vec<u8>, Vec<EventualityFor<S>>> {
|
||||
// Find the key to fulfill these payments with
|
||||
let fulfillment_key = match active_keys[0].1 {
|
||||
LifetimeStage::ActiveYetNotReporting => {
|
||||
panic!("expected to fulfill payments despite not reporting for the oldest key")
|
||||
) -> impl Send + Future<Output = Result<KeyScopedEventualities<S>, Self::EphemeralError>> {
|
||||
async move {
|
||||
// Find the key to fulfill these payments with
|
||||
let fulfillment_key = match active_keys[0].1 {
|
||||
LifetimeStage::ActiveYetNotReporting => {
|
||||
panic!("expected to fulfill payments despite not reporting for the oldest key")
|
||||
}
|
||||
LifetimeStage::Active | LifetimeStage::UsingNewForChange => active_keys[0].0,
|
||||
LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0,
|
||||
};
|
||||
|
||||
// Queue the payments for this key
|
||||
for coin in S::NETWORK.coins() {
|
||||
let mut queued_payments = Db::<S>::queued_payments(txn, fulfillment_key, *coin).unwrap();
|
||||
queued_payments
|
||||
.extend(payments.iter().filter(|payment| payment.balance().coin == *coin).cloned());
|
||||
Db::<S>::set_queued_payments(txn, fulfillment_key, *coin, &queued_payments);
|
||||
}
|
||||
LifetimeStage::Active | LifetimeStage::UsingNewForChange => active_keys[0].0,
|
||||
LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0,
|
||||
};
|
||||
|
||||
// Queue the payments for this key
|
||||
for coin in S::NETWORK.coins() {
|
||||
let mut queued_payments = Db::<S>::queued_payments(txn, fulfillment_key, *coin).unwrap();
|
||||
queued_payments
|
||||
.extend(payments.iter().filter(|payment| payment.balance().coin == *coin).cloned());
|
||||
Db::<S>::set_queued_payments(txn, fulfillment_key, *coin, &queued_payments);
|
||||
// Handle the queued payments
|
||||
Ok(HashMap::from([(
|
||||
fulfillment_key.to_bytes().as_ref().to_vec(),
|
||||
self.step(txn, active_keys, block, fulfillment_key).await?,
|
||||
)]))
|
||||
}
|
||||
|
||||
// Handle the queued payments
|
||||
HashMap::from([(
|
||||
fulfillment_key.to_bytes().as_ref().to_vec(),
|
||||
Self::step(txn, active_keys, block, fulfillment_key),
|
||||
)])
|
||||
}
|
||||
}
|
||||
|
||||