diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 33f2e852..ca0bd4f5 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -42,9 +42,10 @@ jobs:
             -p serai-processor-key-gen \
             -p serai-processor-frost-attempt-manager \
             -p serai-processor-primitives \
+            -p serai-processor-scanner \
+            -p serai-processor-scheduler-primitives \
             -p serai-processor-utxo-scheduler-primitives \
             -p serai-processor-transaction-chaining-scheduler \
-            -p serai-processor-scanner \
             -p serai-processor \
             -p tendermint-machine \
             -p tributary-chain \
diff --git a/Cargo.lock b/Cargo.lock
index 7512f35c..6e7ced07 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8679,6 +8679,16 @@ dependencies = [
  "tokio",
 ]
 
+[[package]]
+name = "serai-processor-scheduler-primitives"
+version = "0.1.0"
+dependencies = [
+ "borsh",
+ "group",
+ "parity-scale-codec",
+ "serai-db",
+]
+
 [[package]]
 name = "serai-processor-tests"
 version = "0.1.0"
@@ -8715,11 +8725,11 @@ dependencies = [
  "borsh",
  "group",
  "parity-scale-codec",
- "serai-coins-primitives",
  "serai-db",
  "serai-primitives",
  "serai-processor-primitives",
  "serai-processor-scanner",
+ "serai-processor-scheduler-primitives",
  "serai-processor-utxo-scheduler-primitives",
 ]
 
diff --git a/Cargo.toml b/Cargo.toml
index 17435713..b61cde68 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -74,9 +74,10 @@ members = [
   "processor/frost-attempt-manager",
   "processor/primitives",
+  "processor/scanner",
+  "processor/scheduler/primitives",
   "processor/scheduler/utxo/primitives",
   "processor/scheduler/utxo/transaction-chaining",
-  "processor/scanner",
   "processor",
 
   "coordinator/tributary/tendermint",
diff --git a/deny.toml b/deny.toml
index fb616244..2ca0ca50 100644
--- a/deny.toml
+++ b/deny.toml
@@ -49,9 +49,10 @@ exceptions = [
   { allow = ["AGPL-3.0"], name = "serai-processor-key-gen" },
   { allow = ["AGPL-3.0"], name = "serai-processor-frost-attempt-manager" },
-  { allow = ["AGPL-3.0"], name = "serai-processor-utxo-primitives" },
-  { allow = ["AGPL-3.0"], name = "serai-processor-transaction-chaining-scheduler" },
   { allow = ["AGPL-3.0"], name = "serai-processor-scanner" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-scheduler-primitives" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-utxo-scheduler-primitives" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-transaction-chaining-scheduler" },
   { allow = ["AGPL-3.0"], name = "serai-processor" },
 
   { allow = ["AGPL-3.0"], name = "tributary-chain" },
diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs
index 4d33d0d0..d894f819 100644
--- a/processor/scanner/src/lib.rs
+++ b/processor/scanner/src/lib.rs
@@ -241,8 +241,12 @@ pub trait Scheduler<S: ScannerFeed>: 'static + Send {
   ///
   /// When a key is activated, the existing multisig should retain its outputs and utility for a
   /// certain time period. With `flush_key`, all outputs should be directed towards fulfilling some
-  /// obligation or the `new_key`. Every output MUST be connected to an Eventuality. If a key no
-  /// longer has active Eventualities, it MUST be able to be retired.
+  /// obligation or the `new_key`. Every output held by the retiring key MUST be connected to an
+  /// Eventuality. If a key no longer has active Eventualities, it MUST be able to be retired
+  /// without losing any coins.
+  ///
+  /// If the retiring key has any unfulfilled payments associated with it, those MUST be made
+  /// the responsibility of the new key.
   fn flush_key(&mut self, txn: &mut impl DbTxn, retiring_key: KeyFor<S>, new_key: KeyFor<S>);
 
   /// Retire a key as it'll no longer be used.
diff --git a/processor/scheduler/primitives/Cargo.toml b/processor/scheduler/primitives/Cargo.toml
new file mode 100644
index 00000000..31d73853
--- /dev/null
+++ b/processor/scheduler/primitives/Cargo.toml
@@ -0,0 +1,25 @@
+[package]
+name = "serai-processor-scheduler-primitives"
+version = "0.1.0"
+description = "Primitives for schedulers for the Serai processor"
+license = "AGPL-3.0-only"
+repository = "https://github.com/serai-dex/serai/tree/develop/processor/scheduler/primitives"
+authors = ["Luke Parker <lukeparker5132@gmail.com>"]
+keywords = []
+edition = "2021"
+publish = false
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
+[lints]
+workspace = true
+
+[dependencies]
+group = { version = "0.13", default-features = false }
+
+scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] }
+borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
+
+serai-db = { path = "../../../common/db" }
diff --git a/processor/scheduler/primitives/LICENSE b/processor/scheduler/primitives/LICENSE
new file mode 100644
index 00000000..e091b149
--- /dev/null
+++ b/processor/scheduler/primitives/LICENSE
@@ -0,0 +1,15 @@
+AGPL-3.0-only license
+
+Copyright (c) 2024 Luke Parker
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License Version 3 as
+published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
diff --git a/processor/scheduler/primitives/README.md b/processor/scheduler/primitives/README.md
new file mode 100644
index 00000000..6e81249d
--- /dev/null
+++ b/processor/scheduler/primitives/README.md
@@ -0,0 +1,3 @@
+# Scheduler Primitives
+
+Primitives for schedulers.
diff --git a/processor/scheduler/primitives/src/lib.rs b/processor/scheduler/primitives/src/lib.rs
new file mode 100644
index 00000000..97a00c03
--- /dev/null
+++ b/processor/scheduler/primitives/src/lib.rs
@@ -0,0 +1,48 @@
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![doc = include_str!("../README.md")]
+#![deny(missing_docs)]
+
+use core::marker::PhantomData;
+use std::io;
+
+use group::GroupEncoding;
+
+use serai_db::DbTxn;
+
+/// A signable transaction.
+pub trait SignableTransaction: 'static + Sized + Send + Sync {
+  /// Read a `SignableTransaction`.
+  fn read(reader: &mut impl io::Read) -> io::Result<Self>;
+  /// Write a `SignableTransaction`.
+  fn write(&self, writer: &mut impl io::Write) -> io::Result<()>;
+}
+
+mod db {
+  use serai_db::{Get, DbTxn, create_db, db_channel};
+
+  db_channel! {
+    SchedulerPrimitives {
+      TransactionsToSign: (key: &[u8]) -> Vec<u8>,
+    }
+  }
+}
+
+/// The transactions to sign, as scheduled by a Scheduler.
+pub struct TransactionsToSign<T: SignableTransaction>(PhantomData<T>);
+impl<T: SignableTransaction> TransactionsToSign<T> {
+  /// Send a transaction to sign.
+  pub fn send(txn: &mut impl DbTxn, key: &impl GroupEncoding, tx: &T) {
+    let mut buf = Vec::with_capacity(128);
+    tx.write(&mut buf).unwrap();
+    db::TransactionsToSign::send(txn, key.to_bytes().as_ref(), &buf);
+  }
+
+  /// Try to receive a transaction to sign.
+  pub fn try_recv(txn: &mut impl DbTxn, key: &impl GroupEncoding) -> Option<T> {
+    let tx = db::TransactionsToSign::try_recv(txn, key.to_bytes().as_ref())?;
+    let mut tx = tx.as_slice();
+    let res = T::read(&mut tx).unwrap();
+    assert!(tx.is_empty());
+    Some(res)
+  }
+}
diff --git a/processor/scheduler/utxo/transaction-chaining/Cargo.toml b/processor/scheduler/utxo/transaction-chaining/Cargo.toml
index d54d0f85..a6b12128 100644
--- a/processor/scheduler/utxo/transaction-chaining/Cargo.toml
+++ b/processor/scheduler/utxo/transaction-chaining/Cargo.toml
@@ -26,10 +26,10 @@ scale = { package = "parity-scale-codec", version = "3", default-features = fals
 borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
 
 serai-primitives = { path = "../../../../substrate/primitives", default-features = false, features = ["std"] }
-serai-coins-primitives = { path = "../../../../substrate/coins/primitives", default-features = false, features = ["std"] }
 
 serai-db = { path = "../../../../common/db" }
 
 primitives = { package = "serai-processor-primitives", path = "../../../primitives" }
-scheduler-primitives = { package = "serai-processor-utxo-scheduler-primitives", path = "../primitives" }
+scheduler-primitives = { package = "serai-processor-scheduler-primitives", path = "../../primitives" }
+utxo-scheduler-primitives = { package = "serai-processor-utxo-scheduler-primitives", path = "../primitives" }
 scanner = { package = "serai-processor-scanner", path = "../../../scanner" }
diff --git a/processor/scheduler/utxo/transaction-chaining/src/db.rs b/processor/scheduler/utxo/transaction-chaining/src/db.rs
index 20c574e9..f6de26d1 100644
--- a/processor/scheduler/utxo/transaction-chaining/src/db.rs
+++ b/processor/scheduler/utxo/transaction-chaining/src/db.rs
@@ -2,7 +2,7 @@ use core::marker::PhantomData;
 
 use group::GroupEncoding;
 
-use serai_primitives::Coin;
+use serai_primitives::{Coin, Amount};
 
 use serai_db::{Get, DbTxn, create_db};
 
@@ -11,12 +11,23 @@ use scanner::{ScannerFeed, KeyFor, OutputFor};
 
 create_db! {
   TransactionChainingScheduler {
+    OperatingCosts: (coin: Coin) -> Amount,
     SerializedOutputs: (key: &[u8], coin: Coin) -> Vec<u8>,
+    // We should be immediately able to schedule the fulfillment of payments, yet this may not be
+    // possible if we're in the middle of a multisig rotation (as our output set will be split)
+    SerializedQueuedPayments: (key: &[u8]) -> Vec<u8>,
   }
 }
 
 pub(crate) struct Db<S: ScannerFeed>(PhantomData<S>);
 impl<S: ScannerFeed> Db<S> {
+  pub(crate) fn operating_costs(getter: &impl Get, coin: Coin) -> Amount {
+    OperatingCosts::get(getter, coin).unwrap_or(Amount(0))
+  }
+  pub(crate) fn set_operating_costs(txn: &mut impl DbTxn, coin: Coin, amount: Amount) {
+    OperatingCosts::set(txn, coin, &amount)
+  }
+
   pub(crate) fn outputs(
     getter: &impl Get,
     key: KeyFor<S>,
@@ -46,4 +57,17 @@ impl<S: ScannerFeed> Db<S> {
   pub(crate) fn del_outputs(txn: &mut impl DbTxn, key: KeyFor<S>, coin: Coin) {
     SerializedOutputs::del(txn, key.to_bytes().as_ref(), coin);
   }
+
+  pub(crate) fn queued_payments(
+    getter: &impl Get,
+    key: KeyFor<S>,
+  ) -> Option<Vec<Payment<AddressFor<S>>>> {
+    todo!("TODO")
+  }
+  pub(crate) fn set_queued_payments(txn: &mut impl DbTxn, key: KeyFor<S>, queued: Vec<Payment<AddressFor<S>>>) {
+    todo!("TODO")
+  }
+  pub(crate) fn del_queued_payments(txn: &mut impl DbTxn, key: KeyFor<S>) {
+    SerializedQueuedPayments::del(txn, key.to_bytes().as_ref());
+  }
 }
diff --git a/processor/scheduler/utxo/transaction-chaining/src/lib.rs b/processor/scheduler/utxo/transaction-chaining/src/lib.rs
index 63635696..8f21e9d6 100644
--- a/processor/scheduler/utxo/transaction-chaining/src/lib.rs
+++ b/processor/scheduler/utxo/transaction-chaining/src/lib.rs
@@ -5,6 +5,8 @@ use core::marker::PhantomData;
 use std::collections::HashMap;
 
+use group::GroupEncoding;
+
 use serai_primitives::Coin;
 
 use serai_db::DbTxn;
@@ -15,6 +17,7 @@ use scanner::{
   Scheduler as SchedulerTrait,
 };
 use scheduler_primitives::*;
+use utxo_scheduler_primitives::*;
 
 mod db;
 use db::Db;
@@ -25,7 +28,7 @@ pub struct PlannedTransaction<S: ScannerFeed, T> {
   signable: T,
   /// The outputs we'll receive from this.
   effected_received_outputs: OutputFor<S>,
-  /// The Evtnuality to watch for.
+  /// The Eventuality to watch for.
   eventuality: EventualityFor<S>,
 }
@@ -60,13 +63,13 @@ impl<
     >,
   > SchedulerTrait<S> for Scheduler<S, T, P>
 {
   fn activate_key(&mut self, txn: &mut impl DbTxn, key: KeyFor<S>) {
     for coin in S::NETWORK.coins() {
-      Db::<S>::set_outputs(txn, key, *coin, &vec![]);
+      Db::<S>::set_outputs(txn, key, *coin, &[]);
     }
   }
@@ -98,22 +101,27 @@ impl<
     {
       let mut planned_txs = vec![];
       for forward in update.forwards() {
-        let forward_to_key = active_keys.last().unwrap();
-        assert_eq!(forward_to_key.1, LifetimeStage::Active);
+        let key = forward.key();
+
+        assert_eq!(active_keys.len(), 2);
+        assert_eq!(active_keys[0].1, LifetimeStage::Forwarding);
+        assert_eq!(active_keys[1].1, LifetimeStage::Active);
+        let forward_to_key = active_keys[1].0;
         let Some(plan) = P::plan_transaction_with_fee_amortization(
           // This uses 0 for the operating costs as we don't incur any here
           &mut 0,
           fee_rates[&forward.balance().coin],
           vec![forward.clone()],
-          vec![Payment::new(P::forwarding_address(forward_to_key.0), forward.balance(), None)],
+          vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance(), None)],
           None,
         ) else {
           continue;
         };
-        planned_txs.push(plan);
+        planned_txs.push((key, plan));
       }
 
       for to_return in update.returns() {
+        let key = to_return.output().key();
         let out_instruction =
           Payment::new(to_return.address().clone(), to_return.output().balance(), None);
         let Some(plan) = P::plan_transaction_with_fee_amortization(
@@ -126,12 +134,24 @@
         ) else {
           continue;
         };
-        planned_txs.push(plan);
+        planned_txs.push((key, plan));
       }
 
-      // TODO: Send the transactions off for signing
-      // TODO: Return the eventualities
-      todo!("TODO")
+      let mut eventualities = HashMap::new();
+      for (key, planned_tx) in planned_txs {
+        // Send the transactions off for signing
+        TransactionsToSign::<T>::send(txn, &key, &planned_tx.signable);
+
+        // Insert the eventualities into the result
+        eventualities
+          .entry(key.to_bytes().as_ref().to_vec())
+          .or_insert(Vec::with_capacity(1))
+          .push(planned_tx.eventuality);
+      }
+
+      // TODO: Fulfill any payments we prior couldn't
+
+      eventualities
     }
   }
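
Note (illustrative only, not part of the patch): the sketch below shows how a network integration might implement the new `SignableTransaction` trait and use the `TransactionsToSign` channel from `serai-processor-scheduler-primitives`. `MySignableTx`, its length-prefixed encoding, and the `queue_for_signing`/`drain_for_signing` helpers are hypothetical names; the `scheduler_primitives` import assumes the dependency rename this patch applies in the transaction-chaining scheduler's Cargo.toml.

// Hypothetical example, not from the patch: a stand-in signable transaction and the two
// sides of the TransactionsToSign channel added by this patch.
use std::io;

use group::GroupEncoding;
use serai_db::DbTxn;
use scheduler_primitives::{SignableTransaction, TransactionsToSign};

struct MySignableTx {
  // Assumed for brevity: the network-specific transaction, already serialized
  serialized: Vec<u8>,
}

impl SignableTransaction for MySignableTx {
  fn read(reader: &mut impl io::Read) -> io::Result<Self> {
    // Assumed length-prefixed encoding, purely for this sketch
    let mut len = [0; 4];
    reader.read_exact(&mut len)?;
    let mut serialized = vec![0; usize::try_from(u32::from_le_bytes(len)).unwrap()];
    reader.read_exact(&mut serialized)?;
    Ok(Self { serialized })
  }
  fn write(&self, writer: &mut impl io::Write) -> io::Result<()> {
    writer.write_all(&u32::try_from(self.serialized.len()).unwrap().to_le_bytes())?;
    writer.write_all(&self.serialized)
  }
}

// Scheduler side: queue a planned transaction under the key it was planned for.
fn queue_for_signing(txn: &mut impl DbTxn, key: &impl GroupEncoding, tx: &MySignableTx) {
  TransactionsToSign::<MySignableTx>::send(txn, key, tx);
}

// Signer side: drain every transaction queued under our key.
fn drain_for_signing(txn: &mut impl DbTxn, key: &impl GroupEncoding) -> Vec<MySignableTx> {
  let mut txs = vec![];
  while let Some(tx) = TransactionsToSign::<MySignableTx>::try_recv(txn, key) {
    txs.push(tx);
  }
  txs
}

Because the channel is keyed by the group encoding of the multisig key, a signer only ever receives transactions planned for the key it actually holds.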
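Also illustrative rather than part of the patch: one way the transaction-chaining scheduler could later satisfy the strengthened `flush_key` documentation (unfulfilled payments become the new key's responsibility) using the queued-payments entries added to its database layer. This sketch assumes it lives inside that crate and that the `queued_payments`/`set_queued_payments`/`del_queued_payments` helpers, whose bodies are still `todo!`, keep the signatures shown in db.rs above; the function name is hypothetical.

// Hypothetical sketch, inside processor/scheduler/utxo/transaction-chaining: move any payments
// still queued under the retiring key so they're queued under (and fulfilled by) the new key.
use serai_db::DbTxn;

use scanner::{ScannerFeed, KeyFor};

use crate::db::Db;

fn flush_queued_payments<S: ScannerFeed>(
  txn: &mut impl DbTxn,
  retiring_key: KeyFor<S>,
  new_key: KeyFor<S>,
) {
  // Take whatever the retiring key still owes
  let Some(still_queued) = Db::<S>::queued_payments(txn, retiring_key) else { return };
  Db::<S>::del_queued_payments(txn, retiring_key);

  // Append it to the new key's queue so the new key becomes responsible for it
  let mut queued = Db::<S>::queued_payments(txn, new_key).unwrap_or(vec![]);
  queued.extend(still_queued);
  Db::<S>::set_queued_payments(txn, new_key, queued);
}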