From 59ff944152b7b4f8e34fad75d6f81513bf4ec7db Mon Sep 17 00:00:00 2001
From: Luke Parker
Date: Sat, 7 Sep 2024 03:33:26 -0400
Subject: [PATCH] Work on the higher-level signers API

---
 Cargo.lock                               |   1 +
 processor/signers/Cargo.toml             |   1 +
 processor/signers/src/db.rs              |   8 ++
 processor/signers/src/lib.rs             | 137 +++++++++++++++++++++--
 processor/signers/src/transaction/mod.rs |  38 +++----
 5 files changed, 153 insertions(+), 32 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 768191b4..b960db4d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8735,6 +8735,7 @@ dependencies = [
  "serai-processor-scheduler-primitives",
  "serai-validator-sets-primitives",
  "tokio",
+ "zeroize",
 ]
 
 [[package]]
diff --git a/processor/signers/Cargo.toml b/processor/signers/Cargo.toml
index 06d64da2..3a96c043 100644
--- a/processor/signers/Cargo.toml
+++ b/processor/signers/Cargo.toml
@@ -21,6 +21,7 @@ workspace = true
 
 [dependencies]
 async-trait = { version = "0.1", default-features = false }
+zeroize = { version = "1", default-features = false, features = ["std"] }
 
 ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std"] }
 frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false }
diff --git a/processor/signers/src/db.rs b/processor/signers/src/db.rs
index 5ba5f7d4..9975cbda 100644
--- a/processor/signers/src/db.rs
+++ b/processor/signers/src/db.rs
@@ -4,6 +4,14 @@ use serai_db::{Get, DbTxn, create_db, db_channel};
 
 use messages::sign::{ProcessorMessage, CoordinatorMessage};
 
+create_db! {
+  SignersGlobal {
+    RegisteredKeys: () -> Vec<Session>,
+    SerializedKeys: (session: Session) -> Vec<u8>,
+    LatestRetiredSession: () -> Session,
+  }
+}
+
 db_channel! {
   SignersGlobal {
     // CompletedEventualities needs to be handled by each signer, meaning we need to turn its
diff --git a/processor/signers/src/lib.rs b/processor/signers/src/lib.rs
index eb09440d..9bc2459d 100644
--- a/processor/signers/src/lib.rs
+++ b/processor/signers/src/lib.rs
@@ -2,11 +2,18 @@
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
 
-use core::fmt::Debug;
+use core::{fmt::Debug, marker::PhantomData};
 
-use frost::sign::PreprocessMachine;
+use zeroize::Zeroizing;
 
-use scheduler::SignableTransaction;
+use serai_validator_sets_primitives::Session;
+
+use ciphersuite::{group::GroupEncoding, Ristretto};
+use frost::dkg::{ThresholdCore, ThresholdKeys};
+
+use serai_db::{DbTxn, Db};
+
+use scheduler::{Transaction, SignableTransaction, TransactionsToSign};
 
 pub(crate) mod db;
 
@@ -14,7 +21,7 @@ mod transaction;
 
 /// An object capable of publishing a transaction.
 #[async_trait::async_trait]
-pub trait TransactionPublisher<S: SignableTransaction>: 'static + Send + Sync {
+pub trait TransactionPublisher<T: Transaction>: 'static + Send + Sync {
   /// An error encountered when publishing a transaction.
   ///
   /// This MUST be an ephemeral error. Retrying publication MUST eventually resolve without manual
@@ -28,10 +35,124 @@ pub trait TransactionPublisher<S: SignableTransaction>: 'static + Send + Sync {
   ///
   /// The transaction already being present in the mempool/on-chain MUST NOT be considered an
   /// error.
-  async fn publish(
-    &self,
-    tx: <S::PreprocessMachine as PreprocessMachine>::Signature,
-  ) -> Result<(), Self::EphemeralError>;
+  async fn publish(&self, tx: T) -> Result<(), Self::EphemeralError>;
+}
+
+/// The signers used by a processor.
+pub struct Signers<ST: SignableTransaction>(PhantomData<ST>);
+
+/*
+  This is completely outside of consensus, so the worst that can happen is:
+
+  1) Leakage of a private key, hence the usage of frost-attempt-manager which has an API to ensure
+     that doesn't happen
+  2) The database isn't perfectly cleaned up (leaving some bytes on disk wasted)
+  3) The state isn't perfectly cleaned up (leaving some bytes in RAM wasted)
+
+  The last two are notably possible via a series of race conditions. For example, if an Eventuality
+  completion comes in *before* we registered a key, the signer will hold the signing protocol in
+  memory until the session is retired entirely.
+*/
+impl<ST: SignableTransaction> Signers<ST> {
+  /// Initialize the signers.
+  ///
+  /// This will spawn tasks for any historically registered keys.
+  pub fn new(db: impl Db) -> Self {
+    for session in db::RegisteredKeys::get(&db).unwrap_or(vec![]) {
+      let buf = db::SerializedKeys::get(&db, session).unwrap();
+      let mut buf = buf.as_slice();
+
+      let mut substrate_keys = vec![];
+      let mut external_keys = vec![];
+      while !buf.is_empty() {
+        substrate_keys
+          .push(ThresholdKeys::from(ThresholdCore::<Ristretto>::read(&mut buf).unwrap()));
+        external_keys
+          .push(ThresholdKeys::from(ThresholdCore::<ST::Ciphersuite>::read(&mut buf).unwrap()));
+      }
+
+      todo!("TODO")
+    }
+
+    todo!("TODO")
+  }
+
+  /// Register a set of keys to sign with.
+  ///
+  /// If this session (or a session after it) has already been retired, this is a NOP.
+  pub fn register_keys(
+    &mut self,
+    txn: &mut impl DbTxn,
+    session: Session,
+    substrate_keys: Vec<ThresholdKeys<Ristretto>>,
+    network_keys: Vec<ThresholdKeys<ST::Ciphersuite>>,
+  ) {
+    if Some(session.0) <= db::LatestRetiredSession::get(txn).map(|session| session.0) {
+      return;
+    }
+
+    {
+      let mut sessions = db::RegisteredKeys::get(txn).unwrap_or_else(|| Vec::with_capacity(1));
+      sessions.push(session);
+      db::RegisteredKeys::set(txn, &sessions);
+    }
+
+    {
+      let mut buf = Zeroizing::new(Vec::with_capacity(2 * substrate_keys.len() * 128));
+      for (substrate_keys, network_keys) in substrate_keys.into_iter().zip(network_keys) {
+        buf.extend(&*substrate_keys.serialize());
+        buf.extend(&*network_keys.serialize());
+      }
+      db::SerializedKeys::set(txn, session, &buf);
+    }
+  }
+
+  /// Retire the signers for a session.
+  ///
+  /// This MUST be called in order, for every session (even if we didn't register keys for this
+  /// session).
+  pub fn retire_session(
+    &mut self,
+    txn: &mut impl DbTxn,
+    session: Session,
+    external_key: &impl GroupEncoding,
+  ) {
+    // Update the latest retired session
+    {
+      let next_to_retire =
+        db::LatestRetiredSession::get(txn).map_or(Session(0), |session| Session(session.0 + 1));
+      assert_eq!(session, next_to_retire);
+      db::LatestRetiredSession::set(txn, &session);
+    }
+
+    // Kill the tasks
+    todo!("TODO");
+
+    // Update RegisteredKeys/SerializedKeys
+    if let Some(registered) = db::RegisteredKeys::get(txn) {
+      db::RegisteredKeys::set(
+        txn,
+        &registered.into_iter().filter(|session_i| *session_i != session).collect(),
+      );
+    }
+    db::SerializedKeys::del(txn, session);
+
+    // Drain the transactions to sign
+    // Presumably, TransactionsToSign will be fully populated before retiry occurs, making this
+    // perfect in not leaving any pending blobs behind
+    while TransactionsToSign::<ST>::try_recv(txn, external_key).is_some() {}
+
+    // Drain our DB channels
+    while db::CompletedEventualitiesForEachKey::try_recv(txn, session).is_some() {}
+    while db::CoordinatorToTransactionSignerMessages::try_recv(txn, session).is_some() {}
+    while db::TransactionSignerToCoordinatorMessages::try_recv(txn, session).is_some() {}
+    while db::CoordinatorToBatchSignerMessages::try_recv(txn, session).is_some() {}
+    while db::BatchSignerToCoordinatorMessages::try_recv(txn, session).is_some() {}
+    while db::CoordinatorToSlashReportSignerMessages::try_recv(txn, session).is_some() {}
+    while db::SlashReportSignerToCoordinatorMessages::try_recv(txn, session).is_some() {}
+    while db::CoordinatorToCosignerMessages::try_recv(txn, session).is_some() {}
+    while db::CosignerToCoordinatorMessages::try_recv(txn, session).is_some() {}
+  }
 }
 
 /*
diff --git a/processor/signers/src/transaction/mod.rs b/processor/signers/src/transaction/mod.rs
index b638eac0..8fdf8145 100644
--- a/processor/signers/src/transaction/mod.rs
+++ b/processor/signers/src/transaction/mod.rs
@@ -11,7 +11,6 @@ use serai_db::{DbTxn, Db};
 
 use primitives::task::ContinuallyRan;
 
 use scheduler::{Transaction, SignableTransaction, TransactionsToSign};
-use scanner::{ScannerFeed, Scheduler};
 
 use frost_attempt_manager::*;
@@ -26,40 +25,35 @@ use crate::{
 mod db;
 use db::*;
 
-type TransactionFor<S: ScannerFeed, Sch: Scheduler<S>> = <
-  <
-    <Sch as Scheduler<S>>::SignableTransaction as SignableTransaction
-  >::PreprocessMachine as PreprocessMachine
->::Signature;
+type TransactionFor<ST: SignableTransaction> =
+  <<ST as SignableTransaction>::PreprocessMachine as PreprocessMachine>::Signature;
 
 // Fetches transactions to sign and signs them.
 pub(crate) struct TransactionTask<
   D: Db,
-  S: ScannerFeed,
-  Sch: Scheduler<S>,
-  P: TransactionPublisher<Sch::SignableTransaction>,
+  ST: SignableTransaction,
+  P: TransactionPublisher<TransactionFor<ST>>,
 > {
   db: D,
   publisher: P,
 
   session: Session,
-  keys: Vec<ThresholdKeys<<Sch::SignableTransaction as SignableTransaction>::Ciphersuite>>,
+  keys: Vec<ThresholdKeys<ST::Ciphersuite>>,
 
   active_signing_protocols: HashSet<[u8; 32]>,
-  attempt_manager:
-    AttemptManager<D, <Sch::SignableTransaction as SignableTransaction>::PreprocessMachine>,
+  attempt_manager: AttemptManager<D, <ST as SignableTransaction>::PreprocessMachine>,
 
   last_publication: Instant,
 }
 
-impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>, P: TransactionPublisher<Sch::SignableTransaction>>
-  TransactionTask<D, S, Sch, P>
+impl<D: Db, ST: SignableTransaction, P: TransactionPublisher<TransactionFor<ST>>>
+  TransactionTask<D, ST, P>
 {
   pub(crate) fn new(
     db: D,
     publisher: P,
     session: Session,
-    keys: Vec<ThresholdKeys<<Sch::SignableTransaction as SignableTransaction>::Ciphersuite>>,
+    keys: Vec<ThresholdKeys<ST::Ciphersuite>>,
   ) -> Self {
     let mut active_signing_protocols = HashSet::new();
     let mut attempt_manager = AttemptManager::new(
@@ -74,8 +68,7 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>, P: TransactionPublisher<Sch::SignableTransaction>>
-      let signable_transaction =
-        <Sch as Scheduler<S>>::SignableTransaction::read(&mut signable_transaction_buf).unwrap();
+      let signable_transaction = ST::read(&mut signable_transaction_buf).unwrap();
       assert!(signable_transaction_buf.is_empty());
       assert_eq!(signable_transaction.id(), tx);
 
@@ -99,8 +92,8 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>, P: TransactionPublisher<Sch::SignableTransaction>>
-impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>, P: TransactionPublisher<Sch::SignableTransaction>>
-  ContinuallyRan for TransactionTask<D, S, Sch, P>
+impl<D: Db, ST: SignableTransaction, P: TransactionPublisher<TransactionFor<ST>>> ContinuallyRan
+  for TransactionTask<D, ST, P>
 {
   async fn run_iteration(&mut self) -> Result<bool, String> {
     let mut iterated = false;
 
@@ -108,10 +101,7 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>, P: TransactionPublisher<Sch::SignableTransaction>>
-      let Some(tx) = TransactionsToSign::<Sch::SignableTransaction>::try_recv(
-        &mut txn,
-        &self.keys[0].group_key(),
-      ) else {
+      let Some(tx) = TransactionsToSign::<ST>::try_recv(&mut txn, &self.keys[0].group_key()) else {
         break;
       };
       iterated = true;
 
@@ -208,7 +198,7 @@ impl<D: Db, S: ScannerFeed, Sch: Scheduler<S>, P: TransactionPublisher<Sch::SignableTransaction>>
-          let tx = TransactionFor::<S, Sch>::read(&mut tx_buf).unwrap();
+          let tx = TransactionFor::<ST>::read(&mut tx_buf).unwrap();
          assert!(tx_buf.is_empty());

          self
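
Below is a minimal sketch, not part of the patch above, of how a network integration might implement the TransactionPublisher trait as reconstructed here (generic over a T: Transaction, with an associated EphemeralError type referenced as Self::EphemeralError in the patch). DummyRpc, DummyTransaction, DummyRpcError, and DummyPublisher are hypothetical stand-ins, the crate path for the trait is assumed, and the impl of the scheduler's Transaction trait for DummyTransaction is elided.

// Sketch only: assumes the reconstructed trait shape from the patch above.
use serai_processor_signers::TransactionPublisher; // hypothetical crate/path for the trait

// Hypothetical stand-ins for a real node RPC client and its serializable transaction type.
#[derive(Clone, Debug)]
pub struct DummyRpc;
#[derive(Clone, Debug)]
pub struct DummyTransaction(pub Vec<u8>);
// (an impl of the scheduler's `Transaction` trait for `DummyTransaction` is elided here)

// The error MUST be ephemeral: retrying publication must eventually succeed without manual
// intervention.
#[derive(Clone, Debug)]
pub struct DummyRpcError;

pub struct DummyPublisher(pub DummyRpc);

#[async_trait::async_trait]
impl TransactionPublisher<DummyTransaction> for DummyPublisher {
  type EphemeralError = DummyRpcError;

  async fn publish(&self, tx: DummyTransaction) -> Result<(), Self::EphemeralError> {
    // Submit the serialized transaction to the node. Publication is expected to be idempotent:
    // the transaction already being in the mempool/on-chain is success, not an error.
    // e.g. self.0.send_raw_transaction(&tx.0).await.map_err(|_| DummyRpcError)
    let _ = tx;
    Ok(())
  }
}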
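
A second sketch, also not part of the patch, of the intended key lifecycle around register_keys and retire_session, assuming the reconstructed Signers<ST: SignableTransaction> generic and the reconstructed method signatures. Signers, SignableTransaction, and the serai_db traits come from the patch's workspace; the helper functions, their callers, and the crate paths are hypothetical.

// Sketch only: illustrates the register -> retire ordering documented in the patch.
use ciphersuite::{Ciphersuite, Ristretto};
use frost::dkg::ThresholdKeys;
use serai_db::{Db, DbTxn};
use serai_validator_sets_primitives::Session;

use scheduler::SignableTransaction; // from the patch's workspace
use serai_processor_signers::Signers; // hypothetical crate/path for the struct above

// Hypothetical hook, called once key generation for `session` is confirmed.
pub fn on_keys_confirmed<ST: SignableTransaction>(
  db: &mut impl Db,
  signers: &mut Signers<ST>,
  session: Session,
  substrate_keys: Vec<ThresholdKeys<Ristretto>>,
  network_keys: Vec<ThresholdKeys<ST::Ciphersuite>>,
) {
  // Registering is a NOP if this session (or a later one) was already retired.
  let mut txn = db.txn();
  signers.register_keys(&mut txn, session, substrate_keys, network_keys);
  txn.commit();
}

// Hypothetical hook, called when `session` is retired. Per the doc comment, this must be called
// in order, for every session, even sessions we never registered keys for.
pub fn on_session_retired<ST: SignableTransaction>(
  db: &mut impl Db,
  signers: &mut Signers<ST>,
  session: Session,
  external_group_key: &<ST::Ciphersuite as Ciphersuite>::G,
) {
  let mut txn = db.txn();
  signers.retire_session(&mut txn, session, external_group_key);
  txn.commit();
}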