diff --git a/.github/workflows/monero-tests.yaml b/.github/workflows/monero-tests.yaml index 37595084..b98bbfd4 100644 --- a/.github/workflows/monero-tests.yaml +++ b/.github/workflows/monero-tests.yaml @@ -53,4 +53,4 @@ jobs: if: ${{ matrix.version != 'v0.18.1.2' }} run: | cargo test --package monero-serai --all-features --test '*' - cargo test --package serai-processor monero + cargo test --package processor monero diff --git a/Cargo.lock b/Cargo.lock index bcf64b7f..f1320d9a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2788,7 +2788,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-runtime-interface", - "sp-std", + "sp-std 5.0.0", "sp-storage", "static_assertions", ] @@ -2833,7 +2833,7 @@ dependencies = [ "sp-keystore", "sp-runtime", "sp-state-machine", - "sp-std", + "sp-std 5.0.0", "sp-storage", "sp-trie", "thiserror", @@ -2852,7 +2852,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 5.0.0", "sp-tracing", ] @@ -2894,7 +2894,7 @@ dependencies = [ "sp-runtime", "sp-staking", "sp-state-machine", - "sp-std", + "sp-std 5.0.0", "sp-tracing", "sp-weights", "tt-call", @@ -2950,7 +2950,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 5.0.0", "sp-version", "sp-weights", ] @@ -3675,10 +3675,8 @@ dependencies = [ "parity-scale-codec", "scale-info", "serai-primitives", - "serde", "sp-inherents", "sp-runtime", - "sp-std", "thiserror", "tokens-pallet", ] @@ -3691,7 +3689,10 @@ dependencies = [ "scale-info", "serai-primitives", "serde", + "sp-runtime", + "sp-std 5.0.0", "tokens-primitives", + "zeroize", ] [[package]] @@ -5693,7 +5694,7 @@ dependencies = [ "scale-info", "sp-core", "sp-runtime", - "sp-std", + "sp-std 5.0.0", ] [[package]] @@ -5708,7 +5709,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-runtime", - "sp-std", + "sp-std 5.0.0", ] [[package]] @@ -5728,7 +5729,7 @@ dependencies = [ "sp-runtime", "sp-session", "sp-staking", - "sp-std", + "sp-std 5.0.0", "sp-trie", ] @@ -5742,7 +5743,7 @@ dependencies = [ "scale-info", "sp-application-crypto", "sp-core", - "sp-std", + "sp-std 5.0.0", ] [[package]] @@ -5759,7 +5760,7 @@ dependencies = [ "sp-inherents", "sp-io", "sp-runtime", - "sp-std", + "sp-std 5.0.0", "sp-timestamp", ] @@ -5776,7 +5777,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std", + "sp-std 5.0.0", ] [[package]] @@ -6350,6 +6351,54 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "processor" +version = "0.1.0" +dependencies = [ + "async-trait", + "bincode", + "bitcoin", + "bitcoin-serai", + "dalek-ff-group", + "env_logger", + "flexible-transcript", + "futures", + "group", + "hex", + "k256 0.12.0", + "lazy_static", + "log", + "modular-frost", + "monero-serai", + "parity-scale-codec", + "processor-messages", + "rand_chacha 0.3.1", + "rand_core 0.6.4", + "secp256k1", + "serai-client", + "serde", + "serde_json", + "thiserror", + "tokio", + "zeroize", +] + +[[package]] +name = "processor-messages" +version = "0.1.0" +dependencies = [ + "dkg", + "flexible-transcript", + "in-instructions-primitives", + "rand_chacha 0.3.1", + "rand_core 0.6.4", + "serai-primitives", + "serde", + "tokens-primitives", + "validator-sets-primitives", + "zeroize", +] + [[package]] name = "prometheus" version = "0.13.3" @@ -7807,7 +7856,7 @@ dependencies = [ "serde_json", "sp-core", "sp-io", - "sp-std", + "sp-std 5.0.0", ] [[package]] @@ -8243,13 +8292,14 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" name = "serai-client" version = "0.1.0" dependencies = [ + "bitcoin", + "ciphersuite", 
"jsonrpsee-server", "lazy_static", + "monero-serai", "parity-scale-codec", "rand_core 0.6.4", "scale-info", - "scale-value", - "serai-primitives", "serai-runtime", "sp-core", "subxt", @@ -8303,30 +8353,6 @@ dependencies = [ "serde", "sp-core", "sp-runtime", -] - -[[package]] -name = "serai-processor" -version = "0.1.0" -dependencies = [ - "async-trait", - "bitcoin", - "bitcoin-serai", - "curve25519-dalek 3.2.0", - "dalek-ff-group", - "flexible-transcript", - "futures", - "group", - "hex", - "k256 0.12.0", - "modular-frost", - "monero-serai", - "rand_core 0.6.4", - "secp256k1", - "serde", - "serde_json", - "thiserror", - "tokio", "zeroize", ] @@ -8357,7 +8383,7 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-std", + "sp-std 5.0.0", "sp-tendermint", "sp-transaction-pool", "sp-version", @@ -8657,7 +8683,7 @@ dependencies = [ "sp-core", "sp-runtime", "sp-state-machine", - "sp-std", + "sp-std 5.0.0", "sp-trie", "sp-version", "thiserror", @@ -8685,7 +8711,7 @@ dependencies = [ "serde", "sp-core", "sp-io", - "sp-std", + "sp-std 5.0.0", ] [[package]] @@ -8698,7 +8724,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "serde", - "sp-std", + "sp-std 5.0.0", "static_assertions", ] @@ -8711,7 +8737,7 @@ dependencies = [ "sp-api", "sp-inherents", "sp-runtime", - "sp-std", + "sp-std 5.0.0", ] [[package]] @@ -8745,7 +8771,7 @@ dependencies = [ "sp-inherents", "sp-runtime", "sp-state-machine", - "sp-std", + "sp-std 5.0.0", "sp-version", "thiserror", ] @@ -8780,11 +8806,11 @@ dependencies = [ "secp256k1", "secrecy", "serde", - "sp-core-hashing", + "sp-core-hashing 5.0.0", "sp-debug-derive", "sp-externalities", "sp-runtime-interface", - "sp-std", + "sp-std 5.0.0", "sp-storage", "ss58-registry", "substrate-bip39", @@ -8803,7 +8829,22 @@ dependencies = [ "digest 0.10.6", "sha2 0.10.6", "sha3", - "sp-std", + "sp-std 5.0.0", + "twox-hash", +] + +[[package]] +name = "sp-core-hashing" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbc2d1947252b7a4e403b0a260f596920443742791765ec111daa2bbf98eff25" +dependencies = [ + "blake2", + "byteorder", + "digest 0.10.6", + "sha2 0.10.6", + "sha3", + "sp-std 6.0.0", "twox-hash", ] @@ -8814,7 +8855,7 @@ source = "git+https://github.com/serai-dex/substrate#b1c7248b1fc93e3a453ffa1a14c dependencies = [ "proc-macro2", "quote", - "sp-core-hashing", + "sp-core-hashing 5.0.0", "syn", ] @@ -8844,7 +8885,7 @@ source = "git+https://github.com/serai-dex/substrate#b1c7248b1fc93e3a453ffa1a14c dependencies = [ "environmental", "parity-scale-codec", - "sp-std", + "sp-std 5.0.0", "sp-storage", ] @@ -8863,7 +8904,7 @@ dependencies = [ "sp-core", "sp-keystore", "sp-runtime", - "sp-std", + "sp-std 5.0.0", ] [[package]] @@ -8877,7 +8918,7 @@ dependencies = [ "scale-info", "sp-core", "sp-runtime", - "sp-std", + "sp-std 5.0.0", "thiserror", ] @@ -8899,7 +8940,7 @@ dependencies = [ "sp-keystore", "sp-runtime-interface", "sp-state-machine", - "sp-std", + "sp-std 5.0.0", "sp-tracing", "sp-trie", "tracing", @@ -8991,7 +9032,7 @@ dependencies = [ "sp-arithmetic", "sp-core", "sp-io", - "sp-std", + "sp-std 5.0.0", "sp-weights", ] @@ -9006,7 +9047,7 @@ dependencies = [ "primitive-types", "sp-externalities", "sp-runtime-interface-proc-macro", - "sp-std", + "sp-std 5.0.0", "sp-storage", "sp-tracing", "sp-wasm-interface", @@ -9036,7 +9077,7 @@ dependencies = [ "sp-core", "sp-runtime", "sp-staking", - "sp-std", + "sp-std 5.0.0", ] [[package]] @@ -9048,7 +9089,7 @@ dependencies = [ "scale-info", "sp-core", "sp-runtime", - "sp-std", + 
"sp-std 5.0.0", ] [[package]] @@ -9065,7 +9106,7 @@ dependencies = [ "sp-core", "sp-externalities", "sp-panic-handler", - "sp-std", + "sp-std 5.0.0", "sp-trie", "thiserror", "tracing", @@ -9076,6 +9117,12 @@ name = "sp-std" version = "5.0.0" source = "git+https://github.com/serai-dex/substrate#b1c7248b1fc93e3a453ffa1a14c7bf61dd19f767" +[[package]] +name = "sp-std" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af0ee286f98455272f64ac5bb1384ff21ac029fbb669afbaf48477faff12760e" + [[package]] name = "sp-storage" version = "7.0.0" @@ -9086,7 +9133,7 @@ dependencies = [ "ref-cast", "serde", "sp-debug-derive", - "sp-std", + "sp-std 5.0.0", ] [[package]] @@ -9095,7 +9142,7 @@ version = "0.1.0" dependencies = [ "sp-api", "sp-core", - "sp-std", + "sp-std 5.0.0", ] [[package]] @@ -9109,7 +9156,7 @@ dependencies = [ "parity-scale-codec", "sp-inherents", "sp-runtime", - "sp-std", + "sp-std 5.0.0", "thiserror", ] @@ -9119,7 +9166,7 @@ version = "6.0.0" source = "git+https://github.com/serai-dex/substrate#b1c7248b1fc93e3a453ffa1a14c7bf61dd19f767" dependencies = [ "parity-scale-codec", - "sp-std", + "sp-std 5.0.0", "tracing", "tracing-core", "tracing-subscriber", @@ -9146,7 +9193,7 @@ dependencies = [ "sp-core", "sp-inherents", "sp-runtime", - "sp-std", + "sp-std 5.0.0", "sp-trie", ] @@ -9166,7 +9213,7 @@ dependencies = [ "scale-info", "schnellru", "sp-core", - "sp-std", + "sp-std 5.0.0", "thiserror", "tracing", "trie-db", @@ -9185,7 +9232,7 @@ dependencies = [ "serde", "sp-core-hashing-proc-macro", "sp-runtime", - "sp-std", + "sp-std 5.0.0", "sp-version-proc-macro", "thiserror", ] @@ -9210,7 +9257,7 @@ dependencies = [ "impl-trait-for-tuples", "log", "parity-scale-codec", - "sp-std", + "sp-std 5.0.0", "wasmi", "wasmtime", ] @@ -9227,7 +9274,7 @@ dependencies = [ "sp-arithmetic", "sp-core", "sp-debug-derive", - "sp-std", + "sp-std 5.0.0", ] [[package]] @@ -9447,26 +9494,29 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "subxt" -version = "0.25.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3cbc78fd36035a24883eada29e0205b9b1416172530a7d00a60c07d0337db0c" +checksum = "54639dba6a113584083968b6a8f457dedae612abe1bd214762101ca29f12e332" dependencies = [ - "bitvec 1.0.1", + "base58 0.2.0", + "blake2", "derivative", "frame-metadata", "futures", "getrandom 0.2.8", "hex", + "impl-serde", "jsonrpsee", "parity-scale-codec", "parking_lot 0.12.1", + "primitive-types", + "scale-bits", "scale-decode", "scale-info", "scale-value", "serde", "serde_json", - "sp-core", - "sp-runtime", + "sp-core-hashing 6.0.0", "subxt-macro", "subxt-metadata", "thiserror", @@ -9475,9 +9525,9 @@ dependencies = [ [[package]] name = "subxt-codegen" -version = "0.25.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7722c31febf55eb300c73d977da5d65cfd6fb443419b1185b9abcdd9925fd7be" +checksum = "b8e86cb719003f1cedf2710a6e55ca4c37aba4c989bbd3b81dd1c52af9e4827e" dependencies = [ "darling", "frame-metadata", @@ -9496,9 +9546,9 @@ dependencies = [ [[package]] name = "subxt-macro" -version = "0.25.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f64826f2c4ba20e3b2a86ec81a6ae8655ca6b6a4c2a6ccc888b6615efc2df14" +checksum = "74c08de402a78c4c06c3ee3702c80e519efdcb65911348e018b6998d04404916" dependencies = [ "darling", "proc-macro-error", @@ -9508,14 +9558,14 @@ dependencies = [ [[package]] name = 
"subxt-metadata" -version = "0.25.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "869af75e23513538ad0af046af4a97b8d684e8d202e35ff4127ee061c1110813" +checksum = "2593ab5f53435e6352675af4f9851342607f37785d84c7a3fb3139550d3c35f0" dependencies = [ "frame-metadata", "parity-scale-codec", "scale-info", - "sp-core", + "sp-core-hashing 6.0.0", ] [[package]] @@ -9841,6 +9891,7 @@ dependencies = [ "serai-primitives", "serde", "sp-runtime", + "zeroize", ] [[package]] @@ -10333,6 +10384,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "serde", + "zeroize", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 2d1aea59..bd8bb84a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,6 +20,7 @@ members = [ "coins/monero/generators", "coins/monero", + "processor/messages", "processor", "substrate/serai/primitives", @@ -63,8 +64,3 @@ monero-serai = { opt-level = 3 } [profile.release] panic = "unwind" - -# Required for subxt -[patch.crates-io] -sp-core = { git = "https://github.com/serai-dex/substrate" } -sp-runtime = { git = "https://github.com/serai-dex/substrate" } diff --git a/coins/bitcoin/src/rpc.rs b/coins/bitcoin/src/rpc.rs index 1ae84916..e4853296 100644 --- a/coins/bitcoin/src/rpc.rs +++ b/coins/bitcoin/src/rpc.rs @@ -6,7 +6,10 @@ use serde::{Deserialize, de::DeserializeOwned}; use serde_json::json; use bitcoin::{ - hashes::hex::{FromHex, ToHex}, + hashes::{ + Hash, + hex::{FromHex, ToHex}, + }, consensus::encode, Txid, Transaction, BlockHash, Block, }; @@ -64,12 +67,23 @@ impl Rpc { self.rpc_call("getblockcount", json!([])).await } - pub async fn get_block_hash(&self, number: usize) -> Result { - self.rpc_call("getblockhash", json!([number])).await + pub async fn get_block_hash(&self, number: usize) -> Result<[u8; 32], RpcError> { + let mut hash = + self.rpc_call::("getblockhash", json!([number])).await?.as_hash().into_inner(); + hash.reverse(); + Ok(hash) } - pub async fn get_block(&self, block_hash: &BlockHash) -> Result { - let hex = self.rpc_call::("getblock", json!([block_hash.to_hex(), 0])).await?; + pub async fn get_block_number(&self, hash: &[u8; 32]) -> Result { + #[derive(Deserialize, Debug)] + struct Number { + height: usize, + } + Ok(self.rpc_call::("getblockheader", json!([hash.to_hex()])).await?.height) + } + + pub async fn get_block(&self, hash: &[u8; 32]) -> Result { + let hex = self.rpc_call::("getblock", json!([hash.to_hex(), 0])).await?; let bytes: Vec = FromHex::from_hex(&hex).map_err(|_| RpcError::InvalidResponse)?; encode::deserialize(&bytes).map_err(|_| RpcError::InvalidResponse) } @@ -77,4 +91,10 @@ impl Rpc { pub async fn send_raw_transaction(&self, tx: &Transaction) -> Result { self.rpc_call("sendrawtransaction", json!([encode::serialize_hex(tx)])).await } + + pub async fn get_transaction(&self, hash: &[u8; 32]) -> Result { + let hex = self.rpc_call::("getrawtransaction", json!([hash.to_hex()])).await?; + let bytes: Vec = FromHex::from_hex(&hex).map_err(|_| RpcError::InvalidResponse)?; + encode::deserialize(&bytes).map_err(|_| RpcError::InvalidResponse) + } } diff --git a/coins/bitcoin/src/wallet.rs b/coins/bitcoin/src/wallet.rs index 1887cbca..f7de1481 100644 --- a/coins/bitcoin/src/wallet.rs +++ b/coins/bitcoin/src/wallet.rs @@ -25,7 +25,7 @@ use bitcoin::{ use crate::crypto::{BitcoinHram, make_even}; /// A spendable output. -#[derive(Clone, Debug)] +#[derive(Clone, PartialEq, Eq, Debug)] pub struct SpendableOutput { /// The scalar offset to obtain the key usable to spend this output. /// Enables HDKD systems. 
@@ -69,8 +69,8 @@ impl SpendableOutput {
 }

 /// A signable transaction, clone-able across attempts.
-#[derive(Clone, Debug)]
-pub struct SignableTransaction(Transaction, Vec<Scalar>, Vec<TxOut>);
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct SignableTransaction(Transaction, Vec<Scalar>, Vec<TxOut>, u64);

 impl SignableTransaction {
   fn calculate_weight(inputs: usize, payments: &[(Address, u64)], change: Option<&Address>) -> u64 {
@@ -97,13 +97,25 @@ impl SignableTransaction {
     u64::try_from(tx.weight()).unwrap()
   }

-  /// Create a new signable-transaction.
+  pub fn fee(&self) -> u64 {
+    self.3
+  }
+
+  /// Create a new SignableTransaction.
   pub fn new(
     mut inputs: Vec<SpendableOutput>,
     payments: &[(Address, u64)],
     change: Option<Address>,
+    data: Option<Vec<u8>>,
     fee: u64,
   ) -> Option<SignableTransaction> {
+    if inputs.is_empty() ||
+      (payments.is_empty() && change.is_none()) ||
+      (data.as_ref().map(|data| data.len()).unwrap_or(0) > 80)
+    {
+      return None;
+    }
+
     let input_sat = inputs.iter().map(|input| input.output.value).sum::<u64>();
     let offsets = inputs.iter().map(|input| input.offset).collect();
     let tx_ins = inputs
@@ -122,17 +134,22 @@ impl SignableTransaction {
       .map(|payment| TxOut { value: payment.1, script_pubkey: payment.0.script_pubkey() })
       .collect::<Vec<_>>();

-    let actual_fee = fee * Self::calculate_weight(tx_ins.len(), payments, None);
-    if payment_sat > (input_sat - actual_fee) {
+    // Add the OP_RETURN output
+    if let Some(data) = data {
+      tx_outs.push(TxOut { value: 0, script_pubkey: Script::new_op_return(&data) })
+    }
+
+    let mut actual_fee = fee * Self::calculate_weight(tx_ins.len(), payments, None);
+    if input_sat < (payment_sat + actual_fee) {
       return None;
     }

-    // If there's a change address, check if there's a meaningful change
+    // If there's a change address, check if there's change to give it
     if let Some(change) = change.as_ref() {
       let fee_with_change = fee * Self::calculate_weight(tx_ins.len(), payments, Some(change));
-      // If there's a non-zero change, add it
       if let Some(value) = input_sat.checked_sub(payment_sat + fee_with_change) {
         tx_outs.push(TxOut { value, script_pubkey: change.script_pubkey() });
+        actual_fee = fee_with_change;
       }
     }
@@ -143,6 +160,7 @@
       Transaction { version: 2, lock_time: PackedLockTime::ZERO, input: tx_ins, output: tx_outs },
       offsets,
       inputs.drain(..).map(|input| input.output).collect(),
+      actual_fee,
     ))
   }
diff --git a/coins/monero/Cargo.toml b/coins/monero/Cargo.toml
index 509a4e19..d06a5622 100644
--- a/coins/monero/Cargo.toml
+++ b/coins/monero/Cargo.toml
@@ -18,7 +18,7 @@ lazy_static = "1"
 thiserror = "1"

 rand_core = "0.6"
-rand_chacha = { version = "0.3", optional = true }
+rand_chacha = "0.3"
 rand = "0.8"
 rand_distr = "0.4"
@@ -41,11 +41,11 @@ dleq = { path = "../../crypto/dleq", version = "0.3", features = ["serialize"],
 monero-generators = { path = "generators", version = "0.2" }

 hex = "0.4"
-serde = { version = "1.0", features = ["derive"] }
-serde_json = "1.0"
+serde = { version = "1", features = ["derive"] }
+serde_json = "1"

 base58-monero = "1"
-monero-epee-bin-serde = "1.0"
+monero-epee-bin-serde = "1"
 digest_auth = "0.3"
 reqwest = { version = "0.11", features = ["json"] }
@@ -63,4 +63,4 @@ monero-rpc = "0.3"
 frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.6", features = ["tests"] }

 [features]
-multisig = ["rand_chacha", "transcript", "frost", "dleq"]
+multisig = ["transcript", "frost", "dleq"]
diff --git a/coins/monero/src/block.rs b/coins/monero/src/block.rs
index 72178d17..f24519eb 100644
--- a/coins/monero/src/block.rs
+++ b/coins/monero/src/block.rs
@@ -1,6 +1,9 @@
 use std::io::{self, Read, Write};

-use crate::{serialize::*, transaction::Transaction};
+use crate::{
+  serialize::*,
+  transaction::{Input, Transaction},
+};

 #[derive(Clone, PartialEq, Eq, Debug)]
 pub struct BlockHeader {
@@ -45,6 +48,13 @@ pub struct Block {
 }

 impl Block {
+  pub fn number(&self) -> usize {
+    match self.miner_tx.prefix.inputs.get(0) {
+      Some(Input::Gen(number)) => (*number).try_into().unwrap(),
+      _ => panic!("invalid block, miner TX didn't have an Input::Gen"),
+    }
+  }
+
   pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
     self.header.write(w)?;
     self.miner_tx.write(w)?;
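// Why Block::number works (a sketch; `rpc`, `hash`, and `expected_height` are
// assumed bindings): consensus requires the miner TX's first input be
// Input::Gen(height), so a block's height is recoverable from the block alone:
//
//   let block = rpc.get_block(hash).await?;
//   assert_eq!(block.number(), expected_height);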
diff --git a/coins/monero/src/lib.rs b/coins/monero/src/lib.rs
index cc6b325e..203698a9 100644
--- a/coins/monero/src/lib.rs
+++ b/coins/monero/src/lib.rs
@@ -3,21 +3,23 @@
 //! A modern Monero transaction library intended for usage in wallets. It prides
 //! itself on accuracy, correctness, and removing common pitfalls developers may
 //! face.
-
+//!
 //! monero-serai contains safety features, such as first-class acknowledgement of
 //! the burning bug, yet also a high level API around creating transactions.
 //! monero-serai also offers a FROST-based multisig, which is orders of magnitude
 //! more performant than Monero's.
-
+//!
 //! monero-serai was written for Serai, a decentralized exchange aiming to support
 //! Monero. Despite this, monero-serai is intended to be a widely usable library,
 //! accurate to Monero. monero-serai guarantees the functionality needed for Serai,
 //! yet will not deprive functionality from other users, and may potentially leave
 //! Serai's umbrella at some point.
-
+//!
 //! Various legacy transaction formats are not currently implemented, yet
 //! monero-serai is still increasing its support for various transaction types.

+use std::io;
+
 use lazy_static::lazy_static;
 use rand_core::{RngCore, CryptoRng};
@@ -34,6 +36,7 @@ use curve25519_dalek::{
 pub use monero_generators::H;

 mod serialize;
+use serialize::{read_byte, read_u16};

 /// RingCT structs and functionality.
 pub mod ringct;
@@ -80,6 +83,45 @@ impl Protocol {
       Protocol::Custom { bp_plus, .. } => *bp_plus,
     }
   }
+
+  pub(crate) fn write<W: io::Write>(&self, w: &mut W) -> io::Result<()> {
+    match self {
+      Protocol::v14 => w.write_all(&[0, 14]),
+      Protocol::v16 => w.write_all(&[0, 16]),
+      Protocol::Custom { ring_len, bp_plus } => {
+        // Custom, version 0
+        w.write_all(&[1, 0])?;
+        w.write_all(&u16::try_from(*ring_len).unwrap().to_le_bytes())?;
+        w.write_all(&[u8::from(*bp_plus)])
+      }
+    }
+  }
+
+  pub(crate) fn read<R: io::Read>(r: &mut R) -> io::Result<Protocol> {
+    Ok(match read_byte(r)? {
+      // Monero protocol
+      0 => match read_byte(r)? {
+        14 => Protocol::v14,
+        16 => Protocol::v16,
+        _ => Err(io::Error::new(io::ErrorKind::Other, "unrecognized monero protocol"))?,
+      },
+      // Custom
+      1 => match read_byte(r)? {
+        0 => Protocol::Custom {
+          ring_len: read_u16(r)?.into(),
+          bp_plus: match read_byte(r)? {
+            0 => false,
+            1 => true,
+            _ => Err(io::Error::new(io::ErrorKind::Other, "invalid bool serialization"))?,
+          },
+        },
+        _ => {
+          Err(io::Error::new(io::ErrorKind::Other, "unrecognized custom protocol serialization"))?
+        }
+      },
+      _ => Err(io::Error::new(io::ErrorKind::Other, "unrecognized protocol serialization"))?,
+    })
+  }
 }

 lazy_static! {
diff --git a/coins/monero/src/rpc.rs b/coins/monero/src/rpc.rs
index f53bfc72..dda15be8 100644
--- a/coins/monero/src/rpc.rs
+++ b/coins/monero/src/rpc.rs
@@ -501,10 +501,8 @@ impl Rpc {
       reason: String,
     }

-    let mut buf = Vec::with_capacity(2048);
-    tx.write(&mut buf).unwrap();
     let res: SendRawResponse = self
-      .rpc_call("send_raw_transaction", Some(json!({ "tx_as_hex": hex::encode(&buf) })))
+      .rpc_call("send_raw_transaction", Some(json!({ "tx_as_hex": hex::encode(tx.serialize()) })))
       .await?;

     if res.status != "OK" {
diff --git a/coins/monero/src/serialize.rs b/coins/monero/src/serialize.rs
index bc548737..aec745bc 100644
--- a/coins/monero/src/serialize.rs
+++ b/coins/monero/src/serialize.rs
@@ -67,14 +67,18 @@ pub(crate) fn read_byte<R: Read>(r: &mut R) -> io::Result<u8> {
   Ok(read_bytes::<_, 1>(r)?[0])
 }

-pub(crate) fn read_u64<R: Read>(r: &mut R) -> io::Result<u64> {
-  read_bytes(r).map(u64::from_le_bytes)
+pub(crate) fn read_u16<R: Read>(r: &mut R) -> io::Result<u16> {
+  read_bytes(r).map(u16::from_le_bytes)
 }

 pub(crate) fn read_u32<R: Read>(r: &mut R) -> io::Result<u32> {
   read_bytes(r).map(u32::from_le_bytes)
 }

+pub(crate) fn read_u64<R: Read>(r: &mut R) -> io::Result<u64> {
+  read_bytes(r).map(u64::from_le_bytes)
+}
+
 pub(crate) fn read_varint<R: Read>(r: &mut R) -> io::Result<u64> {
   let mut bits = 0;
   let mut res = 0;
diff --git a/coins/monero/src/transaction.rs b/coins/monero/src/transaction.rs
index e981792a..4417588d 100644
--- a/coins/monero/src/transaction.rs
+++ b/coins/monero/src/transaction.rs
@@ -44,6 +44,12 @@ impl Input {
     }
   }

+  pub fn serialize(&self) -> Vec<u8> {
+    let mut res = vec![];
+    self.write(&mut res).unwrap();
+    res
+  }
+
   pub fn read<R: Read>(r: &mut R) -> io::Result<Input> {
     Ok(match read_byte(r)? {
       255 => Input::Gen(read_varint(r)?),
@@ -82,6 +88,12 @@ impl Output {
     Ok(())
   }

+  pub fn serialize(&self) -> Vec<u8> {
+    let mut res = Vec::with_capacity(8 + 1 + 32);
+    self.write(&mut res).unwrap();
+    res
+  }
+
   pub fn read<R: Read>(r: &mut R) -> io::Result<Output> {
     let amount = read_varint(r)?;
     let view_tag = match read_byte(r)? {
@@ -172,6 +184,12 @@ impl TransactionPrefix {
     w.write_all(&self.extra)
   }

+  pub fn serialize(&self) -> Vec<u8> {
+    let mut res = vec![];
+    self.write(&mut res).unwrap();
+    res
+  }
+
   pub fn read<R: Read>(r: &mut R) -> io::Result<TransactionPrefix> {
     let mut prefix = TransactionPrefix {
       version: read_varint(r)?,
@@ -219,6 +237,12 @@ impl Transaction {
     }
   }

+  pub fn serialize(&self) -> Vec<u8> {
+    let mut res = Vec::with_capacity(2048);
+    self.write(&mut res).unwrap();
+    res
+  }
+
   pub fn read<R: Read>(r: &mut R) -> io::Result<Transaction> {
     let prefix = TransactionPrefix::read(r)?;
     let mut signatures = vec![];
diff --git a/coins/monero/src/wallet/decoys.rs b/coins/monero/src/wallet/decoys.rs
index 4b67955a..f82d8262 100644
--- a/coins/monero/src/wallet/decoys.rs
+++ b/coins/monero/src/wallet/decoys.rs
@@ -42,6 +42,10 @@ async fn select_n<'a, R: RngCore + CryptoRng>(
   used: &mut HashSet<u64>,
   count: usize,
 ) -> Result<Vec<(u64, [EdwardsPoint; 2])>, RpcError> {
+  if height >= rpc.get_height().await? {
+    Err(RpcError::InternalError("decoys being requested from too young blocks"))?;
+  }
+
   let mut iters = 0;
   let mut confirmed = Vec::with_capacity(count);
   // Retries on failure. Retries are obvious as decoys, yet should be minimal
diff --git a/coins/monero/src/wallet/mod.rs b/coins/monero/src/wallet/mod.rs
index d7dff8ff..b7322d84 100644
--- a/coins/monero/src/wallet/mod.rs
+++ b/coins/monero/src/wallet/mod.rs
@@ -28,7 +28,9 @@ pub(crate) mod decoys;
 pub(crate) use decoys::Decoys;

 mod send;
-pub use send::{Fee, TransactionError, Change, SignableTransaction, SignableTransactionBuilder};
+pub use send::{
+  Fee, TransactionError, Change, SignableTransaction, SignableTransactionBuilder, Eventuality,
+};
 #[cfg(feature = "multisig")]
 pub(crate) use send::InternalPayment;
 #[cfg(feature = "multisig")]
diff --git a/coins/monero/src/wallet/scan.rs b/coins/monero/src/wallet/scan.rs
index 3deb080f..4a9fecc8 100644
--- a/coins/monero/src/wallet/scan.rs
+++ b/coins/monero/src/wallet/scan.rs
@@ -219,6 +219,10 @@ impl SpendableOutput {
     self.output.commitment()
   }

+  pub fn arbitrary_data(&self) -> &[Vec<u8>] {
+    self.output.arbitrary_data()
+  }
+
   pub fn write<W: io::Write>(&self, w: &mut W) -> io::Result<()> {
     self.output.write(w)?;
     w.write_all(&self.global_index.to_le_bytes())
diff --git a/coins/monero/src/wallet/send/builder.rs b/coins/monero/src/wallet/send/builder.rs
index d1e632cf..7c575750 100644
--- a/coins/monero/src/wallet/send/builder.rs
+++ b/coins/monero/src/wallet/send/builder.rs
@@ -1,6 +1,6 @@
 use std::sync::{Arc, RwLock};

-use zeroize::{Zeroize, ZeroizeOnDrop};
+use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};

 use crate::{
   Protocol,
@@ -15,6 +15,7 @@ struct SignableTransactionBuilderInternal {
   protocol: Protocol,
   fee: Fee,

+  r_seed: Option<Zeroizing<[u8; 32]>>,
   inputs: Vec<SpendableOutput>,
   payments: Vec<(MoneroAddress, u64)>,
   change_address: Option<Change>,
@@ -25,7 +26,19 @@ impl SignableTransactionBuilderInternal {
   // Takes in the change address so users don't miss that they have to manually set one
   // If they don't, all leftover funds will become part of the fee
   fn new(protocol: Protocol, fee: Fee, change_address: Option<Change>) -> Self {
-    Self { protocol, fee, inputs: vec![], payments: vec![], change_address, data: vec![] }
+    Self {
+      protocol,
+      fee,
+      r_seed: None,
+      inputs: vec![],
+      payments: vec![],
+      change_address,
+      data: vec![],
+    }
+  }
+
+  fn set_r_seed(&mut self, r_seed: Zeroizing<[u8; 32]>) {
+    self.r_seed = Some(r_seed);
   }

   fn add_input(&mut self, input: SpendableOutput) {
@@ -85,6 +98,11 @@ impl SignableTransactionBuilder {
     ))))
   }

+  pub fn set_r_seed(&mut self, r_seed: Zeroizing<[u8; 32]>) -> Self {
+    self.0.write().unwrap().set_r_seed(r_seed);
+    self.shallow_copy()
+  }
+
   pub fn add_input(&mut self, input: SpendableOutput) -> Self {
     self.0.write().unwrap().add_input(input);
     self.shallow_copy()
@@ -115,6 +133,7 @@
     let read = self.0.read().unwrap();
     SignableTransaction::new(
       read.protocol,
+      read.r_seed.clone(),
       read.inputs.clone(),
       read.payments.clone(),
       read.change_address.clone(),
diff --git a/coins/monero/src/wallet/send/mod.rs b/coins/monero/src/wallet/send/mod.rs
index 0a58cf94..1ba8e8fa 100644
--- a/coins/monero/src/wallet/send/mod.rs
+++ b/coins/monero/src/wallet/send/mod.rs
@@ -1,8 +1,10 @@
 use core::{ops::Deref, fmt};
+use std::io;

 use thiserror::Error;

-use rand_core::{RngCore, CryptoRng};
+use rand_core::{RngCore, CryptoRng, SeedableRng};
+use rand_chacha::ChaCha20Rng;
 use rand::seq::SliceRandom;

 use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
@@ -19,7 +21,11 @@ use dalek_ff_group as dfg;
 use frost::FrostError;

 use crate::{
-  Protocol, Commitment, random_scalar,
+  Protocol, Commitment, hash, random_scalar,
+  serialize::{
+    read_byte, read_bytes, read_u64, read_scalar, read_point, read_vec, write_byte, write_scalar,
+    write_point, write_raw_vec, write_vec,
+  },
   ringct::{
     generate_key_image,
     clsag::{ClsagError, ClsagInput, Clsag},
@@ -156,7 +162,7 @@ async fn prepare_inputs(
     rng,
     rpc,
     ring_len,
-    rpc.get_height().await.map_err(TransactionError::RpcError)? - 10,
+    rpc.get_height().await.map_err(TransactionError::RpcError)? - 1,
     inputs,
   )
   .await
@@ -204,10 +210,30 @@ impl Fee {
   }
 }

+#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
+pub(crate) enum InternalPayment {
+  Payment((MoneroAddress, u64)),
+  Change(Change, u64),
+}
+
+/// The eventual output of a SignableTransaction.
+///
+/// If the SignableTransaction has a Change with a view key, this will also have the view key.
+/// Accordingly, it must be treated securely.
+#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
+pub struct Eventuality {
+  protocol: Protocol,
+  r_seed: Zeroizing<[u8; 32]>,
+  inputs: Vec<EdwardsPoint>,
+  payments: Vec<InternalPayment>,
+  extra: Vec<u8>,
+}
+
 /// A signable transaction, either in a single-signer or multisig context.
 #[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
 pub struct SignableTransaction {
   protocol: Protocol,
+  r_seed: Option<Zeroizing<[u8; 32]>>,
   inputs: Vec<SpendableOutput>,
   payments: Vec<InternalPayment>,
   data: Vec<Vec<u8>>,
@@ -250,22 +276,19 @@ impl Change {
   }
 }

-#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
-pub(crate) enum InternalPayment {
-  Payment((MoneroAddress, u64)),
-  Change(Change, u64),
-}
-
 impl SignableTransaction {
   /// Create a signable transaction.
   ///
-  /// Up to 16 outputs may be present, including the change output.
+  /// `r_seed` refers to a seed used to derive the transaction's ephemeral keys (colloquially
+  /// called Rs). If None is provided, one will be automatically generated.
   ///
-  /// If the change address is specified, leftover funds will be sent to it.
+  /// Up to 16 outputs may be present, including the change output. If the change address is
+  /// specified, leftover funds will be sent to it.
   ///
-  /// Each chunk of data must not exceed MAX_ARBITRARY_DATA_SIZE.
+  /// Each chunk of data must not exceed MAX_ARBITRARY_DATA_SIZE and will be embedded in TX extra.
   pub fn new(
     protocol: Protocol,
+    r_seed: Option<Zeroizing<[u8; 32]>>,
     inputs: Vec<SpendableOutput>,
     mut payments: Vec<(MoneroAddress, u64)>,
     change_address: Option<Change>,
@@ -351,27 +374,46 @@ impl SignableTransaction {
       payments.push(InternalPayment::Change(change, in_amount - out_amount));
     }

-    Ok(SignableTransaction { protocol, inputs, payments, data, fee })
+    Ok(SignableTransaction { protocol, r_seed, inputs, payments, data, fee })
   }

-  fn prepare_transaction<R: RngCore + CryptoRng>(
-    &mut self,
-    rng: &mut R,
+  pub fn fee(&self) -> u64 {
+    self.fee
+  }
+
+  #[allow(clippy::type_complexity)]
+  fn prepare_payments(
+    seed: &Zeroizing<[u8; 32]>,
+    inputs: &[EdwardsPoint],
+    payments: &mut Vec<InternalPayment>,
     uniqueness: [u8; 32],
-  ) -> (Transaction, Scalar) {
+  ) -> (EdwardsPoint, Vec<Zeroizing<Scalar>>, Vec<SendOutput>, Option<[u8; 8]>) {
+    let mut rng = {
+      // Hash the inputs into the seed so we don't re-use Rs
+      // Doesn't re-use uniqueness as that's based on key images, which requires interactivity
+      // to generate. The output keys do not
+      // This remains private so long as the seed is private
+      let mut r_uniqueness = vec![];
+      for input in inputs {
+        r_uniqueness.extend(input.compress().to_bytes());
+      }
+      ChaCha20Rng::from_seed(hash(
+        &[b"monero-serai_outputs".as_ref(), seed.as_ref(), &r_uniqueness].concat(),
+      ))
+    };
+
     // Shuffle the payments
-    self.payments.shuffle(rng);
+    payments.shuffle(&mut rng);

     // Used for all non-subaddress outputs, or if there's only one subaddress output and a change
-    let tx_key = Zeroizing::new(random_scalar(rng));
+    let tx_key = Zeroizing::new(random_scalar(&mut rng));
     let mut tx_public_key = tx_key.deref() * &ED25519_BASEPOINT_TABLE;

     // If any of these outputs are to a subaddress, we need keys distinct to them
     // The only time this *does not* force having additional keys is when the only other output
     // is a change output we have the view key for, enabling rewriting rA to aR
     let mut has_change_view = false;
-    let subaddresses = self
-      .payments
+    let subaddresses = payments
       .iter()
       .filter(|payment| match *payment {
         InternalPayment::Payment(payment) => payment.0.is_subaddress(),
@@ -391,14 +433,14 @@ impl SignableTransaction {
     // We need additional keys if we have any subaddresses
     let mut additional = subaddresses;
     // Unless the above change view key path is taken
-    if (self.payments.len() == 2) && has_change_view {
+    if (payments.len() == 2) && has_change_view {
       additional = false;
     }
     let modified_change_ecdh = subaddresses && (!additional);

     // If we're using the aR rewrite, update tx_public_key from rG to rB
     if modified_change_ecdh {
-      for payment in &self.payments {
+      for payment in &*payments {
         match payment {
           InternalPayment::Payment(payment) => {
             // This should be the only payment and it should be a subaddress
@@ -412,9 +454,10 @@ impl SignableTransaction {
     }

     // Actually create the outputs
-    let mut outputs = Vec::with_capacity(self.payments.len());
+    let mut additional_keys = vec![];
+    let mut outputs = Vec::with_capacity(payments.len());
     let mut id = None;
-    for (o, mut payment) in self.payments.drain(..).enumerate() {
+    for (o, mut payment) in payments.drain(..).enumerate() {
       // Downcast the change output to a payment output if it doesn't require special handling
       // regarding its view key
       payment = if !modified_change_ecdh {
@@ -430,7 +473,7 @@ impl SignableTransaction {
       let (output, payment_id) = match payment {
         InternalPayment::Payment(payment) => {
           // If this is a subaddress, generate a dedicated r. Else, reuse the TX key
-          let dedicated = Zeroizing::new(random_scalar(&mut *rng));
+          let dedicated = Zeroizing::new(random_scalar(&mut rng));
           let use_dedicated = additional && payment.0.is_subaddress();
           let r = if use_dedicated { &dedicated } else { &tx_key };
@@ -438,9 +481,13 @@
           if modified_change_ecdh {
             debug_assert_eq!(tx_public_key, output.R);
           }
-          // If this used tx_key, randomize its R
-          if !use_dedicated {
-            output.R = dfg::EdwardsPoint::random(&mut *rng).0;
+
+          if use_dedicated {
+            additional_keys.push(dedicated);
+          } else {
+            // If this used tx_key, randomize its R
+            // This is so when extra is created, there's a distinct R for it to use
+            output.R = dfg::EdwardsPoint::random(&mut rng).0;
           }
           (output, payment_id)
         }
@@ -466,6 +513,92 @@
       id = id.or(Some(rand));
     }

+    (tx_public_key, additional_keys, outputs, id)
+  }
+
+  #[allow(non_snake_case)]
+  fn extra(
+    tx_key: EdwardsPoint,
+    additional: bool,
+    Rs: Vec<EdwardsPoint>,
+    id: Option<[u8; 8]>,
+    data: &mut Vec<Vec<u8>>,
+  ) -> Vec<u8> {
+    #[allow(non_snake_case)]
+    let Rs_len = Rs.len();
+    let mut extra = Extra::new(tx_key, if additional { Rs } else { vec![] });
+
+    if let Some(id) = id {
+      let mut id_vec = Vec::with_capacity(1 + 8);
+      PaymentId::Encrypted(id).write(&mut id_vec).unwrap();
+      extra.push(ExtraField::Nonce(id_vec));
+    }
+
+    // Include data if present
+    let extra_len = Extra::fee_weight(Rs_len, id.is_some(), data.as_ref());
+    for part in data.drain(..) {
+      let mut arb = vec![ARBITRARY_DATA_MARKER];
+      arb.extend(part);
+      extra.push(ExtraField::Nonce(arb));
+    }
+
+    let mut serialized = Vec::with_capacity(extra_len);
+    extra.write(&mut serialized).unwrap();
+    serialized
+  }
+
+  /// Returns the eventuality of this transaction.
+  /// The eventuality is defined as the TX extra/outputs this transaction will create, if signed
+  /// with the specified seed. This eventuality can be compared to on-chain transactions to see
+  /// if the transaction has already been signed and published.
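+  ///
+  /// A minimal sketch of the intended flow (`signable`, `pending`, and `tx` are
+  /// illustrative bindings, not items from this crate):
+  /// ```ignore
+  /// let eventuality = signable.eventuality().unwrap();
+  /// // Index pending plans by their extra for cheap lookup
+  /// pending.insert(eventuality.extra().to_vec(), eventuality);
+  /// // Later, for each transaction seen on-chain:
+  /// if let Some(eventuality) = pending.get(&tx.prefix.extra) {
+  ///   if eventuality.matches(&tx) {
+  ///     // The planned transaction was published; no need to re-sign it
+  ///   }
+  /// }
+  /// ```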
+  pub fn eventuality(&self) -> Option<Eventuality> {
+    let inputs = self.inputs.iter().map(|input| input.key()).collect::<Vec<_>>();
+    let (tx_key, additional, outputs, id) = Self::prepare_payments(
+      self.r_seed.as_ref()?,
+      &inputs,
+      &mut self.payments.clone(),
+      // Lie about the uniqueness, used when determining output keys/commitments yet not the
+      // ephemeral keys, which is what we want here
+      // While we do still grab the outputs variable, it's so we can get its Rs
+      [0; 32],
+    );
+    #[allow(non_snake_case)]
+    let Rs = outputs.iter().map(|output| output.R).collect();
+    drop(outputs);
+
+    let additional = !additional.is_empty();
+    let extra = Self::extra(tx_key, additional, Rs, id, &mut self.data.clone());
+
+    Some(Eventuality {
+      protocol: self.protocol,
+      r_seed: self.r_seed.clone()?,
+      inputs,
+      payments: self.payments.clone(),
+      extra,
+    })
+  }
+
+  fn prepare_transaction<R: RngCore + CryptoRng>(
+    &mut self,
+    rng: &mut R,
+    uniqueness: [u8; 32],
+  ) -> (Transaction, Scalar) {
+    // If no seed for the ephemeral keys was provided, make one
+    let r_seed = self.r_seed.clone().unwrap_or_else(|| {
+      let mut res = Zeroizing::new([0; 32]);
+      rng.fill_bytes(res.as_mut());
+      res
+    });
+
+    let (tx_key, additional, outputs, id) = Self::prepare_payments(
+      &r_seed,
+      &self.inputs.iter().map(|input| input.key()).collect::<Vec<_>>(),
+      &mut self.payments,
+      uniqueness,
+    );
+    // This function only cares if additional keys were necessary, not what they were
+    let additional = !additional.is_empty();
+
     let commitments = outputs.iter().map(|output| output.commitment.clone()).collect::<Vec<_>>();
     let sum = commitments.iter().map(|commitment| commitment.mask).sum();
@@ -473,34 +606,19 @@
     let bp = Bulletproofs::prove(rng, &commitments, self.protocol.bp_plus()).unwrap();

     // Create the TX extra
-    let extra = {
-      let mut extra = Extra::new(
-        tx_public_key,
-        if additional { outputs.iter().map(|output| output.R).collect() } else { vec![] },
-      );
-
-      if let Some(id) = id {
-        let mut id_vec = Vec::with_capacity(1 + 8);
-        PaymentId::Encrypted(id).write(&mut id_vec).unwrap();
-        extra.push(ExtraField::Nonce(id_vec));
-      }
-
-      // Include data if present
-      for part in self.data.drain(..) {
-        let mut arb = vec![ARBITRARY_DATA_MARKER];
-        arb.extend(part);
-        extra.push(ExtraField::Nonce(arb));
-      }
-
-      let mut serialized =
-        Vec::with_capacity(Extra::fee_weight(outputs.len(), id.is_some(), self.data.as_ref()));
-      extra.write(&mut serialized).unwrap();
-      serialized
-    };
+    let extra = Self::extra(
+      tx_key,
+      additional,
+      outputs.iter().map(|output| output.R).collect(),
+      id,
+      &mut self.data,
+    );
+
+    let mut fee = self.inputs.iter().map(|input| input.commitment().amount).sum::<u64>();
     let mut tx_outputs = Vec::with_capacity(outputs.len());
     let mut ecdh_info = Vec::with_capacity(outputs.len());
     for output in &outputs {
+      fee -= output.commitment.amount;
       tx_outputs.push(Output {
         amount: 0,
         key: output.dest.compress(),
@@ -521,7 +639,7 @@
       signatures: vec![],
       rct_signatures: RctSignatures {
         base: RctBase {
-          fee: self.fee,
+          fee,
           ecdh_info,
           commitments: commitments.iter().map(|commitment| commitment.calculate()).collect(),
         },
@@ -579,3 +697,128 @@
     Ok(tx)
   }
 }
+
+impl Eventuality {
+  /// Enables building a HashMap of Extra -> Eventuality for efficiently checking if an on-chain
+  /// transaction may match this eventuality.
+  /// This extra is cryptographically bound to:
+  /// 1) A specific set of inputs (via their output key)
+  /// 2) A specific seed for the ephemeral keys
+  pub fn extra(&self) -> &[u8] {
+    &self.extra
+  }
+
+  pub fn matches(&self, tx: &Transaction) -> bool {
+    if self.payments.len() != tx.prefix.outputs.len() {
+      return false;
+    }
+
+    // Verify extra.
+    // Even if all the outputs were correct, a malicious extra could still cause a recipient to
+    // fail to receive their funds.
+    // This is the cheapest check available to perform as it does not require TX-specific ECC ops.
+    if self.extra != tx.prefix.extra {
+      return false;
+    }
+
+    // Also ensure no timelock was set.
+    if tx.prefix.timelock != Timelock::None {
+      return false;
+    }
+
+    // Generate the outputs. This is TX-specific due to uniqueness.
+    let (_, _, outputs, _) = SignableTransaction::prepare_payments(
+      &self.r_seed,
+      &self.inputs,
+      &mut self.payments.clone(),
+      uniqueness(&tx.prefix.inputs),
+    );
+
+    for (o, (expected, actual)) in outputs.iter().zip(tx.prefix.outputs.iter()).enumerate() {
+      // Verify the output, commitment, and encrypted amount.
+      if (&Output {
+        amount: 0,
+        key: expected.dest.compress(),
+        view_tag: Some(expected.view_tag).filter(|_| matches!(self.protocol, Protocol::v16)),
+      } != actual) ||
+        (Some(&expected.commitment.calculate()) != tx.rct_signatures.base.commitments.get(o)) ||
+        (Some(&expected.amount) != tx.rct_signatures.base.ecdh_info.get(o))
+      {
+        return false;
+      }
+    }
+
+    true
+  }
+
+  pub fn write<W: io::Write>(&self, w: &mut W) -> io::Result<()> {
+    self.protocol.write(w)?;
+    write_raw_vec(write_byte, self.r_seed.as_ref(), w)?;
+    write_vec(write_point, &self.inputs, w)?;
+
+    fn write_payment<W: io::Write>(payment: &InternalPayment, w: &mut W) -> io::Result<()> {
+      match payment {
+        InternalPayment::Payment(payment) => {
+          w.write_all(&[0])?;
+          write_vec(write_byte, payment.0.to_string().as_bytes(), w)?;
+          w.write_all(&payment.1.to_le_bytes())
+        }
+        InternalPayment::Change(change, amount) => {
+          w.write_all(&[1])?;
+          write_vec(write_byte, change.address.to_string().as_bytes(), w)?;
+          if let Some(view) = change.view.as_ref() {
+            w.write_all(&[1])?;
+            write_scalar(view, w)?;
+          } else {
+            w.write_all(&[0])?;
+          }
+          w.write_all(&amount.to_le_bytes())
+        }
+      }
+    }
+    write_vec(write_payment, &self.payments, w)?;
+
+    write_vec(write_byte, &self.extra, w)
+  }
+
+  pub fn serialize(&self) -> Vec<u8> {
+    let mut buf = Vec::with_capacity(128);
+    self.write(&mut buf).unwrap();
+    buf
+  }
+
+  pub fn read<R: io::Read>(r: &mut R) -> io::Result<Eventuality> {
+    fn read_address<R: io::Read>(r: &mut R) -> io::Result<MoneroAddress> {
+      String::from_utf8(read_vec(read_byte, r)?)
+        .ok()
+        .and_then(|str| MoneroAddress::from_str_raw(&str).ok())
+        .ok_or(io::Error::new(io::ErrorKind::Other, "invalid address"))
+    }
+
+    fn read_payment<R: io::Read>(r: &mut R) -> io::Result<InternalPayment> {
+      Ok(match read_byte(r)? {
+        0 => InternalPayment::Payment((read_address(r)?, read_u64(r)?)),
+        1 => InternalPayment::Change(
+          Change {
+            address: read_address(r)?,
+            view: match read_byte(r)?
{ + 0 => None, + 1 => Some(Zeroizing::new(read_scalar(r)?)), + _ => Err(io::Error::new(io::ErrorKind::Other, "invalid change payment"))?, + }, + }, + read_u64(r)?, + ), + _ => Err(io::Error::new(io::ErrorKind::Other, "invalid payment"))?, + }) + } + + Ok(Eventuality { + protocol: Protocol::read(r)?, + r_seed: Zeroizing::new(read_bytes::<_, 32>(r)?), + inputs: read_vec(read_point, r)?, + payments: read_vec(read_payment, r)?, + extra: read_vec(read_byte, r)?, + }) + } +} diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index 3c0dbee8..749f3afe 100644 --- a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -39,6 +39,7 @@ use crate::{ /// FROST signing machine to produce a signed transaction. pub struct TransactionMachine { signable: SignableTransaction, + i: Participant, transcript: RecommendedTranscript, @@ -52,6 +53,7 @@ pub struct TransactionMachine { pub struct TransactionSignMachine { signable: SignableTransaction, + i: Participant, transcript: RecommendedTranscript, @@ -93,15 +95,22 @@ impl SignableTransaction { // multiple times, already breaking privacy there transcript.domain_separate(b"monero_transaction"); + // Include the height we're using for our data // The data itself will be included, making this unnecessary, yet a lot of this is technically // unnecessary. Anything which further increases security at almost no cost should be followed transcript.append_message(b"height", u64::try_from(height).unwrap().to_le_bytes()); + // Also include the spend_key as below only the key offset is included, so this transcripts the // sum product // Useful as transcripting the sum product effectively transcripts the key image, further // guaranteeing the one time properties noted below transcript.append_message(b"spend_key", keys.group_key().0.compress().to_bytes()); + + if let Some(r_seed) = &self.r_seed { + transcript.append_message(b"r_seed", r_seed); + } + for input in &self.inputs { // These outputs can only be spent once. 
Therefore, it forces all RNGs derived from this // transcript (such as the one used to create one time keys) to be unique @@ -111,6 +120,7 @@ impl SignableTransaction { // to determine RNG seeds and therefore the true spends transcript.append_message(b"input_shared_key", input.key_offset().to_bytes()); } + for payment in &self.payments { match payment { InternalPayment::Payment(payment) => { @@ -162,6 +172,7 @@ impl SignableTransaction { Ok(TransactionMachine { signable: self, + i: keys.params().i(), transcript, @@ -208,6 +219,7 @@ impl PreprocessMachine for TransactionMachine { ( TransactionSignMachine { signable: self.signable, + i: self.i, transcript: self.transcript, @@ -324,6 +336,7 @@ impl SignMachine for TransactionSignMachine { sorted_images.sort_by(key_image_sort); self.signable.prepare_transaction( + // Technically, r_seed is used for the transaction keys if it's provided &mut ChaCha20Rng::from_seed(self.transcript.rng_seed(b"transaction_keys_bulletproofs")), uniqueness( &sorted_images diff --git a/coins/monero/tests/add_data.rs b/coins/monero/tests/add_data.rs index edb550ab..fe967c72 100644 --- a/coins/monero/tests/add_data.rs +++ b/coins/monero/tests/add_data.rs @@ -1,6 +1,6 @@ use monero_serai::{ - wallet::{TransactionError, extra::MAX_ARBITRARY_DATA_SIZE}, transaction::Transaction, + wallet::{TransactionError, extra::MAX_ARBITRARY_DATA_SIZE}, }; mod runner; diff --git a/coins/monero/tests/eventuality.rs b/coins/monero/tests/eventuality.rs new file mode 100644 index 00000000..dfbc6f0d --- /dev/null +++ b/coins/monero/tests/eventuality.rs @@ -0,0 +1,79 @@ +use curve25519_dalek::constants::ED25519_BASEPOINT_POINT; + +use monero_serai::{ + transaction::Transaction, + wallet::{ + Eventuality, + address::{AddressType, AddressMeta, MoneroAddress}, + }, +}; + +mod runner; + +test!( + eventuality, + ( + |_, mut builder: Builder, _| async move { + // Add a standard address, a payment ID address, a subaddress, and a guaranteed address + // Each have their own slight implications to eventualities + builder.add_payment( + MoneroAddress::new( + AddressMeta::new(Network::Mainnet, AddressType::Standard), + ED25519_BASEPOINT_POINT, + ED25519_BASEPOINT_POINT, + ), + 1, + ); + builder.add_payment( + MoneroAddress::new( + AddressMeta::new(Network::Mainnet, AddressType::Integrated([0xaa; 8])), + ED25519_BASEPOINT_POINT, + ED25519_BASEPOINT_POINT, + ), + 2, + ); + builder.add_payment( + MoneroAddress::new( + AddressMeta::new(Network::Mainnet, AddressType::Subaddress), + ED25519_BASEPOINT_POINT, + ED25519_BASEPOINT_POINT, + ), + 3, + ); + builder.add_payment( + MoneroAddress::new( + AddressMeta::new( + Network::Mainnet, + AddressType::Featured { subaddress: false, payment_id: None, guaranteed: true }, + ), + ED25519_BASEPOINT_POINT, + ED25519_BASEPOINT_POINT, + ), + 4, + ); + builder.set_r_seed(Zeroizing::new([0xbb; 32])); + let tx = builder.build().unwrap(); + let eventuality = tx.eventuality().unwrap(); + assert_eq!( + eventuality, + Eventuality::read::<&[u8]>(&mut eventuality.serialize().as_ref()).unwrap() + ); + (tx, eventuality) + }, + |_, mut tx: Transaction, _, eventuality: Eventuality| async move { + // 4 explicitly outputs added and one change output + assert_eq!(tx.prefix.outputs.len(), 5); + + // The eventuality's available extra should be the actual TX's + assert_eq!(tx.prefix.extra, eventuality.extra()); + + // The TX should match + assert!(eventuality.matches(&tx)); + + // Mutate the TX + tx.rct_signatures.base.commitments[0] += ED25519_BASEPOINT_POINT; + // Verify it no longer 
matches + assert!(!eventuality.matches(&tx)); + }, + ), +); diff --git a/coins/monero/tests/runner.rs b/coins/monero/tests/runner.rs index 6626a736..a94cd20f 100644 --- a/coins/monero/tests/runner.rs +++ b/coins/monero/tests/runner.rs @@ -12,12 +12,12 @@ use tokio::sync::Mutex; use monero_serai::{ random_scalar, + rpc::Rpc, wallet::{ ViewPair, Scanner, address::{Network, AddressType, AddressSpec, AddressMeta, MoneroAddress}, SpendableOutput, }, - rpc::Rpc, }; pub fn random_address() -> (Scalar, ViewPair, MoneroAddress) { diff --git a/coins/monero/tests/send.rs b/coins/monero/tests/send.rs index 66f8e593..295b8027 100644 --- a/coins/monero/tests/send.rs +++ b/coins/monero/tests/send.rs @@ -1,6 +1,6 @@ use monero_serai::{ - wallet::{extra::Extra, address::SubaddressIndex, ReceivedOutput, SpendableOutput}, transaction::Transaction, + wallet::{extra::Extra, address::SubaddressIndex, ReceivedOutput, SpendableOutput}, rpc::Rpc, }; diff --git a/coins/monero/tests/wallet2_compatibility.rs b/coins/monero/tests/wallet2_compatibility.rs index 113249c0..db5f5d99 100644 --- a/coins/monero/tests/wallet2_compatibility.rs +++ b/coins/monero/tests/wallet2_compatibility.rs @@ -19,12 +19,12 @@ use monero_rpc::{ use monero_serai::{ transaction::Transaction, + rpc::Rpc, wallet::{ address::{Network, AddressSpec, SubaddressIndex, MoneroAddress}, extra::{MAX_TX_EXTRA_NONCE_SIZE, Extra}, Scanner, }, - rpc::Rpc, }; mod runner; diff --git a/crypto/frost/src/algorithm.rs b/crypto/frost/src/algorithm.rs index 6c5cac9a..073b483f 100644 --- a/crypto/frost/src/algorithm.rs +++ b/crypto/frost/src/algorithm.rs @@ -21,14 +21,14 @@ impl WriteAddendum for () { } /// Trait alias for the requirements to be used as an addendum. -pub trait Addendum: Send + Clone + PartialEq + Debug + WriteAddendum {} -impl Addendum for A {} +pub trait Addendum: Send + Sync + Clone + PartialEq + Debug + WriteAddendum {} +impl Addendum for A {} /// Algorithm trait usable by the FROST signing machine to produce signatures.. -pub trait Algorithm: Send + Clone { +pub trait Algorithm: Send + Sync + Clone { /// The transcript format this algorithm uses. This likely should NOT be the IETF-compatible /// transcript included in this crate. - type Transcript: Clone + Debug + Transcript; + type Transcript: Sync + Clone + Debug + Transcript; /// Serializable addendum, used in algorithms requiring more data than just the nonces. type Addendum: Addendum; /// The resulting type of the signatures this algorithm will produce. @@ -120,7 +120,7 @@ mod sealed { pub(crate) use sealed::IetfTranscript; /// HRAm usable by the included Schnorr signature algorithm to generate challenges. -pub trait Hram: Send + Clone { +pub trait Hram: Send + Sync + Clone { /// HRAm function to generate a challenge. /// H2 from the IETF draft, despite having a different argument set (not being pre-formatted). #[allow(non_snake_case)] @@ -129,7 +129,7 @@ pub trait Hram: Send + Clone { /// Schnorr signature algorithm ((R, s) where s = r + cx). #[derive(Clone)] -pub struct Schnorr> { +pub struct Schnorr> { transcript: T, c: Option, _hram: PhantomData, @@ -145,7 +145,7 @@ pub struct Schnorr> { /// specify a protocol for offsets. pub type IetfSchnorr = Schnorr; -impl> Schnorr { +impl> Schnorr { /// Construct a Schnorr algorithm continuing the specified transcript. 
pub fn new(transcript: T) -> Schnorr { Schnorr { transcript, c: None, _hram: PhantomData } @@ -161,7 +161,7 @@ impl> IetfSchnorr { } } -impl> Algorithm for Schnorr { +impl> Algorithm for Schnorr { type Transcript = T; type Addendum = (); type Signature = SchnorrSignature; diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index 52507e7f..b7e47e25 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -202,7 +202,7 @@ impl SignatureShare { } /// Trait for the second machine of a two-round signing protocol. -pub trait SignMachine: Send + Sized { +pub trait SignMachine: Send + Sync + Sized { /// Params used to instantiate this machine which can be used to rebuild from a cache. type Params: Clone; /// Keys used for signing operations. @@ -435,7 +435,7 @@ impl> SignMachine for AlgorithmSignMachi } /// Trait for the final machine of a two-round signing protocol. -pub trait SignatureMachine: Send { +pub trait SignatureMachine: Send + Sync { /// SignatureShare message for this machine. type SignatureShare: Clone + PartialEq + Writable; diff --git a/deny.toml b/deny.toml index 5826699d..705eb5e0 100644 --- a/deny.toml +++ b/deny.toml @@ -47,7 +47,8 @@ exceptions = [ { allow = ["AGPL-3.0"], name = "bitcoin-serai" }, { allow = ["AGPL-3.0"], name = "ethereum-serai" }, - { allow = ["AGPL-3.0"], name = "serai-processor" }, + { allow = ["AGPL-3.0"], name = "processor-messages" }, + { allow = ["AGPL-3.0"], name = "processor" }, { allow = ["AGPL-3.0"], name = "tokens-pallet" }, diff --git a/deploy/coins/bitcoin/scripts/entry-dev.sh b/deploy/coins/bitcoin/scripts/entry-dev.sh index a8525881..12712eac 100755 --- a/deploy/coins/bitcoin/scripts/entry-dev.sh +++ b/deploy/coins/bitcoin/scripts/entry-dev.sh @@ -1,6 +1,8 @@ -#!/bin/sh +#!/bin/bash RPC_USER="${RPC_USER:=serai}" RPC_PASS="${RPC_PASS:=seraidex}" -bitcoind -regtest -rpcuser=$RPC_USER -rpcpassword=$RPC_PASS -rpcallowip=0.0.0.0/0 -rpcbind=127.0.0.1 -rpcbind=$(hostname) +bitcoind -txindex -regtest \ + -rpcuser=$RPC_USER -rpcpassword=$RPC_PASS \ + -rpcbind=127.0.0.1 -rpcbind=$(hostname) -rpcallowip=0.0.0.0/0 diff --git a/deploy/coins/ethereum/scripts/entry-dev.sh b/deploy/coins/ethereum/scripts/entry-dev.sh index 01a980bc..0b86ff69 100755 --- a/deploy/coins/ethereum/scripts/entry-dev.sh +++ b/deploy/coins/ethereum/scripts/entry-dev.sh @@ -1,2 +1,6 @@ #!/bin/sh -geth --dev --dev.period 5 --verbosity 2 --networkid 15 --datadir "data" -mine --miner.threads 1 -http --http.addr 0.0.0.0 --http.port 8545 --allow-insecure-unlock --http.api "eth,net,web3,miner,personal,txpool,debug" --http.corsdomain "*" -nodiscover --http.vhosts="*" + +geth --dev --networkid 5208 --datadir "eth-devnet" \ + --http --http.api "web3,net,eth,miner" \ + --http.addr 0.0.0.0 --http.port 8545 \ + --http.vhosts="*" --http.corsdomain "*" diff --git a/deploy/coins/monero/scripts/entry-dev.sh b/deploy/coins/monero/scripts/entry-dev.sh index 262c2c21..4429988e 100755 --- a/deploy/coins/monero/scripts/entry-dev.sh +++ b/deploy/coins/monero/scripts/entry-dev.sh @@ -1,11 +1,9 @@ #!/bin/sh -# Setup Environment + RPC_USER="${RPC_USER:=serai}" RPC_PASS="${RPC_PASS:=seraidex}" -BLOCK_TIME=${BLOCK_TIME:=5} # Run Monero # TODO: Restore Auth -monerod --regtest --rpc-access-control-origins * --confirm-external-bind \ - --rpc-bind-ip=0.0.0.0 --offline --fixed-difficulty=1 \ - --non-interactive --mining-threads 1 --detach +monerod --regtest --offline --fixed-difficulty=1 \ + --rpc-bind-ip=0.0.0.0 --rpc-access-control-origins * --confirm-external-bind diff --git 
a/docs/integrations/Monero.md b/docs/integrations/Monero.md
index be5b635d..76f170cd 100644
--- a/docs/integrations/Monero.md
+++ b/docs/integrations/Monero.md
@@ -2,22 +2,33 @@

 ### Addresses

-Monero addresses are an enum, defined as follows:
+Monero addresses are structs, defined as follows:

-  - `standard`: 32-byte key, 32-byte key.
-  - `subaddress`: 32-byte key, 32-byte key.
-  - `featured`: 1-byte flags, 32-byte key, 32-byte key.
+  - `kind`: Enum {
+      Standard,
+      Subaddress,
+      Featured { flags: u8 }
+    }
+  - `spend`: [u8; 32]
+  - `view`: [u8; 32]

-This definition of Featured Addresses is non-standard given the flags are
-intended to be a VarInt, yet as of now, only half of the bits are used, with no
-further planned features. Accordingly, it should be fine to fix its length,
-which makes it comply with expectations present here. If needed, another enum
-entry for a 2-byte flags Featured Address could be added.
+Integrated addresses are not supported, as only one can be sent to per Monero
+transaction. Supporting them would add a level of complexity to Serai which
+isn't worth it.
+
+This definition of Featured Addresses is non-standard since the flags are
+represented by a u8, not a VarInt. Currently, only half of the bits are used,
+with no further planned features. Accordingly, it should be fine to fix its
+size. If needed, another enum entry for a 2-byte flags Featured Address could be
+added.
+
+This definition is also non-standard in not having a Payment ID field. This
+follows from not supporting integrated addresses.

 ### In Instructions

 Monero In Instructions are present via `tx.extra`, specifically via inclusion
-in a `TX_EXTRA_TAG_PADDING` tag, and accordingly limited to 255 bytes.
+in a `TX_EXTRA_NONCE` tag, and accordingly limited to 255 bytes.
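+
+As a sketch, under this patch's `SignableTransaction::new` signature (`inputs`,
+`change`, and `fee_rate` are assumed to be prepared elsewhere, and
+`instruction_bytes` is a hypothetical encoded In Instruction), embedding such
+data looks like:
+
+```rust
+let tx = SignableTransaction::new(
+  Protocol::v16,
+  None, // r_seed; one is generated randomly when not provided
+  inputs,
+  vec![(address, amount)],
+  Some(change),
+  vec![instruction_bytes], // embedded in extra, capped at 255 bytes
+  fee_rate,
+)?;
+```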
### Out Instructions
diff --git a/processor/Cargo.toml b/processor/Cargo.toml
index 3c643db9..81ab8966 100644
--- a/processor/Cargo.toml
+++ b/processor/Cargo.toml
@@ -1,5 +1,5 @@
[package]
-name = "serai-processor"
+name = "processor"
version = "0.1.0"
description = "Multichain processor premised on canonicity to reach distributed consensus automatically"
license = "AGPL-3.0-only"
@@ -16,39 +16,56 @@ rustdoc-args = ["--cfg", "docsrs"]

[dependencies]
# Macros
async-trait = "0.1"
-zeroize = "^1.5"
+lazy_static = "1"
+zeroize = "1"
thiserror = "1"
+serde = { version = "1", features = ["derive"] }
+
+# Libs
rand_core = "0.6"
+rand_chacha = "0.3"
+
+# Encoders
+hex = "0.4"
+scale = { package = "parity-scale-codec", version = "3" }
+bincode = "1"
+serde_json = "1"

# Cryptography
-transcript = { package = "flexible-transcript", path = "../crypto/transcript", features = ["recommended"] }
-
group = "0.12"
-frost = { package = "modular-frost", path = "../crypto/frost", features = ["secp256k1", "ed25519"] }

-# Monero
-curve25519-dalek = { version = "3", features = ["std"] }
-dalek-ff-group = { path = "../crypto/dalek-ff-group", features = ["black_box"] }
-monero-serai = { path = "../coins/monero", features = ["multisig"] }
+transcript = { package = "flexible-transcript", path = "../crypto/transcript" }
+frost = { package = "modular-frost", path = "../crypto/frost" }

# Bitcoin
-bitcoin-serai = { path = "../coins/bitcoin" }
+secp256k1 = { version = "0.24", features = ["global-context", "rand-std"], optional = true }
+bitcoin = { version = "0.29", optional = true }

-k256 = { version = "0.12", features = ["arithmetic"] }
-bitcoin = "0.29"
-hex = "0.4"
-secp256k1 = { version = "0.24", features = ["global-context", "rand-std"] }
-serde = { version = "1.0", features = ["derive"] }
-serde_json = "1.0"
+k256 = { version = "0.12", features = ["arithmetic"], optional = true }
+bitcoin-serai = { path = "../coins/bitcoin", optional = true }

-[dev-dependencies]
-rand_core = "0.6"
+# Monero
+dalek-ff-group = { path = "../crypto/dalek-ff-group", optional = true }
+monero-serai = { path = "../coins/monero", features = ["multisig"], optional = true }

-hex = "0.4"
-serde = { version = "1", features = ["derive"] }
-serde_json = "1.0"
-
-futures = "0.3"
+# Application
+log = "0.4"
tokio = { version = "1", features = ["full"] }
+serai-client = { path = "../substrate/serai/client", default-features = false }
+
+messages = { package = "processor-messages", path = "./messages" }
+
[dev-dependencies]
+futures = "0.3"
+
frost = { package = "modular-frost", path = "../crypto/frost", features = ["tests"] }
+
+env_logger = "0.10"
+
+[features]
+secp256k1 = ["k256", "frost/secp256k1"]
+bitcoin = ["dep:secp256k1", "dep:bitcoin", "secp256k1", "bitcoin-serai", "serai-client/bitcoin"]
+
+ed25519 = ["dalek-ff-group", "frost/ed25519"]
+monero = ["ed25519", "monero-serai", "serai-client/monero"]
diff --git a/processor/README.md b/processor/README.md
new file mode 100644
index 00000000..f250d0ab
--- /dev/null
+++ b/processor/README.md
@@ -0,0 +1,65 @@
+# Processor
+
+The Serai processor scans a specified chain, communicating with the coordinator.
+
+### Key Generation
+
+The coordinator will tell the processor if it's been included in managing a
+coin. If so, the processor is to begin the key generation protocol, relying on
+the coordinator to provide authenticated communication with the remote parties.
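+
+As a sketch, the start of that protocol in terms of the new `processor-messages`
+types (the handler shape here is illustrative, not the processor's actual code):
+
+```rust
+use messages::key_gen::CoordinatorMessage;
+
+match msg {
+  // The coordinator includes us in a set, so start the DKG
+  CoordinatorMessage::GenerateKey { id, params } => { /* instantiate the DKG machine */ }
+  // Other participants' commitments, forwarded over the authenticated channel
+  CoordinatorMessage::Commitments { id, commitments } => { /* advance the DKG */ }
+  // Further rounds follow the same pattern
+  _ => {}
+}
+```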
+ +When the key generation protocol successfully completes, the processor is +expected to inform the coordinator so it may vote on it on the Substrate chain. +Once the key is voted in, it'll become active. + +### Scanning + +The processor is expected to scan all sufficiently confirmed blocks from a given +coin. This will create a list of outputs, considered pending. + +### Reporting + +These outputs are to be placed in a `Batch`, identified by the block containing +them. Batches are provided in an `Update` to Serai, paired by an agreed upon +block number. + +The processor will also produce an `Update` if there have been no batches within +the confirmation window. + +### Confirmed Outputs + +Once outputs have been acknowledged by Serai, they are considered confirmed. +With their confirmation, the validators are ready to create actions based on +them. + +Actions are triggered by passing the outputs to the scheduler. The scheduler +will do one of two things: + +1) Use the output +2) Accumulate it for later usage + +### Burn Events + +When the Serai chain issues a `Burn` event, the processor should send coins +accordingly. This is done by scheduling the payments out. + +# TODO + +- Acknowledging a sign ID as signed so we don't continue trying + +monero-serai now supports `Eventuality`s. When we have a plan to attempt, +we can create an `Eventuality` and see if it matches a given TX. A signing node +just has to submit the TX hash. + +Bitcoin will have the same TX hash flow, just explicitly matching against the +inputs. + +- Coordinator communication + +Kafka? RPC ping to them, which we don't count as 'sent' until we get a pong? + +- Handle reboots + +- Items marked TODO + +- Items marked TODO2, yet those only need to be done after protonet diff --git a/processor/messages/Cargo.toml b/processor/messages/Cargo.toml new file mode 100644 index 00000000..524384b4 --- /dev/null +++ b/processor/messages/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "processor-messages" +version = "0.1.0" +description = "Messages sent and received by the processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/messages" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[dependencies] +zeroize = { version = "1", features = ["derive"] } + +rand_core = "0.6" +rand_chacha = "0.3" +transcript = { package = "flexible-transcript", path = "../../crypto/transcript" } + +serde = { version = "1", features = ["derive"] } + +dkg = { path = "../../crypto/dkg", features = ["serde"] } + +serai-primitives = { path = "../../substrate/serai/primitives" } +in-instructions-primitives = { path = "../../substrate/in-instructions/primitives" } +tokens-primitives = { path = "../../substrate/tokens/primitives" } +validator-sets-primitives = { path = "../../substrate/validator-sets/primitives" } diff --git a/processor/messages/LICENSE b/processor/messages/LICENSE new file mode 100644 index 00000000..f684d027 --- /dev/null +++ b/processor/messages/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2023 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see <https://www.gnu.org/licenses/>. diff --git a/processor/messages/README.md b/processor/messages/README.md new file mode 100644 index 00000000..815eecb4 --- /dev/null +++ b/processor/messages/README.md @@ -0,0 +1,44 @@ +# Processor + +The Serai processor scans a specified chain, communicating with the coordinator. + +### Key Generation + +The coordinator will tell the processor if it's been included in managing a +coin. If so, the processor is to begin the key generation protocol, relying on +the coordinator to provide authenticated communication with the remote parties. + +When the key generation protocol successfully completes, the processor is +expected to inform the coordinator so it may vote on it on the Substrate chain. +Once the key is voted in, it'll become active. + +### Scanning + +The processor is expected to scan all sufficiently confirmed blocks from a given +coin. This will create a list of outputs, considered pending. + +### Reporting + +These outputs are to be placed in a `Batch`, identified by the block containing +them. Batches are provided in an `Update` to Serai, paired by an agreed upon +block number. + +The processor will also produce an `Update` if there have been no batches within +the confirmation window. + +### Confirmed Outputs + +Once outputs have been acknowledged by Serai, they are considered confirmed. +With their confirmation, the validators are ready to create actions based on +them. + +Actions are triggered by passing the outputs to the scheduler. The scheduler +will do one of two things: + +1) Use the output +2) Accumulate it for later usage + +### Burn Events + +When the Serai chain issues a `Burn` event, the processor should send coins +accordingly. This is done by scheduling the payments out. diff --git a/processor/messages/src/lib.rs b/processor/messages/src/lib.rs new file mode 100644 index 00000000..7d744883 --- /dev/null +++ b/processor/messages/src/lib.rs @@ -0,0 +1,147 @@ +use std::collections::HashMap; + +use zeroize::Zeroize; + +use rand_core::{RngCore, SeedableRng}; +use rand_chacha::ChaCha8Rng; +use transcript::{Transcript, RecommendedTranscript}; + +use serde::{Serialize, Deserialize}; + +use dkg::{Participant, ThresholdParams}; + +use serai_primitives::WithAmount; +use in_instructions_primitives::InInstruction; +use tokens_primitives::OutInstruction; +use validator_sets_primitives::ValidatorSetInstance; + +#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, Serialize, Deserialize)] +pub struct SubstrateContext { + pub time: u64, + pub coin_latest_block_number: u64, +} + +pub mod key_gen { + use super::*; + + #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, Serialize, Deserialize)] + pub struct KeyGenId { + pub set: ValidatorSetInstance, + pub attempt: u32, + } + + #[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] + pub enum CoordinatorMessage { + // Instructs the Processor to begin the key generation process. + GenerateKey { id: KeyGenId, params: ThresholdParams }, + // Received commitments for the specified key generation protocol. + Commitments { id: KeyGenId, commitments: HashMap<Participant, Vec<u8>> }, + // Received shares for the specified key generation protocol.
+ Shares { id: KeyGenId, shares: HashMap<Participant, Vec<u8>> }, + // Confirm a key. + ConfirmKey { context: SubstrateContext, id: KeyGenId }, + } + + #[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] + pub enum ProcessorMessage { + // Created commitments for the specified key generation protocol. + Commitments { id: KeyGenId, commitments: Vec<u8> }, + // Created shares for the specified key generation protocol. + Shares { id: KeyGenId, shares: HashMap<Participant, Vec<u8>> }, + // Resulting key from the specified key generation protocol. + GeneratedKey { id: KeyGenId, key: Vec<u8> }, + } +} + +pub mod sign { + use super::*; + + #[derive(Clone, PartialEq, Eq, Hash, Debug, Zeroize, Serialize, Deserialize)] + pub struct SignId { + pub key: Vec<u8>, + pub id: [u8; 32], + pub attempt: u32, + } + + impl SignId { + /// Determine a signing set for a given signing session. + // TODO: Replace with ROAST or the first available group of signers. + // https://github.com/serai-dex/serai/issues/163 + pub fn signing_set(&self, params: &ThresholdParams) -> Vec<Participant> { + let mut transcript = RecommendedTranscript::new(b"SignId signing_set"); + transcript.domain_separate(b"SignId"); + transcript.append_message(b"key", &self.key); + transcript.append_message(b"id", self.id); + transcript.append_message(b"attempt", self.attempt.to_le_bytes()); + + let mut candidates = + (1 ..= params.n()).map(|i| Participant::new(i).unwrap()).collect::<Vec<_>>(); + let mut rng = ChaCha8Rng::from_seed(transcript.rng_seed(b"signing_set")); + while candidates.len() > params.t().into() { + candidates.swap_remove( + usize::try_from(rng.next_u64() % u64::try_from(candidates.len()).unwrap()).unwrap(), + ); + } + candidates + } + } + + #[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] + pub enum CoordinatorMessage { + // Received preprocesses for the specified signing protocol. + Preprocesses { id: SignId, preprocesses: HashMap<Participant, Vec<u8>> }, + // Received shares for the specified signing protocol. + Shares { id: SignId, shares: HashMap<Participant, Vec<u8>> }, + // Completed a signing protocol already. + Completed { key: Vec<u8>, id: [u8; 32], tx: Vec<u8> }, + } + + #[derive(Clone, PartialEq, Eq, Debug, Zeroize, Serialize, Deserialize)] + pub enum ProcessorMessage { + // Created preprocess for the specified signing protocol. + Preprocess { id: SignId, preprocess: Vec<u8> }, + // Signed share for the specified signing protocol. + Share { id: SignId, share: Vec<u8> }, + // Completed a signing protocol already. + Completed { key: Vec<u8>, id: [u8; 32], tx: Vec<u8> }, + } + + impl CoordinatorMessage { + pub fn key(&self) -> &[u8] { + match self { + CoordinatorMessage::Preprocesses { id, .. } => &id.key, + CoordinatorMessage::Shares { id, .. } => &id.key, + CoordinatorMessage::Completed { key, ..
} => key, + } + } + } +} + +pub mod substrate { + use super::*; + + #[derive(Clone, PartialEq, Eq, Debug, Zeroize, Serialize, Deserialize)] + pub enum CoordinatorMessage { + BlockAcknowledged { context: SubstrateContext, key: Vec<u8>, block: Vec<u8> }, + Burns { context: SubstrateContext, burns: Vec<WithAmount<OutInstruction>> }, + } + + #[derive(Clone, PartialEq, Eq, Debug, Zeroize, Serialize, Deserialize)] + pub enum ProcessorMessage { + Update { key: Vec<u8>, block: Vec<u8>, instructions: Vec<WithAmount<InInstruction>> }, + } +} + +#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] +pub enum CoordinatorMessage { + KeyGen(key_gen::CoordinatorMessage), + Sign(sign::CoordinatorMessage), + Substrate(substrate::CoordinatorMessage), +} + +#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] +pub enum ProcessorMessage { + KeyGen(key_gen::ProcessorMessage), + Sign(sign::ProcessorMessage), + Substrate(substrate::ProcessorMessage), +} diff --git a/processor/src/coin/bitcoin.rs b/processor/src/coin/bitcoin.rs deleted file mode 100644 index 57f925b7..00000000 --- a/processor/src/coin/bitcoin.rs +++ /dev/null @@ -1,318 +0,0 @@ -use std::{io, collections::HashMap}; - -use async_trait::async_trait; - -#[rustfmt::skip] -use bitcoin::{ - hashes::Hash, schnorr::TweakedPublicKey, OutPoint, Transaction, Block, Network, Address -}; - -#[cfg(test)] -use bitcoin::{ - secp256k1::{SECP256K1, SecretKey, Message}, - PrivateKey, PublicKey, EcdsaSighashType, - blockdata::script::Builder, - PackedLockTime, Sequence, Script, Witness, TxIn, TxOut, -}; - -use transcript::RecommendedTranscript; -use k256::{ - ProjectivePoint, Scalar, - elliptic_curve::sec1::{ToEncodedPoint, Tag}, -}; -use frost::{curve::Secp256k1, ThresholdKeys}; - -use bitcoin_serai::{ - crypto::{x_only, make_even}, - wallet::{SpendableOutput, TransactionMachine, SignableTransaction as BSignableTransaction}, - rpc::Rpc, -}; - -use crate::coin::{CoinError, Block as BlockTrait, OutputType, Output as OutputTrait, Coin}; - -impl BlockTrait for Block { - type Id = [u8; 32]; - fn id(&self) -> Self::Id { - self.block_hash().as_hash().into_inner() - } -} - -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct Fee(u64); - -#[derive(Clone, Debug)] -pub struct Output(SpendableOutput, OutputType); -impl OutputTrait for Output { - type Id = [u8; 36]; - - fn kind(&self) -> OutputType { - self.1 - } - - fn id(&self) -> Self::Id { - self.0.id() - } - - fn amount(&self) -> u64 { - self.0.output.value - } - - fn serialize(&self) -> Vec<u8> { - let mut res = self.0.serialize(); - self.1.write(&mut res).unwrap(); - res - } - - fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> { - Ok(Output(SpendableOutput::read(reader)?, OutputType::read(reader)?)) - } -} - -#[derive(Debug)] -pub struct SignableTransaction { - keys: ThresholdKeys<Secp256k1>, - transcript: RecommendedTranscript, - actual: BSignableTransaction, -} - -fn next_key(mut key: ProjectivePoint, i: usize) -> (ProjectivePoint, Scalar) { - let mut offset = Scalar::ZERO; - for _ in 0 ..
i { - key += ProjectivePoint::GENERATOR; - offset += Scalar::ONE; - - let even_offset; - (key, even_offset) = make_even(key); - offset += Scalar::from(even_offset); - } - (key, offset) -} - -fn branch(key: ProjectivePoint) -> (ProjectivePoint, Scalar) { - next_key(key, 1) -} - -fn change(key: ProjectivePoint) -> (ProjectivePoint, Scalar) { - next_key(key, 2) -} - -#[derive(Clone, Debug)] -pub struct Bitcoin { - pub(crate) rpc: Rpc, -} - -impl Bitcoin { - pub async fn new(url: String) -> Bitcoin { - Bitcoin { rpc: Rpc::new(url) } - } - - #[cfg(test)] - pub async fn fresh_chain(&self) { - if self.rpc.get_latest_block_number().await.unwrap() > 0 { - self - .rpc - .rpc_call("invalidateblock", serde_json::json!([self.rpc.get_block_hash(1).await.unwrap()])) - .await - .unwrap() - } - } -} - -#[async_trait] -impl Coin for Bitcoin { - type Curve = Secp256k1; - - type Fee = Fee; - type Transaction = Transaction; - type Block = Block; - - type Output = Output; - type SignableTransaction = SignableTransaction; - type TransactionMachine = TransactionMachine; - - type Address = Address; - - const ID: &'static [u8] = b"Bitcoin"; - const CONFIRMATIONS: usize = 3; - - // TODO: Get hard numbers and tune - const MAX_INPUTS: usize = 128; - const MAX_OUTPUTS: usize = 16; - - fn tweak_keys(&self, key: &mut ThresholdKeys) { - let (_, offset) = make_even(key.group_key()); - *key = key.offset(Scalar::from(offset)); - } - - fn address(&self, key: ProjectivePoint) -> Self::Address { - debug_assert!(key.to_encoded_point(true).tag() == Tag::CompressedEvenY, "YKey is odd"); - Address::p2tr_tweaked( - TweakedPublicKey::dangerous_assume_tweaked(x_only(&key)), - Network::Regtest, - ) - } - - fn branch_address(&self, key: ProjectivePoint) -> Self::Address { - self.address(branch(key).0) - } - - async fn get_latest_block_number(&self) -> Result { - Ok(self.rpc.get_latest_block_number().await.map_err(|_| CoinError::ConnectionError)?) - } - - async fn get_block(&self, number: usize) -> Result { - let block_hash = - self.rpc.get_block_hash(number).await.map_err(|_| CoinError::ConnectionError)?; - self.rpc.get_block(&block_hash).await.map_err(|_| CoinError::ConnectionError) - } - - async fn get_outputs( - &self, - block: &Self::Block, - key: ProjectivePoint, - ) -> Result, CoinError> { - let external = (key, Scalar::ZERO); - let branch = branch(key); - let change = change(key); - - let entry = - |pair: (_, _), kind| (self.address(pair.0).script_pubkey().to_bytes(), (pair.1, kind)); - let scripts = HashMap::from([ - entry(external, OutputType::External), - entry(branch, OutputType::Branch), - entry(change, OutputType::Change), - ]); - - let mut outputs = Vec::new(); - // Skip the coinbase transaction which is burdened by maturity - for tx in &block.txdata[1 ..] 
{ - for (vout, output) in tx.output.iter().enumerate() { - if let Some(info) = scripts.get(&output.script_pubkey.to_bytes()) { - outputs.push(Output( - SpendableOutput { - offset: info.0, - output: output.clone(), - outpoint: OutPoint { txid: tx.txid(), vout: u32::try_from(vout).unwrap() }, - }, - info.1, - )); - } - } - } - - Ok(outputs) - } - - async fn prepare_send( - &self, - keys: ThresholdKeys, - transcript: RecommendedTranscript, - _: usize, - mut inputs: Vec, - payments: &[(Address, u64)], - change_key: Option, - fee: Fee, - ) -> Result { - Ok(SignableTransaction { - keys, - transcript, - actual: BSignableTransaction::new( - inputs.drain(..).map(|input| input.0).collect(), - payments, - change_key.map(|change_key| self.address(change(change_key).0)), - fee.0, - ) - .ok_or(CoinError::NotEnoughFunds)?, - }) - } - - async fn attempt_send( - &self, - transaction: Self::SignableTransaction, - ) -> Result { - transaction - .actual - .clone() - .multisig(transaction.keys.clone(), transaction.transcript.clone()) - .await - .map_err(|_| CoinError::ConnectionError) - } - - async fn publish_transaction(&self, tx: &Self::Transaction) -> Result, CoinError> { - Ok(self.rpc.send_raw_transaction(tx).await.unwrap().to_vec()) - } - - #[cfg(test)] - async fn get_fee(&self) -> Self::Fee { - Fee(1) - } - - #[cfg(test)] - async fn mine_block(&self) { - self - .rpc - .rpc_call::>( - "generatetoaddress", - serde_json::json!([ - 1, - Address::p2sh(&Script::new(), Network::Regtest).unwrap().to_string() - ]), - ) - .await - .unwrap(); - } - - #[cfg(test)] - async fn test_send(&self, address: Self::Address) { - let secret_key = SecretKey::new(&mut rand_core::OsRng); - let private_key = PrivateKey::new(secret_key, Network::Regtest); - let public_key = PublicKey::from_private_key(SECP256K1, &private_key); - let main_addr = Address::p2pkh(&public_key, Network::Regtest); - - let new_block = self.get_latest_block_number().await.unwrap() + 1; - self - .rpc - .rpc_call::>("generatetoaddress", serde_json::json!([1, main_addr])) - .await - .unwrap(); - - for _ in 0 .. 100 { - self.mine_block().await; - } - - // TODO: Consider grabbing bdk as a dev dependency - let tx = self.get_block(new_block).await.unwrap().txdata.swap_remove(0); - let mut tx = Transaction { - version: 2, - lock_time: PackedLockTime::ZERO, - input: vec![TxIn { - previous_output: OutPoint { txid: tx.txid(), vout: 0 }, - script_sig: Script::default(), - sequence: Sequence(u32::MAX), - witness: Witness::default(), - }], - output: vec![TxOut { - value: tx.output[0].value - 10000, - script_pubkey: address.script_pubkey(), - }], - }; - - let mut der = SECP256K1 - .sign_ecdsa_low_r( - &Message::from( - tx.signature_hash(0, &main_addr.script_pubkey(), EcdsaSighashType::All.to_u32()) - .as_hash(), - ), - &private_key.inner, - ) - .serialize_der() - .to_vec(); - der.push(1); - tx.input[0].script_sig = Builder::new().push_slice(&der).push_key(&public_key).into_script(); - - self.rpc.send_raw_transaction(&tx).await.unwrap(); - for _ in 0 .. 
Self::CONFIRMATIONS { - self.mine_block().await; - } - } -} diff --git a/processor/src/coin/mod.rs b/processor/src/coin/mod.rs deleted file mode 100644 index 3a81731e..00000000 --- a/processor/src/coin/mod.rs +++ /dev/null @@ -1,134 +0,0 @@ -use std::io; - -use async_trait::async_trait; -use thiserror::Error; - -use transcript::RecommendedTranscript; -use frost::{ - curve::{Ciphersuite, Curve}, - ThresholdKeys, - sign::PreprocessMachine, -}; - -pub mod bitcoin; -pub use self::bitcoin::Bitcoin; - -pub mod monero; -pub use self::monero::Monero; - -#[derive(Clone, Copy, Error, Debug)] -pub enum CoinError { - #[error("failed to connect to coin daemon")] - ConnectionError, - #[error("not enough funds")] - NotEnoughFunds, -} - -pub trait Block: Sized + Clone { - type Id: Clone + Copy + AsRef<[u8]>; - fn id(&self) -> Self::Id; -} - -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub enum OutputType { - External, - Branch, - Change, -} - -impl OutputType { - fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(&[match self { - OutputType::External => 0, - OutputType::Branch => 1, - OutputType::Change => 2, - }]) - } - - fn read(reader: &mut R) -> io::Result { - let mut byte = [0; 1]; - reader.read_exact(&mut byte)?; - Ok(match byte[0] { - 0 => OutputType::External, - 1 => OutputType::Branch, - 2 => OutputType::Change, - _ => Err(io::Error::new(io::ErrorKind::Other, "invalid OutputType"))?, - }) - } -} - -pub trait Output: Sized + Clone { - type Id: Clone + Copy + AsRef<[u8]>; - - fn kind(&self) -> OutputType; - - fn id(&self) -> Self::Id; - fn amount(&self) -> u64; - - fn serialize(&self) -> Vec; - fn read(reader: &mut R) -> std::io::Result; -} - -#[async_trait] -pub trait Coin { - type Curve: Curve; - - type Fee: Copy; - type Transaction; - type Block: Block; - - type Output: Output; - type SignableTransaction; - type TransactionMachine: PreprocessMachine; - - type Address: Send; - - const ID: &'static [u8]; - const CONFIRMATIONS: usize; - const MAX_INPUTS: usize; - const MAX_OUTPUTS: usize; // TODO: Decide if this includes change or not - - fn tweak_keys(&self, key: &mut ThresholdKeys); - - /// Address for the given group key to receive external coins to. - // Doesn't have to take self, enables some level of caching which is pleasant - fn address(&self, key: ::G) -> Self::Address; - /// Address for the given group key to use for scheduled branches. 
- fn branch_address(&self, key: ::G) -> Self::Address; - - async fn get_latest_block_number(&self) -> Result; - async fn get_block(&self, number: usize) -> Result; - async fn get_outputs( - &self, - block: &Self::Block, - key: ::G, - ) -> Result, CoinError>; - - #[allow(clippy::too_many_arguments)] - async fn prepare_send( - &self, - keys: ThresholdKeys, - transcript: RecommendedTranscript, - block_number: usize, - inputs: Vec, - payments: &[(Self::Address, u64)], - change: Option<::G>, - fee: Self::Fee, - ) -> Result; - - async fn attempt_send( - &self, - transaction: Self::SignableTransaction, - ) -> Result; - - async fn publish_transaction(&self, tx: &Self::Transaction) -> Result, CoinError>; - - #[cfg(test)] - async fn get_fee(&self) -> Self::Fee; - - #[cfg(test)] - async fn mine_block(&self); - - #[cfg(test)] - async fn test_send(&self, key: Self::Address); -} diff --git a/processor/src/coin/monero.rs b/processor/src/coin/monero.rs deleted file mode 100644 index 9103c3b0..00000000 --- a/processor/src/coin/monero.rs +++ /dev/null @@ -1,331 +0,0 @@ -use async_trait::async_trait; - -use zeroize::Zeroizing; - -use curve25519_dalek::scalar::Scalar; - -use dalek_ff_group as dfg; -use transcript::RecommendedTranscript; -use frost::{curve::Ed25519, ThresholdKeys}; - -use monero_serai::{ - transaction::Transaction, - block::Block as MBlock, - rpc::Rpc, - wallet::{ - ViewPair, Scanner, - address::{Network, SubaddressIndex, AddressSpec, MoneroAddress}, - Fee, SpendableOutput, Change, SignableTransaction as MSignableTransaction, TransactionMachine, - }, -}; - -use crate::{ - additional_key, - coin::{CoinError, Block as BlockTrait, OutputType, Output as OutputTrait, Coin}, -}; - -#[derive(Clone, Debug)] -pub struct Block([u8; 32], MBlock); -impl BlockTrait for Block { - type Id = [u8; 32]; - fn id(&self) -> Self::Id { - self.0 - } -} - -#[derive(Clone, Debug)] -pub struct Output(SpendableOutput); -impl From for Output { - fn from(output: SpendableOutput) -> Output { - Output(output) - } -} - -const EXTERNAL_SUBADDRESS: Option = SubaddressIndex::new(0, 0); -const BRANCH_SUBADDRESS: Option = SubaddressIndex::new(1, 0); -const CHANGE_SUBADDRESS: Option = SubaddressIndex::new(2, 0); - -impl OutputTrait for Output { - // While we could use (tx, o), using the key ensures we won't be susceptible to the burning bug. - // While we already are immune, thanks to using featured address, this doesn't hurt and is - // technically more efficient. 
- type Id = [u8; 32]; - - fn kind(&self) -> OutputType { - match self.0.output.metadata.subaddress { - EXTERNAL_SUBADDRESS => OutputType::External, - BRANCH_SUBADDRESS => OutputType::Branch, - CHANGE_SUBADDRESS => OutputType::Change, - _ => panic!("unrecognized address was scanned for"), - } - } - - fn id(&self) -> Self::Id { - self.0.output.data.key.compress().to_bytes() - } - - fn amount(&self) -> u64 { - self.0.commitment().amount - } - - fn serialize(&self) -> Vec { - self.0.serialize() - } - - fn read(reader: &mut R) -> std::io::Result { - SpendableOutput::read(reader).map(Output) - } -} - -#[derive(Debug)] -pub struct SignableTransaction { - keys: ThresholdKeys, - transcript: RecommendedTranscript, - // Monero height, defined as the length of the chain - height: usize, - actual: MSignableTransaction, -} - -#[derive(Clone, Debug)] -pub struct Monero { - pub(crate) rpc: Rpc, - view: Zeroizing, -} - -impl Monero { - pub async fn new(url: String) -> Monero { - Monero { rpc: Rpc::new(url).unwrap(), view: Zeroizing::new(additional_key::(0).0) } - } - - fn view_pair(&self, spend: dfg::EdwardsPoint) -> ViewPair { - ViewPair::new(spend.0, self.view.clone()) - } - - fn address_internal( - &self, - spend: dfg::EdwardsPoint, - subaddress: Option, - ) -> MoneroAddress { - self.view_pair(spend).address( - Network::Mainnet, - AddressSpec::Featured { subaddress, payment_id: None, guaranteed: true }, - ) - } - - fn scanner(&self, spend: dfg::EdwardsPoint) -> Scanner { - let mut scanner = Scanner::from_view(self.view_pair(spend), None); - debug_assert!(EXTERNAL_SUBADDRESS.is_none()); - scanner.register_subaddress(BRANCH_SUBADDRESS.unwrap()); - scanner.register_subaddress(CHANGE_SUBADDRESS.unwrap()); - scanner - } - - #[cfg(test)] - fn test_view_pair() -> ViewPair { - use group::Group; - ViewPair::new(*dfg::EdwardsPoint::generator(), Zeroizing::new(Scalar::one())) - } - - #[cfg(test)] - fn test_scanner() -> Scanner { - Scanner::from_view(Self::test_view_pair(), Some(std::collections::HashSet::new())) - } - - #[cfg(test)] - fn test_address() -> MoneroAddress { - Self::test_view_pair().address(Network::Mainnet, AddressSpec::Standard) - } -} - -#[async_trait] -impl Coin for Monero { - type Curve = Ed25519; - - type Fee = Fee; - type Transaction = Transaction; - type Block = Block; - - type Output = Output; - type SignableTransaction = SignableTransaction; - type TransactionMachine = TransactionMachine; - - type Address = MoneroAddress; - - const ID: &'static [u8] = b"Monero"; - const CONFIRMATIONS: usize = 10; - // Testnet TX bb4d188a4c571f2f0de70dca9d475abc19078c10ffa8def26dd4f63ce1bcfd79 uses 146 inputs - // while using less than 100kb of space, albeit with just 2 outputs (though outputs share a BP) - // The TX size limit is half the contextual median block weight, where said weight is >= 300,000 - // This means any TX which fits into 150kb will be accepted by Monero - // 128, even with 16 outputs, should fit into 100kb. 
Further efficiency by 192 may be viable - // TODO: Get hard numbers and tune - const MAX_INPUTS: usize = 128; - const MAX_OUTPUTS: usize = 16; - - // Monero doesn't require/benefit from tweaking - fn tweak_keys(&self, _: &mut ThresholdKeys) {} - - fn address(&self, key: dfg::EdwardsPoint) -> Self::Address { - self.address_internal(key, EXTERNAL_SUBADDRESS) - } - - fn branch_address(&self, key: dfg::EdwardsPoint) -> Self::Address { - self.address_internal(key, BRANCH_SUBADDRESS) - } - - async fn get_latest_block_number(&self) -> Result { - // Monero defines height as chain length, so subtract 1 for block number - Ok(self.rpc.get_height().await.map_err(|_| CoinError::ConnectionError)? - 1) - } - - async fn get_block(&self, number: usize) -> Result { - let hash = self.rpc.get_block_hash(number).await.map_err(|_| CoinError::ConnectionError)?; - let block = self.rpc.get_block(hash).await.map_err(|_| CoinError::ConnectionError)?; - Ok(Block(hash, block)) - } - - async fn get_outputs( - &self, - block: &Self::Block, - key: dfg::EdwardsPoint, - ) -> Result, CoinError> { - let mut transactions = self - .scanner(key) - .scan(&self.rpc, &block.1) - .await - .map_err(|_| CoinError::ConnectionError)? - .iter() - .map(|outputs| outputs.not_locked()) - .collect::>(); - - // This should be pointless as we shouldn't be able to scan for any other subaddress - // This just ensures nothing invalid makes it through - for transaction in transactions.iter_mut() { - *transaction = transaction - .drain(..) - .filter(|output| { - [EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS] - .contains(&output.output.metadata.subaddress) - }) - .collect(); - } - - Ok( - transactions - .drain(..) - .flat_map(|mut outputs| outputs.drain(..).map(Output::from).collect::>()) - .collect(), - ) - } - - async fn prepare_send( - &self, - keys: ThresholdKeys, - transcript: RecommendedTranscript, - block_number: usize, - mut inputs: Vec, - payments: &[(MoneroAddress, u64)], - change: Option, - fee: Fee, - ) -> Result { - Ok(SignableTransaction { - keys, - transcript, - height: block_number + 1, - actual: MSignableTransaction::new( - self.rpc.get_protocol().await.unwrap(), // TODO: Make this deterministic - inputs.drain(..).map(|input| input.0).collect(), - payments.to_vec(), - change - .map(|change| Change::fingerprintable(self.address_internal(change, CHANGE_SUBADDRESS))), - vec![], - fee, - ) - .map_err(|_| CoinError::ConnectionError)?, - }) - } - - async fn attempt_send( - &self, - transaction: SignableTransaction, - ) -> Result { - transaction - .actual - .clone() - .multisig( - &self.rpc, - transaction.keys.clone(), - transaction.transcript.clone(), - transaction.height, - ) - .await - .map_err(|_| CoinError::ConnectionError) - } - - async fn publish_transaction(&self, tx: &Self::Transaction) -> Result, CoinError> { - self.rpc.publish_transaction(tx).await.map_err(|_| CoinError::ConnectionError)?; - Ok(tx.hash().to_vec()) - } - - #[cfg(test)] - async fn get_fee(&self) -> Self::Fee { - self.rpc.get_fee().await.unwrap() - } - - #[cfg(test)] - async fn mine_block(&self) { - #[derive(serde::Deserialize, Debug)] - struct EmptyResponse {} - let _: EmptyResponse = self - .rpc - .rpc_call( - "json_rpc", - Some(serde_json::json!({ - "method": "generateblocks", - "params": { - "wallet_address": Self::test_address().to_string(), - "amount_of_blocks": 10 - }, - })), - ) - .await - .unwrap(); - } - - #[cfg(test)] - async fn test_send(&self, address: Self::Address) { - use zeroize::Zeroizing; - use rand_core::OsRng; - - let new_block = 
self.get_latest_block_number().await.unwrap() + 1; - - self.mine_block().await; - for _ in 0 .. 7 { - self.mine_block().await; - } - - let outputs = Self::test_scanner() - .scan(&self.rpc, &self.rpc.get_block_by_number(new_block).await.unwrap()) - .await - .unwrap() - .swap_remove(0) - .ignore_timelock(); - - let amount = outputs[0].commitment().amount; - let fee = 3000000000; // TODO - let tx = MSignableTransaction::new( - self.rpc.get_protocol().await.unwrap(), - outputs, - vec![(address, amount - fee)], - Some(Change::new(&Self::test_view_pair(), true)), - vec![], - self.rpc.get_fee().await.unwrap(), - ) - .unwrap() - .sign(&mut OsRng, &self.rpc, &Zeroizing::new(Scalar::one())) - .await - .unwrap(); - self.rpc.publish_transaction(&tx).await.unwrap(); - self.mine_block().await; - } -} diff --git a/processor/src/coins/bitcoin.rs b/processor/src/coins/bitcoin.rs new file mode 100644 index 00000000..3c7ec235 --- /dev/null +++ b/processor/src/coins/bitcoin.rs @@ -0,0 +1,517 @@ +use std::{io, collections::HashMap}; + +use async_trait::async_trait; + +use bitcoin::{ + hashes::Hash as HashTrait, + schnorr::TweakedPublicKey, + consensus::{Encodable, Decodable}, + psbt::serialize::Serialize, + OutPoint, + blockdata::script::Instruction, + Transaction, Block, Network, Address as BAddress, +}; + +#[cfg(test)] +use bitcoin::{ + secp256k1::{SECP256K1, SecretKey, Message}, + PrivateKey, PublicKey, EcdsaSighashType, + blockdata::script::Builder, + PackedLockTime, Sequence, Script, Witness, TxIn, TxOut, +}; + +use transcript::RecommendedTranscript; +use k256::{ + ProjectivePoint, Scalar, + elliptic_curve::sec1::{ToEncodedPoint, Tag}, +}; +use frost::{curve::Secp256k1, ThresholdKeys}; + +use bitcoin_serai::{ + crypto::{x_only, make_even}, + wallet::{SpendableOutput, TransactionMachine, SignableTransaction as BSignableTransaction}, + rpc::{RpcError, Rpc}, +}; + +use serai_client::coins::bitcoin::Address; + +use crate::{ + coins::{ + CoinError, Block as BlockTrait, OutputType, Output as OutputTrait, + Transaction as TransactionTrait, Eventuality, PostFeeBranch, Coin, drop_branches, amortize_fee, + }, + Plan, +}; + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct OutputId(pub [u8; 36]); +impl Default for OutputId { + fn default() -> Self { + Self([0; 36]) + } +} +impl AsRef<[u8]> for OutputId { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} +impl AsMut<[u8]> for OutputId { + fn as_mut(&mut self) -> &mut [u8] { + self.0.as_mut() + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Output { + kind: OutputType, + output: SpendableOutput, + data: Vec, +} + +impl OutputTrait for Output { + type Id = OutputId; + + fn kind(&self) -> OutputType { + self.kind + } + + fn id(&self) -> Self::Id { + OutputId(self.output.id()) + } + + fn amount(&self) -> u64 { + self.output.output.value + } + + fn data(&self) -> &[u8] { + &self.data + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + self.kind.write(writer)?; + self.output.write(writer)?; + writer.write_all(&u16::try_from(self.data.len()).unwrap().to_le_bytes())?; + writer.write_all(&self.data) + } + + fn read(reader: &mut R) -> io::Result { + Ok(Output { + kind: OutputType::read(reader)?, + output: SpendableOutput::read(reader)?, + data: { + let mut data_len = [0; 2]; + reader.read_exact(&mut data_len)?; + + let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))]; + reader.read_exact(&mut data)?; + data + }, + }) + } +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct Fee(u64); + +#[async_trait] +impl TransactionTrait 
for Transaction { + type Id = [u8; 32]; + fn id(&self) -> Self::Id { + let mut hash = self.txid().as_hash().into_inner(); + hash.reverse(); + hash + } + fn serialize(&self) -> Vec { + Serialize::serialize(self) + } + #[cfg(test)] + async fn fee(&self, coin: &Bitcoin) -> u64 { + let mut value = 0; + for input in &self.input { + let output = input.previous_output; + let mut hash = output.txid.as_hash().into_inner(); + hash.reverse(); + value += coin.rpc.get_transaction(&hash).await.unwrap().output + [usize::try_from(output.vout).unwrap()] + .value; + } + for output in &self.output { + value -= output.value; + } + value + } +} + +impl Eventuality for OutPoint { + fn read(reader: &mut R) -> io::Result { + OutPoint::consensus_decode(reader) + .map_err(|_| io::Error::new(io::ErrorKind::Other, "couldn't decode outpoint as eventuality")) + } + fn serialize(&self) -> Vec { + let mut buf = Vec::with_capacity(36); + self.consensus_encode(&mut buf).unwrap(); + buf + } +} + +#[derive(Clone, Debug)] +pub struct SignableTransaction { + keys: ThresholdKeys, + transcript: RecommendedTranscript, + actual: BSignableTransaction, +} +impl PartialEq for SignableTransaction { + fn eq(&self, other: &SignableTransaction) -> bool { + self.actual == other.actual + } +} +impl Eq for SignableTransaction {} + +impl BlockTrait for Block { + type Id = [u8; 32]; + fn id(&self) -> Self::Id { + let mut hash = self.block_hash().as_hash().into_inner(); + hash.reverse(); + hash + } + fn median_fee(&self) -> Fee { + // TODO + Fee(20) + } +} + +fn next_key(mut key: ProjectivePoint, i: usize) -> (ProjectivePoint, Scalar) { + let mut offset = Scalar::ZERO; + for _ in 0 .. i { + key += ProjectivePoint::GENERATOR; + offset += Scalar::ONE; + + let even_offset; + (key, even_offset) = make_even(key); + offset += Scalar::from(even_offset); + } + (key, offset) +} + +fn branch(key: ProjectivePoint) -> (ProjectivePoint, Scalar) { + next_key(key, 1) +} + +fn change(key: ProjectivePoint) -> (ProjectivePoint, Scalar) { + next_key(key, 2) +} + +#[derive(Clone, Debug)] +pub struct Bitcoin { + pub(crate) rpc: Rpc, +} +// Shim required for testing/debugging purposes due to generic arguments also necessitating trait +// bounds +impl PartialEq for Bitcoin { + fn eq(&self, _: &Self) -> bool { + true + } +} +impl Eq for Bitcoin {} + +impl Bitcoin { + pub fn new(url: String) -> Bitcoin { + Bitcoin { rpc: Rpc::new(url) } + } + + #[cfg(test)] + pub async fn fresh_chain(&self) { + if self.rpc.get_latest_block_number().await.unwrap() > 0 { + self + .rpc + .rpc_call( + "invalidateblock", + serde_json::json!([hex::encode(self.rpc.get_block_hash(1).await.unwrap())]), + ) + .await + .unwrap() + } + } +} + +#[async_trait] +impl Coin for Bitcoin { + type Curve = Secp256k1; + + type Fee = Fee; + type Transaction = Transaction; + type Block = Block; + + type Output = Output; + type SignableTransaction = SignableTransaction; + // Valid given an honest multisig, as assumed + // Only the multisig can spend this output and the multisig, if spending this output, will + // always create a specific plan + type Eventuality = OutPoint; + type TransactionMachine = TransactionMachine; + + type Address = Address; + + const ID: &'static str = "Bitcoin"; + const CONFIRMATIONS: usize = 3; + + // 0.0001 BTC + #[allow(clippy::inconsistent_digit_grouping)] + const DUST: u64 = 1_00_000_000 / 10_000; + + // Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT) + // A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes + // While our inputs are 
entirely SegWit, such fine tuning is not necessary and could create + // issues in the future (if the size decreases or we mis-evaluate it) + // It also offers a minimal amount of benefit when we are able to logarithmically accumulate + // inputs + // For 128-byte inputs (40-byte output specification, 64-byte signature, whatever overhead) and + // 64-byte outputs (40-byte script, 8-byte amount, whatever overhead), they together take up 192 + // bytes + // 100,000 / 192 = 520 + // 520 * 192 leaves 160 bytes of overhead for the transaction structure itself + const MAX_INPUTS: usize = 520; + const MAX_OUTPUTS: usize = 520; + + fn tweak_keys(key: &mut ThresholdKeys) { + let (_, offset) = make_even(key.group_key()); + *key = key.offset(Scalar::from(offset)); + } + + fn address(key: ProjectivePoint) -> Self::Address { + assert!(key.to_encoded_point(true).tag() == Tag::CompressedEvenY, "YKey is odd"); + Address(BAddress::p2tr_tweaked( + TweakedPublicKey::dangerous_assume_tweaked(x_only(&key)), + Network::Bitcoin, + )) + } + + fn branch_address(key: ProjectivePoint) -> Self::Address { + Self::address(branch(key).0) + } + + async fn get_latest_block_number(&self) -> Result { + self.rpc.get_latest_block_number().await.map_err(|_| CoinError::ConnectionError) + } + + async fn get_block(&self, number: usize) -> Result { + let block_hash = + self.rpc.get_block_hash(number).await.map_err(|_| CoinError::ConnectionError)?; + self.rpc.get_block(&block_hash).await.map_err(|_| CoinError::ConnectionError) + } + + async fn get_outputs( + &self, + block: &Self::Block, + key: ProjectivePoint, + ) -> Result, CoinError> { + let external = (key, Scalar::ZERO); + let branch = branch(key); + let change = change(key); + + let entry = + |pair: (_, _), kind| (Self::address(pair.0).0.script_pubkey().to_bytes(), (pair.1, kind)); + let scripts = HashMap::from([ + entry(external, OutputType::External), + entry(branch, OutputType::Branch), + entry(change, OutputType::Change), + ]); + + let mut outputs = Vec::new(); + // Skip the coinbase transaction which is burdened by maturity + for tx in &block.txdata[1 ..] 
{ + for (vout, output) in tx.output.iter().enumerate() { + if let Some(info) = scripts.get(&output.script_pubkey.to_bytes()) { + outputs.push(Output { + kind: info.1, + output: SpendableOutput { + offset: info.0, + output: output.clone(), + outpoint: OutPoint { txid: tx.txid(), vout: u32::try_from(vout).unwrap() }, + }, + data: (|| { + for output in &tx.output { + if output.script_pubkey.is_op_return() { + match output.script_pubkey.instructions_minimal().last() { + Some(Ok(Instruction::PushBytes(data))) => return data.to_vec(), + _ => continue, + } + } + } + vec![] + })(), + }); + } + } + } + + Ok(outputs) + } + + async fn prepare_send( + &self, + keys: ThresholdKeys, + _: usize, + mut plan: Plan, + fee: Fee, + ) -> Result<(Option<(SignableTransaction, Self::Eventuality)>, Vec), CoinError> { + let signable = |plan: &Plan, tx_fee: Option<_>| { + let mut payments = vec![]; + for payment in &plan.payments { + // If we're solely estimating the fee, don't actually specify an amount + // This won't affect the fee calculation yet will ensure we don't hit an out of funds error + payments + .push((payment.address.0.clone(), if tx_fee.is_none() { 0 } else { payment.amount })); + } + + match BSignableTransaction::new( + plan.inputs.iter().map(|input| input.output.clone()).collect(), + &payments, + plan.change.map(|key| Self::address(change(key).0).0), + None, + fee.0, + ) { + Some(signable) => Some(signable), + // TODO: Use a proper error here + None => { + if tx_fee.is_none() { + // Not enough funds + None + } else { + panic!("didn't have enough funds for a Bitcoin TX"); + } + } + } + }; + + let tx_fee = match signable(&plan, None) { + Some(tx) => tx.fee(), + None => return Ok((None, drop_branches(&plan))), + }; + + let branch_outputs = amortize_fee(&mut plan, tx_fee); + + Ok(( + Some(( + SignableTransaction { + keys, + transcript: plan.transcript(), + actual: signable(&plan, Some(tx_fee)).unwrap(), + }, + plan.inputs[0].output.outpoint, + )), + branch_outputs, + )) + } + + async fn attempt_send( + &self, + transaction: Self::SignableTransaction, + ) -> Result { + transaction + .actual + .clone() + .multisig(transaction.keys.clone(), transaction.transcript.clone()) + .await + .map_err(|_| CoinError::ConnectionError) + } + + async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), CoinError> { + match self.rpc.send_raw_transaction(tx).await { + Ok(_) => (), + Err(RpcError::ConnectionError) => Err(CoinError::ConnectionError)?, + // TODO: Distinguish already in pool vs double spend (other signing attempt succeeded) vs + // invalid transaction + Err(e) => panic!("failed to publish TX {:?}: {e}", tx.txid()), + } + Ok(()) + } + + async fn get_transaction(&self, id: &[u8; 32]) -> Result { + self.rpc.get_transaction(id).await.map_err(|_| CoinError::ConnectionError) + } + + fn confirm_completion(&self, eventuality: &OutPoint, tx: &Transaction) -> bool { + eventuality == &tx.input[0].previous_output + } + + #[cfg(test)] + async fn get_block_number(&self, id: &[u8; 32]) -> usize { + self.rpc.get_block_number(id).await.unwrap() + } + + #[cfg(test)] + async fn get_fee(&self) -> Self::Fee { + Fee(1) + } + + #[cfg(test)] + async fn mine_block(&self) { + self + .rpc + .rpc_call::>( + "generatetoaddress", + serde_json::json!([ + 1, + BAddress::p2sh(&Script::new(), Network::Regtest).unwrap().to_string() + ]), + ) + .await + .unwrap(); + } + + #[cfg(test)] + async fn test_send(&self, address: Self::Address) -> Block { + let secret_key = SecretKey::new(&mut rand_core::OsRng); + let private_key = 
PrivateKey::new(secret_key, Network::Regtest); + let public_key = PublicKey::from_private_key(SECP256K1, &private_key); + let main_addr = BAddress::p2pkh(&public_key, Network::Regtest); + + let new_block = self.get_latest_block_number().await.unwrap() + 1; + self + .rpc + .rpc_call::>("generatetoaddress", serde_json::json!([1, main_addr])) + .await + .unwrap(); + + for _ in 0 .. 100 { + self.mine_block().await; + } + + let tx = self.get_block(new_block).await.unwrap().txdata.swap_remove(0); + let mut tx = Transaction { + version: 2, + lock_time: PackedLockTime::ZERO, + input: vec![TxIn { + previous_output: OutPoint { txid: tx.txid(), vout: 0 }, + script_sig: Script::default(), + sequence: Sequence(u32::MAX), + witness: Witness::default(), + }], + output: vec![TxOut { + value: tx.output[0].value - 10000, + script_pubkey: address.0.script_pubkey(), + }], + }; + + let mut der = SECP256K1 + .sign_ecdsa_low_r( + &Message::from( + tx.signature_hash(0, &main_addr.script_pubkey(), EcdsaSighashType::All.to_u32()) + .as_hash(), + ), + &private_key.inner, + ) + .serialize_der() + .to_vec(); + der.push(1); + tx.input[0].script_sig = Builder::new().push_slice(&der).push_key(&public_key).into_script(); + + let block = self.get_latest_block_number().await.unwrap() + 1; + self.rpc.send_raw_transaction(&tx).await.unwrap(); + for _ in 0 .. Self::CONFIRMATIONS { + self.mine_block().await; + } + self.get_block(block).await.unwrap() + } +} diff --git a/processor/src/coins/mod.rs b/processor/src/coins/mod.rs new file mode 100644 index 00000000..2c8378da --- /dev/null +++ b/processor/src/coins/mod.rs @@ -0,0 +1,298 @@ +use core::fmt::Debug; +use std::io; + +use async_trait::async_trait; +use thiserror::Error; + +use frost::{ + curve::{Ciphersuite, Curve}, + ThresholdKeys, + sign::PreprocessMachine, +}; + +#[cfg(feature = "bitcoin")] +pub mod bitcoin; +#[cfg(feature = "bitcoin")] +pub use self::bitcoin::Bitcoin; + +#[cfg(feature = "monero")] +pub mod monero; +#[cfg(feature = "monero")] +pub use monero::Monero; + +use crate::Plan; + +#[derive(Clone, Copy, Error, Debug)] +pub enum CoinError { + #[error("failed to connect to coin daemon")] + ConnectionError, +} + +pub trait Id: + Send + Sync + Clone + Default + PartialEq + AsRef<[u8]> + AsMut<[u8]> + Debug +{ +} +impl + AsMut<[u8]> + Debug> Id for I {} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum OutputType { + // Needs to be processed/sent up to Substrate + External, + + // Given a known output set, and a known series of outbound transactions, we should be able to + // form a completely deterministic schedule S. The issue is when S has TXs which spend prior TXs + // in S (which is needed for our logarithmic scheduling). In order to have the descendant TX, say + // S[1], build off S[0], we need to observe when S[0] is included on-chain. + // + // We cannot. + // + // Monero (and other privacy coins) do not expose their UTXO graphs. Even if we know how to + // create S[0], and the actual payment info behind it, we cannot observe it on the blockchain + // unless we participated in creating it. Locking the entire schedule, when we cannot sign for + // the entire schedule at once, to a single signing set isn't feasible. + // + // While any member of the active signing set can provide data enabling other signers to + // participate, it's several KB of data which we then have to code communication for. + // The other option is to simply not observe S[0]. Instead, observe a TX with an identical output + // to the one in S[0] we intended to use for S[1]. 
It's either from S[0], or Eve, a malicious + // actor, has sent us a forged TX which is... equally as usable? so who cares? + // + // The only issue is if we have multiple outputs on-chain with identical amounts and purposes. + // Accordingly, when the scheduler makes a plan for when a specific output is available, it + // shouldn't write that plan. It should *push* that plan to a queue of plans to perform when + // instances of that output occur. + Branch, + + // Should be added to the available UTXO pool with no further action + Change, +} + +impl OutputType { + fn write(&self, writer: &mut W) -> io::Result<()> { + writer.write_all(&[match self { + OutputType::External => 0, + OutputType::Branch => 1, + OutputType::Change => 2, + }]) + } + + fn read(reader: &mut R) -> io::Result { + let mut byte = [0; 1]; + reader.read_exact(&mut byte)?; + Ok(match byte[0] { + 0 => OutputType::External, + 1 => OutputType::Branch, + 2 => OutputType::Change, + _ => Err(io::Error::new(io::ErrorKind::Other, "invalid OutputType"))?, + }) + } +} + +pub trait Output: Send + Sync + Sized + Clone + PartialEq + Eq + Debug { + type Id: 'static + Id; + + fn kind(&self) -> OutputType; + + fn id(&self) -> Self::Id; + fn amount(&self) -> u64; + + fn data(&self) -> &[u8]; + + fn write(&self, writer: &mut W) -> io::Result<()>; + fn read(reader: &mut R) -> io::Result; +} + +#[async_trait] +pub trait Transaction: Send + Sync + Sized + Clone + Debug { + type Id: 'static + Id; + fn id(&self) -> Self::Id; + fn serialize(&self) -> Vec; + + #[cfg(test)] + async fn fee(&self, coin: &C) -> u64; +} + +pub trait Eventuality: Send + Sync + Clone + Debug { + fn read(reader: &mut R) -> io::Result; + fn serialize(&self) -> Vec; +} + +pub trait Block: Send + Sync + Sized + Clone + Debug { + type Id: 'static + Id; + fn id(&self) -> Self::Id; + fn median_fee(&self) -> C::Fee; +} + +// The post-fee value of an expected branch. +pub struct PostFeeBranch { + pub expected: u64, + pub actual: Option, +} + +// Return the PostFeeBranches needed when dropping a transaction +pub fn drop_branches(plan: &Plan) -> Vec { + let mut branch_outputs = vec![]; + for payment in &plan.payments { + if payment.address == C::branch_address(plan.key) { + branch_outputs.push(PostFeeBranch { expected: payment.amount, actual: None }); + } + } + branch_outputs +} + +// Amortize a fee over the plan's payments +pub fn amortize_fee(plan: &mut Plan, tx_fee: u64) -> Vec { + // No payments to amortize over + if plan.payments.is_empty() { + return vec![]; + } + + // Amortize the transaction fee across outputs + let payments_len = u64::try_from(plan.payments.len()).unwrap(); + // Use a formula which will round up + let output_fee = (tx_fee + (payments_len - 1)) / payments_len; + + let mut branch_outputs = vec![]; + for payment in plan.payments.iter_mut() { + let mut post_fee = payment.amount.checked_sub(output_fee); + // If this is under our dust threshold, drop it + if let Some(amount) = post_fee { + if amount < C::DUST { + post_fee = None; + } + } + + // Note the branch output, if this is one + if payment.address == C::branch_address(plan.key) { + branch_outputs.push(PostFeeBranch { expected: payment.amount, actual: post_fee }); + } + payment.amount = post_fee.unwrap_or(0); + } + // Drop payments now worth 0 + plan.payments = plan.payments.drain(..).filter(|payment| payment.amount != 0).collect(); + branch_outputs +} + +#[async_trait] +pub trait Coin: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { + /// The elliptic curve used for this coin. 
+ type Curve: Curve; + + /// The type representing the fee for this coin. + // This should likely be a u64, wrapped in a type which implements appropriate fee logic. + type Fee: Copy; + + /// The type representing the transaction for this coin. + type Transaction: Transaction; + /// The type representing the block for this coin. + type Block: Block; + + /// The type containing all information on a scanned output. + // This is almost certainly distinct from the coin's native output type. + type Output: Output; + /// The type containing all information on a planned transaction, waiting to be signed. + type SignableTransaction: Send + Sync + Clone + Debug; + /// The type containing all information to check if a plan was completed. + type Eventuality: Eventuality; + /// The FROST machine to sign a transaction. + type TransactionMachine: PreprocessMachine; + + /// The type representing an address. + // This should NOT be a String, yet a tailored type representing an efficient binary encoding, + // as detailed in the integration documentation. + type Address: Send + + Sync + + Clone + + PartialEq + + Eq + + Debug + + ToString + + TryInto> + + TryFrom>; + + /// String ID for this coin. + const ID: &'static str; + /// The amount of confirmations required to consider a block 'final'. + const CONFIRMATIONS: usize; + /// The maximum amount of inputs which will fit in a TX. + /// This should be equal to MAX_OUTPUTS unless one is specifically limited. + /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size. + const MAX_INPUTS: usize; + /// The maximum amount of outputs which will fit in a TX. + /// This should be equal to MAX_INPUTS unless one is specifically limited. + /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size. + const MAX_OUTPUTS: usize; + + /// Minimum output value which will be handled. + const DUST: u64; + + /// Tweak keys for this coin. + fn tweak_keys(key: &mut ThresholdKeys); + + /// Address for the given group key to receive external coins to. + fn address(key: ::G) -> Self::Address; + /// Address for the given group key to use for scheduled branches. + // This is purely used for debugging purposes. Any output may be used to execute a branch. + fn branch_address(key: ::G) -> Self::Address; + + /// Get the latest block's number. + async fn get_latest_block_number(&self) -> Result; + /// Get a block by its number. + async fn get_block(&self, number: usize) -> Result; + /// Get the outputs within a block for a specific key. + async fn get_outputs( + &self, + block: &Self::Block, + key: ::G, + ) -> Result, CoinError>; + + /// Prepare a SignableTransaction for a transaction. + /// Returns None for the transaction if the SignableTransaction was dropped due to lack of value. + #[rustfmt::skip] + async fn prepare_send( + &self, + keys: ThresholdKeys, + block_number: usize, + plan: Plan, + fee: Self::Fee, + ) -> Result< + (Option<(Self::SignableTransaction, Self::Eventuality)>, Vec), + CoinError + >; + + /// Attempt to sign a SignableTransaction. + async fn attempt_send( + &self, + transaction: Self::SignableTransaction, + ) -> Result; + + /// Publish a transaction. + async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), CoinError>; + + /// Get a transaction by its ID. + async fn get_transaction( + &self, + id: &>::Id, + ) -> Result; + + /// Confirm a plan was completed by the specified transaction. + // This is allowed to take shortcuts. + // This may assume an honest multisig, solely checking the inputs specified were spent. 
+ // This may solely check the outputs are equivalent *so long as it's locked to the plan ID*. + fn confirm_completion(&self, eventuality: &Self::Eventuality, tx: &Self::Transaction) -> bool; + + /// Get a block's number by its ID. + #[cfg(test)] + async fn get_block_number(&self, id: &>::Id) -> usize; + + #[cfg(test)] + async fn get_fee(&self) -> Self::Fee; + + #[cfg(test)] + async fn mine_block(&self); + + /// Sends to the specified address. + /// Additionally mines enough blocks so that the TX is past the confirmation depth. + #[cfg(test)] + async fn test_send(&self, key: Self::Address) -> Self::Block; +} diff --git a/processor/src/coins/monero.rs b/processor/src/coins/monero.rs new file mode 100644 index 00000000..0e7cfb18 --- /dev/null +++ b/processor/src/coins/monero.rs @@ -0,0 +1,503 @@ +use std::io; + +use async_trait::async_trait; + +use zeroize::Zeroizing; + +use transcript::RecommendedTranscript; + +use group::{ff::Field, Group}; +use dalek_ff_group::{Scalar, EdwardsPoint}; +use frost::{curve::Ed25519, ThresholdKeys}; + +use monero_serai::{ + Protocol, + transaction::Transaction, + block::Block as MBlock, + rpc::{RpcError, Rpc}, + wallet::{ + ViewPair, Scanner, + address::{Network, SubaddressIndex, AddressSpec}, + Fee, SpendableOutput, Change, TransactionError, SignableTransaction as MSignableTransaction, + Eventuality, TransactionMachine, + }, +}; + +pub use serai_client::{primitives::MAX_DATA_LEN, coins::monero::Address}; + +use crate::{ + Payment, Plan, additional_key, + coins::{ + CoinError, Block as BlockTrait, OutputType, Output as OutputTrait, + Transaction as TransactionTrait, Eventuality as EventualityTrait, PostFeeBranch, Coin, + drop_branches, amortize_fee, + }, +}; + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Output(SpendableOutput, Vec); + +const EXTERNAL_SUBADDRESS: Option = SubaddressIndex::new(0, 0); +const BRANCH_SUBADDRESS: Option = SubaddressIndex::new(1, 0); +const CHANGE_SUBADDRESS: Option = SubaddressIndex::new(2, 0); + +impl OutputTrait for Output { + // While we could use (tx, o), using the key ensures we won't be susceptible to the burning bug. + // While we already are immune, thanks to using featured address, this doesn't hurt and is + // technically more efficient. 
+
+impl OutputTrait for Output {
+  // While we could use (tx, o), using the key ensures we won't be susceptible to the burning bug.
+  // While we already are immune, thanks to using featured addresses, this doesn't hurt and is
+  // technically more efficient.
+  type Id = [u8; 32];
+
+  fn kind(&self) -> OutputType {
+    match self.0.output.metadata.subaddress {
+      EXTERNAL_SUBADDRESS => OutputType::External,
+      BRANCH_SUBADDRESS => OutputType::Branch,
+      CHANGE_SUBADDRESS => OutputType::Change,
+      _ => panic!("unrecognized address was scanned for"),
+    }
+  }
+
+  fn id(&self) -> Self::Id {
+    self.0.output.data.key.compress().to_bytes()
+  }
+
+  fn amount(&self) -> u64 {
+    self.0.commitment().amount
+  }
+
+  fn data(&self) -> &[u8] {
+    &self.1
+  }
+
+  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
+    self.0.write(writer)?;
+    writer.write_all(&u16::try_from(self.1.len()).unwrap().to_le_bytes())?;
+    writer.write_all(&self.1)?;
+    Ok(())
+  }
+
+  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
+    let output = SpendableOutput::read(reader)?;
+
+    let mut data_len = [0; 2];
+    reader.read_exact(&mut data_len)?;
+
+    let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))];
+    reader.read_exact(&mut data)?;
+
+    Ok(Output(output, data))
+  }
+}
+
+#[async_trait]
+impl TransactionTrait<Monero> for Transaction {
+  type Id = [u8; 32];
+  fn id(&self) -> Self::Id {
+    self.hash()
+  }
+  fn serialize(&self) -> Vec<u8> {
+    self.serialize()
+  }
+  #[cfg(test)]
+  async fn fee(&self, _: &Monero) -> u64 {
+    self.rct_signatures.base.fee
+  }
+}
+
+impl EventualityTrait for Eventuality {
+  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
+    Eventuality::read(reader)
+  }
+  fn serialize(&self) -> Vec<u8> {
+    self.serialize()
+  }
+}
+
+#[derive(Clone, Debug)]
+pub struct SignableTransaction {
+  keys: ThresholdKeys<Ed25519>,
+  transcript: RecommendedTranscript,
+  // Monero height, defined as the length of the chain
+  height: usize,
+  actual: MSignableTransaction,
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct Block([u8; 32], MBlock);
+impl BlockTrait<Monero> for Block {
+  type Id = [u8; 32];
+  fn id(&self) -> Self::Id {
+    self.0
+  }
+
+  fn median_fee(&self) -> Fee {
+    // TODO
+    Fee { per_weight: 80000, mask: 10000 }
+  }
+}
+
+#[derive(Clone, Debug)]
+pub struct Monero {
+  pub(crate) rpc: Rpc,
+}
+// Shim required for testing/debugging purposes due to generic arguments also necessitating trait
+// bounds
+impl PartialEq for Monero {
+  fn eq(&self, _: &Self) -> bool {
+    true
+  }
+}
+impl Eq for Monero {}
+
+impl Monero {
+  pub fn new(url: String) -> Monero {
+    Monero { rpc: Rpc::new(url).unwrap() }
+  }
+
+  fn view_pair(spend: EdwardsPoint) -> ViewPair {
+    ViewPair::new(spend.0, Zeroizing::new(additional_key::<Monero>(0).0))
+  }
+
+  fn address_internal(spend: EdwardsPoint, subaddress: Option<SubaddressIndex>) -> Address {
+    Address::new(Self::view_pair(spend).address(
+      Network::Mainnet,
+      AddressSpec::Featured { subaddress, payment_id: None, guaranteed: true },
+    ))
+    .unwrap()
+  }
+
+  fn scanner(spend: EdwardsPoint) -> Scanner {
+    let mut scanner = Scanner::from_view(Self::view_pair(spend), None);
+    debug_assert!(EXTERNAL_SUBADDRESS.is_none());
+    scanner.register_subaddress(BRANCH_SUBADDRESS.unwrap());
+    scanner.register_subaddress(CHANGE_SUBADDRESS.unwrap());
+    scanner
+  }
+
+  #[cfg(test)]
+  fn test_view_pair() -> ViewPair {
+    ViewPair::new(*EdwardsPoint::generator(), Zeroizing::new(Scalar::one().0))
+  }
+
+  #[cfg(test)]
+  fn test_scanner() -> Scanner {
+    Scanner::from_view(Self::test_view_pair(), Some(std::collections::HashSet::new()))
+  }
+
+  #[cfg(test)]
+  fn test_address() -> Address {
+    Address::new(Self::test_view_pair().address(Network::Mainnet, AddressSpec::Standard)).unwrap()
+  }
+}
+
+#[async_trait]
+impl Coin for Monero {
+  type Curve = Ed25519;
+
+  type Fee = Fee;
+  type Transaction = Transaction;
+  type Block = Block;
+
+  type Output = Output;
+  type SignableTransaction = SignableTransaction;
+  type Eventuality = Eventuality;
+  type TransactionMachine = TransactionMachine;
+
+  type Address = Address;
+
+  const ID: &'static str = "Monero";
+  const CONFIRMATIONS: usize = 10;
+
+  // wallet2 will not create a transaction larger than 100kb, and Monero won't relay a transaction
+  // larger than 150kb. This fits within the 100kb mark
+  // Technically, it can be ~124, yet a small bit of buffer is appreciated
+  // TODO: Test creating a TX this big
+  const MAX_INPUTS: usize = 120;
+  const MAX_OUTPUTS: usize = 16;
+
+  // 0.01 XMR
+  const DUST: u64 = 10000000000;
+
+  // Monero doesn't require/benefit from tweaking
+  fn tweak_keys(_: &mut ThresholdKeys<Self::Curve>) {}
+
+  fn address(key: EdwardsPoint) -> Self::Address {
+    Self::address_internal(key, EXTERNAL_SUBADDRESS)
+  }
+
+  fn branch_address(key: EdwardsPoint) -> Self::Address {
+    Self::address_internal(key, BRANCH_SUBADDRESS)
+  }
+
+  async fn get_latest_block_number(&self) -> Result<usize, CoinError> {
+    // Monero defines height as chain length, so subtract 1 for block number
+    Ok(self.rpc.get_height().await.map_err(|_| CoinError::ConnectionError)? - 1)
+  }
+
+  async fn get_block(&self, number: usize) -> Result<Self::Block, CoinError> {
+    let hash = self.rpc.get_block_hash(number).await.map_err(|_| CoinError::ConnectionError)?;
+    let block = self.rpc.get_block(hash).await.map_err(|_| CoinError::ConnectionError)?;
+    Ok(Block(hash, block))
+  }
+
+  async fn get_outputs(
+    &self,
+    block: &Self::Block,
+    key: EdwardsPoint,
+  ) -> Result<Vec<Self::Output>, CoinError> {
+    let mut txs = Self::scanner(key)
+      .scan(&self.rpc, &block.1)
+      .await
+      .map_err(|_| CoinError::ConnectionError)?
+      .iter()
+      .filter_map(|outputs| Some(outputs.not_locked()).filter(|outputs| !outputs.is_empty()))
+      .collect::<Vec<_>>();
+
+    // This should be pointless as we shouldn't be able to scan for any other subaddress
+    // This just ensures nothing invalid makes it through
+    for tx_outputs in &txs {
+      for output in tx_outputs {
+        assert!([EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS]
+          .contains(&output.output.metadata.subaddress));
+      }
+    }
+
+    let mut outputs = Vec::with_capacity(txs.len());
+    for mut tx_outputs in txs.drain(..) {
+      for output in tx_outputs.drain(..) {
+        let mut data = output.arbitrary_data().get(0).cloned().unwrap_or(vec![]);
+
+        // The Output serialization code above uses u16 to represent length
+        data.truncate(u16::MAX.into());
+        // Monero data segments should be <= 255 already, and MAX_DATA_LEN is currently 512
+        // This just allows either Monero to change, or MAX_DATA_LEN to change, without introducing
+        // complications
+        data.truncate(MAX_DATA_LEN.try_into().unwrap());
+
+        outputs.push(Output(output, data));
+      }
+    }
+
+    Ok(outputs)
+  }
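prepare_send below invokes its `signable` closure twice: first with `tx_fee: None` and zeroed payment amounts purely to learn the fee, then, after amortize_fee reduces the payments, with the real amounts. A minimal sketch of that two-pass pattern, with hypothetical `fee_for`/`build` stand-ins (the real fee depends on TX weight, not on the amounts carried):

// Hypothetical stand-ins: the fee depends only on TX shape
fn fee_for(n_outputs: usize) -> u64 { 10 * (1 + n_outputs as u64) }
fn build(input: u64, outputs: &[u64]) -> Option<u64> {
  let fee = fee_for(outputs.len());
  (outputs.iter().sum::<u64>() + fee <= input).then_some(fee)
}

fn main() {
  let input = 1000;
  let mut payments = vec![600, 390];
  // Pass 1: zeroed amounts can't hit an out-of-funds error, yet yield the fee
  let fee = build(input, &vec![0; payments.len()]).unwrap();
  // Pass 2: amortize the fee across the payments, then build for real
  let per_payment = fee / payments.len() as u64;
  for amount in &mut payments { *amount -= per_payment; }
  assert_eq!(build(input, &payments), Some(fee));
}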
+
+  async fn prepare_send(
+    &self,
+    keys: ThresholdKeys<Ed25519>,
+    block_number: usize,
+    mut plan: Plan<Monero>,
+    fee: Fee,
+  ) -> Result<(Option<(SignableTransaction, Eventuality)>, Vec<PostFeeBranch>), CoinError> {
+    // Sanity check this has at least one output planned
+    assert!((!plan.payments.is_empty()) || plan.change.is_some());
+
+    let protocol = Protocol::v16;
+    // Check a fork hasn't occurred which this processor hasn't been updated for
+    assert_eq!(protocol, self.rpc.get_protocol().await.map_err(|_| CoinError::ConnectionError)?);
+
+    let signable = |plan: &mut Plan<Monero>, tx_fee: Option<_>| {
+      // Monero requires at least two outputs
+      // If we only have one output planned, add a dummy payment
+      let outputs = plan.payments.len() + usize::from(u8::from(plan.change.is_some()));
+      if outputs == 0 {
+        return Ok(None);
+      } else if outputs == 1 {
+        plan.payments.push(Payment {
+          address: Address::new(
+            ViewPair::new(EdwardsPoint::generator().0, Zeroizing::new(Scalar::one().0))
+              .address(Network::Mainnet, AddressSpec::Standard),
+          )
+          .unwrap(),
+          amount: 0,
+          data: None,
+        });
+      }
+
+      let mut payments = vec![];
+      for payment in &plan.payments {
+        // If we're solely estimating the fee, don't actually specify an amount
+        // This won't affect the fee calculation yet will ensure we don't hit an out of funds error
+        payments.push((
+          payment.address.clone().into(),
+          if tx_fee.is_none() { 0 } else { payment.amount },
+        ));
+      }
+
+      match MSignableTransaction::new(
+        protocol,
+        // Use the plan ID as the r_seed
+        // This perfectly binds the plan while simultaneously allowing verifying the plan was
+        // executed with no additional communication
+        Some(Zeroizing::new(plan.id())),
+        plan.inputs.iter().cloned().map(|input| input.0).collect(),
+        payments,
+        plan.change.map(|key| {
+          Change::fingerprintable(Self::address_internal(key, CHANGE_SUBADDRESS).into())
+        }),
+        vec![],
+        fee,
+      ) {
+        Ok(signable) => Ok(Some(signable)),
+        Err(e) => match e {
+          TransactionError::MultiplePaymentIds => {
+            panic!("multiple payment IDs despite not supporting integrated addresses");
+          }
+          TransactionError::NoInputs |
+          TransactionError::NoOutputs |
+          TransactionError::NoChange |
+          TransactionError::TooManyOutputs |
+          TransactionError::TooMuchData |
+          TransactionError::TooLargeTransaction |
+          TransactionError::WrongPrivateKey => {
+            panic!("created an invalid Monero transaction: {e}");
+          }
+          TransactionError::ClsagError(_) |
+          TransactionError::InvalidTransaction(_) |
+          TransactionError::FrostError(_) => {
+            panic!("supposedly unreachable (at this time) Monero error: {e}");
+          }
+          TransactionError::NotEnoughFunds(_, _) => {
+            if tx_fee.is_none() {
+              Ok(None)
+            } else {
+              panic!("didn't have enough funds for a Monero TX");
+            }
+          }
+          TransactionError::RpcError(e) => {
+            log::error!("RpcError when preparing transaction: {e:?}");
+            Err(CoinError::ConnectionError)
+          }
+        },
+      }
+    };
+
+    let tx_fee = match signable(&mut plan, None)? {
+      Some(tx) => tx.fee(),
+      None => return Ok((None, drop_branches(&plan))),
+    };
+
+    let branch_outputs = amortize_fee(&mut plan, tx_fee);
+
+    let signable = SignableTransaction {
+      keys,
+      transcript: plan.transcript(),
+      height: block_number + 1,
+      actual: match signable(&mut plan, Some(tx_fee))? {
+        Some(signable) => signable,
+        None => return Ok((None, branch_outputs)),
+      },
+    };
+    let eventuality = signable.actual.eventuality().unwrap();
+    Ok((Some((signable, eventuality)), branch_outputs))
+  }
+
+  async fn attempt_send(
+    &self,
+    transaction: SignableTransaction,
+  ) -> Result<Self::TransactionMachine, CoinError> {
+    transaction
+      .actual
+      .clone()
+      .multisig(
+        &self.rpc,
+        transaction.keys.clone(),
+        transaction.transcript.clone(),
+        transaction.height,
+      )
+      .await
+      .map_err(|_| CoinError::ConnectionError)
+  }
+
+  async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), CoinError> {
+    match self.rpc.publish_transaction(tx).await {
+      Ok(_) => Ok(()),
+      Err(RpcError::ConnectionError) => Err(CoinError::ConnectionError)?,
+      // TODO: Distinguish already in pool vs double spend (other signing attempt succeeded) vs
+      // invalid transaction
+      Err(e) => panic!("failed to publish TX {:?}: {e}", tx.hash()),
+    }
+  }
+
+  async fn get_transaction(&self, id: &[u8; 32]) -> Result<Transaction, CoinError> {
+    self.rpc.get_transaction(*id).await.map_err(|_| CoinError::ConnectionError)
+  }
+
+  fn confirm_completion(&self, eventuality: &Eventuality, tx: &Transaction) -> bool {
+    eventuality.matches(tx)
+  }
+
+  #[cfg(test)]
+  async fn get_block_number(&self, id: &[u8; 32]) -> usize {
+    self.rpc.get_block(*id).await.unwrap().number()
+  }
+
+  #[cfg(test)]
+  async fn get_fee(&self) -> Self::Fee {
+    self.rpc.get_fee().await.unwrap()
+  }
+
+  #[cfg(test)]
+  async fn mine_block(&self) {
+    // https://github.com/serai-dex/serai/issues/198
+    tokio::time::sleep(std::time::Duration::from_millis(100)).await;
+
+    #[derive(serde::Deserialize, Debug)]
+    struct EmptyResponse {}
+    let _: EmptyResponse = self
+      .rpc
+      .rpc_call(
+        "json_rpc",
+        Some(serde_json::json!({
+          "method": "generateblocks",
+          "params": {
+            "wallet_address": Self::test_address().to_string(),
+            "amount_of_blocks": 1
+          },
+        })),
+      )
+      .await
+      .unwrap();
+  }
+
+  #[cfg(test)]
+  async fn test_send(&self, address: Self::Address) -> Block {
+    use zeroize::Zeroizing;
+    use rand_core::OsRng;
+
+    let new_block = self.get_latest_block_number().await.unwrap() + 1;
+    for _ in 0 .. 80 {
+      self.mine_block().await;
+    }
+
+    let outputs = Self::test_scanner()
+      .scan(&self.rpc, &self.rpc.get_block_by_number(new_block).await.unwrap())
+      .await
+      .unwrap()
+      .swap_remove(0)
+      .ignore_timelock();
+
+    let amount = outputs[0].commitment().amount;
+    // The dust should always be sufficient for the fee
+    let fee = Monero::DUST;
+
+    let tx = MSignableTransaction::new(
+      self.rpc.get_protocol().await.unwrap(),
+      None,
+      outputs,
+      vec![(address.into(), amount - fee)],
+      Some(Change::fingerprintable(Self::test_address().into())),
+      vec![],
+      self.rpc.get_fee().await.unwrap(),
+    )
+    .unwrap()
+    .sign(&mut OsRng, &self.rpc, &Zeroizing::new(Scalar::one().0))
+    .await
+    .unwrap();
+
+    let block = self.get_latest_block_number().await.unwrap() + 1;
+    self.rpc.publish_transaction(&tx).await.unwrap();
+    for _ in 0 .. 10 {
+      self.mine_block().await;
+    }
+    self.get_block(block).await.unwrap()
+  }
+}
diff --git a/processor/src/coordinator.rs b/processor/src/coordinator.rs
new file mode 100644
index 00000000..92cba33e
--- /dev/null
+++ b/processor/src/coordinator.rs
@@ -0,0 +1,42 @@
+use std::{
+  sync::{Arc, RwLock},
+  collections::VecDeque,
+};
+
+use messages::{ProcessorMessage, CoordinatorMessage};
+
+// TODO: Also include the coin block height here so we can delay handling if not synced?
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct Message {
+  pub id: u64,
+  pub msg: CoordinatorMessage,
+}
+
+#[async_trait::async_trait]
+pub trait Coordinator {
+  async fn send(&mut self, msg: ProcessorMessage);
+  async fn recv(&mut self) -> Message;
+  async fn ack(&mut self, msg: Message);
+}
+
+// TODO: Move this to tests
+pub struct MemCoordinator(Arc<RwLock<VecDeque<Message>>>);
+impl MemCoordinator {
+  #[allow(clippy::new_without_default)]
+  pub fn new() -> MemCoordinator {
+    MemCoordinator(Arc::new(RwLock::new(VecDeque::new())))
+  }
+}
+
+#[async_trait::async_trait]
+impl Coordinator for MemCoordinator {
+  async fn send(&mut self, _: ProcessorMessage) {
+    todo!()
+  }
+  async fn recv(&mut self) -> Message {
+    todo!()
+  }
+  async fn ack(&mut self, _: Message) {
+    todo!()
+  }
+}
diff --git a/processor/src/db.rs b/processor/src/db.rs
new file mode 100644
index 00000000..5b2fe8a6
--- /dev/null
+++ b/processor/src/db.rs
@@ -0,0 +1,149 @@
+use core::{marker::PhantomData, fmt::Debug};
+use std::{
+  sync::{Arc, RwLock},
+  collections::HashMap,
+};
+
+use crate::{Plan, coins::Coin};
+
+pub trait DbTxn: Send + Sync + Clone + Debug {
+  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>);
+  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>>;
+  fn del(&mut self, key: impl AsRef<[u8]>);
+  fn commit(self);
+}
+
+pub trait Db: 'static + Send + Sync + Clone + Debug {
+  type Transaction: DbTxn;
+  fn key(db_dst: &'static [u8], item_dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
+    let db_len = u8::try_from(db_dst.len()).unwrap();
+    let dst_len = u8::try_from(item_dst.len()).unwrap();
+    [[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat().to_vec()
+  }
+  fn txn(&mut self) -> Self::Transaction;
+  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>>;
+}
+
+// TODO: Replace this with RocksDB
+#[derive(Clone, Debug)]
+pub struct MemDb(Arc<RwLock<HashMap<Vec<u8>, Vec<u8>>>>);
+impl MemDb {
+  #[allow(clippy::new_without_default)]
+  pub fn new() -> MemDb {
+    MemDb(Arc::new(RwLock::new(HashMap::new())))
+  }
+}
+
+impl DbTxn for MemDb {
+  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
+    self.0.write().unwrap().insert(key.as_ref().to_vec(), value.as_ref().to_vec());
+  }
+  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
+    self.0.read().unwrap().get(key.as_ref()).cloned()
+  }
+  fn del(&mut self, key: impl AsRef<[u8]>) {
+    self.0.write().unwrap().remove(key.as_ref());
+  }
+  fn commit(self) {}
+}
+
+impl Db for MemDb {
+  type Transaction = MemDb;
+  fn txn(&mut self) -> MemDb {
+    Self(self.0.clone())
+  }
+  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
+    self.0.read().unwrap().get(key.as_ref()).cloned()
+  }
+}
+
+#[derive(Debug)]
+pub struct MainDb<C: Coin, D: Db>(D, PhantomData<C>);
+impl<C: Coin, D: Db> MainDb<C, D> {
+  pub fn new(db: D) -> Self {
+    Self(db, PhantomData)
+  }
+
+  fn main_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
+    D::key(b"MAIN", dst, key)
+  }
+
+  fn plan_key(id: &[u8]) -> Vec<u8> {
+    Self::main_key(b"plan", id)
+  }
+  fn signing_key(key: &[u8]) -> Vec<u8> {
+    Self::main_key(b"signing", key)
+  }
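For illustration, the length-prefixed namespacing Db::key performs (the u8::try_from unwraps above restrict destinations to 255 bytes) can be sketched standalone; `namespaced` below is a hypothetical mirror of it, shown building the equivalent of MainDb::signing_key for a 32-byte group key:

fn namespaced(db_dst: &[u8], item_dst: &[u8], key: &[u8]) -> Vec<u8> {
  // [len(db_dst)] || db_dst || [len(item_dst)] || item_dst || key
  [&[db_dst.len() as u8][..], db_dst, &[item_dst.len() as u8][..], item_dst, key].concat()
}

fn main() {
  let key = namespaced(b"MAIN", b"signing", &[0xaa; 32]);
  assert_eq!(key[0], 4); // length prefix of b"MAIN"
  assert_eq!(&key[1 .. 5], &b"MAIN"[..]);
  assert_eq!(key[5], 7); // length prefix of b"signing"
  assert_eq!(key.len(), 1 + 4 + 1 + 7 + 32);
}

The length prefixes keep distinct (db_dst, item_dst) pairs from ever concatenating to the same byte string.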
+  pub fn save_signing(&mut self, key: &[u8], block_number: u64, time: u64, plan: &Plan<C>) {
+    let id = plan.id();
+    // Creating a TXN here is arguably an anti-pattern, yet nothing here expects atomicity
+    let mut txn = self.0.txn();
+
+    {
+      let mut signing = txn.get(Self::signing_key(key)).unwrap_or(vec![]);
+
+      // If we've already noted we're signing this, return
+      assert_eq!(signing.len() % 32, 0);
+      for i in 0 .. (signing.len() / 32) {
+        if signing[(i * 32) .. ((i + 1) * 32)] == id {
+          return;
+        }
+      }
+
+      signing.extend(&id);
+      txn.put(Self::signing_key(key), signing);
+    }
+
+    {
+      let mut buf = block_number.to_le_bytes().to_vec();
+      buf.extend(&time.to_le_bytes());
+      plan.write(&mut buf).unwrap();
+      txn.put(Self::plan_key(&id), &buf);
+    }
+
+    txn.commit();
+  }
+
+  pub fn signing(&self, key: &[u8]) -> Vec<(u64, u64, Plan<C>)> {
+    let signing = self.0.get(Self::signing_key(key)).unwrap_or(vec![]);
+    let mut res = vec![];
+
+    assert_eq!(signing.len() % 32, 0);
+    for i in 0 .. (signing.len() / 32) {
+      let id = &signing[(i * 32) .. ((i + 1) * 32)];
+      let buf = self.0.get(Self::plan_key(id)).unwrap();
+
+      let block_number = u64::from_le_bytes(buf[.. 8].try_into().unwrap());
+      let time = u64::from_le_bytes(buf[8 .. 16].try_into().unwrap());
+      let plan = Plan::<C>::read::<&[u8]>(&mut &buf[16 ..]).unwrap();
+      assert_eq!(id, &plan.id());
+      res.push((block_number, time, plan));
+    }
+
+    res
+  }
+
+  pub fn finish_signing(&mut self, key: &[u8], id: [u8; 32]) {
+    let mut signing = self.0.get(Self::signing_key(key)).unwrap_or(vec![]);
+    assert_eq!(signing.len() % 32, 0);
+
+    let mut found = false;
+    for i in 0 .. (signing.len() / 32) {
+      let start = i * 32;
+      let end = start + 32;
+      if signing[start .. end] == id {
+        found = true;
+        signing = [&signing[.. start], &signing[end ..]].concat().to_vec();
+        break;
+      }
+    }
+
+    if !found {
+      log::warn!("told to finish signing {} yet wasn't actively signing it", hex::encode(id));
+    }
+
+    let mut txn = self.0.txn();
+    txn.put(Self::signing_key(key), signing);
+    txn.commit();
+  }
+}
diff --git a/processor/src/key_gen.rs b/processor/src/key_gen.rs
new file mode 100644
index 00000000..835d978d
--- /dev/null
+++ b/processor/src/key_gen.rs
@@ -0,0 +1,308 @@
+use core::marker::PhantomData;
+use std::collections::HashMap;
+
+use zeroize::Zeroizing;
+
+use rand_core::SeedableRng;
+use rand_chacha::ChaCha20Rng;
+
+use transcript::{Transcript, RecommendedTranscript};
+use group::GroupEncoding;
+use frost::{
+  curve::Ciphersuite,
+  dkg::{Participant, ThresholdParams, ThresholdCore, ThresholdKeys, encryption::*, frost::*},
+};
+
+use log::info;
+
+use serai_client::validator_sets::primitives::ValidatorSetInstance;
+use messages::key_gen::*;
+
+use crate::{DbTxn, Db, coins::Coin};
+
+#[derive(Debug)]
+pub enum KeyGenEvent<C: Ciphersuite> {
+  KeyConfirmed { activation_number: usize, keys: ThresholdKeys<C> },
+  ProcessorMessage(ProcessorMessage),
+}
+
+#[derive(Clone, Debug)]
+struct KeyGenDb<C: Coin, D: Db>(D, PhantomData<C>);
+impl<C: Coin, D: Db> KeyGenDb<C, D> {
+  fn key_gen_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
+    D::key(b"KEY_GEN", dst, key)
+  }
+
+  fn params_key(set: &ValidatorSetInstance) -> Vec<u8> {
+    Self::key_gen_key(b"params", bincode::serialize(set).unwrap())
+  }
+  fn save_params(
+    &mut self,
+    txn: &mut D::Transaction,
+    set: &ValidatorSetInstance,
+    params: &ThresholdParams,
+  ) {
+    txn.put(Self::params_key(set), bincode::serialize(params).unwrap());
+  }
+  fn params(&self, set: &ValidatorSetInstance) -> ThresholdParams {
+    // Directly unwraps the .get() as this will only be called after being set
bincode::deserialize(&self.0.get(Self::params_key(set)).unwrap()).unwrap() + } + + // Not scoped to the set since that'd have latter attempts overwrite former + // A former attempt may become the finalized attempt, even if it doesn't in a timely manner + // Overwriting its commitments would be accordingly poor + fn commitments_key(id: &KeyGenId) -> Vec { + Self::key_gen_key(b"commitments", bincode::serialize(id).unwrap()) + } + fn save_commitments( + &mut self, + txn: &mut D::Transaction, + id: &KeyGenId, + commitments: &HashMap>, + ) { + txn.put(Self::commitments_key(id), bincode::serialize(commitments).unwrap()); + } + fn commitments( + &self, + id: &KeyGenId, + params: ThresholdParams, + ) -> HashMap>> { + bincode::deserialize::>>( + &self.0.get(Self::commitments_key(id)).unwrap(), + ) + .unwrap() + .drain() + .map(|(i, bytes)| { + ( + i, + EncryptionKeyMessage::>::read::<&[u8]>( + &mut bytes.as_ref(), + params, + ) + .unwrap(), + ) + }) + .collect() + } + + fn generated_keys_key(id: &KeyGenId) -> Vec { + Self::key_gen_key(b"generated_keys", bincode::serialize(id).unwrap()) + } + fn save_keys(&mut self, txn: &mut D::Transaction, id: &KeyGenId, keys: &ThresholdCore) { + txn.put(Self::generated_keys_key(id), keys.serialize()); + } + + fn keys_key(key: &::G) -> Vec { + Self::key_gen_key(b"keys", key.to_bytes()) + } + fn confirm_keys(&mut self, txn: &mut D::Transaction, id: &KeyGenId) -> ThresholdKeys { + let keys_vec = self.0.get(Self::generated_keys_key(id)).unwrap(); + let mut keys = + ThresholdKeys::new(ThresholdCore::read::<&[u8]>(&mut keys_vec.as_ref()).unwrap()); + C::tweak_keys(&mut keys); + txn.put(Self::keys_key(&keys.group_key()), keys_vec); + keys + } + fn keys(&self, key: &::G) -> ThresholdKeys { + let mut keys = ThresholdKeys::new( + ThresholdCore::read::<&[u8]>(&mut self.0.get(Self::keys_key(key)).unwrap().as_ref()).unwrap(), + ); + C::tweak_keys(&mut keys); + keys + } +} + +/// Coded so if the processor spontaneously reboots, one of two paths occur: +/// 1) It either didn't send its response, so the attempt will be aborted +/// 2) It did send its response, and has locally saved enough data to continue +#[derive(Debug)] +pub struct KeyGen { + db: KeyGenDb, + entropy: Zeroizing<[u8; 32]>, + + active_commit: HashMap>, + active_share: HashMap>, +} + +impl KeyGen { + #[allow(clippy::new_ret_no_self)] + pub fn new(db: D, entropy: Zeroizing<[u8; 32]>) -> KeyGen { + KeyGen { + db: KeyGenDb(db, PhantomData::), + entropy, + + active_commit: HashMap::new(), + active_share: HashMap::new(), + } + } + + pub fn keys(&self, key: &::G) -> ThresholdKeys { + self.db.keys(key) + } + + pub async fn handle(&mut self, msg: CoordinatorMessage) -> KeyGenEvent { + let context = |id: &KeyGenId| { + // TODO2: Also embed the chain ID/genesis block + format!( + "Serai Key Gen. 
Session: {}, Index: {}, Attempt: {}", + id.set.session.0, id.set.index.0, id.attempt + ) + }; + + let rng = |label, id: KeyGenId| { + let mut transcript = RecommendedTranscript::new(label); + transcript.append_message(b"entropy", self.entropy.as_ref()); + transcript.append_message(b"context", context(&id)); + ChaCha20Rng::from_seed(transcript.rng_seed(b"rng")) + }; + let coefficients_rng = |id| rng(b"Key Gen Coefficients", id); + let secret_shares_rng = |id| rng(b"Key Gen Secret Shares", id); + let share_rng = |id| rng(b"Key Gen Share", id); + + let key_gen_machine = |id, params| { + KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut coefficients_rng(id)) + }; + + match msg { + CoordinatorMessage::GenerateKey { id, params } => { + info!("Generating new key. ID: {:?} Params: {:?}", id, params); + + // Remove old attempts + if self.active_commit.remove(&id.set).is_none() && + self.active_share.remove(&id.set).is_none() + { + // If we haven't handled this set before, save the params + // This may overwrite previously written params if we rebooted, yet that isn't a + // concern + let mut txn = self.db.0.txn(); + self.db.save_params(&mut txn, &id.set, ¶ms); + txn.commit(); + } + + let (machine, commitments) = key_gen_machine(id, params); + self.active_commit.insert(id.set, machine); + + KeyGenEvent::ProcessorMessage(ProcessorMessage::Commitments { + id, + commitments: commitments.serialize(), + }) + } + + CoordinatorMessage::Commitments { id, commitments } => { + info!("Received commitments for {:?}", id); + + if self.active_share.contains_key(&id.set) { + // We should've been told of a new attempt before receiving commitments again + // The coordinator is either missing messages or repeating itself + // Either way, it's faulty + panic!("commitments when already handled commitments"); + } + + let params = self.db.params(&id.set); + + // Parse the commitments + let parsed = match commitments + .iter() + .map(|(i, commitments)| { + EncryptionKeyMessage::>::read::<&[u8]>( + &mut commitments.as_ref(), + params, + ) + .map(|commitments| (*i, commitments)) + }) + .collect() + { + Ok(commitments) => commitments, + Err(e) => todo!("malicious signer: {:?}", e), + }; + + // Get the machine, rebuilding it if we don't have it + // We won't if the processor rebooted + // This *may* be inconsistent if we receive a KeyGen for attempt x, then commitments for + // attempt y + // The coordinator is trusted to be proper in this regard + let machine = + self.active_commit.remove(&id.set).unwrap_or_else(|| key_gen_machine(id, params).0); + + let (machine, mut shares) = + match machine.generate_secret_shares(&mut secret_shares_rng(id), parsed) { + Ok(res) => res, + Err(e) => todo!("malicious signer: {:?}", e), + }; + self.active_share.insert(id.set, machine); + + let mut txn = self.db.0.txn(); + self.db.save_commitments(&mut txn, &id, &commitments); + txn.commit(); + + KeyGenEvent::ProcessorMessage(ProcessorMessage::Shares { + id, + shares: shares.drain().map(|(i, share)| (i, share.serialize())).collect(), + }) + } + + CoordinatorMessage::Shares { id, mut shares } => { + info!("Received shares for {:?}", id); + + let params = self.db.params(&id.set); + + // Parse the shares + let shares = match shares + .drain() + .map(|(i, share)| { + EncryptedMessage::::F>>::read::<&[u8]>( + &mut share.as_ref(), + params, + ) + .map(|share| (i, share)) + }) + .collect() + { + Ok(shares) => shares, + Err(e) => todo!("malicious signer: {:?}", e), + }; + + // Same commentary on inconsistency as above exists + let 
machine = self.active_share.remove(&id.set).unwrap_or_else(|| { + key_gen_machine(id, params) + .0 + .generate_secret_shares(&mut secret_shares_rng(id), self.db.commitments(&id, params)) + .unwrap() + .0 + }); + + // TODO2: Handle the blame machine properly + let keys = (match machine.calculate_share(&mut share_rng(id), shares) { + Ok(res) => res, + Err(e) => todo!("malicious signer: {:?}", e), + }) + .complete(); + + let mut txn = self.db.0.txn(); + self.db.save_keys(&mut txn, &id, &keys); + txn.commit(); + + let mut keys = ThresholdKeys::new(keys); + C::tweak_keys(&mut keys); + KeyGenEvent::ProcessorMessage(ProcessorMessage::GeneratedKey { + id, + key: keys.group_key().to_bytes().as_ref().to_vec(), + }) + } + + CoordinatorMessage::ConfirmKey { context, id } => { + let mut txn = self.db.0.txn(); + let keys = self.db.confirm_keys(&mut txn, &id); + txn.commit(); + + info!("Confirmed key {} from {:?}", hex::encode(keys.group_key().to_bytes()), id); + + KeyGenEvent::KeyConfirmed { + activation_number: context.coin_latest_block_number.try_into().unwrap(), + keys, + } + } + } + } +} diff --git a/processor/src/lib.rs b/processor/src/lib.rs deleted file mode 100644 index 462b682e..00000000 --- a/processor/src/lib.rs +++ /dev/null @@ -1,43 +0,0 @@ -use std::{marker::Send, collections::HashMap}; - -use async_trait::async_trait; -use thiserror::Error; - -use frost::{curve::Ciphersuite, Participant, FrostError}; - -mod coin; -use coin::{CoinError, Coin}; - -mod wallet; - -#[cfg(test)] -mod tests; - -#[derive(Clone, Error, Debug)] -pub enum NetworkError {} - -#[async_trait] -pub trait Network: Send { - async fn round(&mut self, data: Vec) -> Result>, NetworkError>; -} - -#[derive(Clone, Error, Debug)] -pub enum SignError { - #[error("FROST had an error {0}")] - FrostError(FrostError), - #[error("coin had an error {0}")] - CoinError(CoinError), - #[error("network had an error {0}")] - NetworkError(NetworkError), -} - -// Generate a static additional key for a given chain in a globally consistent manner -// Doesn't consider the current group key to increase the simplicity of verifying Serai's status -// Takes an index, k, to support protocols which use multiple secondary keys -// Presumably a view key -pub(crate) fn additional_key(k: u64) -> ::F { - ::hash_to_F( - b"Serai DEX Additional Key", - &[C::ID, &k.to_le_bytes()].concat(), - ) -} diff --git a/processor/src/main.rs b/processor/src/main.rs new file mode 100644 index 00000000..41935d85 --- /dev/null +++ b/processor/src/main.rs @@ -0,0 +1,458 @@ +use std::{ + env, + pin::Pin, + task::{Poll, Context}, + future::Future, + time::{Duration, SystemTime}, + collections::{VecDeque, HashMap}, +}; + +use zeroize::{Zeroize, Zeroizing}; + +use transcript::{Transcript, RecommendedTranscript}; +use group::GroupEncoding; +use frost::curve::Ciphersuite; + +use log::{info, warn, error}; +use tokio::time::sleep; + +use scale::Decode; + +use serai_client::{ + primitives::{Amount, WithAmount}, + tokens::primitives::OutInstruction, + in_instructions::primitives::{Shorthand, RefundableInInstruction}, +}; + +use messages::{SubstrateContext, sign, substrate, CoordinatorMessage, ProcessorMessage}; + +mod plan; +pub use plan::*; + +mod db; +pub use db::*; + +mod coordinator; +pub use coordinator::*; + +mod coins; +use coins::{OutputType, Output, PostFeeBranch, Block, Coin}; +#[cfg(feature = "bitcoin")] +use coins::Bitcoin; +#[cfg(feature = "monero")] +use coins::Monero; + +mod key_gen; +use key_gen::{KeyGenEvent, KeyGen}; + +mod signer; +use signer::{SignerEvent, Signer, 
SignerHandle};
+
+mod scanner;
+use scanner::{ScannerEvent, Scanner, ScannerHandle};
+
+mod scheduler;
+use scheduler::Scheduler;
+
+#[cfg(test)]
+mod tests;
+
+// Generate a static additional key for a given chain in a globally consistent manner
+// Doesn't consider the current group key to increase the simplicity of verifying Serai's status
+// Takes an index, k, to support protocols which use multiple secondary keys
+// Presumably a view key
+pub(crate) fn additional_key<C: Coin>(k: u64) -> <C::Curve as Ciphersuite>::F {
+  <C::Curve as Ciphersuite>::hash_to_F(
+    b"Serai DEX Additional Key",
+    &[C::ID.as_bytes(), &k.to_le_bytes()].concat(),
+  )
+}
+
+struct SignerMessageFuture<'a, C: Coin, D: Db>(&'a mut HashMap<Vec<u8>, SignerHandle<C, D>>);
+impl<'a, C: Coin, D: Db> Future for SignerMessageFuture<'a, C, D> {
+  type Output = (Vec<u8>, SignerEvent<C>);
+  fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Self::Output> {
+    for (key, signer) in self.0.iter_mut() {
+      match signer.events.poll_recv(ctx) {
+        Poll::Ready(event) => return Poll::Ready((key.clone(), event.unwrap())),
+        Poll::Pending => {}
+      }
+    }
+    Poll::Pending
+  }
+}
+
+async fn get_fee<C: Coin>(coin: &C, block_number: usize) -> C::Fee {
+  loop {
+    // TODO2: Use a fee representative of several blocks
+    match coin.get_block(block_number).await {
+      Ok(block) => {
+        return block.median_fee();
+      }
+      Err(e) => {
+        error!("couldn't get block {}: {e}", block_number);
+        // Since this block is considered finalized, we shouldn't be unable to get it unless the
+        // node is offline, hence the long sleep
+        sleep(Duration::from_secs(60)).await;
+      }
+    }
+  }
+}
+
+async fn prepare_send<C: Coin, D: Db>(
+  coin: &C,
+  signer: &SignerHandle<C, D>,
+  block_number: usize,
+  fee: C::Fee,
+  plan: Plan<C>,
+) -> (Option<(C::SignableTransaction, C::Eventuality)>, Vec<PostFeeBranch>) {
+  let keys = signer.keys().await;
+  loop {
+    match coin.prepare_send(keys.clone(), block_number, plan.clone(), fee).await {
+      Ok(prepared) => {
+        return prepared;
+      }
+      Err(e) => {
+        error!("couldn't prepare a send for plan {}: {e}", hex::encode(plan.id()));
+        // The processor is either trying to create an invalid TX (fatal) or the node went
+        // offline
+        // The former requires a patch, the latter is a connection issue
+        // If the latter, this is an appropriate sleep. If the former, we should panic, yet
+        // this won't flood the console ad infinitum
+        sleep(Duration::from_secs(60)).await;
+      }
+    }
+  }
+}
+
+async fn sign_plans<C: Coin, D: Db>(
+  db: &mut MainDb<C, D>,
+  coin: &C,
+  schedulers: &mut HashMap<Vec<u8>, Scheduler<C>>,
+  signers: &HashMap<Vec<u8>, SignerHandle<C, D>>,
+  context: SubstrateContext,
+  plans: Vec<Plan<C>>,
+) {
+  let mut plans = VecDeque::from(plans);
+  let start = SystemTime::UNIX_EPOCH.checked_add(Duration::from_secs(context.time)).unwrap();
+  let block_number = context.coin_latest_block_number.try_into().unwrap();
+
+  let fee = get_fee(coin, block_number).await;
+
+  while let Some(plan) = plans.pop_front() {
+    let id = plan.id();
+    info!("preparing plan {}: {:?}", hex::encode(id), plan);
+
+    let key = plan.key.to_bytes();
+    db.save_signing(key.as_ref(), context.coin_latest_block_number, context.time, &plan);
+    let (tx, branches) = prepare_send(coin, &signers[key.as_ref()], block_number, fee, plan).await;
+
+    // TODO: If we reboot mid-sign_plans, for a DB-backed scheduler, these may be partially
+    // executed
+    // Global TXN object for the entire coordinator message?
+    // Re-ser the scheduler after every sign_plans call?
+    // To clarify, the scheduler is distinct as it mutates itself on new data.
+    // The key_gen/scanner/signer are designed to be deterministic to new data, irrelevant to prior
+    // states.
+ for branch in branches { + schedulers + .get_mut(key.as_ref()) + .expect("didn't have a scheduler for a key we have a plan for") + .created_output(branch.expected, branch.actual); + } + + if let Some((tx, eventuality)) = tx { + // TODO: Handle detection of already signed TXs (either on-chain or notified by a peer) + signers[key.as_ref()].sign_transaction(id, start, tx, eventuality).await; + } + } +} + +async fn run(raw_db: D, coin: C, mut coordinator: Co) { + let mut entropy_transcript = { + let entropy = + Zeroizing::new(env::var("ENTROPY").expect("entropy wasn't provided as an env var")); + if entropy.len() != 64 { + panic!("entropy isn't the right length"); + } + let bytes = Zeroizing::new(hex::decode(entropy).expect("entropy wasn't hex-formatted")); + let mut entropy = Zeroizing::new([0; 32]); + entropy.as_mut().copy_from_slice(bytes.as_ref()); + + let mut transcript = RecommendedTranscript::new(b"Serai Processor Entropy"); + transcript.append_message(b"entropy", entropy.as_ref()); + transcript + }; + + let mut entropy = |label| { + let mut challenge = entropy_transcript.challenge(label); + let mut res = Zeroizing::new([0; 32]); + res.as_mut().copy_from_slice(&challenge[.. 32]); + challenge.zeroize(); + res + }; + + // We don't need to re-issue GenerateKey orders because the coordinator is expected to + // schedule/notify us of new attempts + let mut key_gen = KeyGen::::new(raw_db.clone(), entropy(b"key-gen_entropy")); + // The scanner has no long-standing orders to re-issue + let (mut scanner, active_keys) = Scanner::new(coin.clone(), raw_db.clone()); + + let mut schedulers = HashMap::, Scheduler>::new(); + let mut signers = HashMap::new(); + + let mut main_db = MainDb::new(raw_db.clone()); + + for key in &active_keys { + // TODO: Load existing schedulers + + let signer = Signer::new(raw_db.clone(), coin.clone(), key_gen.keys(key)); + + // Load any TXs being actively signed + let key = key.to_bytes(); + for (block_number, start, plan) in main_db.signing(key.as_ref()) { + let block_number = block_number.try_into().unwrap(); + let start = SystemTime::UNIX_EPOCH.checked_add(Duration::from_secs(start)).unwrap(); + + let fee = get_fee(&coin, block_number).await; + + let id = plan.id(); + info!("reloading plan {}: {:?}", hex::encode(id), plan); + + let (Some((tx, eventuality)), _) = + prepare_send(&coin, &signer, block_number, fee, plan).await else { + panic!("previously created transaction is no longer being created") + }; + signer.sign_transaction(id, start, tx, eventuality).await; + } + + signers.insert(key.as_ref().to_vec(), signer); + } + + // We can't load this from the DB as we can't guarantee atomic increments with the ack function + let mut last_coordinator_msg = None; + + loop { + tokio::select! 
{
+      // This blocks the entire processor until it finishes handling this message
+      // KeyGen specifically may take a notable amount of processing time
+      // While that shouldn't be an issue in practice, as after processing an attempt it'll handle
+      // the other messages in the queue, it may be beneficial to parallelize these
+      // They could likely be parallelized by type (KeyGen, Sign, Substrate) without issue
+      msg = coordinator.recv() => {
+        assert_eq!(msg.id, (last_coordinator_msg.unwrap_or(msg.id - 1) + 1));
+        last_coordinator_msg = Some(msg.id);
+
+        // If this message expects a higher block number than we have, halt until synced
+        async fn wait<C: Coin, D: Db>(
+          coin: &C,
+          scanner: &ScannerHandle<C, D>,
+          context: &SubstrateContext
+        ) {
+          let needed = usize::try_from(context.coin_latest_block_number).unwrap();
+
+          loop {
+            let Ok(actual) = coin.get_latest_block_number().await else {
+              error!("couldn't get the latest block number");
+              // Sleep for a minute as node errors should be incredibly uncommon yet take multiple
+              // seconds to resolve
+              sleep(Duration::from_secs(60)).await;
+              continue;
+            };
+
+            // Check our daemon has this block
+            // CONFIRMATIONS - 1 since any block's TXs have one confirmation (the block itself)
+            let confirmed = actual.saturating_sub(C::CONFIRMATIONS - 1);
+            if needed > confirmed {
+              // This may occur within some natural latency window
+              warn!(
+                "node is desynced. need block {}, have {}",
+                // Print the block needed for the needed block to be confirmed
+                needed + (C::CONFIRMATIONS - 1),
+                actual,
+              );
+              // Sleep for one second per needed block
+              // If the node is disconnected from the network, this will be faster than it should
+              // be, yet presumably it just needs a moment to sync up
+              sleep(Duration::from_secs((needed - confirmed).try_into().unwrap())).await;
+            }
+
+            // Check our scanner has scanned it
+            // This check does void the need for the last one, yet it provides a bit better
+            // debugging
+            let ram_scanned = scanner.ram_scanned().await;
+            if ram_scanned < needed {
+              warn!("scanner is behind. need block {}, scanned up to {}", needed, ram_scanned);
+              sleep(Duration::from_secs((needed - ram_scanned).try_into().unwrap())).await;
+            }
+
+            // TODO: Sanity check we got an AckBlock (or this is the AckBlock) for the block in
+            // question
+
+            /*
+            let synced = |context: &SubstrateContext, key| -> Result<(), ()> {
+              // Check that we've synced this block and can actually operate on it ourselves
+              let latest = scanner.latest_scanned(key);
+              if usize::try_from(context.coin_latest_block_number).unwrap() < latest {
+                log::warn!(
+                  "coin node disconnected/desynced from rest of the network. \
+                  our block: {latest:?}, network's acknowledged: {}",
+                  context.coin_latest_block_number
+                );
+                Err(())?;
+              }
+              Ok(())
+            };
+            */
+
+            break;
+          }
+        }
+
+        match &msg.msg {
+          CoordinatorMessage::KeyGen(_) => {},
+          CoordinatorMessage::Sign(_) => {},
+          CoordinatorMessage::Substrate(msg) => {
+            match msg {
+              substrate::CoordinatorMessage::BlockAcknowledged { context, .. } => {
+                wait(&coin, &scanner, context).await;
+              },
+              substrate::CoordinatorMessage::Burns { context, ..
} => { + wait(&coin, &scanner, context).await; + }, + } + }, + } + + match msg.msg.clone() { + CoordinatorMessage::KeyGen(msg) => { + match key_gen.handle(msg).await { + KeyGenEvent::KeyConfirmed { activation_number, keys } => { + let key = keys.group_key(); + scanner.rotate_key(activation_number, key).await; + schedulers.insert(key.to_bytes().as_ref().to_vec(), Scheduler::::new(key)); + signers.insert( + keys.group_key().to_bytes().as_ref().to_vec(), + Signer::new(raw_db.clone(), coin.clone(), keys) + ); + }, + + // TODO: This may be fired multiple times. What's our plan for that? + KeyGenEvent::ProcessorMessage(msg) => { + coordinator.send(ProcessorMessage::KeyGen(msg)).await; + }, + } + } + + CoordinatorMessage::Sign(msg) => { + signers[msg.key()].handle(msg).await; + } + + CoordinatorMessage::Substrate(msg) => { + match msg { + substrate::CoordinatorMessage::BlockAcknowledged { context, key: key_vec, block } => { + let key = + ::read_G::<&[u8]>(&mut key_vec.as_ref()).unwrap(); + let mut block_id = >::Id::default(); + block_id.as_mut().copy_from_slice(&block); + + let plans = schedulers + .get_mut(&key_vec) + .expect("key we don't have a scheduler for acknowledged a block") + .add_outputs(scanner.ack_block(key, block_id).await); + sign_plans(&mut main_db, &coin, &mut schedulers, &signers, context, plans).await; + } + + substrate::CoordinatorMessage::Burns { context, burns } => { + // TODO2: Rewrite rotation documentation + let schedule_key = active_keys.last().expect("burn event despite no keys"); + let scheduler = schedulers.get_mut(schedule_key.to_bytes().as_ref()).unwrap(); + + let mut payments = vec![]; + for out in burns.clone() { + let WithAmount { data: OutInstruction { address, data }, amount } = out; + if let Ok(address) = C::Address::try_from(address.consume()) { + payments.push(Payment { + address, + data: data.map(|data| data.consume()), + amount: amount.0, + }); + } + } + + let plans = scheduler.schedule(payments); + sign_plans(&mut main_db, &coin, &mut schedulers, &signers, context, plans).await; + } + } + } + } + + coordinator.ack(msg).await; + }, + + msg = scanner.events.recv() => { + // These need to be sent to the coordinator which needs to check they aren't replayed + // TODO + match msg.unwrap() { + ScannerEvent::Outputs(key, block, outputs) => { + coordinator.send(ProcessorMessage::Substrate(substrate::ProcessorMessage::Update { + key: key.to_bytes().as_ref().to_vec(), + block: block.as_ref().to_vec(), + instructions: outputs.iter().filter_map(|output| { + // If these aren't externally received funds, don't handle it as an instruction + if output.kind() != OutputType::External { + return None; + } + + let shorthand = Shorthand::decode(&mut output.data()).ok()?; + let instruction = RefundableInInstruction::try_from(shorthand).ok()?; + // TODO2: Set instruction.origin if not set (and handle refunds in general) + Some(WithAmount { data: instruction.instruction, amount: Amount(output.amount()) }) + }).collect(), + })).await; + }, + } + }, + + (key, msg) = SignerMessageFuture(&mut signers) => { + match msg { + SignerEvent::SignedTransaction { id, tx } => { + main_db.finish_signing(&key, id); + coordinator + .send(ProcessorMessage::Sign(sign::ProcessorMessage::Completed { + key, + id, + tx: tx.as_ref().to_vec() + })) + .await; + + // TODO + // 1) We need to stop signing whenever a peer informs us or the chain has an + // eventuality + // 2) If a peer informed us of an eventuality without an outbound payment, stop + // scanning the chain for it (or at least ack it's solely 
for sanity purposes?) + // 3) When the chain has an eventuality, if it had an outbound payment, report it up to + // Substrate for logging purposes + }, + SignerEvent::ProcessorMessage(msg) => { + coordinator.send(ProcessorMessage::Sign(msg)).await; + }, + } + }, + } + } +} + +#[tokio::main] +async fn main() { + let db = MemDb::new(); // TODO + let coordinator = MemCoordinator::new(); // TODO + let url = env::var("COIN_RPC").expect("coin rpc wasn't specified as an env var"); + match env::var("COIN").expect("coin wasn't specified as an env var").as_str() { + #[cfg(feature = "bitcoin")] + "bitcoin" => run(db, Bitcoin::new(url), coordinator).await, + #[cfg(feature = "monero")] + "monero" => run(db, Monero::new(url), coordinator).await, + _ => panic!("unrecognized coin"), + } +} diff --git a/processor/src/plan.rs b/processor/src/plan.rs new file mode 100644 index 00000000..28c62f84 --- /dev/null +++ b/processor/src/plan.rs @@ -0,0 +1,153 @@ +use std::io; + +use transcript::{Transcript, RecommendedTranscript}; +use group::GroupEncoding; +use frost::curve::Ciphersuite; + +use crate::coins::{Output, Coin}; + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Payment { + pub address: C::Address, + pub data: Option>, + pub amount: u64, +} + +impl Payment { + pub fn transcript(&self, transcript: &mut T) { + transcript.domain_separate(b"payment"); + transcript.append_message(b"address", self.address.to_string().as_bytes()); + if let Some(data) = self.data.as_ref() { + transcript.append_message(b"data", data); + } + transcript.append_message(b"amount", self.amount.to_le_bytes()); + } + + pub fn write(&self, writer: &mut W) -> io::Result<()> { + let address: Vec = self + .address + .clone() + .try_into() + .map_err(|_| io::Error::new(io::ErrorKind::Other, "address couldn't be serialized"))?; + writer.write_all(&u32::try_from(address.len()).unwrap().to_le_bytes())?; + writer.write_all(&address)?; + + writer.write_all(&[u8::from(self.data.is_some())])?; + if let Some(data) = &self.data { + writer.write_all(&u32::try_from(data.len()).unwrap().to_le_bytes())?; + writer.write_all(data)?; + } + + writer.write_all(&self.amount.to_le_bytes()) + } + + pub fn read(reader: &mut R) -> io::Result { + let mut buf = [0; 4]; + reader.read_exact(&mut buf)?; + let mut address = vec![0; usize::try_from(u32::from_le_bytes(buf)).unwrap()]; + reader.read_exact(&mut address)?; + let address = C::Address::try_from(address) + .map_err(|_| io::Error::new(io::ErrorKind::Other, "invalid address"))?; + + let mut buf = [0; 1]; + reader.read_exact(&mut buf)?; + let data = if buf[0] == 1 { + let mut buf = [0; 4]; + reader.read_exact(&mut buf)?; + let mut data = vec![0; usize::try_from(u32::from_le_bytes(buf)).unwrap()]; + reader.read_exact(&mut data)?; + Some(data) + } else { + None + }; + + let mut buf = [0; 8]; + reader.read_exact(&mut buf)?; + let amount = u64::from_le_bytes(buf); + + Ok(Payment { address, data, amount }) + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Plan { + pub key: ::G, + pub inputs: Vec, + pub payments: Vec>, + pub change: Option<::G>, +} + +impl Plan { + pub fn transcript(&self) -> RecommendedTranscript { + let mut transcript = RecommendedTranscript::new(b"Serai Processor Plan ID"); + transcript.domain_separate(b"meta"); + transcript.append_message(b"key", self.key.to_bytes()); + + transcript.domain_separate(b"inputs"); + for input in &self.inputs { + transcript.append_message(b"input", input.id()); + } + + transcript.domain_separate(b"payments"); + for payment in &self.payments { + 
payment.transcript(&mut transcript); + } + + if let Some(change) = self.change { + transcript.append_message(b"change", change.to_bytes()); + } + + transcript + } + + pub fn id(&self) -> [u8; 32] { + let challenge = self.transcript().challenge(b"id"); + let mut res = [0; 32]; + res.copy_from_slice(&challenge[.. 32]); + res + } + + pub fn write(&self, writer: &mut W) -> io::Result<()> { + writer.write_all(self.key.to_bytes().as_ref())?; + + writer.write_all(&u32::try_from(self.inputs.len()).unwrap().to_le_bytes())?; + for input in &self.inputs { + input.write(writer)?; + } + + writer.write_all(&u32::try_from(self.payments.len()).unwrap().to_le_bytes())?; + for payment in &self.payments { + payment.write(writer)?; + } + + writer.write_all(&[u8::from(self.change.is_some())])?; + if let Some(change) = &self.change { + writer.write_all(change.to_bytes().as_ref())?; + } + + Ok(()) + } + + pub fn read(reader: &mut R) -> io::Result { + let key = C::Curve::read_G(reader)?; + + let mut inputs = vec![]; + let mut buf = [0; 4]; + reader.read_exact(&mut buf)?; + for _ in 0 .. u32::from_le_bytes(buf) { + inputs.push(C::Output::read(reader)?); + } + + let mut payments = vec![]; + reader.read_exact(&mut buf)?; + for _ in 0 .. u32::from_le_bytes(buf) { + payments.push(Payment::::read(reader)?); + } + + let mut buf = [0; 1]; + reader.read_exact(&mut buf)?; + let change = if buf[0] == 1 { Some(C::Curve::read_G(reader)?) } else { None }; + + Ok(Plan { key, inputs, payments, change }) + } +} diff --git a/processor/src/scanner.rs b/processor/src/scanner.rs new file mode 100644 index 00000000..7ded6d12 --- /dev/null +++ b/processor/src/scanner.rs @@ -0,0 +1,384 @@ +use core::{marker::PhantomData, time::Duration}; +use std::{ + sync::Arc, + collections::{HashSet, HashMap}, +}; + +use group::GroupEncoding; +use frost::curve::Ciphersuite; + +use log::{info, debug, warn}; +use tokio::{ + sync::{RwLock, mpsc}, + time::sleep, +}; + +use crate::{ + DbTxn, Db, + coins::{Output, Block, Coin}, +}; + +#[derive(Clone, Debug)] +pub enum ScannerEvent { + // Outputs received + Outputs(::G, >::Id, Vec), +} + +pub type ScannerEventChannel = mpsc::UnboundedReceiver>; + +#[derive(Clone, Debug)] +struct ScannerDb(D, PhantomData); +impl ScannerDb { + fn scanner_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec { + D::key(b"SCANNER", dst, key) + } + + fn block_key(number: usize) -> Vec { + Self::scanner_key(b"block_id", u64::try_from(number).unwrap().to_le_bytes()) + } + fn block_number_key(id: &>::Id) -> Vec { + Self::scanner_key(b"block_number", id) + } + fn save_block( + &mut self, + txn: &mut D::Transaction, + number: usize, + id: &>::Id, + ) { + txn.put(Self::block_number_key(id), u64::try_from(number).unwrap().to_le_bytes()); + txn.put(Self::block_key(number), id); + } + fn block(&self, number: usize) -> Option<>::Id> { + self.0.get(Self::block_key(number)).map(|id| { + let mut res = >::Id::default(); + res.as_mut().copy_from_slice(&id); + res + }) + } + fn block_number(&self, id: &>::Id) -> Option { + self + .0 + .get(Self::block_number_key(id)) + .map(|number| u64::from_le_bytes(number.try_into().unwrap()).try_into().unwrap()) + } + + fn active_keys_key() -> Vec { + Self::scanner_key(b"active_keys", b"") + } + fn add_active_key(&mut self, txn: &mut D::Transaction, key: ::G) { + let mut keys = self.0.get(Self::active_keys_key()).unwrap_or(vec![]); + // TODO: Don't do this if the key is already marked active (which can happen based on reboot + // timing) + keys.extend(key.to_bytes().as_ref()); + 
txn.put(Self::active_keys_key(), keys); + } + fn active_keys(&self) -> Vec<::G> { + let bytes_vec = self.0.get(Self::active_keys_key()).unwrap_or(vec![]); + let mut bytes: &[u8] = bytes_vec.as_ref(); + + let mut res = Vec::with_capacity(bytes.len() / 32); + while !bytes.is_empty() { + res.push(C::Curve::read_G(&mut bytes).unwrap()); + } + res + } + + fn seen_key(id: &::Id) -> Vec { + Self::scanner_key(b"seen", id) + } + fn seen(&self, id: &::Id) -> bool { + self.0.get(Self::seen_key(id)).is_some() + } + + fn outputs_key( + key: &::G, + block: &>::Id, + ) -> Vec { + let key_bytes = key.to_bytes(); + let key = key_bytes.as_ref(); + // This should be safe without the bincode serialize. Using bincode lets us not worry/have to + // think about this + let db_key = bincode::serialize(&(key, block.as_ref())).unwrap(); + // Assert this is actually length prefixing + debug_assert!(db_key.len() >= (1 + key.len() + 1 + block.as_ref().len())); + Self::scanner_key(b"outputs", db_key) + } + fn save_outputs( + &mut self, + txn: &mut D::Transaction, + key: &::G, + block: &>::Id, + outputs: &[C::Output], + ) { + let mut bytes = Vec::with_capacity(outputs.len() * 64); + for output in outputs { + output.write(&mut bytes).unwrap(); + } + txn.put(Self::outputs_key(key, block), bytes); + } + fn outputs( + &self, + key: &::G, + block: &>::Id, + ) -> Option> { + let bytes_vec = self.0.get(Self::outputs_key(key, block))?; + let mut bytes: &[u8] = bytes_vec.as_ref(); + + let mut res = vec![]; + while !bytes.is_empty() { + res.push(C::Output::read(&mut bytes).unwrap()); + } + Some(res) + } + + fn scanned_block_key(key: &::G) -> Vec { + Self::scanner_key(b"scanned_block", key.to_bytes()) + } + fn save_scanned_block( + &mut self, + txn: &mut D::Transaction, + key: &::G, + block: usize, + ) -> Vec { + let new_key = self.0.get(Self::scanned_block_key(key)).is_none(); + let outputs = self.block(block).and_then(|id| self.outputs(key, &id)); + // Either this is a new key, with no outputs, or we're acknowledging this block + // If we're acknowledging it, we should have outputs available + assert_eq!(new_key, outputs.is_none()); + let outputs = outputs.unwrap_or(vec![]); + + // Mark all the outputs from this block as seen + for output in &outputs { + txn.put(Self::seen_key(&output.id()), b""); + } + + txn.put(Self::scanned_block_key(key), u64::try_from(block).unwrap().to_le_bytes()); + + // Return this block's outputs so they can be pruned from the RAM cache + outputs + } + fn latest_scanned_block(&self, key: ::G) -> usize { + let bytes = self.0.get(Self::scanned_block_key(&key)).unwrap_or(vec![0; 8]); + u64::from_le_bytes(bytes.try_into().unwrap()).try_into().unwrap() + } +} + +/// The Scanner emits events relating to the blockchain, notably received outputs. +/// It WILL NOT fail to emit an event, even if it reboots at selected moments. +/// It MAY fire the same event multiple times. 
+#[derive(Debug)] +pub struct Scanner { + coin: C, + db: ScannerDb, + keys: Vec<::G>, + + ram_scanned: HashMap, usize>, + ram_outputs: HashSet>, + + events: mpsc::UnboundedSender>, +} + +#[derive(Debug)] +pub struct ScannerHandle { + scanner: Arc>>, + pub events: ScannerEventChannel, +} + +impl ScannerHandle { + pub async fn ram_scanned(&self) -> usize { + let mut res = None; + for scanned in self.scanner.read().await.ram_scanned.values() { + if res.is_none() { + res = Some(*scanned); + } + // Returns the lowest scanned value so no matter the keys interacted with, this is + // sufficiently scanned + res = Some(res.unwrap().min(*scanned)); + } + res.unwrap_or(0) + } + + /// Rotate the key being scanned for. + /// + /// If no key has been prior set, this will become the key with no further actions. + /// + /// If a key has been prior set, both keys will be scanned for as detailed in the Multisig + /// documentation. The old key will eventually stop being scanned for, leaving just the + /// updated-to key. + pub async fn rotate_key(&self, activation_number: usize, key: ::G) { + let mut scanner = self.scanner.write().await; + if !scanner.keys.is_empty() { + // Protonet will have a single, static validator set + // TODO2 + panic!("only a single key is supported at this time"); + } + + info!("Rotating to key {}", hex::encode(key.to_bytes())); + let mut txn = scanner.db.0.txn(); + assert!(scanner.db.save_scanned_block(&mut txn, &key, activation_number).is_empty()); + scanner.db.add_active_key(&mut txn, key); + txn.commit(); + scanner.keys.push(key); + } + + /// Acknowledge having handled a block for a key. + pub async fn ack_block( + &self, + key: ::G, + id: >::Id, + ) -> Vec { + let mut scanner = self.scanner.write().await; + debug!("Block {} acknowledged", hex::encode(&id)); + let number = + scanner.db.block_number(&id).expect("main loop trying to operate on data we haven't scanned"); + + let mut txn = scanner.db.0.txn(); + let outputs = scanner.db.save_scanned_block(&mut txn, &key, number); + txn.commit(); + + for output in &outputs { + scanner.ram_outputs.remove(output.id().as_ref()); + } + + outputs + } +} + +impl Scanner { + #[allow(clippy::new_ret_no_self)] + pub fn new(coin: C, db: D) -> (ScannerHandle, Vec<::G>) { + let (events_send, events_recv) = mpsc::unbounded_channel(); + + let db = ScannerDb(db, PhantomData); + let keys = db.active_keys(); + + let scanner = Arc::new(RwLock::new(Scanner { + coin, + db, + keys: keys.clone(), + + ram_scanned: HashMap::new(), + ram_outputs: HashSet::new(), + + events: events_send, + })); + tokio::spawn(Scanner::run(scanner.clone())); + + (ScannerHandle { scanner, events: events_recv }, keys) + } + + fn emit(&mut self, event: ScannerEvent) -> bool { + if self.events.send(event).is_err() { + info!("Scanner handler was dropped. 
Shutting down?"); + return false; + } + true + } + + // An async function, to be spawned on a task, to discover and report outputs + async fn run(scanner: Arc>) { + loop { + // Only check every five seconds for new blocks + sleep(Duration::from_secs(5)).await; + + // Scan new blocks + { + let mut scanner = scanner.write().await; + let latest = scanner.coin.get_latest_block_number().await; + let latest = match latest { + // Only scan confirmed blocks, which we consider effectively finalized + // CONFIRMATIONS - 1 as whatever's in the latest block already has 1 confirm + Ok(latest) => latest.saturating_sub(C::CONFIRMATIONS.saturating_sub(1)), + Err(_) => { + warn!("Couldn't get {}'s latest block number", C::ID); + sleep(Duration::from_secs(60)).await; + continue; + } + }; + + for key in scanner.keys.clone() { + let key_vec = key.to_bytes().as_ref().to_vec(); + let latest_scanned = { + // Grab the latest scanned block according to the DB + let db_scanned = scanner.db.latest_scanned_block(key); + // We may, within this process's lifetime, have scanned more blocks + // If they're still being processed, we will not have officially written them to the DB + // as scanned yet + // That way, if the process terminates, and is rebooted, we'll rescan from a handled + // point, re-firing all events along the way, enabling them to be properly processed + // In order to not re-fire them within this process's lifetime, check our RAM cache + // of what we've scanned + // We are allowed to re-fire them within this lifetime. It's just wasteful + let ram_scanned = scanner.ram_scanned.get(&key_vec).cloned().unwrap_or(0); + // Pick whichever is higher + db_scanned.max(ram_scanned) + }; + + for i in (latest_scanned + 1) ..= latest { + // TODO2: Check for key deprecation + + let block = match scanner.coin.get_block(i).await { + Ok(block) => block, + Err(_) => { + warn!("Couldn't get {} block {i}", C::ID); + break; + } + }; + let block_id = block.id(); + + if let Some(id) = scanner.db.block(i) { + // TODO2: Also check this block builds off the previous block + if id != block.id() { + panic!("{} reorg'd from {id:?} to {:?}", C::ID, hex::encode(block_id)); + } + } else { + info!("Found new block: {}", hex::encode(&block_id)); + let mut txn = scanner.db.0.txn(); + scanner.db.save_block(&mut txn, i, &block_id); + txn.commit(); + } + + let outputs = match scanner.coin.get_outputs(&block, key).await { + Ok(outputs) => outputs, + Err(_) => { + warn!("Couldn't scan {} block {i:?}", C::ID); + break; + } + }; + + // Panic if we've already seen these outputs + for output in &outputs { + let id = output.id(); + // On Bitcoin, the output ID should be unique for a given chain + // On Monero, it's trivial to make an output sharing an ID with another + // We should only scan outputs with valid IDs however, which will be unique + let seen = scanner.db.seen(&id); + let id = id.as_ref().to_vec(); + if seen || scanner.ram_outputs.contains(&id) { + panic!("scanned an output multiple times"); + } + scanner.ram_outputs.insert(id); + } + + // TODO: Still fire an empty Outputs event if we haven't had inputs in a while + if outputs.is_empty() { + continue; + } + + // Save the outputs to disk + let mut txn = scanner.db.0.txn(); + scanner.db.save_outputs(&mut txn, &key, &block_id, &outputs); + txn.commit(); + + // Send all outputs + if !scanner.emit(ScannerEvent::Outputs(key, block_id, outputs)) { + return; + } + // Write this number as scanned so we won't re-fire these outputs + scanner.ram_scanned.insert(key_vec.clone(), i); + } + } + } + } 
+  }
+}
diff --git a/processor/src/scheduler.rs b/processor/src/scheduler.rs
index 6c545eb4..e37c2c98 100644
--- a/processor/src/scheduler.rs
+++ b/processor/src/scheduler.rs
@@ -1,4 +1,265 @@
-// For n existing inputs, and n target outputs, multiplex the inputs in while log scheduling the
-// outputs out. Monero, which has a limit of 16 TXOs, could do 15 at a time, carrying a change
-// Combined with the 20 minute lock, this is completely infeasible. By instead doing 15 TX seeds,
-// and then 16 outputs on each, in just two lock cycles you can accomplish 240 TXs (not just 30).
+use std::collections::{VecDeque, HashMap};
+
+use frost::curve::Ciphersuite;
+
+use crate::{
+  coins::{Output, Coin},
+  Payment, Plan,
+};
+
+/// Stateless, deterministic output/payment manager.
+#[derive(Debug)]
+pub struct Scheduler<C: Coin> {
+  key: <C::Curve as Ciphersuite>::G,
+
+  // Serai, when it has more outputs expected than it can handle in a single transaction, will
+  // schedule the outputs to be handled later. Immediately, it just creates additional outputs
+  // which will eventually handle those outputs
+  //
+  // These maps map output amounts, which we'll receive in the future, to the payments they should
+  // be used on
+  //
+  // When those output amounts appear, their payments should be scheduled
+  // The Vec is for all payments that should be done per output instance
+  // The VecDeque allows multiple sets of payments with the same sum amount to properly co-exist
+  //
+  // queued_plans are for outputs which we will create, yet when created, will have their amount
+  // reduced by the fee it cost to be created. The Scheduler will then be told what amount the
+  // output actually has, and it'll be moved into plans
+  //
+  // TODO2: Consider edge case where branch/change isn't mined yet keys are deprecated
+  queued_plans: HashMap<u64, VecDeque<Vec<Payment<C>>>>,
+  plans: HashMap<u64, VecDeque<Vec<Payment<C>>>>,
+
+  // UTXOs available
+  utxos: Vec<C::Output>,
+
+  // Payments awaiting scheduling due to the output availability problem
+  payments: VecDeque<Payment<C>>,
+}
+
+impl<C: Coin> Scheduler<C> {
+  pub fn new(key: <C::Curve as Ciphersuite>::G) -> Self {
+    Scheduler {
+      key,
+      queued_plans: HashMap::new(),
+      plans: HashMap::new(),
+      utxos: vec![],
+      payments: VecDeque::new(),
+    }
+  }
+
+  fn execute(&mut self, inputs: Vec<C::Output>, mut payments: Vec<Payment<C>>) -> Plan<C> {
+    // This must be equal to plan.key due to how coins detect they created outputs which are to
+    // the branch address
+    let branch_address = C::branch_address(self.key);
+    // created_output will be called any time we send to a branch address
+    // If it's called, and it wasn't expecting to be called, that's almost certainly an error
+    // The only way it wouldn't be is if someone on Serai triggered a burn to a branch, which is
+    // pointless anyways
+    // If we allow such behavior, we lose the ability to detect the aforementioned class of errors
+    // Ignore these payments so we can safely assert there
+    let mut payments =
+      payments.drain(..).filter(|payment| payment.address != branch_address).collect::<Vec<_>>();
+
+    let mut change = false;
+    let mut max = C::MAX_OUTPUTS;
+
+    let payment_amounts =
+      |payments: &Vec<Payment<C>>| payments.iter().map(|payment| payment.amount).sum::<u64>();
+
+    // Requires a change output
+    if inputs.iter().map(Output::amount).sum::<u64>() != payment_amounts(&payments) {
+      change = true;
+      max -= 1;
+    }
+
+    let mut add_plan = |payments| {
+      let amount = payment_amounts(&payments);
+      self.queued_plans.entry(amount).or_insert(VecDeque::new()).push_back(payments);
+      amount
+    };
+
+    // If we have more payments than we can handle in a single TX, create plans for them
+    // TODO2: This isn't perfect. For 258 outputs, and a MAX_OUTPUTS of 16, this will create:
+    // 15 branches of 16 leaves
+    // 1 branch of:
+    // - 1 branch of 16 leaves
+    // - 2 leaves
+    // If this was perfect, the heaviest branch would have 1 branch of 3 leaves and 15 leaves
+    while payments.len() > max {
+      // The resulting TX will have the remaining payments and a new branch payment
+      let to_remove = (payments.len() + 1) - C::MAX_OUTPUTS;
+      // Don't remove more than possible
+      let to_remove = to_remove.min(C::MAX_OUTPUTS);
+
+      // Create the plan
+      let removed = payments.drain((payments.len() - to_remove) ..).collect::<Vec<_>>();
+      assert_eq!(removed.len(), to_remove);
+      let amount = add_plan(removed);
+
+      // Create the payment for the plan
+      // Push it to the front so it's not moved into a branch until all lower-depth items are
+      payments.insert(0, Payment { address: branch_address.clone(), data: None, amount });
+    }
+
+    // TODO2: Use the latest key for change
+    // TODO2: Update rotation documentation
+    Plan { key: self.key, inputs, payments, change: Some(self.key).filter(|_| change) }
+  }
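To make the splitting loop above concrete, here is a standalone sketch of its arithmetic, using the 258-payment, MAX_OUTPUTS = 16 case from the comment and assuming no change output (so max equals MAX_OUTPUTS):

fn main() {
  const MAX_OUTPUTS: usize = 16;
  let (mut payments, mut branches) = (258usize, 0usize);
  while payments > MAX_OUTPUTS {
    // Drain enough payments that the remainder, plus one branch payment, fits
    let to_remove = ((payments + 1) - MAX_OUTPUTS).min(MAX_OUTPUTS);
    payments -= to_remove; // these become a queued branch plan
    payments += 1; // the branch payment which replaces them
    branches += 1;
  }
  // 17 branch plans get queued in total: 15 full 16-leaf branches in the root,
  // one further 16-leaf branch, and one mixed branch holding that nested
  // branch plus the 2 remaining leaves, leaving a 16-output root TX
  assert_eq!((branches, payments), (17, 16));
}

Branch payments are inserted at the front while leaves are drained from the back, which is why the nested branch only forms once the leaves run out.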
+    while payments.len() > max {
+      // The resulting TX will have the remaining payments and a new branch payment
+      let to_remove = (payments.len() + 1) - C::MAX_OUTPUTS;
+      // Don't remove more than possible
+      let to_remove = to_remove.min(C::MAX_OUTPUTS);
+
+      // Create the plan
+      let removed = payments.drain((payments.len() - to_remove) ..).collect::<Vec<_>>();
+      assert_eq!(removed.len(), to_remove);
+      let amount = add_plan(removed);
+
+      // Create the payment for the plan
+      // Push it to the front so it's not moved into a branch until all lower-depth items are
+      payments.insert(0, Payment { address: branch_address.clone(), data: None, amount });
+    }
+
+    // TODO2: Use the latest key for change
+    // TODO2: Update rotation documentation
+    Plan { key: self.key, inputs, payments, change: Some(self.key).filter(|_| change) }
+  }
+
+  // When Substrate emits `Updates` for a coin, all outputs should be added up to the
+  // acknowledged block.
+  pub fn add_outputs(&mut self, mut utxos: Vec<C::Output>) -> Vec<Plan<C>> {
+    let mut txs = vec![];
+
+    for utxo in utxos.drain(..) {
+      // If we can fulfill planned TXs with this output, do so
+      // We could limit this to UTXOs where `utxo.kind() == OutputType::Branch`, yet there's no
+      // practical benefit in doing so
+      if let Some(plans) = self.plans.get_mut(&utxo.amount()) {
+        // Execute the first set of payments possible with an output of this amount
+        let payments = plans.pop_front().unwrap();
+        // They won't be equal if we dropped payments due to being dust
+        assert!(utxo.amount() >= payments.iter().map(|payment| payment.amount).sum::<u64>());
+
+        // If we've grabbed the last plan for this output amount, remove it from the map
+        if plans.is_empty() {
+          self.plans.remove(&utxo.amount());
+        }
+
+        // Create a TX for these payments
+        txs.push(self.execute(vec![utxo], payments));
+      } else {
+        self.utxos.push(utxo);
+      }
+    }
+
+    // Sort the remaining UTXOs by amount
+    self.utxos.sort_by(|a, b| a.amount().cmp(&b.amount()).reverse());
+
+    // Return the now possible TXs
+    log::info!("created {} planned TXs to sign from newly received outputs", txs.len());
+    txs
+  }
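+
+  // A sketch of the expected call flow, inferred from the doc comments in this file: when a
+  // block is acknowledged, feed its outputs to `add_outputs` (which may immediately return
+  // executable Plans for branch outputs), call `schedule` with any newly reported payments,
+  // then report each branch output's creation (or dropping as dust) via `created_output` so
+  // its child payments are re-planned with post-fee amounts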
+
+  // Schedule a series of payments. This should be called after `add_outputs`.
+  pub fn schedule(&mut self, payments: Vec<Payment<C>>) -> Vec<Plan<C>> {
+    log::debug!("scheduling payments");
+    assert!(!payments.is_empty(), "tried to schedule zero payments");
+
+    // Add all new payments to the list of pending payments
+    self.payments.extend(payments);
+
+    // If we don't have UTXOs available, don't try to continue
+    if self.utxos.is_empty() {
+      return vec![];
+    }
+
+    // We always want to aggregate our UTXOs into a single UTXO in the name of simplicity
+    // We may have more UTXOs than will fit into a TX though
+    // We use the most valuable UTXOs to handle our current payments, and we return aggregation TXs
+    // for the rest of the inputs
+    // Since we do multiple aggregation TXs at once, this will execute in logarithmic time
+    let utxos = self.utxos.drain(..).collect::<Vec<_>>();
+    let mut utxo_chunks =
+      utxos.chunks(C::MAX_INPUTS).map(|chunk| chunk.to_vec()).collect::<Vec<_>>();
+    let utxos = utxo_chunks.remove(0);
+
+    // If the last chunk exists and only has one output, don't try aggregating it
+    // Just immediately consider it another output
+    if let Some(mut chunk) = utxo_chunks.pop() {
+      if chunk.len() == 1 {
+        self.utxos.push(chunk.pop().unwrap());
+      } else {
+        utxo_chunks.push(chunk);
+      }
+    }
+
+    let mut aggregating = vec![];
+    for chunk in utxo_chunks.drain(..) {
+      aggregating.push(Plan {
+        key: self.key,
+        inputs: chunk,
+        payments: vec![],
+        change: Some(self.key),
+      })
+    }
+
+    // We want to use all possible UTXOs for all possible payments
+    let mut balance = utxos.iter().map(Output::amount).sum::<u64>();
+
+    // If we can't fulfill the next payment, we have encountered an instance of the UTXO
+    // availability problem
+    // This shows up in coins like Monero, where because we spent outputs, our change has yet to
+    // re-appear. Since it has yet to re-appear, we only operate with a balance which is a subset
+    // of our total balance
+    // Despite this, we may be ordered to fulfill a payment which is our total balance
+    // The solution is to wait for the temporarily unavailable change outputs to re-appear,
+    // granting us access to our full balance
+    let mut executing = vec![];
+    while !self.payments.is_empty() {
+      let amount = self.payments[0].amount;
+      if balance.checked_sub(amount).is_some() {
+        balance -= amount;
+        executing.push(self.payments.pop_front().unwrap());
+      } else {
+        // Stop if the next payment is currently unaffordable, waiting for outputs to re-appear
+        break;
+      }
+    }
+
+    // Now that we have the list of payments we can successfully handle right now, create the TX
+    // for them
+    let mut txs = vec![self.execute(utxos, executing)];
+    txs.append(&mut aggregating);
+    log::info!("created {} TXs to sign", txs.len());
+    txs
+  }
+
+  // Note a branch output as having been created, with the amount it was actually created with,
+  // or not having been created due to being too small
+  // This can be called whenever, so long as it's properly ordered
+  // (it's independent of Serai/the chain we're scheduling over, yet still expects outputs to be
+  // created in the same order Plans are returned in)
+  pub fn created_output(&mut self, expected: u64, actual: Option<u64>) {
+    log::debug!("output expected to have {} had {:?} after fees", expected, actual);
+
+    // Get the payments this output is expected to handle
+    let queued = self.queued_plans.get_mut(&expected).unwrap();
+    let mut payments = queued.pop_front().unwrap();
+    assert_eq!(expected, payments.iter().map(|payment| payment.amount).sum::<u64>());
+    // If this was the last set of payments at this amount, remove it
+    if queued.is_empty() {
+      self.queued_plans.remove(&expected);
+    }
+
+    // If we didn't actually create this output, return, dropping the child payments
+    let actual = match actual {
+      Some(actual) => actual,
+      None => return,
+    };
+
+    // Amortize the fee amongst all payments
+    // While some coins, like Ethereum, may have some payments take notably more gas, those
+    // payments will have their own gas deducted when they're created. The difference in output
+    // value present here is solely the cost of the branch, which is used for all of these
+    // payments, regardless of how much they'll end up costing
+    let diff = expected - actual;
+    let payments_len = u64::try_from(payments.len()).unwrap();
+    let per_payment = diff / payments_len;
+    // The above division isn't perfect
+    let mut remainder = diff - (per_payment * payments_len);
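+    // As an illustrative sketch (assumed numbers): expected = 105 and actual = 100 gives
+    // diff = 5 over 3 payments, so per_payment = 1 and remainder = 2; the payments are then
+    // reduced by 3, 1, and 1 respectively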
+
+    for payment in payments.iter_mut() {
+      payment.amount = payment.amount.saturating_sub(per_payment + remainder);
+      // Only subtract the remainder once
+      remainder = 0;
+    }
+
+    // Drop payments now below the dust threshold
+    let payments =
+      payments.drain(..).filter(|payment| payment.amount >= C::DUST).collect::<Vec<_>>();
+    // Sanity check this was done properly
+    assert!(actual >= payments.iter().map(|payment| payment.amount).sum::<u64>());
+
+    self.plans.entry(actual).or_insert(VecDeque::new()).push_back(payments);
+  }
+}
diff --git a/processor/src/signer.rs b/processor/src/signer.rs
new file mode 100644
index 00000000..a403afb0
--- /dev/null
+++ b/processor/src/signer.rs
@@ -0,0 +1,512 @@
+use core::{marker::PhantomData, fmt};
+use std::{
+  sync::Arc,
+  time::{SystemTime, Duration},
+  collections::HashMap,
+};
+
+use rand_core::OsRng;
+
+use group::GroupEncoding;
+use frost::{
+  ThresholdKeys,
+  sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine},
+};
+
+use log::{info, debug, warn, error};
+use tokio::{
+  sync::{RwLock, mpsc},
+  time::sleep,
+};
+
+use messages::sign::*;
+use crate::{
+  DbTxn, Db,
+  coins::{Transaction, Eventuality, Coin},
+};
+
+const CHANNEL_MSG: &str = "Signer handler was dropped. 
Shutting down?"; + +#[derive(Debug)] +pub enum SignerEvent { + SignedTransaction { id: [u8; 32], tx: >::Id }, + ProcessorMessage(ProcessorMessage), +} + +pub type SignerEventChannel = mpsc::UnboundedReceiver>; + +#[derive(Debug)] +struct SignerDb(D, PhantomData); +impl SignerDb { + fn sign_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec { + D::key(b"SIGNER", dst, key) + } + + fn completed_key(id: [u8; 32]) -> Vec { + Self::sign_key(b"completed", id) + } + fn complete( + &mut self, + txn: &mut D::Transaction, + id: [u8; 32], + tx: >::Id, + ) { + // Transactions can be completed by multiple signatures + // Save every solution in order to be robust + let mut existing = txn.get(Self::completed_key(id)).unwrap_or(vec![]); + // TODO: Don't do this if this TX is already present + existing.extend(tx.as_ref()); + txn.put(Self::completed_key(id), existing); + } + fn completed(&self, id: [u8; 32]) -> Option> { + self.0.get(Self::completed_key(id)) + } + + fn eventuality_key(id: [u8; 32]) -> Vec { + Self::sign_key(b"eventuality", id) + } + fn save_eventuality( + &mut self, + txn: &mut D::Transaction, + id: [u8; 32], + eventuality: C::Eventuality, + ) { + txn.put(Self::eventuality_key(id), eventuality.serialize()); + } + fn eventuality(&self, id: [u8; 32]) -> Option { + Some( + C::Eventuality::read::<&[u8]>(&mut self.0.get(Self::eventuality_key(id))?.as_ref()).unwrap(), + ) + } + + fn attempt_key(id: &SignId) -> Vec { + Self::sign_key(b"attempt", bincode::serialize(id).unwrap()) + } + fn attempt(&mut self, txn: &mut D::Transaction, id: &SignId) { + txn.put(Self::attempt_key(id), []); + } + fn has_attempt(&mut self, id: &SignId) -> bool { + self.0.get(Self::attempt_key(id)).is_some() + } + + fn save_transaction(&mut self, txn: &mut D::Transaction, tx: &C::Transaction) { + txn.put(Self::sign_key(b"tx", tx.id()), tx.serialize()); + } +} + +/// Coded so if the processor spontaneously reboots, one of two paths occur: +/// 1) It either didn't send its response, so the attempt will be aborted +/// 2) It did send its response, and has locally saved enough data to continue +pub struct Signer { + coin: C, + db: SignerDb, + + keys: ThresholdKeys, + + signable: HashMap<[u8; 32], (SystemTime, C::SignableTransaction)>, + attempt: HashMap<[u8; 32], u32>, + preprocessing: HashMap<[u8; 32], ::SignMachine>, + #[allow(clippy::type_complexity)] + signing: HashMap< + [u8; 32], + < + ::SignMachine as SignMachine + >::SignatureMachine, + >, + + events: mpsc::UnboundedSender>, +} + +impl fmt::Debug for Signer { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt + .debug_struct("Signer") + .field("coin", &self.coin) + .field("signable", &self.signable) + .field("attempt", &self.attempt) + .finish_non_exhaustive() + } +} + +#[derive(Debug)] +pub struct SignerHandle { + signer: Arc>>, + pub events: SignerEventChannel, +} + +impl Signer { + #[allow(clippy::new_ret_no_self)] + pub fn new(db: D, coin: C, keys: ThresholdKeys) -> SignerHandle { + let (events_send, events_recv) = mpsc::unbounded_channel(); + + let signer = Arc::new(RwLock::new(Signer { + coin, + db: SignerDb(db, PhantomData), + + keys, + + signable: HashMap::new(), + attempt: HashMap::new(), + preprocessing: HashMap::new(), + signing: HashMap::new(), + + events: events_send, + })); + + tokio::spawn(Signer::run(signer.clone())); + + SignerHandle { signer, events: events_recv } + } + + fn verify_id(&self, id: &SignId) -> Result<(), ()> { + if !id.signing_set(&self.keys.params()).contains(&self.keys.params().i()) { + panic!("coordinator sent us 
preprocesses for a signing attempt we're not participating in"); + } + + // Check the attempt lines up + match self.attempt.get(&id.id) { + // If we don't have an attempt logged, it's because the coordinator is faulty OR + // because we rebooted + None => { + warn!("not attempting {:?}. this is an error if we didn't reboot", id); + // Don't panic on the assumption we rebooted + Err(())?; + } + Some(attempt) => { + // This could be an old attempt, or it may be a 'future' attempt if we rebooted and + // our SystemTime wasn't monotonic, as it may be + if attempt != &id.attempt { + debug!("sent signing data for a distinct attempt"); + Err(())?; + } + } + } + + Ok(()) + } + + fn emit(&mut self, event: SignerEvent) -> bool { + if self.events.send(event).is_err() { + info!("{}", CHANNEL_MSG); + false + } else { + true + } + } + + async fn handle(&mut self, msg: CoordinatorMessage) { + match msg { + CoordinatorMessage::Preprocesses { id, mut preprocesses } => { + if self.verify_id(&id).is_err() { + return; + } + + let machine = match self.preprocessing.remove(&id.id) { + // Either rebooted or RPC error, or some invariant + None => { + warn!("not preprocessing for {:?}. this is an error if we didn't reboot", id); + return; + } + Some(machine) => machine, + }; + + let preprocesses = match preprocesses + .drain() + .map(|(l, preprocess)| { + machine + .read_preprocess::<&[u8]>(&mut preprocess.as_ref()) + .map(|preprocess| (l, preprocess)) + }) + .collect::>() + { + Ok(preprocesses) => preprocesses, + Err(e) => todo!("malicious signer: {:?}", e), + }; + + // Use an empty message, as expected of TransactionMachines + let (machine, share) = match machine.sign(preprocesses, &[]) { + Ok(res) => res, + Err(e) => todo!("malicious signer: {:?}", e), + }; + self.signing.insert(id.id, machine); + + // Broadcast our share + self.emit(SignerEvent::ProcessorMessage(ProcessorMessage::Share { + id, + share: share.serialize(), + })); + } + + CoordinatorMessage::Shares { id, mut shares } => { + if self.verify_id(&id).is_err() { + return; + } + + let machine = match self.signing.remove(&id.id) { + // Rebooted, RPC error, or some invariant + None => { + // If preprocessing has this ID, it means we were never sent the preprocess by the + // coordinator + if self.preprocessing.contains_key(&id.id) { + panic!("never preprocessed yet signing?"); + } + + warn!("not preprocessing for {:?}. 
this is an error if we didn't reboot", id); + return; + } + Some(machine) => machine, + }; + + let shares = match shares + .drain() + .map(|(l, share)| { + machine.read_share::<&[u8]>(&mut share.as_ref()).map(|share| (l, share)) + }) + .collect::>() + { + Ok(shares) => shares, + Err(e) => todo!("malicious signer: {:?}", e), + }; + + let tx = match machine.complete(shares) { + Ok(res) => res, + Err(e) => todo!("malicious signer: {:?}", e), + }; + + // Save the transaction in case it's needed for recovery + let mut txn = self.db.0.txn(); + self.db.save_transaction(&mut txn, &tx); + self.db.complete(&mut txn, id.id, tx.id()); + txn.commit(); + + // Publish it + if let Err(e) = self.coin.publish_transaction(&tx).await { + error!("couldn't publish {:?}: {:?}", tx, e); + } else { + info!("published {:?}", hex::encode(tx.id())); + } + + // Stop trying to sign for this TX + assert!(self.signable.remove(&id.id).is_some()); + assert!(self.attempt.remove(&id.id).is_some()); + assert!(self.preprocessing.remove(&id.id).is_none()); + assert!(self.signing.remove(&id.id).is_none()); + + self.emit(SignerEvent::SignedTransaction { id: id.id, tx: tx.id() }); + } + + CoordinatorMessage::Completed { key: _, id, tx: tx_vec } => { + let mut tx = >::Id::default(); + if tx.as_ref().len() != tx_vec.len() { + warn!( + "a validator claimed {} completed {id:?} yet that's not a valid TX ID", + hex::encode(&tx) + ); + return; + } + tx.as_mut().copy_from_slice(&tx_vec); + + if let Some(eventuality) = self.db.eventuality(id) { + // Transaction hasn't hit our mempool/was dropped for a different signature + // The latter can happen given certain latency conditions/a single malicious signer + // In the case of a single malicious signer, they can drag multiple honest + // validators down with them, so we unfortunately can't slash on this case + let Ok(tx) = self.coin.get_transaction(&tx).await else { + todo!("queue checking eventualities"); // or give up here? 
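+            // (A sketch of what queuing could look like, not implemented here: persist this
+            // claim and retry the get_transaction + confirm_completion check later, rather
+            // than panicking in the todo!)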
+          };
+
+          if self.coin.confirm_completion(&eventuality, &tx) {
+            // Stop trying to sign for this TX
+            let mut txn = self.db.0.txn();
+            self.db.save_transaction(&mut txn, &tx);
+            self.db.complete(&mut txn, id, tx.id());
+            txn.commit();
+
+            self.signable.remove(&id);
+            self.attempt.remove(&id);
+            self.preprocessing.remove(&id);
+            self.signing.remove(&id);
+
+            self.emit(SignerEvent::SignedTransaction { id, tx: tx.id() });
+          } else {
+            warn!("a validator claimed {} completed {id:?} when it did not", hex::encode(&tx.id()));
+          }
+        }
+      }
+    }
+  }
+
+  // An async function, to be spawned on a task, to handle signing
+  async fn run(signer_arc: Arc<RwLock<Signer<C, D>>>) {
+    const SIGN_TIMEOUT: u64 = 30;
+
+    loop {
+      // Sleep until a timeout expires (or five seconds expire)
+      // Since this code starts new sessions, it will delay any ordered signing sessions from
+      // starting for up to 5 seconds, hence why this number can't be too high (such as 30 seconds,
+      // the full timeout)
+      // This won't delay re-attempting any signing session however, nor will it block the
+      // sign_transaction function (since this doesn't hold any locks)
+      sleep({
+        let now = SystemTime::now();
+        let mut lowest = Duration::from_secs(5);
+        let signer = signer_arc.read().await;
+        for (id, (start, _)) in &signer.signable {
+          let until = if let Some(attempt) = signer.attempt.get(id) {
+            // Get when this attempt times out
+            (*start + Duration::from_secs(u64::from(attempt + 1) * SIGN_TIMEOUT))
+              .duration_since(now)
+              .unwrap_or(Duration::ZERO)
+          } else {
+            Duration::ZERO
+          };
+
+          if until < lowest {
+            lowest = until;
+          }
+        }
+        lowest
+      })
+      .await;
+
+      // Because a signing attempt has timed out (or five seconds has passed), check all
+      // sessions' timeouts
+      {
+        let mut signer = signer_arc.write().await;
+        let keys = signer.signable.keys().cloned().collect::<Vec<_>>();
+        for id in keys {
+          let (start, tx) = &signer.signable[&id];
+          let start = *start;
+
+          let attempt = u32::try_from(
+            SystemTime::now().duration_since(start).unwrap_or(Duration::ZERO).as_secs() /
+              SIGN_TIMEOUT,
+          )
+          .unwrap();
+
+          // Check if we're already working on this attempt
+          if let Some(curr_attempt) = signer.attempt.get(&id) {
+            if curr_attempt >= &attempt {
+              continue;
+            }
+          }
+
+          // Start this attempt
+          // Clone the TX so we don't have an immutable borrow preventing the below mutable actions
+          // (also because we do need an owned tx anyways)
+          let tx = tx.clone();
+
+          // Delete any existing machines
+          signer.preprocessing.remove(&id);
+          signer.signing.remove(&id);
+
+          // Update the attempt number so we don't re-enter this conditional
+          signer.attempt.insert(id, attempt);
+
+          let id =
+            SignId { key: signer.keys.group_key().to_bytes().as_ref().to_vec(), id, attempt };
+          // Only preprocess if we're a signer
+          if !id.signing_set(&signer.keys.params()).contains(&signer.keys.params().i()) {
+            continue;
+          }
+          info!("selected to sign {:?}", id);
+
+          // If we reboot mid-sign, the current design has us abort all signs and wait for latter
+          // attempts/new signing protocols
+          // This is distinct from the DKG which will continue DKG sessions, even on reboot
+          // This is because signing is tolerant of failures of up to 1/3rd of the group
+          // The DKG requires 100% participation
+          // While we could apply similar tricks as the DKG (a seeded RNG) to achieve support for
+          // reboots, it's not worth the complexity when messing up here leaks our secret share
+          //
+          // Despite this, on reboot, we'll get told of active signing items, and may be in this
+          // branch again for something we've already 
attempted + // + // Only run if this hasn't already been attempted + if signer.db.has_attempt(&id) { + warn!("already attempted {:?}. this is an error if we didn't reboot", id); + continue; + } + + let mut txn = signer.db.0.txn(); + signer.db.attempt(&mut txn, &id); + txn.commit(); + + // Attempt to create the TX + let machine = match signer.coin.attempt_send(tx).await { + Err(e) => { + error!("failed to attempt {:?}: {:?}", id, e); + continue; + } + Ok(machine) => machine, + }; + + let (machine, preprocess) = machine.preprocess(&mut OsRng); + signer.preprocessing.insert(id.id, machine); + + // Broadcast our preprocess + if !signer.emit(SignerEvent::ProcessorMessage(ProcessorMessage::Preprocess { + id, + preprocess: preprocess.serialize(), + })) { + return; + } + } + } + } + } +} + +impl SignerHandle { + pub async fn keys(&self) -> ThresholdKeys { + self.signer.read().await.keys.clone() + } + + pub async fn sign_transaction( + &self, + id: [u8; 32], + start: SystemTime, + tx: C::SignableTransaction, + eventuality: C::Eventuality, + ) { + let mut signer = self.signer.write().await; + + if let Some(txs) = signer.db.completed(id) { + debug!("SignTransaction order for ID we've already completed signing"); + + // Find the first instance we noted as having completed *and can still get from our node* + let mut tx = None; + let mut buf = >::Id::default(); + let tx_id_len = buf.as_ref().len(); + assert_eq!(txs.len() % tx_id_len, 0); + for id in 0 .. (txs.len() / tx_id_len) { + let start = id * tx_id_len; + buf.as_mut().copy_from_slice(&txs[start .. (start + tx_id_len)]); + if signer.coin.get_transaction(&buf).await.is_ok() { + tx = Some(buf); + break; + } + } + + // Fire the SignedTransaction event again + if let Some(tx) = tx { + if !signer.emit(SignerEvent::SignedTransaction { id, tx }) { + return; + } + } else { + warn!("completed signing {} yet couldn't get any of the completing TXs", hex::encode(id)); + } + return; + } + + let mut txn = signer.db.0.txn(); + signer.db.save_eventuality(&mut txn, id, eventuality); + txn.commit(); + + signer.signable.insert(id, (start, tx)); + } + + pub async fn handle(&self, msg: CoordinatorMessage) { + self.signer.write().await.handle(msg).await; + } +} diff --git a/processor/src/tests/addresses.rs b/processor/src/tests/addresses.rs new file mode 100644 index 00000000..a8fcd889 --- /dev/null +++ b/processor/src/tests/addresses.rs @@ -0,0 +1,98 @@ +use core::time::Duration; +use std::collections::HashMap; + +use rand_core::OsRng; + +use frost::{Participant, ThresholdKeys}; + +use tokio::time::timeout; + +use crate::{ + Plan, Db, + coins::{OutputType, Output, Block, Coin}, + scanner::{ScannerEvent, Scanner, ScannerHandle}, + tests::{util::db::MemDb, sign}, +}; + +async fn spend( + coin: &C, + keys: &HashMap>, + scanner: &mut ScannerHandle, + outputs: Vec, +) -> Vec { + let key = keys[&Participant::new(1).unwrap()].group_key(); + + let mut keys_txs = HashMap::new(); + for (i, keys) in keys { + keys_txs.insert( + *i, + ( + keys.clone(), + coin + .prepare_send( + keys.clone(), + coin.get_latest_block_number().await.unwrap() - C::CONFIRMATIONS, + // Send to a change output + Plan { key, inputs: outputs.clone(), payments: vec![], change: Some(key) }, + coin.get_fee().await, + ) + .await + .unwrap() + .0 + .unwrap(), + ), + ); + } + sign(coin.clone(), keys_txs).await; + + for _ in 0 .. 
C::CONFIRMATIONS { + coin.mine_block().await; + } + match timeout(Duration::from_secs(10), scanner.events.recv()).await.unwrap().unwrap() { + ScannerEvent::Outputs(this_key, _, outputs) => { + assert_eq!(this_key, key); + assert_eq!(outputs.len(), 1); + // Make sure this is actually a change output + assert_eq!(outputs[0].kind(), OutputType::Change); + outputs + } + } +} + +pub async fn test_addresses(coin: C) { + let mut keys = frost::tests::key_gen::<_, C::Curve>(&mut OsRng); + for (_, keys) in keys.iter_mut() { + C::tweak_keys(keys); + } + let key = keys[&Participant::new(1).unwrap()].group_key(); + + // Mine blocks so there's a confirmed block + for _ in 0 .. C::CONFIRMATIONS { + coin.mine_block().await; + } + + let db = MemDb::new(); + let (mut scanner, active_keys) = Scanner::new(coin.clone(), db.clone()); + assert!(active_keys.is_empty()); + scanner.rotate_key(coin.get_latest_block_number().await.unwrap(), key).await; + + // Receive funds to the branch address and make sure it's properly identified + let block_id = coin.test_send(C::branch_address(key)).await.id(); + + // Verify the Scanner picked them up + let outputs = + match timeout(Duration::from_secs(10), scanner.events.recv()).await.unwrap().unwrap() { + ScannerEvent::Outputs(this_key, block, outputs) => { + assert_eq!(this_key, key); + assert_eq!(block, block_id); + assert_eq!(outputs.len(), 1); + assert_eq!(outputs[0].kind(), OutputType::Branch); + outputs + } + }; + + // Spend the branch output, creating a change output and ensuring we actually get change + let outputs = spend(&coin, &keys, &mut scanner, outputs).await; + // Also test spending the change output + spend(&coin, &keys, &mut scanner, outputs).await; +} diff --git a/processor/src/tests/bitcoin.rs b/processor/src/tests/bitcoin.rs deleted file mode 100644 index dcf3aeed..00000000 --- a/processor/src/tests/bitcoin.rs +++ /dev/null @@ -1,12 +0,0 @@ -use crate::{ - coin::{Coin, Bitcoin}, - tests::test_send, -}; - -#[tokio::test] -async fn bitcoin() { - let bitcoin = Bitcoin::new("http://serai:seraidex@127.0.0.1:18443".to_string()).await; - bitcoin.fresh_chain().await; - let fee = bitcoin.get_fee().await; - test_send(bitcoin, fee).await; -} diff --git a/processor/src/tests/key_gen.rs b/processor/src/tests/key_gen.rs new file mode 100644 index 00000000..19cf2e4f --- /dev/null +++ b/processor/src/tests/key_gen.rs @@ -0,0 +1,136 @@ +use core::time::Duration; +use std::collections::HashMap; + +use zeroize::Zeroizing; + +use rand_core::{RngCore, OsRng}; + +use group::GroupEncoding; +use frost::{Participant, ThresholdParams, tests::clone_without}; + +use serai_client::validator_sets::primitives::{Session, ValidatorSetIndex, ValidatorSetInstance}; + +use messages::{SubstrateContext, key_gen::*}; +use crate::{ + coins::Coin, + key_gen::{KeyGenEvent, KeyGen}, + tests::util::db::MemDb, +}; + +const ID: KeyGenId = KeyGenId { + set: ValidatorSetInstance { session: Session(1), index: ValidatorSetIndex(2) }, + attempt: 3, +}; + +pub async fn test_key_gen() { + let mut entropies = HashMap::new(); + let mut dbs = HashMap::new(); + let mut key_gens = HashMap::new(); + for i in 1 ..= 5 { + let mut entropy = Zeroizing::new([0; 32]); + OsRng.fill_bytes(entropy.as_mut()); + entropies.insert(i, entropy); + dbs.insert(i, MemDb::new()); + key_gens.insert(i, KeyGen::::new(dbs[&i].clone(), entropies[&i].clone())); + } + + let mut all_commitments = HashMap::new(); + for i in 1 ..= 5 { + let key_gen = key_gens.get_mut(&i).unwrap(); + if let 
KeyGenEvent::ProcessorMessage(ProcessorMessage::Commitments { id, commitments }) = + key_gen + .handle(CoordinatorMessage::GenerateKey { + id: ID, + params: ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap()) + .unwrap(), + }) + .await + { + assert_eq!(id, ID); + all_commitments.insert(Participant::new(u16::try_from(i).unwrap()).unwrap(), commitments); + } else { + panic!("didn't get commitments back"); + } + } + + // 1 is rebuilt on every step + // 2 is rebuilt here + // 3 ... are rebuilt once, one at each of the following steps + let rebuild = |key_gens: &mut HashMap<_, _>, i| { + key_gens.remove(&i); + key_gens.insert(i, KeyGen::::new(dbs[&i].clone(), entropies[&i].clone())); + }; + rebuild(&mut key_gens, 1); + rebuild(&mut key_gens, 2); + + let mut all_shares = HashMap::new(); + for i in 1 ..= 5 { + let key_gen = key_gens.get_mut(&i).unwrap(); + let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); + if let KeyGenEvent::ProcessorMessage(ProcessorMessage::Shares { id, shares }) = key_gen + .handle(CoordinatorMessage::Commitments { + id: ID, + commitments: clone_without(&all_commitments, &i), + }) + .await + { + assert_eq!(id, ID); + all_shares.insert(i, shares); + } else { + panic!("didn't get shares back"); + } + } + + // Rebuild 1 and 3 + rebuild(&mut key_gens, 1); + rebuild(&mut key_gens, 3); + + let mut res = None; + for i in 1 ..= 5 { + let key_gen = key_gens.get_mut(&i).unwrap(); + let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); + if let KeyGenEvent::ProcessorMessage(ProcessorMessage::GeneratedKey { id, key }) = key_gen + .handle(CoordinatorMessage::Shares { + id: ID, + shares: all_shares + .iter() + .filter_map(|(l, shares)| if i == *l { None } else { Some((*l, shares[&i].clone())) }) + .collect(), + }) + .await + { + assert_eq!(id, ID); + if res.is_none() { + res = Some(key.clone()); + } + assert_eq!(res.as_ref().unwrap(), &key); + } else { + panic!("didn't get key back"); + } + } + + // Rebuild 1 and 4 + rebuild(&mut key_gens, 1); + rebuild(&mut key_gens, 4); + + for i in 1 ..= 5 { + let key_gen = key_gens.get_mut(&i).unwrap(); + if let KeyGenEvent::KeyConfirmed { activation_number, keys } = key_gen + .handle(CoordinatorMessage::ConfirmKey { + context: SubstrateContext { time: 0, coin_latest_block_number: 111 }, + id: ID, + }) + .await + { + assert_eq!(activation_number, 111); + assert_eq!( + keys.params(), + ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap()).unwrap() + ); + assert_eq!(keys.group_key().to_bytes().as_ref(), res.as_ref().unwrap()); + } else { + panic!("didn't get key back"); + } + } + tokio::time::sleep(Duration::from_secs(1)).await; +} diff --git a/processor/src/tests/literal/mod.rs b/processor/src/tests/literal/mod.rs new file mode 100644 index 00000000..78cf75a7 --- /dev/null +++ b/processor/src/tests/literal/mod.rs @@ -0,0 +1,43 @@ +#[cfg(feature = "bitcoin")] +mod bitcoin { + use crate::coins::Bitcoin; + + async fn bitcoin() -> Bitcoin { + let bitcoin = Bitcoin::new("http://serai:seraidex@127.0.0.1:18443".to_string()); + bitcoin.fresh_chain().await; + bitcoin + } + + test_coin!( + Bitcoin, + bitcoin, + bitcoin_key_gen, + bitcoin_scanner, + bitcoin_signer, + bitcoin_wallet, + bitcoin_addresses, + ); +} + +#[cfg(feature = "monero")] +mod monero { + use crate::coins::{Coin, Monero}; + + async fn monero() -> Monero { + let monero = Monero::new("http://127.0.0.1:18081".to_string()); + while monero.get_latest_block_number().await.unwrap() < 150 { + monero.mine_block().await; + } + 
monero + } + + test_coin!( + Monero, + monero, + monero_key_gen, + monero_scanner, + monero_signer, + monero_wallet, + monero_addresses, + ); +} diff --git a/processor/src/tests/mod.rs b/processor/src/tests/mod.rs index 4f07d090..8a41fd52 100644 --- a/processor/src/tests/mod.rs +++ b/processor/src/tests/mod.rs @@ -1,5 +1,99 @@ -mod send; -pub(crate) use send::test_send; +pub(crate) mod util; -mod bitcoin; -mod monero; +mod key_gen; +pub(crate) use key_gen::test_key_gen; + +mod scanner; +pub(crate) use scanner::test_scanner; + +mod signer; +pub(crate) use signer::{sign, test_signer}; + +mod wallet; +pub(crate) use wallet::test_wallet; + +mod addresses; +pub(crate) use addresses::test_addresses; + +// Effective Once +lazy_static::lazy_static! { + static ref INIT_LOGGER: () = env_logger::init(); +} + +#[macro_export] +macro_rules! sequential { + () => { + lazy_static::lazy_static! { + static ref SEQUENTIAL: tokio::sync::Mutex<()> = tokio::sync::Mutex::new(()); + } + }; +} + +#[macro_export] +macro_rules! async_sequential { + ($(async fn $name: ident() $body: block)*) => { + $( + #[tokio::test] + async fn $name() { + *$crate::tests::INIT_LOGGER; + let guard = SEQUENTIAL.lock().await; + let local = tokio::task::LocalSet::new(); + local.run_until(async move { + if let Err(err) = tokio::task::spawn_local(async move { $body }).await { + drop(guard); + Err(err).unwrap() + } + }).await; + } + )* + } +} + +#[macro_export] +macro_rules! test_coin { + ( + $C: ident, + $coin: ident, + $key_gen: ident, + $scanner: ident, + $signer: ident, + $wallet: ident, + $addresses: ident, + ) => { + use $crate::tests::{test_key_gen, test_scanner, test_signer, test_wallet, test_addresses}; + + // This doesn't interact with a node and accordingly doesn't need to be run sequentially + #[tokio::test] + async fn $key_gen() { + test_key_gen::<$C>().await; + } + + sequential!(); + + async_sequential! { + async fn $scanner() { + test_scanner($coin().await).await; + } + } + + async_sequential! { + async fn $signer() { + test_signer($coin().await).await; + } + } + + async_sequential! { + async fn $wallet() { + test_wallet($coin().await).await; + } + } + + async_sequential! { + async fn $addresses() { + test_addresses($coin().await).await; + } + } + }; +} + +mod literal; diff --git a/processor/src/tests/monero.rs b/processor/src/tests/monero.rs deleted file mode 100644 index 68b8a621..00000000 --- a/processor/src/tests/monero.rs +++ /dev/null @@ -1,11 +0,0 @@ -use crate::{ - coin::{Coin, Monero}, - tests::test_send, -}; - -#[tokio::test] -async fn monero() { - let monero = Monero::new("http://127.0.0.1:18081".to_string()).await; - let fee = monero.get_fee().await; - test_send(monero, fee).await; -} diff --git a/processor/src/tests/scanner.rs b/processor/src/tests/scanner.rs new file mode 100644 index 00000000..6aaffcb1 --- /dev/null +++ b/processor/src/tests/scanner.rs @@ -0,0 +1,72 @@ +use core::time::Duration; +use std::sync::{Arc, Mutex}; + +use rand_core::OsRng; + +use frost::Participant; + +use tokio::time::timeout; + +use crate::{ + coins::{OutputType, Output, Block, Coin}, + scanner::{ScannerEvent, Scanner, ScannerHandle}, + tests::util::db::MemDb, +}; + +pub async fn test_scanner(coin: C) { + let mut keys = + frost::tests::key_gen::<_, C::Curve>(&mut OsRng).remove(&Participant::new(1).unwrap()).unwrap(); + C::tweak_keys(&mut keys); + + // Mine blocks so there's a confirmed block + for _ in 0 .. 
C::CONFIRMATIONS { + coin.mine_block().await; + } + + let first = Arc::new(Mutex::new(true)); + let db = MemDb::new(); + let new_scanner = || async { + let (scanner, active_keys) = Scanner::new(coin.clone(), db.clone()); + let mut first = first.lock().unwrap(); + if *first { + assert!(active_keys.is_empty()); + scanner.rotate_key(coin.get_latest_block_number().await.unwrap(), keys.group_key()).await; + *first = false; + } else { + assert_eq!(active_keys.len(), 1); + } + scanner + }; + let scanner = new_scanner().await; + + // Receive funds + let block_id = coin.test_send(C::address(keys.group_key())).await.id(); + + // Verify the Scanner picked them up + let verify_event = |mut scanner: ScannerHandle| async { + let outputs = + match timeout(Duration::from_secs(10), scanner.events.recv()).await.unwrap().unwrap() { + ScannerEvent::Outputs(key, block, outputs) => { + assert_eq!(key, keys.group_key()); + assert_eq!(block, block_id); + assert_eq!(outputs.len(), 1); + assert_eq!(outputs[0].kind(), OutputType::External); + outputs + } + }; + (scanner, outputs) + }; + let (mut scanner, outputs) = verify_event(scanner).await; + + // Create a new scanner off the current DB and verify it re-emits the above events + verify_event(new_scanner().await).await; + + // Acknowledge the block + assert_eq!(scanner.ack_block(keys.group_key(), block_id.clone()).await, outputs); + + // There should be no more events + assert!(timeout(Duration::from_secs(10), scanner.events.recv()).await.is_err()); + + // Create a new scanner off the current DB and make sure it also does nothing + assert!(timeout(Duration::from_secs(10), new_scanner().await.events.recv()).await.is_err()); +} diff --git a/processor/src/tests/send.rs b/processor/src/tests/send.rs deleted file mode 100644 index f493d3af..00000000 --- a/processor/src/tests/send.rs +++ /dev/null @@ -1,113 +0,0 @@ -use std::{ - sync::{Arc, RwLock}, - collections::HashMap, -}; - -use async_trait::async_trait; - -use rand_core::OsRng; - -use frost::Participant; - -use crate::{ - NetworkError, Network, - coin::Coin, - wallet::{WalletKeys, MemCoinDb, Wallet}, -}; - -#[derive(Clone)] -struct LocalNetwork { - i: Participant, - size: u16, - round: usize, - #[allow(clippy::type_complexity)] - rounds: Arc>>>>, -} - -impl LocalNetwork { - fn new(size: u16) -> Vec { - let rounds = Arc::new(RwLock::new(vec![])); - let mut res = vec![]; - for i in 1 ..= size { - res.push(LocalNetwork { - i: Participant::new(i).unwrap(), - size, - round: 0, - rounds: rounds.clone(), - }); - } - res - } -} - -#[async_trait] -impl Network for LocalNetwork { - async fn round(&mut self, data: Vec) -> Result>, NetworkError> { - { - let mut rounds = self.rounds.write().unwrap(); - if rounds.len() == self.round { - rounds.push(HashMap::new()); - } - rounds[self.round].insert(self.i, data); - } - - while { - let read = self.rounds.try_read().unwrap(); - read[self.round].len() != usize::from(self.size) - } { - tokio::task::yield_now().await; - } - - let mut res = self.rounds.try_read().unwrap()[self.round].clone(); - res.remove(&self.i); - self.round += 1; - Ok(res) - } -} - -pub async fn test_send(coin: C, fee: C::Fee) { - // Mine blocks so there's a confirmed block - coin.mine_block().await; - let latest = coin.get_latest_block_number().await.unwrap(); - - let mut keys = frost::tests::key_gen::<_, C::Curve>(&mut OsRng); - let threshold = keys[&Participant::new(1).unwrap()].params().t(); - let mut networks = LocalNetwork::new(threshold); - - let mut wallets = vec![]; - for i in 1 ..= threshold { - let mut 
wallet = Wallet::new(MemCoinDb::new(), coin.clone()); - wallet.acknowledge_block(0, latest); - wallet.add_keys(&WalletKeys::new(keys.remove(&Participant::new(i).unwrap()).unwrap(), 0)); - wallets.push(wallet); - } - - // Get the chain to a length where blocks have sufficient confirmations - while (latest + (C::CONFIRMATIONS - 1)) > coin.get_latest_block_number().await.unwrap() { - coin.mine_block().await; - } - - for wallet in wallets.iter_mut() { - // Poll to activate the keys - wallet.poll().await.unwrap(); - } - - coin.test_send(wallets[0].address()).await; - - let mut futures = vec![]; - for (network, wallet) in networks.iter_mut().zip(wallets.iter_mut()) { - wallet.poll().await.unwrap(); - - let latest = coin.get_latest_block_number().await.unwrap(); - wallet.acknowledge_block(1, latest - (C::CONFIRMATIONS - 1)); - let signable = wallet - .prepare_sends(1, vec![(wallet.address(), 100000000)], fee) - .await - .unwrap() - .1 - .swap_remove(0); - futures.push(wallet.attempt_send(network, signable)); - } - - println!("{:?}", hex::encode(futures::future::join_all(futures).await.swap_remove(0).unwrap())); -} diff --git a/processor/src/tests/signer.rs b/processor/src/tests/signer.rs new file mode 100644 index 00000000..6f9372e3 --- /dev/null +++ b/processor/src/tests/signer.rs @@ -0,0 +1,187 @@ +use std::{ + time::{Duration, SystemTime}, + collections::HashMap, +}; + +use rand_core::OsRng; + +use group::GroupEncoding; +use frost::{ + Participant, ThresholdKeys, + dkg::tests::{key_gen, clone_without}, +}; + +use tokio::time::timeout; + +use messages::sign::*; +use crate::{ + Payment, Plan, + coins::{Output, Transaction, Coin}, + signer::{SignerEvent, Signer}, + tests::util::db::MemDb, +}; + +#[allow(clippy::type_complexity)] +pub async fn sign( + coin: C, + mut keys_txs: HashMap< + Participant, + (ThresholdKeys, (C::SignableTransaction, C::Eventuality)), + >, +) -> >::Id { + let actual_id = SignId { + key: keys_txs[&Participant::new(1).unwrap()].0.group_key().to_bytes().as_ref().to_vec(), + id: [0xaa; 32], + attempt: 0, + }; + + let signing_set = actual_id.signing_set(&keys_txs[&Participant::new(1).unwrap()].0.params()); + let mut keys = HashMap::new(); + let mut txs = HashMap::new(); + for (i, (these_keys, this_tx)) in keys_txs.drain() { + assert_eq!(actual_id.signing_set(&these_keys.params()), signing_set); + keys.insert(i, these_keys); + txs.insert(i, this_tx); + } + + let mut signers = HashMap::new(); + for i in 1 ..= keys.len() { + let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); + signers.insert(i, Signer::new(MemDb::new(), coin.clone(), keys.remove(&i).unwrap())); + } + + let start = SystemTime::now(); + for i in 1 ..= signers.len() { + let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); + let (tx, eventuality) = txs.remove(&i).unwrap(); + signers[&i].sign_transaction(actual_id.id, start, tx, eventuality).await; + } + + let mut preprocesses = HashMap::new(); + for i in &signing_set { + if let Some(SignerEvent::ProcessorMessage(ProcessorMessage::Preprocess { id, preprocess })) = + signers.get_mut(i).unwrap().events.recv().await + { + assert_eq!(id, actual_id); + preprocesses.insert(*i, preprocess); + } else { + panic!("didn't get preprocess back"); + } + } + + let mut shares = HashMap::new(); + for i in &signing_set { + signers[i] + .handle(CoordinatorMessage::Preprocesses { + id: actual_id.clone(), + preprocesses: clone_without(&preprocesses, i), + }) + .await; + if let Some(SignerEvent::ProcessorMessage(ProcessorMessage::Share { id, share })) = + 
signers.get_mut(i).unwrap().events.recv().await + { + assert_eq!(id, actual_id); + shares.insert(*i, share); + } else { + panic!("didn't get share back"); + } + } + + let mut tx_id = None; + for i in &signing_set { + signers[i] + .handle(CoordinatorMessage::Shares { + id: actual_id.clone(), + shares: clone_without(&shares, i), + }) + .await; + if let Some(SignerEvent::SignedTransaction { id, tx }) = + signers.get_mut(i).unwrap().events.recv().await + { + assert_eq!(id, actual_id.id); + if tx_id.is_none() { + tx_id = Some(tx.clone()); + } + assert_eq!(tx_id, Some(tx)); + } else { + panic!("didn't get TX back"); + } + } + + // Make sure the signers not included didn't do anything + let mut excluded = (1 ..= signers.len()) + .map(|i| Participant::new(u16::try_from(i).unwrap()).unwrap()) + .collect::>(); + for i in signing_set { + excluded.remove(excluded.binary_search(&i).unwrap()); + } + for i in excluded { + assert!(timeout( + Duration::from_secs(1), + signers.get_mut(&Participant::new(u16::try_from(i).unwrap()).unwrap()).unwrap().events.recv() + ) + .await + .is_err()); + } + + tx_id.unwrap() +} + +pub async fn test_signer(coin: C) { + let mut keys = key_gen(&mut OsRng); + for (_, keys) in keys.iter_mut() { + C::tweak_keys(keys); + } + let key = keys[&Participant::new(1).unwrap()].group_key(); + + let outputs = coin.get_outputs(&coin.test_send(C::address(key)).await, key).await.unwrap(); + let sync_block = coin.get_latest_block_number().await.unwrap() - C::CONFIRMATIONS; + let fee = coin.get_fee().await; + + let amount = 2 * C::DUST; + let mut keys_txs = HashMap::new(); + let mut eventualities = vec![]; + for (i, keys) in keys.drain() { + let (signable, eventuality) = coin + .prepare_send( + keys.clone(), + sync_block, + Plan { + key, + inputs: outputs.clone(), + payments: vec![Payment { address: C::address(key), data: None, amount }], + change: Some(key), + }, + fee, + ) + .await + .unwrap() + .0 + .unwrap(); + + eventualities.push(eventuality.clone()); + keys_txs.insert(i, (keys, (signable, eventuality))); + } + + // The signer may not publish the TX if it has a connection error + // It doesn't fail in this case + let txid = sign(coin.clone(), keys_txs).await; + let tx = coin.get_transaction(&txid).await.unwrap(); + assert_eq!(tx.id(), txid); + // Mine a block, and scan it, to ensure that the TX actually made it on chain + coin.mine_block().await; + let outputs = coin + .get_outputs(&coin.get_block(coin.get_latest_block_number().await.unwrap()).await.unwrap(), key) + .await + .unwrap(); + assert_eq!(outputs.len(), 2); + // Adjust the amount for the fees + let amount = amount - tx.fee(&coin).await; + // Check either output since Monero will randomize its output order + assert!((outputs[0].amount() == amount) || (outputs[1].amount() == amount)); + + // Check the eventualities pass + for eventuality in eventualities { + assert!(coin.confirm_completion(&eventuality, &tx)); + } +} diff --git a/processor/src/tests/util/db.rs b/processor/src/tests/util/db.rs new file mode 100644 index 00000000..6fc2d6ed --- /dev/null +++ b/processor/src/tests/util/db.rs @@ -0,0 +1,42 @@ +use std::{ + sync::{Arc, RwLock}, + collections::HashMap, +}; + +use crate::{DbTxn, Db}; + +#[derive(Clone, Debug)] +pub struct MemDb(Arc, Vec>>>); +impl MemDb { + pub(crate) fn new() -> MemDb { + MemDb(Arc::new(RwLock::new(HashMap::new()))) + } +} +impl Default for MemDb { + fn default() -> MemDb { + MemDb::new() + } +} + +impl DbTxn for MemDb { + fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) { + 
self.0.write().unwrap().insert(key.as_ref().to_vec(), value.as_ref().to_vec()); + } + fn get(&self, key: impl AsRef<[u8]>) -> Option> { + self.0.read().unwrap().get(key.as_ref()).cloned() + } + fn del(&mut self, key: impl AsRef<[u8]>) { + self.0.write().unwrap().remove(key.as_ref()); + } + fn commit(self) {} +} + +impl Db for MemDb { + type Transaction = MemDb; + fn txn(&mut self) -> MemDb { + Self(self.0.clone()) + } + fn get(&self, key: impl AsRef<[u8]>) -> Option> { + self.0.read().unwrap().get(key.as_ref()).cloned() + } +} diff --git a/processor/src/tests/util/mod.rs b/processor/src/tests/util/mod.rs new file mode 100644 index 00000000..eaaec036 --- /dev/null +++ b/processor/src/tests/util/mod.rs @@ -0,0 +1 @@ +pub(crate) mod db; diff --git a/processor/src/tests/wallet.rs b/processor/src/tests/wallet.rs new file mode 100644 index 00000000..700b744a --- /dev/null +++ b/processor/src/tests/wallet.rs @@ -0,0 +1,108 @@ +use std::{time::Duration, collections::HashMap}; + +use rand_core::OsRng; + +use frost::{Participant, dkg::tests::key_gen}; + +use tokio::time::timeout; + +use crate::{ + Payment, Plan, + coins::{Output, Transaction, Block, Coin}, + scanner::{ScannerEvent, Scanner}, + scheduler::Scheduler, + tests::{util::db::MemDb, sign}, +}; + +// Tests the Scanner, Scheduler, and Signer together +pub async fn test_wallet(coin: C) { + let mut keys = key_gen(&mut OsRng); + for (_, keys) in keys.iter_mut() { + C::tweak_keys(keys); + } + let key = keys[&Participant::new(1).unwrap()].group_key(); + + let (mut scanner, active_keys) = Scanner::new(coin.clone(), MemDb::new()); + assert!(active_keys.is_empty()); + let (block_id, outputs) = { + scanner.rotate_key(coin.get_latest_block_number().await.unwrap(), key).await; + + let block_id = coin.test_send(C::address(key)).await.id(); + + match timeout(Duration::from_secs(10), scanner.events.recv()).await.unwrap().unwrap() { + ScannerEvent::Outputs(this_key, block, outputs) => { + assert_eq!(this_key, key); + assert_eq!(block, block_id); + assert_eq!(outputs.len(), 1); + (block_id, outputs) + } + } + }; + + let mut scheduler = Scheduler::new(key); + // Add these outputs, which should return no plans + assert!(scheduler.add_outputs(outputs.clone()).is_empty()); + + let amount = 2 * C::DUST; + let plans = scheduler.schedule(vec![Payment { address: C::address(key), data: None, amount }]); + assert_eq!( + plans, + vec![Plan { + key, + inputs: outputs, + payments: vec![Payment { address: C::address(key), data: None, amount }], + change: Some(key), + }] + ); + + { + let mut buf = vec![]; + plans[0].write(&mut buf).unwrap(); + assert_eq!(plans[0], Plan::::read::<&[u8]>(&mut buf.as_ref()).unwrap()); + } + + // Execute the plan + let fee = coin.get_fee().await; + let mut keys_txs = HashMap::new(); + let mut eventualities = vec![]; + for (i, keys) in keys.drain() { + let (signable, eventuality) = coin + .prepare_send(keys.clone(), coin.get_block_number(&block_id).await, plans[0].clone(), fee) + .await + .unwrap() + .0 + .unwrap(); + + eventualities.push(eventuality.clone()); + keys_txs.insert(i, (keys, (signable, eventuality))); + } + + let txid = sign(coin.clone(), keys_txs).await; + let tx = coin.get_transaction(&txid).await.unwrap(); + coin.mine_block().await; + let block_number = coin.get_latest_block_number().await.unwrap(); + let block = coin.get_block(block_number).await.unwrap(); + let outputs = coin.get_outputs(&block, key).await.unwrap(); + assert_eq!(outputs.len(), 2); + let amount = amount - tx.fee(&coin).await; + assert!((outputs[0].amount() == 
amount) || (outputs[1].amount() == amount)); + + for eventuality in eventualities { + assert!(coin.confirm_completion(&eventuality, &tx)); + } + + for _ in 1 .. C::CONFIRMATIONS { + coin.mine_block().await; + } + + match timeout(Duration::from_secs(10), scanner.events.recv()).await.unwrap().unwrap() { + ScannerEvent::Outputs(this_key, block_id, these_outputs) => { + assert_eq!(this_key, key); + assert_eq!(block_id, block.id()); + assert_eq!(these_outputs, outputs); + } + } + + // Check the Scanner DB can reload the outputs + assert_eq!(scanner.ack_block(key, block.id()).await, outputs); +} diff --git a/processor/src/wallet.rs b/processor/src/wallet.rs deleted file mode 100644 index 73a90bb9..00000000 --- a/processor/src/wallet.rs +++ /dev/null @@ -1,385 +0,0 @@ -use std::collections::HashMap; - -use rand_core::OsRng; - -use group::GroupEncoding; - -use transcript::{Transcript, RecommendedTranscript}; -use frost::{ - curve::{Ciphersuite, Curve}, - FrostError, ThresholdKeys, - sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine}, -}; - -use crate::{ - coin::{CoinError, Output, Coin}, - SignError, Network, -}; - -pub struct WalletKeys { - keys: ThresholdKeys, - creation_block: usize, -} - -impl WalletKeys { - pub fn new(keys: ThresholdKeys, creation_block: usize) -> WalletKeys { - WalletKeys { keys, creation_block } - } - - // Bind this key to a specific network by applying an additive offset - // While it would be fine to just C::ID, including the group key creates distinct - // offsets instead of static offsets. Under a statically offset system, a BTC key could - // have X subtracted to find the potential group key, and then have Y added to find the - // potential ETH group key. While this shouldn't be an issue, as this isn't a private - // system, there are potentially other benefits to binding this to a specific group key - // It's no longer possible to influence group key gen to key cancel without breaking the hash - // function as well, although that degree of influence means key gen is broken already - fn bind(&self, chain: &[u8]) -> ThresholdKeys { - const DST: &[u8] = b"Serai Processor Wallet Chain Bind"; - let mut transcript = RecommendedTranscript::new(DST); - transcript.append_message(b"chain", chain); - transcript.append_message(b"curve", C::ID); - transcript.append_message(b"group_key", self.keys.group_key().to_bytes()); - self.keys.offset(::hash_to_F(DST, &transcript.challenge(b"offset"))) - } -} - -pub trait CoinDb { - // Set a block as scanned to - fn scanned_to_block(&mut self, block: usize); - // Acknowledge a specific block number as part of a canonical block - fn acknowledge_block(&mut self, canonical: usize, block: usize); - - // Adds an output to the DB. 
Returns false if the output was already added - fn add_output(&mut self, output: &O) -> bool; - - // Block this coin has been scanned to (inclusive) - fn scanned_block(&self) -> usize; - // Acknowledged block for a given canonical block - fn acknowledged_block(&self, canonical: usize) -> usize; -} - -pub struct MemCoinDb { - // Block number of the block this coin has been scanned to - scanned_block: usize, - // Acknowledged block for a given canonical block - acknowledged_blocks: HashMap, - outputs: HashMap, Vec>, -} - -impl MemCoinDb { - pub fn new() -> MemCoinDb { - MemCoinDb { scanned_block: 0, acknowledged_blocks: HashMap::new(), outputs: HashMap::new() } - } -} - -impl CoinDb for MemCoinDb { - fn scanned_to_block(&mut self, block: usize) { - self.scanned_block = block; - } - - fn acknowledge_block(&mut self, canonical: usize, block: usize) { - debug_assert!(!self.acknowledged_blocks.contains_key(&canonical)); - self.acknowledged_blocks.insert(canonical, block); - } - - fn add_output(&mut self, output: &O) -> bool { - // This would be insecure as we're indexing by ID and this will replace the output as a whole - // Multiple outputs may have the same ID in edge cases such as Monero, where outputs are ID'd - // by output key, not by hash + index - // self.outputs.insert(output.id(), output).is_some() - let id = output.id().as_ref().to_vec(); - if self.outputs.contains_key(&id) { - return false; - } - self.outputs.insert(id, output.serialize()); - true - } - - fn scanned_block(&self) -> usize { - self.scanned_block - } - - fn acknowledged_block(&self, canonical: usize) -> usize { - self.acknowledged_blocks[&canonical] - } -} - -fn select_inputs(inputs: &mut Vec) -> (Vec, u64) { - // Sort to ensure determinism. Inefficient, yet produces the most legible code to be optimized - // later - inputs.sort_by_key(|a| a.amount()); - - // Select the maximum amount of outputs possible - let res = inputs.split_off(inputs.len() - C::MAX_INPUTS.min(inputs.len())); - // Calculate their sum value, minus the fee needed to spend them - let sum = res.iter().map(|input| input.amount()).sum(); - // sum -= C::MAX_FEE; // TODO - (res, sum) -} - -fn select_outputs( - payments: &mut Vec<(C::Address, u64)>, - value: &mut u64, -) -> Vec<(C::Address, u64)> { - // Prioritize large payments which will most efficiently use large inputs - payments.sort_by(|a, b| a.1.cmp(&b.1)); - - // Grab the payments this will successfully fund - let mut outputs = vec![]; - let mut p = payments.len(); - while p != 0 { - p -= 1; - if *value >= payments[p].1 { - *value -= payments[p].1; - // Swap remove will either pop the tail or insert an element that wouldn't fit, making it - // always safe to move past - outputs.push(payments.swap_remove(p)); - } - // Doesn't break in this else case as a smaller payment may still fit - } - - outputs -} - -// Optimizes on the expectation selected/inputs are sorted from lowest value to highest -fn refine_inputs( - selected: &mut Vec, - inputs: &mut Vec, - mut remaining: u64, -) { - // Drop unused inputs - let mut s = 0; - while remaining > selected[s].amount() { - remaining -= selected[s].amount(); - s += 1; - } - // Add them back to the inputs pool - inputs.extend(selected.drain(.. s)); - - // Replace large inputs with smaller ones - for s in (0 .. selected.len()).rev() { - for input in inputs.iter_mut() { - // Doesn't break due to inputs no longer being sorted - // This could be made faster if we prioritized small input usage over transaction size/fees - // TODO: Consider. 
This would implicitly consolidate inputs which would be advantageous - if selected[s].amount() < input.amount() { - continue; - } - - // If we can successfully replace this input, do so - let diff = selected[s].amount() - input.amount(); - if remaining > diff { - remaining -= diff; - - let old = selected[s].clone(); - selected[s] = input.clone(); - *input = old; - } - } - } -} - -#[allow(clippy::type_complexity)] -fn select_inputs_outputs( - inputs: &mut Vec, - outputs: &mut Vec<(C::Address, u64)>, -) -> (Vec, Vec<(C::Address, u64)>) { - if inputs.is_empty() { - return (vec![], vec![]); - } - - let (mut selected, mut value) = select_inputs::(inputs); - - let outputs = select_outputs::(outputs, &mut value); - if outputs.is_empty() { - inputs.extend(selected); - return (vec![], vec![]); - } - - refine_inputs::(&mut selected, inputs, value); - (selected, outputs) -} - -#[allow(clippy::type_complexity)] -pub struct Wallet { - db: D, - coin: C, - keys: Vec<(ThresholdKeys, Vec)>, - pending: Vec<(usize, ThresholdKeys)>, -} - -impl Wallet { - pub fn new(db: D, coin: C) -> Wallet { - Wallet { db, coin, keys: vec![], pending: vec![] } - } - - pub fn scanned_block(&self) -> usize { - self.db.scanned_block() - } - pub fn acknowledge_block(&mut self, canonical: usize, block: usize) { - self.db.acknowledge_block(canonical, block); - } - pub fn acknowledged_block(&self, canonical: usize) -> usize { - self.db.acknowledged_block(canonical) - } - - pub fn add_keys(&mut self, keys: &WalletKeys) { - let creation_block = keys.creation_block; - let mut keys = keys.bind(C::ID); - self.coin.tweak_keys(&mut keys); - self.pending.push((self.acknowledged_block(creation_block), keys)); - } - - pub fn address(&self) -> C::Address { - self.coin.address(self.keys[self.keys.len() - 1].0.group_key()) - } - - pub async fn poll(&mut self) -> Result<(), CoinError> { - if self.coin.get_latest_block_number().await? < (C::CONFIRMATIONS - 1) { - return Ok(()); - } - let confirmed_block = self.coin.get_latest_block_number().await? - (C::CONFIRMATIONS - 1); - - // Will never scan the genesis block, which shouldn't be an issue - for b in (self.scanned_block() + 1) ..= confirmed_block { - // If any keys activated at this block, shift them over - { - let mut k = 0; - while k < self.pending.len() { - // TODO - //if b < self.pending[k].0 { - //} else if b == self.pending[k].0 { - if b <= self.pending[k].0 { - self.keys.push((self.pending.swap_remove(k).1, vec![])); - } else { - k += 1; - } - } - } - - let block = self.coin.get_block(b).await?; - for (keys, outputs) in self.keys.iter_mut() { - outputs.extend( - self - .coin - .get_outputs(&block, keys.group_key()) - .await? - .drain(..) 
- .filter(|output| self.db.add_output(output)), - ); - } - - self.db.scanned_to_block(b); - } - - Ok(()) - } - - // This should be called whenever new outputs are received, meaning there was a new block - // If these outputs were received and sent to Substrate, it should be called after they're - // included in a block and we have results to act on - // If these outputs weren't sent to Substrate (change), it should be called immediately - // with all payments still queued from the last call - pub async fn prepare_sends( - &mut self, - canonical: usize, - mut payments: Vec<(C::Address, u64)>, - fee: C::Fee, - ) -> Result<(Vec<(C::Address, u64)>, Vec), CoinError> { - if payments.is_empty() { - return Ok((vec![], vec![])); - } - - let acknowledged_block = self.acknowledged_block(canonical); - - // TODO: Log schedule outputs when MAX_OUTPUTS is lower than payments.len() - // Payments is the first set of TXs in the schedule - // As each payment re-appears, let mut payments = schedule[payment] where the only input is - // the source payment - // let (mut payments, schedule) = schedule(payments); - - let mut txs = vec![]; - for (keys, outputs) in self.keys.iter_mut() { - while !outputs.is_empty() { - let (inputs, outputs) = select_inputs_outputs::(outputs, &mut payments); - // If we can no longer process any payments, move to the next set of keys - if outputs.is_empty() { - debug_assert_eq!(inputs.len(), 0); - break; - } - - // Create the transcript for this transaction - let mut transcript = RecommendedTranscript::new(b"Serai Processor Wallet Send"); - transcript - .append_message(b"canonical_block", u64::try_from(canonical).unwrap().to_le_bytes()); - transcript.append_message( - b"acknowledged_block", - u64::try_from(acknowledged_block).unwrap().to_le_bytes(), - ); - transcript.append_message(b"index", u64::try_from(txs.len()).unwrap().to_le_bytes()); - - let tx = self - .coin - .prepare_send( - keys.clone(), - transcript, - acknowledged_block, - inputs, - &outputs, - Some(keys.group_key()), - fee, - ) - .await?; - // self.db.save_tx(tx) // TODO - txs.push(tx); - } - } - - Ok((payments, txs)) - } - - pub async fn attempt_send( - &mut self, - network: &mut N, - prepared: C::SignableTransaction, - ) -> Result, SignError> { - let attempt = self.coin.attempt_send(prepared).await.map_err(SignError::CoinError)?; - - let (attempt, commitments) = attempt.preprocess(&mut OsRng); - let commitments = network - .round(commitments.serialize()) - .await - .map_err(SignError::NetworkError)? - .drain() - .map(|(validator, preprocess)| { - Ok(( - validator, - attempt - .read_preprocess::<&[u8]>(&mut preprocess.as_ref()) - .map_err(|_| SignError::FrostError(FrostError::InvalidPreprocess(validator)))?, - )) - }) - .collect::, _>>()?; - - let (attempt, share) = attempt.sign(commitments, b"").map_err(SignError::FrostError)?; - let shares = network - .round(share.serialize()) - .await - .map_err(SignError::NetworkError)? 
-
-  pub async fn attempt_send<N: Network>(
-    &mut self,
-    network: &mut N,
-    prepared: C::SignableTransaction,
-  ) -> Result<Vec<u8>, SignError> {
-    let attempt = self.coin.attempt_send(prepared).await.map_err(SignError::CoinError)?;
-
-    let (attempt, commitments) = attempt.preprocess(&mut OsRng);
-    let commitments = network
-      .round(commitments.serialize())
-      .await
-      .map_err(SignError::NetworkError)?
-      .drain()
-      .map(|(validator, preprocess)| {
-        Ok((
-          validator,
-          attempt
-            .read_preprocess::<&[u8]>(&mut preprocess.as_ref())
-            .map_err(|_| SignError::FrostError(FrostError::InvalidPreprocess(validator)))?,
-        ))
-      })
-      .collect::<Result<HashMap<_, _>, _>>()?;
-
-    let (attempt, share) = attempt.sign(commitments, b"").map_err(SignError::FrostError)?;
-    let shares = network
-      .round(share.serialize())
-      .await
-      .map_err(SignError::NetworkError)?
-      .drain()
-      .map(|(validator, share)| {
-        Ok((
-          validator,
-          attempt
-            .read_share::<&[u8]>(&mut share.as_ref())
-            .map_err(|_| SignError::FrostError(FrostError::InvalidShare(validator)))?,
-        ))
-      })
-      .collect::<Result<HashMap<_, _>, _>>()?;
-
-    let tx = attempt.complete(shares).map_err(SignError::FrostError)?;
-
-    self.coin.publish_transaction(&tx).await.map_err(SignError::CoinError)
-  }
-}
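attempt_send above drives two network rounds — serialized FROST preprocesses, then signature shares — with each validator's bytes deserialized via read_preprocess/read_share so a malformed message is attributed to the validator who sent it. The Network trait itself isn't part of this diff; a toy in-memory stand-in with the assumed shape of its round (broadcast ours, receive everyone else's keyed by participant index) might look like:

```rust
use std::collections::HashMap;

// Hypothetical, simplified mirror of the processor's network contract;
// assumes tokio for the async runtime.
struct LocalNetwork {
  // Messages the other validators "sent" this round, keyed by their index
  queued: HashMap<u16, Vec<u8>>,
}

impl LocalNetwork {
  // Broadcast our message, then yield every other participant's message
  async fn round(&mut self, ours: Vec<u8>) -> Result<HashMap<u16, Vec<u8>>, ()> {
    // A real implementation would gossip `ours` and await the peers; here the
    // peers' messages were provided up front
    let _ = ours;
    Ok(std::mem::take(&mut self.queued))
  }
}

#[tokio::main]
async fn main() {
  let mut network = LocalNetwork { queued: HashMap::from([(2, b"preprocess".to_vec())]) };
  let preprocesses = network.round(b"our preprocess".to_vec()).await.unwrap();
  assert_eq!(preprocesses[&2], b"preprocess".to_vec());
}
```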
diff --git a/substrate/in-instructions/client/src/lib.rs b/substrate/in-instructions/client/src/lib.rs
index 1571f999..f5114373 100644
--- a/substrate/in-instructions/client/src/lib.rs
+++ b/substrate/in-instructions/client/src/lib.rs
@@ -8,7 +8,7 @@ use jsonrpsee_http_client::HttpClientBuilder;
 
 use sp_inherents::{Error, InherentData, InherentIdentifier};
 
-use in_instructions_pallet::{INHERENT_IDENTIFIER, Updates, InherentError};
+use in_instructions_pallet::{primitives::Updates, INHERENT_IDENTIFIER, InherentError};
 
 pub struct InherentDataProvider;
 impl InherentDataProvider {
diff --git a/substrate/in-instructions/pallet/Cargo.toml b/substrate/in-instructions/pallet/Cargo.toml
index c8c3657d..3c7b03c8 100644
--- a/substrate/in-instructions/pallet/Cargo.toml
+++ b/substrate/in-instructions/pallet/Cargo.toml
@@ -17,9 +17,6 @@ thiserror = { version = "1", optional = true }
 scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive", "max-encoded-len"] }
 scale-info = { version = "2", default-features = false, features = ["derive"] }
 
-serde = { version = "1", optional = true }
-
-sp-std = { git = "https://github.com/serai-dex/substrate", default-features = false }
 sp-inherents = { git = "https://github.com/serai-dex/substrate", default-features = false }
 sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false }
@@ -38,9 +35,6 @@ std = [
   "scale/std",
   "scale-info/std",
 
-  "serde",
-
-  "sp-std/std",
   "sp-inherents/std",
   "sp-runtime/std",
diff --git a/substrate/in-instructions/pallet/src/lib.rs b/substrate/in-instructions/pallet/src/lib.rs
index ab48d564..1daa00d7 100644
--- a/substrate/in-instructions/pallet/src/lib.rs
+++ b/substrate/in-instructions/pallet/src/lib.rs
@@ -3,12 +3,7 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 
 use scale::{Encode, Decode};
-use scale_info::TypeInfo;
 
-#[cfg(feature = "std")]
-use serde::{Serialize, Deserialize};
-
-use sp_std::vec::Vec;
 
 use sp_inherents::{InherentIdentifier, IsFatalError};
 use sp_runtime::RuntimeDebug;
@@ -16,29 +11,10 @@ use sp_runtime::RuntimeDebug;
 use serai_primitives::{BlockNumber, BlockHash, Coin, WithAmount, Balance};
 
 pub use in_instructions_primitives as primitives;
-use primitives::InInstruction;
+use primitives::{InInstruction, Updates};
 
 pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"ininstrs";
 
-#[derive(Clone, PartialEq, Eq, Encode, Decode, TypeInfo, RuntimeDebug)]
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
-pub struct Batch {
-  pub id: BlockHash,
-  pub instructions: Vec<WithAmount<InInstruction>>,
-}
-
-#[derive(Clone, PartialEq, Eq, Encode, Decode, TypeInfo, RuntimeDebug)]
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
-pub struct Update {
-  // Coin's latest block number
-  pub block_number: BlockNumber,
-  pub batches: Vec<Batch>,
-}
-
-// None if the current block producer isn't operating over this coin or otherwise failed to get
-// data
-pub type Updates = Vec<Option<Update>>;
-
 #[derive(Clone, Copy, Encode, RuntimeDebug)]
 #[cfg_attr(feature = "std", derive(Decode, thiserror::Error))]
 pub enum InherentError {
@@ -94,7 +70,7 @@ pub mod pallet {
   use super::*;
 
   #[pallet::config]
-  pub trait Config: frame_system::Config<BlockNumber = u32> + TokensConfig {
+  pub trait Config: frame_system::Config<BlockNumber = u64> + TokensConfig {
     type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
   }
diff --git a/substrate/in-instructions/primitives/Cargo.toml b/substrate/in-instructions/primitives/Cargo.toml
index fff968ce..7947ecf8 100644
--- a/substrate/in-instructions/primitives/Cargo.toml
+++ b/substrate/in-instructions/primitives/Cargo.toml
@@ -11,14 +11,32 @@ all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
+zeroize = { version = "^1.5", features = ["derive"], optional = true }
+
 scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] }
 scale-info = { version = "2", default-features = false, features = ["derive"] }
 
 serde = { version = "1", features = ["derive"], optional = true }
 
+sp-std = { git = "https://github.com/serai-dex/substrate", default-features = false }
+sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false }
+
 serai-primitives = { path = "../../serai/primitives", default-features = false }
 tokens-primitives = { path = "../../tokens/primitives", default-features = false }
 
 [features]
-std = ["scale/std", "scale-info/std", "serde", "serai-primitives/std", "tokens-primitives/std"]
+std = [
+  "zeroize",
+
+  "scale/std",
+  "scale-info/std",
+
+  "serde",
+
+  "sp-std/std",
+  "sp-runtime/std",
+
+  "serai-primitives/std",
+  "tokens-primitives/std"
+]
 default = ["std"]
diff --git a/substrate/in-instructions/primitives/src/lib.rs b/substrate/in-instructions/primitives/src/lib.rs
index efcb85fa..a351333a 100644
--- a/substrate/in-instructions/primitives/src/lib.rs
+++ b/substrate/in-instructions/primitives/src/lib.rs
@@ -2,40 +2,66 @@
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![cfg_attr(not(feature = "std"), no_std)]
 
+#[cfg(feature = "std")]
+use zeroize::Zeroize;
+
 use scale::{Encode, Decode, MaxEncodedLen};
 use scale_info::TypeInfo;
 #[cfg(feature = "std")]
 use serde::{Serialize, Deserialize};
 
-use serai_primitives::{SeraiAddress, ExternalAddress, Data};
+#[cfg(not(feature = "std"))]
+use sp_std::vec::Vec;
+
+use sp_runtime::RuntimeDebug;
+
+use serai_primitives::{BlockNumber, BlockHash, SeraiAddress, ExternalAddress, Data, WithAmount};
 
 mod shorthand;
 pub use shorthand::*;
 
 #[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)]
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
+#[cfg_attr(feature = "std", derive(Zeroize, Serialize, Deserialize))]
 pub enum Application {
   DEX,
 }
 
 #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)]
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
+#[cfg_attr(feature = "std", derive(Zeroize, Serialize, Deserialize))]
 pub struct ApplicationCall {
   application: Application,
   data: Data,
 }
 
 #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)]
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
+#[cfg_attr(feature = "std", derive(Zeroize, Serialize, Deserialize))]
 pub enum InInstruction {
   Transfer(SeraiAddress),
   Call(ApplicationCall),
 }
 
 #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)]
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
+#[cfg_attr(feature = "std", derive(Zeroize, Serialize, Deserialize))]
 pub struct RefundableInInstruction {
   pub origin: Option<ExternalAddress>,
   pub instruction: InInstruction,
 }
+
+#[derive(Clone, PartialEq, Eq, Encode, Decode, TypeInfo, RuntimeDebug)]
+#[cfg_attr(feature = "std", derive(Zeroize, Serialize, Deserialize))]
+pub struct Batch {
+  pub id: BlockHash,
+  pub instructions: Vec<WithAmount<InInstruction>>,
+}
+
+#[derive(Clone, PartialEq, Eq, Encode, Decode, TypeInfo, RuntimeDebug)]
+#[cfg_attr(feature = "std", derive(Zeroize, Serialize, Deserialize))]
+pub struct Update {
+  // Coin's latest block number
+  pub block_number: BlockNumber,
+  pub batches: Vec<Batch>,
+}
+
+// None if the current block producer isn't operating over this coin or otherwise failed to get
+// data
+pub type Updates = Vec<Option<Update>>;
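With Batch, Update, and Updates now living in the primitives crate, an inherent payload can be built and SCALE-roundtripped without pulling in the pallet. A sketch, assuming the workspace's `scale` rename of parity-scale-codec (a coin the producer didn't handle is reported as None):

```rust
use scale::{Encode, Decode};
use serai_primitives::{BlockNumber, BlockHash};
use in_instructions_primitives::{Batch, Update, Updates};

fn main() {
  // One coin reports a single (empty) batch; this producer wasn't running the
  // second coin, hence None
  let updates: Updates = vec![
    Some(Update {
      block_number: BlockNumber(123),
      batches: vec![Batch { id: BlockHash([0xff; 32]), instructions: vec![] }],
    }),
    None,
  ];

  // SCALE roundtrip: what a node decodes equals what the producer encoded
  let encoded = updates.encode();
  assert_eq!(Updates::decode(&mut encoded.as_slice()).unwrap(), updates);
}
```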
diff --git a/substrate/in-instructions/primitives/src/shorthand.rs b/substrate/in-instructions/primitives/src/shorthand.rs
index 618787b4..f35a840e 100644
--- a/substrate/in-instructions/primitives/src/shorthand.rs
+++ b/substrate/in-instructions/primitives/src/shorthand.rs
@@ -1,3 +1,6 @@
+#[cfg(feature = "std")]
+use zeroize::Zeroize;
+
 use scale::{Encode, Decode, MaxEncodedLen};
 use scale_info::TypeInfo;
 
@@ -13,7 +16,7 @@ use crate::RefundableInInstruction;
 use crate::InInstruction;
 
 #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)]
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
+#[cfg_attr(feature = "std", derive(Zeroize, Serialize, Deserialize))]
 pub enum Shorthand {
   Raw(Data),
   Swap {
diff --git a/substrate/node/src/command_helper.rs b/substrate/node/src/command_helper.rs
index beea5b12..5b30a11a 100644
--- a/substrate/node/src/command_helper.rs
+++ b/substrate/node/src/command_helper.rs
@@ -57,8 +57,8 @@ pub fn create_benchmark_extrinsic(
     system::CheckTxVersion::<Runtime>::new(),
     system::CheckGenesis::<Runtime>::new(),
     system::CheckEra::<Runtime>::from(sp_runtime::generic::Era::mortal(
-      u64::from(BlockHashCount::get().checked_next_power_of_two().map(|c| c / 2).unwrap_or(2)),
-      client.chain_info().best_number.into(),
+      BlockHashCount::get().checked_next_power_of_two().map(|c| c / 2).unwrap_or(2),
+      client.chain_info().best_number,
     )),
     system::CheckNonce::<Runtime>::from(nonce),
     system::CheckWeight::<Runtime>::new(),
diff --git a/substrate/runtime/src/lib.rs b/substrate/runtime/src/lib.rs
index bf402cfe..691ac8c7 100644
--- a/substrate/runtime/src/lib.rs
+++ b/substrate/runtime/src/lib.rs
@@ -55,7 +55,7 @@ use transaction_payment::CurrencyAdapter;
 use session::PeriodicSessions;
 
 /// An index to a block.
-pub type BlockNumber = u32;
+pub type BlockNumber = u64;
 
 /// Index of a transaction in the chain, for a given account.
 pub type Index = u32;
@@ -100,7 +100,7 @@ pub const BLOCK_SIZE: u32 = 1024 * 1024;
 pub const TARGET_BLOCK_TIME: u64 = 6;
 
 /// Measured in blocks.
-pub const MINUTES: BlockNumber = 60 / (TARGET_BLOCK_TIME as BlockNumber);
+pub const MINUTES: BlockNumber = 60 / TARGET_BLOCK_TIME;
 
 pub const HOURS: BlockNumber = MINUTES * 60;
 pub const DAYS: BlockNumber = HOURS * 24;
@@ -249,7 +249,7 @@ impl in_instructions::Config for Runtime {
 }
 
 const SESSION_LENGTH: BlockNumber = 5 * DAYS;
-type Sessions = PeriodicSessions<ConstU32<{ SESSION_LENGTH }>, ConstU32<{ SESSION_LENGTH }>>;
+type Sessions = PeriodicSessions<ConstU64<{ SESSION_LENGTH }>, ConstU64<{ SESSION_LENGTH }>>;
 
 pub struct IdentityValidatorIdOf;
 impl Convert<PublicKey, Option<PublicKey>> for IdentityValidatorIdOf {
diff --git a/substrate/serai/client/Cargo.toml b/substrate/serai/client/Cargo.toml
index f26f2485..a844a0e7 100644
--- a/substrate/serai/client/Cargo.toml
+++ b/substrate/serai/client/Cargo.toml
@@ -13,18 +13,31 @@ all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
-thiserror = "1"
+thiserror = { version = "1", optional = true }
 
 scale = { package = "parity-scale-codec", version = "3" }
-scale-info = "2"
-scale-value = "0.6"
+scale-info = { version = "2", optional = true }
 
-sp-core = { git = "https://github.com/serai-dex/substrate", version = "7" }
-
-serai-primitives = { path = "../primitives", version = "0.1" }
 serai-runtime = { path = "../../runtime", version = "0.1" }
 
-subxt = "0.25"
+sp-core = { git = "https://github.com/serai-dex/substrate" }
+subxt = { version = "0.27", default-features = false, features = ["jsonrpsee-ws"], optional = true }
+
+bitcoin = { version = "0.29", optional = true }
+
+ciphersuite = { path = "../../../crypto/ciphersuite", version = "0.2", optional = true }
+monero-serai = { path = "../../../coins/monero", version = "0.1.3-alpha", optional = true }
+
+[features]
+serai = ["thiserror", "scale-info", "subxt"]
+
+coins = []
+bitcoin = ["coins", "dep:bitcoin"]
+monero = ["coins", "ciphersuite/ed25519", "monero-serai"]
+
+# Assumes the default usage is to use Serai as a DEX, which doesn't actually
+# require connecting to a Serai node
+default = ["bitcoin", "monero"]
 
 [dev-dependencies]
 lazy_static = "1"
diff --git a/substrate/serai/client/src/coins/bitcoin.rs b/substrate/serai/client/src/coins/bitcoin.rs
new file mode 100644
index 00000000..cedca9ab
--- /dev/null
+++ b/substrate/serai/client/src/coins/bitcoin.rs
@@ -0,0 +1,89 @@
+use core::str::FromStr;
+
+use scale::{Encode, Decode};
+
+use bitcoin::{
+  hashes::{Hash as HashTrait, hash160::Hash},
+  PubkeyHash, ScriptHash,
+  network::constants::Network,
+  util::address::{Error, WitnessVersion, Payload, Address as BAddress},
+};
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct Address(pub BAddress);
+
+impl FromStr for Address {
+  type Err = Error;
+  fn from_str(str: &str) -> Result<Address, Error> {
+    BAddress::from_str(str).map(Address)
+  }
+}
+
+impl ToString for Address {
+  fn to_string(&self) -> String {
+    self.0.to_string()
+  }
+}
+
+// SCALE-encoded variant of Bitcoin addresses.
+#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
+enum EncodedAddress {
+  P2PKH([u8; 20]),
+  P2SH([u8; 20]),
+  P2WPKH([u8; 20]),
+  P2WSH([u8; 32]),
+  P2TR([u8; 32]),
+}
+
+impl TryFrom<Vec<u8>> for Address {
+  type Error = ();
+  fn try_from(data: Vec<u8>) -> Result<Address, ()> {
+    Ok(Address(BAddress {
+      network: Network::Bitcoin,
+      payload: match EncodedAddress::decode(&mut data.as_ref()).map_err(|_| ())? {
+        EncodedAddress::P2PKH(hash) => {
+          Payload::PubkeyHash(PubkeyHash::from_hash(Hash::from_inner(hash)))
+        }
+        EncodedAddress::P2SH(hash) => {
+          Payload::ScriptHash(ScriptHash::from_hash(Hash::from_inner(hash)))
+        }
+        EncodedAddress::P2WPKH(hash) => {
+          Payload::WitnessProgram { version: WitnessVersion::V0, program: hash.to_vec() }
+        }
+        EncodedAddress::P2WSH(hash) => {
+          Payload::WitnessProgram { version: WitnessVersion::V0, program: hash.to_vec() }
+        }
+        EncodedAddress::P2TR(hash) => {
+          Payload::WitnessProgram { version: WitnessVersion::V1, program: hash.to_vec() }
+        }
+      },
+    }))
+  }
+}
+
+#[allow(clippy::from_over_into)]
+impl TryInto<Vec<u8>> for Address {
+  type Error = ();
+  fn try_into(self) -> Result<Vec<u8>, ()> {
+    Ok(
+      (match self.0.payload {
+        Payload::PubkeyHash(hash) => EncodedAddress::P2PKH(hash.as_hash().into_inner()),
+        Payload::ScriptHash(hash) => EncodedAddress::P2SH(hash.as_hash().into_inner()),
+        Payload::WitnessProgram { version: WitnessVersion::V0, program } => {
+          if program.len() == 20 {
+            EncodedAddress::P2WPKH(program.try_into().map_err(|_| ())?)
+          } else if program.len() == 32 {
+            EncodedAddress::P2WSH(program.try_into().map_err(|_| ())?)
+          } else {
+            Err(())?
+          }
+        }
+        Payload::WitnessProgram { version: WitnessVersion::V1, program } => {
+          EncodedAddress::P2TR(program.try_into().map_err(|_| ())?)
+        }
+        _ => Err(())?,
+      })
+      .encode(),
+    )
+  }
+}
diff --git a/substrate/serai/client/src/coins/mod.rs b/substrate/serai/client/src/coins/mod.rs
new file mode 100644
index 00000000..63ebf481
--- /dev/null
+++ b/substrate/serai/client/src/coins/mod.rs
@@ -0,0 +1,5 @@
+#[cfg(feature = "bitcoin")]
+pub mod bitcoin;
+
+#[cfg(feature = "monero")]
+pub mod monero;
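The test stubs added later in this diff leave the Bitcoin address roundtrip as a TODO; under the bitcoin feature it might look like the following, using the BIP-173 mainnet P2WPKH test vector (assumes serai-client exposes this module as coins::bitcoin):

```rust
use core::str::FromStr;
use serai_client::coins::bitcoin::Address;

fn main() {
  // BIP-173 mainnet P2WPKH test vector
  let addr = Address::from_str("bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4").unwrap();
  // SCALE-encode, then recover; the payload (and the forced mainnet network)
  // should survive the trip
  let encoded: Vec<u8> = addr.clone().try_into().unwrap();
  assert_eq!(Address::try_from(encoded).unwrap(), addr);
}
```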
diff --git a/substrate/serai/client/src/coins/monero.rs b/substrate/serai/client/src/coins/monero.rs
new file mode 100644
index 00000000..65ef13d5
--- /dev/null
+++ b/substrate/serai/client/src/coins/monero.rs
@@ -0,0 +1,101 @@
+use core::str::FromStr;
+
+use scale::{Encode, Decode};
+
+use ciphersuite::{Ciphersuite, Ed25519};
+
+use monero_serai::wallet::address::{AddressError, Network, AddressType, AddressMeta, MoneroAddress};
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct Address(MoneroAddress);
+impl Address {
+  pub fn new(address: MoneroAddress) -> Option<Address> {
+    if address.payment_id().is_some() {
+      return None;
+    }
+    Some(Address(address))
+  }
+}
+
+impl FromStr for Address {
+  type Err = AddressError;
+  fn from_str(str: &str) -> Result<Address, AddressError> {
+    MoneroAddress::from_str(Network::Mainnet, str).map(Address)
+  }
+}
+
+impl ToString for Address {
+  fn to_string(&self) -> String {
+    self.0.to_string()
+  }
+}
+
+// SCALE-encoded variant of Monero addresses.
+#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
+enum EncodedAddressType {
+  Standard,
+  Subaddress,
+  Featured(u8),
+}
+
+#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
+struct EncodedAddress {
+  kind: EncodedAddressType,
+  spend: [u8; 32],
+  view: [u8; 32],
+}
+
+impl TryFrom<Vec<u8>> for Address {
+  type Error = ();
+  fn try_from(data: Vec<u8>) -> Result<Address, ()> {
+    // Decode as SCALE
+    let addr = EncodedAddress::decode(&mut data.as_ref()).map_err(|_| ())?;
+    // Convert over
+    Ok(Address(MoneroAddress::new(
+      AddressMeta::new(
+        Network::Mainnet,
+        match addr.kind {
+          EncodedAddressType::Standard => AddressType::Standard,
+          EncodedAddressType::Subaddress => AddressType::Subaddress,
+          EncodedAddressType::Featured(flags) => {
+            let subaddress = (flags & 1) != 0;
+            let integrated = (flags & (1 << 1)) != 0;
+            let guaranteed = (flags & (1 << 2)) != 0;
+            if integrated {
+              Err(())?;
+            }
+            AddressType::Featured { subaddress, payment_id: None, guaranteed }
+          }
+        },
+      ),
+      Ed25519::read_G(&mut addr.spend.as_ref()).map_err(|_| ())?.0,
+      Ed25519::read_G(&mut addr.view.as_ref()).map_err(|_| ())?.0,
+    )))
+  }
+}
+
+#[allow(clippy::from_over_into)]
+impl Into<MoneroAddress> for Address {
+  fn into(self) -> MoneroAddress {
+    self.0
+  }
+}
+
+#[allow(clippy::from_over_into)]
+impl Into<Vec<u8>> for Address {
+  fn into(self) -> Vec<u8> {
+    EncodedAddress {
+      kind: match self.0.meta.kind {
+        AddressType::Standard => EncodedAddressType::Standard,
+        AddressType::Subaddress => EncodedAddressType::Subaddress,
+        AddressType::Integrated(_) => panic!("integrated address became Serai Monero address"),
+        AddressType::Featured { subaddress, payment_id: _, guaranteed } => {
+          EncodedAddressType::Featured(u8::from(subaddress) + (u8::from(guaranteed) << 2))
+        }
+      },
+      spend: self.0.spend.compress().0,
+      view: self.0.view.compress().0,
+    }
+    .encode()
+  }
+}
diff --git a/substrate/serai/client/src/lib.rs b/substrate/serai/client/src/lib.rs
index 00af5352..d7311d95 100644
--- a/substrate/serai/client/src/lib.rs
+++ b/substrate/serai/client/src/lib.rs
@@ -1,126 +1,28 @@
-use thiserror::Error;
+#[cfg(feature = "coins")]
+pub mod coins;
 
-use scale::{Encode, Decode};
-mod scale_value;
-pub(crate) use crate::scale_value::{scale_value, scale_composite};
-use ::scale_value::Value;
+#[cfg(feature = "serai")]
+mod serai;
+#[cfg(feature = "serai")]
+pub use serai::*;
 
-use subxt::{
-  utils::Encoded,
-  tx::{Signer, DynamicTxPayload, BaseExtrinsicParams, BaseExtrinsicParamsBuilder, TxClient},
-  Config as SubxtConfig, OnlineClient,
-};
-
-pub use serai_primitives as primitives;
-use primitives::{Signature, SeraiAddress};
-
-use serai_runtime::{
-  system::Config, support::traits::PalletInfo as PalletInfoTrait, PalletInfo, Runtime,
-};
-
-pub mod tokens;
-pub mod in_instructions;
-
-#[derive(Clone, Copy, PartialEq, Eq, Default, Debug, Encode, Decode)]
-pub struct Tip {
-  #[codec(compact)]
-  pub tip: u64,
-}
-
-#[derive(Clone, Copy, PartialEq, Eq, Debug)]
-pub struct SeraiConfig;
-impl SubxtConfig for SeraiConfig {
-  type BlockNumber = <Runtime as Config>::BlockNumber;
-
-  type Hash = <Runtime as Config>::Hash;
-  type Hashing = <Runtime as Config>::Hashing;
-
-  type Index = <Runtime as Config>::Index;
-  type AccountId = <Runtime as Config>::AccountId;
-  // TODO: Bech32m
-
type Address = SeraiAddress; - - type Header = ::Header; - type Signature = Signature; - - type ExtrinsicParams = BaseExtrinsicParams; -} - -#[derive(Clone, Error, Debug)] -pub enum SeraiError { - #[error("failed to connect to serai")] - RpcError, - #[error("serai-client library was intended for a different runtime version")] - InvalidRuntime, -} - -#[derive(Clone)] -pub struct Serai(OnlineClient); - -impl Serai { - pub async fn new(url: &str) -> Result { - Ok(Serai(OnlineClient::::from_url(url).await.map_err(|_| SeraiError::RpcError)?)) +// If we aren't exposing the Serai client (subxt), still expose all primitives +#[cfg(not(feature = "serai"))] +pub use serai_runtime::primitives; +#[cfg(not(feature = "serai"))] +mod other_primitives { + pub mod in_instructions { + pub use serai_runtime::in_instructions::primitives; } - - async fn storage( - &self, - pallet: &'static str, - name: &'static str, - keys: Option>, - block: [u8; 32], - ) -> Result, SeraiError> { - let storage = self.0.storage(); - let address = subxt::dynamic::storage(pallet, name, keys.unwrap_or(vec![])); - debug_assert!(storage.validate(&address).is_ok(), "invalid storage address"); - - storage - .fetch(&address, Some(block.into())) - .await - .map_err(|_| SeraiError::RpcError)? - .map(|res| R::decode(&mut res.encoded()).map_err(|_| SeraiError::InvalidRuntime)) - .transpose() + pub mod tokens { + pub use serai_runtime::tokens::primitives; } - - async fn events( - &self, - block: [u8; 32], - filter: impl Fn(&E) -> bool, - ) -> Result, SeraiError> { - let mut res = vec![]; - for event in - self.0.events().at(Some(block.into())).await.map_err(|_| SeraiError::RpcError)?.iter() - { - let event = event.map_err(|_| SeraiError::InvalidRuntime)?; - if PalletInfo::index::
<P>
().unwrap() == usize::from(event.pallet_index()) { - let mut with_variant: &[u8] = - &[[event.variant_index()].as_ref(), event.field_bytes()].concat(); - let event = E::decode(&mut with_variant).map_err(|_| SeraiError::InvalidRuntime)?; - if filter(&event) { - res.push(event); - } - } - } - Ok(res) - } - - pub async fn get_latest_block_hash(&self) -> Result<[u8; 32], SeraiError> { - Ok(self.0.rpc().finalized_head().await.map_err(|_| SeraiError::RpcError)?.into()) - } - - pub fn sign>( - &self, - signer: &S, - payload: &DynamicTxPayload<'static>, - nonce: u32, - params: BaseExtrinsicParamsBuilder, - ) -> Result { - TxClient::new(self.0.offline()) - .create_signed_with_nonce(payload, signer, nonce, params) - .map(|tx| Encoded(tx.into_encoded())) - .map_err(|_| SeraiError::InvalidRuntime) - } - - pub async fn publish(&self, tx: &Encoded) -> Result<[u8; 32], SeraiError> { - self.0.rpc().submit_extrinsic(tx).await.map(Into::into).map_err(|_| SeraiError::RpcError) + pub mod validator_sets { + pub use serai_runtime::validator_sets::primitives; } } +#[cfg(not(feature = "serai"))] +pub use other_primitives::*; + +#[cfg(test)] +mod tests; diff --git a/substrate/serai/client/src/in_instructions.rs b/substrate/serai/client/src/serai/in_instructions.rs similarity index 100% rename from substrate/serai/client/src/in_instructions.rs rename to substrate/serai/client/src/serai/in_instructions.rs diff --git a/substrate/serai/client/src/serai/mod.rs b/substrate/serai/client/src/serai/mod.rs new file mode 100644 index 00000000..067d314f --- /dev/null +++ b/substrate/serai/client/src/serai/mod.rs @@ -0,0 +1,155 @@ +use thiserror::Error; + +use scale::{Encode, Decode}; +mod scale_value; +pub(crate) use scale_value::{scale_value, scale_composite}; +use subxt::ext::scale_value::Value; + +use sp_core::{Pair as PairTrait, sr25519::Pair}; +use subxt::{ + utils::Encoded, + config::{ + substrate::{BlakeTwo256, SubstrateHeader}, + extrinsic_params::{BaseExtrinsicParams, BaseExtrinsicParamsBuilder}, + }, + tx::{Signer, DynamicTxPayload, TxClient}, + Config as SubxtConfig, OnlineClient, +}; + +pub use serai_runtime::primitives; +use primitives::{Signature, SeraiAddress}; + +use serai_runtime::{ + system::Config, support::traits::PalletInfo as PalletInfoTrait, PalletInfo, Runtime, +}; + +pub mod tokens; +pub mod in_instructions; +pub mod validator_sets { + pub use serai_runtime::validator_sets::primitives; +} + +#[derive(Clone, Copy, PartialEq, Eq, Default, Debug, Encode, Decode)] +pub struct Tip { + #[codec(compact)] + pub tip: u64, +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct SeraiConfig; +impl SubxtConfig for SeraiConfig { + type Hash = ::Hash; + type Hasher = BlakeTwo256; + + type Index = ::Index; + type AccountId = ::AccountId; + // TODO: Bech32m + type Address = SeraiAddress; + + type Header = SubstrateHeader<::BlockNumber, BlakeTwo256>; + type Signature = Signature; + + type ExtrinsicParams = BaseExtrinsicParams; +} + +#[derive(Clone, Error, Debug)] +pub enum SeraiError { + #[error("failed to connect to serai")] + RpcError, + #[error("serai-client library was intended for a different runtime version")] + InvalidRuntime, +} + +#[derive(Clone)] +pub struct Serai(OnlineClient); + +impl Serai { + pub async fn new(url: &str) -> Result { + Ok(Serai(OnlineClient::::from_url(url).await.map_err(|_| SeraiError::RpcError)?)) + } + + async fn storage( + &self, + pallet: &'static str, + name: &'static str, + keys: Option>, + block: [u8; 32], + ) -> Result, SeraiError> { + let storage = self.0.storage(); + let 
address = subxt::dynamic::storage(pallet, name, keys.unwrap_or(vec![]));
+    debug_assert!(storage.validate(&address).is_ok(), "invalid storage address");
+
+    storage
+      .at(Some(block.into()))
+      .await
+      .map_err(|_| SeraiError::RpcError)?
+      .fetch(&address)
+      .await
+      .map_err(|_| SeraiError::RpcError)?
+      .map(|res| R::decode(&mut res.encoded()).map_err(|_| SeraiError::InvalidRuntime))
+      .transpose()
+  }
+
+  async fn events<P: 'static, E: Decode>(
+    &self,
+    block: [u8; 32],
+    filter: impl Fn(&E) -> bool,
+  ) -> Result<Vec<E>, SeraiError> {
+    let mut res = vec![];
+    for event in
+      self.0.events().at(Some(block.into())).await.map_err(|_| SeraiError::RpcError)?.iter()
+    {
+      let event = event.map_err(|_| SeraiError::InvalidRuntime)?;
+      if PalletInfo::index::
<P>
().unwrap() == usize::from(event.pallet_index()) { + let mut with_variant: &[u8] = + &[[event.variant_index()].as_ref(), event.field_bytes()].concat(); + let event = E::decode(&mut with_variant).map_err(|_| SeraiError::InvalidRuntime)?; + if filter(&event) { + res.push(event); + } + } + } + Ok(res) + } + + pub async fn get_latest_block_hash(&self) -> Result<[u8; 32], SeraiError> { + Ok(self.0.rpc().finalized_head().await.map_err(|_| SeraiError::RpcError)?.into()) + } + + pub fn sign>( + &self, + signer: &S, + payload: &DynamicTxPayload<'static>, + nonce: u32, + params: BaseExtrinsicParamsBuilder, + ) -> Result { + TxClient::new(self.0.offline()) + .create_signed_with_nonce(payload, signer, nonce, params) + .map(|tx| Encoded(tx.into_encoded())) + .map_err(|_| SeraiError::InvalidRuntime) + } + + pub async fn publish(&self, tx: &Encoded) -> Result<[u8; 32], SeraiError> { + self.0.rpc().submit_extrinsic(tx).await.map(Into::into).map_err(|_| SeraiError::RpcError) + } +} + +#[derive(Clone)] +pub struct PairSigner(Pair, ::AccountId); +impl PairSigner { + pub fn new(pair: Pair) -> Self { + let id = pair.public(); + PairSigner(pair, id) + } +} +impl Signer for PairSigner { + fn account_id(&self) -> &::AccountId { + &self.1 + } + fn address(&self) -> ::Address { + self.1.into() + } + fn sign(&self, payload: &[u8]) -> ::Signature { + self.0.sign(payload) + } +} diff --git a/substrate/serai/client/src/scale_value.rs b/substrate/serai/client/src/serai/scale_value.rs similarity index 91% rename from substrate/serai/client/src/scale_value.rs rename to substrate/serai/client/src/serai/scale_value.rs index 0e74b1b2..c09b2eea 100644 --- a/substrate/serai/client/src/scale_value.rs +++ b/substrate/serai/client/src/serai/scale_value.rs @@ -1,6 +1,6 @@ use ::scale::Encode; use scale_info::{MetaType, TypeInfo, Registry, PortableRegistry}; -use scale_value::{Composite, ValueDef, Value, scale}; +use subxt::ext::scale_value::{Composite, ValueDef, Value, scale}; pub(crate) fn scale_value(value: V) -> Value { let mut registry = Registry::new(); diff --git a/substrate/serai/client/src/tokens.rs b/substrate/serai/client/src/serai/tokens.rs similarity index 100% rename from substrate/serai/client/src/tokens.rs rename to substrate/serai/client/src/serai/tokens.rs diff --git a/substrate/serai/client/src/tests/coins/bitcoin.rs b/substrate/serai/client/src/tests/coins/bitcoin.rs new file mode 100644 index 00000000..2e93d51b --- /dev/null +++ b/substrate/serai/client/src/tests/coins/bitcoin.rs @@ -0,0 +1 @@ +// TODO: Test the address back and forth diff --git a/substrate/serai/client/src/tests/coins/mod.rs b/substrate/serai/client/src/tests/coins/mod.rs new file mode 100644 index 00000000..5dd6b762 --- /dev/null +++ b/substrate/serai/client/src/tests/coins/mod.rs @@ -0,0 +1,5 @@ +#[cfg(feature = "bitcoin")] +mod bitcoin; + +#[cfg(feature = "monero")] +mod monero; diff --git a/substrate/serai/client/src/tests/coins/monero.rs b/substrate/serai/client/src/tests/coins/monero.rs new file mode 100644 index 00000000..2e93d51b --- /dev/null +++ b/substrate/serai/client/src/tests/coins/monero.rs @@ -0,0 +1 @@ +// TODO: Test the address back and forth diff --git a/substrate/serai/client/src/tests/mod.rs b/substrate/serai/client/src/tests/mod.rs new file mode 100644 index 00000000..7cf73e9a --- /dev/null +++ b/substrate/serai/client/src/tests/mod.rs @@ -0,0 +1,2 @@ +#[cfg(feature = "coins")] +mod coins; diff --git a/substrate/serai/client/tests/burn.rs b/substrate/serai/client/tests/burn.rs index 9e58ed4b..7490a39e 100644 --- 
a/substrate/serai/client/tests/burn.rs +++ b/substrate/serai/client/tests/burn.rs @@ -2,21 +2,19 @@ use core::time::Duration; use rand_core::{RngCore, OsRng}; -use sp_core::Pair; -use serai_runtime::in_instructions::{Batch, Update}; - use tokio::time::sleep; -use subxt::tx::{BaseExtrinsicParamsBuilder, PairSigner}; +use sp_core::Pair; +use subxt::{config::extrinsic_params::BaseExtrinsicParamsBuilder}; use serai_client::{ primitives::{ BITCOIN, BlockNumber, BlockHash, SeraiAddress, Amount, WithAmount, Balance, Data, ExternalAddress, insecure_pair_from_name, }, - in_instructions::primitives::InInstruction, + in_instructions::primitives::{InInstruction, Batch, Update}, tokens::{primitives::OutInstruction, TokensEvent}, - Serai, + PairSigner, Serai, }; mod runner; @@ -27,7 +25,7 @@ serai_test!( let coin = BITCOIN; let mut id = BlockHash([0; 32]); OsRng.fill_bytes(&mut id.0); - let block_number = BlockNumber(u32::try_from(OsRng.next_u64() >> 32).unwrap()); + let block_number = BlockNumber(OsRng.next_u64()); let pair = insecure_pair_from_name("Alice"); let public = pair.public(); diff --git a/substrate/serai/client/tests/runner.rs b/substrate/serai/client/tests/runner.rs index 61a56ceb..1e90315b 100644 --- a/substrate/serai/client/tests/runner.rs +++ b/substrate/serai/client/tests/runner.rs @@ -5,8 +5,11 @@ use lazy_static::lazy_static; use tokio::{sync::Mutex, time::sleep}; -use serai_runtime::in_instructions::Update; -use serai_client::{primitives::Coin, in_instructions::InInstructionsEvent, Serai}; +use serai_client::{ + primitives::Coin, + in_instructions::{primitives::Updates, InInstructionsEvent}, + Serai, +}; use jsonrpsee_server::RpcModule; @@ -17,7 +20,7 @@ lazy_static! { } #[allow(dead_code)] -pub async fn provide_updates(updates: Vec>) -> [u8; 32] { +pub async fn provide_updates(updates: Updates) -> [u8; 32] { let done = Arc::new(Mutex::new(false)); let done_clone = done.clone(); let updates_clone = updates.clone(); diff --git a/substrate/serai/client/tests/updates.rs b/substrate/serai/client/tests/updates.rs index 05c30c83..fdc44d06 100644 --- a/substrate/serai/client/tests/updates.rs +++ b/substrate/serai/client/tests/updates.rs @@ -1,11 +1,12 @@ use rand_core::{RngCore, OsRng}; -use serai_runtime::in_instructions::{Batch, Update}; - use serai_client::{ primitives::{BITCOIN, BlockNumber, BlockHash, SeraiAddress, Amount, WithAmount, Balance}, tokens::TokensEvent, - in_instructions::{primitives::InInstruction, InInstructionsEvent}, + in_instructions::{ + primitives::{InInstruction, Batch, Update}, + InInstructionsEvent, + }, Serai, }; @@ -17,7 +18,7 @@ serai_test!( let coin = BITCOIN; let mut id = BlockHash([0; 32]); OsRng.fill_bytes(&mut id.0); - let block_number = BlockNumber(u32::try_from(OsRng.next_u64() >> 32).unwrap()); + let block_number = BlockNumber(OsRng.next_u64()); let mut address = SeraiAddress::new([0; 32]); OsRng.fill_bytes(&mut address.0); diff --git a/substrate/serai/primitives/Cargo.toml b/substrate/serai/primitives/Cargo.toml index 303a27b0..226727c0 100644 --- a/substrate/serai/primitives/Cargo.toml +++ b/substrate/serai/primitives/Cargo.toml @@ -12,6 +12,8 @@ all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] +zeroize = { version = "^1.5", features = ["derive"], optional = true } + scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } scale-info = { version = "2", default-features = false, features = ["derive"] } @@ -21,5 +23,5 @@ sp-core = { git = "https://github.com/serai-dex/substrate", 
default-features = f sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false } [features] -std = ["scale/std", "scale-info/std", "serde", "sp-core/std", "sp-runtime/std"] +std = ["zeroize", "scale/std", "scale-info/std", "serde", "sp-core/std", "sp-runtime/std"] default = ["std"] diff --git a/substrate/serai/primitives/src/account.rs b/substrate/serai/primitives/src/account.rs index 4fcd2390..f7d37c0e 100644 --- a/substrate/serai/primitives/src/account.rs +++ b/substrate/serai/primitives/src/account.rs @@ -1,5 +1,9 @@ +#[cfg(feature = "std")] +use zeroize::Zeroize; + use scale::{Encode, Decode, MaxEncodedLen}; use scale_info::TypeInfo; + #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; @@ -14,7 +18,7 @@ pub type PublicKey = Public; #[derive( Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Encode, Decode, MaxEncodedLen, TypeInfo, )] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", derive(Zeroize, Serialize, Deserialize))] pub struct SeraiAddress(pub [u8; 32]); impl SeraiAddress { pub fn new(key: [u8; 32]) -> SeraiAddress { diff --git a/substrate/serai/primitives/src/amount.rs b/substrate/serai/primitives/src/amount.rs index 5d1875c5..67c183d4 100644 --- a/substrate/serai/primitives/src/amount.rs +++ b/substrate/serai/primitives/src/amount.rs @@ -3,6 +3,9 @@ use core::{ fmt::Debug, }; +#[cfg(feature = "std")] +use zeroize::Zeroize; + use scale::{Encode, Decode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "std")] @@ -18,7 +21,7 @@ pub type SubstrateAmount = u64; #[derive( Clone, Copy, PartialEq, Eq, PartialOrd, Debug, Encode, Decode, MaxEncodedLen, TypeInfo, )] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", derive(Zeroize, Serialize, Deserialize))] pub struct Amount(pub SubstrateAmount); impl Add for Amount { @@ -44,7 +47,7 @@ impl Mul for Amount { } #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", derive(Zeroize, Serialize, Deserialize))] pub struct WithAmount< T: Clone + PartialEq + Eq + Debug + Encode + Decode + MaxEncodedLen + TypeInfo, > { diff --git a/substrate/serai/primitives/src/balance.rs b/substrate/serai/primitives/src/balance.rs index 7842a213..9b4322f5 100644 --- a/substrate/serai/primitives/src/balance.rs +++ b/substrate/serai/primitives/src/balance.rs @@ -1,5 +1,8 @@ use core::ops::{Add, Sub, Mul}; +#[cfg(feature = "std")] +use zeroize::Zeroize; + use scale::{Encode, Decode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "std")] @@ -9,7 +12,7 @@ use crate::{Coin, Amount}; /// The type used for balances (a Coin and Balance). 
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", derive(Zeroize, Serialize, Deserialize))] pub struct Balance { pub coin: Coin, pub amount: Amount, diff --git a/substrate/serai/primitives/src/block.rs b/substrate/serai/primitives/src/block.rs index a31b8a3d..deba665e 100644 --- a/substrate/serai/primitives/src/block.rs +++ b/substrate/serai/primitives/src/block.rs @@ -1,5 +1,9 @@ +#[cfg(feature = "std")] +use zeroize::Zeroize; + use scale::{Encode, Decode, MaxEncodedLen}; use scale_info::TypeInfo; + #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; @@ -10,10 +14,10 @@ use sp_core::H256; #[derive( Clone, Copy, Default, PartialEq, Eq, Hash, Debug, Encode, Decode, MaxEncodedLen, TypeInfo, )] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub struct BlockNumber(pub u32); -impl From for BlockNumber { - fn from(number: u32) -> BlockNumber { +#[cfg_attr(feature = "std", derive(Zeroize, Serialize, Deserialize))] +pub struct BlockNumber(pub u64); +impl From for BlockNumber { + fn from(number: u64) -> BlockNumber { BlockNumber(number) } } @@ -24,7 +28,7 @@ impl From for BlockNumber { // This would require the processor to maintain a mapping of 32-byte IDs to actual hashes, which // would be fine #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", derive(Zeroize, Serialize, Deserialize))] pub struct BlockHash(pub [u8; 32]); impl AsRef<[u8]> for BlockHash { diff --git a/substrate/serai/primitives/src/coins.rs b/substrate/serai/primitives/src/coins.rs index 6a587df4..4cb4c6b3 100644 --- a/substrate/serai/primitives/src/coins.rs +++ b/substrate/serai/primitives/src/coins.rs @@ -1,11 +1,15 @@ +#[cfg(feature = "std")] +use zeroize::Zeroize; + use scale::{Encode, Decode, MaxEncodedLen}; use scale_info::TypeInfo; + #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; /// The type used to identify coins. 
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", derive(Zeroize, Serialize, Deserialize))] pub struct Coin(pub u32); impl From for Coin { fn from(coin: u32) -> Coin { diff --git a/substrate/serai/primitives/src/lib.rs b/substrate/serai/primitives/src/lib.rs index b77dd249..861eeb85 100644 --- a/substrate/serai/primitives/src/lib.rs +++ b/substrate/serai/primitives/src/lib.rs @@ -2,6 +2,9 @@ #![cfg_attr(docsrs, feature(doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(feature = "std")] +use zeroize::Zeroize; + use scale::{Encode, Decode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "std")] @@ -31,6 +34,14 @@ pub const MAX_ADDRESS_LEN: u32 = 74; #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct ExternalAddress(BoundedVec>); + +#[cfg(feature = "std")] +impl Zeroize for ExternalAddress { + fn zeroize(&mut self) { + self.0.as_mut().zeroize() + } +} + impl ExternalAddress { #[cfg(feature = "std")] pub fn new(address: Vec) -> Result { @@ -58,6 +69,14 @@ pub const MAX_DATA_LEN: u32 = 512; #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct Data(BoundedVec>); + +#[cfg(feature = "std")] +impl Zeroize for Data { + fn zeroize(&mut self) { + self.0.as_mut().zeroize() + } +} + impl Data { #[cfg(feature = "std")] pub fn new(data: Vec) -> Result { diff --git a/substrate/tokens/primitives/Cargo.toml b/substrate/tokens/primitives/Cargo.toml index 155376d6..25daffaf 100644 --- a/substrate/tokens/primitives/Cargo.toml +++ b/substrate/tokens/primitives/Cargo.toml @@ -11,6 +11,8 @@ all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] +zeroize = { version = "^1.5", features = ["derive"], optional = true } + scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } scale-info = { version = "2", default-features = false, features = ["derive"] } @@ -22,5 +24,5 @@ serai-primitives = { path = "../../serai/primitives", default-features = false } sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false } [features] -std = ["scale/std", "scale-info/std", "serde", "sp-runtime/std", "serai-primitives/std"] +std = ["zeroize", "scale/std", "scale-info/std", "serde", "sp-runtime/std", "serai-primitives/std"] default = ["std"] diff --git a/substrate/tokens/primitives/src/lib.rs b/substrate/tokens/primitives/src/lib.rs index 94c62a5a..916b10b1 100644 --- a/substrate/tokens/primitives/src/lib.rs +++ b/substrate/tokens/primitives/src/lib.rs @@ -2,6 +2,9 @@ #![cfg_attr(docsrs, feature(doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(feature = "std")] +use zeroize::Zeroize; + use scale::{Encode, Decode, MaxEncodedLen}; use scale_info::TypeInfo; @@ -13,14 +16,14 @@ use serai_primitives::{SeraiAddress, ExternalAddress, Data, pallet_address}; pub const ADDRESS: SeraiAddress = pallet_address(b"Tokens"); #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", derive(Zeroize, Serialize, Deserialize))] pub struct OutInstruction { pub address: ExternalAddress, pub data: Option, } #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] 
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", derive(Zeroize, Serialize, Deserialize))] pub enum Destination { Native(SeraiAddress), External(OutInstruction), diff --git a/substrate/validator-sets/pallet/src/lib.rs b/substrate/validator-sets/pallet/src/lib.rs index b99fc2f5..db5a6b06 100644 --- a/substrate/validator-sets/pallet/src/lib.rs +++ b/substrate/validator-sets/pallet/src/lib.rs @@ -9,7 +9,8 @@ pub mod pallet { use frame_support::pallet_prelude::*; use serai_primitives::*; - use validator_sets_primitives::*; + pub use validator_sets_primitives as primitives; + use primitives::*; #[pallet::config] pub trait Config: frame_system::Config + TypeInfo { @@ -101,7 +102,7 @@ pub mod pallet { } ValidatorSets::::set( - ValidatorSetInstance(Session(0), ValidatorSetIndex(0)), + ValidatorSetInstance { session: Session(0), index: ValidatorSetIndex(0) }, Some(ValidatorSet { bond: self.bond, coins: BoundedVec::try_from(self.coins.clone()).unwrap(), @@ -160,7 +161,7 @@ pub mod pallet { let session: Session = Session(0); // Confirm a key hasn't been set for this set instance - let instance = ValidatorSetInstance(session, index); + let instance = ValidatorSetInstance { session, index }; if Keys::::get((instance, coin)).is_some() { Err(Error::::AlreadyGeneratedKeys)?; } diff --git a/substrate/validator-sets/primitives/Cargo.toml b/substrate/validator-sets/primitives/Cargo.toml index e458ae95..4b2aca04 100644 --- a/substrate/validator-sets/primitives/Cargo.toml +++ b/substrate/validator-sets/primitives/Cargo.toml @@ -12,11 +12,13 @@ all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] -scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } +zeroize = { version = "^1.5", features = ["derive"], optional = true } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2", default-features = false, features = ["derive"] } serde = { version = "1", features = ["derive"], optional = true } [features] -std = ["scale/std", "scale-info/std", "serde"] +std = ["zeroize", "scale/std", "scale-info/std", "serde"] default = ["std"] diff --git a/substrate/validator-sets/primitives/src/lib.rs b/substrate/validator-sets/primitives/src/lib.rs index 642d2800..2604c039 100644 --- a/substrate/validator-sets/primitives/src/lib.rs +++ b/substrate/validator-sets/primitives/src/lib.rs @@ -1,21 +1,28 @@ #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(feature = "std")] +use zeroize::Zeroize; + use scale::{Encode, Decode, MaxEncodedLen}; use scale_info::TypeInfo; + #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; /// The type used to identify a specific session of validators. -#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] +#[cfg_attr(feature = "std", derive(Zeroize, Serialize, Deserialize))] pub struct Session(pub u32); /// The type used to identify a validator set. 
-#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] +#[cfg_attr(feature = "std", derive(Zeroize, Serialize, Deserialize))] pub struct ValidatorSetIndex(pub u16); /// The type used to identify a specific validator set during a specific session. -#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub struct ValidatorSetInstance(pub Session, pub ValidatorSetIndex); +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] +#[cfg_attr(feature = "std", derive(Zeroize, Serialize, Deserialize))] +pub struct ValidatorSetInstance { + pub session: Session, + pub index: ValidatorSetIndex, +}
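Since ValidatorSetInstance now uses named fields, construction sites self-document, while the on-wire form is unchanged: SCALE concatenates struct fields in declaration order, exactly as it encoded the old tuple struct. A quick sketch of that equivalence (assumes the workspace's `scale` rename and the validator_sets_primitives crate name):

```rust
use scale::Encode;
use validator_sets_primitives::{Session, ValidatorSetIndex, ValidatorSetInstance};

fn main() {
  let instance = ValidatorSetInstance { session: Session(0), index: ValidatorSetIndex(0) };
  // Encodes identically to the old ValidatorSetInstance(Session(0), ValidatorSetIndex(0)):
  // SCALE emits the fields back to back in declaration order either way
  assert_eq!(instance.encode(), (Session(0), ValidatorSetIndex(0)).encode());
}
```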