From 7b4c5dbe529266f2b29376ce397d16c7b7bc4fdb Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 31 May 2022 02:12:14 -0400 Subject: [PATCH 001/105] Remove rng_seed's additional entropy It was never used as we derive entropy via the other fields in the transcript, and explicitly add fields directly as needed for entropy. Also drops an unused crate and corrects a bug in FROST's Schnorr implementation which used the Group's generator, instead of the Curve's. Also updates the Monero crate's description. --- coins/monero/Cargo.toml | 2 +- coins/monero/src/ringct/clsag/multisig.rs | 2 +- coins/monero/src/wallet/send/multisig.rs | 8 ++++---- crypto/frost/Cargo.toml | 1 - crypto/frost/src/algorithm.rs | 2 +- crypto/frost/src/schnorr.rs | 3 +-- crypto/transcript/src/lib.rs | 8 ++------ crypto/transcript/src/merlin.rs | 6 +----- 8 files changed, 11 insertions(+), 21 deletions(-) diff --git a/coins/monero/Cargo.toml b/coins/monero/Cargo.toml index daa99000..e9fdeaf0 100644 --- a/coins/monero/Cargo.toml +++ b/coins/monero/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "monero-serai" version = "0.1.0" -description = "Implementation of Monero transaction signing in Rust" +description = "A modern Monero wallet library" license = "MIT" authors = ["Luke Parker "] edition = "2021" diff --git a/coins/monero/src/ringct/clsag/multisig.rs b/coins/monero/src/ringct/clsag/multisig.rs index f4b01779..c42ac029 100644 --- a/coins/monero/src/ringct/clsag/multisig.rs +++ b/coins/monero/src/ringct/clsag/multisig.rs @@ -206,7 +206,7 @@ impl Algorithm for ClsagMultisig { // process even if they have access to commitments (specifically, the ring index being signed // for, along with the mask which should not only require knowing the shared keys yet also the // input commitment masks) - let mut rng = ChaCha12Rng::from_seed(self.transcript.rng_seed(b"decoy_responses", None)); + let mut rng = ChaCha12Rng::from_seed(self.transcript.rng_seed(b"decoy_responses")); self.msg = 
Some(msg.try_into().expect("CLSAG message should be 32-bytes")); diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index 12427561..1656b1ee 100644 --- a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -90,7 +90,7 @@ impl SignableTransaction { let decoys = Decoys::select( // Using a seeded RNG with a specific height, committed to above, should make these decoys // committed to. They'll also be committed to later via the TX message as a whole - &mut ChaCha12Rng::from_seed(transcript.rng_seed(b"decoys", None)), + &mut ChaCha12Rng::from_seed(transcript.rng_seed(b"decoys")), rpc, height, &self.inputs @@ -216,7 +216,7 @@ impl StateMachine for TransactionMachine { // Not invalid outputs due to already doing a dummy prep let (commitments, output_masks) = self.signable.prepare_outputs( - &mut ChaCha12Rng::from_seed(self.transcript.rng_seed(b"tx_keys", None)), + &mut ChaCha12Rng::from_seed(self.transcript.rng_seed(b"tx_keys")), uniqueness( &images.iter().map(|image| Input::ToKey { amount: 0, @@ -230,7 +230,7 @@ impl StateMachine for TransactionMachine { self.signable.prepare_transaction( &commitments, Bulletproofs::new( - &mut ChaCha12Rng::from_seed(self.transcript.rng_seed(b"bulletproofs", None)), + &mut ChaCha12Rng::from_seed(self.transcript.rng_seed(b"bulletproofs")), &commitments ).unwrap() ) @@ -249,7 +249,7 @@ impl StateMachine for TransactionMachine { } sorted.sort_by(|x, y| x.2.compress().to_bytes().cmp(&y.2.compress().to_bytes()).reverse()); - let mut rng = ChaCha12Rng::from_seed(self.transcript.rng_seed(b"pseudo_out_masks", None)); + let mut rng = ChaCha12Rng::from_seed(self.transcript.rng_seed(b"pseudo_out_masks")); let mut sum_pseudo_outs = Scalar::zero(); while sorted.len() != 0 { let value = sorted.remove(0); diff --git a/crypto/frost/Cargo.toml b/crypto/frost/Cargo.toml index efe7a5a0..e0ae0b85 100644 --- a/crypto/frost/Cargo.toml +++ b/crypto/frost/Cargo.toml @@ -14,7 
+14,6 @@ rand_core = "0.6" ff = "0.11" group = "0.11" -blake2 = "0.10" transcript = { path = "../transcript" } multiexp = { path = "../multiexp", features = ["batch"] } diff --git a/crypto/frost/src/algorithm.rs b/crypto/frost/src/algorithm.rs index 2d00f508..fbd1dec8 100644 --- a/crypto/frost/src/algorithm.rs +++ b/crypto/frost/src/algorithm.rs @@ -72,7 +72,7 @@ impl Transcript for IetfTranscript { self.0.clone() } - fn rng_seed(&mut self, _: &'static [u8], _: Option<[u8; 32]>) -> [u8; 32] { + fn rng_seed(&mut self, _: &'static [u8]) -> [u8; 32] { unimplemented!() } } diff --git a/crypto/frost/src/schnorr.rs b/crypto/frost/src/schnorr.rs index 31cc6065..238d8f4b 100644 --- a/crypto/frost/src/schnorr.rs +++ b/crypto/frost/src/schnorr.rs @@ -1,7 +1,6 @@ use rand_core::{RngCore, CryptoRng}; use ff::Field; -use group::Group; use multiexp::BatchVerifier; @@ -46,7 +45,7 @@ pub(crate) fn batch_verify( rng: &mut R, triplets: &[(u16, C::G, C::F, SchnorrSignature)] ) -> Result<(), u16> { - let mut values = [(C::F::one(), C::G::generator()); 3]; + let mut values = [(C::F::one(), C::generator()); 3]; let mut batch = BatchVerifier::new(triplets.len(), C::little_endian()); for triple in triplets { // s = r + ca diff --git a/crypto/transcript/src/lib.rs b/crypto/transcript/src/lib.rs index 483267a2..5a04ada8 100644 --- a/crypto/transcript/src/lib.rs +++ b/crypto/transcript/src/lib.rs @@ -11,7 +11,7 @@ pub trait Transcript { fn domain_separate(&mut self, label: &[u8]); fn append_message(&mut self, label: &'static [u8], message: &[u8]); fn challenge(&mut self, label: &'static [u8]) -> Vec; - fn rng_seed(&mut self, label: &'static [u8], additional_entropy: Option<[u8; 32]>) -> [u8; 32]; + fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32]; } #[derive(Clone, Debug)] @@ -49,11 +49,7 @@ impl Transcript for DigestTranscript { D::new().chain_update(&self.0).finalize().to_vec() } - fn rng_seed(&mut self, label: &'static [u8], additional_entropy: Option<[u8; 32]>) -> [u8; 32] { - if 
additional_entropy.is_some() { - self.append_message(b"additional_entropy", &additional_entropy.unwrap()); - } - + fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32] { let mut seed = [0; 32]; seed.copy_from_slice(&self.challenge(label)[0 .. 32]); seed diff --git a/crypto/transcript/src/merlin.rs b/crypto/transcript/src/merlin.rs index 88dfa9c3..18671545 100644 --- a/crypto/transcript/src/merlin.rs +++ b/crypto/transcript/src/merlin.rs @@ -30,11 +30,7 @@ impl Transcript for MerlinTranscript { challenge } - fn rng_seed(&mut self, label: &'static [u8], additional_entropy: Option<[u8; 32]>) -> [u8; 32] { - if additional_entropy.is_some() { - transcript.append_message(b"additional_entropy", &additional_entropy.unwrap()); - } - + fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32] { let mut seed = [0; 32]; transcript.challenge_bytes(label, &mut seed); seed From d8e794871c4a41dba495a58570b2856a4573fa96 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 1 Jun 2022 01:58:07 -0400 Subject: [PATCH 002/105] Move the processor to AGPL See https://github.com/serai-dex/serai/issues/13. 
--- processor/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/processor/Cargo.toml b/processor/Cargo.toml index aa687755..c82e4586 100644 --- a/processor/Cargo.toml +++ b/processor/Cargo.toml @@ -2,7 +2,7 @@ name = "serai-processor" version = "0.1.0" description = "Multichain processor premised on canonicity to reach distributed consensus automatically" -license = "MIT" +license = "AGPL-3.0-only" authors = ["Luke Parker "] edition = "2021" From df2876dbd411b32538d151edad775f1a4d0842ac Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 1 Jun 2022 03:30:57 -0400 Subject: [PATCH 003/105] Acknowledge Substrate's ordering and move to a multi-key wallet setup --- processor/Cargo.toml | 1 + processor/src/coins/monero.rs | 40 +++++++------ processor/src/lib.rs | 22 +++---- processor/src/tests/mod.rs | 14 ++++- processor/src/wallet.rs | 104 ++++++++++++++++++++++++++++------ 5 files changed, 135 insertions(+), 46 deletions(-) diff --git a/processor/Cargo.toml b/processor/Cargo.toml index c82e4586..14c0c487 100644 --- a/processor/Cargo.toml +++ b/processor/Cargo.toml @@ -21,4 +21,5 @@ monero = { version = "0.16", features = ["experimental"] } monero-serai = { path = "../coins/monero", features = ["multisig"] } [dev-dependencies] +rand = "0.8" tokio = { version = "1", features = ["full"] } diff --git a/processor/src/coins/monero.rs b/processor/src/coins/monero.rs index 614b01a4..55b5e911 100644 --- a/processor/src/coins/monero.rs +++ b/processor/src/coins/monero.rs @@ -1,24 +1,30 @@ use async_trait::async_trait; use rand_core::{RngCore, CryptoRng}; -use curve25519_dalek::scalar::Scalar; +use curve25519_dalek::{scalar::Scalar, edwards::CompressedEdwardsY}; use dalek_ff_group as dfg; use frost::MultisigKeys; use monero::util::address::Address; -use monero_serai::{frost::Ed25519, rpc::Rpc, wallet::{SpendableOutput, SignableTransaction}}; +use monero_serai::{ + frost::Ed25519, + transaction::Transaction, + rpc::Rpc, + wallet::{SpendableOutput, 
SignableTransaction} +}; use crate::{Output as OutputTrait, CoinError, Coin, view_key}; pub struct Output(SpendableOutput); impl OutputTrait for Output { - // If Monero ever does support more than 255 outputs at once, which it could, this u8 could be a - // u16 which serializes as little endian, dropping the last byte if empty, without conflict - type Id = ([u8; 32], u8); + // While we could use (tx, o), using the key ensures we won't be susceptible to the burning bug. + // While the Monero library offers a variant which allows senders to ensure their TXs have unique + // output keys, Serai can still be targeted using the classic burning bug + type Id = CompressedEdwardsY; fn id(&self) -> Self::Id { - (self.0.tx, self.0.o.try_into().unwrap()) + self.0.key.compress() } fn amount(&self) -> u64 { @@ -59,34 +65,32 @@ impl Coin for Monero { type Curve = Ed25519; type Output = Output; + type Block = Vec; type SignableTransaction = SignableTransaction; type Address = Address; fn id() -> &'static [u8] { b"Monero" } - async fn confirmations() -> usize { 10 } + fn confirmations() -> usize { 10 } // Testnet TX bb4d188a4c571f2f0de70dca9d475abc19078c10ffa8def26dd4f63ce1bcfd79 uses 146 inputs // while using less than 100kb of space, albeit with just 2 outputs (though outputs share a BP) // The TX size limit is half the contextual median block weight, where said weight is >= 300,000 // This means any TX which fits into 150kb will be accepted by Monero // 128, even with 16 outputs, should fit into 100kb. 
Further efficiency by 192 may be viable // TODO: Get hard numbers and tune - async fn max_inputs() -> usize { 128 } - async fn max_outputs() -> usize { 16 } + fn max_inputs() -> usize { 128 } + fn max_outputs() -> usize { 16 } async fn get_height(&self) -> Result { self.rpc.get_height().await.map_err(|_| CoinError::ConnectionError) } - async fn get_outputs_in_block( - &self, - height: usize, - key: dfg::EdwardsPoint - ) -> Result, CoinError> { - Ok( - self.rpc.get_block_transactions_possible(height).await.map_err(|_| CoinError::ConnectionError)? - .iter().flat_map(|tx| tx.scan(self.view, key.0)).map(Output::from).collect() - ) + async fn get_block(&self, height: usize) -> Result { + self.rpc.get_block_transactions_possible(height).await.map_err(|_| CoinError::ConnectionError) + } + + async fn get_outputs(&self, block: &Self::Block, key: dfg::EdwardsPoint) -> Vec { + block.iter().flat_map(|tx| tx.scan(self.view, key.0)).map(Output::from).collect() } async fn prepare_send( diff --git a/processor/src/lib.rs b/processor/src/lib.rs index 037c45b0..e87ef456 100644 --- a/processor/src/lib.rs +++ b/processor/src/lib.rs @@ -14,7 +14,7 @@ mod wallet; #[cfg(test)] mod tests; -trait Output: Sized { +pub trait Output: Sized { type Id; fn id(&self) -> Self::Id; @@ -25,31 +25,33 @@ trait Output: Sized { } #[derive(Clone, Error, Debug)] -enum CoinError { +pub enum CoinError { #[error("failed to connect to coin daemon")] ConnectionError } #[async_trait] -trait Coin { +pub trait Coin { type Curve: Curve; type Output: Output; + type Block; type SignableTransaction; type Address: Send; fn id() -> &'static [u8]; - async fn confirmations() -> usize; - async fn max_inputs() -> usize; - async fn max_outputs() -> usize; + fn confirmations() -> usize; + fn max_inputs() -> usize; + fn max_outputs() -> usize; async fn get_height(&self) -> Result; - async fn get_outputs_in_block( + async fn get_block(&self, height: usize) -> Result; + async fn get_outputs( &self, - height: usize, + block: 
&Self::Block, key: ::G - ) -> Result, CoinError>; + ) -> Vec; async fn prepare_send( &self, @@ -73,6 +75,6 @@ trait Coin { // Takes an index, k, for more modern privacy protocols which use multiple view keys // Doesn't run Curve::hash_to_F, instead returning the hash object, due to hash_to_F being a FROST // definition instead of a wide reduction from a hash object -fn view_key(k: u64) -> Blake2b512 { +pub fn view_key(k: u64) -> Blake2b512 { Blake2b512::new().chain(b"Serai DEX View Key").chain(C::id()).chain(k.to_le_bytes()) } diff --git a/processor/src/tests/mod.rs b/processor/src/tests/mod.rs index 272342f4..1bddc91a 100644 --- a/processor/src/tests/mod.rs +++ b/processor/src/tests/mod.rs @@ -1,6 +1,16 @@ -use crate::{Coin, coins::monero::Monero}; +use std::rc::Rc; + +use rand::rngs::OsRng; + +use crate::{Coin, coins::monero::Monero, wallet::{WalletKeys, Wallet}}; #[tokio::test] async fn test() { - println!("{}", Monero::new("http://127.0.0.1:18081".to_string()).get_height().await.unwrap()); + let monero = Monero::new("http://127.0.0.1:18081".to_string()); + println!("{}", monero.get_height().await.unwrap()); + let mut keys = frost::tests::key_gen::<_, ::Curve>(&mut OsRng); + let mut wallet = Wallet::new(monero); + wallet.acknowledge_height(0, 0); + wallet.add_keys(&WalletKeys::new(Rc::try_unwrap(keys.remove(&1).take().unwrap()).unwrap(), 0)); + dbg!(0); } diff --git a/processor/src/wallet.rs b/processor/src/wallet.rs index 347bd787..c83a7f5a 100644 --- a/processor/src/wallet.rs +++ b/processor/src/wallet.rs @@ -1,30 +1,102 @@ +use std::collections::HashMap; + use frost::{Curve, MultisigKeys}; -use crate::Coin; +use crate::{CoinError, Coin}; -struct Wallet { - keys: MultisigKeys, +pub struct WalletKeys { + keys: MultisigKeys, + creation_height: usize +} + +impl WalletKeys { + pub fn new(keys: MultisigKeys, creation_height: usize) -> WalletKeys { + WalletKeys { keys, creation_height } + } + + // Bind this key to a specific network by applying an additive offset + 
// While it would be fine to just C::id(), including the group key creates distinct + // offsets instead of static offsets. Under a statically offset system, a BTC key could + // have X subtracted to find the potential group key, and then have Y added to find the + // potential ETH group key. While this shouldn't be an issue, as this isn't a private + // system, there are potentially other benefits to binding this to a specific group key + // It's no longer possible to influence group key gen to key cancel without breaking the hash + // function, although that degree of influence means key gen is broken already + fn bind(&self, chain: &[u8]) -> MultisigKeys { + self.keys.offset( + C::hash_to_F( + &[ + b"Serai Processor Wallet", + chain, + &C::G_to_bytes(&self.keys.group_key()) + ].concat() + ) + ) + } +} + +pub struct CoinDb { + // Height this coin has been scanned to + scanned_height: usize, + // Acknowledged height for a given canonical height + acknowledged_heights: HashMap +} + +pub struct Wallet { + db: CoinDb, + coin: C, + keys: Vec>, + pending: Vec<(usize, MultisigKeys)>, outputs: Vec } impl Wallet { - fn new(keys: &MultisigKeys) -> Wallet { + pub fn new(coin: C) -> Wallet { Wallet { - keys: keys.offset( - C::Curve::hash_to_F( - // Use distinct keys on each network by applying an additive offset - // While it would be fine to just C::id(), including the group key creates distinct - // offsets instead of static offsets. Under a statically offset system, a BTC key could - // have X subtracted to find the potential group key, and then have Y added to find the - // potential BCH group key. 
While this shouldn't be an issue, as this isn't a private - // system, there are potentially other benefits to binding this to a specific group key - &[b"Serai Processor Wallet", C::id(), &C::Curve::G_to_bytes(&keys.group_key())].concat() - ) - ), + db: CoinDb { + scanned_height: 0, + acknowledged_heights: HashMap::new(), + }, + coin, + + keys: vec![], + pending: vec![], outputs: vec![] } } - async fn poll() { todo!() } + pub fn scanned_height(&self) -> usize { self.db.scanned_height } + pub fn acknowledge_height(&mut self, canonical: usize, height: usize) { + debug_assert!(!self.db.acknowledged_heights.contains_key(&canonical)); + self.db.acknowledged_heights.insert(canonical, height); + } + pub fn acknowledged_height(&self, canonical: usize) -> usize { + self.db.acknowledged_heights[&canonical] + } + + pub fn add_keys(&mut self, keys: &WalletKeys) { + // Doesn't use +1 as this is height, not block index, and poll moves by block index + self.pending.push((self.acknowledged_height(keys.creation_height), keys.bind(C::id()))); + } + + pub async fn poll(&mut self) -> Result<(), CoinError> { + let confirmed_height = self.coin.get_height().await? - C::confirmations(); + for h in self.scanned_height() .. 
confirmed_height { + let mut k = 0; + while k < self.pending.len() { + if h == self.pending[k].0 { + self.keys.push(self.pending.swap_remove(k).1); + } else { + k += 1; + } + } + + let block = self.coin.get_block(h).await?; + for keys in &self.keys { + let outputs = self.coin.get_outputs(&block, keys.group_key()); + } + } + Ok(()) + } } From 2ae715f899be50fa6e266f99a1b866d5817dc9ea Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 1 Jun 2022 17:14:57 -0400 Subject: [PATCH 004/105] Ignore transactions which use a timelock --- coins/monero/src/wallet/scan.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/coins/monero/src/wallet/scan.rs b/coins/monero/src/wallet/scan.rs index df5d1d6f..644a3a73 100644 --- a/coins/monero/src/wallet/scan.rs +++ b/coins/monero/src/wallet/scan.rs @@ -58,6 +58,12 @@ impl Transaction { view: Scalar, spend: EdwardsPoint ) -> Vec { + // Ignore transactions which utilize a timelock. Almost no transactions on Monero do, + // and they're not worth the effort to track given their complexities + if self.prefix.unlock_time != 0 { + return vec![]; + } + let mut extra = vec![]; write_varint(&u64::try_from(self.prefix.extra.len()).unwrap(), &mut extra).unwrap(); extra.extend(&self.prefix.extra); From dfd2f624eea53686a367dd007982d5aea438cdf3 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 2 Jun 2022 00:00:26 -0400 Subject: [PATCH 005/105] Implement a proper Monero Timelock type Transaction scanning now returns the timelock to ensure it's acknowledged by wallets. Fixes https://github.com/serai-dex/serai/issues/16. 
--- coins/monero/src/rpc.rs | 11 +++++---- coins/monero/src/transaction.rs | 36 ++++++++++++++++++++++++++--- coins/monero/src/wallet/scan.rs | 15 ++++-------- coins/monero/src/wallet/send/mod.rs | 4 ++-- coins/monero/tests/send.rs | 4 ++-- processor/src/coins/monero.rs | 15 ++++++++++-- 6 files changed, 62 insertions(+), 23 deletions(-) diff --git a/coins/monero/src/rpc.rs b/coins/monero/src/rpc.rs index 13ba9026..a609901c 100644 --- a/coins/monero/src/rpc.rs +++ b/coins/monero/src/rpc.rs @@ -9,7 +9,7 @@ use serde_json::json; use reqwest; -use crate::{transaction::{Input, Transaction}, block::Block}; +use crate::{transaction::{Input, Timelock, Transaction}, block::Block}; #[derive(Deserialize, Debug)] pub struct EmptyResponse {} @@ -267,9 +267,12 @@ impl Rpc { // get the median time for the given height, yet we do need to in order to be complete outs.outs.iter().enumerate().map( |(i, out)| Ok( - if txs[i].prefix.unlock_time <= u64::try_from(height).unwrap() { - Some([rpc_point(&out.key)?, rpc_point(&out.mask)?]) - } else { None } + Some([rpc_point(&out.key)?, rpc_point(&out.mask)?]).filter(|_| { + match txs[i].prefix.timelock { + Timelock::Block(t_height) => (t_height <= height), + _ => false + } + }) ) ).collect() } diff --git a/coins/monero/src/transaction.rs b/coins/monero/src/transaction.rs index da18773d..338a16e5 100644 --- a/coins/monero/src/transaction.rs +++ b/coins/monero/src/transaction.rs @@ -84,10 +84,40 @@ impl Output { } } +#[derive(Clone, Copy, PartialEq, Debug)] +pub enum Timelock { + None, + Block(usize), + Time(u64) +} + +impl Timelock { + fn from_raw(raw: u64) -> Timelock { + if raw == 0 { + Timelock::None + } else if raw < 500_000_000 { + Timelock::Block(usize::try_from(raw).unwrap()) + } else { + Timelock::Time(raw) + } + } + + fn serialize(&self, w: &mut W) -> std::io::Result<()> { + write_varint( + &match self { + Timelock::None => 0, + Timelock::Block(block) => (*block).try_into().unwrap(), + Timelock::Time(time) => *time + }, + w + ) + } 
+} + #[derive(Clone, PartialEq, Debug)] pub struct TransactionPrefix { pub version: u64, - pub unlock_time: u64, + pub timelock: Timelock, pub inputs: Vec, pub outputs: Vec, pub extra: Vec @@ -96,7 +126,7 @@ pub struct TransactionPrefix { impl TransactionPrefix { pub fn serialize(&self, w: &mut W) -> std::io::Result<()> { write_varint(&self.version, w)?; - write_varint(&self.unlock_time, w)?; + self.timelock.serialize(w)?; write_vec(Input::serialize, &self.inputs, w)?; write_vec(Output::serialize, &self.outputs, w)?; write_varint(&self.extra.len().try_into().unwrap(), w)?; @@ -106,7 +136,7 @@ impl TransactionPrefix { pub fn deserialize(r: &mut R) -> std::io::Result { let mut prefix = TransactionPrefix { version: read_varint(r)?, - unlock_time: read_varint(r)?, + timelock: Timelock::from_raw(read_varint(r)?), inputs: read_vec(Input::deserialize, r)?, outputs: read_vec(Output::deserialize, r)?, extra: vec![] diff --git a/coins/monero/src/wallet/scan.rs b/coins/monero/src/wallet/scan.rs index 644a3a73..d8feb7da 100644 --- a/coins/monero/src/wallet/scan.rs +++ b/coins/monero/src/wallet/scan.rs @@ -11,7 +11,7 @@ use monero::{consensus::deserialize, blockdata::transaction::ExtraField}; use crate::{ Commitment, serialize::{write_varint, read_32, read_scalar, read_point}, - transaction::Transaction, + transaction::{Timelock, Transaction}, wallet::{uniqueness, shared_key, amount_decryption, commitment_mask} }; @@ -57,13 +57,7 @@ impl Transaction { &self, view: Scalar, spend: EdwardsPoint - ) -> Vec { - // Ignore transactions which utilize a timelock. 
Almost no transactions on Monero do, - // and they're not worth the effort to track given their complexities - if self.prefix.unlock_time != 0 { - return vec![]; - } - + ) -> (Vec, Timelock) { let mut extra = vec![]; write_varint(&u64::try_from(self.prefix.extra.len()).unwrap(), &mut extra).unwrap(); extra.extend(&self.prefix.extra); @@ -81,7 +75,7 @@ impl Transaction { pubkeys = m_pubkeys.iter().map(|key| key.point.decompress()).filter_map(|key| key).collect(); } else { - return vec![]; + return (vec![], self.prefix.timelock); }; let mut res = vec![]; @@ -136,6 +130,7 @@ impl Transaction { } } } - res + + (res, self.prefix.timelock) } } diff --git a/coins/monero/src/wallet/send/mod.rs b/coins/monero/src/wallet/send/mod.rs index a7274b70..8ad88a5e 100644 --- a/coins/monero/src/wallet/send/mod.rs +++ b/coins/monero/src/wallet/send/mod.rs @@ -27,7 +27,7 @@ use crate::{ bulletproofs::Bulletproofs, RctBase, RctPrunable, RctSignatures }, - transaction::{Input, Output, TransactionPrefix, Transaction}, + transaction::{Input, Output, Timelock, TransactionPrefix, Transaction}, rpc::{Rpc, RpcError}, wallet::{SpendableOutput, Decoys, key_image_sort, uniqueness, shared_key, commitment_mask, amount_encryption} }; @@ -255,7 +255,7 @@ impl SignableTransaction { Transaction { prefix: TransactionPrefix { version: 2, - unlock_time: 0, + timelock: Timelock::None, inputs: vec![], outputs: tx_outputs, extra diff --git a/coins/monero/tests/send.rs b/coins/monero/tests/send.rs index cb7a2b4f..19cc6fdf 100644 --- a/coins/monero/tests/send.rs +++ b/coins/monero/tests/send.rs @@ -100,7 +100,7 @@ async fn send_core(test: usize, multisig: bool) { // Grab the largest output available let output = { - let mut outputs = tx.as_ref().unwrap().scan(view, spend_pub); + let mut outputs = tx.as_ref().unwrap().scan(view, spend_pub).0; outputs.sort_by(|x, y| x.commitment.amount.cmp(&y.commitment.amount).reverse()); outputs.swap_remove(0) }; @@ -125,7 +125,7 @@ async fn send_core(test: usize, multisig: 
bool) { for i in (start + 1) .. (start + 9) { let tx = rpc.get_block_transactions(i).await.unwrap().swap_remove(0); - let output = tx.scan(view, spend_pub).swap_remove(0); + let output = tx.scan(view, spend_pub).0.swap_remove(0); amount += output.commitment.amount; outputs.push(output); } diff --git a/processor/src/coins/monero.rs b/processor/src/coins/monero.rs index 55b5e911..6e9140f9 100644 --- a/processor/src/coins/monero.rs +++ b/processor/src/coins/monero.rs @@ -9,7 +9,7 @@ use frost::MultisigKeys; use monero::util::address::Address; use monero_serai::{ frost::Ed25519, - transaction::Transaction, + transaction::{Timelock, Transaction}, rpc::Rpc, wallet::{SpendableOutput, SignableTransaction} }; @@ -90,7 +90,18 @@ impl Coin for Monero { } async fn get_outputs(&self, block: &Self::Block, key: dfg::EdwardsPoint) -> Vec { - block.iter().flat_map(|tx| tx.scan(self.view, key.0)).map(Output::from).collect() + block + .iter() + .flat_map(|tx| { + let (outputs, timelock) = tx.scan(self.view, key.0); + if timelock == Timelock::None { + outputs + } else { + vec![] + } + }) + .map(Output::from) + .collect() } async fn prepare_send( From de9710413a33fafa29b46841b31135dd3789a1f2 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 3 Jun 2022 00:55:41 -0400 Subject: [PATCH 006/105] Use big endian throughout FROST Slightly changes serialization of keys to be t-n-i instead of n-t-i. 
--- crypto/frost/src/lib.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/crypto/frost/src/lib.rs b/crypto/frost/src/lib.rs index 5d2c5f19..35071889 100644 --- a/crypto/frost/src/lib.rs +++ b/crypto/frost/src/lib.rs @@ -312,9 +312,9 @@ impl MultisigKeys { ); serialized.push(C::id_len()); serialized.extend(C::id().as_bytes()); - serialized.extend(&self.params.n.to_le_bytes()); - serialized.extend(&self.params.t.to_le_bytes()); - serialized.extend(&self.params.i.to_le_bytes()); + serialized.extend(&self.params.t.to_be_bytes()); + serialized.extend(&self.params.n.to_be_bytes()); + serialized.extend(&self.params.i.to_be_bytes()); serialized.extend(&C::F_to_bytes(&self.secret_share)); serialized.extend(&C::G_to_bytes(&self.group_key)); for l in 1 ..= self.params.n.into() { @@ -346,19 +346,20 @@ impl MultisigKeys { } cursor += id_len; - if serialized.len() < (cursor + 8) { - Err(FrostError::InternalError("participant quantity wasn't included".to_string()))?; + if serialized.len() < (cursor + 4) { + Err(FrostError::InternalError("participant quantities weren't included".to_string()))?; } - let n = u16::from_le_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap()); + let t = u16::from_be_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap()); + cursor += 2; + + let n = u16::from_be_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap()); cursor += 2; if serialized.len() != MultisigKeys::::serialized_len(n) { Err(FrostError::InternalError("incorrect serialization length".to_string()))?; } - let t = u16::from_le_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap()); - cursor += 2; - let i = u16::from_le_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap()); + let i = u16::from_be_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap()); cursor += 2; let secret_share = C::F_from_slice(&serialized[cursor .. 
(cursor + C::F_len())]) From 44452d9bfe0a2437ffac98008dbc6e6027e77e46 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 3 Jun 2022 01:25:46 -0400 Subject: [PATCH 007/105] Verify being FROST v5 compliant No functional changes have been made to signing, with solely slight API changes being made. Technically not actually FROST v5 compatible, due to differing on zero checks and randomness, yet the vectors do confirm the core algorithm. For any valid FROST implementation, this will be interoperable if they can successfully communicate. For any devious FROST implementation, this will be fingerprintable, yet should still be valid. Relevant to https://github.com/serai-dex/serai/issues/9 as any curve can now specify vectors for itself and be tested against them. Moves the FROST testing curve from k256 to p256. Does not expose p256 despite being compliant. It's not at a point I'm happy with it, notably regarding hash to curve, and I'm not sure I care to support p256. If it has value to the larger FROST ecosystem... 
--- coins/monero/src/frost.rs | 14 +- crypto/frost/Cargo.toml | 3 +- crypto/frost/src/key_gen.rs | 7 +- crypto/frost/src/lib.rs | 18 +- crypto/frost/src/sign.rs | 16 +- crypto/frost/src/tests/literal/mod.rs | 2 +- crypto/frost/src/tests/literal/p256.rs | 222 ++++++++++++++++++++ crypto/frost/src/tests/literal/schnorr.rs | 16 +- crypto/frost/src/tests/literal/secp256k1.rs | 120 ----------- crypto/frost/src/tests/mod.rs | 1 + crypto/frost/src/tests/vectors.rs | 117 +++++++++++ 11 files changed, 387 insertions(+), 149 deletions(-) create mode 100644 crypto/frost/src/tests/literal/p256.rs delete mode 100644 crypto/frost/src/tests/literal/secp256k1.rs create mode 100644 crypto/frost/src/tests/vectors.rs diff --git a/coins/monero/src/frost.rs b/coins/monero/src/frost.rs index dfe24ad6..0da52dc0 100644 --- a/coins/monero/src/frost.rs +++ b/coins/monero/src/frost.rs @@ -39,14 +39,14 @@ impl Curve for Ed25519 { type G = dfg::EdwardsPoint; type T = &'static dfg::EdwardsBasepointTable; - fn id() -> String { - "Ed25519".to_string() - } - fn id_len() -> u8 { u8::try_from(Self::id().len()).unwrap() } + fn id() -> &'static [u8] { + b"Ed25519" + } + fn generator() -> Self::G { Self::G::generator() } @@ -67,11 +67,11 @@ impl Curve for Ed25519 { } fn hash_binding_factor(binding: &[u8]) -> Self::F { - Self::hash_to_F(&[b"rho", binding].concat()) + Self::hash_to_F(b"rho", binding) } - fn hash_to_F(data: &[u8]) -> Self::F { - dfg::Scalar::from_hash(Blake2b512::new().chain(data)) + fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F { + dfg::Scalar::from_hash(Blake2b512::new().chain(dst).chain(msg)) } fn F_len() -> usize { diff --git a/crypto/frost/Cargo.toml b/crypto/frost/Cargo.toml index e0ae0b85..d5f5f2dc 100644 --- a/crypto/frost/Cargo.toml +++ b/crypto/frost/Cargo.toml @@ -10,6 +10,7 @@ edition = "2021" thiserror = "1" rand_core = "0.6" +hex = "0.4" ff = "0.11" group = "0.11" @@ -21,4 +22,4 @@ multiexp = { path = "../multiexp", features = ["batch"] } [dev-dependencies] rand = "0.8" 
sha2 = "0.10" -k256 = { version = "0.10", features = ["arithmetic"] } +p256 = { version = "0.10", features = ["arithmetic"] } diff --git a/crypto/frost/src/key_gen.rs b/crypto/frost/src/key_gen.rs index 67e32a76..643a2454 100644 --- a/crypto/frost/src/key_gen.rs +++ b/crypto/frost/src/key_gen.rs @@ -16,8 +16,13 @@ use crate::{ #[allow(non_snake_case)] fn challenge(context: &str, l: u16, R: &[u8], Am: &[u8]) -> C::F { const DST: &'static [u8] = b"FROST Schnorr Proof of Knowledge"; + // Uses hash_msg to get a fixed size value out of the context string - C::hash_to_F(&[DST, &C::hash_msg(context.as_bytes()), &l.to_be_bytes(), R, Am].concat()) + let mut transcript = C::hash_msg(context.as_bytes()); + transcript.extend(l.to_be_bytes()); + transcript.extend(R); + transcript.extend(Am); + C::hash_to_F(DST, &transcript) } // Implements steps 1 through 3 of round 1 of FROST DKG. Returns the coefficients, commitments, and diff --git a/crypto/frost/src/lib.rs b/crypto/frost/src/lib.rs index 35071889..a600466a 100644 --- a/crypto/frost/src/lib.rs +++ b/crypto/frost/src/lib.rs @@ -40,12 +40,11 @@ pub trait Curve: Clone + Copy + PartialEq + Eq + Debug { /// Precomputed table type type T: Mul; - /// ID for this curve - fn id() -> String; /// Byte length of the curve ID - // While curve.id().len() is trivial, this bounds it to u8 and lets us ignore the possibility it - // contains Unicode, therefore having a String length which is different from its byte length + // While C::id().len() is trivial, this bounds it to u8 for any proper Curve implementation fn id_len() -> u8; + /// ID for this curve + fn id() -> &'static [u8]; /// Generator for the group // While group does provide this in its API, Jubjub users will want to use a custom basepoint @@ -79,7 +78,7 @@ pub trait Curve: Clone + Copy + PartialEq + Eq + Debug { // Not parameterized by Digest as it's fine for it to use its own hash function as relevant to // hash_msg and hash_binding_factor #[allow(non_snake_case)] - fn 
hash_to_F(data: &[u8]) -> Self::F; + fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F; /// Constant size of a serialized field element // The alternative way to grab this would be either serializing a junk element and getting its @@ -255,6 +254,10 @@ pub struct MultisigKeys { } impl MultisigKeys { + /// Offset the keys by a given scalar to allow for account and privacy schemes + /// This offset is ephemeral and will not be included when these keys are serialized + /// Keys offset multiple times will form a new offset of their sum + /// Not IETF compliant pub fn offset(&self, offset: C::F) -> MultisigKeys { let mut res = self.clone(); // Carry any existing offset @@ -311,7 +314,7 @@ impl MultisigKeys { 1 + usize::from(C::id_len()) + MultisigKeys::::serialized_len(self.params.n) ); serialized.push(C::id_len()); - serialized.extend(C::id().as_bytes()); + serialized.extend(C::id()); serialized.extend(&self.params.t.to_be_bytes()); serialized.extend(&self.params.n.to_be_bytes()); serialized.extend(&self.params.i.to_be_bytes()); @@ -336,8 +339,7 @@ impl MultisigKeys { Err(FrostError::InternalError("ID wasn't included".to_string()))?; } - let id = &serialized[cursor .. (cursor + id_len)]; - if C::id().as_bytes() != id { + if C::id() != &serialized[cursor .. 
(cursor + id_len)] { Err( FrostError::InternalError( "curve is distinct between serialization and deserialization".to_string() diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index 23a63f50..ae33735b 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -69,9 +69,9 @@ impl> Params { } } -struct PreprocessPackage { - nonces: [C::F; 2], - serialized: Vec, +pub(crate) struct PreprocessPackage { + pub(crate) nonces: [C::F; 2], + pub(crate) serialized: Vec, } // This library unifies the preprocessing step with signing due to security concerns and to provide @@ -306,6 +306,16 @@ impl> AlgorithmMachine { } ) } + + pub(crate) fn unsafe_override_preprocess(&mut self, preprocess: PreprocessPackage) { + if self.state != State::Fresh { + // This would be unacceptable, yet this is pub(crate) and explicitly labelled unsafe + // It's solely used in a testing environment, which is how it's justified + Err::<(), _>(FrostError::InvalidSignTransition(State::Fresh, self.state)).unwrap(); + } + self.preprocess = Some(preprocess); + self.state = State::Preprocessed; + } } impl> StateMachine for AlgorithmMachine { diff --git a/crypto/frost/src/tests/literal/mod.rs b/crypto/frost/src/tests/literal/mod.rs index d766f844..adb87b1a 100644 --- a/crypto/frost/src/tests/literal/mod.rs +++ b/crypto/frost/src/tests/literal/mod.rs @@ -1,2 +1,2 @@ -mod secp256k1; +mod p256; mod schnorr; diff --git a/crypto/frost/src/tests/literal/p256.rs b/crypto/frost/src/tests/literal/p256.rs new file mode 100644 index 00000000..75aa07bb --- /dev/null +++ b/crypto/frost/src/tests/literal/p256.rs @@ -0,0 +1,222 @@ +use core::convert::TryInto; + +use rand::rngs::OsRng; + +use ff::{Field, PrimeField}; +use group::GroupEncoding; + +use sha2::{digest::Update, Digest, Sha256}; + +use p256::{elliptic_curve::bigint::{Encoding, U384}, Scalar, ProjectivePoint}; + +use crate::{ + CurveError, Curve, + algorithm::Hram, + tests::{curve::test_curve, vectors::{Vectors, vectors}} +}; + +const 
CONTEXT_STRING: &[u8] = b"FROST-P256-SHA256-v5"; + +fn expand_message_xmd_sha256(dst: &[u8], msg: &[u8], len: u16) -> Option> { + const OUTPUT_SIZE: u16 = 32; + const BLOCK_SIZE: u16 = 64; + + let blocks = ((len + OUTPUT_SIZE) - 1) / OUTPUT_SIZE; + if blocks > 255 { + return None; + } + let blocks = blocks as u8; + + let mut dst = dst; + let oversize = Sha256::digest([b"H2C-OVERSIZE-DST-", dst].concat()); + if dst.len() > 255 { + dst = &oversize; + } + let dst_prime = &[dst, &[dst.len() as u8]].concat(); + + let mut msg_prime = vec![0; BLOCK_SIZE.into()]; + msg_prime.extend(msg); + msg_prime.extend(len.to_be_bytes()); + msg_prime.push(0); + msg_prime.extend(dst_prime); + + let mut b = vec![Sha256::digest(&msg_prime).to_vec()]; + + { + let mut b1 = b[0].clone(); + b1.push(1); + b1.extend(dst_prime); + b.push(Sha256::digest(&b1).to_vec()); + } + + for i in 2 ..= blocks { + let mut msg = b[0] + .iter().zip(b[usize::from(i) - 1].iter()) + .map(|(a, b)| *a ^ b).collect::>(); + msg.push(i); + msg.extend(dst_prime); + b.push(Sha256::digest(msg).to_vec()); + } + + Some(b[1 ..].concat()[.. 
usize::from(len)].to_vec()) +} + +#[test] +fn test_xmd_sha256() { + assert_eq!( + hex::encode(expand_message_xmd_sha256(b"QUUX-V01-CS02-with-expander", b"", 0x80).unwrap()), + ( + "8bcffd1a3cae24cf9cd7ab85628fd111bb17e3739d3b53f8".to_owned() + + "9580d217aa79526f1708354a76a402d3569d6a9d19ef3de4d0b991" + + "e4f54b9f20dcde9b95a66824cbdf6c1a963a1913d43fd7ac443a02" + + "fc5d9d8d77e2071b86ab114a9f34150954a7531da568a1ea8c7608" + + "61c0cde2005afc2c114042ee7b5848f5303f0611cf297f" + ) + ); +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct P256; +impl Curve for P256 { + type F = Scalar; + type G = ProjectivePoint; + type T = ProjectivePoint; + + fn id_len() -> u8 { + u8::try_from(Self::id().len()).unwrap() + } + + fn id() -> &'static [u8] { + b"P-256" + } + + fn generator() -> Self::G { + Self::G::GENERATOR + } + + fn generator_table() -> Self::T { + Self::G::GENERATOR + } + + fn little_endian() -> bool { + false + } + + fn hash_msg(msg: &[u8]) -> Vec { + (&Sha256::new() + .chain(CONTEXT_STRING) + .chain(b"digest") + .chain(msg) + .finalize() + ).to_vec() + } + + fn hash_binding_factor(binding: &[u8]) -> Self::F { + Self::hash_to_F(&[CONTEXT_STRING, b"rho"].concat(), binding) + } + + fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F { + let mut modulus = vec![0; 16]; + modulus.extend(&(Scalar::zero() - Scalar::one()).to_repr()); + let modulus = U384::from_be_slice(&modulus).wrapping_add(&U384::ONE); + Self::F_from_slice( + &U384::from_be_slice( + &expand_message_xmd_sha256(dst, msg, 48).unwrap() + ).reduce(&modulus).unwrap().to_be_bytes()[16 ..] 
+ ).unwrap() + } + + fn F_len() -> usize { + 32 + } + + fn G_len() -> usize { + 33 + } + + fn F_from_slice(slice: &[u8]) -> Result { + let bytes: [u8; 32] = slice.try_into() + .map_err(|_| CurveError::InvalidLength(32, slice.len()))?; + + let scalar = Scalar::from_repr(bytes.into()); + if scalar.is_none().into() { + Err(CurveError::InvalidScalar)?; + } + + Ok(scalar.unwrap()) + } + + fn G_from_slice(slice: &[u8]) -> Result { + let bytes: [u8; 33] = slice.try_into() + .map_err(|_| CurveError::InvalidLength(33, slice.len()))?; + + let point = ProjectivePoint::from_bytes(&bytes.into()); + if point.is_none().into() { + Err(CurveError::InvalidPoint)?; + } + + Ok(point.unwrap()) + } + + fn F_to_bytes(f: &Self::F) -> Vec { + (&f.to_bytes()).to_vec() + } + + fn G_to_bytes(g: &Self::G) -> Vec { + (&g.to_bytes()).to_vec() + } +} + +#[test] +fn p256_curve() { + test_curve::<_, P256>(&mut OsRng); +} + +#[allow(non_snake_case)] +#[derive(Clone)] +pub struct IetfP256Hram {} +impl Hram for IetfP256Hram { + #[allow(non_snake_case)] + fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar { + P256::hash_to_F( + &[CONTEXT_STRING, b"chal"].concat(), + &[&P256::G_to_bytes(R), &P256::G_to_bytes(A), m].concat() + ) + } +} + +#[test] +fn p256_vectors() { + vectors::( + Vectors { + threshold: 2, + shares: &[ + "0c9c1a0fe806c184add50bbdcac913dda73e482daf95dcb9f35dbb0d8a9f7731", + "8d8e787bef0ff6c2f494ca45f4dad198c6bee01212d6c84067159c52e1863ad5", + "0e80d6e8f6192c003b5488ce1eec8f5429587d48cf001541e713b2d53c09d928" + ], + group_secret: "8ba9bba2e0fd8c4767154d35a0b7562244a4aaf6f36c8fb8735fa48b301bd8de", + group_key: "023a309ad94e9fe8a7ba45dfc58f38bf091959d3c99cfbd02b4dc00585ec45ab70", + + msg: "74657374", + included: &[1, 3], + nonces: &[ + [ + "081617b24375e069b39f649d4c4ce2fba6e38b73e7c16759de0b6079a22c4c7e", + "4de5fb77d99f03a2491a83a6a4cb91ca3c82a3f34ce94cec939174f47c9f95dd" + ], + [ + "d186ea92593f83ea83181b184d41aa93493301ac2bc5b4b1767e94d2db943e38", + 
"486e2ee25a3fbc8e6399d748b077a2755fde99fa85cc24fa647ea4ebf5811a15" + ] + ], + binding: "cf7ffe4b8ad6edb6237efaa8cbfb2dfb2fd08d163b6ad9063720f14779a9e143", + sig_shares: &[ + "9e4d8865faf8c7b3193a3b35eda3d9e12118447114b1e7d5b4809ea28067f8a9", + "b7d094eab6305ae74daeed1acd31abba9ab81f638d38b72c132cb25a5dfae1fc" + ], + sig: "0342c14c77f9d4ef9b8bd64fb0d7bbfdb9f8216a44e5f7bbe6ac0f3ed5e1a57367".to_owned() + + "561e1d51b129229966e92850bad5859bfee96926fad3007cd3f38639e1ffb554" + } + ); +} diff --git a/crypto/frost/src/tests/literal/schnorr.rs b/crypto/frost/src/tests/literal/schnorr.rs index 1ceac748..0d81bf8a 100644 --- a/crypto/frost/src/tests/literal/schnorr.rs +++ b/crypto/frost/src/tests/literal/schnorr.rs @@ -4,7 +4,7 @@ use rand::rngs::OsRng; use crate::{ Curve, schnorr, algorithm::{Hram, Schnorr}, - tests::{key_gen, algorithm_machines, sign as sign_test, literal::secp256k1::{Secp256k1, TestHram}} + tests::{key_gen, algorithm_machines, sign as sign_test, literal::p256::{P256, IetfP256Hram}} }; const MESSAGE: &[u8] = b"Hello World"; @@ -15,8 +15,8 @@ fn sign() { &mut OsRng, algorithm_machines( &mut OsRng, - Schnorr::::new(), - &key_gen::<_, Secp256k1>(&mut OsRng) + Schnorr::::new(), + &key_gen::<_, P256>(&mut OsRng) ), MESSAGE ); @@ -24,19 +24,19 @@ fn sign() { #[test] fn sign_with_offset() { - let mut keys = key_gen::<_, Secp256k1>(&mut OsRng); + let mut keys = key_gen::<_, P256>(&mut OsRng); let group_key = keys[&1].group_key(); - let offset = Secp256k1::hash_to_F(b"offset"); + let offset = P256::hash_to_F(b"offset", &[]); for i in 1 ..= u16::try_from(keys.len()).unwrap() { keys.insert(i, Rc::new(keys[&i].offset(offset))); } - let offset_key = group_key + (Secp256k1::generator_table() * offset); + let offset_key = group_key + (P256::generator_table() * offset); let sig = sign_test( &mut OsRng, - algorithm_machines(&mut OsRng, Schnorr::::new(), &keys), + algorithm_machines(&mut OsRng, Schnorr::::new(), &keys), MESSAGE ); - assert!(schnorr::verify(offset_key, 
TestHram::hram(&sig.R, &offset_key, MESSAGE), &sig)); + assert!(schnorr::verify(offset_key, IetfP256Hram::hram(&sig.R, &offset_key, MESSAGE), &sig)); } diff --git a/crypto/frost/src/tests/literal/secp256k1.rs b/crypto/frost/src/tests/literal/secp256k1.rs deleted file mode 100644 index e10bee07..00000000 --- a/crypto/frost/src/tests/literal/secp256k1.rs +++ /dev/null @@ -1,120 +0,0 @@ -use core::convert::TryInto; - -use rand::rngs::OsRng; - -use ff::PrimeField; -use group::GroupEncoding; - -use sha2::{Digest, Sha256, Sha512}; - -use k256::{ - elliptic_curve::{generic_array::GenericArray, bigint::{ArrayEncoding, U512}, ops::Reduce}, - Scalar, - ProjectivePoint -}; - -use crate::{CurveError, Curve, algorithm::Hram, tests::curve::test_curve}; - -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct Secp256k1; -impl Curve for Secp256k1 { - type F = Scalar; - type G = ProjectivePoint; - type T = ProjectivePoint; - - fn id() -> String { - "secp256k1".to_string() - } - - fn id_len() -> u8 { - u8::try_from(Self::id().len()).unwrap() - } - - fn generator() -> Self::G { - Self::G::GENERATOR - } - - fn generator_table() -> Self::T { - Self::G::GENERATOR - } - - fn little_endian() -> bool { - false - } - - // The IETF draft doesn't specify a secp256k1 ciphersuite - // This test just uses the simplest ciphersuite which would still be viable to deploy - // The comparable P-256 curve uses hash_to_field from the Hash To Curve IETF draft with a context - // string and further DST for H1 ("rho") and H3 ("digest"). It's not currently worth it to add - // that weight, yet if secp256k1 is ever officially acknowledged (not just a testing curve), it - // must be properly implemented. 
- fn hash_msg(msg: &[u8]) -> Vec { - (&Sha256::digest(msg)).to_vec() - } - - fn hash_binding_factor(binding: &[u8]) -> Self::F { - Self::hash_to_F(&[b"rho", binding].concat()) - } - - // Use wide reduction for security - fn hash_to_F(data: &[u8]) -> Self::F { - Scalar::from_uint_reduced(U512::from_be_byte_array(Sha512::digest(data))) - } - - fn F_len() -> usize { - 32 - } - - fn G_len() -> usize { - 33 - } - - fn F_from_slice(slice: &[u8]) -> Result { - let bytes: [u8; 32] = slice.try_into() - .map_err(|_| CurveError::InvalidLength(32, slice.len()))?; - let scalar = Scalar::from_repr(bytes.into()); - if scalar.is_none().unwrap_u8() == 1 { - Err(CurveError::InvalidScalar)?; - } - Ok(scalar.unwrap()) - } - - fn G_from_slice(slice: &[u8]) -> Result { - let point = ProjectivePoint::from_bytes(GenericArray::from_slice(slice)); - if point.is_none().unwrap_u8() == 1 { - Err(CurveError::InvalidScalar)?; - } - Ok(point.unwrap()) - } - - fn F_to_bytes(f: &Self::F) -> Vec { - (&f.to_bytes()).to_vec() - } - - fn G_to_bytes(g: &Self::G) -> Vec { - (&g.to_bytes()).to_vec() - } -} - -#[allow(non_snake_case)] -#[derive(Clone)] -pub struct TestHram {} -impl Hram for TestHram { - #[allow(non_snake_case)] - fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar { - Scalar::from_uint_reduced( - U512::from_be_byte_array( - Sha512::new() - .chain_update(Secp256k1::G_to_bytes(R)) - .chain_update(Secp256k1::G_to_bytes(A)) - .chain_update(m) - .finalize() - ) - ) - } -} - -#[test] -fn secp256k1_curve() { - test_curve::<_, Secp256k1>(&mut OsRng); -} diff --git a/crypto/frost/src/tests/mod.rs b/crypto/frost/src/tests/mod.rs index f87ce812..cc9c8aad 100644 --- a/crypto/frost/src/tests/mod.rs +++ b/crypto/frost/src/tests/mod.rs @@ -18,6 +18,7 @@ mod schnorr; // Test suites for public usage pub mod curve; +pub mod vectors; // Literal test definitions to run during `cargo test` #[cfg(test)] diff --git a/crypto/frost/src/tests/vectors.rs b/crypto/frost/src/tests/vectors.rs new file 
mode 100644 index 00000000..5c984aa3 --- /dev/null +++ b/crypto/frost/src/tests/vectors.rs @@ -0,0 +1,117 @@ +use std::{rc::Rc, collections::HashMap}; + +use crate::{ + Curve, MultisigKeys, + algorithm::{Schnorr, Hram}, + sign::{PreprocessPackage, StateMachine, AlgorithmMachine}, + tests::recover +}; + +pub struct Vectors { + pub threshold: u16, + pub shares: &'static [&'static str], + pub group_secret: &'static str, + pub group_key: &'static str, + + pub msg: &'static str, + pub included: &'static [u16], + pub nonces: &'static [[&'static str; 2]], + pub binding: &'static str, + pub sig_shares: &'static [&'static str], + pub sig: String +} + +// Load these vectors into MultisigKeys using a custom serialization it'll deserialize +fn vectors_to_multisig_keys(vectors: &Vectors) -> HashMap> { + let shares = vectors.shares.iter().map( + |secret| C::F_from_slice(&hex::decode(secret).unwrap()).unwrap() + ).collect::>(); + let verification_shares = shares.iter().map( + |secret| C::generator() * secret + ).collect::>(); + + let mut keys = HashMap::new(); + for i in 1 ..= u16::try_from(shares.len()).unwrap() { + let mut serialized = vec![]; + serialized.push(C::id_len()); + serialized.extend(C::id()); + serialized.extend(vectors.threshold.to_be_bytes()); + serialized.extend(u16::try_from(shares.len()).unwrap().to_be_bytes()); + serialized.extend(i.to_be_bytes()); + serialized.extend(C::F_to_bytes(&shares[usize::from(i) - 1])); + serialized.extend(&hex::decode(vectors.group_key).unwrap()); + for share in &verification_shares { + serialized.extend(&C::G_to_bytes(share)); + } + + let these_keys = MultisigKeys::::deserialize(&serialized).unwrap(); + assert_eq!(these_keys.params().t(), vectors.threshold); + assert_eq!(usize::from(these_keys.params().n()), shares.len()); + assert_eq!(these_keys.params().i(), i); + assert_eq!(these_keys.secret_share(), shares[usize::from(i - 1)]); + assert_eq!(&hex::encode(&C::G_to_bytes(&these_keys.group_key())), vectors.group_key); + 
keys.insert(i, these_keys); + } + + keys +} + +pub fn vectors>(vectors: Vectors) { + let keys = vectors_to_multisig_keys::(&vectors); + let group_key = C::G_from_slice(&hex::decode(vectors.group_key).unwrap()).unwrap(); + assert_eq!( + C::generator() * C::F_from_slice(&hex::decode(vectors.group_secret).unwrap()).unwrap(), + group_key + ); + assert_eq!( + recover(&keys), + C::F_from_slice(&hex::decode(vectors.group_secret).unwrap()).unwrap() + ); + + let mut machines = vec![]; + for i in vectors.included { + machines.push(( + *i, + AlgorithmMachine::new( + Schnorr::::new(), + Rc::new(keys[i].clone()), + vectors.included.clone() + ).unwrap() + )); + } + + let mut commitments = HashMap::new(); + let mut c = 0; + for (i, machine) in machines.iter_mut() { + let nonces = [ + C::F_from_slice(&hex::decode(vectors.nonces[c][0]).unwrap()).unwrap(), + C::F_from_slice(&hex::decode(vectors.nonces[c][1]).unwrap()).unwrap() + ]; + + let mut serialized = C::G_to_bytes(&(C::generator() * nonces[0])); + serialized.extend(&C::G_to_bytes(&(C::generator() * nonces[1]))); + + machine.unsafe_override_preprocess( + PreprocessPackage { nonces, serialized: serialized.clone() } + ); + + commitments.insert(*i, serialized); + c += 1; + } + + let mut shares = HashMap::new(); + c = 0; + for (i, machine) in machines.iter_mut() { + let share = machine.sign(commitments.clone(), &hex::decode(vectors.msg).unwrap()).unwrap(); + assert_eq!(share, hex::decode(vectors.sig_shares[c]).unwrap()); + shares.insert(*i, share); + c += 1; + } + + for (_, machine) in machines.iter_mut() { + let sig = machine.complete(shares.clone()).unwrap(); + let mut serialized = C::G_to_bytes(&sig.R); + serialized.extend(C::F_to_bytes(&sig.s)); + assert_eq!(hex::encode(serialized), vectors.sig); + } +} From e4fc469e585b4849a3a52acab30800e5ddc9e086 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 3 Jun 2022 01:37:12 -0400 Subject: [PATCH 008/105] Use a transcript when generating the per-chain binding for a given set of 
keys While it was fine as-is, as it only had one variable length property, this is a bit more robust. Also binds the Curve ID, which should declare differently even for just different basepoints, and therefore adds two variable length properties (justifying the transcript). --- coins/monero/src/frost.rs | 2 +- coins/monero/src/tests/clsag.rs | 2 +- coins/monero/src/wallet/send/multisig.rs | 5 +++-- coins/monero/tests/send.rs | 4 ++-- crypto/transcript/src/lib.rs | 9 +++------ crypto/transcript/src/merlin.rs | 2 +- processor/Cargo.toml | 1 + processor/src/wallet.rs | 18 ++++++++---------- 8 files changed, 20 insertions(+), 23 deletions(-) diff --git a/coins/monero/src/frost.rs b/coins/monero/src/frost.rs index 0da52dc0..e0cef7c4 100644 --- a/coins/monero/src/frost.rs +++ b/coins/monero/src/frost.rs @@ -135,7 +135,7 @@ impl DLEqProof { // the proper order if they want to reach consensus // It'd be a poor API to have CLSAG define a new transcript solely to pass here, just to try to // merge later in some form, when it should instead just merge xH (as it does) - let mut transcript = Transcript::new(b"DLEq Proof".to_vec()); + let mut transcript = Transcript::new(b"DLEq Proof"); // Bit redundant, keeps things consistent transcript.domain_separate(b"DLEq"); // Doesn't include G which is constant, does include H which isn't, even though H manipulation diff --git a/coins/monero/src/tests/clsag.rs b/coins/monero/src/tests/clsag.rs index 20398117..102b64be 100644 --- a/coins/monero/src/tests/clsag.rs +++ b/coins/monero/src/tests/clsag.rs @@ -96,7 +96,7 @@ fn clsag_multisig() -> Result<(), MultisigError> { algorithm_machines( &mut OsRng, ClsagMultisig::new( - Transcript::new(b"Monero Serai CLSAG Test".to_vec()), + Transcript::new(b"Monero Serai CLSAG Test"), Rc::new(RefCell::new(Some( ClsagDetails::new( ClsagInput::new( diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index 1656b1ee..c1305cde 100644 --- 
a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -37,7 +37,7 @@ pub struct TransactionMachine { impl SignableTransaction { pub async fn multisig( mut self, - label: Vec, + mut transcript: Transcript, rng: &mut R, rpc: &Rpc, height: usize, @@ -56,8 +56,9 @@ impl SignableTransaction { // Create a RNG out of the input shared keys, which either requires the view key or being every // sender, and the payments (address and amount), which a passive adversary may be able to know // depending on how these transactions are coordinated + // Being every sender would already let you note rings which happen to use your transactions + // multiple times, already breaking privacy there - let mut transcript = Transcript::new(label); transcript.domain_separate(b"monero_transaction"); // Include the height we're using for our data // The data itself will be included, making this unnecessary, yet a lot of this is technically diff --git a/coins/monero/tests/send.rs b/coins/monero/tests/send.rs index 19cc6fdf..a50ffd33 100644 --- a/coins/monero/tests/send.rs +++ b/coins/monero/tests/send.rs @@ -25,7 +25,7 @@ mod rpc; use crate::rpc::{rpc, mine_block}; #[cfg(feature = "multisig")] -use monero_serai::frost::Ed25519; +use monero_serai::frost::{Transcript, Ed25519}; lazy_static! 
{ static ref SEQUENTIAL: Mutex<()> = Mutex::new(()); @@ -145,7 +145,7 @@ async fn send_core(test: usize, multisig: bool) { machines.insert( i, signable.clone().multisig( - b"Monero Serai Test Transaction".to_vec(), + Transcript::new(b"Monero Serai Test Transaction"), &mut OsRng, &rpc, rpc.get_height().await.unwrap() - 10, diff --git a/crypto/transcript/src/lib.rs b/crypto/transcript/src/lib.rs index 5a04ada8..b324cc31 100644 --- a/crypto/transcript/src/lib.rs +++ b/crypto/transcript/src/lib.rs @@ -8,7 +8,7 @@ pub use merlin::MerlinTranscript; use digest::Digest; pub trait Transcript { - fn domain_separate(&mut self, label: &[u8]); + fn domain_separate(&mut self, label: &'static [u8]); fn append_message(&mut self, label: &'static [u8], message: &[u8]); fn challenge(&mut self, label: &'static [u8]) -> Vec; fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32]; @@ -24,15 +24,12 @@ impl PartialEq for DigestTranscript { } impl DigestTranscript { - pub fn new(label: Vec) -> Self { - DigestTranscript(label, PhantomData) + pub fn new(label: &'static [u8]) -> Self { + DigestTranscript(label.to_vec(), PhantomData) } } impl Transcript for DigestTranscript { - // It may be beneficial for each domain to be a nested transcript which is itself length prefixed - // This would go further than Merlin though and require an accurate end_domain function which has - // frustrations not worth bothering with when this shouldn't actually be meaningful fn domain_separate(&mut self, label: &[u8]) { self.append_message(b"domain", label); } diff --git a/crypto/transcript/src/merlin.rs b/crypto/transcript/src/merlin.rs index 18671545..b3d2ab50 100644 --- a/crypto/transcript/src/merlin.rs +++ b/crypto/transcript/src/merlin.rs @@ -10,7 +10,7 @@ impl Debug for MerlinTranscript { } impl Transcript for MerlinTranscript { - fn domain_separate(&mut self, label: &[u8]) { + fn domain_separate(&mut self, label: &'static [u8]) { self.append_message(b"dom-sep", label); } diff --git 
a/processor/Cargo.toml b/processor/Cargo.toml index 14c0c487..19171348 100644 --- a/processor/Cargo.toml +++ b/processor/Cargo.toml @@ -14,6 +14,7 @@ thiserror = "1" curve25519-dalek = { version = "3", features = ["std"] } blake2 = "0.10" +transcript = { path = "../crypto/transcript" } dalek-ff-group = { path = "../crypto/dalek-ff-group" } frost = { path = "../crypto/frost" } diff --git a/processor/src/wallet.rs b/processor/src/wallet.rs index c83a7f5a..d55c3a11 100644 --- a/processor/src/wallet.rs +++ b/processor/src/wallet.rs @@ -1,5 +1,6 @@ use std::collections::HashMap; +use transcript::{Transcript, DigestTranscript}; use frost::{Curve, MultisigKeys}; use crate::{CoinError, Coin}; @@ -21,17 +22,14 @@ impl WalletKeys { // potential ETH group key. While this shouldn't be an issue, as this isn't a private // system, there are potentially other benefits to binding this to a specific group key // It's no longer possible to influence group key gen to key cancel without breaking the hash - // function, although that degree of influence means key gen is broken already + // function as well, although that degree of influence means key gen is broken already fn bind(&self, chain: &[u8]) -> MultisigKeys { - self.keys.offset( - C::hash_to_F( - &[ - b"Serai Processor Wallet", - chain, - &C::G_to_bytes(&self.keys.group_key()) - ].concat() - ) - ) + const DST: &[u8] = b"Serai Processor Wallet Chain Bind"; + let mut transcript = DigestTranscript::::new(DST); + transcript.append_message(b"chain", chain); + transcript.append_message(b"curve", C::id()); + transcript.append_message(b"group_key", &C::G_to_bytes(&self.keys.group_key())); + self.keys.offset(C::hash_to_F(DST, &transcript.challenge(b"offset"))) } } From b4cd29f49a05f10e58545882a750c447b8d03008 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 3 Jun 2022 02:00:38 -0400 Subject: [PATCH 009/105] Finish implementing FROST v5 Identity check for P256 and H4 was all that was needed. 
--- coins/monero/src/frost.rs | 6 +++++- crypto/frost/src/lib.rs | 9 +++++++-- crypto/frost/src/sign.rs | 5 ++++- crypto/frost/src/tests/literal/p256.rs | 13 ++++++++++--- 4 files changed, 26 insertions(+), 7 deletions(-) diff --git a/coins/monero/src/frost.rs b/coins/monero/src/frost.rs index e0cef7c4..0a276288 100644 --- a/coins/monero/src/frost.rs +++ b/coins/monero/src/frost.rs @@ -11,7 +11,7 @@ use curve25519_dalek::{ edwards::EdwardsPoint as DPoint }; -use ff::PrimeField; +use ff::{Field, PrimeField}; use group::Group; use transcript::{Transcript as TranscriptTrait, DigestTranscript}; @@ -59,6 +59,10 @@ impl Curve for Ed25519 { true } + fn random_nonce(_secret: Self::F, rng: &mut R) -> Self::F { + dfg::Scalar::random(rng) + } + // This will already be a keccak256 hash in the case of CLSAG signing, making it fine to simply // return as-is, yet this ensures it's fixed size (a security requirement) and unique regardless // of how it's called/what it's called with diff --git a/crypto/frost/src/lib.rs b/crypto/frost/src/lib.rs index a600466a..54abee1d 100644 --- a/crypto/frost/src/lib.rs +++ b/crypto/frost/src/lib.rs @@ -3,6 +3,8 @@ use std::collections::HashMap; use thiserror::Error; +use rand_core::{RngCore, CryptoRng}; + use ff::{Field, PrimeField}; use group::{Group, GroupOps}; @@ -32,7 +34,7 @@ pub enum CurveError { // It uses GenericArray which will hopefully be deprecated as Rust evolves and doesn't offer enough // advantages in the modern day to be worth the hassle -- Kayaba pub trait Curve: Clone + Copy + PartialEq + Eq + Debug { - /// Field element type + /// Scalar field element type // This is available via G::Scalar yet `C::G::Scalar` is ambiguous, forcing horrific accesses type F: PrimeField; /// Group element type @@ -57,6 +59,9 @@ pub trait Curve: Clone + Copy + PartialEq + Eq + Debug { /// If little endian is used for the scalar field's Repr fn little_endian() -> bool; + /// Securely generate a random nonce. 
H4 from the IETF draft + fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F; + /// Hash the message for the binding factor. H3 from the IETF draft // This doesn't actually need to be part of Curve as it does nothing with the curve // This also solely relates to FROST and with a proper Algorithm/HRAM, all projects using @@ -80,7 +85,7 @@ pub trait Curve: Clone + Copy + PartialEq + Eq + Debug { #[allow(non_snake_case)] fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F; - /// Constant size of a serialized field element + /// Constant size of a serialized scalar field element // The alternative way to grab this would be either serializing a junk element and getting its // length or doing a naive division of its BITS property by 8 and assuming a lack of padding #[allow(non_snake_case)] diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index ae33735b..5ccb139c 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -80,7 +80,10 @@ fn preprocess>( rng: &mut R, params: &mut Params, ) -> PreprocessPackage { - let nonces = [C::F::random(&mut *rng), C::F::random(&mut *rng)]; + let nonces = [ + C::random_nonce(params.view().secret_share(), &mut *rng), + C::random_nonce(params.view().secret_share(), &mut *rng) + ]; let commitments = [C::generator_table() * nonces[0], C::generator_table() * nonces[1]]; let mut serialized = C::G_to_bytes(&commitments[0]); serialized.extend(&C::G_to_bytes(&commitments[1])); diff --git a/crypto/frost/src/tests/literal/p256.rs b/crypto/frost/src/tests/literal/p256.rs index 75aa07bb..c36a046d 100644 --- a/crypto/frost/src/tests/literal/p256.rs +++ b/crypto/frost/src/tests/literal/p256.rs @@ -1,9 +1,9 @@ use core::convert::TryInto; -use rand::rngs::OsRng; +use rand::{RngCore, CryptoRng, rngs::OsRng}; use ff::{Field, PrimeField}; -use group::GroupEncoding; +use group::{Group, GroupEncoding}; use sha2::{digest::Update, Digest, Sha256}; @@ -102,6 +102,13 @@ impl Curve for P256 { false } + fn random_nonce(secret: 
Self::F, rng: &mut R) -> Self::F { + let mut seed = vec![0; 32]; + rng.fill_bytes(&mut seed); + seed.extend(&secret.to_repr()); + Self::hash_to_F(&[CONTEXT_STRING, b"nonce"].concat(), &seed) + } + fn hash_msg(msg: &[u8]) -> Vec { (&Sha256::new() .chain(CONTEXT_STRING) @@ -151,7 +158,7 @@ impl Curve for P256 { .map_err(|_| CurveError::InvalidLength(33, slice.len()))?; let point = ProjectivePoint::from_bytes(&bytes.into()); - if point.is_none().into() { + if point.is_none().into() || point.unwrap().is_identity().into() { Err(CurveError::InvalidPoint)?; } From f8d127bf8ac54baf1cc5ebaee11c786cb8cc6d41 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 3 Jun 2022 03:56:17 -0400 Subject: [PATCH 010/105] Add FROST Ed25519 test vectors --- coins/monero/Cargo.toml | 1 + coins/monero/src/frost.rs | 58 +++++++++++++++++------- coins/monero/src/tests/frost.rs | 62 ++++++++++++++++++++++++-- crypto/frost/src/tests/literal/p256.rs | 2 - crypto/frost/src/tests/vectors.rs | 1 - 5 files changed, 102 insertions(+), 22 deletions(-) diff --git a/coins/monero/Cargo.toml b/coins/monero/Cargo.toml index e9fdeaf0..22e1dfe5 100644 --- a/coins/monero/Cargo.toml +++ b/coins/monero/Cargo.toml @@ -40,4 +40,5 @@ experimental = [] multisig = ["ff", "group", "rand_chacha", "transcript", "frost", "dalek-ff-group"] [dev-dependencies] +sha2 = "0.10" tokio = { version = "1", features = ["full"] } diff --git a/coins/monero/src/frost.rs b/coins/monero/src/frost.rs index 0a276288..6b44a296 100644 --- a/coins/monero/src/frost.rs +++ b/coins/monero/src/frost.rs @@ -1,9 +1,10 @@ -use core::convert::TryInto; +use core::{convert::TryInto, fmt::{Formatter, Debug}}; +use std::marker::PhantomData; use thiserror::Error; use rand_core::{RngCore, CryptoRng}; -use blake2::{digest::Update, Digest, Blake2b512}; +use blake2::{digest::{generic_array::typenum::U64, Digest}, Blake2b512}; use curve25519_dalek::{ constants::ED25519_BASEPOINT_TABLE as DTable, @@ -11,7 +12,7 @@ use curve25519_dalek::{ 
edwards::EdwardsPoint as DPoint }; -use ff::{Field, PrimeField}; +use ff::PrimeField; use group::Group; use transcript::{Transcript as TranscriptTrait, DigestTranscript}; @@ -32,9 +33,26 @@ pub enum MultisigError { InvalidKeyImage(u16) } -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct Ed25519; -impl Curve for Ed25519 { +// Accept a parameterized hash function in order to check against the FROST vectors while still +// allowing Blake2b to be used with wide reduction in practice +pub struct Ed25519Internal, const WIDE: bool> { + _digest: PhantomData +} + +// Removed requirements for D to have all of these +impl, const WIDE: bool> Clone for Ed25519Internal { + fn clone(&self) -> Self { *self } +} +impl, const WIDE: bool> Copy for Ed25519Internal {} +impl, const WIDE: bool> PartialEq for Ed25519Internal { + fn eq(&self, _: &Self) -> bool { true } +} +impl, const WIDE: bool> Eq for Ed25519Internal {} +impl, const WIDE: bool> Debug for Ed25519Internal { + fn fmt(&self, _: &mut Formatter<'_>) -> Result<(), core::fmt::Error> { Ok(()) } +} + +impl, const WIDE: bool> Curve for Ed25519Internal { type F = dfg::Scalar; type G = dfg::EdwardsPoint; type T = &'static dfg::EdwardsBasepointTable; @@ -44,7 +62,7 @@ impl Curve for Ed25519 { } fn id() -> &'static [u8] { - b"Ed25519" + b"edwards25519" } fn generator() -> Self::G { @@ -59,15 +77,15 @@ impl Curve for Ed25519 { true } - fn random_nonce(_secret: Self::F, rng: &mut R) -> Self::F { - dfg::Scalar::random(rng) + fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F { + let mut seed = vec![0; 32]; + rng.fill_bytes(&mut seed); + seed.extend(&secret.to_bytes()); + Self::hash_to_F(b"nonce", &seed) } - // This will already be a keccak256 hash in the case of CLSAG signing, making it fine to simply - // return as-is, yet this ensures it's fixed size (a security requirement) and unique regardless - // of how it's called/what it's called with fn hash_msg(msg: &[u8]) -> Vec { - Blake2b512::digest(msg).to_vec() + 
D::digest(msg).to_vec() } fn hash_binding_factor(binding: &[u8]) -> Self::F { @@ -75,7 +93,12 @@ impl Curve for Ed25519 { } fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F { - dfg::Scalar::from_hash(Blake2b512::new().chain(dst).chain(msg)) + let digest = D::new().chain_update(dst).chain_update(msg); + if WIDE { + dfg::Scalar::from_hash(digest) + } else { + dfg::Scalar::from_bytes_mod_order(digest.finalize()[32 ..].try_into().unwrap()) + } } fn F_len() -> usize { @@ -101,8 +124,8 @@ impl Curve for Ed25519 { let point = dfg::CompressedEdwardsY::new(bytes).decompress(); if let Some(point) = point { - // Ban torsioned points - if !point.is_torsion_free() { + // Ban identity and torsioned points + if point.is_identity().into() || (!bool::from(point.is_torsion_free())) { Err(CurveError::InvalidPoint)?; } // Ban points which weren't canonically encoded @@ -124,6 +147,8 @@ impl Curve for Ed25519 { } } +pub type Ed25519 = Ed25519Internal; + // Used to prove legitimacy of key images and nonces which both involve other basepoints #[derive(Clone)] pub struct DLEqProof { @@ -225,6 +250,7 @@ pub fn read_dleq( xG: &DPoint ) -> Result { // Not using G_from_slice here would enable non-canonical points and break blame + // This does also ban identity points, yet those should never be a concern let other = ::G_from_slice( &serialized[(start + 0) .. 
(start + 32)] ).map_err(|_| MultisigError::InvalidDLEqProof(l))?; diff --git a/coins/monero/src/tests/frost.rs b/coins/monero/src/tests/frost.rs index 423d23d2..59f2b707 100644 --- a/coins/monero/src/tests/frost.rs +++ b/coins/monero/src/tests/frost.rs @@ -1,11 +1,67 @@ use rand::rngs::OsRng; -use frost::tests::{curve::test_curve, key_gen}; +use sha2::Sha512; -use crate::frost::Ed25519; +use dalek_ff_group as dfg; +use frost::{Curve, algorithm::Hram, tests::{curve::test_curve, vectors::{Vectors, vectors}}}; + +use crate::frost::{Ed25519, Ed25519Internal}; #[test] fn frost_ed25519() { test_curve::<_, Ed25519>(&mut OsRng); - key_gen::<_, Ed25519>(&mut OsRng); +} + +// Not spec-compliant, as this shouldn't use wide reduction +// Is vectors compliant, which is why the below tests pass +// See https://github.com/cfrg/draft-irtf-cfrg-frost/issues/204 +//type TestEd25519 = Ed25519Internal; +// If this is kept, we can remove WIDE +type TestEd25519 = Ed25519Internal; + +#[derive(Copy, Clone)] +struct IetfEd25519Hram {} +impl Hram for IetfEd25519Hram { + #[allow(non_snake_case)] + fn hram(R: &dfg::EdwardsPoint, A: &dfg::EdwardsPoint, m: &[u8]) -> dfg::Scalar { + TestEd25519::hash_to_F( + b"", + &[&R.compress().to_bytes(), &A.compress().to_bytes(), m].concat() + ) + } +} + +#[test] +fn frost_ed25519_vectors() { + vectors::( + Vectors { + threshold: 2, + shares: &[ + "929dcc590407aae7d388761cddb0c0db6f5627aea8e217f4a033f2ec83d93509", + "a91e66e012e4364ac9aaa405fcafd370402d9859f7b6685c07eed76bf409e80d", + "d3cb090a075eb154e82fdb4b3cb507f110040905468bb9c46da8bdea643a9a02" + ], + group_secret: "7b1c33d3f5291d85de664833beb1ad469f7fb6025a0ec78b3a790c6e13a98304", + group_key: "15d21ccd7ee42959562fc8aa63224c8851fb3ec85a3faf66040d380fb9738673", + + msg: "74657374", + included: &[1, 3], + nonces: &[ + [ + "8c76af04340e83bb5fc427c117d38347fc8ef86d5397feea9aa6412d96c05b0a", + "14a37ddbeae8d9e9687369e5eb3c6d54f03dc19d76bb54fb5425131bc37a600b" + ], + [ + 
"5ca39ebab6874f5e7b5089f3521819a2aa1e2cf738bae6974ee80555de2ef70e", + "0afe3650c4815ff37becd3c6948066e906e929ea9b8f546c74e10002dbcc150c" + ] + ], + sig_shares: &[ + "4369474a398aa10357b60d683da91ea6a767dcf53fd541a8ed6b4d780827ea0a", + "32fcc690d926075e45d2dfb746bab71447943cddbefe80d122c39174aa2e1004" + ], + sig: "2b8d9c6995333c5990e3a3dd6568785539d3322f7f0376452487ea35cfda587b".to_owned() + + "75650edb12b1a8619c88ed1f8463d6baeefb18d3fed3c279102fdfecb255fa0e" + } + ); } diff --git a/crypto/frost/src/tests/literal/p256.rs b/crypto/frost/src/tests/literal/p256.rs index c36a046d..c2668329 100644 --- a/crypto/frost/src/tests/literal/p256.rs +++ b/crypto/frost/src/tests/literal/p256.rs @@ -179,7 +179,6 @@ fn p256_curve() { test_curve::<_, P256>(&mut OsRng); } -#[allow(non_snake_case)] #[derive(Clone)] pub struct IetfP256Hram {} impl Hram for IetfP256Hram { @@ -217,7 +216,6 @@ fn p256_vectors() { "486e2ee25a3fbc8e6399d748b077a2755fde99fa85cc24fa647ea4ebf5811a15" ] ], - binding: "cf7ffe4b8ad6edb6237efaa8cbfb2dfb2fd08d163b6ad9063720f14779a9e143", sig_shares: &[ "9e4d8865faf8c7b3193a3b35eda3d9e12118447114b1e7d5b4809ea28067f8a9", "b7d094eab6305ae74daeed1acd31abba9ab81f638d38b72c132cb25a5dfae1fc" diff --git a/crypto/frost/src/tests/vectors.rs b/crypto/frost/src/tests/vectors.rs index 5c984aa3..0e9f3396 100644 --- a/crypto/frost/src/tests/vectors.rs +++ b/crypto/frost/src/tests/vectors.rs @@ -16,7 +16,6 @@ pub struct Vectors { pub msg: &'static str, pub included: &'static [u16], pub nonces: &'static [[&'static str; 2]], - pub binding: &'static str, pub sig_shares: &'static [&'static str], pub sig: String } From 33241a5bb6b77fdd447b76e6f0b13a80d4a1b17e Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 3 Jun 2022 15:35:42 -0400 Subject: [PATCH 011/105] Fill out dalek-ff-group a bit more --- crypto/dalek-ff-group/src/lib.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/crypto/dalek-ff-group/src/lib.rs b/crypto/dalek-ff-group/src/lib.rs 
index ff0b2bc5..5bf1823d 100644 --- a/crypto/dalek-ff-group/src/lib.rs +++ b/crypto/dalek-ff-group/src/lib.rs @@ -13,7 +13,7 @@ pub use curve25519_dalek as dalek; use dalek::{ constants, - traits::{Identity, IsIdentity}, + traits::Identity, scalar::Scalar as DScalar, edwards::{ EdwardsPoint as DPoint, @@ -102,11 +102,13 @@ impl<'a> MulAssign<&'a Scalar> for Scalar { } impl ConstantTimeEq for Scalar { - fn ct_eq(&self, _: &Self) -> Choice { unimplemented!() } + fn ct_eq(&self, other: &Self) -> Choice { self.0.ct_eq(&other.0) } } impl ConditionallySelectable for Scalar { - fn conditional_select(_: &Self, _: &Self, _: Choice) -> Self { unimplemented!() } + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Scalar(DScalar::conditional_select(a, b, choice)) + } } impl Field for Scalar { @@ -124,7 +126,7 @@ impl Field for Scalar { CtOption::new(Self(self.0.invert()), Choice::from(1 as u8)) } fn sqrt(&self) -> CtOption { unimplemented!() } - fn is_zero(&self) -> Choice { Choice::from(if self.0 == DScalar::zero() { 1 } else { 0 }) } + fn is_zero(&self) -> Choice { self.0.ct_eq(&DScalar::zero()) } fn cube(&self) -> Self { *self * self * self } fn pow_vartime>(&self, _exp: S) -> Self { unimplemented!() } } @@ -146,9 +148,9 @@ impl PrimeField for Scalar { } fn to_repr(&self) -> [u8; 32] { self.0.to_bytes() } - const S: u32 = 0; + const S: u32 = 2; fn is_odd(&self) -> Choice { unimplemented!() } - fn multiplicative_generator() -> Self { unimplemented!() } + fn multiplicative_generator() -> Self { 2u64.into() } fn root_of_unity() -> Self { unimplemented!() } } @@ -245,10 +247,10 @@ impl<'a> MulAssign<&'a Scalar> for EdwardsPoint { impl Group for EdwardsPoint { type Scalar = Scalar; - fn random(mut _rng: impl RngCore) -> Self { unimplemented!() } + fn random(rng: impl RngCore) -> Self { &ED25519_BASEPOINT_TABLE * Scalar::random(rng) } fn identity() -> Self { Self(DPoint::identity()) } fn generator() -> Self { ED25519_BASEPOINT_POINT } - fn is_identity(&self) 
-> Choice { (self.0.is_identity() as u8).into() } + fn is_identity(&self) -> Choice { self.0.ct_eq(&DPoint::identity()) } fn double(&self) -> Self { *self + self } } From 9b52cf4d20ba610ad554deebd5cbcbc5b9b40ee0 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 3 Jun 2022 19:08:25 -0400 Subject: [PATCH 012/105] Generalize out the FROST test for signing/signing with an offset Moves Schnorr signature tests from test_curve to the new test_schnorr, which is more a test_frost. Relevant to https://github.com/serai-dex/serai/issues/9. --- coins/monero/src/tests/frost.rs | 13 ++++- crypto/frost/src/tests/curve.rs | 12 +---- crypto/frost/src/tests/literal/mod.rs | 1 - crypto/frost/src/tests/literal/p256.rs | 9 +++- crypto/frost/src/tests/literal/schnorr.rs | 42 --------------- crypto/frost/src/tests/mod.rs | 6 +-- crypto/frost/src/tests/schnorr.rs | 66 +++++++++++++++++++++-- 7 files changed, 83 insertions(+), 66 deletions(-) delete mode 100644 crypto/frost/src/tests/literal/schnorr.rs diff --git a/coins/monero/src/tests/frost.rs b/coins/monero/src/tests/frost.rs index 59f2b707..710328f8 100644 --- a/coins/monero/src/tests/frost.rs +++ b/coins/monero/src/tests/frost.rs @@ -3,15 +3,24 @@ use rand::rngs::OsRng; use sha2::Sha512; use dalek_ff_group as dfg; -use frost::{Curve, algorithm::Hram, tests::{curve::test_curve, vectors::{Vectors, vectors}}}; +use frost::{ + Curve, + algorithm::Hram, + tests::{curve::test_curve, schnorr::test_schnorr, vectors::{Vectors, vectors}} +}; use crate::frost::{Ed25519, Ed25519Internal}; #[test] -fn frost_ed25519() { +fn frost_ed25519_curve() { test_curve::<_, Ed25519>(&mut OsRng); } +#[test] +fn frost_ed25519_schnorr() { + test_schnorr::<_, Ed25519>(&mut OsRng); +} + // Not spec-compliant, as this shouldn't use wide reduction // Is vectors compliant, which is why the below tests pass // See https://github.com/cfrg/draft-irtf-cfrg-frost/issues/204 diff --git a/crypto/frost/src/tests/curve.rs b/crypto/frost/src/tests/curve.rs index 
ab83a0d9..33f4f516 100644 --- a/crypto/frost/src/tests/curve.rs +++ b/crypto/frost/src/tests/curve.rs @@ -1,9 +1,6 @@ use rand_core::{RngCore, CryptoRng}; -use crate::{ - Curve, MultisigKeys, - tests::{schnorr::{sign, verify, batch_verify}, key_gen} -}; +use crate::{Curve, MultisigKeys, tests::key_gen}; // Test generation of FROST keys fn key_generation(rng: &mut R) { @@ -21,13 +18,6 @@ fn keys_serialization(rng: &mut R) { pub fn test_curve(rng: &mut R) { // TODO: Test the Curve functions themselves - // Test Schnorr signatures work as expected - // This is a bit unnecessary, as they should for any valid curve, yet this provides tests with - // meaning, which the above tests won't have - sign::<_, C>(rng); - verify::<_, C>(rng); - batch_verify::<_, C>(rng); - // Test FROST key generation and serialization of MultisigKeys works as expected key_generation::<_, C>(rng); keys_serialization::<_, C>(rng); diff --git a/crypto/frost/src/tests/literal/mod.rs b/crypto/frost/src/tests/literal/mod.rs index adb87b1a..eea846ee 100644 --- a/crypto/frost/src/tests/literal/mod.rs +++ b/crypto/frost/src/tests/literal/mod.rs @@ -1,2 +1 @@ mod p256; -mod schnorr; diff --git a/crypto/frost/src/tests/literal/p256.rs b/crypto/frost/src/tests/literal/p256.rs index c2668329..d98d4824 100644 --- a/crypto/frost/src/tests/literal/p256.rs +++ b/crypto/frost/src/tests/literal/p256.rs @@ -12,7 +12,7 @@ use p256::{elliptic_curve::bigint::{Encoding, U384}, Scalar, ProjectivePoint}; use crate::{ CurveError, Curve, algorithm::Hram, - tests::{curve::test_curve, vectors::{Vectors, vectors}} + tests::{curve::test_curve, schnorr::test_schnorr, vectors::{Vectors, vectors}} }; const CONTEXT_STRING: &[u8] = b"FROST-P256-SHA256-v5"; @@ -179,8 +179,13 @@ fn p256_curve() { test_curve::<_, P256>(&mut OsRng); } +#[test] +fn p256_schnorr() { + test_schnorr::<_, P256>(&mut OsRng); +} + #[derive(Clone)] -pub struct IetfP256Hram {} +pub struct IetfP256Hram; impl Hram for IetfP256Hram { #[allow(non_snake_case)] fn 
hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar { diff --git a/crypto/frost/src/tests/literal/schnorr.rs b/crypto/frost/src/tests/literal/schnorr.rs deleted file mode 100644 index 0d81bf8a..00000000 --- a/crypto/frost/src/tests/literal/schnorr.rs +++ /dev/null @@ -1,42 +0,0 @@ -use std::rc::Rc; - -use rand::rngs::OsRng; - -use crate::{ - Curve, schnorr, algorithm::{Hram, Schnorr}, - tests::{key_gen, algorithm_machines, sign as sign_test, literal::p256::{P256, IetfP256Hram}} -}; - -const MESSAGE: &[u8] = b"Hello World"; - -#[test] -fn sign() { - sign_test( - &mut OsRng, - algorithm_machines( - &mut OsRng, - Schnorr::::new(), - &key_gen::<_, P256>(&mut OsRng) - ), - MESSAGE - ); -} - -#[test] -fn sign_with_offset() { - let mut keys = key_gen::<_, P256>(&mut OsRng); - let group_key = keys[&1].group_key(); - - let offset = P256::hash_to_F(b"offset", &[]); - for i in 1 ..= u16::try_from(keys.len()).unwrap() { - keys.insert(i, Rc::new(keys[&i].offset(offset))); - } - let offset_key = group_key + (P256::generator_table() * offset); - - let sig = sign_test( - &mut OsRng, - algorithm_machines(&mut OsRng, Schnorr::::new(), &keys), - MESSAGE - ); - assert!(schnorr::verify(offset_key, IetfP256Hram::hram(&sig.R, &offset_key, MESSAGE), &sig)); -} diff --git a/crypto/frost/src/tests/mod.rs b/crypto/frost/src/tests/mod.rs index cc9c8aad..5a1b58f1 100644 --- a/crypto/frost/src/tests/mod.rs +++ b/crypto/frost/src/tests/mod.rs @@ -13,11 +13,9 @@ use crate::{ sign::{StateMachine, AlgorithmMachine} }; -// Internal tests -mod schnorr; - // Test suites for public usage pub mod curve; +pub mod schnorr; pub mod vectors; // Literal test definitions to run during `cargo test` @@ -56,7 +54,7 @@ pub fn key_gen( i, key_gen::StateMachine::::new( params[&i], - "FROST test key_gen".to_string() + "FROST Test key_gen".to_string() ) ); commitments.insert( diff --git a/crypto/frost/src/tests/schnorr.rs b/crypto/frost/src/tests/schnorr.rs index 5f64c303..4b39fcce 100644 --- 
a/crypto/frost/src/tests/schnorr.rs +++ b/crypto/frost/src/tests/schnorr.rs @@ -1,10 +1,15 @@ +use std::{marker::PhantomData, rc::Rc, collections::HashMap}; + use rand_core::{RngCore, CryptoRng}; use ff::Field; -use crate::{Curve, schnorr, algorithm::SchnorrSignature}; +use crate::{ + Curve, MultisigKeys, schnorr::{self, SchnorrSignature}, algorithm::{Hram, Schnorr}, + tests::{key_gen, algorithm_machines, sign as sign_test} +}; -pub(crate) fn sign(rng: &mut R) { +pub(crate) fn core_sign(rng: &mut R) { let private_key = C::F::random(&mut *rng); let nonce = C::F::random(&mut *rng); let challenge = C::F::random(rng); // Doesn't bother to craft an HRAM @@ -20,7 +25,7 @@ pub(crate) fn sign(rng: &mut R) { // The above sign function verifies signing works // This verifies invalid signatures don't pass, using zero signatures, which should effectively be // random -pub(crate) fn verify(rng: &mut R) { +pub(crate) fn core_verify(rng: &mut R) { assert!( !schnorr::verify::( C::generator_table() * C::F::random(&mut *rng), @@ -30,7 +35,7 @@ pub(crate) fn verify(rng: &mut R) { ); } -pub(crate) fn batch_verify(rng: &mut R) { +pub(crate) fn core_batch_verify(rng: &mut R) { // Create 5 signatures let mut keys = vec![]; let mut challenges = vec![]; @@ -71,3 +76,56 @@ pub(crate) fn batch_verify(rng: &mut R) { } } } + +fn sign_core( + rng: &mut R, + group_key: C::G, + keys: &HashMap>> +) { + const MESSAGE: &'static [u8] = b"Hello, World!"; + + let machines = algorithm_machines(rng, Schnorr::>::new(), keys); + let sig = sign_test(&mut *rng, machines, MESSAGE); + assert!(schnorr::verify(group_key, TestHram::::hram(&sig.R, &group_key, MESSAGE), &sig)); +} + +#[derive(Clone)] +pub struct TestHram { + _curve: PhantomData +} +impl Hram for TestHram { + #[allow(non_snake_case)] + fn hram(R: &C::G, A: &C::G, m: &[u8]) -> C::F { + C::hash_to_F(b"challenge", &[&C::G_to_bytes(R), &C::G_to_bytes(A), m].concat()) + } +} + +fn sign(rng: &mut R) { + let keys = key_gen::<_, C>(&mut *rng); + 
sign_core(rng, keys[&1].group_key(), &keys); +} + +fn sign_with_offset(rng: &mut R) { + let mut keys = key_gen::<_, C>(&mut *rng); + let group_key = keys[&1].group_key(); + + let offset = C::hash_to_F(b"FROST Test sign_with_offset", b"offset"); + for i in 1 ..= u16::try_from(keys.len()).unwrap() { + keys.insert(i, Rc::new(keys[&i].offset(offset))); + } + let offset_key = group_key + (C::generator_table() * offset); + + sign_core(rng, offset_key, &keys); +} + +pub fn test_schnorr(rng: &mut R) { + // Test Schnorr signatures work as expected + // This is a bit unnecessary, as they should for any valid curve, yet this establishes sanity + core_sign::<_, C>(rng); + core_verify::<_, C>(rng); + core_batch_verify::<_, C>(rng); + + // Test Schnorr signatures under FROST + sign::<_, C>(rng); + sign_with_offset::<_, C>(rng); +} From b83ca7d6660c19cc0922f29e4d6aaf90dd85c17a Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 3 Jun 2022 22:46:48 -0400 Subject: [PATCH 013/105] Implement a basic TX IO selector algorithm --- processor/src/coins/monero.rs | 7 +- processor/src/lib.rs | 8 +-- processor/src/wallet.rs | 118 +++++++++++++++++++++++++++++----- 3 files changed, 110 insertions(+), 23 deletions(-) diff --git a/processor/src/coins/monero.rs b/processor/src/coins/monero.rs index 6e9140f9..834ab63d 100644 --- a/processor/src/coins/monero.rs +++ b/processor/src/coins/monero.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use async_trait::async_trait; use rand_core::{RngCore, CryptoRng}; @@ -16,6 +18,7 @@ use monero_serai::{ use crate::{Output as OutputTrait, CoinError, Coin, view_key}; +#[derive(Clone)] pub struct Output(SpendableOutput); impl OutputTrait for Output { // While we could use (tx, o), using the key ensures we won't be susceptible to the burning bug. 
@@ -104,9 +107,9 @@ impl Coin for Monero { .collect() } - async fn prepare_send( + async fn prepare_send( &self, - _keys: MultisigKeys, + _keys: Arc>, _label: Vec, _height: usize, _inputs: Vec, diff --git a/processor/src/lib.rs b/processor/src/lib.rs index e87ef456..fae4bfad 100644 --- a/processor/src/lib.rs +++ b/processor/src/lib.rs @@ -1,4 +1,4 @@ -use std::marker::Send; +use std::{marker::Send, sync::Arc}; use async_trait::async_trait; use thiserror::Error; @@ -14,7 +14,7 @@ mod wallet; #[cfg(test)] mod tests; -pub trait Output: Sized { +pub trait Output: Sized + Clone { type Id; fn id(&self) -> Self::Id; @@ -53,9 +53,9 @@ pub trait Coin { key: ::G ) -> Vec; - async fn prepare_send( + async fn prepare_send( &self, - keys: MultisigKeys, + keys: Arc>, label: Vec, height: usize, inputs: Vec, diff --git a/processor/src/wallet.rs b/processor/src/wallet.rs index d55c3a11..d1ab088d 100644 --- a/processor/src/wallet.rs +++ b/processor/src/wallet.rs @@ -1,9 +1,9 @@ -use std::collections::HashMap; +use std::{sync::Arc, collections::HashMap}; use transcript::{Transcript, DigestTranscript}; use frost::{Curve, MultisigKeys}; -use crate::{CoinError, Coin}; +use crate::{CoinError, Output, Coin}; pub struct WalletKeys { keys: MultisigKeys, @@ -43,9 +43,8 @@ pub struct CoinDb { pub struct Wallet { db: CoinDb, coin: C, - keys: Vec>, - pending: Vec<(usize, MultisigKeys)>, - outputs: Vec + keys: Vec<(Arc>, Vec)>, + pending: Vec<(usize, MultisigKeys)> } impl Wallet { @@ -59,8 +58,7 @@ impl Wallet { coin, keys: vec![], - pending: vec![], - outputs: vec![] + pending: vec![] } } @@ -80,21 +78,107 @@ impl Wallet { pub async fn poll(&mut self) -> Result<(), CoinError> { let confirmed_height = self.coin.get_height().await? - C::confirmations(); - for h in self.scanned_height() .. confirmed_height { - let mut k = 0; - while k < self.pending.len() { - if h == self.pending[k].0 { - self.keys.push(self.pending.swap_remove(k).1); - } else { - k += 1; + for height in self.scanned_height() .. 
confirmed_height { + // If any keys activated at this height, shift them over + { + let mut k = 0; + while k < self.pending.len() { + if height >= self.pending[k].0 { + self.keys.push((Arc::new(self.pending.swap_remove(k).1), vec![])); + } else { + k += 1; + } } } - let block = self.coin.get_block(h).await?; - for keys in &self.keys { - let outputs = self.coin.get_outputs(&block, keys.group_key()); + let block = self.coin.get_block(height).await?; + for (keys, outputs) in self.keys.iter_mut() { + outputs.extend( + self.coin.get_outputs(&block, keys.group_key()).await.iter().cloned().filter( + |_output| true // !self.db.handled.contains_key(output.id()) // TODO + ) + ); } } Ok(()) } + + pub async fn prepare_sends( + &mut self, + canonical: usize, + payments: Vec<(C::Address, u64)> + ) -> Result, CoinError> { + if payments.len() == 0 { + return Ok(vec![]); + } + + let acknowledged_height = self.acknowledged_height(canonical); + + // TODO: Log schedule outputs when max_outputs is low + // Payments is the first set of TXs in the schedule + // As each payment re-appears, let mut payments = schedule[payment] where the only input is + // the source payment + // let (mut payments, schedule) = payments; + let mut payments = payments; + payments.sort_by(|a, b| a.1.cmp(&b.1).reverse()); + + let mut txs = vec![]; + for (keys, outputs) in self.keys.iter_mut() { + // Select the highest value outputs to minimize the amount of inputs needed + outputs.sort_by(|a, b| a.amount().cmp(&b.amount()).reverse()); + + while outputs.len() != 0 { + // Select the maximum amount of outputs possible + let mut inputs = &outputs[0 .. 
C::max_inputs().min(outputs.len())]; + + // Calculate their sum value, minus the fee needed to spend them + let mut sum = inputs.iter().map(|input| input.amount()).sum::(); + // sum -= C::MAX_FEE; // TODO + + // Grab the payments this will successfully fund + let mut these_payments = vec![]; + for payment in &payments { + if sum > payment.1 { + these_payments.push(payment); + sum -= payment.1; + } + // Doesn't break in this else case as a smaller payment may still fit + } + + // Move to the next set of keys if none of these outputs remain significant + if these_payments.len() == 0 { + break; + } + + // Drop any uneeded outputs + while sum > inputs[inputs.len() - 1].amount() { + sum -= inputs[inputs.len() - 1].amount(); + inputs = &inputs[.. (inputs.len() - 1)]; + } + + // We now have a minimal effective outputs/payments set + // Take ownership while removing these candidates from the provided list + let inputs = outputs.drain(.. inputs.len()).collect(); + let payments = payments.drain(.. these_payments.len()).collect::>(); + + let tx = self.coin.prepare_send( + keys.clone(), + format!( + "Serai Processor Wallet Send (height {}, index {})", + canonical, + txs.len() + ).as_bytes().to_vec(), + acknowledged_height, + inputs, + &payments + ).await?; + // self.db.save_tx(tx) // TODO + txs.push(tx); + } + } + + // TODO: Remaining payments? 
+ + Ok(txs) + } } From 3617ed4eb7ff44272b0f3f344d2ec0d3dfed041f Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 3 Jun 2022 23:22:08 -0400 Subject: [PATCH 014/105] Use const values for our traits where we can --- coins/monero/src/frost.rs | 21 ++-------- crypto/frost/src/key_gen.rs | 12 +++--- crypto/frost/src/lib.rs | 55 +++++++++++--------------- crypto/frost/src/schnorr.rs | 8 ++-- crypto/frost/src/sign.rs | 2 +- crypto/frost/src/tests/literal/p256.rs | 21 ++-------- crypto/frost/src/tests/mod.rs | 2 +- crypto/frost/src/tests/schnorr.rs | 10 ++--- crypto/frost/src/tests/vectors.rs | 12 +++--- processor/src/coins/monero.rs | 10 ++--- processor/src/lib.rs | 20 ++++------ processor/src/wallet.rs | 12 +++--- 12 files changed, 72 insertions(+), 113 deletions(-) diff --git a/coins/monero/src/frost.rs b/coins/monero/src/frost.rs index 6b44a296..4653cc3e 100644 --- a/coins/monero/src/frost.rs +++ b/coins/monero/src/frost.rs @@ -57,25 +57,12 @@ impl, const WIDE: bool> Curve for Ed25519Internal u8 { - u8::try_from(Self::id().len()).unwrap() - } + const ID: &'static [u8] = b"edwards25519"; - fn id() -> &'static [u8] { - b"edwards25519" - } + const GENERATOR: Self::G = dfg::ED25519_BASEPOINT_POINT; + const GENERATOR_TABLE: Self::T = &dfg::ED25519_BASEPOINT_TABLE; - fn generator() -> Self::G { - Self::G::generator() - } - - fn generator_table() -> Self::T { - &dfg::ED25519_BASEPOINT_TABLE - } - - fn little_endian() -> bool { - true - } + const LITTLE_ENDIAN: bool = true; fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F { let mut seed = vec![0; 32]; diff --git a/crypto/frost/src/key_gen.rs b/crypto/frost/src/key_gen.rs index 643a2454..c30962de 100644 --- a/crypto/frost/src/key_gen.rs +++ b/crypto/frost/src/key_gen.rs @@ -41,7 +41,7 @@ fn generate_key_r1( // Step 1: Generate t random values to form a polynomial with coefficients.push(C::F::random(&mut *rng)); // Step 3: Generate public commitments - commitments.push(C::generator_table() * coefficients[i]); + 
commitments.push(C::GENERATOR_TABLE * coefficients[i]); // Serialize them for publication serialized.extend(&C::G_to_bytes(&commitments[i])); } @@ -59,7 +59,7 @@ fn generate_key_r1( challenge::( context, params.i(), - &C::G_to_bytes(&(C::generator_table() * r)), + &C::G_to_bytes(&(C::GENERATOR_TABLE * r)), &serialized ) ).serialize() @@ -224,7 +224,7 @@ fn complete_r2( res }; - let mut batch = BatchVerifier::new(shares.len(), C::little_endian()); + let mut batch = BatchVerifier::new(shares.len(), C::LITTLE_ENDIAN); for (l, share) in &shares { if *l == params.i() { continue; @@ -237,7 +237,7 @@ fn complete_r2( // ensure that malleability isn't present is to use this n * t algorithm, which runs // per sender and not as an aggregate of all senders, which also enables blame let mut values = exponential(params.i, &commitments[l]); - values.push((-*share, C::generator())); + values.push((-*share, C::GENERATOR)); batch.queue(rng, *l, values); } batch.verify_with_vartime_blame().map_err(|l| FrostError::InvalidCommitment(l))?; @@ -254,9 +254,9 @@ fn complete_r2( // Calculate each user's verification share let mut verification_shares = HashMap::new(); for i in 1 ..= params.n() { - verification_shares.insert(i, multiexp_vartime(exponential(i, &stripes), C::little_endian())); + verification_shares.insert(i, multiexp_vartime(exponential(i, &stripes), C::LITTLE_ENDIAN)); } - debug_assert_eq!(C::generator_table() * secret_share, verification_shares[¶ms.i()]); + debug_assert_eq!(C::GENERATOR_TABLE * secret_share, verification_shares[¶ms.i()]); // TODO: Clear serialized and shares diff --git a/crypto/frost/src/lib.rs b/crypto/frost/src/lib.rs index 54abee1d..3f7c2b4e 100644 --- a/crypto/frost/src/lib.rs +++ b/crypto/frost/src/lib.rs @@ -42,22 +42,19 @@ pub trait Curve: Clone + Copy + PartialEq + Eq + Debug { /// Precomputed table type type T: Mul; - /// Byte length of the curve ID - // While C::id().len() is trivial, this bounds it to u8 for any proper Curve implementation - fn 
id_len() -> u8; /// ID for this curve - fn id() -> &'static [u8]; + const ID: &'static [u8]; /// Generator for the group - // While group does provide this in its API, Jubjub users will want to use a custom basepoint - fn generator() -> Self::G; + // While group does provide this in its API, privacy coins will want to use a custom basepoint + const GENERATOR: Self::G; /// Table for the generator for the group /// If there isn't a precomputed table available, the generator itself should be used - fn generator_table() -> Self::T; + const GENERATOR_TABLE: Self::T; /// If little endian is used for the scalar field's Repr - fn little_endian() -> bool; + const LITTLE_ENDIAN: bool; /// Securely generate a random nonce. H4 from the IETF draft fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F; @@ -298,12 +295,12 @@ impl MultisigKeys { let offset_share = offset * C::F::from(included.len().try_into().unwrap()).invert().unwrap(); Ok(MultisigView { - group_key: self.group_key + (C::generator_table() * offset), + group_key: self.group_key + (C::GENERATOR_TABLE * offset), secret_share: secret_share + offset_share, verification_shares: self.verification_shares.iter().map( |(l, share)| ( *l, - (*share * lagrange::(*l, &included)) + (C::generator_table() * offset_share) + (*share * lagrange::(*l, &included)) + (C::GENERATOR_TABLE * offset_share) ) ).collect(), included: included.to_vec(), @@ -311,15 +308,13 @@ impl MultisigKeys { } pub fn serialized_len(n: u16) -> usize { - 1 + usize::from(C::id_len()) + (3 * 2) + C::F_len() + C::G_len() + (usize::from(n) * C::G_len()) + 8 + C::ID.len() + (3 * 2) + C::F_len() + C::G_len() + (usize::from(n) * C::G_len()) } pub fn serialize(&self) -> Vec { - let mut serialized = Vec::with_capacity( - 1 + usize::from(C::id_len()) + MultisigKeys::::serialized_len(self.params.n) - ); - serialized.push(C::id_len()); - serialized.extend(C::id()); + let mut serialized = Vec::with_capacity(MultisigKeys::::serialized_len(self.params.n)); + 
serialized.extend(u64::try_from(C::ID.len()).unwrap().to_be_bytes()); + serialized.extend(C::ID); serialized.extend(&self.params.t.to_be_bytes()); serialized.extend(&self.params.n.to_be_bytes()); serialized.extend(&self.params.i.to_be_bytes()); @@ -328,34 +323,28 @@ impl MultisigKeys { for l in 1 ..= self.params.n.into() { serialized.extend(&C::G_to_bytes(&self.verification_shares[&l])); } - serialized } pub fn deserialize(serialized: &[u8]) -> Result, FrostError> { - if serialized.len() < 1 { - Err(FrostError::InternalError("MultisigKeys serialization is empty".to_string()))?; + let mut start = u64::try_from(C::ID.len()).unwrap().to_be_bytes().to_vec(); + start.extend(C::ID); + let mut cursor = start.len(); + + if serialized.len() < (cursor + 4) { + Err( + FrostError::InternalError( + "MultisigKeys serialization is missing its curve/participant quantities".to_string() + ) + )?; } - - let id_len: usize = serialized[0].into(); - let mut cursor = 1; - - if serialized.len() < (cursor + id_len) { - Err(FrostError::InternalError("ID wasn't included".to_string()))?; - } - - if C::id() != &serialized[cursor .. (cursor + id_len)] { + if &start != &serialized[.. cursor] { Err( FrostError::InternalError( "curve is distinct between serialization and deserialization".to_string() ) )?; } - cursor += id_len; - - if serialized.len() < (cursor + 4) { - Err(FrostError::InternalError("participant quantities weren't included".to_string()))?; - } let t = u16::from_be_bytes(serialized[cursor .. 
(cursor + 2)].try_into().unwrap()); cursor += 2; diff --git a/crypto/frost/src/schnorr.rs b/crypto/frost/src/schnorr.rs index 238d8f4b..c138f05c 100644 --- a/crypto/frost/src/schnorr.rs +++ b/crypto/frost/src/schnorr.rs @@ -28,7 +28,7 @@ pub(crate) fn sign( challenge: C::F ) -> SchnorrSignature { SchnorrSignature { - R: C::generator_table() * nonce, + R: C::GENERATOR_TABLE * nonce, s: nonce + (private_key * challenge) } } @@ -38,15 +38,15 @@ pub(crate) fn verify( challenge: C::F, signature: &SchnorrSignature ) -> bool { - (C::generator_table() * signature.s) == (signature.R + (public_key * challenge)) + (C::GENERATOR_TABLE * signature.s) == (signature.R + (public_key * challenge)) } pub(crate) fn batch_verify( rng: &mut R, triplets: &[(u16, C::G, C::F, SchnorrSignature)] ) -> Result<(), u16> { - let mut values = [(C::F::one(), C::generator()); 3]; - let mut batch = BatchVerifier::new(triplets.len(), C::little_endian()); + let mut values = [(C::F::one(), C::GENERATOR); 3]; + let mut batch = BatchVerifier::new(triplets.len(), C::LITTLE_ENDIAN); for triple in triplets { // s = r + ca // sG == R + cA diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index 5ccb139c..11739fc4 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -84,7 +84,7 @@ fn preprocess>( C::random_nonce(params.view().secret_share(), &mut *rng), C::random_nonce(params.view().secret_share(), &mut *rng) ]; - let commitments = [C::generator_table() * nonces[0], C::generator_table() * nonces[1]]; + let commitments = [C::GENERATOR_TABLE * nonces[0], C::GENERATOR_TABLE * nonces[1]]; let mut serialized = C::G_to_bytes(&commitments[0]); serialized.extend(&C::G_to_bytes(&commitments[1])); diff --git a/crypto/frost/src/tests/literal/p256.rs b/crypto/frost/src/tests/literal/p256.rs index d98d4824..1ca4ed39 100644 --- a/crypto/frost/src/tests/literal/p256.rs +++ b/crypto/frost/src/tests/literal/p256.rs @@ -82,25 +82,12 @@ impl Curve for P256 { type G = ProjectivePoint; type T = 
ProjectivePoint; - fn id_len() -> u8 { - u8::try_from(Self::id().len()).unwrap() - } + const ID: &'static [u8] = b"P-256"; - fn id() -> &'static [u8] { - b"P-256" - } + const GENERATOR: Self::G = Self::G::GENERATOR; + const GENERATOR_TABLE: Self::G = Self::G::GENERATOR; - fn generator() -> Self::G { - Self::G::GENERATOR - } - - fn generator_table() -> Self::T { - Self::G::GENERATOR - } - - fn little_endian() -> bool { - false - } + const LITTLE_ENDIAN: bool = false; fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F { let mut seed = vec![0; 32]; diff --git a/crypto/frost/src/tests/mod.rs b/crypto/frost/src/tests/mod.rs index 5a1b58f1..d361d3fd 100644 --- a/crypto/frost/src/tests/mod.rs +++ b/crypto/frost/src/tests/mod.rs @@ -113,7 +113,7 @@ pub fn recover(keys: &HashMap>) -> C::F { C::F::zero(), |accum, (i, keys)| accum + (keys.secret_share() * lagrange::(*i, &included)) ); - assert_eq!(C::generator_table() * group_private, first.group_key(), "failed to recover keys"); + assert_eq!(C::GENERATOR_TABLE * group_private, first.group_key(), "failed to recover keys"); group_private } diff --git a/crypto/frost/src/tests/schnorr.rs b/crypto/frost/src/tests/schnorr.rs index 4b39fcce..2c2ea85e 100644 --- a/crypto/frost/src/tests/schnorr.rs +++ b/crypto/frost/src/tests/schnorr.rs @@ -15,7 +15,7 @@ pub(crate) fn core_sign(rng: &mut R) { let challenge = C::F::random(rng); // Doesn't bother to craft an HRAM assert!( schnorr::verify::( - C::generator_table() * private_key, + C::GENERATOR_TABLE * private_key, challenge, &schnorr::sign(private_key, nonce, challenge) ) @@ -28,9 +28,9 @@ pub(crate) fn core_sign(rng: &mut R) { pub(crate) fn core_verify(rng: &mut R) { assert!( !schnorr::verify::( - C::generator_table() * C::F::random(&mut *rng), + C::GENERATOR_TABLE * C::F::random(&mut *rng), C::F::random(rng), - &SchnorrSignature { R: C::generator_table() * C::F::zero(), s: C::F::zero() } + &SchnorrSignature { R: C::GENERATOR_TABLE * C::F::zero(), s: C::F::zero() } ) ); } @@ 
-48,7 +48,7 @@ pub(crate) fn core_batch_verify(rng: &mut R) { // Batch verify let triplets = (0 .. 5).map( - |i| (u16::try_from(i + 1).unwrap(), C::generator_table() * keys[i], challenges[i], sigs[i]) + |i| (u16::try_from(i + 1).unwrap(), C::GENERATOR_TABLE * keys[i], challenges[i], sigs[i]) ).collect::>(); schnorr::batch_verify(rng, &triplets).unwrap(); @@ -113,7 +113,7 @@ fn sign_with_offset(rng: &mut R) { for i in 1 ..= u16::try_from(keys.len()).unwrap() { keys.insert(i, Rc::new(keys[&i].offset(offset))); } - let offset_key = group_key + (C::generator_table() * offset); + let offset_key = group_key + (C::GENERATOR_TABLE * offset); sign_core(rng, offset_key, &keys); } diff --git a/crypto/frost/src/tests/vectors.rs b/crypto/frost/src/tests/vectors.rs index 0e9f3396..590d9efa 100644 --- a/crypto/frost/src/tests/vectors.rs +++ b/crypto/frost/src/tests/vectors.rs @@ -26,14 +26,14 @@ fn vectors_to_multisig_keys(vectors: &Vectors) -> HashMap>(); let verification_shares = shares.iter().map( - |secret| C::generator() * secret + |secret| C::GENERATOR * secret ).collect::>(); let mut keys = HashMap::new(); for i in 1 ..= u16::try_from(shares.len()).unwrap() { let mut serialized = vec![]; - serialized.push(C::id_len()); - serialized.extend(C::id()); + serialized.extend(u64::try_from(C::ID.len()).unwrap().to_be_bytes()); + serialized.extend(C::ID); serialized.extend(vectors.threshold.to_be_bytes()); serialized.extend(u16::try_from(shares.len()).unwrap().to_be_bytes()); serialized.extend(i.to_be_bytes()); @@ -59,7 +59,7 @@ pub fn vectors>(vectors: Vectors) { let keys = vectors_to_multisig_keys::(&vectors); let group_key = C::G_from_slice(&hex::decode(vectors.group_key).unwrap()).unwrap(); assert_eq!( - C::generator() * C::F_from_slice(&hex::decode(vectors.group_secret).unwrap()).unwrap(), + C::GENERATOR * C::F_from_slice(&hex::decode(vectors.group_secret).unwrap()).unwrap(), group_key ); assert_eq!( @@ -87,8 +87,8 @@ pub fn vectors>(vectors: Vectors) { 
C::F_from_slice(&hex::decode(vectors.nonces[c][1]).unwrap()).unwrap() ]; - let mut serialized = C::G_to_bytes(&(C::generator() * nonces[0])); - serialized.extend(&C::G_to_bytes(&(C::generator() * nonces[1]))); + let mut serialized = C::G_to_bytes(&(C::GENERATOR * nonces[0])); + serialized.extend(&C::G_to_bytes(&(C::GENERATOR * nonces[1]))); machine.unsafe_override_preprocess( PreprocessPackage { nonces, serialized: serialized.clone() } diff --git a/processor/src/coins/monero.rs b/processor/src/coins/monero.rs index 834ab63d..c0cb416a 100644 --- a/processor/src/coins/monero.rs +++ b/processor/src/coins/monero.rs @@ -58,7 +58,7 @@ impl Monero { pub fn new(url: String) -> Monero { Monero { rpc: Rpc::new(url), - view: dfg::Scalar::from_hash(view_key::(0)).0 + view: *view_key::(0) } } } @@ -73,16 +73,16 @@ impl Coin for Monero { type Address = Address; - fn id() -> &'static [u8] { b"Monero" } - fn confirmations() -> usize { 10 } + const ID: &'static [u8] = b"Monero"; + const CONFIRMATIONS: usize = 10; // Testnet TX bb4d188a4c571f2f0de70dca9d475abc19078c10ffa8def26dd4f63ce1bcfd79 uses 146 inputs // while using less than 100kb of space, albeit with just 2 outputs (though outputs share a BP) // The TX size limit is half the contextual median block weight, where said weight is >= 300,000 // This means any TX which fits into 150kb will be accepted by Monero // 128, even with 16 outputs, should fit into 100kb. 
Further efficiency by 192 may be viable // TODO: Get hard numbers and tune - fn max_inputs() -> usize { 128 } - fn max_outputs() -> usize { 16 } + const MAX_INPUTS: usize = 128; + const MAX_OUTPUTS: usize = 16; async fn get_height(&self) -> Result { self.rpc.get_height().await.map_err(|_| CoinError::ConnectionError) diff --git a/processor/src/lib.rs b/processor/src/lib.rs index fae4bfad..3e01dd54 100644 --- a/processor/src/lib.rs +++ b/processor/src/lib.rs @@ -4,8 +4,6 @@ use async_trait::async_trait; use thiserror::Error; use rand_core::{RngCore, CryptoRng}; -use blake2::{digest::{Digest, Update}, Blake2b512}; - use frost::{Curve, MultisigKeys}; mod coins; @@ -40,10 +38,10 @@ pub trait Coin { type Address: Send; - fn id() -> &'static [u8]; - fn confirmations() -> usize; - fn max_inputs() -> usize; - fn max_outputs() -> usize; + const ID: &'static [u8]; + const CONFIRMATIONS: usize; + const MAX_INPUTS: usize; + const MAX_OUTPUTS: usize; async fn get_height(&self) -> Result; async fn get_block(&self, height: usize) -> Result; @@ -70,11 +68,9 @@ pub trait Coin { ) -> Result<(Vec, Vec<::Id>), CoinError>; } -// Generate a view key for a given chain in a globally consistent manner regardless of the current -// group key +// Generate a static view key for a given chain in a globally consistent manner +// Doesn't consider the current group key to increase the simplicity of verifying Serai's status // Takes an index, k, for more modern privacy protocols which use multiple view keys -// Doesn't run Curve::hash_to_F, instead returning the hash object, due to hash_to_F being a FROST -// definition instead of a wide reduction from a hash object -pub fn view_key(k: u64) -> Blake2b512 { - Blake2b512::new().chain(b"Serai DEX View Key").chain(C::id()).chain(k.to_le_bytes()) +pub fn view_key(k: u64) -> ::F { + C::Curve::hash_to_F(b"Serai DEX View Key", &[C::ID, &k.to_le_bytes()].concat()) } diff --git a/processor/src/wallet.rs b/processor/src/wallet.rs index d1ab088d..db05a6cd 
100644 --- a/processor/src/wallet.rs +++ b/processor/src/wallet.rs @@ -16,7 +16,7 @@ impl WalletKeys { } // Bind this key to a specific network by applying an additive offset - // While it would be fine to just C::id(), including the group key creates distinct + // While it would be fine to just C::ID, including the group key creates distinct // offsets instead of static offsets. Under a statically offset system, a BTC key could // have X subtracted to find the potential group key, and then have Y added to find the // potential ETH group key. While this shouldn't be an issue, as this isn't a private @@ -27,7 +27,7 @@ impl WalletKeys { const DST: &[u8] = b"Serai Processor Wallet Chain Bind"; let mut transcript = DigestTranscript::::new(DST); transcript.append_message(b"chain", chain); - transcript.append_message(b"curve", C::id()); + transcript.append_message(b"curve", C::ID); transcript.append_message(b"group_key", &C::G_to_bytes(&self.keys.group_key())); self.keys.offset(C::hash_to_F(DST, &transcript.challenge(b"offset"))) } @@ -73,11 +73,11 @@ impl Wallet { pub fn add_keys(&mut self, keys: &WalletKeys) { // Doesn't use +1 as this is height, not block index, and poll moves by block index - self.pending.push((self.acknowledged_height(keys.creation_height), keys.bind(C::id()))); + self.pending.push((self.acknowledged_height(keys.creation_height), keys.bind(C::ID))); } pub async fn poll(&mut self) -> Result<(), CoinError> { - let confirmed_height = self.coin.get_height().await? - C::confirmations(); + let confirmed_height = self.coin.get_height().await? - C::CONFIRMATIONS; for height in self.scanned_height() .. 
confirmed_height { // If any keys activated at this height, shift them over { @@ -114,7 +114,7 @@ impl Wallet { let acknowledged_height = self.acknowledged_height(canonical); - // TODO: Log schedule outputs when max_outputs is low + // TODO: Log schedule outputs when MAX_OUTPUTS is low // Payments is the first set of TXs in the schedule // As each payment re-appears, let mut payments = schedule[payment] where the only input is // the source payment @@ -129,7 +129,7 @@ impl Wallet { while outputs.len() != 0 { // Select the maximum amount of outputs possible - let mut inputs = &outputs[0 .. C::max_inputs().min(outputs.len())]; + let mut inputs = &outputs[0 .. C::MAX_INPUTS.min(outputs.len())]; // Calculate their sum value, minus the fee needed to spend them let mut sum = inputs.iter().map(|input| input.amount()).sum::(); From a46432b829c3cc0e791a70e9e373ef60ff5f4c40 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 5 Jun 2022 06:00:21 -0400 Subject: [PATCH 015/105] Add a proper database trait --- processor/src/coins/monero.rs | 4 +- processor/src/lib.rs | 2 +- processor/src/tests/mod.rs | 4 +- processor/src/wallet.rs | 106 ++++++++++++++++++++++++++-------- 4 files changed, 88 insertions(+), 28 deletions(-) diff --git a/processor/src/coins/monero.rs b/processor/src/coins/monero.rs index c0cb416a..da5d6ac4 100644 --- a/processor/src/coins/monero.rs +++ b/processor/src/coins/monero.rs @@ -24,10 +24,10 @@ impl OutputTrait for Output { // While we could use (tx, o), using the key ensures we won't be susceptible to the burning bug. 
// While the Monero library offers a variant which allows senders to ensure their TXs have unique // output keys, Serai can still be targeted using the classic burning bug - type Id = CompressedEdwardsY; + type Id = [u8; 32]; fn id(&self) -> Self::Id { - self.0.key.compress() + self.0.key.compress().to_bytes() } fn amount(&self) -> u64 { diff --git a/processor/src/lib.rs b/processor/src/lib.rs index 3e01dd54..9357bd06 100644 --- a/processor/src/lib.rs +++ b/processor/src/lib.rs @@ -13,7 +13,7 @@ mod wallet; mod tests; pub trait Output: Sized + Clone { - type Id; + type Id: AsRef<[u8]>; fn id(&self) -> Self::Id; fn amount(&self) -> u64; diff --git a/processor/src/tests/mod.rs b/processor/src/tests/mod.rs index 1bddc91a..02c8180d 100644 --- a/processor/src/tests/mod.rs +++ b/processor/src/tests/mod.rs @@ -2,14 +2,14 @@ use std::rc::Rc; use rand::rngs::OsRng; -use crate::{Coin, coins::monero::Monero, wallet::{WalletKeys, Wallet}}; +use crate::{Coin, coins::monero::Monero, wallet::{WalletKeys, MemCoinDb, Wallet}}; #[tokio::test] async fn test() { let monero = Monero::new("http://127.0.0.1:18081".to_string()); println!("{}", monero.get_height().await.unwrap()); let mut keys = frost::tests::key_gen::<_, ::Curve>(&mut OsRng); - let mut wallet = Wallet::new(monero); + let mut wallet = Wallet::new(MemCoinDb::new(), monero); wallet.acknowledge_height(0, 0); wallet.add_keys(&WalletKeys::new(Rc::try_unwrap(keys.remove(&1).take().unwrap()).unwrap(), 0)); dbg!(0); diff --git a/processor/src/wallet.rs b/processor/src/wallet.rs index db05a6cd..0c5b66a9 100644 --- a/processor/src/wallet.rs +++ b/processor/src/wallet.rs @@ -33,28 +33,83 @@ impl WalletKeys { } } -pub struct CoinDb { +pub trait CoinDb { + // Set a height as scanned to + fn scanned_to_height(&mut self, height: usize); + // Acknowledge a given coin height for a canonical height + fn acknowledge_height(&mut self, canonical: usize, height: usize); + + // Adds an output to the DB. 
Returns false if the output was already added + fn add_output(&mut self, output: &O) -> bool; + + // Height this coin has been scanned to + fn scanned_height(&self) -> usize; + // Acknowledged height for a given canonical height + fn acknowledged_height(&self, canonical: usize) -> usize; +} + +pub struct MemCoinDb { // Height this coin has been scanned to scanned_height: usize, // Acknowledged height for a given canonical height - acknowledged_heights: HashMap + acknowledged_heights: HashMap, + outputs: HashMap, Vec> } -pub struct Wallet { - db: CoinDb, + +impl MemCoinDb { + pub fn new() -> MemCoinDb { + MemCoinDb { + scanned_height: 0, + acknowledged_heights: HashMap::new(), + outputs: HashMap::new() + } + } +} + +impl CoinDb for MemCoinDb { + fn scanned_to_height(&mut self, height: usize) { + self.scanned_height = height; + } + + fn acknowledge_height(&mut self, canonical: usize, height: usize) { + debug_assert!(!self.acknowledged_heights.contains_key(&canonical)); + self.acknowledged_heights.insert(canonical, height); + } + + fn add_output(&mut self, output: &O) -> bool { + // This would be insecure as we're indexing by ID and this will replace the output as a whole + // Multiple outputs may have the same ID in edge cases such as Monero, where outputs are ID'd + // by key image, not by hash + index + // self.outputs.insert(output.id(), output).is_some() + let id = output.id().as_ref().to_vec(); + if self.outputs.contains_key(&id) { + return false; + } + self.outputs.insert(id, output.serialize()); + true + } + + fn scanned_height(&self) -> usize { + self.scanned_height + } + + fn acknowledged_height(&self, canonical: usize) -> usize { + self.acknowledged_heights[&canonical] + } +} + +pub struct Wallet { + db: D, coin: C, keys: Vec<(Arc>, Vec)>, pending: Vec<(usize, MultisigKeys)> } -impl Wallet { - pub fn new(coin: C) -> Wallet { +impl Wallet { + pub fn new(db: D, coin: C) -> Wallet { Wallet { - db: CoinDb { - scanned_height: 0, - acknowledged_heights: 
HashMap::new(), - }, - + db, coin, keys: vec![], @@ -62,13 +117,12 @@ impl Wallet { } } - pub fn scanned_height(&self) -> usize { self.db.scanned_height } + pub fn scanned_height(&self) -> usize { self.db.scanned_height() } pub fn acknowledge_height(&mut self, canonical: usize, height: usize) { - debug_assert!(!self.db.acknowledged_heights.contains_key(&canonical)); - self.db.acknowledged_heights.insert(canonical, height); + self.db.acknowledge_height(canonical, height); } pub fn acknowledged_height(&self, canonical: usize) -> usize { - self.db.acknowledged_heights[&canonical] + self.db.acknowledged_height(canonical) } pub fn add_keys(&mut self, keys: &WalletKeys) { @@ -83,7 +137,10 @@ impl Wallet { { let mut k = 0; while k < self.pending.len() { - if height >= self.pending[k].0 { + // TODO + //if height < self.pending[k].0 { + //} else if height == self.pending[k].0 { + if height <= self.pending[k].0 { self.keys.push((Arc::new(self.pending.swap_remove(k).1), vec![])); } else { k += 1; @@ -95,7 +152,7 @@ impl Wallet { for (keys, outputs) in self.keys.iter_mut() { outputs.extend( self.coin.get_outputs(&block, keys.group_key()).await.iter().cloned().filter( - |_output| true // !self.db.handled.contains_key(output.id()) // TODO + |output| self.db.add_output(output) ) ); } @@ -103,18 +160,23 @@ impl Wallet { Ok(()) } + // This should be called whenever new outputs are received, meaning there was a new block + // If these outputs were received and sent to Substrate, it should be called after they're + // included in a block and we have results to act on + // If these outputs weren't sent to Substrate (change), it should be called immediately + // with all payments still queued from the last call pub async fn prepare_sends( &mut self, canonical: usize, payments: Vec<(C::Address, u64)> - ) -> Result, CoinError> { + ) -> Result<(Vec<(C::Address, u64)>, Vec), CoinError> { if payments.len() == 0 { - return Ok(vec![]); + return Ok((vec![], vec![])); } let acknowledged_height 
= self.acknowledged_height(canonical); - // TODO: Log schedule outputs when MAX_OUTPUTS is low + // TODO: Log schedule outputs when MAX_OUTPUTS is lower than payments.len() // Payments is the first set of TXs in the schedule // As each payment re-appears, let mut payments = schedule[payment] where the only input is // the source payment @@ -177,8 +239,6 @@ impl Wallet { } } - // TODO: Remaining payments? - - Ok(txs) + Ok((payments, txs)) } } From fdb1929ba4435ba0ce15c65c30c826ada9b9e81a Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 5 Jun 2022 07:33:15 -0400 Subject: [PATCH 016/105] Move to Arc/RwLock --- coins/monero/src/ringct/clsag/multisig.rs | 14 ++++++------ coins/monero/src/tests/clsag.rs | 4 ++-- coins/monero/src/wallet/send/multisig.rs | 26 +++++++++++------------ crypto/frost/src/sign.rs | 8 +++---- crypto/frost/src/tests/mod.rs | 8 +++---- crypto/frost/src/tests/schnorr.rs | 6 +++--- crypto/frost/src/tests/vectors.rs | 4 ++-- 7 files changed, 34 insertions(+), 36 deletions(-) diff --git a/coins/monero/src/ringct/clsag/multisig.rs b/coins/monero/src/ringct/clsag/multisig.rs index c42ac029..8aaae8f8 100644 --- a/coins/monero/src/ringct/clsag/multisig.rs +++ b/coins/monero/src/ringct/clsag/multisig.rs @@ -1,5 +1,5 @@ use core::fmt::Debug; -use std::{rc::Rc, cell::RefCell}; +use std::sync::{Arc, RwLock}; use rand_core::{RngCore, CryptoRng, SeedableRng}; use rand_chacha::ChaCha12Rng; @@ -47,7 +47,7 @@ impl ClsagInput { } } -#[derive(Clone, PartialEq, Debug)] +#[derive(Clone, Debug)] pub struct ClsagDetails { input: ClsagInput, mask: Scalar @@ -70,7 +70,7 @@ struct Interim { } #[allow(non_snake_case)] -#[derive(Clone, PartialEq, Debug)] +#[derive(Clone, Debug)] pub struct ClsagMultisig { transcript: Transcript, @@ -79,7 +79,7 @@ pub struct ClsagMultisig { image: EdwardsPoint, AH: (dfg::EdwardsPoint, dfg::EdwardsPoint), - details: Rc>>, + details: Arc>>, msg: Option<[u8; 32]>, interim: Option @@ -88,7 +88,7 @@ pub struct ClsagMultisig { impl 
ClsagMultisig { pub fn new( transcript: Transcript, - details: Rc>> + details: Arc>> ) -> Result { Ok( ClsagMultisig { @@ -111,11 +111,11 @@ impl ClsagMultisig { } fn input(&self) -> ClsagInput { - self.details.borrow().as_ref().unwrap().input.clone() + (*self.details.read().unwrap()).as_ref().unwrap().input.clone() } fn mask(&self) -> Scalar { - self.details.borrow().as_ref().unwrap().mask + (*self.details.read().unwrap()).as_ref().unwrap().mask } } diff --git a/coins/monero/src/tests/clsag.rs b/coins/monero/src/tests/clsag.rs index 102b64be..b5b90d2c 100644 --- a/coins/monero/src/tests/clsag.rs +++ b/coins/monero/src/tests/clsag.rs @@ -1,5 +1,5 @@ #[cfg(feature = "multisig")] -use std::{cell::RefCell, rc::Rc}; +use std::sync::{Arc, RwLock}; use rand::{RngCore, rngs::OsRng}; @@ -97,7 +97,7 @@ fn clsag_multisig() -> Result<(), MultisigError> { &mut OsRng, ClsagMultisig::new( Transcript::new(b"Monero Serai CLSAG Test"), - Rc::new(RefCell::new(Some( + Arc::new(RwLock::new(Some( ClsagDetails::new( ClsagInput::new( Commitment::new(randomness, AMOUNT), diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index c1305cde..e1ac0f10 100644 --- a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -1,4 +1,4 @@ -use std::{cell::RefCell, rc::Rc, collections::HashMap}; +use std::{sync::{Arc, RwLock}, collections::HashMap}; use rand_core::{RngCore, CryptoRng, SeedableRng}; use rand_chacha::ChaCha12Rng; @@ -28,7 +28,7 @@ pub struct TransactionMachine { images: Vec, output_masks: Option, - inputs: Vec>>>, + inputs: Vec>>>, clsags: Vec>, tx: Option @@ -49,7 +49,7 @@ impl SignableTransaction { let mut inputs = vec![]; for _ in 0 .. 
self.inputs.len() { // Doesn't resize as that will use a single Rc for the entire Vec - inputs.push(Rc::new(RefCell::new(None))); + inputs.push(Arc::new(RwLock::new(None))); } let mut clsags = vec![]; @@ -87,7 +87,7 @@ impl SignableTransaction { // Ideally, this would be done post entropy, instead of now, yet doing so would require sign // to be async which isn't preferable. This should be suitably competent though // While this inability means we can immediately create the input, moving it out of the - // Rc RefCell, keeping it within an Rc RefCell keeps our options flexible + // Arc RwLock, keeping it within an Arc RwLock keeps our options flexible let decoys = Decoys::select( // Using a seeded RNG with a specific height, committed to above, should make these decoys // committed to. They'll also be committed to later via the TX message as a whole @@ -107,7 +107,7 @@ impl SignableTransaction { transcript.clone(), inputs[i].clone() ).map_err(|e| TransactionError::MultisigError(e))?, - Rc::new(keys.offset(dalek_ff_group::Scalar(input.key_offset))), + Arc::new(keys.offset(dalek_ff_group::Scalar(input.key_offset))), &included ).map_err(|e| TransactionError::FrostError(e))? 
); @@ -270,15 +270,13 @@ impl StateMachine for TransactionMachine { } ); - value.3.replace( - Some( - ClsagDetails::new( - ClsagInput::new( - value.0.commitment, - value.1 - ).map_err(|_| panic!("Signing an input which isn't present in the ring we created for it"))?, - mask - ) + *value.3.write().unwrap() = Some( + ClsagDetails::new( + ClsagInput::new( + value.0.commitment, + value.1 + ).map_err(|_| panic!("Signing an input which isn't present in the ring we created for it"))?, + mask ) ); diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index 11739fc4..49c1c853 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -1,5 +1,5 @@ use core::fmt; -use std::{rc::Rc, collections::HashMap}; +use std::{sync::Arc, collections::HashMap}; use rand_core::{RngCore, CryptoRng}; @@ -19,7 +19,7 @@ use crate::{ #[derive(Clone)] pub struct Params> { algorithm: A, - keys: Rc>, + keys: Arc>, view: MultisigView, } @@ -27,7 +27,7 @@ pub struct Params> { impl> Params { pub fn new( algorithm: A, - keys: Rc>, + keys: Arc>, included: &[u16], ) -> Result, FrostError> { let mut included = included.to_vec(); @@ -297,7 +297,7 @@ impl> AlgorithmMachine { /// Creates a new machine to generate a key for the specified curve in the specified multisig pub fn new( algorithm: A, - keys: Rc>, + keys: Arc>, included: &[u16], ) -> Result, FrostError> { Ok( diff --git a/crypto/frost/src/tests/mod.rs b/crypto/frost/src/tests/mod.rs index d361d3fd..52fbf515 100644 --- a/crypto/frost/src/tests/mod.rs +++ b/crypto/frost/src/tests/mod.rs @@ -1,4 +1,4 @@ -use std::{rc::Rc, collections::HashMap}; +use std::{sync::Arc, collections::HashMap}; use rand_core::{RngCore, CryptoRng}; @@ -36,7 +36,7 @@ pub fn clone_without( pub fn key_gen( rng: &mut R -) -> HashMap>> { +) -> HashMap>> { let mut params = HashMap::new(); let mut machines = HashMap::new(); @@ -98,7 +98,7 @@ pub fn key_gen( } assert_eq!(group_key.unwrap(), these_keys.group_key()); - keys.insert(*i, Rc::new(these_keys)); + 
keys.insert(*i, Arc::new(these_keys)); } keys @@ -120,7 +120,7 @@ pub fn recover(keys: &HashMap>) -> C::F { pub fn algorithm_machines>( rng: &mut R, algorithm: A, - keys: &HashMap>>, + keys: &HashMap>>, ) -> HashMap> { let mut included = vec![]; while included.len() < usize::from(keys[&1].params().t()) { diff --git a/crypto/frost/src/tests/schnorr.rs b/crypto/frost/src/tests/schnorr.rs index 2c2ea85e..684107a7 100644 --- a/crypto/frost/src/tests/schnorr.rs +++ b/crypto/frost/src/tests/schnorr.rs @@ -1,4 +1,4 @@ -use std::{marker::PhantomData, rc::Rc, collections::HashMap}; +use std::{marker::PhantomData, sync::Arc, collections::HashMap}; use rand_core::{RngCore, CryptoRng}; @@ -80,7 +80,7 @@ pub(crate) fn core_batch_verify(rng: &mut R) { fn sign_core( rng: &mut R, group_key: C::G, - keys: &HashMap>> + keys: &HashMap>> ) { const MESSAGE: &'static [u8] = b"Hello, World!"; @@ -111,7 +111,7 @@ fn sign_with_offset(rng: &mut R) { let offset = C::hash_to_F(b"FROST Test sign_with_offset", b"offset"); for i in 1 ..= u16::try_from(keys.len()).unwrap() { - keys.insert(i, Rc::new(keys[&i].offset(offset))); + keys.insert(i, Arc::new(keys[&i].offset(offset))); } let offset_key = group_key + (C::GENERATOR_TABLE * offset); diff --git a/crypto/frost/src/tests/vectors.rs b/crypto/frost/src/tests/vectors.rs index 590d9efa..c4403d07 100644 --- a/crypto/frost/src/tests/vectors.rs +++ b/crypto/frost/src/tests/vectors.rs @@ -1,4 +1,4 @@ -use std::{rc::Rc, collections::HashMap}; +use std::{sync::Arc, collections::HashMap}; use crate::{ Curve, MultisigKeys, @@ -73,7 +73,7 @@ pub fn vectors>(vectors: Vectors) { *i, AlgorithmMachine::new( Schnorr::::new(), - Rc::new(keys[i].clone()), + Arc::new(keys[i].clone()), vectors.included.clone() ).unwrap() )); From 53132105262ca84e215b3a05d8f8a90edb236ecd Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 5 Jun 2022 15:10:50 -0400 Subject: [PATCH 017/105] Monero prepare_send --- coins/monero/src/wallet/send/multisig.rs | 4 +- 
coins/monero/tests/send.rs | 4 +- processor/src/coins/monero.rs | 80 +++++++++++++++++++----- processor/src/lib.rs | 4 +- processor/src/tests/mod.rs | 4 +- processor/src/wallet.rs | 26 +++++--- 6 files changed, 91 insertions(+), 31 deletions(-) diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index e1ac0f10..02c3e4ff 100644 --- a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -37,11 +37,11 @@ pub struct TransactionMachine { impl SignableTransaction { pub async fn multisig( mut self, - mut transcript: Transcript, rng: &mut R, rpc: &Rpc, - height: usize, keys: MultisigKeys, + mut transcript: Transcript, + height: usize, mut included: Vec ) -> Result { let mut images = vec![]; diff --git a/coins/monero/tests/send.rs b/coins/monero/tests/send.rs index a50ffd33..dd95c5b4 100644 --- a/coins/monero/tests/send.rs +++ b/coins/monero/tests/send.rs @@ -145,11 +145,11 @@ async fn send_core(test: usize, multisig: bool) { machines.insert( i, signable.clone().multisig( - Transcript::new(b"Monero Serai Test Transaction"), &mut OsRng, &rpc, - rpc.get_height().await.unwrap() - 10, (*keys[&i]).clone(), + Transcript::new(b"Monero Serai Test Transaction"), + rpc.get_height().await.unwrap() - 10, (1 ..= THRESHOLD).collect::>() ).await.unwrap() ); diff --git a/processor/src/coins/monero.rs b/processor/src/coins/monero.rs index da5d6ac4..2b694984 100644 --- a/processor/src/coins/monero.rs +++ b/processor/src/coins/monero.rs @@ -3,20 +3,24 @@ use std::sync::Arc; use async_trait::async_trait; use rand_core::{RngCore, CryptoRng}; -use curve25519_dalek::{scalar::Scalar, edwards::CompressedEdwardsY}; +use curve25519_dalek::{ + constants::ED25519_BASEPOINT_TABLE, + scalar::Scalar, + edwards::CompressedEdwardsY +}; use dalek_ff_group as dfg; use frost::MultisigKeys; -use monero::util::address::Address; +use monero::{PublicKey, network::Network, util::address::Address}; use monero_serai::{ frost::Ed25519, 
transaction::{Timelock, Transaction}, rpc::Rpc, - wallet::{SpendableOutput, SignableTransaction} + wallet::{SpendableOutput, SignableTransaction as MSignableTransaction} }; -use crate::{Output as OutputTrait, CoinError, Coin, view_key}; +use crate::{Transcript, Output as OutputTrait, CoinError, Coin, view_key}; #[derive(Clone)] pub struct Output(SpendableOutput); @@ -49,16 +53,26 @@ impl From for Output { } } +pub struct SignableTransaction( + Arc>, + Transcript, + usize, + MSignableTransaction +); + pub struct Monero { rpc: Rpc, - view: Scalar + view: Scalar, + view_pub: CompressedEdwardsY } impl Monero { pub fn new(url: String) -> Monero { + let view = view_key::(0).0; Monero { rpc: Rpc::new(url), - view: *view_key::(0) + view, + view_pub: (&view * &ED25519_BASEPOINT_TABLE).compress() } } } @@ -109,21 +123,55 @@ impl Coin for Monero { async fn prepare_send( &self, - _keys: Arc>, - _label: Vec, - _height: usize, - _inputs: Vec, - _payments: &[(Address, u64)] + keys: Arc>, + transcript: Transcript, + height: usize, + mut inputs: Vec, + payments: &[(Address, u64)] ) -> Result { - todo!() + let spend = keys.group_key().0.compress(); + Ok( + SignableTransaction( + keys, + transcript, + height, + MSignableTransaction::new( + inputs.drain(..).map(|input| input.0).collect(), + payments.to_vec(), + Address::standard( + Network::Mainnet, + PublicKey { point: spend }, + PublicKey { point: self.view_pub } + ), + 100000000 + ).map_err(|_| CoinError::ConnectionError)? 
+ ) + ) } async fn attempt_send( &self, - _rng: &mut R, - _transaction: SignableTransaction, - _included: &[u16] + rng: &mut R, + transaction: SignableTransaction, + included: &[u16] ) -> Result<(Vec, Vec<::Id>), CoinError> { - todo!() + let attempt = transaction.3.clone().multisig( + rng, + &self.rpc, + (*transaction.0).clone(), + transaction.1.clone(), + transaction.2, + included.to_vec() + ).await.map_err(|_| CoinError::ConnectionError)?; + + /* + let tx = None; + self.rpc.publish_transaction(tx).await.map_err(|_| CoinError::ConnectionError)?; + Ok( + tx.hash().to_vec(), + tx.outputs.iter().map(|output| output.key.compress().to_bytes().collect()) + ) + */ + Ok((vec![], vec![])) } } diff --git a/processor/src/lib.rs b/processor/src/lib.rs index 9357bd06..337a409f 100644 --- a/processor/src/lib.rs +++ b/processor/src/lib.rs @@ -6,6 +6,8 @@ use rand_core::{RngCore, CryptoRng}; use frost::{Curve, MultisigKeys}; +pub(crate) use monero_serai::frost::Transcript; + mod coins; mod wallet; @@ -54,7 +56,7 @@ pub trait Coin { async fn prepare_send( &self, keys: Arc>, - label: Vec, + transcript: Transcript, height: usize, inputs: Vec, payments: &[(Self::Address, u64)] diff --git a/processor/src/tests/mod.rs b/processor/src/tests/mod.rs index 02c8180d..abc338d3 100644 --- a/processor/src/tests/mod.rs +++ b/processor/src/tests/mod.rs @@ -1,4 +1,4 @@ -use std::rc::Rc; +use std::sync::Arc; use rand::rngs::OsRng; @@ -11,6 +11,6 @@ async fn test() { let mut keys = frost::tests::key_gen::<_, ::Curve>(&mut OsRng); let mut wallet = Wallet::new(MemCoinDb::new(), monero); wallet.acknowledge_height(0, 0); - wallet.add_keys(&WalletKeys::new(Rc::try_unwrap(keys.remove(&1).take().unwrap()).unwrap(), 0)); + wallet.add_keys(&WalletKeys::new(Arc::try_unwrap(keys.remove(&1).take().unwrap()).unwrap(), 0)); dbg!(0); } diff --git a/processor/src/wallet.rs b/processor/src/wallet.rs index 0c5b66a9..25514ce8 100644 --- a/processor/src/wallet.rs +++ b/processor/src/wallet.rs @@ -1,9 +1,10 @@ use 
std::{sync::Arc, collections::HashMap}; -use transcript::{Transcript, DigestTranscript}; +use transcript::Transcript as TranscriptTrait; + use frost::{Curve, MultisigKeys}; -use crate::{CoinError, Output, Coin}; +use crate::{Transcript, CoinError, Output, Coin}; pub struct WalletKeys { keys: MultisigKeys, @@ -25,7 +26,7 @@ impl WalletKeys { // function as well, although that degree of influence means key gen is broken already fn bind(&self, chain: &[u8]) -> MultisigKeys { const DST: &[u8] = b"Serai Processor Wallet Chain Bind"; - let mut transcript = DigestTranscript::::new(DST); + let mut transcript = Transcript::new(DST); transcript.append_message(b"chain", chain); transcript.append_message(b"curve", C::ID); transcript.append_message(b"group_key", &C::G_to_bytes(&self.keys.group_key())); @@ -223,13 +224,22 @@ impl Wallet { let inputs = outputs.drain(.. inputs.len()).collect(); let payments = payments.drain(.. these_payments.len()).collect::>(); + let mut transcript = Transcript::new(b"Serai Processor Wallet Send"); + transcript.append_message( + b"canonical_height", + &u64::try_from(canonical).unwrap().to_le_bytes() + ); + transcript.append_message( + b"acknowledged_height", + &u64::try_from(acknowledged_height).unwrap().to_le_bytes() + ); + transcript.append_message( + b"index", + &u64::try_from(txs.len()).unwrap().to_le_bytes() + ); let tx = self.coin.prepare_send( keys.clone(), - format!( - "Serai Processor Wallet Send (height {}, index {})", - canonical, - txs.len() - ).as_bytes().to_vec(), + transcript, acknowledged_height, inputs, &payments From 55a895d65a18f0cca0fb2cd5e46c1dcc15e4af83 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 5 Jun 2022 16:08:51 -0400 Subject: [PATCH 018/105] Add first party support for k256 and p256 under feature flags Given the lack of vectors for k256, it's currently a match of the p256 spec (with a distinct context string), yet p256 is still always used when testing. 
--- crypto/frost/Cargo.toml | 8 + crypto/frost/src/curves/kp256.rs | 133 +++++++++++ crypto/frost/src/curves/mod.rs | 48 ++++ crypto/frost/src/lib.rs | 2 + .../frost/src/tests/literal/expand_message.rs | 15 ++ crypto/frost/src/tests/literal/kp256.rs | 80 +++++++ crypto/frost/src/tests/literal/mod.rs | 3 +- crypto/frost/src/tests/literal/p256.rs | 219 ------------------ 8 files changed, 288 insertions(+), 220 deletions(-) create mode 100644 crypto/frost/src/curves/kp256.rs create mode 100644 crypto/frost/src/curves/mod.rs create mode 100644 crypto/frost/src/tests/literal/expand_message.rs create mode 100644 crypto/frost/src/tests/literal/kp256.rs delete mode 100644 crypto/frost/src/tests/literal/p256.rs diff --git a/crypto/frost/Cargo.toml b/crypto/frost/Cargo.toml index d5f5f2dc..2ed0bb3c 100644 --- a/crypto/frost/Cargo.toml +++ b/crypto/frost/Cargo.toml @@ -15,6 +15,10 @@ hex = "0.4" ff = "0.11" group = "0.11" +sha2 = { version = "0.10", optional = true } +p256 = { version = "0.10", optional = true } +k256 = { version = "0.10", optional = true } + transcript = { path = "../transcript" } multiexp = { path = "../multiexp", features = ["batch"] } @@ -23,3 +27,7 @@ multiexp = { path = "../multiexp", features = ["batch"] } rand = "0.8" sha2 = "0.10" p256 = { version = "0.10", features = ["arithmetic"] } + +[features] +p256 = ["sha2", "dep:p256"] +k256 = ["sha2", "dep:k256"] diff --git a/crypto/frost/src/curves/kp256.rs b/crypto/frost/src/curves/kp256.rs new file mode 100644 index 00000000..4fcdecb8 --- /dev/null +++ b/crypto/frost/src/curves/kp256.rs @@ -0,0 +1,133 @@ +use core::{marker::PhantomData, convert::TryInto}; + +use rand_core::{RngCore, CryptoRng}; + +use ff::{Field, PrimeField}; +use group::{Group, GroupEncoding}; + +use sha2::{digest::Update, Digest, Sha256}; + +#[cfg(feature = "k256")] +use k256::elliptic_curve::bigint::{Encoding, U384}; +#[cfg(all(not(feature = "k256"), any(test, feature = "p256")))] +use p256::elliptic_curve::bigint::{Encoding, U384}; + 
+use crate::{CurveError, Curve, curves::expand_message_xmd_sha256}; + +#[allow(non_snake_case)] +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct KP256 { + _P: PhantomData

+} + +pub(crate) trait KP256Instance

{ + const CONTEXT: &'static [u8]; + const ID: &'static [u8]; + const GENERATOR: P; +} + +#[cfg(any(test, feature = "p256"))] +pub type P256 = KP256; +#[cfg(any(test, feature = "p256"))] +impl KP256Instance for P256 { + const CONTEXT: &'static [u8] = b"FROST-P256-SHA256-v5"; + const ID: &'static [u8] = b"P-256"; + const GENERATOR: p256::ProjectivePoint = p256::ProjectivePoint::GENERATOR; +} + +#[cfg(feature = "k256")] +pub type K256 = KP256; +#[cfg(feature = "k256")] +impl KP256Instance for K256 { + const CONTEXT: &'static [u8] = b"FROST-secp256k1-SHA256-v5"; + const ID: &'static [u8] = b"secp256k1"; + const GENERATOR: k256::ProjectivePoint = k256::ProjectivePoint::GENERATOR; +} + +impl Curve for KP256

where + KP256

: KP256Instance

, + P::Scalar: PrimeField, + ::Repr: From<[u8; 32]> + AsRef<[u8]>, + P::Repr: From<[u8; 33]> + AsRef<[u8]> { + type F = P::Scalar; + type G = P; + type T = P; + + const ID: &'static [u8] = >::ID; + + const GENERATOR: Self::G = >::GENERATOR; + const GENERATOR_TABLE: Self::G = >::GENERATOR; + + const LITTLE_ENDIAN: bool = false; + + fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F { + let mut seed = vec![0; 32]; + rng.fill_bytes(&mut seed); + seed.extend(secret.to_repr().as_ref()); + Self::hash_to_F(&[Self::CONTEXT, b"nonce"].concat(), &seed) + } + + fn hash_msg(msg: &[u8]) -> Vec { + (&Sha256::new() + .chain(Self::CONTEXT) + .chain(b"digest") + .chain(msg) + .finalize() + ).to_vec() + } + + fn hash_binding_factor(binding: &[u8]) -> Self::F { + Self::hash_to_F(&[Self::CONTEXT, b"rho"].concat(), binding) + } + + fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F { + let mut modulus = vec![0; 16]; + modulus.extend((Self::F::zero() - Self::F::one()).to_repr().as_ref()); + let modulus = U384::from_be_slice(&modulus).wrapping_add(&U384::ONE); + Self::F_from_slice( + &U384::from_be_slice( + &expand_message_xmd_sha256(dst, msg, 48).unwrap() + ).reduce(&modulus).unwrap().to_be_bytes()[16 ..] 
+ ).unwrap() + } + + fn F_len() -> usize { + 32 + } + + fn G_len() -> usize { + 33 + } + + fn F_from_slice(slice: &[u8]) -> Result { + let bytes: [u8; 32] = slice.try_into() + .map_err(|_| CurveError::InvalidLength(32, slice.len()))?; + + let scalar = Self::F::from_repr(bytes.into()); + if scalar.is_none().into() { + Err(CurveError::InvalidScalar)?; + } + + Ok(scalar.unwrap()) + } + + fn G_from_slice(slice: &[u8]) -> Result { + let bytes: [u8; 33] = slice.try_into() + .map_err(|_| CurveError::InvalidLength(33, slice.len()))?; + + let point = Self::G::from_bytes(&bytes.into()); + if point.is_none().into() || point.unwrap().is_identity().into() { + Err(CurveError::InvalidPoint)?; + } + + Ok(point.unwrap()) + } + + fn F_to_bytes(f: &Self::F) -> Vec { + f.to_repr().as_ref().to_vec() + } + + fn G_to_bytes(g: &Self::G) -> Vec { + g.to_bytes().as_ref().to_vec() + } +} diff --git a/crypto/frost/src/curves/mod.rs b/crypto/frost/src/curves/mod.rs new file mode 100644 index 00000000..fab9f2a4 --- /dev/null +++ b/crypto/frost/src/curves/mod.rs @@ -0,0 +1,48 @@ +use sha2::{Digest, Sha256}; + +pub mod kp256; + +// TODO: Actually make proper or replace with something from another crate +pub(crate) fn expand_message_xmd_sha256(dst: &[u8], msg: &[u8], len: u16) -> Option> { + const OUTPUT_SIZE: u16 = 32; + const BLOCK_SIZE: u16 = 64; + + let blocks = ((len + OUTPUT_SIZE) - 1) / OUTPUT_SIZE; + if blocks > 255 { + return None; + } + let blocks = blocks as u8; + + let mut dst = dst; + let oversize = Sha256::digest([b"H2C-OVERSIZE-DST-", dst].concat()); + if dst.len() > 255 { + dst = &oversize; + } + let dst_prime = &[dst, &[dst.len() as u8]].concat(); + + let mut msg_prime = vec![0; BLOCK_SIZE.into()]; + msg_prime.extend(msg); + msg_prime.extend(len.to_be_bytes()); + msg_prime.push(0); + msg_prime.extend(dst_prime); + + let mut b = vec![Sha256::digest(&msg_prime).to_vec()]; + + { + let mut b1 = b[0].clone(); + b1.push(1); + b1.extend(dst_prime); + b.push(Sha256::digest(&b1).to_vec()); 
+ } + + for i in 2 ..= blocks { + let mut msg = b[0] + .iter().zip(b[usize::from(i) - 1].iter()) + .map(|(a, b)| *a ^ b).collect::>(); + msg.push(i); + msg.extend(dst_prime); + b.push(Sha256::digest(msg).to_vec()); + } + + Some(b[1 ..].concat()[.. usize::from(len)].to_vec()) +} diff --git a/crypto/frost/src/lib.rs b/crypto/frost/src/lib.rs index 3f7c2b4e..b7146247 100644 --- a/crypto/frost/src/lib.rs +++ b/crypto/frost/src/lib.rs @@ -13,6 +13,8 @@ mod schnorr; pub mod key_gen; pub mod algorithm; pub mod sign; +#[cfg(any(test, feature = "p256", feature = "k256"))] +pub mod curves; pub mod tests; diff --git a/crypto/frost/src/tests/literal/expand_message.rs b/crypto/frost/src/tests/literal/expand_message.rs new file mode 100644 index 00000000..762ab0df --- /dev/null +++ b/crypto/frost/src/tests/literal/expand_message.rs @@ -0,0 +1,15 @@ +use crate::curves::expand_message_xmd_sha256; + +#[test] +fn test_xmd_sha256() { + assert_eq!( + hex::encode(expand_message_xmd_sha256(b"QUUX-V01-CS02-with-expander", b"", 0x80).unwrap()), + ( + "8bcffd1a3cae24cf9cd7ab85628fd111bb17e3739d3b53f8".to_owned() + + "9580d217aa79526f1708354a76a402d3569d6a9d19ef3de4d0b991" + + "e4f54b9f20dcde9b95a66824cbdf6c1a963a1913d43fd7ac443a02" + + "fc5d9d8d77e2071b86ab114a9f34150954a7531da568a1ea8c7608" + + "61c0cde2005afc2c114042ee7b5848f5303f0611cf297f" + ) + ); +} diff --git a/crypto/frost/src/tests/literal/kp256.rs b/crypto/frost/src/tests/literal/kp256.rs new file mode 100644 index 00000000..3adf42dd --- /dev/null +++ b/crypto/frost/src/tests/literal/kp256.rs @@ -0,0 +1,80 @@ +use rand::rngs::OsRng; + +use crate::{ + Curve, + curves::kp256::{KP256Instance, P256}, + algorithm::Hram, + tests::{curve::test_curve, schnorr::test_schnorr, vectors::{Vectors, vectors}} +}; + +#[cfg(feature = "k256")] +use crate::curves::kp256::K256; + +#[test] +fn p256_curve() { + test_curve::<_, P256>(&mut OsRng); +} + +#[test] +fn p256_schnorr() { + test_schnorr::<_, P256>(&mut OsRng); +} + +#[derive(Clone)] +pub 
struct IetfP256Hram; +impl Hram for IetfP256Hram { + #[allow(non_snake_case)] + fn hram(R: &p256::ProjectivePoint, A: &p256::ProjectivePoint, m: &[u8]) -> p256::Scalar { + P256::hash_to_F( + &[P256::CONTEXT, b"chal"].concat(), + &[&P256::G_to_bytes(R), &P256::G_to_bytes(A), m].concat() + ) + } +} + +#[test] +fn p256_vectors() { + vectors::( + Vectors { + threshold: 2, + shares: &[ + "0c9c1a0fe806c184add50bbdcac913dda73e482daf95dcb9f35dbb0d8a9f7731", + "8d8e787bef0ff6c2f494ca45f4dad198c6bee01212d6c84067159c52e1863ad5", + "0e80d6e8f6192c003b5488ce1eec8f5429587d48cf001541e713b2d53c09d928" + ], + group_secret: "8ba9bba2e0fd8c4767154d35a0b7562244a4aaf6f36c8fb8735fa48b301bd8de", + group_key: "023a309ad94e9fe8a7ba45dfc58f38bf091959d3c99cfbd02b4dc00585ec45ab70", + + msg: "74657374", + included: &[1, 3], + nonces: &[ + [ + "081617b24375e069b39f649d4c4ce2fba6e38b73e7c16759de0b6079a22c4c7e", + "4de5fb77d99f03a2491a83a6a4cb91ca3c82a3f34ce94cec939174f47c9f95dd" + ], + [ + "d186ea92593f83ea83181b184d41aa93493301ac2bc5b4b1767e94d2db943e38", + "486e2ee25a3fbc8e6399d748b077a2755fde99fa85cc24fa647ea4ebf5811a15" + ] + ], + sig_shares: &[ + "9e4d8865faf8c7b3193a3b35eda3d9e12118447114b1e7d5b4809ea28067f8a9", + "b7d094eab6305ae74daeed1acd31abba9ab81f638d38b72c132cb25a5dfae1fc" + ], + sig: "0342c14c77f9d4ef9b8bd64fb0d7bbfdb9f8216a44e5f7bbe6ac0f3ed5e1a57367".to_owned() + + "561e1d51b129229966e92850bad5859bfee96926fad3007cd3f38639e1ffb554" + } + ); +} + +#[cfg(feature = "k256")] +#[test] +fn k256_curve() { + test_curve::<_, K256>(&mut OsRng); +} + +#[cfg(feature = "k256")] +#[test] +fn k256_schnorr() { + test_schnorr::<_, K256>(&mut OsRng); +} diff --git a/crypto/frost/src/tests/literal/mod.rs b/crypto/frost/src/tests/literal/mod.rs index eea846ee..fc2aab5a 100644 --- a/crypto/frost/src/tests/literal/mod.rs +++ b/crypto/frost/src/tests/literal/mod.rs @@ -1 +1,2 @@ -mod p256; +mod expand_message; +mod kp256; diff --git a/crypto/frost/src/tests/literal/p256.rs 
b/crypto/frost/src/tests/literal/p256.rs deleted file mode 100644 index 1ca4ed39..00000000 --- a/crypto/frost/src/tests/literal/p256.rs +++ /dev/null @@ -1,219 +0,0 @@ -use core::convert::TryInto; - -use rand::{RngCore, CryptoRng, rngs::OsRng}; - -use ff::{Field, PrimeField}; -use group::{Group, GroupEncoding}; - -use sha2::{digest::Update, Digest, Sha256}; - -use p256::{elliptic_curve::bigint::{Encoding, U384}, Scalar, ProjectivePoint}; - -use crate::{ - CurveError, Curve, - algorithm::Hram, - tests::{curve::test_curve, schnorr::test_schnorr, vectors::{Vectors, vectors}} -}; - -const CONTEXT_STRING: &[u8] = b"FROST-P256-SHA256-v5"; - -fn expand_message_xmd_sha256(dst: &[u8], msg: &[u8], len: u16) -> Option> { - const OUTPUT_SIZE: u16 = 32; - const BLOCK_SIZE: u16 = 64; - - let blocks = ((len + OUTPUT_SIZE) - 1) / OUTPUT_SIZE; - if blocks > 255 { - return None; - } - let blocks = blocks as u8; - - let mut dst = dst; - let oversize = Sha256::digest([b"H2C-OVERSIZE-DST-", dst].concat()); - if dst.len() > 255 { - dst = &oversize; - } - let dst_prime = &[dst, &[dst.len() as u8]].concat(); - - let mut msg_prime = vec![0; BLOCK_SIZE.into()]; - msg_prime.extend(msg); - msg_prime.extend(len.to_be_bytes()); - msg_prime.push(0); - msg_prime.extend(dst_prime); - - let mut b = vec![Sha256::digest(&msg_prime).to_vec()]; - - { - let mut b1 = b[0].clone(); - b1.push(1); - b1.extend(dst_prime); - b.push(Sha256::digest(&b1).to_vec()); - } - - for i in 2 ..= blocks { - let mut msg = b[0] - .iter().zip(b[usize::from(i) - 1].iter()) - .map(|(a, b)| *a ^ b).collect::>(); - msg.push(i); - msg.extend(dst_prime); - b.push(Sha256::digest(msg).to_vec()); - } - - Some(b[1 ..].concat()[.. 
usize::from(len)].to_vec()) -} - -#[test] -fn test_xmd_sha256() { - assert_eq!( - hex::encode(expand_message_xmd_sha256(b"QUUX-V01-CS02-with-expander", b"", 0x80).unwrap()), - ( - "8bcffd1a3cae24cf9cd7ab85628fd111bb17e3739d3b53f8".to_owned() + - "9580d217aa79526f1708354a76a402d3569d6a9d19ef3de4d0b991" + - "e4f54b9f20dcde9b95a66824cbdf6c1a963a1913d43fd7ac443a02" + - "fc5d9d8d77e2071b86ab114a9f34150954a7531da568a1ea8c7608" + - "61c0cde2005afc2c114042ee7b5848f5303f0611cf297f" - ) - ); -} - -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct P256; -impl Curve for P256 { - type F = Scalar; - type G = ProjectivePoint; - type T = ProjectivePoint; - - const ID: &'static [u8] = b"P-256"; - - const GENERATOR: Self::G = Self::G::GENERATOR; - const GENERATOR_TABLE: Self::G = Self::G::GENERATOR; - - const LITTLE_ENDIAN: bool = false; - - fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F { - let mut seed = vec![0; 32]; - rng.fill_bytes(&mut seed); - seed.extend(&secret.to_repr()); - Self::hash_to_F(&[CONTEXT_STRING, b"nonce"].concat(), &seed) - } - - fn hash_msg(msg: &[u8]) -> Vec { - (&Sha256::new() - .chain(CONTEXT_STRING) - .chain(b"digest") - .chain(msg) - .finalize() - ).to_vec() - } - - fn hash_binding_factor(binding: &[u8]) -> Self::F { - Self::hash_to_F(&[CONTEXT_STRING, b"rho"].concat(), binding) - } - - fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F { - let mut modulus = vec![0; 16]; - modulus.extend(&(Scalar::zero() - Scalar::one()).to_repr()); - let modulus = U384::from_be_slice(&modulus).wrapping_add(&U384::ONE); - Self::F_from_slice( - &U384::from_be_slice( - &expand_message_xmd_sha256(dst, msg, 48).unwrap() - ).reduce(&modulus).unwrap().to_be_bytes()[16 ..] 
- ).unwrap() - } - - fn F_len() -> usize { - 32 - } - - fn G_len() -> usize { - 33 - } - - fn F_from_slice(slice: &[u8]) -> Result { - let bytes: [u8; 32] = slice.try_into() - .map_err(|_| CurveError::InvalidLength(32, slice.len()))?; - - let scalar = Scalar::from_repr(bytes.into()); - if scalar.is_none().into() { - Err(CurveError::InvalidScalar)?; - } - - Ok(scalar.unwrap()) - } - - fn G_from_slice(slice: &[u8]) -> Result { - let bytes: [u8; 33] = slice.try_into() - .map_err(|_| CurveError::InvalidLength(33, slice.len()))?; - - let point = ProjectivePoint::from_bytes(&bytes.into()); - if point.is_none().into() || point.unwrap().is_identity().into() { - Err(CurveError::InvalidPoint)?; - } - - Ok(point.unwrap()) - } - - fn F_to_bytes(f: &Self::F) -> Vec { - (&f.to_bytes()).to_vec() - } - - fn G_to_bytes(g: &Self::G) -> Vec { - (&g.to_bytes()).to_vec() - } -} - -#[test] -fn p256_curve() { - test_curve::<_, P256>(&mut OsRng); -} - -#[test] -fn p256_schnorr() { - test_schnorr::<_, P256>(&mut OsRng); -} - -#[derive(Clone)] -pub struct IetfP256Hram; -impl Hram for IetfP256Hram { - #[allow(non_snake_case)] - fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar { - P256::hash_to_F( - &[CONTEXT_STRING, b"chal"].concat(), - &[&P256::G_to_bytes(R), &P256::G_to_bytes(A), m].concat() - ) - } -} - -#[test] -fn p256_vectors() { - vectors::( - Vectors { - threshold: 2, - shares: &[ - "0c9c1a0fe806c184add50bbdcac913dda73e482daf95dcb9f35dbb0d8a9f7731", - "8d8e787bef0ff6c2f494ca45f4dad198c6bee01212d6c84067159c52e1863ad5", - "0e80d6e8f6192c003b5488ce1eec8f5429587d48cf001541e713b2d53c09d928" - ], - group_secret: "8ba9bba2e0fd8c4767154d35a0b7562244a4aaf6f36c8fb8735fa48b301bd8de", - group_key: "023a309ad94e9fe8a7ba45dfc58f38bf091959d3c99cfbd02b4dc00585ec45ab70", - - msg: "74657374", - included: &[1, 3], - nonces: &[ - [ - "081617b24375e069b39f649d4c4ce2fba6e38b73e7c16759de0b6079a22c4c7e", - "4de5fb77d99f03a2491a83a6a4cb91ca3c82a3f34ce94cec939174f47c9f95dd" - ], - [ - 
"d186ea92593f83ea83181b184d41aa93493301ac2bc5b4b1767e94d2db943e38", - "486e2ee25a3fbc8e6399d748b077a2755fde99fa85cc24fa647ea4ebf5811a15" - ] - ], - sig_shares: &[ - "9e4d8865faf8c7b3193a3b35eda3d9e12118447114b1e7d5b4809ea28067f8a9", - "b7d094eab6305ae74daeed1acd31abba9ab81f638d38b72c132cb25a5dfae1fc" - ], - sig: "0342c14c77f9d4ef9b8bd64fb0d7bbfdb9f8216a44e5f7bbe6ac0f3ed5e1a57367".to_owned() + - "561e1d51b129229966e92850bad5859bfee96926fad3007cd3f38639e1ffb554" - } - ); -} From e0ce6e5c12e3bc35f9d9821f75849c65974919f2 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 6 Jun 2022 02:18:25 -0400 Subject: [PATCH 019/105] Add Ed25519 to FROST and remove expand_xmd for elliptic_curve's Doesn't fully utilize ec's hash2curve module as k256 Scalar doesn't have FromOkm for some reason. The previously present bigint reduction is preserved. Updates ff/group to 0.12. Premised on https://github.com/cfrg/draft-irtf-cfrg-frost/pull/205 being merged, as while this Ed25519 is vector compliant, it's technically not spec compliant due to that conflict. 
--- coins/monero/Cargo.toml | 9 +- coins/monero/src/frost.rs | 114 +----------------- coins/monero/src/tests/mod.rs | 3 - crypto/dalek-ff-group/Cargo.toml | 3 +- crypto/dalek-ff-group/src/lib.rs | 3 +- crypto/frost/Cargo.toml | 24 ++-- crypto/frost/src/curves/ed25519.rs | 104 ++++++++++++++++ crypto/frost/src/curves/kp256.rs | 71 +++++++---- crypto/frost/src/curves/mod.rs | 49 +------- crypto/frost/src/lib.rs | 2 +- .../frost/src/tests/literal/ed25519.rs | 37 +----- .../frost/src/tests/literal/expand_message.rs | 15 --- crypto/frost/src/tests/literal/kp256.rs | 16 +-- crypto/frost/src/tests/literal/mod.rs | 3 +- crypto/multiexp/Cargo.toml | 2 +- 15 files changed, 189 insertions(+), 266 deletions(-) create mode 100644 crypto/frost/src/curves/ed25519.rs rename coins/monero/src/tests/frost.rs => crypto/frost/src/tests/literal/ed25519.rs (61%) delete mode 100644 crypto/frost/src/tests/literal/expand_message.rs diff --git a/coins/monero/Cargo.toml b/coins/monero/Cargo.toml index 22e1dfe5..0934a813 100644 --- a/coins/monero/Cargo.toml +++ b/coins/monero/Cargo.toml @@ -16,16 +16,15 @@ rand = "0.8" rand_distr = "0.4" tiny-keccak = { version = "2", features = ["keccak"] } -blake2 = "0.10" +blake2 = { version = "0.10", optional = true } curve25519-dalek = { version = "3", features = ["std"] } -ff = { version = "0.11", optional = true } -group = { version = "0.11", optional = true } +group = { version = "0.12", optional = true } dalek-ff-group = { path = "../../crypto/dalek-ff-group", optional = true } transcript = { path = "../../crypto/transcript", optional = true } -frost = { path = "../../crypto/frost", optional = true } +frost = { path = "../../crypto/frost", features = ["ed25519"], optional = true } monero = "0.16" @@ -37,7 +36,7 @@ reqwest = { version = "0.11", features = ["json"] } [features] experimental = [] -multisig = ["ff", "group", "rand_chacha", "transcript", "frost", "dalek-ff-group"] +multisig = ["rand_chacha", "blake2", "group", "dalek-ff-group", 
"transcript", "frost"] [dev-dependencies] sha2 = "0.10" diff --git a/coins/monero/src/frost.rs b/coins/monero/src/frost.rs index 4653cc3e..d16238b4 100644 --- a/coins/monero/src/frost.rs +++ b/coins/monero/src/frost.rs @@ -1,22 +1,17 @@ -use core::{convert::TryInto, fmt::{Formatter, Debug}}; -use std::marker::PhantomData; +use core::convert::TryInto; use thiserror::Error; use rand_core::{RngCore, CryptoRng}; -use blake2::{digest::{generic_array::typenum::U64, Digest}, Blake2b512}; - use curve25519_dalek::{ constants::ED25519_BASEPOINT_TABLE as DTable, scalar::Scalar as DScalar, edwards::EdwardsPoint as DPoint }; -use ff::PrimeField; -use group::Group; - use transcript::{Transcript as TranscriptTrait, DigestTranscript}; -use frost::{CurveError, Curve}; +use frost::Curve; +pub use frost::curves::ed25519::Ed25519; use dalek_ff_group as dfg; use crate::random_scalar; @@ -33,109 +28,6 @@ pub enum MultisigError { InvalidKeyImage(u16) } -// Accept a parameterized hash function in order to check against the FROST vectors while still -// allowing Blake2b to be used with wide reduction in practice -pub struct Ed25519Internal, const WIDE: bool> { - _digest: PhantomData -} - -// Removed requirements for D to have all of these -impl, const WIDE: bool> Clone for Ed25519Internal { - fn clone(&self) -> Self { *self } -} -impl, const WIDE: bool> Copy for Ed25519Internal {} -impl, const WIDE: bool> PartialEq for Ed25519Internal { - fn eq(&self, _: &Self) -> bool { true } -} -impl, const WIDE: bool> Eq for Ed25519Internal {} -impl, const WIDE: bool> Debug for Ed25519Internal { - fn fmt(&self, _: &mut Formatter<'_>) -> Result<(), core::fmt::Error> { Ok(()) } -} - -impl, const WIDE: bool> Curve for Ed25519Internal { - type F = dfg::Scalar; - type G = dfg::EdwardsPoint; - type T = &'static dfg::EdwardsBasepointTable; - - const ID: &'static [u8] = b"edwards25519"; - - const GENERATOR: Self::G = dfg::ED25519_BASEPOINT_POINT; - const GENERATOR_TABLE: Self::T = 
&dfg::ED25519_BASEPOINT_TABLE; - - const LITTLE_ENDIAN: bool = true; - - fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F { - let mut seed = vec![0; 32]; - rng.fill_bytes(&mut seed); - seed.extend(&secret.to_bytes()); - Self::hash_to_F(b"nonce", &seed) - } - - fn hash_msg(msg: &[u8]) -> Vec { - D::digest(msg).to_vec() - } - - fn hash_binding_factor(binding: &[u8]) -> Self::F { - Self::hash_to_F(b"rho", binding) - } - - fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F { - let digest = D::new().chain_update(dst).chain_update(msg); - if WIDE { - dfg::Scalar::from_hash(digest) - } else { - dfg::Scalar::from_bytes_mod_order(digest.finalize()[32 ..].try_into().unwrap()) - } - } - - fn F_len() -> usize { - 32 - } - - fn G_len() -> usize { - 32 - } - - fn F_from_slice(slice: &[u8]) -> Result { - let scalar = Self::F::from_repr( - slice.try_into().map_err(|_| CurveError::InvalidLength(32, slice.len()))? - ); - if scalar.is_some().unwrap_u8() == 0 { - Err(CurveError::InvalidScalar)?; - } - Ok(scalar.unwrap()) - } - - fn G_from_slice(slice: &[u8]) -> Result { - let bytes = slice.try_into().map_err(|_| CurveError::InvalidLength(32, slice.len()))?; - let point = dfg::CompressedEdwardsY::new(bytes).decompress(); - - if let Some(point) = point { - // Ban identity and torsioned points - if point.is_identity().into() || (!bool::from(point.is_torsion_free())) { - Err(CurveError::InvalidPoint)?; - } - // Ban points which weren't canonically encoded - if point.compress().to_bytes() != bytes { - Err(CurveError::InvalidPoint)?; - } - Ok(point) - } else { - Err(CurveError::InvalidPoint) - } - } - - fn F_to_bytes(f: &Self::F) -> Vec { - f.to_repr().to_vec() - } - - fn G_to_bytes(g: &Self::G) -> Vec { - g.compress().to_bytes().to_vec() - } -} - -pub type Ed25519 = Ed25519Internal; - // Used to prove legitimacy of key images and nonces which both involve other basepoints #[derive(Clone)] pub struct DLEqProof { diff --git a/coins/monero/src/tests/mod.rs b/coins/monero/src/tests/mod.rs 
index c964dd96..b42cbcff 100644 --- a/coins/monero/src/tests/mod.rs +++ b/coins/monero/src/tests/mod.rs @@ -1,4 +1 @@ -#[cfg(feature = "multisig")] -mod frost; - mod clsag; diff --git a/crypto/dalek-ff-group/Cargo.toml b/crypto/dalek-ff-group/Cargo.toml index 55140724..79fdceb0 100644 --- a/crypto/dalek-ff-group/Cargo.toml +++ b/crypto/dalek-ff-group/Cargo.toml @@ -12,7 +12,6 @@ digest = "0.10" subtle = "2.4" -ff = "0.11" -group = "0.11" +group = "0.12" curve25519-dalek = "3.2" diff --git a/crypto/dalek-ff-group/src/lib.rs b/crypto/dalek-ff-group/src/lib.rs index 5bf1823d..9fa622f9 100644 --- a/crypto/dalek-ff-group/src/lib.rs +++ b/crypto/dalek-ff-group/src/lib.rs @@ -22,8 +22,7 @@ use dalek::{ } }; -use ff::{Field, PrimeField}; -use group::Group; +use group::{ff::{Field, PrimeField}, Group}; #[derive(Clone, Copy, PartialEq, Eq, Debug, Default)] pub struct Scalar(pub DScalar); diff --git a/crypto/frost/Cargo.toml b/crypto/frost/Cargo.toml index 2ed0bb3c..8112b97b 100644 --- a/crypto/frost/Cargo.toml +++ b/crypto/frost/Cargo.toml @@ -12,12 +12,15 @@ thiserror = "1" rand_core = "0.6" hex = "0.4" -ff = "0.11" -group = "0.11" - sha2 = { version = "0.10", optional = true } -p256 = { version = "0.10", optional = true } -k256 = { version = "0.10", optional = true } + +ff = "0.12" +group = "0.12" + +elliptic-curve = { version = "0.12", features = ["hash2curve"], optional = true } +p256 = { version = "0.11", features = ["arithmetic", "hash2curve"], optional = true } +k256 = { version = "0.11", features = ["arithmetic", "hash2curve"], optional = true } +dalek-ff-group = { path = "../dalek-ff-group", optional = true } transcript = { path = "../transcript" } @@ -25,9 +28,14 @@ multiexp = { path = "../multiexp", features = ["batch"] } [dev-dependencies] rand = "0.8" + sha2 = "0.10" -p256 = { version = "0.10", features = ["arithmetic"] } +elliptic-curve = { version = "0.12", features = ["hash2curve"] } +p256 = { version = "0.11", features = ["arithmetic", "hash2curve"] } 
[features] -p256 = ["sha2", "dep:p256"] -k256 = ["sha2", "dep:k256"] +curves = [] +kp256 = ["elliptic-curve"] +p256 = ["curves", "kp256", "sha2", "dep:p256"] +k256 = ["curves", "kp256", "sha2", "dep:k256"] +ed25519 = ["curves", "sha2", "dalek-ff-group"] diff --git a/crypto/frost/src/curves/ed25519.rs b/crypto/frost/src/curves/ed25519.rs new file mode 100644 index 00000000..f6e9aa52 --- /dev/null +++ b/crypto/frost/src/curves/ed25519.rs @@ -0,0 +1,104 @@ +use core::convert::TryInto; + +use rand_core::{RngCore, CryptoRng}; + +use sha2::{Digest, Sha512}; + +use ff::PrimeField; +use group::Group; + +use dalek_ff_group::{ + EdwardsBasepointTable, + ED25519_BASEPOINT_POINT, ED25519_BASEPOINT_TABLE, + Scalar, EdwardsPoint, CompressedEdwardsY +}; + +use crate::{CurveError, Curve, algorithm::Hram}; + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct Ed25519; +impl Curve for Ed25519 { + type F = Scalar; + type G = EdwardsPoint; + type T = &'static EdwardsBasepointTable; + + const ID: &'static [u8] = b"edwards25519"; + + const GENERATOR: Self::G = ED25519_BASEPOINT_POINT; + const GENERATOR_TABLE: Self::T = &ED25519_BASEPOINT_TABLE; + + const LITTLE_ENDIAN: bool = true; + + fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F { + let mut seed = vec![0; 32]; + rng.fill_bytes(&mut seed); + seed.extend(&secret.to_bytes()); + Self::hash_to_F(b"nonce", &seed) + } + + fn hash_msg(msg: &[u8]) -> Vec { + Sha512::digest(msg).to_vec() + } + + fn hash_binding_factor(binding: &[u8]) -> Self::F { + Self::hash_to_F(b"rho", binding) + } + + fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F { + Scalar::from_hash(Sha512::new().chain_update(dst).chain_update(msg)) + } + + fn F_len() -> usize { + 32 + } + + fn G_len() -> usize { + 32 + } + + fn F_from_slice(slice: &[u8]) -> Result { + let scalar = Self::F::from_repr( + slice.try_into().map_err(|_| CurveError::InvalidLength(32, slice.len()))? 
+ ); + if scalar.is_some().unwrap_u8() == 0 { + Err(CurveError::InvalidScalar)?; + } + Ok(scalar.unwrap()) + } + + fn G_from_slice(slice: &[u8]) -> Result { + let bytes = slice.try_into().map_err(|_| CurveError::InvalidLength(32, slice.len()))?; + let point = CompressedEdwardsY::new(bytes).decompress(); + + if let Some(point) = point { + // Ban identity and torsioned points + if point.is_identity().into() || (!bool::from(point.is_torsion_free())) { + Err(CurveError::InvalidPoint)?; + } + // Ban points which weren't canonically encoded + if point.compress().to_bytes() != bytes { + Err(CurveError::InvalidPoint)?; + } + Ok(point) + } else { + Err(CurveError::InvalidPoint) + } + } + + fn F_to_bytes(f: &Self::F) -> Vec { + f.to_repr().to_vec() + } + + fn G_to_bytes(g: &Self::G) -> Vec { + g.compress().to_bytes().to_vec() + } +} + +#[derive(Copy, Clone)] +pub struct IetfEd25519Hram; +impl Hram for IetfEd25519Hram { + #[allow(non_snake_case)] + fn hram(R: &EdwardsPoint, A: &EdwardsPoint, m: &[u8]) -> Scalar { + Ed25519::hash_to_F(b"", &[&R.compress().to_bytes(), &A.compress().to_bytes(), m].concat()) + } +} diff --git a/crypto/frost/src/curves/kp256.rs b/crypto/frost/src/curves/kp256.rs index 4fcdecb8..3abb1879 100644 --- a/crypto/frost/src/curves/kp256.rs +++ b/crypto/frost/src/curves/kp256.rs @@ -2,28 +2,27 @@ use core::{marker::PhantomData, convert::TryInto}; use rand_core::{RngCore, CryptoRng}; +use sha2::{digest::Update, Digest, Sha256}; + use ff::{Field, PrimeField}; use group::{Group, GroupEncoding}; -use sha2::{digest::Update, Digest, Sha256}; +use elliptic_curve::{bigint::{Encoding, U384}, hash2curve::{Expander, ExpandMsg, ExpandMsgXmd}}; -#[cfg(feature = "k256")] -use k256::elliptic_curve::bigint::{Encoding, U384}; -#[cfg(all(not(feature = "k256"), any(test, feature = "p256")))] -use p256::elliptic_curve::bigint::{Encoding, U384}; - -use crate::{CurveError, Curve, curves::expand_message_xmd_sha256}; +use crate::{CurveError, Curve}; +#[cfg(any(test, feature = 
"p256"))] +use crate::algorithm::Hram; #[allow(non_snake_case)] #[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct KP256 { - _P: PhantomData

+pub struct KP256 { + _G: PhantomData } -pub(crate) trait KP256Instance

{ +pub(crate) trait KP256Instance { const CONTEXT: &'static [u8]; const ID: &'static [u8]; - const GENERATOR: P; + const GENERATOR: G; } #[cfg(any(test, feature = "p256"))] @@ -44,19 +43,19 @@ impl KP256Instance for K256 { const GENERATOR: k256::ProjectivePoint = k256::ProjectivePoint::GENERATOR; } -impl Curve for KP256

where - KP256

: KP256Instance

, - P::Scalar: PrimeField, - ::Repr: From<[u8; 32]> + AsRef<[u8]>, - P::Repr: From<[u8; 33]> + AsRef<[u8]> { - type F = P::Scalar; - type G = P; - type T = P; +impl Curve for KP256 where + KP256: KP256Instance, + G::Scalar: PrimeField, + ::Repr: From<[u8; 32]> + AsRef<[u8]>, + G::Repr: From<[u8; 33]> + AsRef<[u8]> { + type F = G::Scalar; + type G = G; + type T = G; - const ID: &'static [u8] = >::ID; + const ID: &'static [u8] = >::ID; - const GENERATOR: Self::G = >::GENERATOR; - const GENERATOR_TABLE: Self::G = >::GENERATOR; + const GENERATOR: Self::G = >::GENERATOR; + const GENERATOR_TABLE: Self::G = >::GENERATOR; const LITTLE_ENDIAN: bool = false; @@ -81,13 +80,21 @@ impl Curve for KP256

where } fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F { + let mut dst = dst; + let oversize = Sha256::digest([b"H2C-OVERSIZE-DST-", dst].concat()); + if dst.len() > 255 { + dst = &oversize; + } + let mut modulus = vec![0; 16]; modulus.extend((Self::F::zero() - Self::F::one()).to_repr().as_ref()); let modulus = U384::from_be_slice(&modulus).wrapping_add(&U384::ONE); Self::F_from_slice( - &U384::from_be_slice( - &expand_message_xmd_sha256(dst, msg, 48).unwrap() - ).reduce(&modulus).unwrap().to_be_bytes()[16 ..] + &U384::from_be_slice(&{ + let mut bytes = [0; 48]; + ExpandMsgXmd::::expand_message(&[msg], dst, 48).unwrap().fill_bytes(&mut bytes); + bytes + }).reduce(&modulus).unwrap().to_be_bytes()[16 ..] ).unwrap() } @@ -131,3 +138,17 @@ impl Curve for KP256

where g.to_bytes().as_ref().to_vec() } } + +#[cfg(any(test, feature = "p256"))] +#[derive(Clone)] +pub struct IetfP256Hram; +#[cfg(any(test, feature = "p256"))] +impl Hram for IetfP256Hram { + #[allow(non_snake_case)] + fn hram(R: &p256::ProjectivePoint, A: &p256::ProjectivePoint, m: &[u8]) -> p256::Scalar { + P256::hash_to_F( + &[P256::CONTEXT, b"chal"].concat(), + &[&P256::G_to_bytes(R), &P256::G_to_bytes(A), m].concat() + ) + } +} diff --git a/crypto/frost/src/curves/mod.rs b/crypto/frost/src/curves/mod.rs index fab9f2a4..890f8600 100644 --- a/crypto/frost/src/curves/mod.rs +++ b/crypto/frost/src/curves/mod.rs @@ -1,48 +1,5 @@ -use sha2::{Digest, Sha256}; - +#[cfg(any(test, feature = "kp256"))] pub mod kp256; -// TODO: Actually make proper or replace with something from another crate -pub(crate) fn expand_message_xmd_sha256(dst: &[u8], msg: &[u8], len: u16) -> Option> { - const OUTPUT_SIZE: u16 = 32; - const BLOCK_SIZE: u16 = 64; - - let blocks = ((len + OUTPUT_SIZE) - 1) / OUTPUT_SIZE; - if blocks > 255 { - return None; - } - let blocks = blocks as u8; - - let mut dst = dst; - let oversize = Sha256::digest([b"H2C-OVERSIZE-DST-", dst].concat()); - if dst.len() > 255 { - dst = &oversize; - } - let dst_prime = &[dst, &[dst.len() as u8]].concat(); - - let mut msg_prime = vec![0; BLOCK_SIZE.into()]; - msg_prime.extend(msg); - msg_prime.extend(len.to_be_bytes()); - msg_prime.push(0); - msg_prime.extend(dst_prime); - - let mut b = vec![Sha256::digest(&msg_prime).to_vec()]; - - { - let mut b1 = b[0].clone(); - b1.push(1); - b1.extend(dst_prime); - b.push(Sha256::digest(&b1).to_vec()); - } - - for i in 2 ..= blocks { - let mut msg = b[0] - .iter().zip(b[usize::from(i) - 1].iter()) - .map(|(a, b)| *a ^ b).collect::>(); - msg.push(i); - msg.extend(dst_prime); - b.push(Sha256::digest(msg).to_vec()); - } - - Some(b[1 ..].concat()[.. 
usize::from(len)].to_vec()) -} +#[cfg(feature = "ed25519")] +pub mod ed25519; diff --git a/crypto/frost/src/lib.rs b/crypto/frost/src/lib.rs index b7146247..bf876f51 100644 --- a/crypto/frost/src/lib.rs +++ b/crypto/frost/src/lib.rs @@ -13,7 +13,7 @@ mod schnorr; pub mod key_gen; pub mod algorithm; pub mod sign; -#[cfg(any(test, feature = "p256", feature = "k256"))] +#[cfg(any(test, feature = "curves"))] pub mod curves; pub mod tests; diff --git a/coins/monero/src/tests/frost.rs b/crypto/frost/src/tests/literal/ed25519.rs similarity index 61% rename from coins/monero/src/tests/frost.rs rename to crypto/frost/src/tests/literal/ed25519.rs index 710328f8..43b31dc4 100644 --- a/coins/monero/src/tests/frost.rs +++ b/crypto/frost/src/tests/literal/ed25519.rs @@ -1,48 +1,23 @@ use rand::rngs::OsRng; -use sha2::Sha512; - -use dalek_ff_group as dfg; -use frost::{ - Curve, - algorithm::Hram, +use crate::{ + curves::ed25519::{Ed25519, IetfEd25519Hram}, tests::{curve::test_curve, schnorr::test_schnorr, vectors::{Vectors, vectors}} }; -use crate::frost::{Ed25519, Ed25519Internal}; - #[test] -fn frost_ed25519_curve() { +fn ed25519_curve() { test_curve::<_, Ed25519>(&mut OsRng); } #[test] -fn frost_ed25519_schnorr() { +fn ed25519_schnorr() { test_schnorr::<_, Ed25519>(&mut OsRng); } -// Not spec-compliant, as this shouldn't use wide reduction -// Is vectors compliant, which is why the below tests pass -// See https://github.com/cfrg/draft-irtf-cfrg-frost/issues/204 -//type TestEd25519 = Ed25519Internal; -// If this is kept, we can remove WIDE -type TestEd25519 = Ed25519Internal; - -#[derive(Copy, Clone)] -struct IetfEd25519Hram {} -impl Hram for IetfEd25519Hram { - #[allow(non_snake_case)] - fn hram(R: &dfg::EdwardsPoint, A: &dfg::EdwardsPoint, m: &[u8]) -> dfg::Scalar { - TestEd25519::hash_to_F( - b"", - &[&R.compress().to_bytes(), &A.compress().to_bytes(), m].concat() - ) - } -} - #[test] -fn frost_ed25519_vectors() { - vectors::( +fn ed25519_vectors() { + vectors::( Vectors { 
threshold: 2, shares: &[ diff --git a/crypto/frost/src/tests/literal/expand_message.rs b/crypto/frost/src/tests/literal/expand_message.rs deleted file mode 100644 index 762ab0df..00000000 --- a/crypto/frost/src/tests/literal/expand_message.rs +++ /dev/null @@ -1,15 +0,0 @@ -use crate::curves::expand_message_xmd_sha256; - -#[test] -fn test_xmd_sha256() { - assert_eq!( - hex::encode(expand_message_xmd_sha256(b"QUUX-V01-CS02-with-expander", b"", 0x80).unwrap()), - ( - "8bcffd1a3cae24cf9cd7ab85628fd111bb17e3739d3b53f8".to_owned() + - "9580d217aa79526f1708354a76a402d3569d6a9d19ef3de4d0b991" + - "e4f54b9f20dcde9b95a66824cbdf6c1a963a1913d43fd7ac443a02" + - "fc5d9d8d77e2071b86ab114a9f34150954a7531da568a1ea8c7608" + - "61c0cde2005afc2c114042ee7b5848f5303f0611cf297f" - ) - ); -} diff --git a/crypto/frost/src/tests/literal/kp256.rs b/crypto/frost/src/tests/literal/kp256.rs index 3adf42dd..60a29bee 100644 --- a/crypto/frost/src/tests/literal/kp256.rs +++ b/crypto/frost/src/tests/literal/kp256.rs @@ -1,9 +1,7 @@ use rand::rngs::OsRng; use crate::{ - Curve, - curves::kp256::{KP256Instance, P256}, - algorithm::Hram, + curves::kp256::{P256, IetfP256Hram}, tests::{curve::test_curve, schnorr::test_schnorr, vectors::{Vectors, vectors}} }; @@ -20,18 +18,6 @@ fn p256_schnorr() { test_schnorr::<_, P256>(&mut OsRng); } -#[derive(Clone)] -pub struct IetfP256Hram; -impl Hram for IetfP256Hram { - #[allow(non_snake_case)] - fn hram(R: &p256::ProjectivePoint, A: &p256::ProjectivePoint, m: &[u8]) -> p256::Scalar { - P256::hash_to_F( - &[P256::CONTEXT, b"chal"].concat(), - &[&P256::G_to_bytes(R), &P256::G_to_bytes(A), m].concat() - ) - } -} - #[test] fn p256_vectors() { vectors::( diff --git a/crypto/frost/src/tests/literal/mod.rs b/crypto/frost/src/tests/literal/mod.rs index fc2aab5a..77da3224 100644 --- a/crypto/frost/src/tests/literal/mod.rs +++ b/crypto/frost/src/tests/literal/mod.rs @@ -1,2 +1,3 @@ -mod expand_message; mod kp256; +#[cfg(feature = "ed25519")] +mod ed25519; diff --git 
a/crypto/multiexp/Cargo.toml b/crypto/multiexp/Cargo.toml index facc1aef..b45dbcf5 100644 --- a/crypto/multiexp/Cargo.toml +++ b/crypto/multiexp/Cargo.toml @@ -7,7 +7,7 @@ authors = ["Luke Parker "] edition = "2021" [dependencies] -group = "0.11" +group = "0.12" rand_core = { version = "0.6", optional = true } From 301634dd8ebe8775424847ff15baad7c50ec2ebe Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 6 Jun 2022 04:22:49 -0400 Subject: [PATCH 020/105] Add support for Ristretto Replaces P-256 as the curve used for testing FROST. --- coins/monero/src/frost.rs | 2 +- crypto/dalek-ff-group/src/lib.rs | 400 ++++++++++------------ crypto/frost/Cargo.toml | 15 +- crypto/frost/src/curves/dalek.rs | 163 +++++++++ crypto/frost/src/curves/ed25519.rs | 104 ------ crypto/frost/src/curves/kp256.rs | 10 +- crypto/frost/src/curves/mod.rs | 8 +- crypto/frost/src/tests/literal/dalek.rs | 77 +++++ crypto/frost/src/tests/literal/ed25519.rs | 51 --- crypto/frost/src/tests/literal/kp256.rs | 39 +-- crypto/frost/src/tests/literal/mod.rs | 5 +- crypto/frost/src/tests/vectors.rs | 15 +- 12 files changed, 477 insertions(+), 412 deletions(-) create mode 100644 crypto/frost/src/curves/dalek.rs delete mode 100644 crypto/frost/src/curves/ed25519.rs create mode 100644 crypto/frost/src/tests/literal/dalek.rs delete mode 100644 crypto/frost/src/tests/literal/ed25519.rs diff --git a/coins/monero/src/frost.rs b/coins/monero/src/frost.rs index d16238b4..8b151abc 100644 --- a/coins/monero/src/frost.rs +++ b/coins/monero/src/frost.rs @@ -11,7 +11,7 @@ use curve25519_dalek::{ use transcript::{Transcript as TranscriptTrait, DigestTranscript}; use frost::Curve; -pub use frost::curves::ed25519::Ed25519; +pub use frost::curves::dalek::Ed25519; use dalek_ff_group as dfg; use crate::random_scalar; diff --git a/crypto/dalek-ff-group/src/lib.rs b/crypto/dalek-ff-group/src/lib.rs index 9fa622f9..c919fea9 100644 --- a/crypto/dalek-ff-group/src/lib.rs +++ b/crypto/dalek-ff-group/src/lib.rs @@ -16,88 +16,123 
@@ use dalek::{ traits::Identity, scalar::Scalar as DScalar, edwards::{ - EdwardsPoint as DPoint, - EdwardsBasepointTable as DTable, - CompressedEdwardsY as DCompressed + EdwardsPoint as DEdwardsPoint, + EdwardsBasepointTable as DEdwardsBasepointTable, + CompressedEdwardsY as DCompressedEdwards + }, + ristretto::{ + RistrettoPoint as DRistrettoPoint, + RistrettoBasepointTable as DRistrettoBasepointTable, + CompressedRistretto as DCompressedRistretto } }; use group::{ff::{Field, PrimeField}, Group}; +macro_rules! deref_borrow { + ($Source: ident, $Target: ident) => { + impl Deref for $Source { + type Target = $Target; + + fn deref(&self) -> &Self::Target { + &self.0 + } + } + + impl Borrow<$Target> for $Source { + fn borrow(&self) -> &$Target { + &self.0 + } + } + + impl Borrow<$Target> for &$Source { + fn borrow(&self) -> &$Target { + &self.0 + } + } + } +} + +macro_rules! math { + ($Value: ident, $Factor: ident, $Product: ident) => { + impl Add<$Value> for $Value { + type Output = Self; + fn add(self, other: $Value) -> Self::Output { Self(self.0 + other.0) } + } + impl AddAssign for $Value { + fn add_assign(&mut self, other: $Value) { self.0 += other.0 } + } + + impl<'a> Add<&'a $Value> for $Value { + type Output = Self; + fn add(self, other: &'a $Value) -> Self::Output { Self(self.0 + other.0) } + } + impl<'a> AddAssign<&'a $Value> for $Value { + fn add_assign(&mut self, other: &'a $Value) { self.0 += other.0 } + } + + impl Sub<$Value> for $Value { + type Output = Self; + fn sub(self, other: $Value) -> Self::Output { Self(self.0 - other.0) } + } + impl SubAssign for $Value { + fn sub_assign(&mut self, other: $Value) { self.0 -= other.0 } + } + + impl<'a> Sub<&'a $Value> for $Value { + type Output = Self; + fn sub(self, other: &'a $Value) -> Self::Output { Self(self.0 - other.0) } + } + impl<'a> SubAssign<&'a $Value> for $Value { + fn sub_assign(&mut self, other: &'a $Value) { self.0 -= other.0 } + } + + impl Neg for $Value { + type Output = Self; + fn neg(self) 
-> Self::Output { Self(-self.0) } + } + + impl Mul<$Factor> for $Value { + type Output = $Product; + fn mul(self, other: $Factor) -> Self::Output { Self(self.0 * other.0) } + } + impl MulAssign<$Factor> for $Value { + fn mul_assign(&mut self, other: $Factor) { self.0 *= other.0 } + } + + impl<'a> Mul<&'a $Factor> for $Value { + type Output = Self; + fn mul(self, b: &'a $Factor) -> $Product { Self(b.0 * self.0) } + } + impl<'a> MulAssign<&'a $Factor> for $Value { + fn mul_assign(&mut self, other: &'a $Factor) { self.0 *= other.0 } + } + } +} + #[derive(Clone, Copy, PartialEq, Eq, Debug, Default)] pub struct Scalar(pub DScalar); +deref_borrow!(Scalar, DScalar); +math!(Scalar, Scalar, Scalar); -impl Deref for Scalar { - type Target = DScalar; - - fn deref(&self) -> &Self::Target { - &self.0 +impl Scalar { + pub fn from_canonical_bytes(bytes: [u8; 32]) -> Option { + DScalar::from_canonical_bytes(bytes).map(|x| Self(x)) } -} -impl Borrow for Scalar { - fn borrow(&self) -> &DScalar { - &self.0 + pub fn from_bytes_mod_order(bytes: [u8; 32]) -> Scalar { + Self(DScalar::from_bytes_mod_order(bytes)) } -} -impl Borrow for &Scalar { - fn borrow(&self) -> &DScalar { - &self.0 + pub fn from_bytes_mod_order_wide(bytes: &[u8; 64]) -> Scalar { + Self(DScalar::from_bytes_mod_order_wide(bytes)) } -} -impl Add for Scalar { - type Output = Self; - fn add(self, other: Scalar) -> Scalar { Self(self.0 + other.0) } -} -impl AddAssign for Scalar { - fn add_assign(&mut self, other: Scalar) { self.0 += other.0 } -} - -impl<'a> Add<&'a Scalar> for Scalar { - type Output = Self; - fn add(self, other: &'a Scalar) -> Scalar { Self(self.0 + other.0) } -} -impl<'a> AddAssign<&'a Scalar> for Scalar { - fn add_assign(&mut self, other: &'a Scalar) { self.0 += other.0 } -} - -impl Sub for Scalar { - type Output = Self; - fn sub(self, other: Scalar) -> Scalar { Self(self.0 - other.0) } -} -impl SubAssign for Scalar { - fn sub_assign(&mut self, other: Scalar) { self.0 -= other.0 } -} - -impl<'a> Sub<&'a 
Scalar> for Scalar { - type Output = Self; - fn sub(self, other: &'a Scalar) -> Scalar { Self(self.0 - other.0) } -} -impl<'a> SubAssign<&'a Scalar> for Scalar { - fn sub_assign(&mut self, other: &'a Scalar) { self.0 -= other.0 } -} - -impl Neg for Scalar { - type Output = Self; - fn neg(self) -> Scalar { Self(-self.0) } -} - -impl Mul for Scalar { - type Output = Self; - fn mul(self, other: Scalar) -> Scalar { Self(self.0 * other.0) } -} -impl MulAssign for Scalar { - fn mul_assign(&mut self, other: Scalar) { self.0 *= other.0 } -} - -impl<'a> Mul<&'a Scalar> for Scalar { - type Output = Self; - fn mul(self, other: &'a Scalar) -> Scalar { Self(self.0 * other.0) } -} -impl<'a> MulAssign<&'a Scalar> for Scalar { - fn mul_assign(&mut self, other: &'a Scalar) { self.0 *= other.0 } + pub fn from_hash>(hash: D) -> Scalar { + let mut output = [0u8; 64]; + output.copy_from_slice(&hash.finalize()); + Scalar(DScalar::from_bytes_mod_order_wide(&output)) + } } impl ConstantTimeEq for Scalar { @@ -153,163 +188,106 @@ impl PrimeField for Scalar { fn root_of_unity() -> Self { unimplemented!() } } -impl Scalar { - pub fn from_hash>(hash: D) -> Scalar { - let mut output = [0u8; 64]; - output.copy_from_slice(&hash.finalize()); - Scalar(DScalar::from_bytes_mod_order_wide(&output)) - } -} +macro_rules! 
dalek_group { + ( + $Point: ident, + $DPoint: ident, -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct EdwardsPoint(pub DPoint); -pub const ED25519_BASEPOINT_POINT: EdwardsPoint = EdwardsPoint(constants::ED25519_BASEPOINT_POINT); + $Table: ident, + $DTable: ident, -impl Deref for EdwardsPoint { - type Target = DPoint; + $Compressed: ident, + $DCompressed: ident, - fn deref(&self) -> &Self::Target { - &self.0 + $BASEPOINT_POINT: ident, + $BASEPOINT_TABLE: ident + ) => { + #[derive(Clone, Copy, PartialEq, Eq, Debug)] + pub struct $Point(pub $DPoint); + deref_borrow!($Point, $DPoint); + math!($Point, Scalar, $Point); + + pub const $BASEPOINT_POINT: $Point = $Point(constants::$BASEPOINT_POINT); + + impl Sum<$Point> for $Point { + fn sum>(iter: I) -> $Point { Self($DPoint::sum(iter)) } } + impl<'a> Sum<&'a $Point> for $Point { + fn sum>(iter: I) -> $Point { Self($DPoint::sum(iter)) } + } + + impl Group for $Point { + type Scalar = Scalar; + fn random(rng: impl RngCore) -> Self { &$BASEPOINT_TABLE * Scalar::random(rng) } + fn identity() -> Self { Self($DPoint::identity()) } + fn generator() -> Self { $BASEPOINT_POINT } + fn is_identity(&self) -> Choice { self.0.ct_eq(&$DPoint::identity()) } + fn double(&self) -> Self { *self + self } + } + + pub struct $Compressed(pub $DCompressed); + deref_borrow!($Compressed, $DCompressed); + impl $Compressed { + pub fn new(y: [u8; 32]) -> $Compressed { + Self($DCompressed(y)) + } + + pub fn decompress(&self) -> Option<$Point> { + self.0.decompress().map(|x| $Point(x)) + } + + pub fn to_bytes(&self) -> [u8; 32] { + self.0.to_bytes() + } + } + + impl $Point { + pub fn compress(&self) -> $Compressed { + $Compressed(self.0.compress()) + } + } + + pub struct $Table(pub $DTable); + deref_borrow!($Table, $DTable); + pub const $BASEPOINT_TABLE: $Table = $Table(constants::$BASEPOINT_TABLE); + + impl Mul for &$Table { + type Output = $Point; + fn mul(self, b: Scalar) -> $Point { $Point(&b.0 * &self.0) } + } + }; } -impl Borrow for 
EdwardsPoint { - fn borrow(&self) -> &DPoint { - &self.0 - } -} +dalek_group!( + EdwardsPoint, + DEdwardsPoint, -impl Borrow for &EdwardsPoint { - fn borrow(&self) -> &DPoint { - &self.0 - } -} + EdwardsBasepointTable, + DEdwardsBasepointTable, -impl Add for EdwardsPoint { - type Output = Self; - fn add(self, b: EdwardsPoint) -> EdwardsPoint { Self(self.0 + b.0) } -} -impl AddAssign for EdwardsPoint { - fn add_assign(&mut self, other: EdwardsPoint) { self.0 += other.0 } -} -impl Sum for EdwardsPoint { - fn sum>(iter: I) -> EdwardsPoint { Self(DPoint::sum(iter)) } -} + CompressedEdwardsY, + DCompressedEdwards, -impl<'a> Add<&'a EdwardsPoint> for EdwardsPoint { - type Output = Self; - fn add(self, b: &'a EdwardsPoint) -> EdwardsPoint { Self(self.0 + b.0) } -} -impl<'a> AddAssign<&'a EdwardsPoint> for EdwardsPoint { - fn add_assign(&mut self, other: &'a EdwardsPoint) { self.0 += other.0 } -} -impl<'a> Sum<&'a EdwardsPoint> for EdwardsPoint { - fn sum>(iter: I) -> EdwardsPoint { Self(DPoint::sum(iter)) } -} - -impl Sub for EdwardsPoint { - type Output = Self; - fn sub(self, b: EdwardsPoint) -> EdwardsPoint { Self(self.0 - b.0) } -} -impl SubAssign for EdwardsPoint { - fn sub_assign(&mut self, other: EdwardsPoint) { self.0 -= other.0 } -} - -impl<'a> Sub<&'a EdwardsPoint> for EdwardsPoint { - type Output = Self; - fn sub(self, b: &'a EdwardsPoint) -> EdwardsPoint { Self(self.0 - b.0) } -} -impl<'a> SubAssign<&'a EdwardsPoint> for EdwardsPoint { - fn sub_assign(&mut self, other: &'a EdwardsPoint) { self.0 -= other.0 } -} - -impl Neg for EdwardsPoint { - type Output = Self; - fn neg(self) -> EdwardsPoint { Self(-self.0) } -} - -impl Mul for EdwardsPoint { - type Output = Self; - fn mul(self, b: Scalar) -> EdwardsPoint { Self(b.0 * self.0) } -} -impl MulAssign for EdwardsPoint { - fn mul_assign(&mut self, other: Scalar) { self.0 *= other.0 } -} - -impl<'a> Mul<&'a Scalar> for EdwardsPoint { - type Output = Self; - fn mul(self, b: &'a Scalar) -> EdwardsPoint { Self(b.0 * 
self.0) } -} -impl<'a> MulAssign<&'a Scalar> for EdwardsPoint { - fn mul_assign(&mut self, other: &'a Scalar) { self.0 *= other.0 } -} - -impl Group for EdwardsPoint { - type Scalar = Scalar; - fn random(rng: impl RngCore) -> Self { &ED25519_BASEPOINT_TABLE * Scalar::random(rng) } - fn identity() -> Self { Self(DPoint::identity()) } - fn generator() -> Self { ED25519_BASEPOINT_POINT } - fn is_identity(&self) -> Choice { self.0.ct_eq(&DPoint::identity()) } - fn double(&self) -> Self { *self + self } -} - -impl Scalar { - pub fn from_canonical_bytes(bytes: [u8; 32]) -> Option { - DScalar::from_canonical_bytes(bytes).map(|x| Self(x)) - } - pub fn from_bytes_mod_order(bytes: [u8; 32]) -> Scalar { - Self(DScalar::from_bytes_mod_order(bytes)) - } - pub fn from_bytes_mod_order_wide(bytes: &[u8; 64]) -> Scalar { - Self(DScalar::from_bytes_mod_order_wide(bytes)) - } -} - -pub struct CompressedEdwardsY(pub DCompressed); -impl CompressedEdwardsY { - pub fn new(y: [u8; 32]) -> CompressedEdwardsY { - Self(DCompressed(y)) - } - - pub fn decompress(&self) -> Option { - self.0.decompress().map(|x| EdwardsPoint(x)) - } - - pub fn to_bytes(&self) -> [u8; 32] { - self.0.to_bytes() - } -} + ED25519_BASEPOINT_POINT, + ED25519_BASEPOINT_TABLE +); impl EdwardsPoint { pub fn is_torsion_free(&self) -> bool { self.0.is_torsion_free() } - - pub fn compress(&self) -> CompressedEdwardsY { - CompressedEdwardsY(self.0.compress()) - } } -pub struct EdwardsBasepointTable(pub DTable); -pub const ED25519_BASEPOINT_TABLE: EdwardsBasepointTable = EdwardsBasepointTable( - constants::ED25519_BASEPOINT_TABLE +dalek_group!( + RistrettoPoint, + DRistrettoPoint, + + RistrettoBasepointTable, + DRistrettoBasepointTable, + + CompressedRistretto, + DCompressedRistretto, + + RISTRETTO_BASEPOINT_POINT, + RISTRETTO_BASEPOINT_TABLE ); - -impl Deref for EdwardsBasepointTable { - type Target = DTable; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl Borrow for &EdwardsBasepointTable { - fn 
borrow(&self) -> &DTable { - &self.0 - } -} - -impl Mul for &EdwardsBasepointTable { - type Output = EdwardsPoint; - fn mul(self, b: Scalar) -> EdwardsPoint { EdwardsPoint(&b.0 * &self.0) } -} diff --git a/crypto/frost/Cargo.toml b/crypto/frost/Cargo.toml index 8112b97b..6d9af3f6 100644 --- a/crypto/frost/Cargo.toml +++ b/crypto/frost/Cargo.toml @@ -30,12 +30,13 @@ multiexp = { path = "../multiexp", features = ["batch"] } rand = "0.8" sha2 = "0.10" -elliptic-curve = { version = "0.12", features = ["hash2curve"] } -p256 = { version = "0.11", features = ["arithmetic", "hash2curve"] } +dalek-ff-group = { path = "../dalek-ff-group" } [features] -curves = [] -kp256 = ["elliptic-curve"] -p256 = ["curves", "kp256", "sha2", "dep:p256"] -k256 = ["curves", "kp256", "sha2", "dep:k256"] -ed25519 = ["curves", "sha2", "dalek-ff-group"] +curves = ["sha2"] # All officially denoted curves use the SHA2 family of hashes +kp256 = ["elliptic-curve", "curves"] +p256 = ["dep:p256", "kp256"] +k256 = ["dep:k256", "kp256"] +dalek = ["curves", "dalek-ff-group"] +ed25519 = ["dalek"] +ristretto = ["dalek"] diff --git a/crypto/frost/src/curves/dalek.rs b/crypto/frost/src/curves/dalek.rs new file mode 100644 index 00000000..994ac651 --- /dev/null +++ b/crypto/frost/src/curves/dalek.rs @@ -0,0 +1,163 @@ +use core::convert::TryInto; + +use rand_core::{RngCore, CryptoRng}; + +use sha2::{Digest, Sha512}; + +use ff::PrimeField; +use group::Group; + +use dalek_ff_group::Scalar; + +use crate::{CurveError, Curve, algorithm::Hram}; + +macro_rules! 
dalek_curve { + ( + $Curve: ident, + $Hram: ident, + $Point: ident, + $Compressed: ident, + $Table: ident, + + $POINT: ident, + $TABLE: ident, + + $torsioned: expr, + + $ID: literal, + $CONTEXT: literal, + $chal: literal, + $digest: literal, + ) => { + use dalek_ff_group::{$Point, $Compressed, $Table, $POINT, $TABLE}; + + #[derive(Clone, Copy, PartialEq, Eq, Debug)] + pub struct $Curve; + impl Curve for $Curve { + type F = Scalar; + type G = $Point; + type T = &'static $Table; + + const ID: &'static [u8] = $ID; + + const GENERATOR: Self::G = $POINT; + const GENERATOR_TABLE: Self::T = &$TABLE; + + const LITTLE_ENDIAN: bool = true; + + fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F { + let mut seed = vec![0; 32]; + rng.fill_bytes(&mut seed); + seed.extend(&secret.to_bytes()); + Self::hash_to_F(b"nonce", &seed) + } + + fn hash_msg(msg: &[u8]) -> Vec { + Sha512::new() + .chain_update($CONTEXT) + .chain_update($digest) + .chain_update(msg) + .finalize() + .to_vec() + } + + fn hash_binding_factor(binding: &[u8]) -> Self::F { + Self::hash_to_F(b"rho", binding) + } + + fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F { + Scalar::from_hash(Sha512::new().chain_update($CONTEXT).chain_update(dst).chain_update(msg)) + } + + fn F_len() -> usize { + 32 + } + + fn G_len() -> usize { + 32 + } + + fn F_from_slice(slice: &[u8]) -> Result { + let scalar = Self::F::from_repr( + slice.try_into().map_err(|_| CurveError::InvalidLength(32, slice.len()))? 
+ ); + if scalar.is_some().unwrap_u8() == 0 { + Err(CurveError::InvalidScalar)?; + } + Ok(scalar.unwrap()) + } + + fn G_from_slice(slice: &[u8]) -> Result { + let bytes = slice.try_into().map_err(|_| CurveError::InvalidLength(32, slice.len()))?; + let point = $Compressed::new(bytes).decompress(); + + if let Some(point) = point { + // Ban identity + if point.is_identity().into() { + Err(CurveError::InvalidPoint)?; + } + // Ban torsioned points to meet the prime order group requirement + if $torsioned(point) { + Err(CurveError::InvalidPoint)?; + } + // Ban points which weren't canonically encoded + if point.compress().to_bytes() != bytes { + Err(CurveError::InvalidPoint)?; + } + Ok(point) + } else { + Err(CurveError::InvalidPoint) + } + } + + fn F_to_bytes(f: &Self::F) -> Vec { + f.to_repr().to_vec() + } + + fn G_to_bytes(g: &Self::G) -> Vec { + g.compress().to_bytes().to_vec() + } + } + + #[derive(Copy, Clone)] + pub struct $Hram; + impl Hram<$Curve> for $Hram { + #[allow(non_snake_case)] + fn hram(R: &$Point, A: &$Point, m: &[u8]) -> Scalar { + $Curve::hash_to_F($chal, &[&R.compress().to_bytes(), &A.compress().to_bytes(), m].concat()) + } + } + } +} + +#[cfg(feature = "ed25519")] +dalek_curve!( + Ed25519, + IetfEd25519Hram, + EdwardsPoint, + CompressedEdwardsY, + EdwardsBasepointTable, + ED25519_BASEPOINT_POINT, + ED25519_BASEPOINT_TABLE, + |point: EdwardsPoint| !bool::from(point.is_torsion_free()), + b"edwards25519", + b"", + b"", + b"", +); + +#[cfg(any(test, feature = "ristretto"))] +dalek_curve!( + Ristretto, + IetfRistrettoHram, + RistrettoPoint, + CompressedRistretto, + RistrettoBasepointTable, + RISTRETTO_BASEPOINT_POINT, + RISTRETTO_BASEPOINT_TABLE, + |_| false, + b"ristretto", + b"FROST-RISTRETTO255-SHA512-v5", + b"chal", + b"digest", +); diff --git a/crypto/frost/src/curves/ed25519.rs b/crypto/frost/src/curves/ed25519.rs deleted file mode 100644 index f6e9aa52..00000000 --- a/crypto/frost/src/curves/ed25519.rs +++ /dev/null @@ -1,104 +0,0 @@ -use 
core::convert::TryInto; - -use rand_core::{RngCore, CryptoRng}; - -use sha2::{Digest, Sha512}; - -use ff::PrimeField; -use group::Group; - -use dalek_ff_group::{ - EdwardsBasepointTable, - ED25519_BASEPOINT_POINT, ED25519_BASEPOINT_TABLE, - Scalar, EdwardsPoint, CompressedEdwardsY -}; - -use crate::{CurveError, Curve, algorithm::Hram}; - -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct Ed25519; -impl Curve for Ed25519 { - type F = Scalar; - type G = EdwardsPoint; - type T = &'static EdwardsBasepointTable; - - const ID: &'static [u8] = b"edwards25519"; - - const GENERATOR: Self::G = ED25519_BASEPOINT_POINT; - const GENERATOR_TABLE: Self::T = &ED25519_BASEPOINT_TABLE; - - const LITTLE_ENDIAN: bool = true; - - fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F { - let mut seed = vec![0; 32]; - rng.fill_bytes(&mut seed); - seed.extend(&secret.to_bytes()); - Self::hash_to_F(b"nonce", &seed) - } - - fn hash_msg(msg: &[u8]) -> Vec { - Sha512::digest(msg).to_vec() - } - - fn hash_binding_factor(binding: &[u8]) -> Self::F { - Self::hash_to_F(b"rho", binding) - } - - fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F { - Scalar::from_hash(Sha512::new().chain_update(dst).chain_update(msg)) - } - - fn F_len() -> usize { - 32 - } - - fn G_len() -> usize { - 32 - } - - fn F_from_slice(slice: &[u8]) -> Result { - let scalar = Self::F::from_repr( - slice.try_into().map_err(|_| CurveError::InvalidLength(32, slice.len()))? 
- ); - if scalar.is_some().unwrap_u8() == 0 { - Err(CurveError::InvalidScalar)?; - } - Ok(scalar.unwrap()) - } - - fn G_from_slice(slice: &[u8]) -> Result { - let bytes = slice.try_into().map_err(|_| CurveError::InvalidLength(32, slice.len()))?; - let point = CompressedEdwardsY::new(bytes).decompress(); - - if let Some(point) = point { - // Ban identity and torsioned points - if point.is_identity().into() || (!bool::from(point.is_torsion_free())) { - Err(CurveError::InvalidPoint)?; - } - // Ban points which weren't canonically encoded - if point.compress().to_bytes() != bytes { - Err(CurveError::InvalidPoint)?; - } - Ok(point) - } else { - Err(CurveError::InvalidPoint) - } - } - - fn F_to_bytes(f: &Self::F) -> Vec { - f.to_repr().to_vec() - } - - fn G_to_bytes(g: &Self::G) -> Vec { - g.compress().to_bytes().to_vec() - } -} - -#[derive(Copy, Clone)] -pub struct IetfEd25519Hram; -impl Hram for IetfEd25519Hram { - #[allow(non_snake_case)] - fn hram(R: &EdwardsPoint, A: &EdwardsPoint, m: &[u8]) -> Scalar { - Ed25519::hash_to_F(b"", &[&R.compress().to_bytes(), &A.compress().to_bytes(), m].concat()) - } -} diff --git a/crypto/frost/src/curves/kp256.rs b/crypto/frost/src/curves/kp256.rs index 3abb1879..e7421c5a 100644 --- a/crypto/frost/src/curves/kp256.rs +++ b/crypto/frost/src/curves/kp256.rs @@ -10,7 +10,7 @@ use group::{Group, GroupEncoding}; use elliptic_curve::{bigint::{Encoding, U384}, hash2curve::{Expander, ExpandMsg, ExpandMsgXmd}}; use crate::{CurveError, Curve}; -#[cfg(any(test, feature = "p256"))] +#[cfg(feature = "p256")] use crate::algorithm::Hram; #[allow(non_snake_case)] @@ -25,9 +25,9 @@ pub(crate) trait KP256Instance { const GENERATOR: G; } -#[cfg(any(test, feature = "p256"))] +#[cfg(feature = "p256")] pub type P256 = KP256; -#[cfg(any(test, feature = "p256"))] +#[cfg(feature = "p256")] impl KP256Instance for P256 { const CONTEXT: &'static [u8] = b"FROST-P256-SHA256-v5"; const ID: &'static [u8] = b"P-256"; @@ -139,10 +139,10 @@ impl Curve for KP256 where 
} } -#[cfg(any(test, feature = "p256"))] +#[cfg(feature = "p256")] #[derive(Clone)] pub struct IetfP256Hram; -#[cfg(any(test, feature = "p256"))] +#[cfg(feature = "p256")] impl Hram for IetfP256Hram { #[allow(non_snake_case)] fn hram(R: &p256::ProjectivePoint, A: &p256::ProjectivePoint, m: &[u8]) -> p256::Scalar { diff --git a/crypto/frost/src/curves/mod.rs b/crypto/frost/src/curves/mod.rs index 890f8600..3742e1f9 100644 --- a/crypto/frost/src/curves/mod.rs +++ b/crypto/frost/src/curves/mod.rs @@ -1,5 +1,5 @@ -#[cfg(any(test, feature = "kp256"))] -pub mod kp256; +#[cfg(any(test, feature = "dalek"))] +pub mod dalek; -#[cfg(feature = "ed25519")] -pub mod ed25519; +#[cfg(feature = "kp256")] +pub mod kp256; diff --git a/crypto/frost/src/tests/literal/dalek.rs b/crypto/frost/src/tests/literal/dalek.rs new file mode 100644 index 00000000..7cd3e92f --- /dev/null +++ b/crypto/frost/src/tests/literal/dalek.rs @@ -0,0 +1,77 @@ +use rand::rngs::OsRng; + +use crate::{curves::dalek, tests::vectors::{Vectors, test_with_vectors}}; + +#[cfg(any(test, feature = "ristretto"))] +#[test] +fn ristretto_vectors() { + test_with_vectors::<_, dalek::Ristretto, dalek::IetfRistrettoHram>( + &mut OsRng, + Vectors { + threshold: 2, + shares: &[ + "5c3430d391552f6e60ecdc093ff9f6f4488756aa6cebdbad75a768010b8f830e", + "b06fc5eac20b4f6e1b271d9df2343d843e1e1fb03c4cbb673f2872d459ce6f01", + "f17e505f0e2581c6acfe54d3846a622834b5e7b50cad9a2109a97ba7a80d5c04" + ], + group_secret: "1b25a55e463cfd15cf14a5d3acc3d15053f08da49c8afcf3ab265f2ebc4f970b", + group_key: "e2a62f39eede11269e3bd5a7d97554f5ca384f9f6d3dd9c3c0d05083c7254f57", + + msg: "74657374", + included: &[1, 3], + nonces: &[ + [ + "b358743151e33d84bf00c12f71808f4103957c3e2cabab7b895c436b5e70f90c", + "7bd112153b9ae1ab9b31f5e78f61f5c4ca9ee67b7ea6d1181799c409d14c350c" + ], + [ + "22acad88478e0d0373a991092a322ebd1b9a2dad90451a976d0db3215426af0e", + "9155e3d7bcf7cd468b980c7e20b2c77cbdfbe33a1dcae031fd8bc6b1403f4b04" + ] + ], + sig_shares: &[ + 
"ff801b4e0839faa67f16dee4127b9f7fbcf5fd007900257b0e2bbc02cbe5e709", + "afdf5481023c855bf3411a5c8a5fafa92357296a078c3b80dc168f294cb4f504" + ], + sig: "deae61af10e8ee48ba492573592fba547f5debeff6bd6e2024e8673584746f5e".to_owned() + + "ae6070cf0a757f027358f8409dda4e29e04c276b808c60fbea414b2c179add0e" + } + ); +} + +#[cfg(feature = "ed25519")] +#[test] +fn ed25519_vectors() { + test_with_vectors::<_, dalek::Ed25519, dalek::IetfEd25519Hram>( + &mut OsRng, + Vectors { + threshold: 2, + shares: &[ + "929dcc590407aae7d388761cddb0c0db6f5627aea8e217f4a033f2ec83d93509", + "a91e66e012e4364ac9aaa405fcafd370402d9859f7b6685c07eed76bf409e80d", + "d3cb090a075eb154e82fdb4b3cb507f110040905468bb9c46da8bdea643a9a02" + ], + group_secret: "7b1c33d3f5291d85de664833beb1ad469f7fb6025a0ec78b3a790c6e13a98304", + group_key: "15d21ccd7ee42959562fc8aa63224c8851fb3ec85a3faf66040d380fb9738673", + + msg: "74657374", + included: &[1, 3], + nonces: &[ + [ + "8c76af04340e83bb5fc427c117d38347fc8ef86d5397feea9aa6412d96c05b0a", + "14a37ddbeae8d9e9687369e5eb3c6d54f03dc19d76bb54fb5425131bc37a600b" + ], + [ + "5ca39ebab6874f5e7b5089f3521819a2aa1e2cf738bae6974ee80555de2ef70e", + "0afe3650c4815ff37becd3c6948066e906e929ea9b8f546c74e10002dbcc150c" + ] + ], + sig_shares: &[ + "4369474a398aa10357b60d683da91ea6a767dcf53fd541a8ed6b4d780827ea0a", + "32fcc690d926075e45d2dfb746bab71447943cddbefe80d122c39174aa2e1004" + ], + sig: "2b8d9c6995333c5990e3a3dd6568785539d3322f7f0376452487ea35cfda587b".to_owned() + + "75650edb12b1a8619c88ed1f8463d6baeefb18d3fed3c279102fdfecb255fa0e" + } + ); +} diff --git a/crypto/frost/src/tests/literal/ed25519.rs b/crypto/frost/src/tests/literal/ed25519.rs deleted file mode 100644 index 43b31dc4..00000000 --- a/crypto/frost/src/tests/literal/ed25519.rs +++ /dev/null @@ -1,51 +0,0 @@ -use rand::rngs::OsRng; - -use crate::{ - curves::ed25519::{Ed25519, IetfEd25519Hram}, - tests::{curve::test_curve, schnorr::test_schnorr, vectors::{Vectors, vectors}} -}; - -#[test] -fn ed25519_curve() { - 
test_curve::<_, Ed25519>(&mut OsRng); -} - -#[test] -fn ed25519_schnorr() { - test_schnorr::<_, Ed25519>(&mut OsRng); -} - -#[test] -fn ed25519_vectors() { - vectors::( - Vectors { - threshold: 2, - shares: &[ - "929dcc590407aae7d388761cddb0c0db6f5627aea8e217f4a033f2ec83d93509", - "a91e66e012e4364ac9aaa405fcafd370402d9859f7b6685c07eed76bf409e80d", - "d3cb090a075eb154e82fdb4b3cb507f110040905468bb9c46da8bdea643a9a02" - ], - group_secret: "7b1c33d3f5291d85de664833beb1ad469f7fb6025a0ec78b3a790c6e13a98304", - group_key: "15d21ccd7ee42959562fc8aa63224c8851fb3ec85a3faf66040d380fb9738673", - - msg: "74657374", - included: &[1, 3], - nonces: &[ - [ - "8c76af04340e83bb5fc427c117d38347fc8ef86d5397feea9aa6412d96c05b0a", - "14a37ddbeae8d9e9687369e5eb3c6d54f03dc19d76bb54fb5425131bc37a600b" - ], - [ - "5ca39ebab6874f5e7b5089f3521819a2aa1e2cf738bae6974ee80555de2ef70e", - "0afe3650c4815ff37becd3c6948066e906e929ea9b8f546c74e10002dbcc150c" - ] - ], - sig_shares: &[ - "4369474a398aa10357b60d683da91ea6a767dcf53fd541a8ed6b4d780827ea0a", - "32fcc690d926075e45d2dfb746bab71447943cddbefe80d122c39174aa2e1004" - ], - sig: "2b8d9c6995333c5990e3a3dd6568785539d3322f7f0376452487ea35cfda587b".to_owned() + - "75650edb12b1a8619c88ed1f8463d6baeefb18d3fed3c279102fdfecb255fa0e" - } - ); -} diff --git a/crypto/frost/src/tests/literal/kp256.rs b/crypto/frost/src/tests/literal/kp256.rs index 60a29bee..8751fbe2 100644 --- a/crypto/frost/src/tests/literal/kp256.rs +++ b/crypto/frost/src/tests/literal/kp256.rs @@ -1,26 +1,27 @@ use rand::rngs::OsRng; -use crate::{ - curves::kp256::{P256, IetfP256Hram}, - tests::{curve::test_curve, schnorr::test_schnorr, vectors::{Vectors, vectors}} -}; - +#[cfg(feature = "k256")] +use crate::tests::{curve::test_curve, schnorr::test_schnorr}; #[cfg(feature = "k256")] use crate::curves::kp256::K256; +#[cfg(feature = "p256")] +use crate::tests::vectors::{Vectors, test_with_vectors}; +#[cfg(feature = "p256")] +use crate::curves::kp256::{P256, IetfP256Hram}; + +#[cfg(feature = 
"k256")] #[test] -fn p256_curve() { - test_curve::<_, P256>(&mut OsRng); -} - -#[test] -fn p256_schnorr() { - test_schnorr::<_, P256>(&mut OsRng); +fn k256_not_ietf() { + test_curve::<_, K256>(&mut OsRng); + test_schnorr::<_, K256>(&mut OsRng); } +#[cfg(feature = "p256")] #[test] fn p256_vectors() { - vectors::( + test_with_vectors::<_, P256, IetfP256Hram>( + &mut OsRng, Vectors { threshold: 2, shares: &[ @@ -52,15 +53,3 @@ fn p256_vectors() { } ); } - -#[cfg(feature = "k256")] -#[test] -fn k256_curve() { - test_curve::<_, K256>(&mut OsRng); -} - -#[cfg(feature = "k256")] -#[test] -fn k256_schnorr() { - test_schnorr::<_, K256>(&mut OsRng); -} diff --git a/crypto/frost/src/tests/literal/mod.rs b/crypto/frost/src/tests/literal/mod.rs index 77da3224..00fe0477 100644 --- a/crypto/frost/src/tests/literal/mod.rs +++ b/crypto/frost/src/tests/literal/mod.rs @@ -1,3 +1,4 @@ +#[cfg(any(test, feature = "dalek"))] +mod dalek; +#[cfg(feature = "kp256")] mod kp256; -#[cfg(feature = "ed25519")] -mod ed25519; diff --git a/crypto/frost/src/tests/vectors.rs b/crypto/frost/src/tests/vectors.rs index c4403d07..db46de1f 100644 --- a/crypto/frost/src/tests/vectors.rs +++ b/crypto/frost/src/tests/vectors.rs @@ -1,10 +1,12 @@ use std::{sync::Arc, collections::HashMap}; +use rand_core::{RngCore, CryptoRng}; + use crate::{ Curve, MultisigKeys, algorithm::{Schnorr, Hram}, sign::{PreprocessPackage, StateMachine, AlgorithmMachine}, - tests::recover + tests::{curve::test_curve, schnorr::test_schnorr, recover} }; pub struct Vectors { @@ -55,7 +57,16 @@ fn vectors_to_multisig_keys(vectors: &Vectors) -> HashMap>(vectors: Vectors) { +pub fn test_with_vectors< + R: RngCore + CryptoRng, + C: Curve, + H: Hram +>(rng: &mut R, vectors: Vectors) { + // Do basic tests before trying the vectors + test_curve::<_, C>(&mut *rng); + test_schnorr::<_, C>(rng); + + // Test against the vectors let keys = vectors_to_multisig_keys::(&vectors); let group_key = 
C::G_from_slice(&hex::decode(vectors.group_key).unwrap()).unwrap(); assert_eq!( From 670ea3726f1846a3c0d7ddbc5d969b3297ec2f7c Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 6 Jun 2022 04:28:52 -0400 Subject: [PATCH 021/105] Correct a warning when building Monero without multisig --- coins/monero/tests/send.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/coins/monero/tests/send.rs b/coins/monero/tests/send.rs index dd95c5b4..4c002d45 100644 --- a/coins/monero/tests/send.rs +++ b/coins/monero/tests/send.rs @@ -1,4 +1,6 @@ -use std::{sync::Mutex, collections::HashMap}; +use std::sync::Mutex; +#[cfg(feature = "multisig")] +use std::collections::HashMap; use lazy_static::lazy_static; From 714ce68deb96cc249121b30993e12cb2d2e75623 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 7 Jun 2022 00:02:10 -0400 Subject: [PATCH 022/105] Add pippenger under multiexp --- crypto/frost/src/key_gen.rs | 2 +- crypto/frost/src/tests/curve.rs | 18 +++ crypto/multiexp/src/batch.rs | 78 +++++++++++++ crypto/multiexp/src/lib.rs | 189 +++++++------------------------ crypto/multiexp/src/pippenger.rs | 79 +++++++++++++ crypto/multiexp/src/straus.rs | 69 +++++++++++ 6 files changed, 286 insertions(+), 149 deletions(-) create mode 100644 crypto/multiexp/src/batch.rs create mode 100644 crypto/multiexp/src/pippenger.rs create mode 100644 crypto/multiexp/src/straus.rs diff --git a/crypto/frost/src/key_gen.rs b/crypto/frost/src/key_gen.rs index c30962de..8f235b6b 100644 --- a/crypto/frost/src/key_gen.rs +++ b/crypto/frost/src/key_gen.rs @@ -254,7 +254,7 @@ fn complete_r2( // Calculate each user's verification share let mut verification_shares = HashMap::new(); for i in 1 ..= params.n() { - verification_shares.insert(i, multiexp_vartime(exponential(i, &stripes), C::LITTLE_ENDIAN)); + verification_shares.insert(i, multiexp_vartime(&exponential(i, &stripes), C::LITTLE_ENDIAN)); } debug_assert_eq!(C::GENERATOR_TABLE * secret_share, verification_shares[¶ms.i()]); diff 
--git a/crypto/frost/src/tests/curve.rs b/crypto/frost/src/tests/curve.rs index 33f4f516..eed4e62a 100644 --- a/crypto/frost/src/tests/curve.rs +++ b/crypto/frost/src/tests/curve.rs @@ -1,5 +1,8 @@ use rand_core::{RngCore, CryptoRng}; +use ff::Field; +use group::Group; + use crate::{Curve, MultisigKeys, tests::key_gen}; // Test generation of FROST keys @@ -18,6 +21,21 @@ fn keys_serialization(rng: &mut R) { pub fn test_curve(rng: &mut R) { // TODO: Test the Curve functions themselves + // Test successful multiexp, with enough pairs to trigger its variety of algorithms + // TODO: This should probably be under multiexp + { + let mut pairs = Vec::with_capacity(1000); + let mut sum = C::G::identity(); + for _ in 0 .. 10 { + for _ in 0 .. 100 { + pairs.push((C::F::random(&mut *rng), C::GENERATOR * C::F::random(&mut *rng))); + sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0; + } + assert_eq!(multiexp::multiexp(&pairs, C::LITTLE_ENDIAN), sum); + assert_eq!(multiexp::multiexp_vartime(&pairs, C::LITTLE_ENDIAN), sum); + } + } + // Test FROST key generation and serialization of MultisigKeys works as expected key_generation::<_, C>(rng); keys_serialization::<_, C>(rng); diff --git a/crypto/multiexp/src/batch.rs b/crypto/multiexp/src/batch.rs new file mode 100644 index 00000000..6962ea86 --- /dev/null +++ b/crypto/multiexp/src/batch.rs @@ -0,0 +1,78 @@ +use rand_core::{RngCore, CryptoRng}; + +use group::{ff::Field, Group}; + +use crate::{multiexp, multiexp_vartime}; + +#[cfg(feature = "batch")] +pub struct BatchVerifier(Vec<(Id, Vec<(G::Scalar, G)>)>, bool); + +#[cfg(feature = "batch")] +impl BatchVerifier { + pub fn new(capacity: usize, endian: bool) -> BatchVerifier { + BatchVerifier(Vec::with_capacity(capacity), endian) + } + + pub fn queue< + R: RngCore + CryptoRng, + I: IntoIterator + >(&mut self, rng: &mut R, id: Id, pairs: I) { + // Define a unique scalar factor for this set of variables so individual items can't overlap + let u = if self.0.len() == 0 { + 
G::Scalar::one() + } else { + G::Scalar::random(rng) + }; + self.0.push((id, pairs.into_iter().map(|(scalar, point)| (scalar * u, point)).collect())); + } + + pub fn verify(&self) -> bool { + multiexp( + &self.0.iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::>(), + self.1 + ).is_identity().into() + } + + pub fn verify_vartime(&self) -> bool { + multiexp_vartime( + &self.0.iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::>(), + self.1 + ).is_identity().into() + } + + // A constant time variant may be beneficial for robust protocols + pub fn blame_vartime(&self) -> Option { + let mut slice = self.0.as_slice(); + while slice.len() > 1 { + let split = slice.len() / 2; + if multiexp_vartime( + &slice[.. split].iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::>(), + self.1 + ).is_identity().into() { + slice = &slice[split ..]; + } else { + slice = &slice[.. split]; + } + } + + slice.get(0).filter( + |(_, value)| !bool::from(multiexp_vartime(value, self.1).is_identity()) + ).map(|(id, _)| *id) + } + + pub fn verify_with_vartime_blame(&self) -> Result<(), Id> { + if self.verify() { + Ok(()) + } else { + Err(self.blame_vartime().unwrap()) + } + } + + pub fn verify_vartime_with_vartime_blame(&self) -> Result<(), Id> { + if self.verify_vartime() { + Ok(()) + } else { + Err(self.blame_vartime().unwrap()) + } + } +} diff --git a/crypto/multiexp/src/lib.rs b/crypto/multiexp/src/lib.rs index 0d145f16..51651e64 100644 --- a/crypto/multiexp/src/lib.rs +++ b/crypto/multiexp/src/lib.rs @@ -1,156 +1,49 @@ -use group::{ff::PrimeField, Group}; +use group::Group; + +mod straus; +use straus::*; + +mod pippenger; +use pippenger::*; #[cfg(feature = "batch")] -use group::ff::Field; +mod batch; #[cfg(feature = "batch")] -use rand_core::{RngCore, CryptoRng}; +pub use batch::BatchVerifier; -fn prep< - G: Group, - I: IntoIterator ->(pairs: I, little: bool) -> (Vec>, Vec<[G; 16]>) { - let mut nibbles = vec![]; - let mut tables = vec![]; - for pair in pairs.into_iter() 
{ - let p = nibbles.len(); - nibbles.push(vec![]); - { - let mut repr = pair.0.to_repr(); - let bytes = repr.as_mut(); - if !little { - bytes.reverse(); - } - - nibbles[p].resize(bytes.len() * 2, 0); - for i in 0 .. bytes.len() { - nibbles[p][i * 2] = bytes[i] & 0b1111; - nibbles[p][(i * 2) + 1] = (bytes[i] >> 4) & 0b1111; - } - } - - tables.push([G::identity(); 16]); - let mut accum = G::identity(); - for i in 1 .. 16 { - accum += pair.1; - tables[p][i] = accum; - } - } - - (nibbles, tables) +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +enum Algorithm { + Straus, + Pippenger } -// An implementation of Straus, with a extremely minimal API that lets us add other algorithms in -// the future. Takes in an iterator of scalars and points with a boolean for if the scalars are -// little endian encoded in their Reprs or not -pub fn multiexp< - G: Group, - I: IntoIterator ->(pairs: I, little: bool) -> G { - let (nibbles, tables) = prep(pairs, little); - - let mut res = G::identity(); - for b in (0 .. nibbles[0].len()).rev() { - for _ in 0 .. 4 { - res = res.double(); - } - - for s in 0 .. tables.len() { - res += tables[s][usize::from(nibbles[s][b])]; - } - } - res -} - -pub fn multiexp_vartime< - G: Group, - I: IntoIterator ->(pairs: I, little: bool) -> G { - let (nibbles, tables) = prep(pairs, little); - - let mut res = G::identity(); - for b in (0 .. nibbles[0].len()).rev() { - for _ in 0 .. 4 { - res = res.double(); - } - - for s in 0 .. 
tables.len() { - if nibbles[s][b] != 0 { - res += tables[s][usize::from(nibbles[s][b])]; - } - } - } - res -} - -#[cfg(feature = "batch")] -pub struct BatchVerifier(Vec<(Id, Vec<(G::Scalar, G)>)>, bool); - -#[cfg(feature = "batch")] -impl BatchVerifier { - pub fn new(capacity: usize, endian: bool) -> BatchVerifier { - BatchVerifier(Vec::with_capacity(capacity), endian) - } - - pub fn queue< - R: RngCore + CryptoRng, - I: IntoIterator - >(&mut self, rng: &mut R, id: Id, pairs: I) { - // Define a unique scalar factor for this set of variables so individual items can't overlap - let u = if self.0.len() == 0 { - G::Scalar::one() - } else { - G::Scalar::random(rng) - }; - self.0.push((id, pairs.into_iter().map(|(scalar, point)| (scalar * u, point)).collect())); - } - - pub fn verify(&self) -> bool { - multiexp( - self.0.iter().flat_map(|pairs| pairs.1.iter()).cloned(), - self.1 - ).is_identity().into() - } - - pub fn verify_vartime(&self) -> bool { - multiexp_vartime( - self.0.iter().flat_map(|pairs| pairs.1.iter()).cloned(), - self.1 - ).is_identity().into() - } - - // A constant time variant may be beneficial for robust protocols - pub fn blame_vartime(&self) -> Option { - let mut slice = self.0.as_slice(); - while slice.len() > 1 { - let split = slice.len() / 2; - if multiexp_vartime( - slice[.. split].iter().flat_map(|pairs| pairs.1.iter()).cloned(), - self.1 - ).is_identity().into() { - slice = &slice[split ..]; - } else { - slice = &slice[.. 
split]; - } - } - - slice.get(0).filter( - |(_, value)| !bool::from(multiexp_vartime(value.clone(), self.1).is_identity()) - ).map(|(id, _)| *id) - } - - pub fn verify_with_vartime_blame(&self) -> Result<(), Id> { - if self.verify() { - Ok(()) - } else { - Err(self.blame_vartime().unwrap()) - } - } - - pub fn verify_vartime_with_vartime_blame(&self) -> Result<(), Id> { - if self.verify_vartime() { - Ok(()) - } else { - Err(self.blame_vartime().unwrap()) - } +fn algorithm(pairs: usize) -> Algorithm { + // TODO: Replace this with an actual formula determining which will use less additions + // Right now, Straus is used until 600, instead of the far more accurate 300, as Pippenger + // operates per byte instead of per nibble, and therefore requires a much longer series to be + // performant + // Technically, 800 is dalek's number for when to use byte Pippenger, yet given Straus's own + // implementation limitations... + if pairs < 600 { + Algorithm::Straus + } else { + Algorithm::Pippenger + } +} + +// Performs a multiexp, automatically selecting the optimal algorithm based on amount of pairs +// Takes in an iterator of scalars and points, with a boolean for if the scalars are little endian +// encoded in their Reprs or not +pub fn multiexp(pairs: &[(G::Scalar, G)], little: bool) -> G { + match algorithm(pairs.len()) { + Algorithm::Straus => straus(pairs, little), + Algorithm::Pippenger => pippenger(pairs, little) + } +} + +pub fn multiexp_vartime(pairs: &[(G::Scalar, G)], little: bool) -> G { + match algorithm(pairs.len()) { + Algorithm::Straus => straus_vartime(pairs, little), + Algorithm::Pippenger => pippenger_vartime(pairs, little) } } diff --git a/crypto/multiexp/src/pippenger.rs b/crypto/multiexp/src/pippenger.rs new file mode 100644 index 00000000..b812c922 --- /dev/null +++ b/crypto/multiexp/src/pippenger.rs @@ -0,0 +1,79 @@ +use group::{ff::PrimeField, Group}; + +fn prep(pairs: &[(G::Scalar, G)], little: bool) -> (Vec>, Vec) { + let mut res = vec![]; + let 
mut points = vec![]; + for pair in pairs { + let p = res.len(); + res.push(vec![]); + { + let mut repr = pair.0.to_repr(); + let bytes = repr.as_mut(); + if !little { + bytes.reverse(); + } + + res[p].resize(bytes.len(), 0); + for i in 0 .. bytes.len() { + res[p][i] = bytes[i]; + } + } + + points.push(pair.1); + } + + (res, points) +} + +pub(crate) fn pippenger(pairs: &[(G::Scalar, G)], little: bool) -> G { + let (bytes, points) = prep(pairs, little); + + let mut res = G::identity(); + for n in (0 .. bytes[0].len()).rev() { + for _ in 0 .. 8 { + res = res.double(); + } + + let mut buckets = [G::identity(); 256]; + for p in 0 .. bytes.len() { + buckets[usize::from(bytes[p][n])] += points[p]; + } + + let mut intermediate_sum = G::identity(); + for b in (1 .. buckets.len()).rev() { + intermediate_sum += buckets[b]; + res += intermediate_sum; + } + } + + res +} + +pub(crate) fn pippenger_vartime(pairs: &[(G::Scalar, G)], little: bool) -> G { + let (bytes, points) = prep(pairs, little); + + let mut res = G::identity(); + for n in (0 .. bytes[0].len()).rev() { + if n != (bytes[0].len() - 1) { + for _ in 0 .. 8 { + res = res.double(); + } + } + + let mut buckets = [G::identity(); 256]; + for p in 0 .. bytes.len() { + let nibble = usize::from(bytes[p][n]); + if nibble != 0 { + buckets[nibble] += points[p]; + } + } + + let mut intermediate_sum = G::identity(); + for b in (1 .. 
buckets.len()).rev() { + intermediate_sum += buckets[b]; + res += intermediate_sum; + } + } + + res +} diff --git a/crypto/multiexp/src/straus.rs b/crypto/multiexp/src/straus.rs new file mode 100644 index 00000000..b8660f1b --- /dev/null +++ b/crypto/multiexp/src/straus.rs @@ -0,0 +1,69 @@ +use group::{ff::PrimeField, Group}; + +fn prep(pairs: &[(G::Scalar, G)], little: bool) -> (Vec>, Vec<[G; 16]>) { + let mut nibbles = vec![]; + let mut tables = vec![]; + for pair in pairs { + let p = nibbles.len(); + nibbles.push(vec![]); + { + let mut repr = pair.0.to_repr(); + let bytes = repr.as_mut(); + if !little { + bytes.reverse(); + } + + nibbles[p].resize(bytes.len() * 2, 0); + for i in 0 .. bytes.len() { + nibbles[p][i * 2] = bytes[i] & 0b1111; + nibbles[p][(i * 2) + 1] = (bytes[i] >> 4) & 0b1111; + } + } + + tables.push([G::identity(); 16]); + let mut accum = G::identity(); + for i in 1 .. 16 { + accum += pair.1; + tables[p][i] = accum; + } + } + + (nibbles, tables) +} + +pub(crate) fn straus(pairs: &[(G::Scalar, G)], little: bool) -> G { + let (nibbles, tables) = prep(pairs, little); + + let mut res = G::identity(); + for b in (0 .. nibbles[0].len()).rev() { + for _ in 0 .. 4 { + res = res.double(); + } + + for s in 0 .. tables.len() { + res += tables[s][usize::from(nibbles[s][b])]; + } + } + res +} + +pub(crate) fn straus_vartime(pairs: &[(G::Scalar, G)], little: bool) -> G { + let (nibbles, tables) = prep(pairs, little); + + let mut res = G::identity(); + for b in (0 .. nibbles[0].len()).rev() { + if b != (nibbles[0].len() - 1) { + for _ in 0 .. 4 { + res = res.double(); + } + } + + for s in 0 .. 
tables.len() { + if nibbles[s][b] != 0 { + res += tables[s][usize::from(nibbles[s][b])]; + } + } + } + + res +} From 27751d8d98136d059e804aaef6965aa6587aaab5 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 9 Jun 2022 02:48:53 -0400 Subject: [PATCH 023/105] Successfully get processor to send a transaction out Modifies FROST behavior so group_key has the offset applied regardless of if view was called. The unaltered secret_share and verification_shares (as they have differing values depending on the signing set) are no longer publicly accessible. --- coins/monero/src/rpc.rs | 1 + coins/monero/src/wallet/send/mod.rs | 1 - crypto/frost/src/lib.rs | 7 +- processor/Cargo.toml | 6 ++ processor/src/coins/monero.rs | 118 ++++++++++++++++++++-------- processor/src/lib.rs | 52 +++++++++--- processor/src/tests/mod.rs | 105 +++++++++++++++++++++++-- processor/src/wallet.rs | 27 +++++-- 8 files changed, 255 insertions(+), 62 deletions(-) diff --git a/coins/monero/src/rpc.rs b/coins/monero/src/rpc.rs index a609901c..7a3edf80 100644 --- a/coins/monero/src/rpc.rs +++ b/coins/monero/src/rpc.rs @@ -34,6 +34,7 @@ pub enum RpcError { InvalidTransaction([u8; 32]) } +#[derive(Clone, Debug)] pub struct Rpc(String); fn rpc_hex(value: &str) -> Result, RpcError> { diff --git a/coins/monero/src/wallet/send/mod.rs b/coins/monero/src/wallet/send/mod.rs index 8ad88a5e..531ea529 100644 --- a/coins/monero/src/wallet/send/mod.rs +++ b/coins/monero/src/wallet/send/mod.rs @@ -75,7 +75,6 @@ impl SendOutput { } } - #[derive(Clone, Error, Debug)] pub enum TransactionError { #[error("no inputs")] diff --git a/crypto/frost/src/lib.rs b/crypto/frost/src/lib.rs index bf876f51..c96e0333 100644 --- a/crypto/frost/src/lib.rs +++ b/crypto/frost/src/lib.rs @@ -268,6 +268,7 @@ impl MultisigKeys { // Enables schemes like Monero's subaddresses which have a per-subaddress offset and then a // one-time-key offset res.offset = Some(offset + res.offset.unwrap_or(C::F::zero())); + res.group_key += 
C::GENERATOR_TABLE * offset; res } @@ -275,7 +276,7 @@ impl MultisigKeys { self.params } - pub fn secret_share(&self) -> C::F { + fn secret_share(&self) -> C::F { self.secret_share } @@ -283,7 +284,7 @@ impl MultisigKeys { self.group_key } - pub fn verification_shares(&self) -> HashMap { + fn verification_shares(&self) -> HashMap { self.verification_shares.clone() } @@ -297,7 +298,7 @@ impl MultisigKeys { let offset_share = offset * C::F::from(included.len().try_into().unwrap()).invert().unwrap(); Ok(MultisigView { - group_key: self.group_key + (C::GENERATOR_TABLE * offset), + group_key: self.group_key, secret_share: secret_share + offset_share, verification_shares: self.verification_shares.iter().map( |(l, share)| ( diff --git a/processor/Cargo.toml b/processor/Cargo.toml index 19171348..4eaf4890 100644 --- a/processor/Cargo.toml +++ b/processor/Cargo.toml @@ -11,6 +11,10 @@ async-trait = "0.1" rand_core = "0.6" thiserror = "1" +hex = "0.4" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" + curve25519-dalek = { version = "3", features = ["std"] } blake2 = "0.10" @@ -22,5 +26,7 @@ monero = { version = "0.16", features = ["experimental"] } monero-serai = { path = "../coins/monero", features = ["multisig"] } [dev-dependencies] +group = "0.12" rand = "0.8" +futures = "0.3" tokio = { version = "1", features = ["full"] } diff --git a/processor/src/coins/monero.rs b/processor/src/coins/monero.rs index 2b694984..a5f6fb68 100644 --- a/processor/src/coins/monero.rs +++ b/processor/src/coins/monero.rs @@ -1,16 +1,12 @@ use std::sync::Arc; use async_trait::async_trait; -use rand_core::{RngCore, CryptoRng}; +use rand_core::OsRng; -use curve25519_dalek::{ - constants::ED25519_BASEPOINT_TABLE, - scalar::Scalar, - edwards::CompressedEdwardsY -}; +use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar}; use dalek_ff_group as dfg; -use frost::MultisigKeys; +use frost::{MultisigKeys, sign::StateMachine}; use monero::{PublicKey, 
network::Network, util::address::Address}; use monero_serai::{ @@ -20,9 +16,15 @@ use monero_serai::{ wallet::{SpendableOutput, SignableTransaction as MSignableTransaction} }; -use crate::{Transcript, Output as OutputTrait, CoinError, Coin, view_key}; +use crate::{ + Transcript, + CoinError, SignError, + Network as NetworkTrait, + Output as OutputTrait, Coin, + view_key +}; -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct Output(SpendableOutput); impl OutputTrait for Output { // While we could use (tx, o), using the key ensures we won't be susceptible to the burning bug. @@ -53,6 +55,7 @@ impl From for Output { } } +#[derive(Debug)] pub struct SignableTransaction( Arc>, Transcript, @@ -60,10 +63,11 @@ pub struct SignableTransaction( MSignableTransaction ); +#[derive(Clone, Debug)] pub struct Monero { rpc: Rpc, view: Scalar, - view_pub: CompressedEdwardsY + view_pub: PublicKey } impl Monero { @@ -72,7 +76,7 @@ impl Monero { Monero { rpc: Rpc::new(url), view, - view_pub: (&view * &ED25519_BASEPOINT_TABLE).compress() + view_pub: PublicKey { point: (&view * &ED25519_BASEPOINT_TABLE).compress() } } } } @@ -98,6 +102,10 @@ impl Coin for Monero { const MAX_INPUTS: usize = 128; const MAX_OUTPUTS: usize = 16; + fn address(&self, key: dfg::EdwardsPoint) -> Self::Address { + Address::standard(Network::Mainnet, PublicKey { point: key.compress().0 }, self.view_pub) + } + async fn get_height(&self) -> Result { self.rpc.get_height().await.map_err(|_| CoinError::ConnectionError) } @@ -129,7 +137,7 @@ impl Coin for Monero { mut inputs: Vec, payments: &[(Address, u64)] ) -> Result { - let spend = keys.group_key().0.compress(); + let spend = keys.group_key(); Ok( SignableTransaction( keys, @@ -138,40 +146,86 @@ impl Coin for Monero { MSignableTransaction::new( inputs.drain(..).map(|input| input.0).collect(), payments.to_vec(), - Address::standard( - Network::Mainnet, - PublicKey { point: spend }, - PublicKey { point: self.view_pub } - ), - 100000000 + self.address(spend), + 
100000000 // TODO ).map_err(|_| CoinError::ConnectionError)? ) ) } - async fn attempt_send( + async fn attempt_send( &self, - rng: &mut R, + network: &mut N, transaction: SignableTransaction, included: &[u16] - ) -> Result<(Vec, Vec<::Id>), CoinError> { - let attempt = transaction.3.clone().multisig( - rng, + ) -> Result<(Vec, Vec<::Id>), SignError> { + let mut attempt = transaction.3.clone().multisig( + &mut OsRng, &self.rpc, (*transaction.0).clone(), transaction.1.clone(), transaction.2, included.to_vec() - ).await.map_err(|_| CoinError::ConnectionError)?; + ).await.map_err(|_| SignError::CoinError(CoinError::ConnectionError))?; - /* - let tx = None; - self.rpc.publish_transaction(tx).await.map_err(|_| CoinError::ConnectionError)?; - Ok( + let commitments = network.round( + attempt.preprocess(&mut OsRng).unwrap() + ).await.map_err(|e| SignError::NetworkError(e))?; + let shares = network.round( + attempt.sign(commitments, b"").map_err(|e| SignError::FrostError(e))? + ).await.map_err(|e| SignError::NetworkError(e))?; + let tx = attempt.complete(shares).map_err(|e| SignError::FrostError(e))?; + + self.rpc.publish_transaction( + &tx + ).await.map_err(|_| SignError::CoinError(CoinError::ConnectionError))?; + + Ok(( tx.hash().to_vec(), - tx.outputs.iter().map(|output| output.key.compress().to_bytes().collect()) - ) - */ - Ok((vec![], vec![])) + tx.prefix.outputs.iter().map(|output| output.key.compress().to_bytes()).collect() + )) + } + + #[cfg(test)] + async fn mine_block(&self, address: Self::Address) { + #[derive(serde::Deserialize, Debug)] + struct EmptyResponse {} + let _: EmptyResponse = self.rpc.rpc_call("json_rpc", Some(serde_json::json!({ + "method": "generateblocks", + "params": { + "wallet_address": address.to_string(), + "amount_of_blocks": 10 + }, + }))).await.unwrap(); + } + + #[cfg(test)] + async fn test_send(&self, address: Self::Address) { + use group::Group; + + use rand::rngs::OsRng; + + let height = self.get_height().await.unwrap(); + + let temp = 
self.address(dfg::EdwardsPoint::generator()); + self.mine_block(temp).await; + for _ in 0 .. 7 { + self.mine_block(temp).await; + } + + let outputs = self.rpc + .get_block_transactions_possible(height).await.unwrap() + .swap_remove(0).scan(self.view, dfg::EdwardsPoint::generator().0).0; + + let amount = outputs[0].commitment.amount; + let fee = 1000000000; // TODO + let tx = MSignableTransaction::new( + outputs, + vec![(address, amount - fee)], + temp, + fee / 2000 + ).unwrap().sign(&mut OsRng, &self.rpc, &Scalar::one()).await.unwrap(); + self.rpc.publish_transaction(&tx).await.unwrap(); + self.mine_block(temp).await; } } diff --git a/processor/src/lib.rs b/processor/src/lib.rs index 337a409f..47e12e85 100644 --- a/processor/src/lib.rs +++ b/processor/src/lib.rs @@ -1,10 +1,9 @@ -use std::{marker::Send, sync::Arc}; +use std::{marker::Send, sync::Arc, collections::HashMap}; use async_trait::async_trait; use thiserror::Error; -use rand_core::{RngCore, CryptoRng}; -use frost::{Curve, MultisigKeys}; +use frost::{Curve, FrostError, MultisigKeys}; pub(crate) use monero_serai::frost::Transcript; @@ -14,6 +13,30 @@ mod wallet; #[cfg(test)] mod tests; +#[derive(Clone, Error, Debug)] +pub enum CoinError { + #[error("failed to connect to coin daemon")] + ConnectionError +} + +#[derive(Clone, Error, Debug)] +pub enum NetworkError {} + +#[derive(Clone, Error, Debug)] +pub enum SignError { + #[error("coin had an error {0}")] + CoinError(CoinError), + #[error("network had an error {0}")] + NetworkError(NetworkError), + #[error("FROST had an error {0}")] + FrostError(FrostError) +} + +#[async_trait] +pub trait Network: Send { + async fn round(&mut self, data: Vec) -> Result>, NetworkError>; +} + pub trait Output: Sized + Clone { type Id: AsRef<[u8]>; @@ -24,12 +47,6 @@ pub trait Output: Sized + Clone { fn deserialize(reader: &mut R) -> std::io::Result; } -#[derive(Clone, Error, Debug)] -pub enum CoinError { - #[error("failed to connect to coin daemon")] - ConnectionError -} - 
#[async_trait] pub trait Coin { type Curve: Curve; @@ -43,7 +60,10 @@ pub trait Coin { const ID: &'static [u8]; const CONFIRMATIONS: usize; const MAX_INPUTS: usize; - const MAX_OUTPUTS: usize; + const MAX_OUTPUTS: usize; // TODO: Decide if this includes change or not + + // Doesn't have to take self, enables some level of caching which is pleasant + fn address(&self, key: ::G) -> Self::Address; async fn get_height(&self) -> Result; async fn get_block(&self, height: usize) -> Result; @@ -62,12 +82,18 @@ pub trait Coin { payments: &[(Self::Address, u64)] ) -> Result; - async fn attempt_send( + async fn attempt_send( &self, - rng: &mut R, + network: &mut N, transaction: Self::SignableTransaction, included: &[u16] - ) -> Result<(Vec, Vec<::Id>), CoinError>; + ) -> Result<(Vec, Vec<::Id>), SignError>; + + #[cfg(test)] + async fn mine_block(&self, address: Self::Address); + + #[cfg(test)] + async fn test_send(&self, key: Self::Address); } // Generate a static view key for a given chain in a globally consistent manner diff --git a/processor/src/tests/mod.rs b/processor/src/tests/mod.rs index abc338d3..7e640241 100644 --- a/processor/src/tests/mod.rs +++ b/processor/src/tests/mod.rs @@ -1,16 +1,107 @@ -use std::sync::Arc; +use std::{sync::{Arc, RwLock}, collections::HashMap}; + +use async_trait::async_trait; use rand::rngs::OsRng; -use crate::{Coin, coins::monero::Monero, wallet::{WalletKeys, MemCoinDb, Wallet}}; +use group::Group; + +use crate::{ + NetworkError, Network, + Coin, coins::monero::Monero, + wallet::{WalletKeys, MemCoinDb, Wallet} +}; + +#[derive(Clone)] +struct LocalNetwork { + i: u16, + size: u16, + round: usize, + rounds: Arc>>>> +} + +impl LocalNetwork { + fn new(size: u16) -> Vec { + let rounds = Arc::new(RwLock::new(vec![])); + let mut res = vec![]; + for i in 1 ..= size { + res.push(LocalNetwork { i, size, round: 0, rounds: rounds.clone() }); + } + res + } +} + +#[async_trait] +impl Network for LocalNetwork { + async fn round(&mut self, data: Vec) -> 
Result>, NetworkError> { + { + let mut rounds = self.rounds.write().unwrap(); + if rounds.len() == self.round { + rounds.push(HashMap::new()); + } + rounds[self.round].insert(self.i, data); + } + + while { + let read = self.rounds.try_read().unwrap(); + read[self.round].len() != usize::from(self.size) + } { + tokio::task::yield_now().await; + } + + let res = self.rounds.try_read().unwrap()[self.round].clone(); + self.round += 1; + Ok(res) + } +} #[tokio::test] async fn test() { let monero = Monero::new("http://127.0.0.1:18081".to_string()); - println!("{}", monero.get_height().await.unwrap()); + // Mine a block so there's a confirmed height + monero.mine_block(monero.address(dalek_ff_group::EdwardsPoint::generator())).await; + let height = monero.get_height().await.unwrap(); + + let mut networks = LocalNetwork::new(3); + let mut keys = frost::tests::key_gen::<_, ::Curve>(&mut OsRng); - let mut wallet = Wallet::new(MemCoinDb::new(), monero); - wallet.acknowledge_height(0, 0); - wallet.add_keys(&WalletKeys::new(Arc::try_unwrap(keys.remove(&1).take().unwrap()).unwrap(), 0)); - dbg!(0); + let mut wallets = vec![]; + for i in 1 ..= 3 { + let mut wallet = Wallet::new(MemCoinDb::new(), monero.clone()); + wallet.acknowledge_height(0, height); + wallet.add_keys( + &WalletKeys::new(Arc::try_unwrap(keys.remove(&i).take().unwrap()).unwrap(), 0) + ); + wallets.push(wallet); + } + + // Get the chain to a height where blocks have sufficient confirmations + while (height + Monero::CONFIRMATIONS) > monero.get_height().await.unwrap() { + monero.mine_block(monero.address(dalek_ff_group::EdwardsPoint::generator())).await; + } + + for wallet in wallets.iter_mut() { + // Poll to activate the keys + wallet.poll().await.unwrap(); + } + + monero.test_send(wallets[0].address()).await; + + let mut futures = vec![]; + for (i, network) in networks.iter_mut().enumerate() { + let wallet = &mut wallets[i]; + wallet.poll().await.unwrap(); + + let height = monero.get_height().await.unwrap(); + 
wallet.acknowledge_height(1, height - 10); + let signable = wallet.prepare_sends( + 1, + vec![(wallet.address(), 10000000000)] + ).await.unwrap().1.swap_remove(0); + futures.push(monero.attempt_send(network, signable, &[1, 2, 3])); + } + println!( + "{:?}", + hex::encode(futures::future::join_all(futures).await.swap_remove(0).unwrap().0) + ); } diff --git a/processor/src/wallet.rs b/processor/src/wallet.rs index 25514ce8..d6bcaf92 100644 --- a/processor/src/wallet.rs +++ b/processor/src/wallet.rs @@ -121,6 +121,9 @@ impl Wallet { pub fn scanned_height(&self) -> usize { self.db.scanned_height() } pub fn acknowledge_height(&mut self, canonical: usize, height: usize) { self.db.acknowledge_height(canonical, height); + if height > self.db.scanned_height() { + self.db.scanned_to_height(height); + } } pub fn acknowledged_height(&self, canonical: usize) -> usize { self.db.acknowledged_height(canonical) @@ -131,17 +134,25 @@ impl Wallet { self.pending.push((self.acknowledged_height(keys.creation_height), keys.bind(C::ID))); } + pub fn address(&self) -> C::Address { + self.coin.address(self.keys[self.keys.len() - 1].0.group_key()) + } + pub async fn poll(&mut self) -> Result<(), CoinError> { - let confirmed_height = self.coin.get_height().await? - C::CONFIRMATIONS; - for height in self.scanned_height() .. confirmed_height { + if self.coin.get_height().await? < C::CONFIRMATIONS { + return Ok(()); + } + let confirmed_block = self.coin.get_height().await? 
- C::CONFIRMATIONS; + + for b in self.scanned_height() ..= confirmed_block { // If any keys activated at this height, shift them over { let mut k = 0; while k < self.pending.len() { // TODO - //if height < self.pending[k].0 { - //} else if height == self.pending[k].0 { - if height <= self.pending[k].0 { + //if b < self.pending[k].0 { + //} else if b == self.pending[k].0 { + if b <= self.pending[k].0 { self.keys.push((Arc::new(self.pending.swap_remove(k).1), vec![])); } else { k += 1; @@ -149,7 +160,7 @@ impl Wallet { } } - let block = self.coin.get_block(height).await?; + let block = self.coin.get_block(b).await?; for (keys, outputs) in self.keys.iter_mut() { outputs.extend( self.coin.get_outputs(&block, keys.group_key()).await.iter().cloned().filter( @@ -157,7 +168,11 @@ impl Wallet { ) ); } + + // Blocks are zero-indexed while heights aren't + self.db.scanned_to_height(b + 1); } + Ok(()) } From d611300adb51d708ef963cec3b1f4cd99a02c273 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 9 Jun 2022 04:05:57 -0400 Subject: [PATCH 024/105] Error when the wrong spend key is used to sign a transaction Moves decoy selection to being the last step in the multisig process so the RPC is only polled to continue valid transactions. 
--- coins/monero/src/wallet/send/mod.rs | 9 ++++- coins/monero/src/wallet/send/multisig.rs | 44 ++++++++++++++---------- 2 files changed, 33 insertions(+), 20 deletions(-) diff --git a/coins/monero/src/wallet/send/mod.rs b/coins/monero/src/wallet/send/mod.rs index 531ea529..45f57123 100644 --- a/coins/monero/src/wallet/send/mod.rs +++ b/coins/monero/src/wallet/send/mod.rs @@ -87,6 +87,8 @@ pub enum TransactionError { NotEnoughFunds(u64, u64), #[error("invalid address")] InvalidAddress, + #[error("wrong spend private key")] + WrongPrivateKey, #[error("rpc error ({0})")] RpcError(RpcError), #[error("clsag error ({0})")] @@ -282,7 +284,12 @@ impl SignableTransaction { ) -> Result { let mut images = Vec::with_capacity(self.inputs.len()); for input in &self.inputs { - images.push(generate_key_image(&(spend + input.key_offset))); + let offset = spend + input.key_offset; + if (&offset * &ED25519_BASEPOINT_TABLE) != input.key { + Err(TransactionError::WrongPrivateKey)?; + } + + images.push(generate_key_image(&offset)); } images.sort_by(key_image_sort); diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index 02c3e4ff..18dba94b 100644 --- a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -83,6 +83,31 @@ impl SignableTransaction { } transcript.append_message(b"change", &self.change.as_bytes()); + // Sort included before cloning it around + included.sort_unstable(); + + for (i, input) in self.inputs.iter().enumerate() { + // Check this the right set of keys + let offset = keys.offset(dalek_ff_group::Scalar(input.key_offset)); + if offset.group_key().0 != input.key { + Err(TransactionError::WrongPrivateKey)?; + } + + clsags.push( + AlgorithmMachine::new( + ClsagMultisig::new( + transcript.clone(), + inputs[i].clone() + ).map_err(|e| TransactionError::MultisigError(e))?, + Arc::new(offset), + &included + ).map_err(|e| TransactionError::FrostError(e))? 
+ ); + } + + // Verify these outputs by a dummy prep + self.prepare_outputs(rng, [0; 32])?; + // Select decoys // Ideally, this would be done post entropy, instead of now, yet doing so would require sign // to be async which isn't preferable. This should be suitably competent though @@ -97,25 +122,6 @@ impl SignableTransaction { &self.inputs ).await.map_err(|e| TransactionError::RpcError(e))?; - // Sort included before cloning it around - included.sort_unstable(); - - for (i, input) in self.inputs.iter().enumerate() { - clsags.push( - AlgorithmMachine::new( - ClsagMultisig::new( - transcript.clone(), - inputs[i].clone() - ).map_err(|e| TransactionError::MultisigError(e))?, - Arc::new(keys.offset(dalek_ff_group::Scalar(input.key_offset))), - &included - ).map_err(|e| TransactionError::FrostError(e))? - ); - } - - // Verify these outputs by a dummy prep - self.prepare_outputs(rng, [0; 32])?; - Ok(TransactionMachine { signable: self, i: keys.params().i(), From 75fb9b319814c6d1262fdf1c49c70489ddbf6580 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 9 Jun 2022 04:34:15 -0400 Subject: [PATCH 025/105] Correct input/output selection Payments weren't properly selected, as it'd drain a sequential series instead of the specified set, and inputs had a memory condition Rust couldn't prove was safe. --- processor/src/wallet.rs | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/processor/src/wallet.rs b/processor/src/wallet.rs index d6bcaf92..f31b8cb5 100644 --- a/processor/src/wallet.rs +++ b/processor/src/wallet.rs @@ -207,20 +207,23 @@ impl Wallet { while outputs.len() != 0 { // Select the maximum amount of outputs possible - let mut inputs = &outputs[0 .. C::MAX_INPUTS.min(outputs.len())]; + let mut input_bound = C::MAX_INPUTS.min(outputs.len()); // Calculate their sum value, minus the fee needed to spend them - let mut sum = inputs.iter().map(|input| input.amount()).sum::(); + let mut sum = outputs[0 .. 
input_bound].iter().map(|input| input.amount()).sum::(); // sum -= C::MAX_FEE; // TODO // Grab the payments this will successfully fund let mut these_payments = vec![]; - for payment in &payments { - if sum > payment.1 { - these_payments.push(payment); - sum -= payment.1; + let mut p = 0; + while p < payments.len() { + if sum >= payments[p].1 { + sum -= payments[p].1; + these_payments.push(payments.remove(p)); + } else { + // Doesn't break in this else case as a smaller payment may still fit + p += 1; } - // Doesn't break in this else case as a smaller payment may still fit } // Move to the next set of keys if none of these outputs remain significant @@ -228,16 +231,17 @@ impl Wallet { break; } - // Drop any uneeded outputs - while sum > inputs[inputs.len() - 1].amount() { - sum -= inputs[inputs.len() - 1].amount(); - inputs = &inputs[.. (inputs.len() - 1)]; + // Drop any uneeded inputs + while sum > outputs[input_bound - 1].amount() { + sum -= outputs[input_bound - 1].amount(); + input_bound -= 1; } + // TODO: Replace any high value inputs with low value inputs, if we can + // We now have a minimal effective outputs/payments set // Take ownership while removing these candidates from the provided list - let inputs = outputs.drain(.. inputs.len()).collect(); - let payments = payments.drain(.. these_payments.len()).collect::>(); + let inputs = outputs.drain(.. 
input_bound).collect(); let mut transcript = Transcript::new(b"Serai Processor Wallet Send"); transcript.append_message( @@ -257,7 +261,7 @@ impl Wallet { transcript, acknowledged_height, inputs, - &payments + &these_payments ).await?; // self.db.save_tx(tx) // TODO txs.push(tx); From 8e8bfabc83df988eb906ff5a79ba35ac66cf42ec Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 9 Jun 2022 04:34:31 -0400 Subject: [PATCH 026/105] Make processor's test for an arbitrary coin, instead of just Monero --- processor/src/tests/mod.rs | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/processor/src/tests/mod.rs b/processor/src/tests/mod.rs index 7e640241..d69cc8f4 100644 --- a/processor/src/tests/mod.rs +++ b/processor/src/tests/mod.rs @@ -6,6 +6,8 @@ use rand::rngs::OsRng; use group::Group; +use frost::Curve; + use crate::{ NetworkError, Network, Coin, coins::monero::Monero, @@ -55,19 +57,17 @@ impl Network for LocalNetwork { } } -#[tokio::test] -async fn test() { - let monero = Monero::new("http://127.0.0.1:18081".to_string()); +async fn test_send(coin: C) { // Mine a block so there's a confirmed height - monero.mine_block(monero.address(dalek_ff_group::EdwardsPoint::generator())).await; - let height = monero.get_height().await.unwrap(); + coin.mine_block(coin.address(::G::generator())).await; + let height = coin.get_height().await.unwrap(); let mut networks = LocalNetwork::new(3); - let mut keys = frost::tests::key_gen::<_, ::Curve>(&mut OsRng); + let mut keys = frost::tests::key_gen::<_, C::Curve>(&mut OsRng); let mut wallets = vec![]; for i in 1 ..= 3 { - let mut wallet = Wallet::new(MemCoinDb::new(), monero.clone()); + let mut wallet = Wallet::new(MemCoinDb::new(), coin.clone()); wallet.acknowledge_height(0, height); wallet.add_keys( &WalletKeys::new(Arc::try_unwrap(keys.remove(&i).take().unwrap()).unwrap(), 0) @@ -76,8 +76,8 @@ async fn test() { } // Get the chain to a height where blocks have sufficient confirmations - 
while (height + Monero::CONFIRMATIONS) > monero.get_height().await.unwrap() { - monero.mine_block(monero.address(dalek_ff_group::EdwardsPoint::generator())).await; + while (height + C::CONFIRMATIONS) > coin.get_height().await.unwrap() { + coin.mine_block(coin.address(::G::generator())).await; } for wallet in wallets.iter_mut() { @@ -85,23 +85,29 @@ async fn test() { wallet.poll().await.unwrap(); } - monero.test_send(wallets[0].address()).await; + coin.test_send(wallets[0].address()).await; let mut futures = vec![]; for (i, network) in networks.iter_mut().enumerate() { let wallet = &mut wallets[i]; wallet.poll().await.unwrap(); - let height = monero.get_height().await.unwrap(); + let height = coin.get_height().await.unwrap(); wallet.acknowledge_height(1, height - 10); let signable = wallet.prepare_sends( 1, vec![(wallet.address(), 10000000000)] ).await.unwrap().1.swap_remove(0); - futures.push(monero.attempt_send(network, signable, &[1, 2, 3])); + futures.push(coin.attempt_send(network, signable, &[1, 2, 3])); } + println!( "{:?}", hex::encode(futures::future::join_all(futures).await.swap_remove(0).unwrap().0) ); } + +#[tokio::test] +async fn monero() { + test_send(Monero::new("http://127.0.0.1:18081".to_string())).await; +} From e1831ee5af85860cd6aa116a3fec544c7930aea6 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 10 Jun 2022 00:20:59 -0400 Subject: [PATCH 027/105] Error when a message is passed to a Monero TransactionMachine --- coins/monero/src/wallet/send/multisig.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index 18dba94b..4b7d0f69 100644 --- a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -175,13 +175,20 @@ impl StateMachine for TransactionMachine { fn sign( &mut self, mut commitments: HashMap>, - // Drop FROST's 'msg' since we calculate the actual message in this function - _: &[u8] + msg: 
&[u8] ) -> Result, FrostError> { if self.state() != State::Preprocessed { Err(FrostError::InvalidSignTransition(State::Preprocessed, self.state()))?; } + if msg.len() != 0 { + Err( + FrostError::InternalError( + "message was passed to the TransactionMachine when it generates its own".to_string() + ) + )?; + } + // Add all commitments to the transcript for their entropy // While each CLSAG will do this as they need to for security, they have their own transcripts // cloned from this TX's initial premise's transcript. For our TX transcript to have the CLSAG From 1ef528bf8c44073801b7185362597edb70727904 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 10 Jun 2022 00:32:56 -0400 Subject: [PATCH 028/105] Bound decoy selection to prevent it from infinite looping --- coins/monero/src/wallet/decoys.rs | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/coins/monero/src/wallet/decoys.rs b/coins/monero/src/wallet/decoys.rs index bcfd66d0..f205a735 100644 --- a/coins/monero/src/wallet/decoys.rs +++ b/coins/monero/src/wallet/decoys.rs @@ -33,11 +33,18 @@ async fn select_n( used: &mut HashSet, count: usize ) -> Result, RpcError> { + let mut iters = 0; let mut confirmed = Vec::with_capacity(count); while confirmed.len() != count { let remaining = count - confirmed.len(); let mut candidates = Vec::with_capacity(remaining); while candidates.len() != remaining { + iters += 1; + // This is cheap and on fresh chains, thousands of rounds may be needed + if iters == 10000 { + Err(RpcError::InternalError("not enough decoy candidates".to_string()))?; + } + // Use a gamma distribution let mut age = GAMMA.sample(rng).exp(); if age > TIP_APPLICATION { @@ -126,7 +133,7 @@ impl Decoys { // Panic if not enough decoys are available // TODO: Simply create a TX with less than the target amount, or at least return an error if (high - MATURITY) < u64::try_from(inputs.len() * RING_LEN).unwrap() { - panic!("Not enough decoys available"); + 
Err(RpcError::InternalError("not enough decoy candidates".to_string()))?; } // Select all decoys for this transaction, assuming we generate a sane transaction @@ -161,8 +168,8 @@ impl Decoys { if high > 500 { // Make sure the TX passes the sanity check that the median output is within the last 40% // This actually checks the median is within the last third, a slightly more aggressive - // boundary, as the height used in this calculation will be slightly under the height this is - // sanity checked against + // boundary, as the height used in this calculation will be slightly under the height this + // is sanity checked against let target_median = high * 2 / 3; while ring[RING_LEN / 2].0 < target_median { // If it's not, update the bottom half with new values to ensure the median only moves up From b91279f4ceac719e371203660dfbf52e745c9157 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 10 Jun 2022 02:38:19 -0400 Subject: [PATCH 029/105] Support sending to subaddresses --- coins/monero/src/wallet/send/mod.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/coins/monero/src/wallet/send/mod.rs b/coins/monero/src/wallet/send/mod.rs index 45f57123..53688db0 100644 --- a/coins/monero/src/wallet/send/mod.rs +++ b/coins/monero/src/wallet/send/mod.rs @@ -11,7 +11,7 @@ use curve25519_dalek::{ use monero::{ consensus::Encodable, - util::{key::PublicKey, address::Address}, + util::{key::PublicKey, address::{AddressType, Address}}, blockdata::transaction::SubField }; @@ -61,13 +61,15 @@ impl SendOutput { o ); + let spend = output.0.public_spend.point.decompress().ok_or(TransactionError::InvalidAddress)?; Ok( SendOutput { - R: &r * &ED25519_BASEPOINT_TABLE, - dest: ( - (&shared_key * &ED25519_BASEPOINT_TABLE) + - output.0.public_spend.point.decompress().ok_or(TransactionError::InvalidAddress)? 
- ), + R: match output.0.addr_type { + AddressType::Standard => Ok(&r * &ED25519_BASEPOINT_TABLE), + AddressType::SubAddress => Ok(&r * spend), + AddressType::Integrated(_) => Err(TransactionError::InvalidAddress) + }?, + dest: (&shared_key * &ED25519_BASEPOINT_TABLE) + spend, mask: commitment_mask(shared_key), amount: amount_encryption(output.1, shared_key) } @@ -233,6 +235,7 @@ impl SignableTransaction { bp: Bulletproofs ) -> Transaction { // Create the TX extra + // TODO: Review this for canonicity with Monero let mut extra = vec![]; SubField::TxPublicKey( PublicKey { point: self.outputs[0].R.compress() } From 4b8822cb74983fd15d10b9d7abeb0b1e86df498d Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 10 Jun 2022 09:12:27 -0400 Subject: [PATCH 030/105] Clean input/output handling These individual functions should be much easier to test, more legible, more robust, and adds additional functionality to obtain the best fit. --- processor/src/wallet.rs | 143 +++++++++++++++++++++++++++++----------- 1 file changed, 103 insertions(+), 40 deletions(-) diff --git a/processor/src/wallet.rs b/processor/src/wallet.rs index f31b8cb5..5e0928fc 100644 --- a/processor/src/wallet.rs +++ b/processor/src/wallet.rs @@ -100,6 +100,101 @@ impl CoinDb for MemCoinDb { } } +fn select_inputs(inputs: &mut Vec) -> (Vec, u64) { + // Sort to ensure determinism. 
Inefficient, yet produces the most legible code to be optimized + // later + inputs.sort_by(|a, b| a.amount().cmp(&b.amount())); + + // Select the maximum amount of outputs possible + let res = inputs.split_off(inputs.len() - C::MAX_INPUTS.min(inputs.len())); + // Calculate their sum value, minus the fee needed to spend them + let sum = res.iter().map(|input| input.amount()).sum(); + // sum -= C::MAX_FEE; // TODO + (res, sum) +} + +fn select_outputs( + payments: &mut Vec<(C::Address, u64)>, + value: &mut u64 +) -> Vec<(C::Address, u64)> { + // Prioritize large payments which will most efficiently use large inputs + payments.sort_by(|a, b| a.1.cmp(&b.1)); + + // Grab the payments this will successfully fund + let mut outputs = vec![]; + let mut p = payments.len(); + while p != 0 { + p -= 1; + if *value >= payments[p].1 { + *value -= payments[p].1; + // Swap remove will either pop the tail or insert an element that wouldn't fit, making it + // always safe to move past + outputs.push(payments.swap_remove(p)); + } + // Doesn't break in this else case as a smaller payment may still fit + } + + outputs +} + +// Optimizes on the expectation selected/inputs are sorted from lowest value to highest +fn refine_inputs( + selected: &mut Vec, + inputs: &mut Vec, + mut remaining: u64 +) { + // Drop unused inputs + let mut s = 0; + while remaining > selected[s].amount() { + remaining -= selected[s].amount(); + s += 1; + } + // Add them back to the inputs pool + inputs.extend(selected.drain(.. s)); + + // Replace large inputs with smaller ones + for s in (0 .. selected.len()).rev() { + for i in 0 .. inputs.len() { + // Doesn't break due to inputs no longer being sorted + // This could be made faster if we prioritized small input usage over transaction size/fees + // TODO: Consider. 
This would implicitly consolidate inputs which would be advantageous + if selected[s].amount() < inputs[i].amount() { + continue; + } + + // If we can successfully replace this input, do so + let diff = selected[s].amount() - inputs[i].amount(); + if remaining > diff { + remaining -= diff; + + let old = selected[s].clone(); + selected[s] = inputs[i].clone(); + inputs[i] = old; + } + } + } +} + +fn select_inputs_outputs( + inputs: &mut Vec, + outputs: &mut Vec<(C::Address, u64)> +) -> (Vec, Vec<(C::Address, u64)>) { + if inputs.len() == 0 { + return (vec![], vec![]); + } + + let (mut selected, mut value) = select_inputs::(inputs); + + let outputs = select_outputs::(outputs, &mut value); + if outputs.len() == 0 { + inputs.extend(selected); + return (vec![], vec![]); + } + + refine_inputs::(&mut selected, inputs, value); + (selected, outputs) +} + pub struct Wallet { db: D, coin: C, @@ -196,53 +291,20 @@ impl Wallet { // Payments is the first set of TXs in the schedule // As each payment re-appears, let mut payments = schedule[payment] where the only input is // the source payment - // let (mut payments, schedule) = payments; + // let (mut payments, schedule) = schedule(payments); let mut payments = payments; - payments.sort_by(|a, b| a.1.cmp(&b.1).reverse()); let mut txs = vec![]; for (keys, outputs) in self.keys.iter_mut() { - // Select the highest value outputs to minimize the amount of inputs needed - outputs.sort_by(|a, b| a.amount().cmp(&b.amount()).reverse()); - while outputs.len() != 0 { - // Select the maximum amount of outputs possible - let mut input_bound = C::MAX_INPUTS.min(outputs.len()); - - // Calculate their sum value, minus the fee needed to spend them - let mut sum = outputs[0 .. 
input_bound].iter().map(|input| input.amount()).sum::(); - // sum -= C::MAX_FEE; // TODO - - // Grab the payments this will successfully fund - let mut these_payments = vec![]; - let mut p = 0; - while p < payments.len() { - if sum >= payments[p].1 { - sum -= payments[p].1; - these_payments.push(payments.remove(p)); - } else { - // Doesn't break in this else case as a smaller payment may still fit - p += 1; - } - } - - // Move to the next set of keys if none of these outputs remain significant - if these_payments.len() == 0 { + let (inputs, outputs) = select_inputs_outputs::(outputs, &mut payments); + // If we can no longer process any payments, move to the next set of keys + if outputs.len() == 0 { + debug_assert_eq!(inputs.len(), 0); break; } - // Drop any uneeded inputs - while sum > outputs[input_bound - 1].amount() { - sum -= outputs[input_bound - 1].amount(); - input_bound -= 1; - } - - // TODO: Replace any high value inputs with low value inputs, if we can - - // We now have a minimal effective outputs/payments set - // Take ownership while removing these candidates from the provided list - let inputs = outputs.drain(.. 
input_bound).collect(); - + // Create the transcript for this transaction let mut transcript = Transcript::new(b"Serai Processor Wallet Send"); transcript.append_message( b"canonical_height", @@ -256,12 +318,13 @@ impl Wallet { b"index", &u64::try_from(txs.len()).unwrap().to_le_bytes() ); + let tx = self.coin.prepare_send( keys.clone(), transcript, acknowledged_height, inputs, - &these_payments + &outputs ).await?; // self.db.save_tx(tx) // TODO txs.push(tx); From 32473d99760b5ef98fe50869df28f0bdbf732c09 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 10 Jun 2022 09:36:07 -0400 Subject: [PATCH 031/105] Route networking through Wallet, not Coin --- coins/monero/src/wallet/mod.rs | 2 ++ coins/monero/src/wallet/send/mod.rs | 2 ++ processor/src/coins/monero.rs | 43 ++++++++++++----------------- processor/src/lib.rs | 17 ++++++++---- processor/src/tests/mod.rs | 5 ++-- processor/src/wallet.rs | 28 +++++++++++++++++-- 6 files changed, 61 insertions(+), 36 deletions(-) diff --git a/coins/monero/src/wallet/mod.rs b/coins/monero/src/wallet/mod.rs index 88d4ff61..e717fe4c 100644 --- a/coins/monero/src/wallet/mod.rs +++ b/coins/monero/src/wallet/mod.rs @@ -14,6 +14,8 @@ pub(crate) use decoys::Decoys; mod send; pub use send::{TransactionError, SignableTransaction}; +#[cfg(feature = "multisig")] +pub use send::TransactionMachine; fn key_image_sort(x: &EdwardsPoint, y: &EdwardsPoint) -> std::cmp::Ordering { x.compress().to_bytes().cmp(&y.compress().to_bytes()).reverse() diff --git a/coins/monero/src/wallet/send/mod.rs b/coins/monero/src/wallet/send/mod.rs index 53688db0..6a8c14dd 100644 --- a/coins/monero/src/wallet/send/mod.rs +++ b/coins/monero/src/wallet/send/mod.rs @@ -36,6 +36,8 @@ use crate::frost::MultisigError; #[cfg(feature = "multisig")] mod multisig; +#[cfg(feature = "multisig")] +pub use multisig::TransactionMachine; #[allow(non_snake_case)] #[derive(Clone, PartialEq, Debug)] diff --git a/processor/src/coins/monero.rs b/processor/src/coins/monero.rs index 
a5f6fb68..f1d96bc5 100644 --- a/processor/src/coins/monero.rs +++ b/processor/src/coins/monero.rs @@ -6,23 +6,17 @@ use rand_core::OsRng; use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar}; use dalek_ff_group as dfg; -use frost::{MultisigKeys, sign::StateMachine}; +use frost::MultisigKeys; use monero::{PublicKey, network::Network, util::address::Address}; use monero_serai::{ frost::Ed25519, transaction::{Timelock, Transaction}, rpc::Rpc, - wallet::{SpendableOutput, SignableTransaction as MSignableTransaction} + wallet::{SpendableOutput, SignableTransaction as MSignableTransaction, TransactionMachine} }; -use crate::{ - Transcript, - CoinError, SignError, - Network as NetworkTrait, - Output as OutputTrait, Coin, - view_key -}; +use crate::{Transcript, CoinError, Output as OutputTrait, Coin, view_key}; #[derive(Clone, Debug)] pub struct Output(SpendableOutput); @@ -85,9 +79,12 @@ impl Monero { impl Coin for Monero { type Curve = Ed25519; - type Output = Output; + type Transaction = Transaction; type Block = Vec; + + type Output = Output; type SignableTransaction = SignableTransaction; + type TransactionMachine = TransactionMachine; type Address = Address; @@ -153,32 +150,26 @@ impl Coin for Monero { ) } - async fn attempt_send( + async fn attempt_send( &self, - network: &mut N, transaction: SignableTransaction, included: &[u16] - ) -> Result<(Vec, Vec<::Id>), SignError> { - let mut attempt = transaction.3.clone().multisig( + ) -> Result { + transaction.3.clone().multisig( &mut OsRng, &self.rpc, (*transaction.0).clone(), transaction.1.clone(), transaction.2, included.to_vec() - ).await.map_err(|_| SignError::CoinError(CoinError::ConnectionError))?; + ).await.map_err(|_| CoinError::ConnectionError) + } - let commitments = network.round( - attempt.preprocess(&mut OsRng).unwrap() - ).await.map_err(|e| SignError::NetworkError(e))?; - let shares = network.round( - attempt.sign(commitments, b"").map_err(|e| SignError::FrostError(e))? 
- ).await.map_err(|e| SignError::NetworkError(e))?; - let tx = attempt.complete(shares).map_err(|e| SignError::FrostError(e))?; - - self.rpc.publish_transaction( - &tx - ).await.map_err(|_| SignError::CoinError(CoinError::ConnectionError))?; + async fn publish_transaction( + &self, + tx: &Self::Transaction + ) -> Result<(Vec, Vec<::Id>), CoinError> { + self.rpc.publish_transaction(&tx).await.map_err(|_| CoinError::ConnectionError)?; Ok(( tx.hash().to_vec(), diff --git a/processor/src/lib.rs b/processor/src/lib.rs index 47e12e85..65862aed 100644 --- a/processor/src/lib.rs +++ b/processor/src/lib.rs @@ -3,7 +3,7 @@ use std::{marker::Send, sync::Arc, collections::HashMap}; use async_trait::async_trait; use thiserror::Error; -use frost::{Curve, FrostError, MultisigKeys}; +use frost::{Curve, FrostError, MultisigKeys, sign::StateMachine}; pub(crate) use monero_serai::frost::Transcript; @@ -51,9 +51,12 @@ pub trait Output: Sized + Clone { pub trait Coin { type Curve: Curve; - type Output: Output; + type Transaction; type Block; + + type Output: Output; type SignableTransaction; + type TransactionMachine: StateMachine; type Address: Send; @@ -82,12 +85,16 @@ pub trait Coin { payments: &[(Self::Address, u64)] ) -> Result; - async fn attempt_send( + async fn attempt_send( &self, - network: &mut N, transaction: Self::SignableTransaction, included: &[u16] - ) -> Result<(Vec, Vec<::Id>), SignError>; + ) -> Result; + + async fn publish_transaction( + &self, + tx: &Self::Transaction + ) -> Result<(Vec, Vec<::Id>), CoinError>; #[cfg(test)] async fn mine_block(&self, address: Self::Address); diff --git a/processor/src/tests/mod.rs b/processor/src/tests/mod.rs index d69cc8f4..c23b8309 100644 --- a/processor/src/tests/mod.rs +++ b/processor/src/tests/mod.rs @@ -88,8 +88,7 @@ async fn test_send(coin: C) { coin.test_send(wallets[0].address()).await; let mut futures = vec![]; - for (i, network) in networks.iter_mut().enumerate() { - let wallet = &mut wallets[i]; + for (network, wallet) 
in networks.iter_mut().zip(wallets.iter_mut()) { wallet.poll().await.unwrap(); let height = coin.get_height().await.unwrap(); @@ -98,7 +97,7 @@ async fn test_send(coin: C) { 1, vec![(wallet.address(), 10000000000)] ).await.unwrap().1.swap_remove(0); - futures.push(coin.attempt_send(network, signable, &[1, 2, 3])); + futures.push(wallet.attempt_send(network, signable, &[1, 2, 3])); } println!( diff --git a/processor/src/wallet.rs b/processor/src/wallet.rs index 5e0928fc..27a1cb83 100644 --- a/processor/src/wallet.rs +++ b/processor/src/wallet.rs @@ -1,10 +1,12 @@ use std::{sync::Arc, collections::HashMap}; +use rand_core::OsRng; + use transcript::Transcript as TranscriptTrait; -use frost::{Curve, MultisigKeys}; +use frost::{Curve, MultisigKeys, sign::StateMachine}; -use crate::{Transcript, CoinError, Output, Coin}; +use crate::{Transcript, CoinError, SignError, Output, Coin, Network}; pub struct WalletKeys { keys: MultisigKeys, @@ -333,4 +335,26 @@ impl Wallet { Ok((payments, txs)) } + + pub async fn attempt_send( + &mut self, + network: &mut N, + prepared: C::SignableTransaction, + included: &[u16] + ) -> Result<(Vec, Vec<::Id>), SignError> { + let mut attempt = self.coin.attempt_send( + prepared, + included + ).await.map_err(|e| SignError::CoinError(e))?; + + let commitments = network.round( + attempt.preprocess(&mut OsRng).unwrap() + ).await.map_err(|e| SignError::NetworkError(e))?; + let shares = network.round( + attempt.sign(commitments, b"").map_err(|e| SignError::FrostError(e))? 
+ ).await.map_err(|e| SignError::NetworkError(e))?; + let tx = attempt.complete(shares).map_err(|e| SignError::FrostError(e))?; + + self.coin.publish_transaction(&tx).await.map_err(|e| SignError::CoinError(e)) + } } From 06e37623d040466d7965550f6353b83e16cfb078 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 19 Jun 2022 05:13:42 -0400 Subject: [PATCH 032/105] Slightly clean FROST's dalek support --- crypto/frost/src/curves/dalek.rs | 32 ++++++++++++++------------------ 1 file changed, 14 insertions(+), 18 deletions(-) diff --git a/crypto/frost/src/curves/dalek.rs b/crypto/frost/src/curves/dalek.rs index 994ac651..f3dce609 100644 --- a/crypto/frost/src/curves/dalek.rs +++ b/crypto/frost/src/curves/dalek.rs @@ -81,7 +81,7 @@ macro_rules! dalek_curve { let scalar = Self::F::from_repr( slice.try_into().map_err(|_| CurveError::InvalidLength(32, slice.len()))? ); - if scalar.is_some().unwrap_u8() == 0 { + if !bool::from(scalar.is_some()) { Err(CurveError::InvalidScalar)?; } Ok(scalar.unwrap()) @@ -89,25 +89,21 @@ macro_rules! 
dalek_curve { fn G_from_slice(slice: &[u8]) -> Result { let bytes = slice.try_into().map_err(|_| CurveError::InvalidLength(32, slice.len()))?; - let point = $Compressed::new(bytes).decompress(); + let point = $Compressed::new(bytes).decompress().ok_or(CurveError::InvalidPoint)?; - if let Some(point) = point { - // Ban identity - if point.is_identity().into() { - Err(CurveError::InvalidPoint)?; - } - // Ban torsioned points to meet the prime order group requirement - if $torsioned(point) { - Err(CurveError::InvalidPoint)?; - } - // Ban points which weren't canonically encoded - if point.compress().to_bytes() != bytes { - Err(CurveError::InvalidPoint)?; - } - Ok(point) - } else { - Err(CurveError::InvalidPoint) + // Ban identity + if point.is_identity().into() { + Err(CurveError::InvalidPoint)?; } + // Ban torsioned points to meet the prime order group requirement + if $torsioned(point) { + Err(CurveError::InvalidPoint)?; + } + // Ban points which weren't canonically encoded + if point.compress().to_bytes() != bytes { + Err(CurveError::InvalidPoint)?; + } + Ok(point) } fn F_to_bytes(f: &Self::F) -> Vec { From b4c1adcdfbef485f159cfb8bae599cb2fad5aab4 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 19 Jun 2022 05:21:22 -0400 Subject: [PATCH 033/105] Rename FROST's official package name and update documentation --- coins/monero/Cargo.toml | 2 +- crypto/frost/Cargo.toml | 6 ++++-- crypto/frost/README.md | 7 +++++-- processor/Cargo.toml | 2 +- 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/coins/monero/Cargo.toml b/coins/monero/Cargo.toml index 0934a813..acc301aa 100644 --- a/coins/monero/Cargo.toml +++ b/coins/monero/Cargo.toml @@ -24,7 +24,7 @@ group = { version = "0.12", optional = true } dalek-ff-group = { path = "../../crypto/dalek-ff-group", optional = true } transcript = { path = "../../crypto/transcript", optional = true } -frost = { path = "../../crypto/frost", features = ["ed25519"], optional = true } +frost = { package = "modular-frost", 
path = "../../crypto/frost", features = ["ed25519"], optional = true } monero = "0.16" diff --git a/crypto/frost/Cargo.toml b/crypto/frost/Cargo.toml index 6d9af3f6..9f9bff63 100644 --- a/crypto/frost/Cargo.toml +++ b/crypto/frost/Cargo.toml @@ -1,9 +1,11 @@ [package] -name = "frost" +name = "modular-frost" version = "0.1.0" -description = "Implementation of FROST over ff/group" +description = "Modular implementation of FROST over ff/group" license = "MIT" +repository = "https://github.com/serai-dex/serai" authors = ["Luke Parker "] +keywords = ["frost", "multisig", "threshold"] edition = "2021" [dependencies] diff --git a/crypto/frost/README.md b/crypto/frost/README.md index c71c0f0f..ecb80d2a 100644 --- a/crypto/frost/README.md +++ b/crypto/frost/README.md @@ -1,3 +1,6 @@ -# FROST +# Modular FROST -Implementation of FROST for any curve with a ff/group API. +A modular implementation of FROST for any curve with a ff/group API. Notably, +beyond curve modularity, custom algorithms may be specified providing support +for privacy coins. The provided Schnorr algorithm also has a modular HRAM due +to the variety in existence, enabling integration with existing systems. 
diff --git a/processor/Cargo.toml b/processor/Cargo.toml index 4eaf4890..360f2767 100644 --- a/processor/Cargo.toml +++ b/processor/Cargo.toml @@ -20,7 +20,7 @@ blake2 = "0.10" transcript = { path = "../crypto/transcript" } dalek-ff-group = { path = "../crypto/dalek-ff-group" } -frost = { path = "../crypto/frost" } +frost = { package = "modular-frost", path = "../crypto/frost" } monero = { version = "0.16", features = ["experimental"] } monero-serai = { path = "../coins/monero", features = ["multisig"] } From 481bf7dcf3b7ad224efe7bd1f58ed13366e5cd18 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 19 Jun 2022 06:33:19 -0400 Subject: [PATCH 034/105] Prepare dalek-ff-group for publishing --- crypto/dalek-ff-group/Cargo.toml | 2 ++ crypto/dalek-ff-group/README.md | 2 ++ 2 files changed, 4 insertions(+) diff --git a/crypto/dalek-ff-group/Cargo.toml b/crypto/dalek-ff-group/Cargo.toml index 79fdceb0..11515716 100644 --- a/crypto/dalek-ff-group/Cargo.toml +++ b/crypto/dalek-ff-group/Cargo.toml @@ -3,7 +3,9 @@ name = "dalek-ff-group" version = "0.1.0" description = "ff/group bindings around curve25519-dalek" license = "MIT" +repository = "https://github.com/serai-dex/serai" authors = ["Luke Parker "] +keywords = ["curve25519", "ed25519", "ristretto", "dalek", "group"] edition = "2021" [dependencies] diff --git a/crypto/dalek-ff-group/README.md b/crypto/dalek-ff-group/README.md index 4e2eefe0..b5b600b7 100644 --- a/crypto/dalek-ff-group/README.md +++ b/crypto/dalek-ff-group/README.md @@ -1,3 +1,5 @@ # Dalek FF/Group ff/group bindings around curve25519-dalek with a random function based around a more modern rand_core. + +Some functions currently remain unimplemented. 
From 9549dc6a49a6717167916d70fcf201d6066bf8b7 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 19 Jun 2022 06:35:15 -0400 Subject: [PATCH 035/105] Prepare transcript for publishing --- crypto/transcript/Cargo.toml | 6 ++++-- crypto/transcript/README.md | 3 +++ 2 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 crypto/transcript/README.md diff --git a/crypto/transcript/Cargo.toml b/crypto/transcript/Cargo.toml index 3c9bc9f8..9b099c3b 100644 --- a/crypto/transcript/Cargo.toml +++ b/crypto/transcript/Cargo.toml @@ -1,9 +1,11 @@ [package] -name = "transcript" +name = "transcript-trait" version = "0.1.0" -description = "A simple transcript definition" +description = "A simple transcript trait definition" license = "MIT" +repository = "https://github.com/serai-dex/serai" authors = ["Luke Parker "] +keywords = ["transcript"] edition = "2021" [dependencies] diff --git a/crypto/transcript/README.md b/crypto/transcript/README.md new file mode 100644 index 00000000..10b007c9 --- /dev/null +++ b/crypto/transcript/README.md @@ -0,0 +1,3 @@ +# Transcript + +Basic transcript trait with a Merlin wrapper available via the Merlin feature. 
From 5da1b4fcf8b50d7e0cf00da49ab6b78291efafd6 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 19 Jun 2022 06:35:45 -0400 Subject: [PATCH 036/105] Prepare multiexp for publishing --- crypto/multiexp/Cargo.toml | 2 ++ crypto/multiexp/README.md | 6 ++++++ 2 files changed, 8 insertions(+) create mode 100644 crypto/multiexp/README.md diff --git a/crypto/multiexp/Cargo.toml b/crypto/multiexp/Cargo.toml index b45dbcf5..c4c73690 100644 --- a/crypto/multiexp/Cargo.toml +++ b/crypto/multiexp/Cargo.toml @@ -3,7 +3,9 @@ name = "multiexp" version = "0.1.0" description = "Multiexponentation algorithms for ff/group" license = "MIT" +repository = "https://github.com/serai-dex/serai" authors = ["Luke Parker "] +keywords = ["multiexp", "ff", "group"] edition = "2021" [dependencies] diff --git a/crypto/multiexp/README.md b/crypto/multiexp/README.md new file mode 100644 index 00000000..80668458 --- /dev/null +++ b/crypto/multiexp/README.md @@ -0,0 +1,6 @@ +# Multiexp + +A multiexp implementation for ff/group implementing Straus and Pippenger. A +batch verification API is also available via the "batch" feature, which enables +secure multiexponentation batch verification given a series of values which +should sum to 0, identifying which doesn't via binary search if they don't. 
From 382ff75455a27efa5f2f68d23b1c1d3549d4a671 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 19 Jun 2022 06:36:47 -0400 Subject: [PATCH 037/105] Replace FROST's ff/group usage with just group --- crypto/frost/Cargo.toml | 3 +-- crypto/frost/src/curves/dalek.rs | 3 +-- crypto/frost/src/curves/kp256.rs | 3 +-- crypto/frost/src/key_gen.rs | 2 +- crypto/frost/src/lib.rs | 3 +-- crypto/frost/src/schnorr.rs | 2 +- crypto/frost/src/sign.rs | 2 +- crypto/frost/src/tests/curve.rs | 3 +-- crypto/frost/src/tests/mod.rs | 2 +- crypto/frost/src/tests/schnorr.rs | 2 +- 10 files changed, 10 insertions(+), 15 deletions(-) diff --git a/crypto/frost/Cargo.toml b/crypto/frost/Cargo.toml index 9f9bff63..929b1eb0 100644 --- a/crypto/frost/Cargo.toml +++ b/crypto/frost/Cargo.toml @@ -16,7 +16,6 @@ hex = "0.4" sha2 = { version = "0.10", optional = true } -ff = "0.12" group = "0.12" elliptic-curve = { version = "0.12", features = ["hash2curve"], optional = true } @@ -24,7 +23,7 @@ p256 = { version = "0.11", features = ["arithmetic", "hash2curve"], optional = t k256 = { version = "0.11", features = ["arithmetic", "hash2curve"], optional = true } dalek-ff-group = { path = "../dalek-ff-group", optional = true } -transcript = { path = "../transcript" } +transcript = { package = "transcript-trait", path = "../transcript" } multiexp = { path = "../multiexp", features = ["batch"] } diff --git a/crypto/frost/src/curves/dalek.rs b/crypto/frost/src/curves/dalek.rs index f3dce609..1ba41918 100644 --- a/crypto/frost/src/curves/dalek.rs +++ b/crypto/frost/src/curves/dalek.rs @@ -4,8 +4,7 @@ use rand_core::{RngCore, CryptoRng}; use sha2::{Digest, Sha512}; -use ff::PrimeField; -use group::Group; +use group::{ff::PrimeField, Group}; use dalek_ff_group::Scalar; diff --git a/crypto/frost/src/curves/kp256.rs b/crypto/frost/src/curves/kp256.rs index e7421c5a..35e466e2 100644 --- a/crypto/frost/src/curves/kp256.rs +++ b/crypto/frost/src/curves/kp256.rs @@ -4,8 +4,7 @@ use rand_core::{RngCore, 
CryptoRng}; use sha2::{digest::Update, Digest, Sha256}; -use ff::{Field, PrimeField}; -use group::{Group, GroupEncoding}; +use group::{ff::{Field, PrimeField}, Group, GroupEncoding}; use elliptic_curve::{bigint::{Encoding, U384}, hash2curve::{Expander, ExpandMsg, ExpandMsgXmd}}; diff --git a/crypto/frost/src/key_gen.rs b/crypto/frost/src/key_gen.rs index 8f235b6b..fd5f13bb 100644 --- a/crypto/frost/src/key_gen.rs +++ b/crypto/frost/src/key_gen.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use rand_core::{RngCore, CryptoRng}; -use ff::{Field, PrimeField}; +use group::ff::{Field, PrimeField}; use multiexp::{multiexp_vartime, BatchVerifier}; diff --git a/crypto/frost/src/lib.rs b/crypto/frost/src/lib.rs index c96e0333..cd5d8ee2 100644 --- a/crypto/frost/src/lib.rs +++ b/crypto/frost/src/lib.rs @@ -5,8 +5,7 @@ use thiserror::Error; use rand_core::{RngCore, CryptoRng}; -use ff::{Field, PrimeField}; -use group::{Group, GroupOps}; +use group::{ff::{Field, PrimeField}, Group, GroupOps}; mod schnorr; diff --git a/crypto/frost/src/schnorr.rs b/crypto/frost/src/schnorr.rs index c138f05c..22361173 100644 --- a/crypto/frost/src/schnorr.rs +++ b/crypto/frost/src/schnorr.rs @@ -1,6 +1,6 @@ use rand_core::{RngCore, CryptoRng}; -use ff::Field; +use group::ff::Field; use multiexp::BatchVerifier; diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index 49c1c853..987cec0e 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -3,7 +3,7 @@ use std::{sync::Arc, collections::HashMap}; use rand_core::{RngCore, CryptoRng}; -use ff::Field; +use group::ff::Field; use transcript::Transcript; diff --git a/crypto/frost/src/tests/curve.rs b/crypto/frost/src/tests/curve.rs index eed4e62a..d7327605 100644 --- a/crypto/frost/src/tests/curve.rs +++ b/crypto/frost/src/tests/curve.rs @@ -1,7 +1,6 @@ use rand_core::{RngCore, CryptoRng}; -use ff::Field; -use group::Group; +use group::{ff::Field, Group}; use crate::{Curve, MultisigKeys, tests::key_gen}; diff --git 
a/crypto/frost/src/tests/mod.rs b/crypto/frost/src/tests/mod.rs index 52fbf515..78bc7425 100644 --- a/crypto/frost/src/tests/mod.rs +++ b/crypto/frost/src/tests/mod.rs @@ -2,7 +2,7 @@ use std::{sync::Arc, collections::HashMap}; use rand_core::{RngCore, CryptoRng}; -use ff::Field; +use group::ff::Field; use crate::{ Curve, diff --git a/crypto/frost/src/tests/schnorr.rs b/crypto/frost/src/tests/schnorr.rs index 684107a7..c9550577 100644 --- a/crypto/frost/src/tests/schnorr.rs +++ b/crypto/frost/src/tests/schnorr.rs @@ -2,7 +2,7 @@ use std::{marker::PhantomData, sync::Arc, collections::HashMap}; use rand_core::{RngCore, CryptoRng}; -use ff::Field; +use group::ff::Field; use crate::{ Curve, MultisigKeys, schnorr::{self, SchnorrSignature}, algorithm::{Hram, Schnorr}, From b49f8cbe4fe7b55afde3bd3273ea19d394d68976 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 19 Jun 2022 06:38:06 -0400 Subject: [PATCH 038/105] Prepare FROST for publishing --- crypto/frost/Cargo.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crypto/frost/Cargo.toml b/crypto/frost/Cargo.toml index 929b1eb0..80ac587d 100644 --- a/crypto/frost/Cargo.toml +++ b/crypto/frost/Cargo.toml @@ -21,11 +21,11 @@ group = "0.12" elliptic-curve = { version = "0.12", features = ["hash2curve"], optional = true } p256 = { version = "0.11", features = ["arithmetic", "hash2curve"], optional = true } k256 = { version = "0.11", features = ["arithmetic", "hash2curve"], optional = true } -dalek-ff-group = { path = "../dalek-ff-group", optional = true } +dalek-ff-group = { path = "../dalek-ff-group", version = "0.1", optional = true } -transcript = { package = "transcript-trait", path = "../transcript" } +transcript = { package = "transcript-trait", path = "../transcript", version = "0.1" } -multiexp = { path = "../multiexp", features = ["batch"] } +multiexp = { path = "../multiexp", version = "0.1", features = ["batch"] } [dev-dependencies] rand = "0.8" From 
71fca061206a3634bd21753523bbf2d55a3b38c2 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 19 Jun 2022 07:52:03 -0400 Subject: [PATCH 039/105] Correct monero/processor dependencies --- coins/monero/Cargo.toml | 2 +- crypto/frost/README.md | 2 +- processor/Cargo.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/coins/monero/Cargo.toml b/coins/monero/Cargo.toml index acc301aa..ae7e82e1 100644 --- a/coins/monero/Cargo.toml +++ b/coins/monero/Cargo.toml @@ -23,7 +23,7 @@ curve25519-dalek = { version = "3", features = ["std"] } group = { version = "0.12", optional = true } dalek-ff-group = { path = "../../crypto/dalek-ff-group", optional = true } -transcript = { path = "../../crypto/transcript", optional = true } +transcript = { package = "transcript-trait", path = "../../crypto/transcript", optional = true } frost = { package = "modular-frost", path = "../../crypto/frost", features = ["ed25519"], optional = true } monero = "0.16" diff --git a/crypto/frost/README.md b/crypto/frost/README.md index ecb80d2a..cae85207 100644 --- a/crypto/frost/README.md +++ b/crypto/frost/README.md @@ -1,6 +1,6 @@ # Modular FROST A modular implementation of FROST for any curve with a ff/group API. Notably, -beyond curve modularity, custom algorithms may be specified providing support +beyond curve modularity, custom algorithms may be specified, providing support for privacy coins. The provided Schnorr algorithm also has a modular HRAM due to the variety in existence, enabling integration with existing systems. 
diff --git a/processor/Cargo.toml b/processor/Cargo.toml index 360f2767..a791c88d 100644 --- a/processor/Cargo.toml +++ b/processor/Cargo.toml @@ -18,7 +18,7 @@ serde_json = "1.0" curve25519-dalek = { version = "3", features = ["std"] } blake2 = "0.10" -transcript = { path = "../crypto/transcript" } +transcript = { package = "transcript-trait", path = "../crypto/transcript" } dalek-ff-group = { path = "../crypto/dalek-ff-group" } frost = { package = "modular-frost", path = "../crypto/frost" } From f50f2494682fbb2e3dc0042a89d53286b41c8ccb Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 19 Jun 2022 12:03:01 -0400 Subject: [PATCH 040/105] Add fee handling code to Monero Updates how change outputs are handled, with a far more logical construction offering greater flexibility. prepare_outputs can no longer error. SignableTransaction::new will. --- coins/monero/src/ringct/bulletproofs.rs | 18 ++- coins/monero/src/ringct/clsag/mod.rs | 7 +- coins/monero/src/ringct/mod.rs | 12 ++ coins/monero/src/rpc.rs | 59 +++++--- coins/monero/src/serialize.rs | 4 + coins/monero/src/transaction.rs | 30 ++++ coins/monero/src/wallet/decoys.rs | 3 +- coins/monero/src/wallet/mod.rs | 2 +- coins/monero/src/wallet/send/mod.rs | 183 +++++++++++++++-------- coins/monero/src/wallet/send/multisig.rs | 13 +- coins/monero/tests/send.rs | 7 +- 11 files changed, 231 insertions(+), 107 deletions(-) diff --git a/coins/monero/src/ringct/bulletproofs.rs b/coins/monero/src/ringct/bulletproofs.rs index 6a5866b2..e6a258d9 100644 --- a/coins/monero/src/ringct/bulletproofs.rs +++ b/coins/monero/src/ringct/bulletproofs.rs @@ -6,6 +6,8 @@ use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint}; use crate::{Commitment, wallet::TransactionError, serialize::*}; +pub(crate) const MAX_OUTPUTS: usize = 16; + #[derive(Clone, PartialEq, Debug)] pub struct Bulletproofs { pub A: EdwardsPoint, @@ -22,8 +24,22 @@ pub struct Bulletproofs { } impl Bulletproofs { + pub(crate) fn fee_weight(outputs: usize) -> 
usize { + let proofs = 6 + usize::try_from(usize::BITS - (outputs - 1).leading_zeros()).unwrap(); + let len = (9 + (2 * proofs)) * 32; + + let mut clawback = 0; + let padded = 1 << (proofs - 6); + if padded > 2 { + const BP_BASE: usize = 368; + clawback = ((BP_BASE * padded) - len) * 4 / 5; + } + + len + clawback + } + pub fn new(rng: &mut R, outputs: &[Commitment]) -> Result { - if outputs.len() > 16 { + if outputs.len() > MAX_OUTPUTS { return Err(TransactionError::TooManyOutputs)?; } diff --git a/coins/monero/src/ringct/clsag/mod.rs b/coins/monero/src/ringct/clsag/mod.rs index 215f08e4..80a50300 100644 --- a/coins/monero/src/ringct/clsag/mod.rs +++ b/coins/monero/src/ringct/clsag/mod.rs @@ -15,7 +15,8 @@ use crate::{ Commitment, wallet::decoys::Decoys, random_scalar, hash_to_scalar, hash_to_point, - serialize::* + serialize::*, + transaction::RING_LEN }; #[cfg(feature = "multisig")] @@ -287,6 +288,10 @@ impl Clsag { Ok(()) } + pub(crate) fn fee_weight() -> usize { + (RING_LEN * 32) + 32 + 32 + } + pub fn serialize(&self, w: &mut W) -> std::io::Result<()> { write_raw_vec(write_scalar, &self.s, w)?; w.write_all(&self.c1.to_bytes())?; diff --git a/coins/monero/src/ringct/mod.rs b/coins/monero/src/ringct/mod.rs index 91b565b6..dbfc0fad 100644 --- a/coins/monero/src/ringct/mod.rs +++ b/coins/monero/src/ringct/mod.rs @@ -16,6 +16,10 @@ pub struct RctBase { } impl RctBase { + pub(crate) fn fee_weight(outputs: usize) -> usize { + 1 + 8 + (outputs * (8 + 32)) + } + pub fn serialize(&self, w: &mut W, rct_type: u8) -> std::io::Result<()> { w.write_all(&[rct_type])?; match rct_type { @@ -69,6 +73,10 @@ impl RctPrunable { } } + pub(crate) fn fee_weight(inputs: usize, outputs: usize) -> usize { + 1 + Bulletproofs::fee_weight(outputs) + (inputs * (Clsag::fee_weight() + 32)) + } + pub fn serialize(&self, w: &mut W) -> std::io::Result<()> { match self { RctPrunable::Null => Ok(()), @@ -114,6 +122,10 @@ pub struct RctSignatures { } impl RctSignatures { + pub(crate) fn 
fee_weight(inputs: usize, outputs: usize) -> usize { + RctBase::fee_weight(outputs) + RctPrunable::fee_weight(inputs, outputs) + } + pub fn serialize(&self, w: &mut W) -> std::io::Result<()> { self.base.serialize(w, self.prunable.rct_type())?; self.prunable.serialize(w) diff --git a/coins/monero/src/rpc.rs b/coins/monero/src/rpc.rs index 7a3edf80..9a5ee8fe 100644 --- a/coins/monero/src/rpc.rs +++ b/coins/monero/src/rpc.rs @@ -9,7 +9,7 @@ use serde_json::json; use reqwest; -use crate::{transaction::{Input, Timelock, Transaction}, block::Block}; +use crate::{transaction::{Input, Timelock, Transaction}, block::Block, wallet::Fee}; #[derive(Deserialize, Debug)] pub struct EmptyResponse {} @@ -34,9 +34,6 @@ pub enum RpcError { InvalidTransaction([u8; 32]) } -#[derive(Clone, Debug)] -pub struct Rpc(String); - fn rpc_hex(value: &str) -> Result, RpcError> { hex::decode(value).map_err(|_| RpcError::InternalError("Monero returned invalid hex".to_string())) } @@ -47,6 +44,9 @@ fn rpc_point(point: &str) -> Result { ).decompress().ok_or(RpcError::InvalidPoint(point.to_string())) } +#[derive(Clone, Debug)] +pub struct Rpc(String); + impl Rpc { pub fn new(daemon: String) -> Rpc { Rpc(daemon) @@ -233,6 +233,32 @@ impl Rpc { Ok(indexes.o_indexes) } + pub async fn get_output_distribution(&self, height: usize) -> Result, RpcError> { + #[allow(dead_code)] + #[derive(Deserialize, Debug)] + pub struct Distribution { + distribution: Vec + } + + #[allow(dead_code)] + #[derive(Deserialize, Debug)] + struct Distributions { + distributions: Vec + } + + let mut distributions: JsonRpcResponse = self.rpc_call("json_rpc", Some(json!({ + "method": "get_output_distribution", + "params": { + "binary": false, + "amounts": [0], + "cumulative": true, + "to_height": height + } + }))).await?; + + Ok(distributions.result.distributions.swap_remove(0).distribution) + } + pub async fn get_outputs( &self, indexes: &[u64], @@ -278,30 +304,19 @@ impl Rpc { ).collect() } - pub async fn 
get_output_distribution(&self, height: usize) -> Result, RpcError> { + pub async fn get_fee(&self) -> Result { #[allow(dead_code)] #[derive(Deserialize, Debug)] - pub struct Distribution { - distribution: Vec + struct FeeResponse { + fee: u64, + quantization_mask: u64 } - #[allow(dead_code)] - #[derive(Deserialize, Debug)] - struct Distributions { - distributions: Vec - } - - let mut distributions: JsonRpcResponse = self.rpc_call("json_rpc", Some(json!({ - "method": "get_output_distribution", - "params": { - "binary": false, - "amounts": [0], - "cumulative": true, - "to_height": height - } + let res: JsonRpcResponse = self.rpc_call("json_rpc", Some(json!({ + "method": "get_fee_estimate" }))).await?; - Ok(distributions.result.distributions.swap_remove(0).distribution) + Ok(Fee { per_weight: res.result.fee, mask: res.result.quantization_mask }) } pub async fn publish_transaction(&self, tx: &Transaction) -> Result<(), RpcError> { diff --git a/coins/monero/src/serialize.rs b/coins/monero/src/serialize.rs index 1303d43e..0ecd05b8 100644 --- a/coins/monero/src/serialize.rs +++ b/coins/monero/src/serialize.rs @@ -4,6 +4,10 @@ use curve25519_dalek::{scalar::Scalar, edwards::{EdwardsPoint, CompressedEdwards pub const VARINT_CONTINUATION_MASK: u8 = 0b1000_0000; +pub fn varint_len(varint: usize) -> usize { + ((usize::try_from(usize::BITS - varint.leading_zeros()).unwrap().saturating_sub(1)) / 7) + 1 +} + pub fn write_varint(varint: &u64, w: &mut W) -> io::Result<()> { let mut varint = *varint; while { diff --git a/coins/monero/src/transaction.rs b/coins/monero/src/transaction.rs index 338a16e5..32c68750 100644 --- a/coins/monero/src/transaction.rs +++ b/coins/monero/src/transaction.rs @@ -2,6 +2,8 @@ use curve25519_dalek::edwards::EdwardsPoint; use crate::{hash, serialize::*, ringct::{RctPrunable, RctSignatures}}; +pub const RING_LEN: usize = 11; + #[derive(Clone, PartialEq, Debug)] pub enum Input { Gen(u64), @@ -14,6 +16,13 @@ pub enum Input { } impl Input { + // Worst-case 
predictive len + pub(crate) fn fee_weight() -> usize { + // Uses 1 byte for the VarInt amount due to amount being 0 + // Uses 1 byte for the VarInt encoding of the length of the ring as well + 1 + 1 + 1 + (8 * RING_LEN) + 32 + } + pub fn serialize(&self, w: &mut W) -> std::io::Result<()> { match self { Input::Gen(height) => { @@ -56,6 +65,10 @@ pub struct Output { } impl Output { + pub(crate) fn fee_weight() -> usize { + 1 + 1 + 32 + 1 + } + pub fn serialize(&self, w: &mut W) -> std::io::Result<()> { write_varint(&self.amount, w)?; w.write_all(&[2 + (if self.tag.is_some() { 1 } else { 0 })])?; @@ -102,6 +115,10 @@ impl Timelock { } } + pub(crate) fn fee_weight() -> usize { + 8 + } + fn serialize(&self, w: &mut W) -> std::io::Result<()> { write_varint( &match self { @@ -124,6 +141,15 @@ pub struct TransactionPrefix { } impl TransactionPrefix { + pub(crate) fn fee_weight(inputs: usize, outputs: usize, extra: usize) -> usize { + // Assumes Timelock::None since this library won't let you create a TX with a timelock + 1 + 1 + + varint_len(inputs) + (inputs * Input::fee_weight()) + + // Only 16 outputs are possible under transactions by this lib + 1 + (outputs * Output::fee_weight()) + + varint_len(extra) + extra + } + pub fn serialize(&self, w: &mut W) -> std::io::Result<()> { write_varint(&self.version, w)?; self.timelock.serialize(w)?; @@ -157,6 +183,10 @@ pub struct Transaction { } impl Transaction { + pub(crate) fn fee_weight(inputs: usize, outputs: usize, extra: usize) -> usize { + TransactionPrefix::fee_weight(inputs, outputs, extra) + RctSignatures::fee_weight(inputs, outputs) + } + pub fn serialize(&self, w: &mut W) -> std::io::Result<()> { self.prefix.serialize(w)?; self.rct_signatures.serialize(w) diff --git a/coins/monero/src/wallet/decoys.rs b/coins/monero/src/wallet/decoys.rs index f205a735..405926bc 100644 --- a/coins/monero/src/wallet/decoys.rs +++ b/coins/monero/src/wallet/decoys.rs @@ -7,7 +7,7 @@ use rand_distr::{Distribution, Gamma}; use 
curve25519_dalek::edwards::EdwardsPoint; -use crate::{wallet::SpendableOutput, rpc::{RpcError, Rpc}}; +use crate::{transaction::RING_LEN, wallet::SpendableOutput, rpc::{RpcError, Rpc}}; const LOCK_WINDOW: usize = 10; const MATURITY: u64 = 60; @@ -16,7 +16,6 @@ const BLOCK_TIME: usize = 120; const BLOCKS_PER_YEAR: usize = 365 * 24 * 60 * 60 / BLOCK_TIME; const TIP_APPLICATION: f64 = (LOCK_WINDOW * BLOCK_TIME) as f64; -const RING_LEN: usize = 11; const DECOYS: usize = RING_LEN - 1; lazy_static! { diff --git a/coins/monero/src/wallet/mod.rs b/coins/monero/src/wallet/mod.rs index e717fe4c..e0287eb4 100644 --- a/coins/monero/src/wallet/mod.rs +++ b/coins/monero/src/wallet/mod.rs @@ -13,7 +13,7 @@ pub(crate) mod decoys; pub(crate) use decoys::Decoys; mod send; -pub use send::{TransactionError, SignableTransaction}; +pub use send::{Fee, TransactionError, SignableTransaction}; #[cfg(feature = "multisig")] pub use send::TransactionMachine; diff --git a/coins/monero/src/wallet/send/mod.rs b/coins/monero/src/wallet/send/mod.rs index 6a8c14dd..a9a689dd 100644 --- a/coins/monero/src/wallet/send/mod.rs +++ b/coins/monero/src/wallet/send/mod.rs @@ -24,7 +24,7 @@ use crate::{ generate_key_image, ringct::{ clsag::{ClsagError, ClsagInput, Clsag}, - bulletproofs::Bulletproofs, + bulletproofs::{MAX_OUTPUTS, Bulletproofs}, RctBase, RctPrunable, RctSignatures }, transaction::{Input, Output, Timelock, TransactionPrefix, Transaction}, @@ -44,53 +44,53 @@ pub use multisig::TransactionMachine; struct SendOutput { R: EdwardsPoint, dest: EdwardsPoint, - mask: Scalar, + commitment: Commitment, amount: [u8; 8] } impl SendOutput { fn new( rng: &mut R, - unique: Option<[u8; 32]>, - output: (Address, u64), + unique: [u8; 32], + output: (Address, u64, bool), o: usize - ) -> Result { + ) -> SendOutput { let r = random_scalar(rng); let shared_key = shared_key( - unique, + Some(unique).filter(|_| output.2), r, - &output.0.public_view.point.decompress().ok_or(TransactionError::InvalidAddress)?, + 
&output.0.public_view.point.decompress().expect("SendOutput::new requires valid addresses"), o ); - let spend = output.0.public_spend.point.decompress().ok_or(TransactionError::InvalidAddress)?; - Ok( - SendOutput { - R: match output.0.addr_type { - AddressType::Standard => Ok(&r * &ED25519_BASEPOINT_TABLE), - AddressType::SubAddress => Ok(&r * spend), - AddressType::Integrated(_) => Err(TransactionError::InvalidAddress) - }?, - dest: (&shared_key * &ED25519_BASEPOINT_TABLE) + spend, - mask: commitment_mask(shared_key), - amount: amount_encryption(output.1, shared_key) - } - ) + let spend = output.0.public_spend.point.decompress().expect("SendOutput::new requires valid addresses"); + SendOutput { + R: match output.0.addr_type { + AddressType::Standard => &r * &ED25519_BASEPOINT_TABLE, + AddressType::SubAddress => &r * spend, + AddressType::Integrated(_) => panic!("SendOutput::new doesn't support Integrated addresses") + }, + dest: ((&shared_key * &ED25519_BASEPOINT_TABLE) + spend), + commitment: Commitment::new(commitment_mask(shared_key), output.1), + amount: amount_encryption(output.1, shared_key) + } } } #[derive(Clone, Error, Debug)] pub enum TransactionError { + #[error("invalid address")] + InvalidAddress, #[error("no inputs")] NoInputs, #[error("no outputs")] NoOutputs, + #[error("only one output and no change address")] + NoChange, #[error("too many outputs")] TooManyOutputs, #[error("not enough funds (in {0}, out {1})")] NotEnoughFunds(u64, u64), - #[error("invalid address")] - InvalidAddress, #[error("wrong spend private key")] WrongPrivateKey, #[error("rpc error ({0})")] @@ -154,24 +154,56 @@ async fn prepare_inputs( Ok(signable) } +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct Fee { + pub per_weight: u64, + pub mask: u64 +} + +impl Fee { + pub fn calculate(&self, weight: usize) -> u64 { + ((((self.per_weight * u64::try_from(weight).unwrap()) - 1) / self.mask) + 1) * self.mask + } +} + #[derive(Clone, PartialEq, Debug)] pub struct 
SignableTransaction { inputs: Vec, - payments: Vec<(Address, u64)>, - change: Address, - fee_per_byte: u64, - - fee: u64, - outputs: Vec + payments: Vec<(Address, u64, bool)>, + outputs: Vec, + fee: u64 } impl SignableTransaction { pub fn new( inputs: Vec, payments: Vec<(Address, u64)>, - change: Address, - fee_per_byte: u64 + change_address: Option

, + fee_rate: Fee ) -> Result { + // Make sure all addresses are valid + let test = |addr: Address| { + if !( + addr.public_view.point.decompress().is_some() && + addr.public_spend.point.decompress().is_some() + ) { + Err(TransactionError::InvalidAddress)?; + } + + match addr.addr_type { + AddressType::Standard => Ok(()), + AddressType::Integrated(..) => Err(TransactionError::InvalidAddress), + AddressType::SubAddress => Ok(()) + } + }; + + for payment in &payments { + test(payment.0)?; + } + if let Some(change) = change_address { + test(change)?; + } + if inputs.len() == 0 { Err(TransactionError::NoInputs)?; } @@ -179,15 +211,55 @@ impl SignableTransaction { Err(TransactionError::NoOutputs)?; } + // TODO TX MAX SIZE + + // If we don't have two outputs, as required by Monero, add a second + let mut change = payments.len() == 1; + if change && change_address.is_none() { + Err(TransactionError::NoChange)?; + } + let mut outputs = payments.len() + (if change { 1 } else { 0 }); + + // Calculate the fee. 
+ let extra = 0; + let mut fee = fee_rate.calculate(Transaction::fee_weight(inputs.len(), outputs, extra)); + + // Make sure we have enough funds + let in_amount = inputs.iter().map(|input| input.commitment.amount).sum::(); + let mut out_amount = payments.iter().map(|payment| payment.1).sum::() + fee; + if in_amount < out_amount { + Err(TransactionError::NotEnoughFunds(in_amount, out_amount))?; + } + + // If we have yet to add a change output, do so if it's economically viable + if (!change) && change_address.is_some() && (in_amount != out_amount) { + // Check even with the new fee, there's remaining funds + let change_fee = fee_rate.calculate(Transaction::fee_weight(inputs.len(), outputs + 1, extra)) - fee; + if (out_amount + change_fee) < in_amount { + change = true; + outputs += 1; + out_amount += change_fee; + fee += change_fee; + } + } + + if outputs > MAX_OUTPUTS { + Err(TransactionError::TooManyOutputs)?; + } + + let mut payments = payments.iter().map(|(address, amount)| (*address, *amount, false)).collect::>(); + if change { + // Always use a unique key image for the change output + // TODO: Make this a config option + payments.push((change_address.unwrap(), in_amount - out_amount, true)); + } + Ok( SignableTransaction { inputs, payments, - change, - fee_per_byte, - - fee: 0, - outputs: vec![] + outputs: vec![], + fee } ) } @@ -196,39 +268,19 @@ impl SignableTransaction { &mut self, rng: &mut R, uniqueness: [u8; 32] - ) -> Result<(Vec, Scalar), TransactionError> { - self.fee = self.fee_per_byte * 2000; // TODO - - // TODO TX MAX SIZE - - // Make sure we have enough funds - let in_amount = self.inputs.iter().map(|input| input.commitment.amount).sum(); - let out_amount = self.fee + self.payments.iter().map(|payment| payment.1).sum::(); - if in_amount < out_amount { - Err(TransactionError::NotEnoughFunds(in_amount, out_amount))?; - } - - let mut temp_outputs = Vec::with_capacity(self.payments.len() + 1); - // Add the payments to the outputs - for payment in 
&self.payments { - temp_outputs.push((None, (payment.0, payment.1))); - } - temp_outputs.push((Some(uniqueness), (self.change, in_amount - out_amount))); - - // Shuffle the outputs - temp_outputs.shuffle(rng); + ) -> (Vec, Scalar) { + // Shuffle the payments + self.payments.shuffle(rng); // Actually create the outputs - self.outputs = Vec::with_capacity(temp_outputs.len()); - let mut commitments = Vec::with_capacity(temp_outputs.len()); - let mut mask_sum = Scalar::zero(); - for (o, output) in temp_outputs.iter().enumerate() { - self.outputs.push(SendOutput::new(rng, output.0, output.1, o)?); - commitments.push(Commitment::new(self.outputs[o].mask, output.1.1)); - mask_sum += self.outputs[o].mask; + self.outputs = Vec::with_capacity(self.payments.len() + 1); + for (o, output) in self.payments.iter().enumerate() { + self.outputs.push(SendOutput::new(rng, uniqueness, *output, o)); } - Ok((commitments, mask_sum)) + let commitments = self.outputs.iter().map(|output| output.commitment).collect::>(); + let sum = commitments.iter().map(|commitment| commitment.mask).sum(); + (commitments, sum) } fn prepare_transaction( @@ -246,7 +298,6 @@ impl SignableTransaction { self.outputs[1 ..].iter().map(|output| PublicKey { point: output.R.compress() }).collect() ).consensus_encode(&mut extra).unwrap(); - // Format it for monero-rs let mut tx_outputs = Vec::with_capacity(self.outputs.len()); let mut ecdh_info = Vec::with_capacity(self.outputs.len()); for o in 0 .. 
self.outputs.len() { @@ -307,7 +358,7 @@ impl SignableTransaction { key_image: *image }).collect::>() ) - )?; + ); let mut tx = self.prepare_transaction(&commitments, Bulletproofs::new(rng, &commitments)?); diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index 4b7d0f69..84dc63d7 100644 --- a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -35,9 +35,8 @@ pub struct TransactionMachine { } impl SignableTransaction { - pub async fn multisig( - mut self, - rng: &mut R, + pub async fn multisig( + self, rpc: &Rpc, keys: MultisigKeys, mut transcript: Transcript, @@ -80,8 +79,8 @@ impl SignableTransaction { for payment in &self.payments { transcript.append_message(b"payment_address", &payment.0.as_bytes()); transcript.append_message(b"payment_amount", &payment.1.to_le_bytes()); + transcript.append_message(b"payment_unique", &(if payment.2 { [1] } else { [0] })); } - transcript.append_message(b"change", &self.change.as_bytes()); // Sort included before cloning it around included.sort_unstable(); @@ -105,9 +104,6 @@ impl SignableTransaction { ); } - // Verify these outputs by a dummy prep - self.prepare_outputs(rng, [0; 32])?; - // Select decoys // Ideally, this would be done post entropy, instead of now, yet doing so would require sign // to be async which isn't preferable. 
This should be suitably competent though @@ -228,7 +224,6 @@ impl StateMachine for TransactionMachine { let mut images = self.images.clone(); images.sort_by(key_image_sort); - // Not invalid outputs due to already doing a dummy prep let (commitments, output_masks) = self.signable.prepare_outputs( &mut ChaCha12Rng::from_seed(self.transcript.rng_seed(b"tx_keys")), uniqueness( @@ -238,7 +233,7 @@ impl StateMachine for TransactionMachine { key_image: *image }).collect::>() ) - ).expect("Couldn't prepare outputs despite already doing a dummy prep"); + ); self.output_masks = Some(output_masks); self.signable.prepare_transaction( diff --git a/coins/monero/tests/send.rs b/coins/monero/tests/send.rs index 4c002d45..9ef558dc 100644 --- a/coins/monero/tests/send.rs +++ b/coins/monero/tests/send.rs @@ -80,9 +80,7 @@ async fn send_core(test: usize, multisig: bool) { PublicKey { point: (&view * &ED25519_BASEPOINT_TABLE).compress() } ); - // TODO - let fee_per_byte = 50000000; - let fee = fee_per_byte * 2000; + let fee = rpc.get_fee().await.unwrap(); let start = rpc.get_height().await.unwrap(); for _ in 0 .. 
7 { @@ -134,7 +132,7 @@ async fn send_core(test: usize, multisig: bool) { } let mut signable = SignableTransaction::new( - outputs, vec![(addr, amount - fee)], addr, fee_per_byte + outputs, vec![(addr, amount - 10000000000)], Some(addr), fee ).unwrap(); if !multisig { @@ -147,7 +145,6 @@ async fn send_core(test: usize, multisig: bool) { machines.insert( i, signable.clone().multisig( - &mut OsRng, &rpc, (*keys[&i]).clone(), Transcript::new(b"Monero Serai Test Transaction"), From b6ea654823c06598aaff8d35e8455d87180e0b11 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 19 Jun 2022 12:19:32 -0400 Subject: [PATCH 041/105] Update the processor to use the coin's specified fee --- processor/src/coins/monero.rs | 17 +++++++++-------- processor/src/lib.rs | 4 +++- processor/src/scanner.rs | 5 ----- processor/src/tests/mod.rs | 20 +++++++++++++------- processor/src/wallet.rs | 10 ++++++---- 5 files changed, 31 insertions(+), 25 deletions(-) delete mode 100644 processor/src/scanner.rs diff --git a/processor/src/coins/monero.rs b/processor/src/coins/monero.rs index f1d96bc5..55226981 100644 --- a/processor/src/coins/monero.rs +++ b/processor/src/coins/monero.rs @@ -13,7 +13,7 @@ use monero_serai::{ frost::Ed25519, transaction::{Timelock, Transaction}, rpc::Rpc, - wallet::{SpendableOutput, SignableTransaction as MSignableTransaction, TransactionMachine} + wallet::{Fee, SpendableOutput, SignableTransaction as MSignableTransaction, TransactionMachine} }; use crate::{Transcript, CoinError, Output as OutputTrait, Coin, view_key}; @@ -59,7 +59,7 @@ pub struct SignableTransaction( #[derive(Clone, Debug)] pub struct Monero { - rpc: Rpc, + pub(crate) rpc: Rpc, view: Scalar, view_pub: PublicKey } @@ -79,6 +79,7 @@ impl Monero { impl Coin for Monero { type Curve = Ed25519; + type Fee = Fee; type Transaction = Transaction; type Block = Vec; @@ -132,7 +133,8 @@ impl Coin for Monero { transcript: Transcript, height: usize, mut inputs: Vec, - payments: &[(Address, u64)] + payments: 
&[(Address, u64)], + fee: Fee ) -> Result { let spend = keys.group_key(); Ok( @@ -143,8 +145,8 @@ impl Coin for Monero { MSignableTransaction::new( inputs.drain(..).map(|input| input.0).collect(), payments.to_vec(), - self.address(spend), - 100000000 // TODO + Some(self.address(spend)), + fee ).map_err(|_| CoinError::ConnectionError)? ) ) @@ -156,7 +158,6 @@ impl Coin for Monero { included: &[u16] ) -> Result { transaction.3.clone().multisig( - &mut OsRng, &self.rpc, (*transaction.0).clone(), transaction.1.clone(), @@ -213,8 +214,8 @@ impl Coin for Monero { let tx = MSignableTransaction::new( outputs, vec![(address, amount - fee)], - temp, - fee / 2000 + Some(temp), + self.rpc.get_fee().await.unwrap() ).unwrap().sign(&mut OsRng, &self.rpc, &Scalar::one()).await.unwrap(); self.rpc.publish_transaction(&tx).await.unwrap(); self.mine_block(temp).await; diff --git a/processor/src/lib.rs b/processor/src/lib.rs index 65862aed..2a615ea5 100644 --- a/processor/src/lib.rs +++ b/processor/src/lib.rs @@ -51,6 +51,7 @@ pub trait Output: Sized + Clone { pub trait Coin { type Curve: Curve; + type Fee: Copy; type Transaction; type Block; @@ -82,7 +83,8 @@ pub trait Coin { transcript: Transcript, height: usize, inputs: Vec, - payments: &[(Self::Address, u64)] + payments: &[(Self::Address, u64)], + fee: Self::Fee ) -> Result; async fn attempt_send( diff --git a/processor/src/scanner.rs b/processor/src/scanner.rs deleted file mode 100644 index 59cca8fe..00000000 --- a/processor/src/scanner.rs +++ /dev/null @@ -1,5 +0,0 @@ -struct Scanner {} - -impl Scanner { - -} diff --git a/processor/src/tests/mod.rs b/processor/src/tests/mod.rs index c23b8309..e7214b97 100644 --- a/processor/src/tests/mod.rs +++ b/processor/src/tests/mod.rs @@ -57,16 +57,17 @@ impl Network for LocalNetwork { } } -async fn test_send(coin: C) { +async fn test_send(coin: C, fee: C::Fee) { // Mine a block so there's a confirmed height coin.mine_block(coin.address(::G::generator())).await; let height = 
coin.get_height().await.unwrap(); - let mut networks = LocalNetwork::new(3); - let mut keys = frost::tests::key_gen::<_, C::Curve>(&mut OsRng); + let threshold = keys[&1].params().t(); + let mut networks = LocalNetwork::new(threshold); + let mut wallets = vec![]; - for i in 1 ..= 3 { + for i in 1 ..= threshold { let mut wallet = Wallet::new(MemCoinDb::new(), coin.clone()); wallet.acknowledge_height(0, height); wallet.add_keys( @@ -95,9 +96,12 @@ async fn test_send(coin: C) { wallet.acknowledge_height(1, height - 10); let signable = wallet.prepare_sends( 1, - vec![(wallet.address(), 10000000000)] + vec![(wallet.address(), 10000000000)], + fee ).await.unwrap().1.swap_remove(0); - futures.push(wallet.attempt_send(network, signable, &[1, 2, 3])); + futures.push( + wallet.attempt_send(network, signable, (1 ..= threshold).into_iter().collect::>()) + ); } println!( @@ -108,5 +112,7 @@ async fn test_send(coin: C) { #[tokio::test] async fn monero() { - test_send(Monero::new("http://127.0.0.1:18081".to_string())).await; + let monero = Monero::new("http://127.0.0.1:18081".to_string()); + let fee = monero.rpc.get_fee().await.unwrap(); + test_send(monero, fee).await; } diff --git a/processor/src/wallet.rs b/processor/src/wallet.rs index 27a1cb83..be58a838 100644 --- a/processor/src/wallet.rs +++ b/processor/src/wallet.rs @@ -281,7 +281,8 @@ impl Wallet { pub async fn prepare_sends( &mut self, canonical: usize, - payments: Vec<(C::Address, u64)> + payments: Vec<(C::Address, u64)>, + fee: C::Fee ) -> Result<(Vec<(C::Address, u64)>, Vec), CoinError> { if payments.len() == 0 { return Ok((vec![], vec![])); @@ -326,7 +327,8 @@ impl Wallet { transcript, acknowledged_height, inputs, - &outputs + &outputs, + fee ).await?; // self.db.save_tx(tx) // TODO txs.push(tx); @@ -340,11 +342,11 @@ impl Wallet { &mut self, network: &mut N, prepared: C::SignableTransaction, - included: &[u16] + included: Vec ) -> Result<(Vec, Vec<::Id>), SignError> { let mut attempt = self.coin.attempt_send( 
prepared, - included + &included ).await.map_err(|e| SignError::CoinError(e))?; let commitments = network.round( From 9d817a00b2216158c2fda1bcdcacd9fa1e7939a7 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 19 Jun 2022 12:19:57 -0400 Subject: [PATCH 042/105] Correct Monero's extra length calculation for fee calculation --- coins/monero/src/wallet/send/mod.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/coins/monero/src/wallet/send/mod.rs b/coins/monero/src/wallet/send/mod.rs index a9a689dd..7c95d753 100644 --- a/coins/monero/src/wallet/send/mod.rs +++ b/coins/monero/src/wallet/send/mod.rs @@ -220,8 +220,11 @@ impl SignableTransaction { } let mut outputs = payments.len() + (if change { 1 } else { 0 }); + // Calculate the extra length. + // Type, length, value, with 1 field for the first key and 1 field for the rest + let extra = (outputs * (2 + 32)) - (outputs.saturating_sub(2) * 2); + // Calculate the fee. - let extra = 0; let mut fee = fee_rate.calculate(Transaction::fee_weight(inputs.len(), outputs, extra)); // Make sure we have enough funds From f10bd5feeea9c3b98018012d459515390bee978d Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 20 Jun 2022 23:00:49 -0400 Subject: [PATCH 043/105] Cache output distribution Also moves to the expected sanity median --- coins/monero/src/rpc.rs | 6 ++++-- coins/monero/src/wallet/decoys.rs | 24 +++++++++++++++--------- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/coins/monero/src/rpc.rs b/coins/monero/src/rpc.rs index 9a5ee8fe..7ac6c30e 100644 --- a/coins/monero/src/rpc.rs +++ b/coins/monero/src/rpc.rs @@ -233,7 +233,8 @@ impl Rpc { Ok(indexes.o_indexes) } - pub async fn get_output_distribution(&self, height: usize) -> Result, RpcError> { + // from and to are inclusive + pub async fn get_output_distribution(&self, from: usize, to: usize) -> Result, RpcError> { #[allow(dead_code)] #[derive(Deserialize, Debug)] pub struct Distribution { @@ -252,7 +253,8 @@ impl Rpc { "binary": 
false, "amounts": [0], "cumulative": true, - "to_height": height + "from_height": from, + "to_height": to } }))).await?; diff --git a/coins/monero/src/wallet/decoys.rs b/coins/monero/src/wallet/decoys.rs index 405926bc..38ae071f 100644 --- a/coins/monero/src/wallet/decoys.rs +++ b/coins/monero/src/wallet/decoys.rs @@ -1,4 +1,4 @@ -use std::collections::HashSet; +use std::{sync::Mutex, collections::HashSet}; use lazy_static::lazy_static; @@ -20,13 +20,14 @@ const DECOYS: usize = RING_LEN - 1; lazy_static! { static ref GAMMA: Gamma = Gamma::new(19.28, 1.0 / 1.61).unwrap(); + static ref DISTRIBUTION: Mutex> = Mutex::new(vec![]); } async fn select_n( rng: &mut R, rpc: &Rpc, height: usize, - distribution: &[u64], + distribution: &Vec, high: u64, per_second: f64, used: &mut HashSet, @@ -107,6 +108,8 @@ impl Decoys { height: usize, inputs: &[SpendableOutput] ) -> Result, RpcError> { + let mut distribution = DISTRIBUTION.lock().unwrap(); + // Convert the inputs in question to the raw output data let mut outputs = Vec::with_capacity(inputs.len()); for input in inputs { @@ -116,7 +119,14 @@ impl Decoys { )); } - let distribution = rpc.get_output_distribution(height).await?; + if distribution.len() <= height { + let from = distribution.len(); + distribution.extend(rpc.get_output_distribution(from, height).await?); + } + // If asked to use an older height than previously asked, truncate to ensure accuracy + // Should never happen, yet risks desyncing if it did + distribution.truncate(height + 1); // height is inclusive, and 0 is a valid height + let high = distribution[distribution.len() - 1]; let per_second = { let blocks = distribution.len().min(BLOCKS_PER_YEAR); @@ -129,8 +139,7 @@ impl Decoys { used.insert(o.0); } - // Panic if not enough decoys are available - // TODO: Simply create a TX with less than the target amount, or at least return an error + // TODO: Simply create a TX with less than the target amount if (high - MATURITY) < u64::try_from(inputs.len() * 
RING_LEN).unwrap() { Err(RpcError::InternalError("not enough decoy candidates".to_string()))?; } @@ -166,10 +175,7 @@ impl Decoys { // small chains if high > 500 { // Make sure the TX passes the sanity check that the median output is within the last 40% - // This actually checks the median is within the last third, a slightly more aggressive - // boundary, as the height used in this calculation will be slightly under the height this - // is sanity checked against - let target_median = high * 2 / 3; + let target_median = high * 3 / 5; while ring[RING_LEN / 2].0 < target_median { // If it's not, update the bottom half with new values to ensure the median only moves up for removed in ring.drain(0 .. (RING_LEN / 2)).collect::>() { From 462d0e74cee32442fbadf1b255f9a9dae035d7cd Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 20 Jun 2022 23:10:13 -0400 Subject: [PATCH 044/105] Pre-allocate the distribution --- coins/monero/src/wallet/decoys.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coins/monero/src/wallet/decoys.rs b/coins/monero/src/wallet/decoys.rs index 38ae071f..dbba42f7 100644 --- a/coins/monero/src/wallet/decoys.rs +++ b/coins/monero/src/wallet/decoys.rs @@ -20,7 +20,7 @@ const DECOYS: usize = RING_LEN - 1; lazy_static! { static ref GAMMA: Gamma = Gamma::new(19.28, 1.0 / 1.61).unwrap(); - static ref DISTRIBUTION: Mutex> = Mutex::new(vec![]); + static ref DISTRIBUTION: Mutex> = Mutex::new(Vec::with_capacity(3000000)); } async fn select_n( From 1caa6a96065cdba1ce2e3c909ee6c9027618403d Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 24 Jun 2022 08:40:14 -0400 Subject: [PATCH 045/105] Enforce FROST StateMachine progression via the type system A comment on the matter was made in https://github.com/serai-dex/serai/issues/12. While I do believe the API is slightly worse, I appreciate the explicitness. 
--- coins/monero/src/wallet/send/multisig.rs | 199 +++++++++++++---------- crypto/frost/src/key_gen.rs | 130 +++++---------- crypto/frost/src/lib.rs | 5 - crypto/frost/src/sign.rs | 159 +++++++----------- crypto/frost/src/tests/mod.rs | 80 ++++----- crypto/frost/src/tests/vectors.rs | 33 ++-- processor/src/coins/monero.rs | 1 - processor/src/lib.rs | 4 +- processor/src/wallet.rs | 16 +- 9 files changed, 276 insertions(+), 351 deletions(-) diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index 84dc63d7..f03fbf36 100644 --- a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -6,7 +6,13 @@ use rand_chacha::ChaCha12Rng; use curve25519_dalek::{traits::Identity, scalar::Scalar, edwards::{EdwardsPoint, CompressedEdwardsY}}; use transcript::Transcript as TranscriptTrait; -use frost::{FrostError, MultisigKeys, MultisigParams, sign::{State, StateMachine, AlgorithmMachine}}; +use frost::{ + FrostError, MultisigKeys, + sign::{ + PreprocessMachine, SignMachine, SignatureMachine, + AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine + } +}; use crate::{ frost::{Transcript, Ed25519}, @@ -24,14 +30,27 @@ pub struct TransactionMachine { decoys: Vec, - our_preprocess: Vec, - - images: Vec, - output_masks: Option, inputs: Vec>>>, - clsags: Vec>, + clsags: Vec> +} - tx: Option +pub struct TransactionSignMachine { + signable: SignableTransaction, + i: u16, + included: Vec, + transcript: Transcript, + + decoys: Vec, + + inputs: Vec>>>, + clsags: Vec>, + + our_preprocess: Vec +} + +pub struct TransactionSignatureMachine { + tx: Transaction, + clsags: Vec> } impl SignableTransaction { @@ -43,8 +62,6 @@ impl SignableTransaction { height: usize, mut included: Vec ) -> Result { - let mut images = vec![]; - images.resize(self.inputs.len(), EdwardsPoint::identity()); let mut inputs = vec![]; for _ in 0 .. 
self.inputs.len() { // Doesn't resize as that will use a single Rc for the entire Vec @@ -118,43 +135,38 @@ impl SignableTransaction { &self.inputs ).await.map_err(|e| TransactionError::RpcError(e))?; - Ok(TransactionMachine { - signable: self, - i: keys.params().i(), - included, - transcript, + Ok( + TransactionMachine { + signable: self, + i: keys.params().i(), + included, + transcript, - decoys, + decoys, - our_preprocess: vec![], - - images, - output_masks: None, - inputs, - clsags, - - tx: None - }) + inputs, + clsags + } + ) } } -impl StateMachine for TransactionMachine { +impl PreprocessMachine for TransactionMachine { type Signature = Transaction; + type SignMachine = TransactionSignMachine; fn preprocess( - &mut self, + mut self, rng: &mut R - ) -> Result, FrostError> { - if self.state() != State::Fresh { - Err(FrostError::InvalidSignTransition(State::Fresh, self.state()))?; - } - + ) -> (TransactionSignMachine, Vec) { // Iterate over each CLSAG calling preprocess let mut serialized = Vec::with_capacity(self.clsags.len() * (64 + ClsagMultisig::serialized_len())); - for clsag in self.clsags.iter_mut() { - serialized.extend(&clsag.preprocess(rng)?); - } - self.our_preprocess = serialized.clone(); + let clsags = self.clsags.drain(..).map(|clsag| { + let (clsag, preprocess) = clsag.preprocess(rng); + serialized.extend(&preprocess); + clsag + }).collect(); + let our_preprocess = serialized.clone(); // We could add further entropy here, and previous versions of this library did so // As of right now, the multisig's key, the inputs being spent, and the FROST data itself @@ -165,18 +177,33 @@ impl StateMachine for TransactionMachine { // increase privacy. If they're not sent in plain text, or are otherwise inaccessible, they // already offer sufficient entropy. 
That's why further entropy is not included - Ok(serialized) + ( + TransactionSignMachine { + signable: self.signable, + i: self.i, + included: self.included, + transcript: self.transcript, + + decoys: self.decoys, + + inputs: self.inputs, + clsags, + + our_preprocess, + }, + serialized + ) } +} + +impl SignMachine for TransactionSignMachine { + type SignatureMachine = TransactionSignatureMachine; fn sign( - &mut self, + mut self, mut commitments: HashMap>, msg: &[u8] - ) -> Result, FrostError> { - if self.state() != State::Preprocessed { - Err(FrostError::InvalidSignTransition(State::Preprocessed, self.state()))?; - } - + ) -> Result<(TransactionSignatureMachine, Vec), FrostError> { if msg.len() != 0 { Err( FrostError::InternalError( @@ -189,7 +216,7 @@ impl StateMachine for TransactionMachine { // While each CLSAG will do this as they need to for security, they have their own transcripts // cloned from this TX's initial premise's transcript. For our TX transcript to have the CLSAG // data for entropy, it'll have to be added ourselves - commitments.insert(self.i, self.our_preprocess.clone()); + commitments.insert(self.i, self.our_preprocess); for l in &self.included { self.transcript.append_message(b"participant", &(*l).to_be_bytes()); // FROST itself will error if this is None, so let it @@ -201,30 +228,33 @@ impl StateMachine for TransactionMachine { // FROST commitments, image, H commitments, and their proofs let clsag_len = 64 + ClsagMultisig::serialized_len(); - let mut commitments = (0 .. self.clsags.len()).map(|c| commitments.iter().map( - |(l, commitments)| (*l, commitments[(c * clsag_len) .. ((c + 1) * clsag_len)].to_vec()) + // Convert the unified commitments to a Vec of the individual commitments + let mut commitments = (0 .. self.clsags.len()).map(|_| commitments.iter_mut().map( + |(l, commitments)| (*l, commitments.drain(.. 
clsag_len).collect::>()) ).collect::>()).collect::>(); + // Calculate the key images + // Clsag will parse/calculate/validate this as needed, yet doing so here as well provides + // the easiest API overall, as this is where the TX is (which needs the key images in its + // message), along with where the outputs are determined (where our change output needs these + // to be unique) + let mut images = vec![EdwardsPoint::identity(); self.clsags.len()]; for c in 0 .. self.clsags.len() { - // Calculate the key images - // Multisig will parse/calculate/validate this as needed, yet doing so here as well provides - // the easiest API overall, as this is where the TX is (which needs the key images in its - // message), along with where the outputs are determined (where our change output needs these - // to be unique) for (l, preprocess) in &commitments[c] { - self.images[c] += CompressedEdwardsY( + images[c] += CompressedEdwardsY( preprocess[64 .. 96].try_into().map_err(|_| FrostError::InvalidCommitment(*l))? 
).decompress().ok_or(FrostError::InvalidCommitment(*l))?; } } // Create the actual transaction + let output_masks; let mut tx = { - // Calculate uniqueness - let mut images = self.images.clone(); - images.sort_by(key_image_sort); + let mut sorted_images = images.clone(); + sorted_images.sort_by(key_image_sort); - let (commitments, output_masks) = self.signable.prepare_outputs( + let commitments; + (commitments, output_masks) = self.signable.prepare_outputs( &mut ChaCha12Rng::from_seed(self.transcript.rng_seed(b"tx_keys")), uniqueness( &images.iter().map(|image| Input::ToKey { @@ -234,7 +264,6 @@ impl StateMachine for TransactionMachine { }).collect::>() ) ); - self.output_masks = Some(output_masks); self.signable.prepare_transaction( &commitments, @@ -245,18 +274,19 @@ impl StateMachine for TransactionMachine { ) }; - let mut sorted = Vec::with_capacity(self.decoys.len()); - while self.decoys.len() != 0 { + // Sort the inputs, as expected + let mut sorted = Vec::with_capacity(self.clsags.len()); + while self.clsags.len() != 0 { sorted.push(( + images.swap_remove(0), self.signable.inputs.swap_remove(0), self.decoys.swap_remove(0), - self.images.swap_remove(0), self.inputs.swap_remove(0), self.clsags.swap_remove(0), commitments.swap_remove(0) )); } - sorted.sort_by(|x, y| x.2.compress().to_bytes().cmp(&y.2.compress().to_bytes()).reverse()); + sorted.sort_by(|x, y| key_image_sort(&x.0, &y.0)); let mut rng = ChaCha12Rng::from_seed(self.transcript.rng_seed(b"pseudo_out_masks")); let mut sum_pseudo_outs = Scalar::zero(); @@ -265,7 +295,7 @@ impl StateMachine for TransactionMachine { let mut mask = random_scalar(&mut rng); if sorted.len() == 0 { - mask = self.output_masks.unwrap() - sum_pseudo_outs; + mask = output_masks - sum_pseudo_outs; } else { sum_pseudo_outs += mask; } @@ -273,16 +303,16 @@ impl StateMachine for TransactionMachine { tx.prefix.inputs.push( Input::ToKey { amount: 0, - key_offsets: value.1.offsets.clone(), - key_image: value.2 + key_offsets: 
value.2.offsets.clone(), + key_image: value.0 } ); *value.3.write().unwrap() = Some( ClsagDetails::new( ClsagInput::new( - value.0.commitment, - value.1 + value.1.commitment, + value.2 ).map_err(|_| panic!("Signing an input which isn't present in the ring we created for it"))?, mask ) @@ -293,30 +323,31 @@ impl StateMachine for TransactionMachine { } let msg = tx.signature_hash(); - self.tx = Some(tx); // Iterate over each CLSAG calling sign let mut serialized = Vec::with_capacity(self.clsags.len() * 32); - for clsag in self.clsags.iter_mut() { - serialized.extend(&clsag.sign(commitments.remove(0), &msg)?); - } + let clsags = self.clsags.drain(..).map(|clsag| { + let (clsag, share) = clsag.sign(commitments.remove(0), &msg)?; + serialized.extend(&share); + Ok(clsag) + }).collect::>()?; - Ok(serialized) + Ok((TransactionSignatureMachine { tx, clsags }, serialized)) } +} - fn complete(&mut self, shares: HashMap>) -> Result { - if self.state() != State::Signed { - Err(FrostError::InvalidSignTransition(State::Signed, self.state()))?; - } - - let mut tx = self.tx.take().unwrap(); +impl SignatureMachine for TransactionSignatureMachine { + fn complete(self, mut shares: HashMap>) -> Result { + let mut tx = self.tx; match tx.rct_signatures.prunable { RctPrunable::Null => panic!("Signing for RctPrunable::Null"), RctPrunable::Clsag { ref mut clsags, ref mut pseudo_outs, .. } => { - for (c, clsag) in self.clsags.iter_mut().enumerate() { - let (clsag, pseudo_out) = clsag.complete(shares.iter().map( - |(l, shares)| (*l, shares[(c * 32) .. ((c + 1) * 32)].to_vec()) - ).collect::>())?; + for clsag in self.clsags { + let (clsag, pseudo_out) = clsag.complete( + shares.iter_mut().map( + |(l, shares)| (*l, shares.drain(.. 
32).collect()) + ).collect::>() + )?; clsags.push(clsag); pseudo_outs.push(pseudo_out); } @@ -324,12 +355,4 @@ impl StateMachine for TransactionMachine { } Ok(tx) } - - fn multisig_params(&self) -> MultisigParams { - self.clsags[0].multisig_params() - } - - fn state(&self) -> State { - self.clsags[0].state() - } } diff --git a/crypto/frost/src/key_gen.rs b/crypto/frost/src/key_gen.rs index fd5f13bb..061260c6 100644 --- a/crypto/frost/src/key_gen.rs +++ b/crypto/frost/src/key_gen.rs @@ -1,5 +1,4 @@ -use core::fmt; -use std::collections::HashMap; +use std::{marker::PhantomData, collections::HashMap}; use rand_core::{RngCore, CryptoRng}; @@ -271,100 +270,76 @@ fn complete_r2( ) } -/// State of a Key Generation machine -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub enum State { - Fresh, - GeneratedCoefficients, - GeneratedSecretShares, - Complete, -} - -impl fmt::Display for State { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// State machine which manages key generation -#[allow(non_snake_case)] -pub struct StateMachine { +pub struct KeyGenMachine { params: MultisigParams, context: String, - state: State, - coefficients: Option>, - our_commitments: Option>, - secret: Option, - commitments: Option>> + _curve: PhantomData, } -impl StateMachine { +pub struct SecretShareMachine { + params: MultisigParams, + context: String, + coefficients: Vec, + our_commitments: Vec, +} + +pub struct KeyMachine { + params: MultisigParams, + secret: C::F, + commitments: HashMap>, +} + +impl KeyGenMachine { /// Creates a new machine to generate a key for the specified curve in the specified multisig // The context string must be unique among multisigs - pub fn new(params: MultisigParams, context: String) -> StateMachine { - StateMachine { - params, - context, - state: State::Fresh, - coefficients: None, - our_commitments: None, - secret: None, - commitments: None - } + pub fn new(params: MultisigParams, context: String) -> 
KeyGenMachine { + KeyGenMachine { params, context, _curve: PhantomData } } /// Start generating a key according to the FROST DKG spec /// Returns a serialized list of commitments to be sent to all parties over an authenticated /// channel. If any party submits multiple sets of commitments, they MUST be treated as malicious pub fn generate_coefficients( - &mut self, + self, rng: &mut R - ) -> Result, FrostError> { - if self.state != State::Fresh { - Err(FrostError::InvalidKeyGenTransition(State::Fresh, self.state))?; - } - - let (coefficients, serialized) = generate_key_r1::( - rng, - &self.params, - &self.context, - ); - - self.coefficients = Some(coefficients); - self.our_commitments = Some(serialized.clone()); - self.state = State::GeneratedCoefficients; - Ok(serialized) + ) -> (SecretShareMachine, Vec) { + let (coefficients, serialized) = generate_key_r1::(rng, &self.params, &self.context); + ( + SecretShareMachine { + params: self.params, + context: self.context, + coefficients, + our_commitments: serialized.clone() + }, + serialized, + ) } +} +impl SecretShareMachine { /// Continue generating a key /// Takes in everyone else's commitments, which are expected to be in a Vec where participant /// index = Vec index. An empty vector is expected at index 0 to allow for this. An empty vector /// is also expected at index i which is locally handled. 
Returns a byte vector representing a /// secret share for each other participant which should be encrypted before sending pub fn generate_secret_shares( - &mut self, + self, rng: &mut R, commitments: HashMap>, - ) -> Result>, FrostError> { - if self.state != State::GeneratedCoefficients { - Err(FrostError::InvalidKeyGenTransition(State::GeneratedCoefficients, self.state))?; - } - + ) -> Result<(KeyMachine, HashMap>), FrostError> { let (secret, commitments, shares) = generate_key_r2::( rng, &self.params, &self.context, - self.coefficients.take().unwrap(), - self.our_commitments.take().unwrap(), + self.coefficients, + self.our_commitments, commitments, )?; - - self.secret = Some(secret); - self.commitments = Some(commitments); - self.state = State::GeneratedSecretShares; - Ok(shares) + Ok((KeyMachine { params: self.params, secret, commitments }, shares)) } +} +impl KeyMachine { /// Complete key generation /// Takes in everyone elses' shares submitted to us as a Vec, expecting participant index = /// Vec index with an empty vector at index 0 and index i. 
Returns a byte vector representing the @@ -372,31 +347,10 @@ impl StateMachine { /// must report completion without issue before this key can be considered usable, yet you should /// wait for all participants to report as such pub fn complete( - &mut self, + self, rng: &mut R, shares: HashMap>, ) -> Result, FrostError> { - if self.state != State::GeneratedSecretShares { - Err(FrostError::InvalidKeyGenTransition(State::GeneratedSecretShares, self.state))?; - } - - let keys = complete_r2( - rng, - self.params, - self.secret.take().unwrap(), - self.commitments.take().unwrap(), - shares, - )?; - - self.state = State::Complete; - Ok(keys) - } - - pub fn params(&self) -> MultisigParams { - self.params.clone() - } - - pub fn state(&self) -> State { - self.state + complete_r2(rng, self.params, self.secret, self.commitments, shares) } } diff --git a/crypto/frost/src/lib.rs b/crypto/frost/src/lib.rs index cd5d8ee2..f3b8b2bd 100644 --- a/crypto/frost/src/lib.rs +++ b/crypto/frost/src/lib.rs @@ -181,11 +181,6 @@ pub enum FrostError { InvalidProofOfKnowledge(u16), #[error("invalid share (participant {0})")] InvalidShare(u16), - #[error("invalid key generation state machine transition (expected {0}, was {1})")] - InvalidKeyGenTransition(key_gen::State, key_gen::State), - - #[error("invalid sign state machine transition (expected {0}, was {1})")] - InvalidSignTransition(sign::State, sign::State), #[error("internal error ({0})")] InternalError(String), diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index 987cec0e..289165aa 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -236,31 +236,21 @@ fn complete>( ) } -/// State of a Sign machine -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub enum State { - Fresh, - Preprocessed, - Signed, - Complete, -} - -impl fmt::Display for State { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -pub trait StateMachine { +pub trait PreprocessMachine { type 
Signature: Clone + PartialEq + fmt::Debug; + type SignMachine: SignMachine; /// Perform the preprocessing round required in order to sign /// Returns a byte vector which must be transmitted to all parties selected for this signing /// process, over an authenticated channel fn preprocess( - &mut self, + self, rng: &mut R - ) -> Result, FrostError>; + ) -> (Self::SignMachine, Vec); +} + +pub trait SignMachine { + type SignatureMachine: SignatureMachine; /// Sign a message /// Takes in the participant's commitments, which are expected to be in a Vec where participant @@ -268,29 +258,33 @@ pub trait StateMachine { /// index i which is locally handled. Returns a byte vector representing a share of the signature /// for every other participant to receive, over an authenticated channel fn sign( - &mut self, + self, commitments: HashMap>, msg: &[u8], - ) -> Result, FrostError>; + ) -> Result<(Self::SignatureMachine, Vec), FrostError>; +} +pub trait SignatureMachine { /// Complete signing /// Takes in everyone elses' shares submitted to us as a Vec, expecting participant index = /// Vec index with None at index 0 and index i. 
Returns a byte vector representing the serialized /// signature - fn complete(&mut self, shares: HashMap>) -> Result; - - fn multisig_params(&self) -> MultisigParams; - - fn state(&self) -> State; + fn complete(self, shares: HashMap>) -> Result; } /// State machine which manages signing for an arbitrary signature algorithm -#[allow(non_snake_case)] pub struct AlgorithmMachine> { + params: Params +} + +pub struct AlgorithmSignMachine> { params: Params, - state: State, - preprocess: Option>, - sign: Option>, + preprocess: PreprocessPackage, +} + +pub struct AlgorithmSignatureMachine> { + params: Params, + sign: Package, } impl> AlgorithmMachine { @@ -300,85 +294,52 @@ impl> AlgorithmMachine { keys: Arc>, included: &[u16], ) -> Result, FrostError> { - Ok( - AlgorithmMachine { - params: Params::new(algorithm, keys, included)?, - state: State::Fresh, - preprocess: None, - sign: None, - } - ) + Ok(AlgorithmMachine { params: Params::new(algorithm, keys, included)? }) } - pub(crate) fn unsafe_override_preprocess(&mut self, preprocess: PreprocessPackage) { - if self.state != State::Fresh { - // This would be unacceptable, yet this is pub(crate) and explicitly labelled unsafe - // It's solely used in a testing environment, which is how it's justified - Err::<(), _>(FrostError::InvalidSignTransition(State::Fresh, self.state)).unwrap(); - } - self.preprocess = Some(preprocess); - self.state = State::Preprocessed; + pub(crate) fn unsafe_override_preprocess( + self, + preprocess: PreprocessPackage + ) -> (AlgorithmSignMachine, Vec) { + let serialized = preprocess.serialized.clone(); + (AlgorithmSignMachine { params: self.params, preprocess }, serialized) } } -impl> StateMachine for AlgorithmMachine { +impl> PreprocessMachine for AlgorithmMachine { type Signature = A::Signature; + type SignMachine = AlgorithmSignMachine; fn preprocess( - &mut self, + self, rng: &mut R - ) -> Result, FrostError> { - if self.state != State::Fresh { - 
Err(FrostError::InvalidSignTransition(State::Fresh, self.state))?; - } - let preprocess = preprocess::(rng, &mut self.params); + ) -> (Self::SignMachine, Vec) { + let mut params = self.params; + let preprocess = preprocess::(rng, &mut params); let serialized = preprocess.serialized.clone(); - self.preprocess = Some(preprocess); - self.state = State::Preprocessed; - Ok(serialized) - } - - fn sign( - &mut self, - commitments: HashMap>, - msg: &[u8], - ) -> Result, FrostError> { - if self.state != State::Preprocessed { - Err(FrostError::InvalidSignTransition(State::Preprocessed, self.state))?; - } - - let (sign, serialized) = sign_with_share( - &mut self.params, - self.preprocess.take().unwrap(), - commitments, - msg, - )?; - - self.sign = Some(sign); - self.state = State::Signed; - Ok(serialized) - } - - fn complete(&mut self, shares: HashMap>) -> Result { - if self.state != State::Signed { - Err(FrostError::InvalidSignTransition(State::Signed, self.state))?; - } - - let signature = complete( - &self.params, - self.sign.take().unwrap(), - shares, - )?; - - self.state = State::Complete; - Ok(signature) - } - - fn multisig_params(&self) -> MultisigParams { - self.params.multisig_params().clone() - } - - fn state(&self) -> State { - self.state + (AlgorithmSignMachine { params, preprocess }, serialized) + } +} + +impl> SignMachine for AlgorithmSignMachine { + type SignatureMachine = AlgorithmSignatureMachine; + + fn sign( + self, + commitments: HashMap>, + msg: &[u8] + ) -> Result<(Self::SignatureMachine, Vec), FrostError> { + let mut params = self.params; + let (sign, serialized) = sign_with_share(&mut params, self.preprocess, commitments, msg)?; + Ok((AlgorithmSignatureMachine { params, sign }, serialized)) + } +} + +impl< + C: Curve, + A: Algorithm +> SignatureMachine for AlgorithmSignatureMachine { + fn complete(self, shares: HashMap>) -> Result { + complete(&self.params, self.sign, shares) } } diff --git a/crypto/frost/src/tests/mod.rs 
b/crypto/frost/src/tests/mod.rs index 78bc7425..fa45f6f1 100644 --- a/crypto/frost/src/tests/mod.rs +++ b/crypto/frost/src/tests/mod.rs @@ -8,9 +8,9 @@ use crate::{ Curve, MultisigParams, MultisigKeys, lagrange, - key_gen, + key_gen::KeyGenMachine, algorithm::Algorithm, - sign::{StateMachine, AlgorithmMachine} + sign::{PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine} }; // Test suites for public usage @@ -37,49 +37,36 @@ pub fn clone_without( pub fn key_gen( rng: &mut R ) -> HashMap>> { - let mut params = HashMap::new(); let mut machines = HashMap::new(); - let mut commitments = HashMap::new(); for i in 1 ..= PARTICIPANTS { - params.insert( - i, - MultisigParams::new( - THRESHOLD, - PARTICIPANTS, - i - ).unwrap() - ); - machines.insert( - i, - key_gen::StateMachine::::new( - params[&i], - "FROST Test key_gen".to_string() - ) - ); - commitments.insert( - i, - machines.get_mut(&i).unwrap().generate_coefficients(rng).unwrap() + let machine = KeyGenMachine::::new( + MultisigParams::new(THRESHOLD, PARTICIPANTS, i).unwrap(), + "FROST Test key_gen".to_string() ); + let (machine, these_commitments) = machine.generate_coefficients(rng); + machines.insert(i, machine); + commitments.insert(i, these_commitments); } let mut secret_shares = HashMap::new(); - for (l, machine) in machines.iter_mut() { - secret_shares.insert( - *l, + let mut machines = machines.drain().map(|(l, machine)| { + let (machine, shares) = machine.generate_secret_shares( + rng, // clone_without isn't necessary, as this machine's own data will be inserted without // conflict, yet using it ensures the machine's own data is actually inserted as expected - machine.generate_secret_shares(rng, clone_without(&commitments, l)).unwrap() - ); - } + clone_without(&commitments, &l) + ).unwrap(); + secret_shares.insert(l, shares); + (l, machine) + }).collect::>(); let mut verification_shares = None; let mut group_key = None; - let mut keys = HashMap::new(); - for (i, machine) in machines.iter_mut() { 
+ machines.drain().map(|(i, machine)| { let mut our_secret_shares = HashMap::new(); for (l, shares) in &secret_shares { - if i == l { + if i == *l { continue; } our_secret_shares.insert(*l, shares[&i].clone()); @@ -98,10 +85,8 @@ pub fn key_gen( } assert_eq!(group_key.unwrap(), these_keys.group_key()); - keys.insert(*i, Arc::new(these_keys)); - } - - keys + (i, Arc::new(these_keys)) + }).collect::>() } pub fn recover(keys: &HashMap>) -> C::F { @@ -147,27 +132,28 @@ pub fn algorithm_machines>( ).collect() } -pub fn sign( +pub fn sign( rng: &mut R, mut machines: HashMap, msg: &[u8] ) -> M::Signature { let mut commitments = HashMap::new(); - for (i, machine) in machines.iter_mut() { - commitments.insert(*i, machine.preprocess(rng).unwrap()); - } + let mut machines = machines.drain().map(|(i, machine)| { + let (machine, preprocess) = machine.preprocess(rng); + commitments.insert(i, preprocess); + (i, machine) + }).collect::>(); let mut shares = HashMap::new(); - for (i, machine) in machines.iter_mut() { - shares.insert( - *i, - machine.sign(clone_without(&commitments, i), msg).unwrap() - ); - } + let mut machines = machines.drain().map(|(i, machine)| { + let (machine, share) = machine.sign(clone_without(&commitments, &i), msg).unwrap(); + shares.insert(i, share); + (i, machine) + }).collect::>(); let mut signature = None; - for (i, machine) in machines.iter_mut() { - let sig = machine.complete(clone_without(&shares, i)).unwrap(); + for (i, machine) in machines.drain() { + let sig = machine.complete(clone_without(&shares, &i)).unwrap(); if signature.is_none() { signature = Some(sig.clone()); } diff --git a/crypto/frost/src/tests/vectors.rs b/crypto/frost/src/tests/vectors.rs index db46de1f..e0def162 100644 --- a/crypto/frost/src/tests/vectors.rs +++ b/crypto/frost/src/tests/vectors.rs @@ -5,7 +5,7 @@ use rand_core::{RngCore, CryptoRng}; use crate::{ Curve, MultisigKeys, algorithm::{Schnorr, Hram}, - sign::{PreprocessPackage, StateMachine, AlgorithmMachine}, + 
sign::{PreprocessPackage, SignMachine, SignatureMachine, AlgorithmMachine}, tests::{curve::test_curve, schnorr::test_schnorr, recover} }; @@ -92,33 +92,40 @@ pub fn test_with_vectors< let mut commitments = HashMap::new(); let mut c = 0; - for (i, machine) in machines.iter_mut() { + let mut machines = machines.drain(..).map(|(i, machine)| { let nonces = [ C::F_from_slice(&hex::decode(vectors.nonces[c][0]).unwrap()).unwrap(), C::F_from_slice(&hex::decode(vectors.nonces[c][1]).unwrap()).unwrap() ]; + c += 1; let mut serialized = C::G_to_bytes(&(C::GENERATOR * nonces[0])); serialized.extend(&C::G_to_bytes(&(C::GENERATOR * nonces[1]))); - machine.unsafe_override_preprocess( + let (machine, serialized) = machine.unsafe_override_preprocess( PreprocessPackage { nonces, serialized: serialized.clone() } ); - commitments.insert(*i, serialized); - c += 1; - } + commitments.insert(i, serialized); + (i, machine) + }).collect::>(); let mut shares = HashMap::new(); c = 0; - for (i, machine) in machines.iter_mut() { - let share = machine.sign(commitments.clone(), &hex::decode(vectors.msg).unwrap()).unwrap(); - assert_eq!(share, hex::decode(vectors.sig_shares[c]).unwrap()); - shares.insert(*i, share); - c += 1; - } + let mut machines = machines.drain(..).map(|(i, machine)| { + let (machine, share) = machine.sign( + commitments.clone(), + &hex::decode(vectors.msg).unwrap() + ).unwrap(); - for (_, machine) in machines.iter_mut() { + assert_eq!(share, hex::decode(vectors.sig_shares[c]).unwrap()); + c += 1; + + shares.insert(i, share); + (i, machine) + }).collect::>(); + + for (_, machine) in machines.drain() { let sig = machine.complete(shares.clone()).unwrap(); let mut serialized = C::G_to_bytes(&sig.R); serialized.extend(C::F_to_bytes(&sig.s)); diff --git a/processor/src/coins/monero.rs b/processor/src/coins/monero.rs index 55226981..a9757e97 100644 --- a/processor/src/coins/monero.rs +++ b/processor/src/coins/monero.rs @@ -1,7 +1,6 @@ use std::sync::Arc; use 
async_trait::async_trait; -use rand_core::OsRng; use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar}; diff --git a/processor/src/lib.rs b/processor/src/lib.rs index 2a615ea5..d990a4c5 100644 --- a/processor/src/lib.rs +++ b/processor/src/lib.rs @@ -3,7 +3,7 @@ use std::{marker::Send, sync::Arc, collections::HashMap}; use async_trait::async_trait; use thiserror::Error; -use frost::{Curve, FrostError, MultisigKeys, sign::StateMachine}; +use frost::{Curve, FrostError, MultisigKeys, sign::PreprocessMachine}; pub(crate) use monero_serai::frost::Transcript; @@ -57,7 +57,7 @@ pub trait Coin { type Output: Output; type SignableTransaction; - type TransactionMachine: StateMachine; + type TransactionMachine: PreprocessMachine; type Address: Send; diff --git a/processor/src/wallet.rs b/processor/src/wallet.rs index be58a838..cf3e731e 100644 --- a/processor/src/wallet.rs +++ b/processor/src/wallet.rs @@ -4,7 +4,7 @@ use rand_core::OsRng; use transcript::Transcript as TranscriptTrait; -use frost::{Curve, MultisigKeys, sign::StateMachine}; +use frost::{Curve, MultisigKeys, sign::{PreprocessMachine, SignMachine, SignatureMachine}}; use crate::{Transcript, CoinError, SignError, Output, Coin, Network}; @@ -344,17 +344,17 @@ impl Wallet { prepared: C::SignableTransaction, included: Vec ) -> Result<(Vec, Vec<::Id>), SignError> { - let mut attempt = self.coin.attempt_send( + let attempt = self.coin.attempt_send( prepared, &included ).await.map_err(|e| SignError::CoinError(e))?; - let commitments = network.round( - attempt.preprocess(&mut OsRng).unwrap() - ).await.map_err(|e| SignError::NetworkError(e))?; - let shares = network.round( - attempt.sign(commitments, b"").map_err(|e| SignError::FrostError(e))? 
- ).await.map_err(|e| SignError::NetworkError(e))?; + let (attempt, commitments) = attempt.preprocess(&mut OsRng); + let commitments = network.round(commitments).await.map_err(|e| SignError::NetworkError(e))?; + + let (attempt, share) = attempt.sign(commitments, b"").map_err(|e| SignError::FrostError(e))?; + let shares = network.round(share).await.map_err(|e| SignError::NetworkError(e))?; + let tx = attempt.complete(shares).map_err(|e| SignError::FrostError(e))?; self.coin.publish_transaction(&tx).await.map_err(|e| SignError::CoinError(e)) From 1d4018c1ba1f2fcfddfadebce2e68b437fd81f0c Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 24 Jun 2022 08:41:05 -0400 Subject: [PATCH 046/105] Correct when the decoys distribution lock is acquired The existing design maintained a non-Send object across async contexts. --- coins/monero/src/wallet/decoys.rs | 39 ++++++++++++++++++------------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/coins/monero/src/wallet/decoys.rs b/coins/monero/src/wallet/decoys.rs index dbba42f7..9a56eaf3 100644 --- a/coins/monero/src/wallet/decoys.rs +++ b/coins/monero/src/wallet/decoys.rs @@ -27,7 +27,6 @@ async fn select_n( rng: &mut R, rpc: &Rpc, height: usize, - distribution: &Vec, high: u64, per_second: f64, used: &mut HashSet, @@ -56,6 +55,7 @@ async fn select_n( let o = (age * per_second) as u64; if o < high { + let distribution = DISTRIBUTION.lock().unwrap(); let i = distribution.partition_point(|s| *s < (high - 1 - o)); let prev = i.saturating_sub(1); let n = distribution[i] - distribution[prev]; @@ -108,8 +108,6 @@ impl Decoys { height: usize, inputs: &[SpendableOutput] ) -> Result, RpcError> { - let mut distribution = DISTRIBUTION.lock().unwrap(); - // Convert the inputs in question to the raw output data let mut outputs = Vec::with_capacity(inputs.len()); for input in inputs { @@ -119,19 +117,29 @@ impl Decoys { )); } - if distribution.len() <= height { - let from = distribution.len(); - 
distribution.extend(rpc.get_output_distribution(from, height).await?); + let distribution_len = { + let distribution = DISTRIBUTION.lock().unwrap(); + distribution.len() + }; + if distribution_len <= height { + let extension = rpc.get_output_distribution(distribution_len, height).await?; + DISTRIBUTION.lock().unwrap().extend(extension); } - // If asked to use an older height than previously asked, truncate to ensure accuracy - // Should never happen, yet risks desyncing if it did - distribution.truncate(height + 1); // height is inclusive, and 0 is a valid height - let high = distribution[distribution.len() - 1]; - let per_second = { - let blocks = distribution.len().min(BLOCKS_PER_YEAR); - let outputs = high - distribution[distribution.len().saturating_sub(blocks + 1)]; - (outputs as f64) / ((blocks * BLOCK_TIME) as f64) + let high; + let per_second; + { + let mut distribution = DISTRIBUTION.lock().unwrap(); + // If asked to use an older height than previously asked, truncate to ensure accuracy + // Should never happen, yet risks desyncing if it did + distribution.truncate(height + 1); // height is inclusive, and 0 is a valid height + + high = distribution[distribution.len() - 1]; + per_second = { + let blocks = distribution.len().min(BLOCKS_PER_YEAR); + let outputs = high - distribution[distribution.len().saturating_sub(blocks + 1)]; + (outputs as f64) / ((blocks * BLOCK_TIME) as f64) + }; }; let mut used = HashSet::::new(); @@ -151,7 +159,6 @@ impl Decoys { rng, rpc, height, - &distribution, high, per_second, &mut used, @@ -192,7 +199,7 @@ impl Decoys { // Select new outputs until we have a full sized ring again ring.extend( - select_n(rng, rpc, height, &distribution, high, per_second, &mut used, RING_LEN - ring.len()).await? + select_n(rng, rpc, height, high, per_second, &mut used, RING_LEN - ring.len()).await? 
); ring.sort_by(|a, b| a.0.cmp(&b.0)); } From 03e759b1fde76d2a51b1de3e891536fe82683647 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 24 Jun 2022 08:42:38 -0400 Subject: [PATCH 047/105] Fix DigestTranscript to be secure Collisions were possible depending on static label substrings. Now, labels are prefixed by their length to prevent this from being possible. All variables are also flagged by their type, preventing other potential conflicts. --- crypto/transcript/src/lib.rs | 43 +++++++++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 8 deletions(-) diff --git a/crypto/transcript/src/lib.rs b/crypto/transcript/src/lib.rs index b324cc31..d5e4aa14 100644 --- a/crypto/transcript/src/lib.rs +++ b/crypto/transcript/src/lib.rs @@ -14,6 +14,26 @@ pub trait Transcript { fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32]; } +enum DigestTranscriptMember { + Name, + Domain, + Label, + Value, + Challenge +} + +impl DigestTranscriptMember { + fn as_u8(&self) -> u8 { + match self { + DigestTranscriptMember::Name => 0, + DigestTranscriptMember::Domain => 1, + DigestTranscriptMember::Label => 2, + DigestTranscriptMember::Value => 3, + DigestTranscriptMember::Challenge => 4 + } + } +} + #[derive(Clone, Debug)] pub struct DigestTranscript(Vec, PhantomData); @@ -24,25 +44,32 @@ impl PartialEq for DigestTranscript { } impl DigestTranscript { - pub fn new(label: &'static [u8]) -> Self { - DigestTranscript(label.to_vec(), PhantomData) + fn append(&mut self, kind: DigestTranscriptMember, value: &[u8]) { + self.0.push(kind.as_u8()); + // Assumes messages don't exceed 16 exabytes + self.0.extend(u64::try_from(value.len()).unwrap().to_le_bytes()); + self.0.extend(value); + } + + pub fn new(name: &'static [u8]) -> Self { + let mut res = DigestTranscript(vec![], PhantomData); + res.append(DigestTranscriptMember::Name, name); + res } } impl Transcript for DigestTranscript { fn domain_separate(&mut self, label: &[u8]) { - self.append_message(b"domain", label); + 
self.append(DigestTranscriptMember::Domain, label); } fn append_message(&mut self, label: &'static [u8], message: &[u8]) { - self.0.extend(label); - // Assumes messages don't exceed 16 exabytes - self.0.extend(u64::try_from(message.len()).unwrap().to_le_bytes()); - self.0.extend(message); + self.append(DigestTranscriptMember::Label, label); + self.append(DigestTranscriptMember::Value, message); } fn challenge(&mut self, label: &'static [u8]) -> Vec { - self.0.extend(label); + self.append(DigestTranscriptMember::Challenge, label); D::new().chain_update(&self.0).finalize().to_vec() } From 7ee9581d67e3c556ba3fa73778dc8870739633c5 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 24 Jun 2022 08:44:12 -0400 Subject: [PATCH 048/105] Use a macro to generate the Secp256k1/P-256 curves --- crypto/frost/Cargo.toml | 4 +- crypto/frost/src/curves/kp256.rs | 249 ++++++++++++------------ crypto/frost/src/tests/literal/kp256.rs | 14 +- 3 files changed, 133 insertions(+), 134 deletions(-) diff --git a/crypto/frost/Cargo.toml b/crypto/frost/Cargo.toml index 80ac587d..536f5c53 100644 --- a/crypto/frost/Cargo.toml +++ b/crypto/frost/Cargo.toml @@ -36,8 +36,8 @@ dalek-ff-group = { path = "../dalek-ff-group" } [features] curves = ["sha2"] # All officially denoted curves use the SHA2 family of hashes kp256 = ["elliptic-curve", "curves"] -p256 = ["dep:p256", "kp256"] -k256 = ["dep:k256", "kp256"] +p256 = ["kp256", "dep:p256"] +secp256k1 = ["kp256", "k256"] dalek = ["curves", "dalek-ff-group"] ed25519 = ["dalek"] ristretto = ["dalek"] diff --git a/crypto/frost/src/curves/kp256.rs b/crypto/frost/src/curves/kp256.rs index 35e466e2..c7568708 100644 --- a/crypto/frost/src/curves/kp256.rs +++ b/crypto/frost/src/curves/kp256.rs @@ -1,4 +1,4 @@ -use core::{marker::PhantomData, convert::TryInto}; +use core::convert::TryInto; use rand_core::{RngCore, CryptoRng}; @@ -8,146 +8,145 @@ use group::{ff::{Field, PrimeField}, Group, GroupEncoding}; use elliptic_curve::{bigint::{Encoding, U384}, 
hash2curve::{Expander, ExpandMsg, ExpandMsgXmd}}; -use crate::{CurveError, Curve}; -#[cfg(feature = "p256")] -use crate::algorithm::Hram; +use crate::{curves::{CurveError, Curve}, algorithm::Hram}; -#[allow(non_snake_case)] -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct KP256 { - _G: PhantomData -} +macro_rules! kp_curve { + ( + $lib: ident, + $Curve: ident, + $Hram: ident, -pub(crate) trait KP256Instance { - const CONTEXT: &'static [u8]; - const ID: &'static [u8]; - const GENERATOR: G; -} + $ID: literal, + $CONTEXT: literal + ) => { + #[derive(Clone, Copy, PartialEq, Eq, Debug)] + pub struct $Curve; + impl Curve for $Curve { + type F = $lib::Scalar; + type G = $lib::ProjectivePoint; + type T = $lib::ProjectivePoint; -#[cfg(feature = "p256")] -pub type P256 = KP256; -#[cfg(feature = "p256")] -impl KP256Instance for P256 { - const CONTEXT: &'static [u8] = b"FROST-P256-SHA256-v5"; - const ID: &'static [u8] = b"P-256"; - const GENERATOR: p256::ProjectivePoint = p256::ProjectivePoint::GENERATOR; -} + const ID: &'static [u8] = $ID; -#[cfg(feature = "k256")] -pub type K256 = KP256; -#[cfg(feature = "k256")] -impl KP256Instance for K256 { - const CONTEXT: &'static [u8] = b"FROST-secp256k1-SHA256-v5"; - const ID: &'static [u8] = b"secp256k1"; - const GENERATOR: k256::ProjectivePoint = k256::ProjectivePoint::GENERATOR; -} + const GENERATOR: Self::G = $lib::ProjectivePoint::GENERATOR; + const GENERATOR_TABLE: Self::G = $lib::ProjectivePoint::GENERATOR; -impl Curve for KP256 where - KP256: KP256Instance, - G::Scalar: PrimeField, - ::Repr: From<[u8; 32]> + AsRef<[u8]>, - G::Repr: From<[u8; 33]> + AsRef<[u8]> { - type F = G::Scalar; - type G = G; - type T = G; + const LITTLE_ENDIAN: bool = false; - const ID: &'static [u8] = >::ID; + fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F { + let mut seed = vec![0; 32]; + rng.fill_bytes(&mut seed); + seed.extend(secret.to_bytes()); + Self::hash_to_F(&[$CONTEXT as &[u8], b"nonce"].concat(), &seed) + } - const 
GENERATOR: Self::G = >::GENERATOR; - const GENERATOR_TABLE: Self::G = >::GENERATOR; + fn hash_msg(msg: &[u8]) -> Vec { + (&Sha256::new() + .chain($CONTEXT) + .chain(b"digest") + .chain(msg) + .finalize() + ).to_vec() + } - const LITTLE_ENDIAN: bool = false; + fn hash_binding_factor(binding: &[u8]) -> Self::F { + Self::hash_to_F(&[$CONTEXT as &[u8], b"rho"].concat(), binding) + } - fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F { - let mut seed = vec![0; 32]; - rng.fill_bytes(&mut seed); - seed.extend(secret.to_repr().as_ref()); - Self::hash_to_F(&[Self::CONTEXT, b"nonce"].concat(), &seed) - } + fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F { + let mut dst = dst; + let oversize = Sha256::digest([b"H2C-OVERSIZE-DST-", dst].concat()); + if dst.len() > 255 { + dst = &oversize; + } - fn hash_msg(msg: &[u8]) -> Vec { - (&Sha256::new() - .chain(Self::CONTEXT) - .chain(b"digest") - .chain(msg) - .finalize() - ).to_vec() - } + // While one of these two libraries does support directly hashing to the Scalar field, the + // other doesn't. While that's probably an oversight, this is a universally working method + let mut modulus = vec![0; 16]; + modulus.extend((Self::F::zero() - Self::F::one()).to_bytes()); + let modulus = U384::from_be_slice(&modulus).wrapping_add(&U384::ONE); + Self::F_from_slice( + &U384::from_be_slice(&{ + let mut bytes = [0; 48]; + ExpandMsgXmd::::expand_message( + &[msg], + dst, + 48 + ).unwrap().fill_bytes(&mut bytes); + bytes + }).reduce(&modulus).unwrap().to_be_bytes()[16 ..] 
+ ).unwrap() + } - fn hash_binding_factor(binding: &[u8]) -> Self::F { - Self::hash_to_F(&[Self::CONTEXT, b"rho"].concat(), binding) - } + fn F_len() -> usize { + 32 + } - fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F { - let mut dst = dst; - let oversize = Sha256::digest([b"H2C-OVERSIZE-DST-", dst].concat()); - if dst.len() > 255 { - dst = &oversize; + fn G_len() -> usize { + 33 + } + + fn F_from_slice(slice: &[u8]) -> Result { + let bytes: [u8; 32] = slice.try_into() + .map_err(|_| CurveError::InvalidLength(32, slice.len()))?; + + let scalar = Self::F::from_repr(bytes.into()); + if scalar.is_none().into() { + Err(CurveError::InvalidScalar)?; + } + + Ok(scalar.unwrap()) + } + + fn G_from_slice(slice: &[u8]) -> Result { + let bytes: [u8; 33] = slice.try_into() + .map_err(|_| CurveError::InvalidLength(33, slice.len()))?; + + let point = Self::G::from_bytes(&bytes.into()); + if point.is_none().into() || point.unwrap().is_identity().into() { + Err(CurveError::InvalidPoint)?; + } + + Ok(point.unwrap()) + } + + fn F_to_bytes(f: &Self::F) -> Vec { + f.to_bytes().to_vec() + } + + fn G_to_bytes(g: &Self::G) -> Vec { + g.to_bytes().to_vec() + } } - let mut modulus = vec![0; 16]; - modulus.extend((Self::F::zero() - Self::F::one()).to_repr().as_ref()); - let modulus = U384::from_be_slice(&modulus).wrapping_add(&U384::ONE); - Self::F_from_slice( - &U384::from_be_slice(&{ - let mut bytes = [0; 48]; - ExpandMsgXmd::::expand_message(&[msg], dst, 48).unwrap().fill_bytes(&mut bytes); - bytes - }).reduce(&modulus).unwrap().to_be_bytes()[16 ..] 
- ).unwrap() - } - - fn F_len() -> usize { - 32 - } - - fn G_len() -> usize { - 33 - } - - fn F_from_slice(slice: &[u8]) -> Result { - let bytes: [u8; 32] = slice.try_into() - .map_err(|_| CurveError::InvalidLength(32, slice.len()))?; - - let scalar = Self::F::from_repr(bytes.into()); - if scalar.is_none().into() { - Err(CurveError::InvalidScalar)?; + #[derive(Clone)] + pub struct $Hram; + impl Hram<$Curve> for $Hram { + #[allow(non_snake_case)] + fn hram(R: &$lib::ProjectivePoint, A: &$lib::ProjectivePoint, m: &[u8]) -> $lib::Scalar { + $Curve::hash_to_F( + &[$CONTEXT as &[u8], b"chal"].concat(), + &[&$Curve::G_to_bytes(R), &$Curve::G_to_bytes(A), m].concat() + ) + } } - - Ok(scalar.unwrap()) - } - - fn G_from_slice(slice: &[u8]) -> Result { - let bytes: [u8; 33] = slice.try_into() - .map_err(|_| CurveError::InvalidLength(33, slice.len()))?; - - let point = Self::G::from_bytes(&bytes.into()); - if point.is_none().into() || point.unwrap().is_identity().into() { - Err(CurveError::InvalidPoint)?; - } - - Ok(point.unwrap()) - } - - fn F_to_bytes(f: &Self::F) -> Vec { - f.to_repr().as_ref().to_vec() - } - - fn G_to_bytes(g: &Self::G) -> Vec { - g.to_bytes().as_ref().to_vec() } } #[cfg(feature = "p256")] -#[derive(Clone)] -pub struct IetfP256Hram; -#[cfg(feature = "p256")] -impl Hram for IetfP256Hram { - #[allow(non_snake_case)] - fn hram(R: &p256::ProjectivePoint, A: &p256::ProjectivePoint, m: &[u8]) -> p256::Scalar { - P256::hash_to_F( - &[P256::CONTEXT, b"chal"].concat(), - &[&P256::G_to_bytes(R), &P256::G_to_bytes(A), m].concat() - ) - } -} +kp_curve!( + p256, + P256, + IetfP256Hram, + b"P-256", + b"FROST-P256-SHA256-v5" +); + +#[cfg(feature = "secp256k1")] +kp_curve!( + k256, + Secp256k1, + NonIetfSecp256k1Hram, + b"secp256k1", + b"FROST-secp256k1-SHA256-v5" +); diff --git a/crypto/frost/src/tests/literal/kp256.rs b/crypto/frost/src/tests/literal/kp256.rs index 8751fbe2..07db5073 100644 --- a/crypto/frost/src/tests/literal/kp256.rs +++ 
b/crypto/frost/src/tests/literal/kp256.rs @@ -1,20 +1,20 @@ use rand::rngs::OsRng; -#[cfg(feature = "k256")] +#[cfg(feature = "secp256k1")] use crate::tests::{curve::test_curve, schnorr::test_schnorr}; -#[cfg(feature = "k256")] -use crate::curves::kp256::K256; +#[cfg(feature = "secp256k1")] +use crate::curves::kp256::Secp256k1; #[cfg(feature = "p256")] use crate::tests::vectors::{Vectors, test_with_vectors}; #[cfg(feature = "p256")] use crate::curves::kp256::{P256, IetfP256Hram}; -#[cfg(feature = "k256")] +#[cfg(feature = "secp256k1")] #[test] -fn k256_not_ietf() { - test_curve::<_, K256>(&mut OsRng); - test_schnorr::<_, K256>(&mut OsRng); +fn secp256k1_non_ietf() { + test_curve::<_, Secp256k1>(&mut OsRng); + test_schnorr::<_, Secp256k1>(&mut OsRng); } #[cfg(feature = "p256")] From eb94abb81aceed79498da5ae90e88eac5335a026 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 24 Jun 2022 18:43:32 -0400 Subject: [PATCH 049/105] Have DigestTranscript update its digest instead of maintaining a Vec --- crypto/transcript/src/lib.rs | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/crypto/transcript/src/lib.rs b/crypto/transcript/src/lib.rs index d5e4aa14..54299c94 100644 --- a/crypto/transcript/src/lib.rs +++ b/crypto/transcript/src/lib.rs @@ -35,30 +35,24 @@ impl DigestTranscriptMember { } #[derive(Clone, Debug)] -pub struct DigestTranscript(Vec, PhantomData); +pub struct DigestTranscript(D, PhantomData); -impl PartialEq for DigestTranscript { - fn eq(&self, other: &DigestTranscript) -> bool { - self.0 == other.0 - } -} - -impl DigestTranscript { +impl DigestTranscript { fn append(&mut self, kind: DigestTranscriptMember, value: &[u8]) { - self.0.push(kind.as_u8()); + self.0.update(&[kind.as_u8()]); // Assumes messages don't exceed 16 exabytes - self.0.extend(u64::try_from(value.len()).unwrap().to_le_bytes()); - self.0.extend(value); + self.0.update(u64::try_from(value.len()).unwrap().to_le_bytes()); + self.0.update(value); } pub 
fn new(name: &'static [u8]) -> Self { - let mut res = DigestTranscript(vec![], PhantomData); + let mut res = DigestTranscript(D::new(), PhantomData); res.append(DigestTranscriptMember::Name, name); res } } -impl Transcript for DigestTranscript { +impl Transcript for DigestTranscript { fn domain_separate(&mut self, label: &[u8]) { self.append(DigestTranscriptMember::Domain, label); } @@ -70,7 +64,7 @@ impl Transcript for DigestTranscript { fn challenge(&mut self, label: &'static [u8]) -> Vec { self.append(DigestTranscriptMember::Challenge, label); - D::new().chain_update(&self.0).finalize().to_vec() + self.0.clone().finalize().to_vec() } fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32] { From 963d9eab10b8d5e93f13975f48dc3d9d04c4e6d0 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 24 Jun 2022 18:49:04 -0400 Subject: [PATCH 050/105] Have DigestTranscript require a 32-byte hash Needed to generate RNG seeds without panicking. Mandates at least a 128-bit security level. --- crypto/transcript/src/lib.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/crypto/transcript/src/lib.rs b/crypto/transcript/src/lib.rs index 54299c94..1ea4a7a8 100644 --- a/crypto/transcript/src/lib.rs +++ b/crypto/transcript/src/lib.rs @@ -1,11 +1,11 @@ -use core::{marker::PhantomData, fmt::Debug}; +use core::fmt::Debug; #[cfg(features = "merlin")] mod merlin; #[cfg(features = "merlin")] pub use merlin::MerlinTranscript; -use digest::Digest; +use digest::{typenum::type_operators::IsGreaterOrEqual, consts::U256, Digest}; pub trait Transcript { fn domain_separate(&mut self, label: &'static [u8]); @@ -35,9 +35,9 @@ impl DigestTranscriptMember { } #[derive(Clone, Debug)] -pub struct DigestTranscript(D, PhantomData); +pub struct DigestTranscript(D) where D::OutputSize: IsGreaterOrEqual; -impl DigestTranscript { +impl DigestTranscript where D::OutputSize: IsGreaterOrEqual { fn append(&mut self, kind: DigestTranscriptMember, value: &[u8]) { 
self.0.update(&[kind.as_u8()]); // Assumes messages don't exceed 16 exabytes @@ -46,13 +46,14 @@ impl DigestTranscript { } pub fn new(name: &'static [u8]) -> Self { - let mut res = DigestTranscript(D::new(), PhantomData); + let mut res = DigestTranscript(D::new()); res.append(DigestTranscriptMember::Name, name); res } } -impl Transcript for DigestTranscript { +impl Transcript for DigestTranscript + where D::OutputSize: IsGreaterOrEqual { fn domain_separate(&mut self, label: &[u8]) { self.append(DigestTranscriptMember::Domain, label); } From a46524f0ce8dab5b747ed3f96fb4f32b5d0c499f Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 24 Jun 2022 18:58:24 -0400 Subject: [PATCH 051/105] Supply a RecommendedTranscript type of DT --- coins/monero/Cargo.toml | 2 +- coins/monero/src/frost.rs | 8 +++----- coins/monero/src/ringct/clsag/multisig.rs | 12 ++++++------ coins/monero/src/tests/clsag.rs | 7 +++++-- coins/monero/src/wallet/send/multisig.rs | 10 +++++----- coins/monero/tests/send.rs | 6 ++++-- crypto/transcript/Cargo.toml | 2 ++ crypto/transcript/src/lib.rs | 3 +++ processor/Cargo.toml | 2 +- processor/src/coins/monero.rs | 7 ++++--- processor/src/lib.rs | 4 ++-- processor/src/wallet.rs | 8 ++++---- 12 files changed, 40 insertions(+), 31 deletions(-) diff --git a/coins/monero/Cargo.toml b/coins/monero/Cargo.toml index ae7e82e1..883dcdbb 100644 --- a/coins/monero/Cargo.toml +++ b/coins/monero/Cargo.toml @@ -23,7 +23,7 @@ curve25519-dalek = { version = "3", features = ["std"] } group = { version = "0.12", optional = true } dalek-ff-group = { path = "../../crypto/dalek-ff-group", optional = true } -transcript = { package = "transcript-trait", path = "../../crypto/transcript", optional = true } +transcript = { package = "transcript-trait", path = "../../crypto/transcript", features = ["recommended"], optional = true } frost = { package = "modular-frost", path = "../../crypto/frost", features = ["ed25519"], optional = true } monero = "0.16" diff --git 
a/coins/monero/src/frost.rs b/coins/monero/src/frost.rs index 8b151abc..ef557384 100644 --- a/coins/monero/src/frost.rs +++ b/coins/monero/src/frost.rs @@ -9,15 +9,13 @@ use curve25519_dalek::{ edwards::EdwardsPoint as DPoint }; -use transcript::{Transcript as TranscriptTrait, DigestTranscript}; -use frost::Curve; +use transcript::{Transcript, RecommendedTranscript}; +use frost::curves::Curve; pub use frost::curves::dalek::Ed25519; use dalek_ff_group as dfg; use crate::random_scalar; -pub type Transcript = DigestTranscript::; - #[derive(Clone, Error, Debug)] pub enum MultisigError { #[error("internal error ({0})")] @@ -43,7 +41,7 @@ impl DLEqProof { // the proper order if they want to reach consensus // It'd be a poor API to have CLSAG define a new transcript solely to pass here, just to try to // merge later in some form, when it should instead just merge xH (as it does) - let mut transcript = Transcript::new(b"DLEq Proof"); + let mut transcript = RecommendedTranscript::new(b"DLEq Proof"); // Bit redundant, keeps things consistent transcript.domain_separate(b"DLEq"); // Doesn't include G which is constant, does include H which isn't, even though H manipulation diff --git a/coins/monero/src/ringct/clsag/multisig.rs b/coins/monero/src/ringct/clsag/multisig.rs index 8aaae8f8..dfbd64ad 100644 --- a/coins/monero/src/ringct/clsag/multisig.rs +++ b/coins/monero/src/ringct/clsag/multisig.rs @@ -13,18 +13,18 @@ use curve25519_dalek::{ use group::Group; -use transcript::Transcript as TranscriptTrait; +use transcript::{Transcript, RecommendedTranscript}; use frost::{FrostError, MultisigView, algorithm::Algorithm}; use dalek_ff_group as dfg; use crate::{ hash_to_point, - frost::{Transcript, MultisigError, Ed25519, DLEqProof, read_dleq}, + frost::{MultisigError, Ed25519, DLEqProof, read_dleq}, ringct::clsag::{ClsagInput, Clsag} }; impl ClsagInput { - fn transcript(&self, transcript: &mut T) { + fn transcript(&self, transcript: &mut T) { // Doesn't domain separate as this is 
considered part of the larger CLSAG proof // Ring index @@ -72,7 +72,7 @@ struct Interim { #[allow(non_snake_case)] #[derive(Clone, Debug)] pub struct ClsagMultisig { - transcript: Transcript, + transcript: RecommendedTranscript, H: EdwardsPoint, // Merged here as CLSAG needs it, passing it would be a mess, yet having it beforehand requires a round @@ -87,7 +87,7 @@ pub struct ClsagMultisig { impl ClsagMultisig { pub fn new( - transcript: Transcript, + transcript: RecommendedTranscript, details: Arc>> ) -> Result { Ok( @@ -120,7 +120,7 @@ impl ClsagMultisig { } impl Algorithm for ClsagMultisig { - type Transcript = Transcript; + type Transcript = RecommendedTranscript; type Signature = (Clsag, EdwardsPoint); fn preprocess_addendum( diff --git a/coins/monero/src/tests/clsag.rs b/coins/monero/src/tests/clsag.rs index b5b90d2c..e35c7972 100644 --- a/coins/monero/src/tests/clsag.rs +++ b/coins/monero/src/tests/clsag.rs @@ -5,6 +5,9 @@ use rand::{RngCore, rngs::OsRng}; use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar}; +#[cfg(feature = "multisig")] +use transcript::RecommendedTranscript; + use crate::{ Commitment, random_scalar, generate_key_image, @@ -12,7 +15,7 @@ use crate::{ ringct::clsag::{ClsagInput, Clsag} }; #[cfg(feature = "multisig")] -use crate::{frost::{Ed25519, MultisigError, Transcript}, ringct::clsag::{ClsagDetails, ClsagMultisig}}; +use crate::{frost::{Ed25519, MultisigError}, ringct::clsag::{ClsagDetails, ClsagMultisig}}; #[cfg(feature = "multisig")] use frost::tests::{key_gen, algorithm_machines, sign}; @@ -96,7 +99,7 @@ fn clsag_multisig() -> Result<(), MultisigError> { algorithm_machines( &mut OsRng, ClsagMultisig::new( - Transcript::new(b"Monero Serai CLSAG Test"), + RecommendedTranscript::new(b"Monero Serai CLSAG Test"), Arc::new(RwLock::new(Some( ClsagDetails::new( ClsagInput::new( diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index f03fbf36..ca9db422 100644 --- 
a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -5,7 +5,7 @@ use rand_chacha::ChaCha12Rng; use curve25519_dalek::{traits::Identity, scalar::Scalar, edwards::{EdwardsPoint, CompressedEdwardsY}}; -use transcript::Transcript as TranscriptTrait; +use transcript::{Transcript, RecommendedTranscript}; use frost::{ FrostError, MultisigKeys, sign::{ @@ -15,7 +15,7 @@ use frost::{ }; use crate::{ - frost::{Transcript, Ed25519}, + frost::Ed25519, random_scalar, ringct::{clsag::{ClsagInput, ClsagDetails, ClsagMultisig}, bulletproofs::Bulletproofs, RctPrunable}, transaction::{Input, Transaction}, rpc::Rpc, @@ -26,7 +26,7 @@ pub struct TransactionMachine { signable: SignableTransaction, i: u16, included: Vec, - transcript: Transcript, + transcript: RecommendedTranscript, decoys: Vec, @@ -38,7 +38,7 @@ pub struct TransactionSignMachine { signable: SignableTransaction, i: u16, included: Vec, - transcript: Transcript, + transcript: RecommendedTranscript, decoys: Vec, @@ -58,7 +58,7 @@ impl SignableTransaction { self, rpc: &Rpc, keys: MultisigKeys, - mut transcript: Transcript, + mut transcript: RecommendedTranscript, height: usize, mut included: Vec ) -> Result { diff --git a/coins/monero/tests/send.rs b/coins/monero/tests/send.rs index 9ef558dc..ba05f338 100644 --- a/coins/monero/tests/send.rs +++ b/coins/monero/tests/send.rs @@ -27,7 +27,9 @@ mod rpc; use crate::rpc::{rpc, mine_block}; #[cfg(feature = "multisig")] -use monero_serai::frost::{Transcript, Ed25519}; +use transcript::RecommendedTranscript; +#[cfg(feature = "multisig")] +use monero_serai::frost::Ed25519; lazy_static! 
{ static ref SEQUENTIAL: Mutex<()> = Mutex::new(()); @@ -147,7 +149,7 @@ async fn send_core(test: usize, multisig: bool) { signable.clone().multisig( &rpc, (*keys[&i]).clone(), - Transcript::new(b"Monero Serai Test Transaction"), + RecommendedTranscript::new(b"Monero Serai Test Transaction"), rpc.get_height().await.unwrap() - 10, (1 ..= THRESHOLD).collect::>() ).await.unwrap() diff --git a/crypto/transcript/Cargo.toml b/crypto/transcript/Cargo.toml index 9b099c3b..989db342 100644 --- a/crypto/transcript/Cargo.toml +++ b/crypto/transcript/Cargo.toml @@ -11,7 +11,9 @@ edition = "2021" [dependencies] digest = "0.10" +blake2 = { version = "0.10", optional = true } merlin = { version = "3", optional = true } [features] +recommended = ["blake2"] merlin = ["dep:merlin"] diff --git a/crypto/transcript/src/lib.rs b/crypto/transcript/src/lib.rs index 1ea4a7a8..f01215fb 100644 --- a/crypto/transcript/src/lib.rs +++ b/crypto/transcript/src/lib.rs @@ -74,3 +74,6 @@ impl Transcript for DigestTranscript seed } } + +#[cfg(feature = "recommended")] +pub type RecommendedTranscript = DigestTranscript; diff --git a/processor/Cargo.toml b/processor/Cargo.toml index a791c88d..c22329a3 100644 --- a/processor/Cargo.toml +++ b/processor/Cargo.toml @@ -18,7 +18,7 @@ serde_json = "1.0" curve25519-dalek = { version = "3", features = ["std"] } blake2 = "0.10" -transcript = { package = "transcript-trait", path = "../crypto/transcript" } +transcript = { package = "transcript-trait", path = "../crypto/transcript", features = ["recommended"] } dalek-ff-group = { path = "../crypto/dalek-ff-group" } frost = { package = "modular-frost", path = "../crypto/frost" } diff --git a/processor/src/coins/monero.rs b/processor/src/coins/monero.rs index a9757e97..f7156cb1 100644 --- a/processor/src/coins/monero.rs +++ b/processor/src/coins/monero.rs @@ -5,6 +5,7 @@ use async_trait::async_trait; use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar}; use dalek_ff_group as dfg; +use 
transcript::RecommendedTranscript; use frost::MultisigKeys; use monero::{PublicKey, network::Network, util::address::Address}; @@ -15,7 +16,7 @@ use monero_serai::{ wallet::{Fee, SpendableOutput, SignableTransaction as MSignableTransaction, TransactionMachine} }; -use crate::{Transcript, CoinError, Output as OutputTrait, Coin, view_key}; +use crate::{CoinError, Output as OutputTrait, Coin, view_key}; #[derive(Clone, Debug)] pub struct Output(SpendableOutput); @@ -51,7 +52,7 @@ impl From for Output { #[derive(Debug)] pub struct SignableTransaction( Arc>, - Transcript, + RecommendedTranscript, usize, MSignableTransaction ); @@ -129,7 +130,7 @@ impl Coin for Monero { async fn prepare_send( &self, keys: Arc>, - transcript: Transcript, + transcript: RecommendedTranscript, height: usize, mut inputs: Vec, payments: &[(Address, u64)], diff --git a/processor/src/lib.rs b/processor/src/lib.rs index d990a4c5..678cb288 100644 --- a/processor/src/lib.rs +++ b/processor/src/lib.rs @@ -5,7 +5,7 @@ use thiserror::Error; use frost::{Curve, FrostError, MultisigKeys, sign::PreprocessMachine}; -pub(crate) use monero_serai::frost::Transcript; +use transcript::RecommendedTranscript; mod coins; mod wallet; @@ -80,7 +80,7 @@ pub trait Coin { async fn prepare_send( &self, keys: Arc>, - transcript: Transcript, + transcript: RecommendedTranscript, height: usize, inputs: Vec, payments: &[(Self::Address, u64)], diff --git a/processor/src/wallet.rs b/processor/src/wallet.rs index cf3e731e..e7bdb50e 100644 --- a/processor/src/wallet.rs +++ b/processor/src/wallet.rs @@ -2,11 +2,11 @@ use std::{sync::Arc, collections::HashMap}; use rand_core::OsRng; -use transcript::Transcript as TranscriptTrait; +use transcript::{Transcript, RecommendedTranscript}; use frost::{Curve, MultisigKeys, sign::{PreprocessMachine, SignMachine, SignatureMachine}}; -use crate::{Transcript, CoinError, SignError, Output, Coin, Network}; +use crate::{CoinError, SignError, Output, Coin, Network}; pub struct WalletKeys { keys: 
MultisigKeys, @@ -28,7 +28,7 @@ impl WalletKeys { // function as well, although that degree of influence means key gen is broken already fn bind(&self, chain: &[u8]) -> MultisigKeys { const DST: &[u8] = b"Serai Processor Wallet Chain Bind"; - let mut transcript = Transcript::new(DST); + let mut transcript = RecommendedTranscript::new(DST); transcript.append_message(b"chain", chain); transcript.append_message(b"curve", C::ID); transcript.append_message(b"group_key", &C::G_to_bytes(&self.keys.group_key())); @@ -308,7 +308,7 @@ impl Wallet { } // Create the transcript for this transaction - let mut transcript = Transcript::new(b"Serai Processor Wallet Send"); + let mut transcript = RecommendedTranscript::new(b"Serai Processor Wallet Send"); transcript.append_message( b"canonical_height", &u64::try_from(canonical).unwrap().to_le_bytes() From 6775fb471efdf6f789394b2a2700cf219752d9b0 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 24 Jun 2022 18:59:28 -0400 Subject: [PATCH 052/105] Version bump Transcript trait Preparation for yanking 0.1.0 which had an insecure format due to lack of length prefixing labels. 
--- crypto/transcript/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/transcript/Cargo.toml b/crypto/transcript/Cargo.toml index 989db342..64445baa 100644 --- a/crypto/transcript/Cargo.toml +++ b/crypto/transcript/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "transcript-trait" -version = "0.1.0" +version = "0.1.1" description = "A simple transcript trait definition" license = "MIT" repository = "https://github.com/serai-dex/serai" From 60254a017175ff15e36683ebe92d4e53355d6b04 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 24 Jun 2022 19:47:19 -0400 Subject: [PATCH 053/105] Reorganize FROST's handling of curves --- coins/monero/src/frost.rs | 3 +- coins/monero/src/ringct/clsag/multisig.rs | 4 +- coins/monero/src/tests/clsag.rs | 4 +- coins/monero/src/wallet/send/multisig.rs | 2 +- coins/monero/tests/send.rs | 9 +- crypto/frost/src/{curves => curve}/dalek.rs | 34 +++--- crypto/frost/src/{curves => curve}/kp256.rs | 2 +- crypto/frost/src/curve/mod.rs | 121 ++++++++++++++++++++ crypto/frost/src/curves/mod.rs | 5 - crypto/frost/src/key_gen.rs | 3 +- crypto/frost/src/lib.rs | 110 +----------------- crypto/frost/src/sign.rs | 2 +- crypto/frost/src/tests/literal/dalek.rs | 6 +- crypto/frost/src/tests/literal/kp256.rs | 4 +- processor/src/coins/monero.rs | 3 +- processor/src/lib.rs | 3 +- processor/src/tests/mod.rs | 2 +- processor/src/wallet.rs | 2 +- 18 files changed, 165 insertions(+), 154 deletions(-) rename crypto/frost/src/{curves => curve}/dalek.rs (98%) rename crypto/frost/src/{curves => curve}/kp256.rs (98%) create mode 100644 crypto/frost/src/curve/mod.rs delete mode 100644 crypto/frost/src/curves/mod.rs diff --git a/coins/monero/src/frost.rs b/coins/monero/src/frost.rs index ef557384..49d4e2da 100644 --- a/coins/monero/src/frost.rs +++ b/coins/monero/src/frost.rs @@ -10,8 +10,7 @@ use curve25519_dalek::{ }; use transcript::{Transcript, RecommendedTranscript}; -use frost::curves::Curve; -pub use 
frost::curves::dalek::Ed25519; +use frost::curve::{Curve, Ed25519}; use dalek_ff_group as dfg; use crate::random_scalar; diff --git a/coins/monero/src/ringct/clsag/multisig.rs b/coins/monero/src/ringct/clsag/multisig.rs index dfbd64ad..dfeeda5f 100644 --- a/coins/monero/src/ringct/clsag/multisig.rs +++ b/coins/monero/src/ringct/clsag/multisig.rs @@ -14,12 +14,12 @@ use curve25519_dalek::{ use group::Group; use transcript::{Transcript, RecommendedTranscript}; -use frost::{FrostError, MultisigView, algorithm::Algorithm}; +use frost::{curve::Ed25519, FrostError, MultisigView, algorithm::Algorithm}; use dalek_ff_group as dfg; use crate::{ hash_to_point, - frost::{MultisigError, Ed25519, DLEqProof, read_dleq}, + frost::{MultisigError, DLEqProof, read_dleq}, ringct::clsag::{ClsagInput, Clsag} }; diff --git a/coins/monero/src/tests/clsag.rs b/coins/monero/src/tests/clsag.rs index e35c7972..66644a50 100644 --- a/coins/monero/src/tests/clsag.rs +++ b/coins/monero/src/tests/clsag.rs @@ -7,6 +7,8 @@ use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar}; #[cfg(feature = "multisig")] use transcript::RecommendedTranscript; +#[cfg(feature = "multisig")] +use frost::curve::Ed25519; use crate::{ Commitment, @@ -15,7 +17,7 @@ use crate::{ ringct::clsag::{ClsagInput, Clsag} }; #[cfg(feature = "multisig")] -use crate::{frost::{Ed25519, MultisigError}, ringct::clsag::{ClsagDetails, ClsagMultisig}}; +use crate::{frost::MultisigError, ringct::clsag::{ClsagDetails, ClsagMultisig}}; #[cfg(feature = "multisig")] use frost::tests::{key_gen, algorithm_machines, sign}; diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index ca9db422..89eaa6d0 100644 --- a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -7,6 +7,7 @@ use curve25519_dalek::{traits::Identity, scalar::Scalar, edwards::{EdwardsPoint, use transcript::{Transcript, RecommendedTranscript}; use frost::{ + curve::Ed25519, 
FrostError, MultisigKeys, sign::{ PreprocessMachine, SignMachine, SignatureMachine, @@ -15,7 +16,6 @@ use frost::{ }; use crate::{ - frost::Ed25519, random_scalar, ringct::{clsag::{ClsagInput, ClsagDetails, ClsagMultisig}, bulletproofs::Bulletproofs, RctPrunable}, transaction::{Input, Transaction}, rpc::Rpc, diff --git a/coins/monero/tests/send.rs b/coins/monero/tests/send.rs index ba05f338..c8543d9f 100644 --- a/coins/monero/tests/send.rs +++ b/coins/monero/tests/send.rs @@ -14,7 +14,9 @@ use curve25519_dalek::constants::ED25519_BASEPOINT_TABLE; #[cfg(feature = "multisig")] use dalek_ff_group::Scalar; #[cfg(feature = "multisig")] -use frost::tests::{THRESHOLD, key_gen, sign}; +use transcript::RecommendedTranscript; +#[cfg(feature = "multisig")] +use frost::{curve::Ed25519, tests::{THRESHOLD, key_gen, sign}}; use monero::{ network::Network, @@ -26,11 +28,6 @@ use monero_serai::{random_scalar, wallet::SignableTransaction}; mod rpc; use crate::rpc::{rpc, mine_block}; -#[cfg(feature = "multisig")] -use transcript::RecommendedTranscript; -#[cfg(feature = "multisig")] -use monero_serai::frost::Ed25519; - lazy_static! { static ref SEQUENTIAL: Mutex<()> = Mutex::new(()); } diff --git a/crypto/frost/src/curves/dalek.rs b/crypto/frost/src/curve/dalek.rs similarity index 98% rename from crypto/frost/src/curves/dalek.rs rename to crypto/frost/src/curve/dalek.rs index 1ba41918..362a9614 100644 --- a/crypto/frost/src/curves/dalek.rs +++ b/crypto/frost/src/curve/dalek.rs @@ -8,7 +8,7 @@ use group::{ff::PrimeField, Group}; use dalek_ff_group::Scalar; -use crate::{CurveError, Curve, algorithm::Hram}; +use crate::{curve::{CurveError, Curve}, algorithm::Hram}; macro_rules! dalek_curve { ( @@ -125,22 +125,6 @@ macro_rules! 
dalek_curve { } } -#[cfg(feature = "ed25519")] -dalek_curve!( - Ed25519, - IetfEd25519Hram, - EdwardsPoint, - CompressedEdwardsY, - EdwardsBasepointTable, - ED25519_BASEPOINT_POINT, - ED25519_BASEPOINT_TABLE, - |point: EdwardsPoint| !bool::from(point.is_torsion_free()), - b"edwards25519", - b"", - b"", - b"", -); - #[cfg(any(test, feature = "ristretto"))] dalek_curve!( Ristretto, @@ -156,3 +140,19 @@ dalek_curve!( b"chal", b"digest", ); + +#[cfg(feature = "ed25519")] +dalek_curve!( + Ed25519, + IetfEd25519Hram, + EdwardsPoint, + CompressedEdwardsY, + EdwardsBasepointTable, + ED25519_BASEPOINT_POINT, + ED25519_BASEPOINT_TABLE, + |point: EdwardsPoint| !bool::from(point.is_torsion_free()), + b"edwards25519", + b"", + b"", + b"", +); diff --git a/crypto/frost/src/curves/kp256.rs b/crypto/frost/src/curve/kp256.rs similarity index 98% rename from crypto/frost/src/curves/kp256.rs rename to crypto/frost/src/curve/kp256.rs index c7568708..1b762978 100644 --- a/crypto/frost/src/curves/kp256.rs +++ b/crypto/frost/src/curve/kp256.rs @@ -8,7 +8,7 @@ use group::{ff::{Field, PrimeField}, Group, GroupEncoding}; use elliptic_curve::{bigint::{Encoding, U384}, hash2curve::{Expander, ExpandMsg, ExpandMsgXmd}}; -use crate::{curves::{CurveError, Curve}, algorithm::Hram}; +use crate::{curve::{CurveError, Curve}, algorithm::Hram}; macro_rules! 
kp_curve { ( diff --git a/crypto/frost/src/curve/mod.rs b/crypto/frost/src/curve/mod.rs new file mode 100644 index 00000000..f6ad5cf9 --- /dev/null +++ b/crypto/frost/src/curve/mod.rs @@ -0,0 +1,121 @@ +use core::{ops::Mul, fmt::Debug}; + +use thiserror::Error; + +use rand_core::{RngCore, CryptoRng}; + +use group::{ff::PrimeField, Group, GroupOps}; + +#[cfg(any(test, feature = "dalek"))] +mod dalek; +#[cfg(any(test, feature = "ristretto"))] +pub use dalek::{Ristretto, IetfRistrettoHram}; +#[cfg(feature = "ed25519")] +pub use dalek::{Ed25519, IetfEd25519Hram}; + +#[cfg(feature = "kp256")] +mod kp256; +#[cfg(feature = "secp256k1")] +pub use kp256::{Secp256k1, NonIetfSecp256k1Hram}; +#[cfg(feature = "p256")] +pub use kp256::{P256, IetfP256Hram}; + +/// Set of errors for curve-related operations, namely encoding and decoding +#[derive(Clone, Error, Debug)] +pub enum CurveError { + #[error("invalid length for data (expected {0}, got {0})")] + InvalidLength(usize, usize), + #[error("invalid scalar")] + InvalidScalar, + #[error("invalid point")] + InvalidPoint, +} + +/// Unified trait to manage a field/group +// This should be moved into its own crate if the need for generic cryptography over ff/group +// continues, which is the exact reason ff/group exists (to provide a generic interface) +// elliptic-curve exists, yet it doesn't really serve the same role, nor does it use &[u8]/Vec +// It uses GenericArray which will hopefully be deprecated as Rust evolves and doesn't offer enough +// advantages in the modern day to be worth the hassle -- Kayaba +pub trait Curve: Clone + Copy + PartialEq + Eq + Debug { + /// Scalar field element type + // This is available via G::Scalar yet `C::G::Scalar` is ambiguous, forcing horrific accesses + type F: PrimeField; + /// Group element type + type G: Group + GroupOps; + /// Precomputed table type + type T: Mul; + + /// ID for this curve + const ID: &'static [u8]; + + /// Generator for the group + // While group does provide this in its 
API, privacy coins will want to use a custom basepoint + const GENERATOR: Self::G; + + /// Table for the generator for the group + /// If there isn't a precomputed table available, the generator itself should be used + const GENERATOR_TABLE: Self::T; + + /// If little endian is used for the scalar field's Repr + const LITTLE_ENDIAN: bool; + + /// Securely generate a random nonce. H4 from the IETF draft + fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F; + + /// Hash the message for the binding factor. H3 from the IETF draft + // This doesn't actually need to be part of Curve as it does nothing with the curve + // This also solely relates to FROST and with a proper Algorithm/HRAM, all projects using + // aggregatable signatures over this curve will work without issue + // It is kept here as Curve + H{1, 2, 3} is effectively a ciphersuite according to the IETF draft + // and moving it to Schnorr would force all of them into being ciphersuite-specific + // H2 is left to the Schnorr Algorithm as H2 is the H used in HRAM, which Schnorr further + // modularizes + fn hash_msg(msg: &[u8]) -> Vec; + + /// Hash the commitments and message to calculate the binding factor. H1 from the IETF draft + fn hash_binding_factor(binding: &[u8]) -> Self::F; + + // The following methods would optimally be F:: and G:: yet developers can't control F/G + // They can control a trait they pass into this library + + /// Field element from hash. 
Used during key gen and by other crates under Serai as a general + /// utility + // Not parameterized by Digest as it's fine for it to use its own hash function as relevant to + // hash_msg and hash_binding_factor + #[allow(non_snake_case)] + fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F; + + /// Constant size of a serialized scalar field element + // The alternative way to grab this would be either serializing a junk element and getting its + // length or doing a naive division of its BITS property by 8 and assuming a lack of padding + #[allow(non_snake_case)] + fn F_len() -> usize; + + /// Constant size of a serialized group element + // We could grab the serialization as described above yet a naive developer may use a + // non-constant size encoding, proving yet another reason to force this to be a provided constant + // A naive developer could still provide a constant for a variable length encoding, yet at least + // that is on them + #[allow(non_snake_case)] + fn G_len() -> usize; + + /// Field element from slice. Preferred to be canonical yet does not have to be + // Required due to the lack of standardized encoding functions provided by ff/group + // While they do technically exist, their usage of Self::Repr breaks all potential library usage + // without helper functions like this + #[allow(non_snake_case)] + fn F_from_slice(slice: &[u8]) -> Result; + + /// Group element from slice. 
Must require canonicity or risks differing binding factors + #[allow(non_snake_case)] + fn G_from_slice(slice: &[u8]) -> Result; + + /// Obtain a vector of the byte encoding of F + #[allow(non_snake_case)] + fn F_to_bytes(f: &Self::F) -> Vec; + + /// Obtain a vector of the byte encoding of G + #[allow(non_snake_case)] + fn G_to_bytes(g: &Self::G) -> Vec; +} diff --git a/crypto/frost/src/curves/mod.rs b/crypto/frost/src/curves/mod.rs deleted file mode 100644 index 3742e1f9..00000000 --- a/crypto/frost/src/curves/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -#[cfg(any(test, feature = "dalek"))] -pub mod dalek; - -#[cfg(feature = "kp256")] -pub mod kp256; diff --git a/crypto/frost/src/key_gen.rs b/crypto/frost/src/key_gen.rs index 061260c6..f48a82fd 100644 --- a/crypto/frost/src/key_gen.rs +++ b/crypto/frost/src/key_gen.rs @@ -7,7 +7,8 @@ use group::ff::{Field, PrimeField}; use multiexp::{multiexp_vartime, BatchVerifier}; use crate::{ - Curve, MultisigParams, MultisigKeys, FrostError, + curve::Curve, + FrostError, MultisigParams, MultisigKeys, schnorr::{self, SchnorrSignature}, validate_map }; diff --git a/crypto/frost/src/lib.rs b/crypto/frost/src/lib.rs index f3b8b2bd..2e53b723 100644 --- a/crypto/frost/src/lib.rs +++ b/crypto/frost/src/lib.rs @@ -1,122 +1,20 @@ -use core::{ops::Mul, fmt::Debug}; +use core::fmt::Debug; use std::collections::HashMap; use thiserror::Error; -use rand_core::{RngCore, CryptoRng}; - -use group::{ff::{Field, PrimeField}, Group, GroupOps}; +use group::ff::{Field, PrimeField}; mod schnorr; +pub mod curve; +use curve::Curve; pub mod key_gen; pub mod algorithm; pub mod sign; -#[cfg(any(test, feature = "curves"))] -pub mod curves; pub mod tests; -/// Set of errors for curve-related operations, namely encoding and decoding -#[derive(Clone, Error, Debug)] -pub enum CurveError { - #[error("invalid length for data (expected {0}, got {0})")] - InvalidLength(usize, usize), - #[error("invalid scalar")] - InvalidScalar, - #[error("invalid point")] - 
InvalidPoint, -} - -/// Unified trait to manage a field/group -// This should be moved into its own crate if the need for generic cryptography over ff/group -// continues, which is the exact reason ff/group exists (to provide a generic interface) -// elliptic-curve exists, yet it doesn't really serve the same role, nor does it use &[u8]/Vec -// It uses GenericArray which will hopefully be deprecated as Rust evolves and doesn't offer enough -// advantages in the modern day to be worth the hassle -- Kayaba -pub trait Curve: Clone + Copy + PartialEq + Eq + Debug { - /// Scalar field element type - // This is available via G::Scalar yet `C::G::Scalar` is ambiguous, forcing horrific accesses - type F: PrimeField; - /// Group element type - type G: Group + GroupOps; - /// Precomputed table type - type T: Mul; - - /// ID for this curve - const ID: &'static [u8]; - - /// Generator for the group - // While group does provide this in its API, privacy coins will want to use a custom basepoint - const GENERATOR: Self::G; - - /// Table for the generator for the group - /// If there isn't a precomputed table available, the generator itself should be used - const GENERATOR_TABLE: Self::T; - - /// If little endian is used for the scalar field's Repr - const LITTLE_ENDIAN: bool; - - /// Securely generate a random nonce. H4 from the IETF draft - fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F; - - /// Hash the message for the binding factor. 
H3 from the IETF draft - // This doesn't actually need to be part of Curve as it does nothing with the curve - // This also solely relates to FROST and with a proper Algorithm/HRAM, all projects using - // aggregatable signatures over this curve will work without issue - // It is kept here as Curve + H{1, 2, 3} is effectively a ciphersuite according to the IETF draft - // and moving it to Schnorr would force all of them into being ciphersuite-specific - // H2 is left to the Schnorr Algorithm as H2 is the H used in HRAM, which Schnorr further - // modularizes - fn hash_msg(msg: &[u8]) -> Vec; - - /// Hash the commitments and message to calculate the binding factor. H1 from the IETF draft - fn hash_binding_factor(binding: &[u8]) -> Self::F; - - // The following methods would optimally be F:: and G:: yet developers can't control F/G - // They can control a trait they pass into this library - - /// Field element from hash. Used during key gen and by other crates under Serai as a general - /// utility - // Not parameterized by Digest as it's fine for it to use its own hash function as relevant to - // hash_msg and hash_binding_factor - #[allow(non_snake_case)] - fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F; - - /// Constant size of a serialized scalar field element - // The alternative way to grab this would be either serializing a junk element and getting its - // length or doing a naive division of its BITS property by 8 and assuming a lack of padding - #[allow(non_snake_case)] - fn F_len() -> usize; - - /// Constant size of a serialized group element - // We could grab the serialization as described above yet a naive developer may use a - // non-constant size encoding, proving yet another reason to force this to be a provided constant - // A naive developer could still provide a constant for a variable length encoding, yet at least - // that is on them - #[allow(non_snake_case)] - fn G_len() -> usize; - - /// Field element from slice. 
Preferred to be canonical yet does not have to be - // Required due to the lack of standardized encoding functions provided by ff/group - // While they do technically exist, their usage of Self::Repr breaks all potential library usage - // without helper functions like this - #[allow(non_snake_case)] - fn F_from_slice(slice: &[u8]) -> Result; - - /// Group element from slice. Must require canonicity or risks differing binding factors - #[allow(non_snake_case)] - fn G_from_slice(slice: &[u8]) -> Result; - - /// Obtain a vector of the byte encoding of F - #[allow(non_snake_case)] - fn F_to_bytes(f: &Self::F) -> Vec; - - /// Obtain a vector of the byte encoding of G - #[allow(non_snake_case)] - fn G_to_bytes(g: &Self::G) -> Vec; -} - /// Parameters for a multisig // These fields can not be made public as they should be static #[derive(Clone, Copy, PartialEq, Eq, Debug)] diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index 289165aa..8ea0f61c 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -8,7 +8,7 @@ use group::ff::Field; use transcript::Transcript; use crate::{ - Curve, + curve::Curve, FrostError, MultisigParams, MultisigKeys, MultisigView, algorithm::Algorithm, diff --git a/crypto/frost/src/tests/literal/dalek.rs b/crypto/frost/src/tests/literal/dalek.rs index 7cd3e92f..fdcc0c0f 100644 --- a/crypto/frost/src/tests/literal/dalek.rs +++ b/crypto/frost/src/tests/literal/dalek.rs @@ -1,11 +1,11 @@ use rand::rngs::OsRng; -use crate::{curves::dalek, tests::vectors::{Vectors, test_with_vectors}}; +use crate::{curve, tests::vectors::{Vectors, test_with_vectors}}; #[cfg(any(test, feature = "ristretto"))] #[test] fn ristretto_vectors() { - test_with_vectors::<_, dalek::Ristretto, dalek::IetfRistrettoHram>( + test_with_vectors::<_, curve::Ristretto, curve::IetfRistrettoHram>( &mut OsRng, Vectors { threshold: 2, @@ -42,7 +42,7 @@ fn ristretto_vectors() { #[cfg(feature = "ed25519")] #[test] fn ed25519_vectors() { - test_with_vectors::<_, 
dalek::Ed25519, dalek::IetfEd25519Hram>( + test_with_vectors::<_, curve::Ed25519, curve::IetfEd25519Hram>( &mut OsRng, Vectors { threshold: 2, diff --git a/crypto/frost/src/tests/literal/kp256.rs b/crypto/frost/src/tests/literal/kp256.rs index 07db5073..dee20157 100644 --- a/crypto/frost/src/tests/literal/kp256.rs +++ b/crypto/frost/src/tests/literal/kp256.rs @@ -3,12 +3,12 @@ use rand::rngs::OsRng; #[cfg(feature = "secp256k1")] use crate::tests::{curve::test_curve, schnorr::test_schnorr}; #[cfg(feature = "secp256k1")] -use crate::curves::kp256::Secp256k1; +use crate::curve::Secp256k1; #[cfg(feature = "p256")] use crate::tests::vectors::{Vectors, test_with_vectors}; #[cfg(feature = "p256")] -use crate::curves::kp256::{P256, IetfP256Hram}; +use crate::curve::{P256, IetfP256Hram}; #[cfg(feature = "secp256k1")] #[test] diff --git a/processor/src/coins/monero.rs b/processor/src/coins/monero.rs index f7156cb1..06dc2f75 100644 --- a/processor/src/coins/monero.rs +++ b/processor/src/coins/monero.rs @@ -6,11 +6,10 @@ use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar}; use dalek_ff_group as dfg; use transcript::RecommendedTranscript; -use frost::MultisigKeys; +use frost::{curve::Ed25519, MultisigKeys}; use monero::{PublicKey, network::Network, util::address::Address}; use monero_serai::{ - frost::Ed25519, transaction::{Timelock, Transaction}, rpc::Rpc, wallet::{Fee, SpendableOutput, SignableTransaction as MSignableTransaction, TransactionMachine} diff --git a/processor/src/lib.rs b/processor/src/lib.rs index 678cb288..caf92d2f 100644 --- a/processor/src/lib.rs +++ b/processor/src/lib.rs @@ -3,9 +3,8 @@ use std::{marker::Send, sync::Arc, collections::HashMap}; use async_trait::async_trait; use thiserror::Error; -use frost::{Curve, FrostError, MultisigKeys, sign::PreprocessMachine}; - use transcript::RecommendedTranscript; +use frost::{curve::Curve, FrostError, MultisigKeys, sign::PreprocessMachine}; mod coins; mod wallet; diff --git 
a/processor/src/tests/mod.rs b/processor/src/tests/mod.rs index e7214b97..eacb8223 100644 --- a/processor/src/tests/mod.rs +++ b/processor/src/tests/mod.rs @@ -6,7 +6,7 @@ use rand::rngs::OsRng; use group::Group; -use frost::Curve; +use frost::curve::Curve; use crate::{ NetworkError, Network, diff --git a/processor/src/wallet.rs b/processor/src/wallet.rs index e7bdb50e..0680ea53 100644 --- a/processor/src/wallet.rs +++ b/processor/src/wallet.rs @@ -4,7 +4,7 @@ use rand_core::OsRng; use transcript::{Transcript, RecommendedTranscript}; -use frost::{Curve, MultisigKeys, sign::{PreprocessMachine, SignMachine, SignatureMachine}}; +use frost::{curve::Curve, MultisigKeys, sign::{PreprocessMachine, SignMachine, SignatureMachine}}; use crate::{CoinError, SignError, Output, Coin, Network}; From 020d246b8f7e7052a0f58c21b8256a21351dfc2a Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 24 Jun 2022 19:53:41 -0400 Subject: [PATCH 054/105] Reorganize processor's handling of coins --- processor/src/coin/mod.rs | 84 +++++++++++++++++++++ processor/src/{coins => coin}/monero.rs | 2 +- processor/src/coins/mod.rs | 1 - processor/src/lib.rs | 98 +++---------------------- processor/src/tests/mod.rs | 6 +- processor/src/wallet.rs | 2 +- 6 files changed, 99 insertions(+), 94 deletions(-) create mode 100644 processor/src/coin/mod.rs rename processor/src/{coins => coin}/monero.rs (98%) delete mode 100644 processor/src/coins/mod.rs diff --git a/processor/src/coin/mod.rs b/processor/src/coin/mod.rs new file mode 100644 index 00000000..fed65c26 --- /dev/null +++ b/processor/src/coin/mod.rs @@ -0,0 +1,84 @@ +use std::{marker::Send, sync::Arc}; + +use async_trait::async_trait; +use thiserror::Error; + +use transcript::RecommendedTranscript; +use frost::{curve::Curve, MultisigKeys, sign::PreprocessMachine}; + +pub mod monero; +pub use self::monero::Monero; + +#[derive(Clone, Error, Debug)] +pub enum CoinError { + #[error("failed to connect to coin daemon")] + ConnectionError +} + +pub trait 
Output: Sized + Clone { + type Id: AsRef<[u8]>; + + fn id(&self) -> Self::Id; + fn amount(&self) -> u64; + + fn serialize(&self) -> Vec; + fn deserialize(reader: &mut R) -> std::io::Result; +} + +#[async_trait] +pub trait Coin { + type Curve: Curve; + + type Fee: Copy; + type Transaction; + type Block; + + type Output: Output; + type SignableTransaction; + type TransactionMachine: PreprocessMachine; + + type Address: Send; + + const ID: &'static [u8]; + const CONFIRMATIONS: usize; + const MAX_INPUTS: usize; + const MAX_OUTPUTS: usize; // TODO: Decide if this includes change or not + + // Doesn't have to take self, enables some level of caching which is pleasant + fn address(&self, key: ::G) -> Self::Address; + + async fn get_height(&self) -> Result; + async fn get_block(&self, height: usize) -> Result; + async fn get_outputs( + &self, + block: &Self::Block, + key: ::G + ) -> Vec; + + async fn prepare_send( + &self, + keys: Arc>, + transcript: RecommendedTranscript, + height: usize, + inputs: Vec, + payments: &[(Self::Address, u64)], + fee: Self::Fee + ) -> Result; + + async fn attempt_send( + &self, + transaction: Self::SignableTransaction, + included: &[u16] + ) -> Result; + + async fn publish_transaction( + &self, + tx: &Self::Transaction + ) -> Result<(Vec, Vec<::Id>), CoinError>; + + #[cfg(test)] + async fn mine_block(&self, address: Self::Address); + + #[cfg(test)] + async fn test_send(&self, key: Self::Address); +} diff --git a/processor/src/coins/monero.rs b/processor/src/coin/monero.rs similarity index 98% rename from processor/src/coins/monero.rs rename to processor/src/coin/monero.rs index 06dc2f75..80d19a89 100644 --- a/processor/src/coins/monero.rs +++ b/processor/src/coin/monero.rs @@ -15,7 +15,7 @@ use monero_serai::{ wallet::{Fee, SpendableOutput, SignableTransaction as MSignableTransaction, TransactionMachine} }; -use crate::{CoinError, Output as OutputTrait, Coin, view_key}; +use crate::{coin::{CoinError, Output as OutputTrait, Coin}, view_key}; 
#[derive(Clone, Debug)] pub struct Output(SpendableOutput); diff --git a/processor/src/coins/mod.rs b/processor/src/coins/mod.rs deleted file mode 100644 index 3c43a86a..00000000 --- a/processor/src/coins/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod monero; diff --git a/processor/src/lib.rs b/processor/src/lib.rs index caf92d2f..fe427dfe 100644 --- a/processor/src/lib.rs +++ b/processor/src/lib.rs @@ -1,107 +1,33 @@ -use std::{marker::Send, sync::Arc, collections::HashMap}; +use std::{marker::Send, collections::HashMap}; use async_trait::async_trait; use thiserror::Error; -use transcript::RecommendedTranscript; -use frost::{curve::Curve, FrostError, MultisigKeys, sign::PreprocessMachine}; +use frost::{curve::Curve, FrostError}; -mod coins; +mod coin; +use coin::{CoinError, Coin}; mod wallet; #[cfg(test)] mod tests; -#[derive(Clone, Error, Debug)] -pub enum CoinError { - #[error("failed to connect to coin daemon")] - ConnectionError -} - #[derive(Clone, Error, Debug)] pub enum NetworkError {} -#[derive(Clone, Error, Debug)] -pub enum SignError { - #[error("coin had an error {0}")] - CoinError(CoinError), - #[error("network had an error {0}")] - NetworkError(NetworkError), - #[error("FROST had an error {0}")] - FrostError(FrostError) -} - #[async_trait] pub trait Network: Send { async fn round(&mut self, data: Vec) -> Result>, NetworkError>; } -pub trait Output: Sized + Clone { - type Id: AsRef<[u8]>; - - fn id(&self) -> Self::Id; - fn amount(&self) -> u64; - - fn serialize(&self) -> Vec; - fn deserialize(reader: &mut R) -> std::io::Result; -} - -#[async_trait] -pub trait Coin { - type Curve: Curve; - - type Fee: Copy; - type Transaction; - type Block; - - type Output: Output; - type SignableTransaction; - type TransactionMachine: PreprocessMachine; - - type Address: Send; - - const ID: &'static [u8]; - const CONFIRMATIONS: usize; - const MAX_INPUTS: usize; - const MAX_OUTPUTS: usize; // TODO: Decide if this includes change or not - - // Doesn't have to take self, 
enables some level of caching which is pleasant - fn address(&self, key: ::G) -> Self::Address; - - async fn get_height(&self) -> Result; - async fn get_block(&self, height: usize) -> Result; - async fn get_outputs( - &self, - block: &Self::Block, - key: ::G - ) -> Vec; - - async fn prepare_send( - &self, - keys: Arc>, - transcript: RecommendedTranscript, - height: usize, - inputs: Vec, - payments: &[(Self::Address, u64)], - fee: Self::Fee - ) -> Result; - - async fn attempt_send( - &self, - transaction: Self::SignableTransaction, - included: &[u16] - ) -> Result; - - async fn publish_transaction( - &self, - tx: &Self::Transaction - ) -> Result<(Vec, Vec<::Id>), CoinError>; - - #[cfg(test)] - async fn mine_block(&self, address: Self::Address); - - #[cfg(test)] - async fn test_send(&self, key: Self::Address); +#[derive(Clone, Error, Debug)] +pub enum SignError { + #[error("FROST had an error {0}")] + FrostError(FrostError), + #[error("coin had an error {0}")] + CoinError(CoinError), + #[error("network had an error {0}")] + NetworkError(NetworkError) } // Generate a static view key for a given chain in a globally consistent manner diff --git a/processor/src/tests/mod.rs b/processor/src/tests/mod.rs index eacb8223..728b0668 100644 --- a/processor/src/tests/mod.rs +++ b/processor/src/tests/mod.rs @@ -8,11 +8,7 @@ use group::Group; use frost::curve::Curve; -use crate::{ - NetworkError, Network, - Coin, coins::monero::Monero, - wallet::{WalletKeys, MemCoinDb, Wallet} -}; +use crate::{NetworkError, Network, coin::{Coin, Monero}, wallet::{WalletKeys, MemCoinDb, Wallet}}; #[derive(Clone)] struct LocalNetwork { diff --git a/processor/src/wallet.rs b/processor/src/wallet.rs index 0680ea53..6789cb8f 100644 --- a/processor/src/wallet.rs +++ b/processor/src/wallet.rs @@ -6,7 +6,7 @@ use transcript::{Transcript, RecommendedTranscript}; use frost::{curve::Curve, MultisigKeys, sign::{PreprocessMachine, SignMachine, SignatureMachine}}; -use crate::{CoinError, SignError, Output, 
Coin, Network}; +use crate::{coin::{CoinError, Output, Coin}, SignError, Network}; pub struct WalletKeys { keys: MultisigKeys, From 7b70baaa969c65426bf28b48bc121c044c11d30f Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 27 Jun 2022 09:02:21 -0400 Subject: [PATCH 055/105] Rename transcript-trait to flexible-transcript It offers the trait for flexibility, yet it also offers an incredibly competent (and logical) transcript format, along with a Merlin wrapper. --- coins/monero/Cargo.toml | 2 +- crypto/frost/Cargo.toml | 2 +- crypto/transcript/Cargo.toml | 4 ++-- crypto/transcript/README.md | 25 +++++++++++++++++++++++-- processor/Cargo.toml | 2 +- 5 files changed, 28 insertions(+), 7 deletions(-) diff --git a/coins/monero/Cargo.toml b/coins/monero/Cargo.toml index 883dcdbb..d92cd3a0 100644 --- a/coins/monero/Cargo.toml +++ b/coins/monero/Cargo.toml @@ -23,7 +23,7 @@ curve25519-dalek = { version = "3", features = ["std"] } group = { version = "0.12", optional = true } dalek-ff-group = { path = "../../crypto/dalek-ff-group", optional = true } -transcript = { package = "transcript-trait", path = "../../crypto/transcript", features = ["recommended"], optional = true } +transcript = { package = "flexible-transcript", path = "../../crypto/transcript", features = ["recommended"], optional = true } frost = { package = "modular-frost", path = "../../crypto/frost", features = ["ed25519"], optional = true } monero = "0.16" diff --git a/crypto/frost/Cargo.toml b/crypto/frost/Cargo.toml index 536f5c53..73dbc52d 100644 --- a/crypto/frost/Cargo.toml +++ b/crypto/frost/Cargo.toml @@ -23,7 +23,7 @@ p256 = { version = "0.11", features = ["arithmetic", "hash2curve"], optional = t k256 = { version = "0.11", features = ["arithmetic", "hash2curve"], optional = true } dalek-ff-group = { path = "../dalek-ff-group", version = "0.1", optional = true } -transcript = { package = "transcript-trait", path = "../transcript", version = "0.1" } +transcript = { package = "flexible-transcript", 
path = "../transcript", version = "0.1" } multiexp = { path = "../multiexp", version = "0.1", features = ["batch"] } diff --git a/crypto/transcript/Cargo.toml b/crypto/transcript/Cargo.toml index 64445baa..a5d366c2 100644 --- a/crypto/transcript/Cargo.toml +++ b/crypto/transcript/Cargo.toml @@ -1,7 +1,7 @@ [package] -name = "transcript-trait" +name = "flexible-transcript" version = "0.1.1" -description = "A simple transcript trait definition" +description = "A simple transcript trait definition, along with viable options" license = "MIT" repository = "https://github.com/serai-dex/serai" authors = ["Luke Parker "] diff --git a/crypto/transcript/README.md b/crypto/transcript/README.md index 10b007c9..92777a52 100644 --- a/crypto/transcript/README.md +++ b/crypto/transcript/README.md @@ -1,3 +1,24 @@ -# Transcript +# Flexible Transcript -Basic transcript trait with a Merlin wrapper available via the Merlin feature. +Flexible Transcript is a crate offering: +- `Transcript`, a trait offering functions transcripts should implement. +- `DigestTranscript`, a competent transcript format instantiated against a +provided hash function. +- `MerlinTranscript`, a wrapper of `merlin` into the trait (available via the +`merlin` feature). + +The trait was created while working on an IETF draft which defined an incredibly +simple transcript format. Extensions of the protocol would quickly require a +more competent format, yet implementing the one specified was mandatory to meet +the specification. Accordingly, the library implementing the draft defined an +`IetfTranscript`, dropping labels and not allowing successive challenges, yet +thanks to the trait, allowed protocols building on top to provide their own +transcript format as needed. + +`DigestTranscript` takes in any hash function implementing `Digest`, offering a +secure transcript format around it. All items are prefixed by a flag, denoting +their type, and their length. 
+ +`MerlinTranscript` was used to justify the API, and if any issues existed with +`DigestTranscript`, enable a fallback. It was also meant as a way to be +compatible with existing Rust projects using `merlin`. diff --git a/processor/Cargo.toml b/processor/Cargo.toml index c22329a3..131b3106 100644 --- a/processor/Cargo.toml +++ b/processor/Cargo.toml @@ -18,7 +18,7 @@ serde_json = "1.0" curve25519-dalek = { version = "3", features = ["std"] } blake2 = "0.10" -transcript = { package = "transcript-trait", path = "../crypto/transcript", features = ["recommended"] } +transcript = { package = "flexible-transcript", path = "../crypto/transcript", features = ["recommended"] } dalek-ff-group = { path = "../crypto/dalek-ff-group" } frost = { package = "modular-frost", path = "../crypto/frost" } From 7c86e4593a67de1c858ffdd9b6ab796289bea02a Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 28 Jun 2022 00:01:20 -0400 Subject: [PATCH 056/105] Implement Guaranteed Addresses Closes https://github.com/serai-dex/serai/issues/27. monero-rs is now solely used for Extra encoding. 
--- coins/monero/Cargo.toml | 2 + coins/monero/src/tests/address.rs | 45 +++++++ coins/monero/src/tests/mod.rs | 1 + coins/monero/src/wallet/address.rs | 152 +++++++++++++++++++++++ coins/monero/src/wallet/mod.rs | 10 +- coins/monero/src/wallet/scan.rs | 89 ++++++------- coins/monero/src/wallet/send/mod.rs | 45 +++---- coins/monero/src/wallet/send/multisig.rs | 3 +- coins/monero/tests/send.rs | 18 +-- processor/src/coin/mod.rs | 2 +- processor/src/coin/monero.rs | 53 ++++---- processor/src/tests/mod.rs | 8 +- 12 files changed, 311 insertions(+), 117 deletions(-) create mode 100644 coins/monero/src/tests/address.rs create mode 100644 coins/monero/src/wallet/address.rs diff --git a/coins/monero/Cargo.toml b/coins/monero/Cargo.toml index d92cd3a0..b24adce0 100644 --- a/coins/monero/Cargo.toml +++ b/coins/monero/Cargo.toml @@ -7,6 +7,7 @@ authors = ["Luke Parker "] edition = "2021" [dependencies] +hex-literal = "0.3" lazy_static = "1" thiserror = "1" @@ -26,6 +27,7 @@ dalek-ff-group = { path = "../../crypto/dalek-ff-group", optional = true } transcript = { package = "flexible-transcript", path = "../../crypto/transcript", features = ["recommended"], optional = true } frost = { package = "modular-frost", path = "../../crypto/frost", features = ["ed25519"], optional = true } +base58-monero = "1" monero = "0.16" hex = "0.4" diff --git a/coins/monero/src/tests/address.rs b/coins/monero/src/tests/address.rs new file mode 100644 index 00000000..0bda391d --- /dev/null +++ b/coins/monero/src/tests/address.rs @@ -0,0 +1,45 @@ +use hex_literal::hex; + +use crate::wallet::address::{Network, AddressType, Address}; + +const SPEND: [u8; 32] = hex!("f8631661f6ab4e6fda310c797330d86e23a682f20d5bc8cc27b18051191f16d7"); +const VIEW: [u8; 32] = hex!("4a1535063ad1fee2dabbf909d4fd9a873e29541b401f0944754e17c9a41820ce"); + +const STANDARD: &'static str = "4B33mFPMq6mKi7Eiyd5XuyKRVMGVZz1Rqb9ZTyGApXW5d1aT7UBDZ89ewmnWFkzJ5wPd2SFbn313vCT8a4E2Qf4KQH4pNey"; + +const PAYMENT_ID: [u8; 8] = 
hex!("b8963a57855cf73f"); +const INTEGRATED: &'static str = "4Ljin4CrSNHKi7Eiyd5XuyKRVMGVZz1Rqb9ZTyGApXW5d1aT7UBDZ89ewmnWFkzJ5wPd2SFbn313vCT8a4E2Qf4KbaTH6MnpXSn88oBX35"; + +const SUB_SPEND: [u8; 32] = hex!("fe358188b528335ad1cfdc24a22a23988d742c882b6f19a602892eaab3c1b62b"); +const SUB_VIEW: [u8; 32] = hex!("9bc2b464de90d058468522098d5610c5019c45fd1711a9517db1eea7794f5470"); +const SUBADDRESS: &'static str = "8C5zHM5ud8nGC4hC2ULiBLSWx9infi8JUUmWEat4fcTf8J4H38iWYVdFmPCA9UmfLTZxD43RsyKnGEdZkoGij6csDeUnbEB"; + +#[test] +fn standard_address() { + let addr = Address::from_str(STANDARD, Network::Mainnet).unwrap(); + assert_eq!(addr.meta.network, Network::Mainnet); + assert_eq!(addr.meta.kind, AddressType::Standard); + assert_eq!(addr.meta.guaranteed, false); + assert_eq!(addr.spend.compress().to_bytes(), SPEND); + assert_eq!(addr.view.compress().to_bytes(), VIEW); +} + +#[test] +fn integrated_address() { + let addr = Address::from_str(INTEGRATED, Network::Mainnet).unwrap(); + assert_eq!(addr.meta.network, Network::Mainnet); + assert_eq!(addr.meta.kind, AddressType::Integrated(PAYMENT_ID)); + assert_eq!(addr.meta.guaranteed, false); + assert_eq!(addr.spend.compress().to_bytes(), SPEND); + assert_eq!(addr.view.compress().to_bytes(), VIEW); +} + +#[test] +fn subaddress() { + let addr = Address::from_str(SUBADDRESS, Network::Mainnet).unwrap(); + assert_eq!(addr.meta.network, Network::Mainnet); + assert_eq!(addr.meta.kind, AddressType::Subaddress); + assert_eq!(addr.meta.guaranteed, false); + assert_eq!(addr.spend.compress().to_bytes(), SUB_SPEND); + assert_eq!(addr.view.compress().to_bytes(), SUB_VIEW); +} diff --git a/coins/monero/src/tests/mod.rs b/coins/monero/src/tests/mod.rs index b42cbcff..d9b85f0c 100644 --- a/coins/monero/src/tests/mod.rs +++ b/coins/monero/src/tests/mod.rs @@ -1 +1,2 @@ mod clsag; +mod address; diff --git a/coins/monero/src/wallet/address.rs b/coins/monero/src/wallet/address.rs new file mode 100644 index 00000000..329f5435 --- /dev/null +++ 
b/coins/monero/src/wallet/address.rs @@ -0,0 +1,152 @@ +use std::string::ToString; + +use thiserror::Error; + +use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, edwards::{EdwardsPoint, CompressedEdwardsY}}; + +use base58_monero::base58::{encode_check, decode_check}; + +use crate::wallet::ViewPair; + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum Network { + Mainnet, + Testnet, + Stagenet +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum AddressType { + Standard, + Integrated([u8; 8]), + Subaddress +} + +impl AddressType { + fn network_bytes(network: Network) -> (u8, u8, u8) { + match network { + Network::Mainnet => (18, 19, 42), + Network::Testnet => (53, 54, 63), + Network::Stagenet => (24, 25, 36) + } + } +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct AddressMeta { + pub network: Network, + pub kind: AddressType, + pub guaranteed: bool +} + +#[derive(Clone, Error, Debug)] +pub enum AddressError { + #[error("invalid address byte")] + InvalidByte, + #[error("invalid address encoding")] + InvalidEncoding, + #[error("invalid length")] + InvalidLength, + #[error("different network than expected")] + DifferentNetwork, + #[error("invalid key")] + InvalidKey +} + +impl AddressMeta { + fn to_byte(&self) -> u8 { + let bytes = AddressType::network_bytes(self.network); + let byte = match self.kind { + AddressType::Standard => bytes.0, + AddressType::Integrated(_) => bytes.1, + AddressType::Subaddress => bytes.2 + }; + byte | (if self.guaranteed { 1 << 7 } else { 0 }) + } + + // Returns an incomplete type in the case of Integrated addresses + fn from_byte(byte: u8) -> Result { + let actual = byte & 0b01111111; + let guaranteed = (byte >> 7) == 1; + + let mut meta = None; + for network in [Network::Mainnet, Network::Testnet, Network::Stagenet] { + let (standard, integrated, subaddress) = AddressType::network_bytes(network); + if let Some(kind) = match actual { + _ if actual == standard => Some(AddressType::Standard), + _ if actual 
== integrated => Some(AddressType::Integrated([0; 8])), + _ if actual == subaddress => Some(AddressType::Subaddress), + _ => None + } { + meta = Some(AddressMeta { network, kind, guaranteed }); + break; + } + } + + meta.ok_or(AddressError::InvalidByte) + } +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct Address { + pub meta: AddressMeta, + pub spend: EdwardsPoint, + pub view: EdwardsPoint +} + +impl ViewPair { + pub fn address(&self, network: Network, kind: AddressType, guaranteed: bool) -> Address { + Address { + meta: AddressMeta { + network, + kind, + guaranteed + }, + spend: self.spend, + view: &self.view * &ED25519_BASEPOINT_TABLE + } + } +} + +impl ToString for Address { + fn to_string(&self) -> String { + let mut data = vec![self.meta.to_byte()]; + data.extend(self.spend.compress().to_bytes()); + data.extend(self.view.compress().to_bytes()); + if let AddressType::Integrated(id) = self.meta.kind { + data.extend(id); + } + encode_check(&data).unwrap() + } +} + +impl Address { + pub fn from_str(s: &str, network: Network) -> Result { + let raw = decode_check(s).map_err(|_| AddressError::InvalidEncoding)?; + if raw.len() == 1 { + Err(AddressError::InvalidLength)?; + } + + let mut meta = AddressMeta::from_byte(raw[0])?; + if meta.network != network { + Err(AddressError::DifferentNetwork)?; + } + + let len = match meta.kind { + AddressType::Standard | AddressType::Subaddress => 65, + AddressType::Integrated(_) => 73 + }; + if raw.len() != len { + Err(AddressError::InvalidLength)?; + } + + let spend = CompressedEdwardsY(raw[1 .. 33].try_into().unwrap()).decompress().ok_or(AddressError::InvalidKey)?; + let view = CompressedEdwardsY(raw[33 .. 65].try_into().unwrap()).decompress().ok_or(AddressError::InvalidKey)?; + + if let AddressType::Integrated(ref mut payment_id) = meta.kind { + payment_id.copy_from_slice(&raw[65 .. 
73]); + } + + Ok(Address { meta, spend, view }) + } +} diff --git a/coins/monero/src/wallet/mod.rs b/coins/monero/src/wallet/mod.rs index e0287eb4..ca694744 100644 --- a/coins/monero/src/wallet/mod.rs +++ b/coins/monero/src/wallet/mod.rs @@ -6,6 +6,8 @@ use crate::{ transaction::Input }; +pub mod address; + mod scan; pub use scan::SpendableOutput; @@ -23,7 +25,7 @@ fn key_image_sort(x: &EdwardsPoint, y: &EdwardsPoint) -> std::cmp::Ordering { // https://github.com/monero-project/research-lab/issues/103 pub(crate) fn uniqueness(inputs: &[Input]) -> [u8; 32] { - let mut u = b"domain_separator".to_vec(); + let mut u = b"uniqueness".to_vec(); for input in inputs { match input { // If Gen, this should be the only input, making this loop somewhat pointless @@ -63,3 +65,9 @@ pub(crate) fn commitment_mask(shared_key: Scalar) -> Scalar { mask.extend(shared_key.to_bytes()); hash_to_scalar(&mask) } + +#[derive(Clone, Copy)] +pub struct ViewPair { + pub spend: EdwardsPoint, + pub view: Scalar +} diff --git a/coins/monero/src/wallet/scan.rs b/coins/monero/src/wallet/scan.rs index d8feb7da..c813169e 100644 --- a/coins/monero/src/wallet/scan.rs +++ b/coins/monero/src/wallet/scan.rs @@ -12,7 +12,7 @@ use crate::{ Commitment, serialize::{write_varint, read_32, read_scalar, read_point}, transaction::{Timelock, Transaction}, - wallet::{uniqueness, shared_key, amount_decryption, commitment_mask} + wallet::{ViewPair, uniqueness, shared_key, amount_decryption, commitment_mask} }; #[derive(Clone, PartialEq, Debug)] @@ -55,8 +55,8 @@ impl SpendableOutput { impl Transaction { pub fn scan( &self, - view: Scalar, - spend: EdwardsPoint + view: ViewPair, + guaranteed: bool ) -> (Vec, Timelock) { let mut extra = vec![]; write_varint(&u64::try_from(self.prefix.extra.len()).unwrap(), &mut extra).unwrap(); @@ -82,52 +82,53 @@ impl Transaction { for (o, output) in self.prefix.outputs.iter().enumerate() { // TODO: This may be replaceable by pubkeys[o] for pubkey in &pubkeys { + let key_offset = 
shared_key( + Some(uniqueness(&self.prefix.inputs)).filter(|_| guaranteed), + view.view, + pubkey, + o + ); + // P - shared == spend + if (output.key - (&key_offset * &ED25519_BASEPOINT_TABLE)) != view.spend { + continue; + } + + // Since we've found an output to us, get its amount let mut commitment = Commitment::zero(); - // P - shared == spend - let matches = |shared_key| (output.key - (&shared_key * &ED25519_BASEPOINT_TABLE)) == spend; - let test = |shared_key| Some(shared_key).filter(|shared_key| matches(*shared_key)); + // Miner transaction + if output.amount != 0 { + commitment.amount = output.amount; + // Regular transaction + } else { + let amount = match self.rct_signatures.base.ecdh_info.get(o) { + Some(amount) => amount_decryption(*amount, key_offset), + // This should never happen, yet it may be possible with miner transactions? + // Using get just decreases the possibility of a panic and lets us move on in that case + None => break + }; - // Get the traditional shared key and unique shared key, testing if either matches for this output - let traditional = test(shared_key(None, view, pubkey, o)); - let unique = test(shared_key(Some(uniqueness(&self.prefix.inputs)), view, pubkey, o)); - - // If either matches, grab it and decode the amount - if let Some(key_offset) = traditional.or(unique) { - // Miner transaction - if output.amount != 0 { - commitment.amount = output.amount; - // Regular transaction - } else { - let amount = match self.rct_signatures.base.ecdh_info.get(o) { - Some(amount) => amount_decryption(*amount, key_offset), - // This should never happen, yet it may be possible with miner transactions? 
- // Using get just decreases the possibility of a panic and lets us move on in that case - None => continue - }; - - // Rebuild the commitment to verify it - commitment = Commitment::new(commitment_mask(key_offset), amount); - // If this is a malicious commitment, move to the next output - // Any other R value will calculate to a different spend key and are therefore ignorable - if Some(&commitment.calculate()) != self.rct_signatures.base.commitments.get(o) { - break; - } + // Rebuild the commitment to verify it + commitment = Commitment::new(commitment_mask(key_offset), amount); + // If this is a malicious commitment, move to the next output + // Any other R value will calculate to a different spend key and are therefore ignorable + if Some(&commitment.calculate()) != self.rct_signatures.base.commitments.get(o) { + break; } - - if commitment.amount != 0 { - res.push(SpendableOutput { - tx: self.hash(), - o: o.try_into().unwrap(), - key: output.key, - key_offset, - commitment - }); - } - // Break to prevent public keys from being included multiple times, triggering multiple - // inclusions of the same output - break; } + + if commitment.amount != 0 { + res.push(SpendableOutput { + tx: self.hash(), + o: o.try_into().unwrap(), + key: output.key, + key_offset, + commitment + }); + } + // Break to prevent public keys from being included multiple times, triggering multiple + // inclusions of the same output + break; } } diff --git a/coins/monero/src/wallet/send/mod.rs b/coins/monero/src/wallet/send/mod.rs index 7c95d753..cf9ab33f 100644 --- a/coins/monero/src/wallet/send/mod.rs +++ b/coins/monero/src/wallet/send/mod.rs @@ -9,11 +9,7 @@ use curve25519_dalek::{ edwards::EdwardsPoint }; -use monero::{ - consensus::Encodable, - util::{key::PublicKey, address::{AddressType, Address}}, - blockdata::transaction::SubField -}; +use monero::{consensus::Encodable, PublicKey, blockdata::transaction::SubField}; #[cfg(feature = "multisig")] use frost::FrostError; @@ -29,7 +25,10 @@ 
use crate::{ }, transaction::{Input, Output, Timelock, TransactionPrefix, Transaction}, rpc::{Rpc, RpcError}, - wallet::{SpendableOutput, Decoys, key_image_sort, uniqueness, shared_key, commitment_mask, amount_encryption} + wallet::{ + address::{AddressType, Address}, SpendableOutput, Decoys, + key_image_sort, uniqueness, shared_key, commitment_mask, amount_encryption + } }; #[cfg(feature = "multisig")] use crate::frost::MultisigError; @@ -52,23 +51,23 @@ impl SendOutput { fn new( rng: &mut R, unique: [u8; 32], - output: (Address, u64, bool), + output: (Address, u64), o: usize ) -> SendOutput { let r = random_scalar(rng); let shared_key = shared_key( - Some(unique).filter(|_| output.2), + Some(unique).filter(|_| output.0.meta.guaranteed), r, - &output.0.public_view.point.decompress().expect("SendOutput::new requires valid addresses"), + &output.0.view, o ); - let spend = output.0.public_spend.point.decompress().expect("SendOutput::new requires valid addresses"); + let spend = output.0.spend; SendOutput { - R: match output.0.addr_type { + R: match output.0.meta.kind { AddressType::Standard => &r * &ED25519_BASEPOINT_TABLE, - AddressType::SubAddress => &r * spend, - AddressType::Integrated(_) => panic!("SendOutput::new doesn't support Integrated addresses") + AddressType::Integrated(_) => unimplemented!("SendOutput::new doesn't support Integrated addresses"), + AddressType::Subaddress => &r * spend }, dest: ((&shared_key * &ED25519_BASEPOINT_TABLE) + spend), commitment: Commitment::new(commitment_mask(shared_key), output.1), @@ -169,7 +168,7 @@ impl Fee { #[derive(Clone, PartialEq, Debug)] pub struct SignableTransaction { inputs: Vec, - payments: Vec<(Address, u64, bool)>, + payments: Vec<(Address, u64)>, outputs: Vec, fee: u64 } @@ -177,23 +176,16 @@ pub struct SignableTransaction { impl SignableTransaction { pub fn new( inputs: Vec, - payments: Vec<(Address, u64)>, + mut payments: Vec<(Address, u64)>, change_address: Option
, fee_rate: Fee ) -> Result { // Make sure all addresses are valid let test = |addr: Address| { - if !( - addr.public_view.point.decompress().is_some() && - addr.public_spend.point.decompress().is_some() - ) { - Err(TransactionError::InvalidAddress)?; - } - - match addr.addr_type { + match addr.meta.kind { AddressType::Standard => Ok(()), AddressType::Integrated(..) => Err(TransactionError::InvalidAddress), - AddressType::SubAddress => Ok(()) + AddressType::Subaddress => Ok(()) } }; @@ -250,11 +242,8 @@ impl SignableTransaction { Err(TransactionError::TooManyOutputs)?; } - let mut payments = payments.iter().map(|(address, amount)| (*address, *amount, false)).collect::>(); if change { - // Always use a unique key image for the change output - // TODO: Make this a config option - payments.push((change_address.unwrap(), in_amount - out_amount, true)); + payments.push((change_address.unwrap(), in_amount - out_amount)); } Ok( diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index 89eaa6d0..67a39d93 100644 --- a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -94,9 +94,8 @@ impl SignableTransaction { transcript.append_message(b"input_shared_key", &input.key_offset.to_bytes()); } for payment in &self.payments { - transcript.append_message(b"payment_address", &payment.0.as_bytes()); + transcript.append_message(b"payment_address", &payment.0.to_string().as_bytes()); transcript.append_message(b"payment_amount", &payment.1.to_le_bytes()); - transcript.append_message(b"payment_unique", &(if payment.2 { [1] } else { [0] })); } // Sort included before cloning it around diff --git a/coins/monero/tests/send.rs b/coins/monero/tests/send.rs index c8543d9f..a3585ce0 100644 --- a/coins/monero/tests/send.rs +++ b/coins/monero/tests/send.rs @@ -18,12 +18,7 @@ use transcript::RecommendedTranscript; #[cfg(feature = "multisig")] use frost::{curve::Ed25519, tests::{THRESHOLD, key_gen, sign}}; -use 
monero::{ - network::Network, - util::{key::PublicKey, address::Address} -}; - -use monero_serai::{random_scalar, wallet::SignableTransaction}; +use monero_serai::{random_scalar, wallet::{ViewPair, address::{Network, AddressType}, SignableTransaction}}; mod rpc; use crate::rpc::{rpc, mine_block}; @@ -73,11 +68,8 @@ async fn send_core(test: usize, multisig: bool) { } } - let addr = Address::standard( - Network::Mainnet, - PublicKey { point: spend_pub.compress() }, - PublicKey { point: (&view * &ED25519_BASEPOINT_TABLE).compress() } - ); + let view_pair = ViewPair { view, spend: spend_pub }; + let addr = view_pair.address(Network::Mainnet, AddressType::Standard, false); let fee = rpc.get_fee().await.unwrap(); @@ -99,7 +91,7 @@ async fn send_core(test: usize, multisig: bool) { // Grab the largest output available let output = { - let mut outputs = tx.as_ref().unwrap().scan(view, spend_pub).0; + let mut outputs = tx.as_ref().unwrap().scan(view_pair, false).0; outputs.sort_by(|x, y| x.commitment.amount.cmp(&y.commitment.amount).reverse()); outputs.swap_remove(0) }; @@ -124,7 +116,7 @@ async fn send_core(test: usize, multisig: bool) { for i in (start + 1) .. 
(start + 9) { let tx = rpc.get_block_transactions(i).await.unwrap().swap_remove(0); - let output = tx.scan(view, spend_pub).0.swap_remove(0); + let output = tx.scan(view_pair, false).0.swap_remove(0); amount += output.commitment.amount; outputs.push(output); } diff --git a/processor/src/coin/mod.rs b/processor/src/coin/mod.rs index fed65c26..79945665 100644 --- a/processor/src/coin/mod.rs +++ b/processor/src/coin/mod.rs @@ -77,7 +77,7 @@ pub trait Coin { ) -> Result<(Vec, Vec<::Id>), CoinError>; #[cfg(test)] - async fn mine_block(&self, address: Self::Address); + async fn mine_block(&self); #[cfg(test)] async fn test_send(&self, key: Self::Address); diff --git a/processor/src/coin/monero.rs b/processor/src/coin/monero.rs index 80d19a89..5e045892 100644 --- a/processor/src/coin/monero.rs +++ b/processor/src/coin/monero.rs @@ -2,17 +2,19 @@ use std::sync::Arc; use async_trait::async_trait; -use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar}; +use curve25519_dalek::scalar::Scalar; use dalek_ff_group as dfg; use transcript::RecommendedTranscript; use frost::{curve::Ed25519, MultisigKeys}; -use monero::{PublicKey, network::Network, util::address::Address}; use monero_serai::{ transaction::{Timelock, Transaction}, rpc::Rpc, - wallet::{Fee, SpendableOutput, SignableTransaction as MSignableTransaction, TransactionMachine} + wallet::{ + ViewPair, address::{Network, AddressType, Address}, + Fee, SpendableOutput, SignableTransaction as MSignableTransaction, TransactionMachine + } }; use crate::{coin::{CoinError, Output as OutputTrait, Coin}, view_key}; @@ -59,18 +61,28 @@ pub struct SignableTransaction( #[derive(Clone, Debug)] pub struct Monero { pub(crate) rpc: Rpc, - view: Scalar, - view_pub: PublicKey + view: Scalar } impl Monero { pub fn new(url: String) -> Monero { let view = view_key::(0).0; - Monero { - rpc: Rpc::new(url), - view, - view_pub: PublicKey { point: (&view * &ED25519_BASEPOINT_TABLE).compress() } - } + Monero { rpc: Rpc::new(url), 
view } + } + + fn view_pair(&self, spend: dfg::EdwardsPoint) -> ViewPair { + ViewPair { spend: spend.0, view: self.view } + } + + #[cfg(test)] + fn empty_view_pair(&self) -> ViewPair { + use group::Group; + self.view_pair(dfg::EdwardsPoint::generator()) + } + + #[cfg(test)] + fn empty_address(&self) -> Address { + self.empty_view_pair().address(Network::Mainnet, AddressType::Standard, false) } } @@ -100,7 +112,7 @@ impl Coin for Monero { const MAX_OUTPUTS: usize = 16; fn address(&self, key: dfg::EdwardsPoint) -> Self::Address { - Address::standard(Network::Mainnet, PublicKey { point: key.compress().0 }, self.view_pub) + self.view_pair(key).address(Network::Mainnet, AddressType::Standard, true) } async fn get_height(&self) -> Result { @@ -115,7 +127,7 @@ impl Coin for Monero { block .iter() .flat_map(|tx| { - let (outputs, timelock) = tx.scan(self.view, key.0); + let (outputs, timelock) = tx.scan(self.view_pair(key), true); if timelock == Timelock::None { outputs } else { @@ -178,13 +190,13 @@ impl Coin for Monero { } #[cfg(test)] - async fn mine_block(&self, address: Self::Address) { + async fn mine_block(&self) { #[derive(serde::Deserialize, Debug)] struct EmptyResponse {} let _: EmptyResponse = self.rpc.rpc_call("json_rpc", Some(serde_json::json!({ "method": "generateblocks", "params": { - "wallet_address": address.to_string(), + "wallet_address": self.empty_address().to_string(), "amount_of_blocks": 10 }, }))).await.unwrap(); @@ -192,31 +204,28 @@ impl Coin for Monero { #[cfg(test)] async fn test_send(&self, address: Self::Address) { - use group::Group; - use rand::rngs::OsRng; let height = self.get_height().await.unwrap(); - let temp = self.address(dfg::EdwardsPoint::generator()); - self.mine_block(temp).await; + self.mine_block().await; for _ in 0 .. 
7 { - self.mine_block(temp).await; + self.mine_block().await; } let outputs = self.rpc .get_block_transactions_possible(height).await.unwrap() - .swap_remove(0).scan(self.view, dfg::EdwardsPoint::generator().0).0; + .swap_remove(0).scan(self.empty_view_pair(), false).0; let amount = outputs[0].commitment.amount; let fee = 1000000000; // TODO let tx = MSignableTransaction::new( outputs, vec![(address, amount - fee)], - Some(temp), + Some(self.empty_address()), self.rpc.get_fee().await.unwrap() ).unwrap().sign(&mut OsRng, &self.rpc, &Scalar::one()).await.unwrap(); self.rpc.publish_transaction(&tx).await.unwrap(); - self.mine_block(temp).await; + self.mine_block().await; } } diff --git a/processor/src/tests/mod.rs b/processor/src/tests/mod.rs index 728b0668..051ecad6 100644 --- a/processor/src/tests/mod.rs +++ b/processor/src/tests/mod.rs @@ -4,10 +4,6 @@ use async_trait::async_trait; use rand::rngs::OsRng; -use group::Group; - -use frost::curve::Curve; - use crate::{NetworkError, Network, coin::{Coin, Monero}, wallet::{WalletKeys, MemCoinDb, Wallet}}; #[derive(Clone)] @@ -55,7 +51,7 @@ impl Network for LocalNetwork { async fn test_send(coin: C, fee: C::Fee) { // Mine a block so there's a confirmed height - coin.mine_block(coin.address(::G::generator())).await; + coin.mine_block().await; let height = coin.get_height().await.unwrap(); let mut keys = frost::tests::key_gen::<_, C::Curve>(&mut OsRng); @@ -74,7 +70,7 @@ async fn test_send(coin: C, fee: C::Fee) { // Get the chain to a height where blocks have sufficient confirmations while (height + C::CONFIRMATIONS) > coin.get_height().await.unwrap() { - coin.mine_block(coin.address(::G::generator())).await; + coin.mine_block().await; } for wallet in wallets.iter_mut() { From ac17645fc8c5e6eb22217c5ae6011db2c3334949 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 28 Jun 2022 00:06:12 -0400 Subject: [PATCH 057/105] Rename MultisigParams/MultisigKeys/MultisigView to Frost* --- coins/monero/src/ringct/clsag/multisig.rs 
| 8 ++--- coins/monero/src/wallet/send/multisig.rs | 4 +-- crypto/frost/src/algorithm.rs | 14 ++++---- crypto/frost/src/key_gen.rs | 25 +++++++------- crypto/frost/src/lib.rs | 40 +++++++++++------------ crypto/frost/src/sign.rs | 16 ++++----- crypto/frost/src/tests/curve.rs | 6 ++-- crypto/frost/src/tests/mod.rs | 10 +++--- crypto/frost/src/tests/schnorr.rs | 4 +-- crypto/frost/src/tests/vectors.rs | 8 ++--- processor/src/coin/mod.rs | 4 +-- processor/src/coin/monero.rs | 6 ++-- processor/src/wallet.rs | 12 +++---- 13 files changed, 79 insertions(+), 78 deletions(-) diff --git a/coins/monero/src/ringct/clsag/multisig.rs b/coins/monero/src/ringct/clsag/multisig.rs index dfeeda5f..8d15b0e2 100644 --- a/coins/monero/src/ringct/clsag/multisig.rs +++ b/coins/monero/src/ringct/clsag/multisig.rs @@ -14,7 +14,7 @@ use curve25519_dalek::{ use group::Group; use transcript::{Transcript, RecommendedTranscript}; -use frost::{curve::Ed25519, FrostError, MultisigView, algorithm::Algorithm}; +use frost::{curve::Ed25519, FrostError, FrostView, algorithm::Algorithm}; use dalek_ff_group as dfg; use crate::{ @@ -126,7 +126,7 @@ impl Algorithm for ClsagMultisig { fn preprocess_addendum( &mut self, rng: &mut R, - view: &MultisigView, + view: &FrostView, nonces: &[dfg::Scalar; 2] ) -> Vec { self.H = hash_to_point(&view.group_key().0); @@ -144,7 +144,7 @@ impl Algorithm for ClsagMultisig { fn process_addendum( &mut self, - view: &MultisigView, + view: &FrostView, l: u16, commitments: &[dfg::EdwardsPoint; 2], serialized: &[u8] @@ -192,7 +192,7 @@ impl Algorithm for ClsagMultisig { fn sign_share( &mut self, - view: &MultisigView, + view: &FrostView, nonce_sum: dfg::EdwardsPoint, b: dfg::Scalar, nonce: dfg::Scalar, diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index 67a39d93..c2de702c 100644 --- a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -8,7 +8,7 @@ use curve25519_dalek::{traits::Identity, 
scalar::Scalar, edwards::{EdwardsPoint, use transcript::{Transcript, RecommendedTranscript}; use frost::{ curve::Ed25519, - FrostError, MultisigKeys, + FrostError, FrostKeys, sign::{ PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine @@ -57,7 +57,7 @@ impl SignableTransaction { pub async fn multisig( self, rpc: &Rpc, - keys: MultisigKeys, + keys: FrostKeys, mut transcript: RecommendedTranscript, height: usize, mut included: Vec diff --git a/crypto/frost/src/algorithm.rs b/crypto/frost/src/algorithm.rs index fbd1dec8..21d51521 100644 --- a/crypto/frost/src/algorithm.rs +++ b/crypto/frost/src/algorithm.rs @@ -4,7 +4,7 @@ use rand_core::{RngCore, CryptoRng}; use transcript::Transcript; -use crate::{Curve, FrostError, MultisigView, schnorr}; +use crate::{Curve, FrostError, FrostView, schnorr}; pub use schnorr::SchnorrSignature; /// Algorithm to use FROST with @@ -19,14 +19,14 @@ pub trait Algorithm: Clone { fn preprocess_addendum( &mut self, rng: &mut R, - params: &MultisigView, + params: &FrostView, nonces: &[C::F; 2], ) -> Vec; /// Proccess the addendum for the specified participant. 
Guaranteed to be ordered fn process_addendum( &mut self, - params: &MultisigView, + params: &FrostView, l: u16, commitments: &[C::G; 2], serialized: &[u8], @@ -38,7 +38,7 @@ pub trait Algorithm: Clone { /// The nonce will already have been processed into the combined form d + (e * p) fn sign_share( &mut self, - params: &MultisigView, + params: &FrostView, nonce_sum: C::G, binding: C::F, nonce: C::F, @@ -114,7 +114,7 @@ impl> Algorithm for Schnorr { fn preprocess_addendum( &mut self, _: &mut R, - _: &MultisigView, + _: &FrostView, _: &[C::F; 2], ) -> Vec { vec![] @@ -122,7 +122,7 @@ impl> Algorithm for Schnorr { fn process_addendum( &mut self, - _: &MultisigView, + _: &FrostView, _: u16, _: &[C::G; 2], _: &[u8], @@ -132,7 +132,7 @@ impl> Algorithm for Schnorr { fn sign_share( &mut self, - params: &MultisigView, + params: &FrostView, nonce_sum: C::G, _: C::F, nonce: C::F, diff --git a/crypto/frost/src/key_gen.rs b/crypto/frost/src/key_gen.rs index f48a82fd..aeb18ca0 100644 --- a/crypto/frost/src/key_gen.rs +++ b/crypto/frost/src/key_gen.rs @@ -8,7 +8,7 @@ use multiexp::{multiexp_vartime, BatchVerifier}; use crate::{ curve::Curve, - FrostError, MultisigParams, MultisigKeys, + FrostError, FrostParams, FrostKeys, schnorr::{self, SchnorrSignature}, validate_map }; @@ -29,7 +29,7 @@ fn challenge(context: &str, l: u16, R: &[u8], Am: &[u8]) -> C::F { // the serialized commitments to be broadcasted over an authenticated channel to all parties fn generate_key_r1( rng: &mut R, - params: &MultisigParams, + params: &FrostParams, context: &str, ) -> (Vec, Vec) { let t = usize::from(params.t); @@ -72,7 +72,7 @@ fn generate_key_r1( // Verify the received data from the first round of key generation fn verify_r1( rng: &mut R, - params: &MultisigParams, + params: &FrostParams, context: &str, our_commitments: Vec, mut serialized: HashMap>, @@ -149,7 +149,7 @@ fn polynomial( // counterparty to receive fn generate_key_r2( rng: &mut R, - params: &MultisigParams, + params: &FrostParams, 
context: &str, coefficients: Vec, our_commitments: Vec, @@ -190,12 +190,12 @@ fn generate_key_r2( /// broadcasted initially fn complete_r2( rng: &mut R, - params: MultisigParams, + params: FrostParams, mut secret_share: C::F, commitments: HashMap>, // Vec to preserve ownership mut serialized: HashMap>, -) -> Result, FrostError> { +) -> Result, FrostError> { validate_map( &mut serialized, &(1 ..= params.n()).into_iter().collect::>(), @@ -256,12 +256,13 @@ fn complete_r2( for i in 1 ..= params.n() { verification_shares.insert(i, multiexp_vartime(&exponential(i, &stripes), C::LITTLE_ENDIAN)); } + // Removing this check would enable optimizing the above from t + (n * t) to t + ((n - 1) * t) debug_assert_eq!(C::GENERATOR_TABLE * secret_share, verification_shares[¶ms.i()]); // TODO: Clear serialized and shares Ok( - MultisigKeys { + FrostKeys { params, secret_share, group_key: stripes[0], @@ -272,20 +273,20 @@ fn complete_r2( } pub struct KeyGenMachine { - params: MultisigParams, + params: FrostParams, context: String, _curve: PhantomData, } pub struct SecretShareMachine { - params: MultisigParams, + params: FrostParams, context: String, coefficients: Vec, our_commitments: Vec, } pub struct KeyMachine { - params: MultisigParams, + params: FrostParams, secret: C::F, commitments: HashMap>, } @@ -293,7 +294,7 @@ pub struct KeyMachine { impl KeyGenMachine { /// Creates a new machine to generate a key for the specified curve in the specified multisig // The context string must be unique among multisigs - pub fn new(params: MultisigParams, context: String) -> KeyGenMachine { + pub fn new(params: FrostParams, context: String) -> KeyGenMachine { KeyGenMachine { params, context, _curve: PhantomData } } @@ -351,7 +352,7 @@ impl KeyMachine { self, rng: &mut R, shares: HashMap>, - ) -> Result, FrostError> { + ) -> Result, FrostError> { complete_r2(rng, self.params, self.secret, self.commitments, shares) } } diff --git a/crypto/frost/src/lib.rs b/crypto/frost/src/lib.rs index 
2e53b723..e337b70e 100644 --- a/crypto/frost/src/lib.rs +++ b/crypto/frost/src/lib.rs @@ -18,7 +18,7 @@ pub mod tests; /// Parameters for a multisig // These fields can not be made public as they should be static #[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct MultisigParams { +pub struct FrostParams { /// Participants needed to sign on behalf of the group t: u16, /// Amount of participants @@ -27,12 +27,12 @@ pub struct MultisigParams { i: u16, } -impl MultisigParams { +impl FrostParams { pub fn new( t: u16, n: u16, i: u16 - ) -> Result { + ) -> Result { if (t == 0) || (n == 0) { Err(FrostError::ZeroParameter(t, n))?; } @@ -46,7 +46,7 @@ impl MultisigParams { Err(FrostError::InvalidParticipantIndex(n, i))?; } - Ok(MultisigParams{ t, n, i }) + Ok(FrostParams{ t, n, i }) } pub fn t(&self) -> u16 { self.t } @@ -86,14 +86,14 @@ pub enum FrostError { // View of keys passable to algorithm implementations #[derive(Clone)] -pub struct MultisigView { +pub struct FrostView { group_key: C::G, included: Vec, secret_share: C::F, verification_shares: HashMap, } -impl MultisigView { +impl FrostView { pub fn group_key(&self) -> C::G { self.group_key } @@ -134,9 +134,9 @@ pub fn lagrange( } #[derive(Clone, PartialEq, Eq, Debug)] -pub struct MultisigKeys { - /// Multisig Parameters - params: MultisigParams, +pub struct FrostKeys { + /// FROST Parameters + params: FrostParams, /// Secret share key secret_share: C::F, @@ -149,12 +149,12 @@ pub struct MultisigKeys { offset: Option, } -impl MultisigKeys { +impl FrostKeys { /// Offset the keys by a given scalar to allow for account and privacy schemes /// This offset is ephemeral and will not be included when these keys are serialized /// Keys offset multiple times will form a new offset of their sum /// Not IETF compliant - pub fn offset(&self, offset: C::F) -> MultisigKeys { + pub fn offset(&self, offset: C::F) -> FrostKeys { let mut res = self.clone(); // Carry any existing offset // Enables schemes like Monero's 
subaddresses which have a per-subaddress offset and then a @@ -164,7 +164,7 @@ impl MultisigKeys { res } - pub fn params(&self) -> MultisigParams { + pub fn params(&self) -> FrostParams { self.params } @@ -180,7 +180,7 @@ impl MultisigKeys { self.verification_shares.clone() } - pub fn view(&self, included: &[u16]) -> Result, FrostError> { + pub fn view(&self, included: &[u16]) -> Result, FrostError> { if (included.len() < self.params.t.into()) || (usize::from(self.params.n) < included.len()) { Err(FrostError::InvalidSigningSet("invalid amount of participants included".to_string()))?; } @@ -189,7 +189,7 @@ impl MultisigKeys { let offset = self.offset.unwrap_or(C::F::zero()); let offset_share = offset * C::F::from(included.len().try_into().unwrap()).invert().unwrap(); - Ok(MultisigView { + Ok(FrostView { group_key: self.group_key, secret_share: secret_share + offset_share, verification_shares: self.verification_shares.iter().map( @@ -207,7 +207,7 @@ impl MultisigKeys { } pub fn serialize(&self) -> Vec { - let mut serialized = Vec::with_capacity(MultisigKeys::::serialized_len(self.params.n)); + let mut serialized = Vec::with_capacity(FrostKeys::::serialized_len(self.params.n)); serialized.extend(u64::try_from(C::ID.len()).unwrap().to_be_bytes()); serialized.extend(C::ID); serialized.extend(&self.params.t.to_be_bytes()); @@ -221,7 +221,7 @@ impl MultisigKeys { serialized } - pub fn deserialize(serialized: &[u8]) -> Result, FrostError> { + pub fn deserialize(serialized: &[u8]) -> Result, FrostError> { let mut start = u64::try_from(C::ID.len()).unwrap().to_be_bytes().to_vec(); start.extend(C::ID); let mut cursor = start.len(); @@ -229,7 +229,7 @@ impl MultisigKeys { if serialized.len() < (cursor + 4) { Err( FrostError::InternalError( - "MultisigKeys serialization is missing its curve/participant quantities".to_string() + "FrostKeys serialization is missing its curve/participant quantities".to_string() ) )?; } @@ -246,7 +246,7 @@ impl MultisigKeys { let n = 
u16::from_be_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap()); cursor += 2; - if serialized.len() != MultisigKeys::::serialized_len(n) { + if serialized.len() != FrostKeys::::serialized_len(n) { Err(FrostError::InternalError("incorrect serialization length".to_string()))?; } @@ -271,8 +271,8 @@ impl MultisigKeys { } Ok( - MultisigKeys { - params: MultisigParams::new(t, n, i) + FrostKeys { + params: FrostParams::new(t, n, i) .map_err(|_| FrostError::InternalError("invalid parameters".to_string()))?, secret_share, group_key, diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index 8ea0f61c..902607f7 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -10,24 +10,24 @@ use transcript::Transcript; use crate::{ curve::Curve, FrostError, - MultisigParams, MultisigKeys, MultisigView, + FrostParams, FrostKeys, FrostView, algorithm::Algorithm, validate_map }; -/// Pairing of an Algorithm with a MultisigKeys instance and this specific signing set +/// Pairing of an Algorithm with a FrostKeys instance and this specific signing set #[derive(Clone)] pub struct Params> { algorithm: A, - keys: Arc>, - view: MultisigView, + keys: Arc>, + view: FrostView, } // Currently public to enable more complex operations as desired, yet solely used in testing impl> Params { pub fn new( algorithm: A, - keys: Arc>, + keys: Arc>, included: &[u16], ) -> Result, FrostError> { let mut included = included.to_vec(); @@ -60,11 +60,11 @@ impl> Params { Ok(Params { algorithm, view: keys.view(&included).unwrap(), keys }) } - pub fn multisig_params(&self) -> MultisigParams { + pub fn multisig_params(&self) -> FrostParams { self.keys.params } - pub fn view(&self) -> MultisigView { + pub fn view(&self) -> FrostView { self.view.clone() } } @@ -291,7 +291,7 @@ impl> AlgorithmMachine { /// Creates a new machine to generate a key for the specified curve in the specified multisig pub fn new( algorithm: A, - keys: Arc>, + keys: Arc>, included: &[u16], ) -> Result, 
FrostError> { Ok(AlgorithmMachine { params: Params::new(algorithm, keys, included)? }) diff --git a/crypto/frost/src/tests/curve.rs b/crypto/frost/src/tests/curve.rs index d7327605..48dd78de 100644 --- a/crypto/frost/src/tests/curve.rs +++ b/crypto/frost/src/tests/curve.rs @@ -2,7 +2,7 @@ use rand_core::{RngCore, CryptoRng}; use group::{ff::Field, Group}; -use crate::{Curve, MultisigKeys, tests::key_gen}; +use crate::{Curve, FrostKeys, tests::key_gen}; // Test generation of FROST keys fn key_generation(rng: &mut R) { @@ -13,7 +13,7 @@ fn key_generation(rng: &mut R) { // Test serialization of generated keys fn keys_serialization(rng: &mut R) { for (_, keys) in key_gen::<_, C>(rng) { - assert_eq!(&MultisigKeys::::deserialize(&keys.serialize()).unwrap(), &*keys); + assert_eq!(&FrostKeys::::deserialize(&keys.serialize()).unwrap(), &*keys); } } @@ -35,7 +35,7 @@ pub fn test_curve(rng: &mut R) { } } - // Test FROST key generation and serialization of MultisigKeys works as expected + // Test FROST key generation and serialization of FrostKeys works as expected key_generation::<_, C>(rng); keys_serialization::<_, C>(rng); } diff --git a/crypto/frost/src/tests/mod.rs b/crypto/frost/src/tests/mod.rs index fa45f6f1..87f2bf83 100644 --- a/crypto/frost/src/tests/mod.rs +++ b/crypto/frost/src/tests/mod.rs @@ -6,7 +6,7 @@ use group::ff::Field; use crate::{ Curve, - MultisigParams, MultisigKeys, + FrostParams, FrostKeys, lagrange, key_gen::KeyGenMachine, algorithm::Algorithm, @@ -36,12 +36,12 @@ pub fn clone_without( pub fn key_gen( rng: &mut R -) -> HashMap>> { +) -> HashMap>> { let mut machines = HashMap::new(); let mut commitments = HashMap::new(); for i in 1 ..= PARTICIPANTS { let machine = KeyGenMachine::::new( - MultisigParams::new(THRESHOLD, PARTICIPANTS, i).unwrap(), + FrostParams::new(THRESHOLD, PARTICIPANTS, i).unwrap(), "FROST Test key_gen".to_string() ); let (machine, these_commitments) = machine.generate_coefficients(rng); @@ -89,7 +89,7 @@ pub fn key_gen( 
}).collect::>() } -pub fn recover(keys: &HashMap>) -> C::F { +pub fn recover(keys: &HashMap>) -> C::F { let first = keys.values().next().expect("no keys provided"); assert!(keys.len() >= first.params().t().into(), "not enough keys provided"); let included = keys.keys().cloned().collect::>(); @@ -105,7 +105,7 @@ pub fn recover(keys: &HashMap>) -> C::F { pub fn algorithm_machines>( rng: &mut R, algorithm: A, - keys: &HashMap>>, + keys: &HashMap>>, ) -> HashMap> { let mut included = vec![]; while included.len() < usize::from(keys[&1].params().t()) { diff --git a/crypto/frost/src/tests/schnorr.rs b/crypto/frost/src/tests/schnorr.rs index c9550577..7ef1806a 100644 --- a/crypto/frost/src/tests/schnorr.rs +++ b/crypto/frost/src/tests/schnorr.rs @@ -5,7 +5,7 @@ use rand_core::{RngCore, CryptoRng}; use group::ff::Field; use crate::{ - Curve, MultisigKeys, schnorr::{self, SchnorrSignature}, algorithm::{Hram, Schnorr}, + Curve, FrostKeys, schnorr::{self, SchnorrSignature}, algorithm::{Hram, Schnorr}, tests::{key_gen, algorithm_machines, sign as sign_test} }; @@ -80,7 +80,7 @@ pub(crate) fn core_batch_verify(rng: &mut R) { fn sign_core( rng: &mut R, group_key: C::G, - keys: &HashMap>> + keys: &HashMap>> ) { const MESSAGE: &'static [u8] = b"Hello, World!"; diff --git a/crypto/frost/src/tests/vectors.rs b/crypto/frost/src/tests/vectors.rs index e0def162..00f745fb 100644 --- a/crypto/frost/src/tests/vectors.rs +++ b/crypto/frost/src/tests/vectors.rs @@ -3,7 +3,7 @@ use std::{sync::Arc, collections::HashMap}; use rand_core::{RngCore, CryptoRng}; use crate::{ - Curve, MultisigKeys, + Curve, FrostKeys, algorithm::{Schnorr, Hram}, sign::{PreprocessPackage, SignMachine, SignatureMachine, AlgorithmMachine}, tests::{curve::test_curve, schnorr::test_schnorr, recover} @@ -22,8 +22,8 @@ pub struct Vectors { pub sig: String } -// Load these vectors into MultisigKeys using a custom serialization it'll deserialize -fn vectors_to_multisig_keys(vectors: &Vectors) -> HashMap> { +// Load these 
vectors into FrostKeys using a custom serialization it'll deserialize +fn vectors_to_multisig_keys(vectors: &Vectors) -> HashMap> { let shares = vectors.shares.iter().map( |secret| C::F_from_slice(&hex::decode(secret).unwrap()).unwrap() ).collect::>(); @@ -45,7 +45,7 @@ fn vectors_to_multisig_keys(vectors: &Vectors) -> HashMap::deserialize(&serialized).unwrap(); + let these_keys = FrostKeys::::deserialize(&serialized).unwrap(); assert_eq!(these_keys.params().t(), vectors.threshold); assert_eq!(usize::from(these_keys.params().n()), shares.len()); assert_eq!(these_keys.params().i(), i); diff --git a/processor/src/coin/mod.rs b/processor/src/coin/mod.rs index 79945665..6e7308db 100644 --- a/processor/src/coin/mod.rs +++ b/processor/src/coin/mod.rs @@ -4,7 +4,7 @@ use async_trait::async_trait; use thiserror::Error; use transcript::RecommendedTranscript; -use frost::{curve::Curve, MultisigKeys, sign::PreprocessMachine}; +use frost::{curve::Curve, FrostKeys, sign::PreprocessMachine}; pub mod monero; pub use self::monero::Monero; @@ -57,7 +57,7 @@ pub trait Coin { async fn prepare_send( &self, - keys: Arc>, + keys: Arc>, transcript: RecommendedTranscript, height: usize, inputs: Vec, diff --git a/processor/src/coin/monero.rs b/processor/src/coin/monero.rs index 5e045892..980dedde 100644 --- a/processor/src/coin/monero.rs +++ b/processor/src/coin/monero.rs @@ -6,7 +6,7 @@ use curve25519_dalek::scalar::Scalar; use dalek_ff_group as dfg; use transcript::RecommendedTranscript; -use frost::{curve::Ed25519, MultisigKeys}; +use frost::{curve::Ed25519, FrostKeys}; use monero_serai::{ transaction::{Timelock, Transaction}, @@ -52,7 +52,7 @@ impl From for Output { #[derive(Debug)] pub struct SignableTransaction( - Arc>, + Arc>, RecommendedTranscript, usize, MSignableTransaction @@ -140,7 +140,7 @@ impl Coin for Monero { async fn prepare_send( &self, - keys: Arc>, + keys: Arc>, transcript: RecommendedTranscript, height: usize, mut inputs: Vec, diff --git a/processor/src/wallet.rs 
b/processor/src/wallet.rs index 6789cb8f..79458ce1 100644 --- a/processor/src/wallet.rs +++ b/processor/src/wallet.rs @@ -4,17 +4,17 @@ use rand_core::OsRng; use transcript::{Transcript, RecommendedTranscript}; -use frost::{curve::Curve, MultisigKeys, sign::{PreprocessMachine, SignMachine, SignatureMachine}}; +use frost::{curve::Curve, FrostKeys, sign::{PreprocessMachine, SignMachine, SignatureMachine}}; use crate::{coin::{CoinError, Output, Coin}, SignError, Network}; pub struct WalletKeys { - keys: MultisigKeys, + keys: FrostKeys, creation_height: usize } impl WalletKeys { - pub fn new(keys: MultisigKeys, creation_height: usize) -> WalletKeys { + pub fn new(keys: FrostKeys, creation_height: usize) -> WalletKeys { WalletKeys { keys, creation_height } } @@ -26,7 +26,7 @@ impl WalletKeys { // system, there are potentially other benefits to binding this to a specific group key // It's no longer possible to influence group key gen to key cancel without breaking the hash // function as well, although that degree of influence means key gen is broken already - fn bind(&self, chain: &[u8]) -> MultisigKeys { + fn bind(&self, chain: &[u8]) -> FrostKeys { const DST: &[u8] = b"Serai Processor Wallet Chain Bind"; let mut transcript = RecommendedTranscript::new(DST); transcript.append_message(b"chain", chain); @@ -200,8 +200,8 @@ fn select_inputs_outputs( pub struct Wallet { db: D, coin: C, - keys: Vec<(Arc>, Vec)>, - pending: Vec<(usize, MultisigKeys)> + keys: Vec<(Arc>, Vec)>, + pending: Vec<(usize, FrostKeys)> } impl Wallet { From 3de7a76051681de1ab24c2b3fd8eac9048e867d8 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 28 Jun 2022 01:25:26 -0400 Subject: [PATCH 058/105] Use GroupEncoding instead of Curve's from_slice/to_bytes Increases usage of standardization while expanding dalek_ff_group. Closes https://github.com/serai-dex/serai/issues/26 by moving dfg::EdwardsPoint to only be for the prime subgroup. 
--- coins/monero/src/frost.rs | 23 +++++++---- coins/monero/src/wallet/send/multisig.rs | 5 +++ crypto/dalek-ff-group/src/lib.rs | 34 ++++++++++++---- crypto/frost/src/curve/dalek.rs | 52 +----------------------- crypto/frost/src/curve/kp256.rs | 42 ++----------------- crypto/frost/src/curve/mod.rs | 50 +++++++++++++---------- crypto/frost/src/key_gen.rs | 20 ++++----- crypto/frost/src/lib.rs | 16 ++++---- crypto/frost/src/schnorr.rs | 6 +-- crypto/frost/src/sign.rs | 30 +++++++------- crypto/frost/src/tests/schnorr.rs | 4 +- crypto/frost/src/tests/vectors.rs | 30 +++++++------- processor/Cargo.toml | 2 + processor/src/wallet.rs | 5 ++- 14 files changed, 141 insertions(+), 178 deletions(-) diff --git a/coins/monero/src/frost.rs b/coins/monero/src/frost.rs index 49d4e2da..69c4747e 100644 --- a/coins/monero/src/frost.rs +++ b/coins/monero/src/frost.rs @@ -3,6 +3,8 @@ use core::convert::TryInto; use thiserror::Error; use rand_core::{RngCore, CryptoRng}; +use group::GroupEncoding; + use curve25519_dalek::{ constants::ED25519_BASEPOINT_TABLE as DTable, scalar::Scalar as DScalar, @@ -10,7 +12,6 @@ use curve25519_dalek::{ }; use transcript::{Transcript, RecommendedTranscript}; -use frost::curve::{Curve, Ed25519}; use dalek_ff_group as dfg; use crate::random_scalar; @@ -118,18 +119,26 @@ impl DLEqProof { } #[allow(non_snake_case)] -pub fn read_dleq( +pub(crate) fn read_dleq( serialized: &[u8], start: usize, H: &DPoint, l: u16, xG: &DPoint ) -> Result { - // Not using G_from_slice here would enable non-canonical points and break blame - // This does also ban identity points, yet those should never be a concern - let other = ::G_from_slice( - &serialized[(start + 0) .. (start + 32)] - ).map_err(|_| MultisigError::InvalidDLEqProof(l))?; + if serialized.len() < start + 96 { + Err(MultisigError::InvalidDLEqProof(l))?; + } + + let bytes = (&serialized[(start + 0) .. 
(start + 32)]).try_into().unwrap(); + // dfg ensures the point is torsion free + let other = Option::::from( + dfg::EdwardsPoint::from_bytes(&bytes)).ok_or(MultisigError::InvalidDLEqProof(l) + )?; + // Ensure this is a canonical point + if other.to_bytes() != bytes { + Err(MultisigError::InvalidDLEqProof(l))?; + } DLEqProof::deserialize(&serialized[(start + 32) .. (start + 96)]) .ok_or(MultisigError::InvalidDLEqProof(l))? diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index c2de702c..1bf30d96 100644 --- a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -226,6 +226,11 @@ impl SignMachine for TransactionSignMachine { // FROST commitments, image, H commitments, and their proofs let clsag_len = 64 + ClsagMultisig::serialized_len(); + for (l, commitments) in &commitments { + if commitments.len() != (self.clsags.len() * clsag_len) { + Err(FrostError::InvalidCommitment(*l))?; + } + } // Convert the unified commitments to a Vec of the individual commitments let mut commitments = (0 .. self.clsags.len()).map(|_| commitments.iter_mut().map( diff --git a/crypto/dalek-ff-group/src/lib.rs b/crypto/dalek-ff-group/src/lib.rs index c919fea9..eea21cfb 100644 --- a/crypto/dalek-ff-group/src/lib.rs +++ b/crypto/dalek-ff-group/src/lib.rs @@ -27,7 +27,7 @@ use dalek::{ } }; -use group::{ff::{Field, PrimeField}, Group}; +use group::{ff::{Field, PrimeField}, Group, GroupEncoding, prime::PrimeGroup}; macro_rules! deref_borrow { ($Source: ident, $Target: ident) => { @@ -192,6 +192,7 @@ macro_rules! dalek_group { ( $Point: ident, $DPoint: ident, + $torsion_free: expr, $Table: ident, $DTable: ident, @@ -225,6 +226,29 @@ macro_rules! 
dalek_group { fn double(&self) -> Self { *self + self } } + impl GroupEncoding for $Point { + type Repr = [u8; 32]; + + fn from_bytes(bytes: &Self::Repr) -> CtOption { + if let Some(point) = $DCompressed(*bytes).decompress() { + if $torsion_free(point) { + return CtOption::new($Point(point), Choice::from(1)); + } + } + CtOption::new($Point::identity(), Choice::from(0)) + } + + fn from_bytes_unchecked(bytes: &Self::Repr) -> CtOption { + $Point::from_bytes(bytes) + } + + fn to_bytes(&self) -> Self::Repr { + self.0.compress().to_bytes() + } + } + + impl PrimeGroup for $Point {} + pub struct $Compressed(pub $DCompressed); deref_borrow!($Compressed, $DCompressed); impl $Compressed { @@ -261,6 +285,7 @@ macro_rules! dalek_group { dalek_group!( EdwardsPoint, DEdwardsPoint, + |point: DEdwardsPoint| point.is_torsion_free(), EdwardsBasepointTable, DEdwardsBasepointTable, @@ -272,15 +297,10 @@ dalek_group!( ED25519_BASEPOINT_TABLE ); -impl EdwardsPoint { - pub fn is_torsion_free(&self) -> bool { - self.0.is_torsion_free() - } -} - dalek_group!( RistrettoPoint, DRistrettoPoint, + |_| true, RistrettoBasepointTable, DRistrettoBasepointTable, diff --git a/crypto/frost/src/curve/dalek.rs b/crypto/frost/src/curve/dalek.rs index 362a9614..07515eee 100644 --- a/crypto/frost/src/curve/dalek.rs +++ b/crypto/frost/src/curve/dalek.rs @@ -1,34 +1,27 @@ -use core::convert::TryInto; - use rand_core::{RngCore, CryptoRng}; use sha2::{Digest, Sha512}; -use group::{ff::PrimeField, Group}; - use dalek_ff_group::Scalar; -use crate::{curve::{CurveError, Curve}, algorithm::Hram}; +use crate::{curve::Curve, algorithm::Hram}; macro_rules! 
dalek_curve { ( $Curve: ident, $Hram: ident, $Point: ident, - $Compressed: ident, $Table: ident, $POINT: ident, $TABLE: ident, - $torsioned: expr, - $ID: literal, $CONTEXT: literal, $chal: literal, $digest: literal, ) => { - use dalek_ff_group::{$Point, $Compressed, $Table, $POINT, $TABLE}; + use dalek_ff_group::{$Point, $Table, $POINT, $TABLE}; #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct $Curve; @@ -75,43 +68,6 @@ macro_rules! dalek_curve { fn G_len() -> usize { 32 } - - fn F_from_slice(slice: &[u8]) -> Result { - let scalar = Self::F::from_repr( - slice.try_into().map_err(|_| CurveError::InvalidLength(32, slice.len()))? - ); - if !bool::from(scalar.is_some()) { - Err(CurveError::InvalidScalar)?; - } - Ok(scalar.unwrap()) - } - - fn G_from_slice(slice: &[u8]) -> Result { - let bytes = slice.try_into().map_err(|_| CurveError::InvalidLength(32, slice.len()))?; - let point = $Compressed::new(bytes).decompress().ok_or(CurveError::InvalidPoint)?; - - // Ban identity - if point.is_identity().into() { - Err(CurveError::InvalidPoint)?; - } - // Ban torsioned points to meet the prime order group requirement - if $torsioned(point) { - Err(CurveError::InvalidPoint)?; - } - // Ban points which weren't canonically encoded - if point.compress().to_bytes() != bytes { - Err(CurveError::InvalidPoint)?; - } - Ok(point) - } - - fn F_to_bytes(f: &Self::F) -> Vec { - f.to_repr().to_vec() - } - - fn G_to_bytes(g: &Self::G) -> Vec { - g.compress().to_bytes().to_vec() - } } #[derive(Copy, Clone)] @@ -130,11 +86,9 @@ dalek_curve!( Ristretto, IetfRistrettoHram, RistrettoPoint, - CompressedRistretto, RistrettoBasepointTable, RISTRETTO_BASEPOINT_POINT, RISTRETTO_BASEPOINT_TABLE, - |_| false, b"ristretto", b"FROST-RISTRETTO255-SHA512-v5", b"chal", @@ -146,11 +100,9 @@ dalek_curve!( Ed25519, IetfEd25519Hram, EdwardsPoint, - CompressedEdwardsY, EdwardsBasepointTable, ED25519_BASEPOINT_POINT, ED25519_BASEPOINT_TABLE, - |point: EdwardsPoint| !bool::from(point.is_torsion_free()), 
b"edwards25519", b"", b"", diff --git a/crypto/frost/src/curve/kp256.rs b/crypto/frost/src/curve/kp256.rs index 1b762978..278e4eaa 100644 --- a/crypto/frost/src/curve/kp256.rs +++ b/crypto/frost/src/curve/kp256.rs @@ -1,14 +1,12 @@ -use core::convert::TryInto; - use rand_core::{RngCore, CryptoRng}; use sha2::{digest::Update, Digest, Sha256}; -use group::{ff::{Field, PrimeField}, Group, GroupEncoding}; +use group::{ff::Field, GroupEncoding}; use elliptic_curve::{bigint::{Encoding, U384}, hash2curve::{Expander, ExpandMsg, ExpandMsgXmd}}; -use crate::{curve::{CurveError, Curve}, algorithm::Hram}; +use crate::{curve::{Curve, F_from_slice}, algorithm::Hram}; macro_rules! kp_curve { ( @@ -65,7 +63,7 @@ macro_rules! kp_curve { let mut modulus = vec![0; 16]; modulus.extend((Self::F::zero() - Self::F::one()).to_bytes()); let modulus = U384::from_be_slice(&modulus).wrapping_add(&U384::ONE); - Self::F_from_slice( + F_from_slice::( &U384::from_be_slice(&{ let mut bytes = [0; 48]; ExpandMsgXmd::::expand_message( @@ -85,38 +83,6 @@ macro_rules! kp_curve { fn G_len() -> usize { 33 } - - fn F_from_slice(slice: &[u8]) -> Result { - let bytes: [u8; 32] = slice.try_into() - .map_err(|_| CurveError::InvalidLength(32, slice.len()))?; - - let scalar = Self::F::from_repr(bytes.into()); - if scalar.is_none().into() { - Err(CurveError::InvalidScalar)?; - } - - Ok(scalar.unwrap()) - } - - fn G_from_slice(slice: &[u8]) -> Result { - let bytes: [u8; 33] = slice.try_into() - .map_err(|_| CurveError::InvalidLength(33, slice.len()))?; - - let point = Self::G::from_bytes(&bytes.into()); - if point.is_none().into() || point.unwrap().is_identity().into() { - Err(CurveError::InvalidPoint)?; - } - - Ok(point.unwrap()) - } - - fn F_to_bytes(f: &Self::F) -> Vec { - f.to_bytes().to_vec() - } - - fn G_to_bytes(g: &Self::G) -> Vec { - g.to_bytes().to_vec() - } } #[derive(Clone)] @@ -126,7 +92,7 @@ macro_rules! 
kp_curve { fn hram(R: &$lib::ProjectivePoint, A: &$lib::ProjectivePoint, m: &[u8]) -> $lib::Scalar { $Curve::hash_to_F( &[$CONTEXT as &[u8], b"chal"].concat(), - &[&$Curve::G_to_bytes(R), &$Curve::G_to_bytes(A), m].concat() + &[R.to_bytes().as_ref(), A.to_bytes().as_ref(), m].concat() ) } } diff --git a/crypto/frost/src/curve/mod.rs b/crypto/frost/src/curve/mod.rs index f6ad5cf9..2de31a2a 100644 --- a/crypto/frost/src/curve/mod.rs +++ b/crypto/frost/src/curve/mod.rs @@ -4,7 +4,7 @@ use thiserror::Error; use rand_core::{RngCore, CryptoRng}; -use group::{ff::PrimeField, Group, GroupOps}; +use group::{ff::PrimeField, Group, GroupOps, prime::PrimeGroup}; #[cfg(any(test, feature = "dalek"))] mod dalek; @@ -42,7 +42,7 @@ pub trait Curve: Clone + Copy + PartialEq + Eq + Debug { // This is available via G::Scalar yet `C::G::Scalar` is ambiguous, forcing horrific accesses type F: PrimeField; /// Group element type - type G: Group + GroupOps; + type G: Group + GroupOps + PrimeGroup; /// Precomputed table type type T: Mul; @@ -99,23 +99,31 @@ pub trait Curve: Clone + Copy + PartialEq + Eq + Debug { // that is on them #[allow(non_snake_case)] fn G_len() -> usize; - - /// Field element from slice. Preferred to be canonical yet does not have to be - // Required due to the lack of standardized encoding functions provided by ff/group - // While they do technically exist, their usage of Self::Repr breaks all potential library usage - // without helper functions like this - #[allow(non_snake_case)] - fn F_from_slice(slice: &[u8]) -> Result; - - /// Group element from slice. 
Must require canonicity or risks differing binding factors - #[allow(non_snake_case)] - fn G_from_slice(slice: &[u8]) -> Result; - - /// Obtain a vector of the byte encoding of F - #[allow(non_snake_case)] - fn F_to_bytes(f: &Self::F) -> Vec; - - /// Obtain a vector of the byte encoding of G - #[allow(non_snake_case)] - fn G_to_bytes(g: &Self::G) -> Vec; +} + +/// Field element from slice +#[allow(non_snake_case)] +pub(crate) fn F_from_slice(slice: &[u8]) -> Result { + let mut encoding = F::Repr::default(); + encoding.as_mut().copy_from_slice(slice); + + let point = Option::::from(F::from_repr(encoding)).ok_or(CurveError::InvalidScalar)?; + if point.to_repr().as_ref() != slice { + Err(CurveError::InvalidScalar)?; + } + Ok(point) +} + +/// Group element from slice +#[allow(non_snake_case)] +pub(crate) fn G_from_slice(slice: &[u8]) -> Result { + let mut encoding = G::Repr::default(); + encoding.as_mut().copy_from_slice(slice); + + let point = Option::::from(G::from_bytes(&encoding)).ok_or(CurveError::InvalidPoint)?; + // Ban the identity, per the FROST spec, and non-canonical points + if (point.is_identity().into()) || (point.to_bytes().as_ref() != slice) { + Err(CurveError::InvalidPoint)?; + } + Ok(point) } diff --git a/crypto/frost/src/key_gen.rs b/crypto/frost/src/key_gen.rs index aeb18ca0..4f2832e5 100644 --- a/crypto/frost/src/key_gen.rs +++ b/crypto/frost/src/key_gen.rs @@ -2,12 +2,12 @@ use std::{marker::PhantomData, collections::HashMap}; use rand_core::{RngCore, CryptoRng}; -use group::ff::{Field, PrimeField}; +use group::{ff::{Field, PrimeField}, GroupEncoding}; use multiexp::{multiexp_vartime, BatchVerifier}; use crate::{ - curve::Curve, + curve::{Curve, F_from_slice, G_from_slice}, FrostError, FrostParams, FrostKeys, schnorr::{self, SchnorrSignature}, validate_map @@ -43,7 +43,7 @@ fn generate_key_r1( // Step 3: Generate public commitments commitments.push(C::GENERATOR_TABLE * coefficients[i]); // Serialize them for publication - 
serialized.extend(&C::G_to_bytes(&commitments[i])); + serialized.extend(commitments[i].to_bytes().as_ref()); } // Step 2: Provide a proof of knowledge @@ -59,7 +59,7 @@ fn generate_key_r1( challenge::( context, params.i(), - &C::G_to_bytes(&(C::GENERATOR_TABLE * r)), + (C::GENERATOR_TABLE * r).to_bytes().as_ref(), &serialized ) ).serialize() @@ -90,11 +90,11 @@ fn verify_r1( #[allow(non_snake_case)] let R_bytes = |l| &serialized[&l][commitments_len .. commitments_len + C::G_len()]; #[allow(non_snake_case)] - let R = |l| C::G_from_slice(R_bytes(l)).map_err(|_| FrostError::InvalidProofOfKnowledge(l)); + let R = |l| G_from_slice::(R_bytes(l)).map_err(|_| FrostError::InvalidProofOfKnowledge(l)); #[allow(non_snake_case)] let Am = |l| &serialized[&l][0 .. commitments_len]; - let s = |l| C::F_from_slice( + let s = |l| F_from_slice::( &serialized[&l][commitments_len + C::G_len() ..] ).map_err(|_| FrostError::InvalidProofOfKnowledge(l)); @@ -103,7 +103,7 @@ fn verify_r1( let mut these_commitments = vec![]; for c in 0 .. usize::from(params.t()) { these_commitments.push( - C::G_from_slice( + G_from_slice::( &serialized[&l][(c * C::G_len()) .. ((c + 1) * C::G_len())] ).map_err(|_| FrostError::InvalidCommitment(l.try_into().unwrap()))? ); @@ -166,7 +166,7 @@ fn generate_key_r2( continue; } - res.insert(l, C::F_to_bytes(&polynomial(&coefficients, l))); + res.insert(l, polynomial(&coefficients, l).to_repr().as_ref().to_vec()); } // Calculate our own share @@ -199,13 +199,13 @@ fn complete_r2( validate_map( &mut serialized, &(1 ..= params.n()).into_iter().collect::>(), - (params.i(), C::F_to_bytes(&secret_share)) + (params.i(), secret_share.to_repr().as_ref().to_vec()) )?; // Step 2. 
Verify each share let mut shares = HashMap::new(); for (l, share) in serialized { - shares.insert(l, C::F_from_slice(&share).map_err(|_| FrostError::InvalidShare(l))?); + shares.insert(l, F_from_slice::(&share).map_err(|_| FrostError::InvalidShare(l))?); } // Calculate the exponent for a given participant and apply it to a series of commitments diff --git a/crypto/frost/src/lib.rs b/crypto/frost/src/lib.rs index e337b70e..ca64b96f 100644 --- a/crypto/frost/src/lib.rs +++ b/crypto/frost/src/lib.rs @@ -3,12 +3,12 @@ use std::collections::HashMap; use thiserror::Error; -use group::ff::{Field, PrimeField}; +use group::{ff::{Field, PrimeField}, GroupEncoding}; mod schnorr; pub mod curve; -use curve::Curve; +use curve::{Curve, F_from_slice, G_from_slice}; pub mod key_gen; pub mod algorithm; pub mod sign; @@ -213,10 +213,10 @@ impl FrostKeys { serialized.extend(&self.params.t.to_be_bytes()); serialized.extend(&self.params.n.to_be_bytes()); serialized.extend(&self.params.i.to_be_bytes()); - serialized.extend(&C::F_to_bytes(&self.secret_share)); - serialized.extend(&C::G_to_bytes(&self.group_key)); + serialized.extend(self.secret_share.to_repr().as_ref()); + serialized.extend(self.group_key.to_bytes().as_ref()); for l in 1 ..= self.params.n.into() { - serialized.extend(&C::G_to_bytes(&self.verification_shares[&l])); + serialized.extend(self.verification_shares[&l].to_bytes().as_ref()); } serialized } @@ -253,10 +253,10 @@ impl FrostKeys { let i = u16::from_be_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap()); cursor += 2; - let secret_share = C::F_from_slice(&serialized[cursor .. (cursor + C::F_len())]) + let secret_share = F_from_slice::(&serialized[cursor .. (cursor + C::F_len())]) .map_err(|_| FrostError::InternalError("invalid secret share".to_string()))?; cursor += C::F_len(); - let group_key = C::G_from_slice(&serialized[cursor .. (cursor + C::G_len())]) + let group_key = G_from_slice::(&serialized[cursor .. 
(cursor + C::G_len())]) .map_err(|_| FrostError::InternalError("invalid group key".to_string()))?; cursor += C::G_len(); @@ -264,7 +264,7 @@ impl FrostKeys { for l in 1 ..= n { verification_shares.insert( l, - C::G_from_slice(&serialized[cursor .. (cursor + C::G_len())]) + G_from_slice::(&serialized[cursor .. (cursor + C::G_len())]) .map_err(|_| FrostError::InternalError("invalid verification share".to_string()))? ); cursor += C::G_len(); diff --git a/crypto/frost/src/schnorr.rs b/crypto/frost/src/schnorr.rs index 22361173..9424fd28 100644 --- a/crypto/frost/src/schnorr.rs +++ b/crypto/frost/src/schnorr.rs @@ -1,6 +1,6 @@ use rand_core::{RngCore, CryptoRng}; -use group::ff::Field; +use group::{ff::{Field, PrimeField}, GroupEncoding}; use multiexp::BatchVerifier; @@ -16,8 +16,8 @@ pub struct SchnorrSignature { impl SchnorrSignature { pub fn serialize(&self) -> Vec { let mut res = Vec::with_capacity(C::G_len() + C::F_len()); - res.extend(C::G_to_bytes(&self.R)); - res.extend(C::F_to_bytes(&self.s)); + res.extend(self.R.to_bytes().as_ref()); + res.extend(self.s.to_repr().as_ref()); res } } diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index 902607f7..ba2b8203 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -3,12 +3,12 @@ use std::{sync::Arc, collections::HashMap}; use rand_core::{RngCore, CryptoRng}; -use group::ff::Field; +use group::{ff::{Field, PrimeField}, GroupEncoding}; use transcript::Transcript; use crate::{ - curve::Curve, + curve::{Curve, F_from_slice, G_from_slice}, FrostError, FrostParams, FrostKeys, FrostView, algorithm::Algorithm, @@ -85,8 +85,8 @@ fn preprocess>( C::random_nonce(params.view().secret_share(), &mut *rng) ]; let commitments = [C::GENERATOR_TABLE * nonces[0], C::GENERATOR_TABLE * nonces[1]]; - let mut serialized = C::G_to_bytes(&commitments[0]); - serialized.extend(&C::G_to_bytes(&commitments[1])); + let mut serialized = commitments[0].to_bytes().as_ref().to_vec(); + 
serialized.extend(commitments[1].to_bytes().as_ref()); serialized.extend( ¶ms.algorithm.preprocess_addendum( @@ -129,7 +129,7 @@ fn sign_with_share>( transcript.domain_separate(b"FROST"); // Include the offset, if one exists if let Some(offset) = params.keys.offset { - transcript.append_message(b"offset", &C::F_to_bytes(&offset)); + transcript.append_message(b"offset", offset.to_repr().as_ref()); } } @@ -148,7 +148,7 @@ fn sign_with_share>( let mut read_commitment = |c, label| { let commitment = &commitments[c .. (c + C::G_len())]; transcript.append_message(label, commitment); - C::G_from_slice(commitment).map_err(|_| FrostError::InvalidCommitment(*l)) + G_from_slice::(commitment).map_err(|_| FrostError::InvalidCommitment(*l)) }; #[allow(non_snake_case)] @@ -176,15 +176,13 @@ fn sign_with_share>( let R = { B.values().map(|B| B[0]).sum::() + (B.values().map(|B| B[1]).sum::() * binding) }; - let share = C::F_to_bytes( - ¶ms.algorithm.sign_share( - ¶ms.view, - R, - binding, - our_preprocess.nonces[0] + (our_preprocess.nonces[1] * binding), - msg - ) - ); + let share = params.algorithm.sign_share( + ¶ms.view, + R, + binding, + our_preprocess.nonces[0] + (our_preprocess.nonces[1] * binding), + msg + ).to_repr().as_ref().to_vec(); Ok((Package { B, binding, R, share: share.clone() }, share)) } @@ -203,7 +201,7 @@ fn complete>( let mut responses = HashMap::new(); let mut sum = C::F::zero(); for l in &sign_params.view.included { - let part = C::F_from_slice(&shares[l]).map_err(|_| FrostError::InvalidShare(*l))?; + let part = F_from_slice::(&shares[l]).map_err(|_| FrostError::InvalidShare(*l))?; sum += part; responses.insert(*l, part); } diff --git a/crypto/frost/src/tests/schnorr.rs b/crypto/frost/src/tests/schnorr.rs index 7ef1806a..6450a845 100644 --- a/crypto/frost/src/tests/schnorr.rs +++ b/crypto/frost/src/tests/schnorr.rs @@ -2,7 +2,7 @@ use std::{marker::PhantomData, sync::Arc, collections::HashMap}; use rand_core::{RngCore, CryptoRng}; -use group::ff::Field; +use 
group::{ff::Field, GroupEncoding}; use crate::{ Curve, FrostKeys, schnorr::{self, SchnorrSignature}, algorithm::{Hram, Schnorr}, @@ -96,7 +96,7 @@ pub struct TestHram { impl Hram for TestHram { #[allow(non_snake_case)] fn hram(R: &C::G, A: &C::G, m: &[u8]) -> C::F { - C::hash_to_F(b"challenge", &[&C::G_to_bytes(R), &C::G_to_bytes(A), m].concat()) + C::hash_to_F(b"challenge", &[R.to_bytes().as_ref(), A.to_bytes().as_ref(), m].concat()) } } diff --git a/crypto/frost/src/tests/vectors.rs b/crypto/frost/src/tests/vectors.rs index 00f745fb..7a5d1af5 100644 --- a/crypto/frost/src/tests/vectors.rs +++ b/crypto/frost/src/tests/vectors.rs @@ -2,8 +2,10 @@ use std::{sync::Arc, collections::HashMap}; use rand_core::{RngCore, CryptoRng}; +use group::{ff::PrimeField, GroupEncoding}; + use crate::{ - Curve, FrostKeys, + curve::{Curve, F_from_slice, G_from_slice}, FrostKeys, algorithm::{Schnorr, Hram}, sign::{PreprocessPackage, SignMachine, SignatureMachine, AlgorithmMachine}, tests::{curve::test_curve, schnorr::test_schnorr, recover} @@ -25,7 +27,7 @@ pub struct Vectors { // Load these vectors into FrostKeys using a custom serialization it'll deserialize fn vectors_to_multisig_keys(vectors: &Vectors) -> HashMap> { let shares = vectors.shares.iter().map( - |secret| C::F_from_slice(&hex::decode(secret).unwrap()).unwrap() + |secret| F_from_slice::(&hex::decode(secret).unwrap()).unwrap() ).collect::>(); let verification_shares = shares.iter().map( |secret| C::GENERATOR * secret @@ -39,10 +41,10 @@ fn vectors_to_multisig_keys(vectors: &Vectors) -> HashMap::deserialize(&serialized).unwrap(); @@ -50,7 +52,7 @@ fn vectors_to_multisig_keys(vectors: &Vectors) -> HashMap(&vectors); - let group_key = C::G_from_slice(&hex::decode(vectors.group_key).unwrap()).unwrap(); + let group_key = G_from_slice::(&hex::decode(vectors.group_key).unwrap()).unwrap(); assert_eq!( - C::GENERATOR * C::F_from_slice(&hex::decode(vectors.group_secret).unwrap()).unwrap(), + C::GENERATOR * 
F_from_slice::(&hex::decode(vectors.group_secret).unwrap()).unwrap(), group_key ); assert_eq!( recover(&keys), - C::F_from_slice(&hex::decode(vectors.group_secret).unwrap()).unwrap() + F_from_slice::(&hex::decode(vectors.group_secret).unwrap()).unwrap() ); let mut machines = vec![]; @@ -94,13 +96,13 @@ pub fn test_with_vectors< let mut c = 0; let mut machines = machines.drain(..).map(|(i, machine)| { let nonces = [ - C::F_from_slice(&hex::decode(vectors.nonces[c][0]).unwrap()).unwrap(), - C::F_from_slice(&hex::decode(vectors.nonces[c][1]).unwrap()).unwrap() + F_from_slice::(&hex::decode(vectors.nonces[c][0]).unwrap()).unwrap(), + F_from_slice::(&hex::decode(vectors.nonces[c][1]).unwrap()).unwrap() ]; c += 1; - let mut serialized = C::G_to_bytes(&(C::GENERATOR * nonces[0])); - serialized.extend(&C::G_to_bytes(&(C::GENERATOR * nonces[1]))); + let mut serialized = (C::GENERATOR * nonces[0]).to_bytes().as_ref().to_vec(); + serialized.extend((C::GENERATOR * nonces[1]).to_bytes().as_ref()); let (machine, serialized) = machine.unsafe_override_preprocess( PreprocessPackage { nonces, serialized: serialized.clone() } @@ -127,8 +129,8 @@ pub fn test_with_vectors< for (_, machine) in machines.drain() { let sig = machine.complete(shares.clone()).unwrap(); - let mut serialized = C::G_to_bytes(&sig.R); - serialized.extend(C::F_to_bytes(&sig.s)); + let mut serialized = sig.R.to_bytes().as_ref().to_vec(); + serialized.extend(sig.s.to_repr().as_ref()); assert_eq!(hex::encode(serialized), vectors.sig); } } diff --git a/processor/Cargo.toml b/processor/Cargo.toml index 131b3106..cf2eb7a3 100644 --- a/processor/Cargo.toml +++ b/processor/Cargo.toml @@ -18,6 +18,8 @@ serde_json = "1.0" curve25519-dalek = { version = "3", features = ["std"] } blake2 = "0.10" +group = "0.12" + transcript = { package = "flexible-transcript", path = "../crypto/transcript", features = ["recommended"] } dalek-ff-group = { path = "../crypto/dalek-ff-group" } frost = { package = "modular-frost", path = 
"../crypto/frost" } diff --git a/processor/src/wallet.rs b/processor/src/wallet.rs index 79458ce1..2b9bbf37 100644 --- a/processor/src/wallet.rs +++ b/processor/src/wallet.rs @@ -2,8 +2,9 @@ use std::{sync::Arc, collections::HashMap}; use rand_core::OsRng; -use transcript::{Transcript, RecommendedTranscript}; +use group::GroupEncoding; +use transcript::{Transcript, RecommendedTranscript}; use frost::{curve::Curve, FrostKeys, sign::{PreprocessMachine, SignMachine, SignatureMachine}}; use crate::{coin::{CoinError, Output, Coin}, SignError, Network}; @@ -31,7 +32,7 @@ impl WalletKeys { let mut transcript = RecommendedTranscript::new(DST); transcript.append_message(b"chain", chain); transcript.append_message(b"curve", C::ID); - transcript.append_message(b"group_key", &C::G_to_bytes(&self.keys.group_key())); + transcript.append_message(b"group_key", self.keys.group_key().to_bytes().as_ref()); self.keys.offset(C::hash_to_F(DST, &transcript.challenge(b"offset"))) } } From 1430b189bfd7561f7ecb6937bd2c0de528a0b365 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 28 Jun 2022 04:02:56 -0400 Subject: [PATCH 059/105] Enable no_std on transcript Removes the Vec challenge for an associated type. Fixes the merlin feature which was horribly broken. Also adds no_std to dalek-ff-group. 
--- crypto/dalek-ff-group/src/lib.rs | 2 ++ crypto/frost/src/algorithm.rs | 2 ++ crypto/frost/src/sign.rs | 2 +- crypto/transcript/Cargo.toml | 2 +- crypto/transcript/src/lib.rs | 34 +++++++++++++++++++------------- crypto/transcript/src/merlin.rs | 27 +++++++++++++------------ 6 files changed, 40 insertions(+), 29 deletions(-) diff --git a/crypto/dalek-ff-group/src/lib.rs b/crypto/dalek-ff-group/src/lib.rs index eea21cfb..5340b7e2 100644 --- a/crypto/dalek-ff-group/src/lib.rs +++ b/crypto/dalek-ff-group/src/lib.rs @@ -1,3 +1,5 @@ +#![no_std] + use core::{ ops::{Deref, Add, AddAssign, Sub, SubAssign, Neg, Mul, MulAssign}, borrow::Borrow, diff --git a/crypto/frost/src/algorithm.rs b/crypto/frost/src/algorithm.rs index 21d51521..70f6cf92 100644 --- a/crypto/frost/src/algorithm.rs +++ b/crypto/frost/src/algorithm.rs @@ -62,6 +62,8 @@ pub trait Algorithm: Clone { #[derive(Clone, Debug)] pub struct IetfTranscript(Vec); impl Transcript for IetfTranscript { + type Challenge = Vec; + fn domain_separate(&mut self, _: &[u8]) {} fn append_message(&mut self, _: &'static [u8], message: &[u8]) { diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index ba2b8203..c01dbe63 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -164,7 +164,7 @@ fn sign_with_share>( transcript.append_message(b"message", &C::hash_msg(&msg)); // Calculate the binding factor - C::hash_binding_factor(&transcript.challenge(b"binding")) + C::hash_binding_factor(transcript.challenge(b"binding").as_ref()) }; // Process the addendums diff --git a/crypto/transcript/Cargo.toml b/crypto/transcript/Cargo.toml index a5d366c2..06790af4 100644 --- a/crypto/transcript/Cargo.toml +++ b/crypto/transcript/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "flexible-transcript" -version = "0.1.1" +version = "0.1.2" description = "A simple transcript trait definition, along with viable options" license = "MIT" repository = "https://github.com/serai-dex/serai" diff --git 
a/crypto/transcript/src/lib.rs b/crypto/transcript/src/lib.rs index f01215fb..c11dd38e 100644 --- a/crypto/transcript/src/lib.rs +++ b/crypto/transcript/src/lib.rs @@ -1,16 +1,18 @@ -use core::fmt::Debug; +#![no_std] -#[cfg(features = "merlin")] +#[cfg(feature = "merlin")] mod merlin; -#[cfg(features = "merlin")] -pub use merlin::MerlinTranscript; +#[cfg(feature = "merlin")] +pub use crate::merlin::MerlinTranscript; -use digest::{typenum::type_operators::IsGreaterOrEqual, consts::U256, Digest}; +use digest::{typenum::type_operators::IsGreaterOrEqual, consts::U256, Digest, Output}; pub trait Transcript { + type Challenge: Clone + Send + Sync + AsRef<[u8]>; + fn domain_separate(&mut self, label: &'static [u8]); fn append_message(&mut self, label: &'static [u8], message: &[u8]); - fn challenge(&mut self, label: &'static [u8]) -> Vec; + fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge; fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32]; } @@ -34,10 +36,13 @@ impl DigestTranscriptMember { } } -#[derive(Clone, Debug)] -pub struct DigestTranscript(D) where D::OutputSize: IsGreaterOrEqual; +pub trait SecureDigest: Clone + Digest {} +impl SecureDigest for D where D::OutputSize: IsGreaterOrEqual {} -impl DigestTranscript where D::OutputSize: IsGreaterOrEqual { +#[derive(Clone, Debug)] +pub struct DigestTranscript(D); + +impl DigestTranscript { fn append(&mut self, kind: DigestTranscriptMember, value: &[u8]) { self.0.update(&[kind.as_u8()]); // Assumes messages don't exceed 16 exabytes @@ -52,8 +57,9 @@ impl DigestTranscript where D::OutputSize: IsGreaterOrEqua } } -impl Transcript for DigestTranscript - where D::OutputSize: IsGreaterOrEqual { +impl Transcript for DigestTranscript { + type Challenge = Output; + fn domain_separate(&mut self, label: &[u8]) { self.append(DigestTranscriptMember::Domain, label); } @@ -63,14 +69,14 @@ impl Transcript for DigestTranscript self.append(DigestTranscriptMember::Value, message); } - fn challenge(&mut self, label: 
&'static [u8]) -> Vec { + fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge { self.append(DigestTranscriptMember::Challenge, label); - self.0.clone().finalize().to_vec() + self.0.clone().finalize() } fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32] { let mut seed = [0; 32]; - seed.copy_from_slice(&self.challenge(label)[0 .. 32]); + seed.copy_from_slice(&self.challenge(label)[.. 32]); seed } } diff --git a/crypto/transcript/src/merlin.rs b/crypto/transcript/src/merlin.rs index b3d2ab50..d0c60cc9 100644 --- a/crypto/transcript/src/merlin.rs +++ b/crypto/transcript/src/merlin.rs @@ -1,15 +1,22 @@ -use core::{marker::PhantomData, fmt::{Debug, Formatter}}; +use core::fmt::{Debug, Formatter}; -use digest::Digest; +use crate::Transcript; -#[derive(Clone, PartialEq)] +#[derive(Clone)] pub struct MerlinTranscript(pub merlin::Transcript); // Merlin doesn't implement Debug so provide a stub which won't panic impl Debug for MerlinTranscript { - fn fmt(&self, _: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { Ok(()) } + fn fmt(&self, _: &mut Formatter<'_>) -> Result<(), core::fmt::Error> { Ok(()) } } impl Transcript for MerlinTranscript { + // Uses a challenge length of 64 bytes to support wide reduction on generated scalars + // From a security level standpoint, this should just be 32 bytes + // From a Merlin standpoint, this should be variable per call + // From a practical standpoint, this is a demo file not planned to be used and anything using + // this wrapper should be secure with this setting + type Challenge = [u8; 64]; + fn domain_separate(&mut self, label: &'static [u8]) { self.append_message(b"dom-sep", label); } @@ -18,21 +25,15 @@ impl Transcript for MerlinTranscript { self.0.append_message(label, message); } - fn challenge(&mut self, label: &'static [u8]) -> Vec { - let mut challenge = vec![]; - // Uses a challenge length of 64 bytes to support wide reduction on generated scalars - // From a security level standpoint, this should just 
be 32 bytes - // From a Merlin standpoint, this should be variable per call - // From a practical standpoint, this is a demo file not planned to be used and anything using - // this wrapper is fine without any settings it uses - challenge.resize(64, 0); + fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge { + let mut challenge = [0; 64]; self.0.challenge_bytes(label, &mut challenge); challenge } fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32] { let mut seed = [0; 32]; - transcript.challenge_bytes(label, &mut seed); + seed.copy_from_slice(&self.challenge(label)[.. 32]); seed } } From f125f441ba72a6469bf9165f51b1b0c2f8719515 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 28 Jun 2022 22:21:17 -0400 Subject: [PATCH 060/105] Bump dalek-ff-group version so its prime subgroup edit can be published --- crypto/dalek-ff-group/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/dalek-ff-group/Cargo.toml b/crypto/dalek-ff-group/Cargo.toml index 11515716..0904138f 100644 --- a/crypto/dalek-ff-group/Cargo.toml +++ b/crypto/dalek-ff-group/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "dalek-ff-group" -version = "0.1.0" +version = "0.1.1" description = "ff/group bindings around curve25519-dalek" license = "MIT" repository = "https://github.com/serai-dex/serai" From 0a690f5632de56b8300fa4fcac93a0da492abd0b Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 30 Jun 2022 03:16:51 -0400 Subject: [PATCH 061/105] Update the reference link for Guaranteed Addresses Also lints Cargo.toml. 
--- Cargo.toml | 8 ++++++-- coins/monero/src/wallet/mod.rs | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 19cb80f8..0703ddde 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,9 +2,13 @@ members = [ "crypto/transcript", - "crypto/multiexp", - "crypto/frost", + "crypto/dalek-ff-group", + "crypto/multiexp", + + "crypto/frost", + "coins/monero", + "processor", ] diff --git a/coins/monero/src/wallet/mod.rs b/coins/monero/src/wallet/mod.rs index ca694744..271c0729 100644 --- a/coins/monero/src/wallet/mod.rs +++ b/coins/monero/src/wallet/mod.rs @@ -23,7 +23,7 @@ fn key_image_sort(x: &EdwardsPoint, y: &EdwardsPoint) -> std::cmp::Ordering { x.compress().to_bytes().cmp(&y.compress().to_bytes()).reverse() } -// https://github.com/monero-project/research-lab/issues/103 +// https://gist.github.com/kayabaNerve/8066c13f1fe1573286ba7a2fd79f6100 pub(crate) fn uniqueness(inputs: &[Input]) -> [u8; 32] { let mut u = b"uniqueness".to_vec(); for input in inputs { From 2e168204f073e72549c661f61293beaa82004d7f Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 30 Jun 2022 03:17:15 -0400 Subject: [PATCH 062/105] Implement PrimeFieldBits for dalek-ff-group --- crypto/dalek-ff-group/Cargo.toml | 1 + crypto/dalek-ff-group/src/lib.rs | 18 +++++++++++++++++- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/crypto/dalek-ff-group/Cargo.toml b/crypto/dalek-ff-group/Cargo.toml index 0904138f..3b78578f 100644 --- a/crypto/dalek-ff-group/Cargo.toml +++ b/crypto/dalek-ff-group/Cargo.toml @@ -14,6 +14,7 @@ digest = "0.10" subtle = "2.4" +ff = "0.12" group = "0.12" curve25519-dalek = "3.2" diff --git a/crypto/dalek-ff-group/src/lib.rs b/crypto/dalek-ff-group/src/lib.rs index 5340b7e2..e2ed5e75 100644 --- a/crypto/dalek-ff-group/src/lib.rs +++ b/crypto/dalek-ff-group/src/lib.rs @@ -29,7 +29,8 @@ use dalek::{ } }; -use group::{ff::{Field, PrimeField}, Group, GroupEncoding, prime::PrimeGroup}; +use ff::{Field, PrimeField, FieldBits, 
PrimeFieldBits}; +use group::{Group, GroupEncoding, prime::PrimeGroup}; macro_rules! deref_borrow { ($Source: ident, $Target: ident) => { @@ -190,6 +191,21 @@ impl PrimeField for Scalar { fn root_of_unity() -> Self { unimplemented!() } } +impl PrimeFieldBits for Scalar { + type ReprBits = [u8; 32]; + + fn to_le_bits(&self) -> FieldBits { + self.to_repr().into() + } + + fn char_le_bits() -> FieldBits { + let mut bytes = (Scalar::zero() - Scalar::one()).to_repr(); + bytes[0] += 1; + debug_assert_eq!(Scalar::from_bytes_mod_order(bytes), Scalar::zero()); + bytes.into() + } +} + macro_rules! dalek_group { ( $Point: ident, From 5d115f1e1c0de3739141f5896f1b9e720d7b27dd Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 30 Jun 2022 05:42:29 -0400 Subject: [PATCH 063/105] Implement a DLEq library While Serai only needs the simple DLEq which was already present under monero, this migrates the implementation of the cross-group DLEq I maintain into Serai. This was to have full access to the ecosystem of libraries built under Serai while also ensuring support for it. The cross_group curve, which is extremely experimental, is feature flagged off. So is the built in serialization functionality, as this should be possible to make nostd once const generics are full featured, yet the implemented serialization adds the additional barrier of std::io. 
--- Cargo.toml | 1 + coins/monero/Cargo.toml | 3 +- coins/monero/src/frost.rs | 132 ++------ coins/monero/src/ringct/clsag/multisig.rs | 16 +- crypto/dleq/Cargo.toml | 29 ++ crypto/dleq/LICENSE | 21 ++ crypto/dleq/README.md | 10 + crypto/dleq/src/cross_group/mod.rs | 324 +++++++++++++++++++ crypto/dleq/src/cross_group/scalar.rs | 34 ++ crypto/dleq/src/cross_group/schnorr.rs | 71 ++++ crypto/dleq/src/lib.rs | 149 +++++++++ crypto/dleq/src/tests/cross_group/mod.rs | 54 ++++ crypto/dleq/src/tests/cross_group/scalar.rs | 47 +++ crypto/dleq/src/tests/cross_group/schnorr.rs | 31 ++ crypto/dleq/src/tests/mod.rs | 43 +++ 15 files changed, 854 insertions(+), 111 deletions(-) create mode 100644 crypto/dleq/Cargo.toml create mode 100644 crypto/dleq/LICENSE create mode 100644 crypto/dleq/README.md create mode 100644 crypto/dleq/src/cross_group/mod.rs create mode 100644 crypto/dleq/src/cross_group/scalar.rs create mode 100644 crypto/dleq/src/cross_group/schnorr.rs create mode 100644 crypto/dleq/src/lib.rs create mode 100644 crypto/dleq/src/tests/cross_group/mod.rs create mode 100644 crypto/dleq/src/tests/cross_group/scalar.rs create mode 100644 crypto/dleq/src/tests/cross_group/schnorr.rs create mode 100644 crypto/dleq/src/tests/mod.rs diff --git a/Cargo.toml b/Cargo.toml index 0703ddde..816ed070 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,6 +6,7 @@ members = [ "crypto/dalek-ff-group", "crypto/multiexp", + "crypto/dleq", "crypto/frost", "coins/monero", diff --git a/coins/monero/Cargo.toml b/coins/monero/Cargo.toml index b24adce0..d2f40f8c 100644 --- a/coins/monero/Cargo.toml +++ b/coins/monero/Cargo.toml @@ -26,6 +26,7 @@ group = { version = "0.12", optional = true } dalek-ff-group = { path = "../../crypto/dalek-ff-group", optional = true } transcript = { package = "flexible-transcript", path = "../../crypto/transcript", features = ["recommended"], optional = true } frost = { package = "modular-frost", path = "../../crypto/frost", features = ["ed25519"], optional = true } 
+dleq = { path = "../../crypto/dleq", features = ["serialize"], optional = true } base58-monero = "1" monero = "0.16" @@ -38,7 +39,7 @@ reqwest = { version = "0.11", features = ["json"] } [features] experimental = [] -multisig = ["rand_chacha", "blake2", "group", "dalek-ff-group", "transcript", "frost"] +multisig = ["rand_chacha", "blake2", "group", "dalek-ff-group", "transcript", "frost", "dleq"] [dev-dependencies] sha2 = "0.10" diff --git a/coins/monero/src/frost.rs b/coins/monero/src/frost.rs index 69c4747e..ea36be25 100644 --- a/coins/monero/src/frost.rs +++ b/coins/monero/src/frost.rs @@ -1,20 +1,15 @@ -use core::convert::TryInto; +use std::{convert::TryInto, io::Cursor}; use thiserror::Error; use rand_core::{RngCore, CryptoRng}; -use group::GroupEncoding; +use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint}; -use curve25519_dalek::{ - constants::ED25519_BASEPOINT_TABLE as DTable, - scalar::Scalar as DScalar, - edwards::EdwardsPoint as DPoint -}; +use group::{Group, GroupEncoding}; -use transcript::{Transcript, RecommendedTranscript}; +use transcript::RecommendedTranscript; use dalek_ff_group as dfg; - -use crate::random_scalar; +use dleq::{Generators, DLEqProof}; #[derive(Clone, Error, Debug)] pub enum MultisigError { @@ -26,105 +21,34 @@ pub enum MultisigError { InvalidKeyImage(u16) } -// Used to prove legitimacy of key images and nonces which both involve other basepoints -#[derive(Clone)] -pub struct DLEqProof { - s: DScalar, - c: DScalar -} - #[allow(non_snake_case)] -impl DLEqProof { - fn challenge(H: &DPoint, xG: &DPoint, xH: &DPoint, rG: &DPoint, rH: &DPoint) -> DScalar { +pub(crate) fn write_dleq( + rng: &mut R, + H: EdwardsPoint, + x: Scalar +) -> Vec { + let mut res = Vec::with_capacity(64); + DLEqProof::prove( + rng, // Doesn't take in a larger transcript object due to the usage of this // Every prover would immediately write their own DLEq proof, when they can only do so in // the proper order if they want to reach consensus // It'd be a 
poor API to have CLSAG define a new transcript solely to pass here, just to try to // merge later in some form, when it should instead just merge xH (as it does) - let mut transcript = RecommendedTranscript::new(b"DLEq Proof"); - // Bit redundant, keeps things consistent - transcript.domain_separate(b"DLEq"); - // Doesn't include G which is constant, does include H which isn't, even though H manipulation - // shouldn't be possible in practice as it's independently calculated as a product of known data - transcript.append_message(b"H", &H.compress().to_bytes()); - transcript.append_message(b"xG", &xG.compress().to_bytes()); - transcript.append_message(b"xH", &xH.compress().to_bytes()); - transcript.append_message(b"rG", &rG.compress().to_bytes()); - transcript.append_message(b"rH", &rH.compress().to_bytes()); - DScalar::from_bytes_mod_order_wide( - &transcript.challenge(b"challenge").try_into().expect("Blake2b512 output wasn't 64 bytes") - ) - } - - pub fn prove( - rng: &mut R, - H: &DPoint, - secret: &DScalar - ) -> DLEqProof { - let r = random_scalar(rng); - let rG = &DTable * &r; - let rH = r * H; - - // We can frequently (always?) save a scalar mul if we accept xH as an arg, yet it opens room - // for incorrect data to be passed, and therefore faults, making it not worth having - // We could also return xH but... 
it's really micro-optimizing - let c = DLEqProof::challenge(H, &(secret * &DTable), &(secret * H), &rG, &rH); - let s = r + (c * secret); - - DLEqProof { s, c } - } - - pub fn verify( - &self, - H: &DPoint, - l: u16, - xG: &DPoint, - xH: &DPoint - ) -> Result<(), MultisigError> { - let s = self.s; - let c = self.c; - - let rG = (&s * &DTable) - (c * xG); - let rH = (s * H) - (c * xH); - - if c != DLEqProof::challenge(H, &xG, &xH, &rG, &rH) { - Err(MultisigError::InvalidDLEqProof(l))?; - } - - Ok(()) - } - - pub fn serialize( - &self - ) -> Vec { - let mut res = Vec::with_capacity(64); - res.extend(self.s.to_bytes()); - res.extend(self.c.to_bytes()); - res - } - - pub fn deserialize( - serialized: &[u8] - ) -> Option { - if serialized.len() != 64 { - return None; - } - - DScalar::from_canonical_bytes(serialized[0 .. 32].try_into().unwrap()).and_then( - |s| DScalar::from_canonical_bytes(serialized[32 .. 64].try_into().unwrap()).and_then( - |c| Some(DLEqProof { s, c }) - ) - ) - } + &mut RecommendedTranscript::new(b"DLEq Proof"), + Generators::new(dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(H)), + dfg::Scalar(x) + ).serialize(&mut res).unwrap(); + res } #[allow(non_snake_case)] pub(crate) fn read_dleq( serialized: &[u8], start: usize, - H: &DPoint, + H: EdwardsPoint, l: u16, - xG: &DPoint + xG: dfg::EdwardsPoint ) -> Result { if serialized.len() < start + 96 { Err(MultisigError::InvalidDLEqProof(l))?; @@ -132,17 +56,21 @@ pub(crate) fn read_dleq( let bytes = (&serialized[(start + 0) .. (start + 32)]).try_into().unwrap(); // dfg ensures the point is torsion free - let other = Option::::from( + let xH = Option::::from( dfg::EdwardsPoint::from_bytes(&bytes)).ok_or(MultisigError::InvalidDLEqProof(l) )?; // Ensure this is a canonical point - if other.to_bytes() != bytes { + if xH.to_bytes() != bytes { Err(MultisigError::InvalidDLEqProof(l))?; } - DLEqProof::deserialize(&serialized[(start + 32) .. (start + 96)]) - .ok_or(MultisigError::InvalidDLEqProof(l))? 
- .verify(H, l, xG, &other).map_err(|_| MultisigError::InvalidDLEqProof(l))?; + let proof = DLEqProof::::deserialize( + &mut Cursor::new(&serialized[(start + 32) .. (start + 96)]) + ).map_err(|_| MultisigError::InvalidDLEqProof(l))?; - Ok(other) + let mut transcript = RecommendedTranscript::new(b"DLEq Proof"); + proof.verify(&mut transcript, Generators::new(dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(H)), (xG, xH)) + .map_err(|_| MultisigError::InvalidDLEqProof(l))?; + + Ok(xH) } diff --git a/coins/monero/src/ringct/clsag/multisig.rs b/coins/monero/src/ringct/clsag/multisig.rs index 8d15b0e2..77adc0b1 100644 --- a/coins/monero/src/ringct/clsag/multisig.rs +++ b/coins/monero/src/ringct/clsag/multisig.rs @@ -19,7 +19,7 @@ use dalek_ff_group as dfg; use crate::{ hash_to_point, - frost::{MultisigError, DLEqProof, read_dleq}, + frost::{MultisigError, write_dleq, read_dleq}, ringct::clsag::{ClsagInput, Clsag} }; @@ -133,12 +133,12 @@ impl Algorithm for ClsagMultisig { let mut serialized = Vec::with_capacity(ClsagMultisig::serialized_len()); serialized.extend((view.secret_share().0 * self.H).compress().to_bytes()); - serialized.extend(DLEqProof::prove(rng, &self.H, &view.secret_share().0).serialize()); + serialized.extend(write_dleq(rng, self.H, view.secret_share().0)); serialized.extend((nonces[0].0 * self.H).compress().to_bytes()); - serialized.extend(&DLEqProof::prove(rng, &self.H, &nonces[0].0).serialize()); + serialized.extend(write_dleq(rng, self.H, nonces[0].0)); serialized.extend((nonces[1].0 * self.H).compress().to_bytes()); - serialized.extend(&DLEqProof::prove(rng, &self.H, &nonces[1].0).serialize()); + serialized.extend(write_dleq(rng, self.H, nonces[1].0)); serialized } @@ -170,18 +170,18 @@ impl Algorithm for ClsagMultisig { self.image += read_dleq( serialized, cursor, - &self.H, + self.H, l, - &view.verification_share(l).0 + view.verification_share(l) ).map_err(|_| FrostError::InvalidCommitment(l))?.0; cursor += 96; 
self.transcript.append_message(b"commitment_D_H", &serialized[cursor .. (cursor + 32)]); - self.AH.0 += read_dleq(serialized, cursor, &self.H, l, &commitments[0]).map_err(|_| FrostError::InvalidCommitment(l))?; + self.AH.0 += read_dleq(serialized, cursor, self.H, l, commitments[0]).map_err(|_| FrostError::InvalidCommitment(l))?; cursor += 96; self.transcript.append_message(b"commitment_E_H", &serialized[cursor .. (cursor + 32)]); - self.AH.1 += read_dleq(serialized, cursor, &self.H, l, &commitments[1]).map_err(|_| FrostError::InvalidCommitment(l))?; + self.AH.1 += read_dleq(serialized, cursor, self.H, l, commitments[1]).map_err(|_| FrostError::InvalidCommitment(l))?; Ok(()) } diff --git a/crypto/dleq/Cargo.toml b/crypto/dleq/Cargo.toml new file mode 100644 index 00000000..f8d26a25 --- /dev/null +++ b/crypto/dleq/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "dleq" +version = "0.1.0" +description = "Implementation of single and cross-curve Discrete Log Equality proofs" +license = "MIT" +authors = ["Luke Parker "] +edition = "2021" + +[dependencies] +thiserror = "1" +rand_core = "0.6" + +ff = "0.12" +group = "0.12" + +transcript = { package = "flexible-transcript", path = "../transcript", version = "0.1" } + +[dev-dependencies] +hex-literal = "0.3" +k256 = { version = "0.11", features = ["arithmetic", "bits"] } +dalek-ff-group = { path = "../dalek-ff-group" } + +transcript = { package = "flexible-transcript", path = "../transcript", features = ["recommended"] } + +[features] +serialize = [] +cross_group = [] +secure_capacity_difference = [] +default = ["secure_capacity_difference"] diff --git a/crypto/dleq/LICENSE b/crypto/dleq/LICENSE new file mode 100644 index 00000000..c1f47de3 --- /dev/null +++ b/crypto/dleq/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020-2022 Luke Parker, Lee Bousfield + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the 
Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crypto/dleq/README.md b/crypto/dleq/README.md new file mode 100644 index 00000000..77465a4e --- /dev/null +++ b/crypto/dleq/README.md @@ -0,0 +1,10 @@ +# Discrete Log Equality + +Implementation of discrete log equality both within a group and across groups, +the latter being extremely experimental, for curves implementing the ff/group +APIs. This library has not undergone auditing. + +The cross-group DLEq is the one described in +https://web.getmonero.org/resources/research-lab/pubs/MRL-0010.pdf, augmented +with a pair of Schnorr Proof of Knowledges in order to correct for a mistake +present in the paper. 
diff --git a/crypto/dleq/src/cross_group/mod.rs b/crypto/dleq/src/cross_group/mod.rs new file mode 100644 index 00000000..e8146c4b --- /dev/null +++ b/crypto/dleq/src/cross_group/mod.rs @@ -0,0 +1,324 @@ +use thiserror::Error; +use rand_core::{RngCore, CryptoRng}; + +use transcript::Transcript; + +use group::{ff::{Field, PrimeField, PrimeFieldBits}, prime::PrimeGroup}; + +use crate::{Generators, challenge}; + +pub mod scalar; +use scalar::scalar_normalize; + +pub(crate) mod schnorr; +use schnorr::SchnorrPoK; + +#[cfg(feature = "serialize")] +use std::io::{Read, Write}; +#[cfg(feature = "serialize")] +use crate::read_scalar; + +#[cfg(feature = "serialize")] +pub(crate) fn read_point(r: &mut R) -> std::io::Result { + let mut repr = G::Repr::default(); + r.read_exact(repr.as_mut())?; + let point = G::from_bytes(&repr); + if point.is_none().into() { + Err(std::io::Error::new(std::io::ErrorKind::Other, "invalid point"))?; + } + Ok(point.unwrap()) +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Bit { + commitments: (G0, G1), + e: (G0::Scalar, G1::Scalar), + s: [(G0::Scalar, G1::Scalar); 2] +} + +impl Bit { + #[cfg(feature = "serialize")] + pub fn serialize(&self, w: &mut W) -> std::io::Result<()> { + w.write_all(self.commitments.0.to_bytes().as_ref())?; + w.write_all(self.commitments.1.to_bytes().as_ref())?; + w.write_all(self.e.0.to_repr().as_ref())?; + w.write_all(self.e.1.to_repr().as_ref())?; + for i in 0 .. 2 { + w.write_all(self.s[i].0.to_repr().as_ref())?; + w.write_all(self.s[i].1.to_repr().as_ref())?; + } + Ok(()) + } + + #[cfg(feature = "serialize")] + pub fn deserialize(r: &mut R) -> std::io::Result> { + Ok( + Bit { + commitments: (read_point(r)?, read_point(r)?), + e: (read_scalar(r)?, read_scalar(r)?), + s: [ + (read_scalar(r)?, read_scalar(r)?), + (read_scalar(r)?, read_scalar(r)?) 
+ ] + } + ) + } +} + +#[derive(Error, PartialEq, Eq, Debug)] +pub enum DLEqError { + #[error("invalid proof of knowledge")] + InvalidProofOfKnowledge, + #[error("invalid proof length")] + InvalidProofLength, + #[error("invalid proof")] + InvalidProof +} + +// Debug would be such a dump of data this likely isn't helpful, but at least it's available to +// anyone who wants it +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct DLEqProof { + bits: Vec>, + poks: (SchnorrPoK, SchnorrPoK) +} + +impl DLEqProof { + fn initialize_transcript( + transcript: &mut T, + generators: (Generators, Generators), + keys: (G0, G1) + ) { + generators.0.transcript(transcript); + generators.1.transcript(transcript); + transcript.domain_separate(b"points"); + transcript.append_message(b"point_0", keys.0.to_bytes().as_ref()); + transcript.append_message(b"point_1", keys.1.to_bytes().as_ref()); + } + + fn blinding_key( + rng: &mut R, + total: &mut F, + pow_2: &mut F, + last: bool + ) -> F { + let blinding_key = if last { + -*total * pow_2.invert().unwrap() + } else { + F::random(&mut *rng) + }; + *total += blinding_key * *pow_2; + *pow_2 = pow_2.double(); + blinding_key + } + + #[allow(non_snake_case)] + fn nonces(mut transcript: T, nonces: (G0, G1)) -> (G0::Scalar, G1::Scalar) { + transcript.append_message(b"nonce_0", nonces.0.to_bytes().as_ref()); + transcript.append_message(b"nonce_1", nonces.1.to_bytes().as_ref()); + (challenge(&mut transcript, b"challenge_G"), challenge(&mut transcript, b"challenge_H")) + } + + #[allow(non_snake_case)] + fn R_nonces( + transcript: T, + generators: (Generators, Generators), + s: (G0::Scalar, G1::Scalar), + A: (G0, G1), + e: (G0::Scalar, G1::Scalar) + ) -> (G0::Scalar, G1::Scalar) { + Self::nonces( + transcript, + (((generators.0.alt * s.0) - (A.0 * e.0)), ((generators.1.alt * s.1) - (A.1 * e.1))) + ) + } + + // TODO: Use multiexp here after https://github.com/serai-dex/serai/issues/17 + fn reconstruct_key(commitments: impl Iterator) -> G { + let mut 
pow_2 = G::Scalar::one(); + commitments.fold(G::identity(), |key, commitment| { + let res = key + (commitment * pow_2); + pow_2 = pow_2.double(); + res + }) + } + + fn reconstruct_keys(&self) -> (G0, G1) { + ( + Self::reconstruct_key(self.bits.iter().map(|bit| bit.commitments.0)), + Self::reconstruct_key(self.bits.iter().map(|bit| bit.commitments.1)) + ) + } + + fn transcript_bit(transcript: &mut T, i: usize, commitments: (G0, G1)) { + if i == 0 { + transcript.domain_separate(b"cross_group_dleq"); + } + transcript.append_message(b"bit", &u16::try_from(i).unwrap().to_le_bytes()); + transcript.append_message(b"commitment_0", commitments.0.to_bytes().as_ref()); + transcript.append_message(b"commitment_1", commitments.1.to_bytes().as_ref()); + } + + /// Prove the cross-Group Discrete Log Equality for the points derived from the provided Scalar. + /// Since DLEq is proven for the same Scalar in both fields, and the provided Scalar may not be + /// valid in the other Scalar field, the Scalar is normalized as needed and the normalized forms + /// are returned. These are the actually equal discrete logarithms. 
The passed in Scalar is + /// solely to enable various forms of Scalar generation, such as deterministic schemes + pub fn prove( + rng: &mut R, + transcript: &mut T, + generators: (Generators, Generators), + f: G0::Scalar + ) -> ( + Self, + (G0::Scalar, G1::Scalar) + ) where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { + // At least one bit will be dropped from either field element, making it irrelevant which one + // we get a random element in + let f = scalar_normalize::<_, G1::Scalar>(f); + + Self::initialize_transcript( + transcript, + generators, + ((generators.0.primary * f.0), (generators.1.primary * f.1)) + ); + + let poks = ( + SchnorrPoK::::prove(rng, transcript, generators.0.primary, f.0), + SchnorrPoK::::prove(rng, transcript, generators.1.primary, f.1) + ); + + let mut blinding_key_total = (G0::Scalar::zero(), G1::Scalar::zero()); + let mut pow_2 = (G0::Scalar::one(), G1::Scalar::one()); + + let raw_bits = f.0.to_le_bits(); + let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap(); + let mut bits = Vec::with_capacity(capacity); + for (i, bit) in raw_bits.iter().enumerate() { + let last = i == (capacity - 1); + let blinding_key = ( + Self::blinding_key(&mut *rng, &mut blinding_key_total.0, &mut pow_2.0, last), + Self::blinding_key(&mut *rng, &mut blinding_key_total.1, &mut pow_2.1, last) + ); + if last { + debug_assert_eq!(blinding_key_total.0, G0::Scalar::zero()); + debug_assert_eq!(blinding_key_total.1, G1::Scalar::zero()); + } + + let mut commitments = ( + (generators.0.alt * blinding_key.0), + (generators.1.alt * blinding_key.1) + ); + // TODO: Not constant time + if *bit { + commitments.0 += generators.0.primary; + commitments.1 += generators.1.primary; + } + Self::transcript_bit(transcript, i, commitments); + + let nonces = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); + let e_0 = Self::nonces( + transcript.clone(), + ((generators.0.alt * nonces.0), (generators.1.alt * nonces.1)) + ); + 
let s_0 = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); + + let e_1 = Self::R_nonces( + transcript.clone(), + generators, + (s_0.0, s_0.1), + if *bit { + commitments + } else { + ((commitments.0 - generators.0.primary), (commitments.1 - generators.1.primary)) + }, + e_0 + ); + let s_1 = (nonces.0 + (e_1.0 * blinding_key.0), nonces.1 + (e_1.1 * blinding_key.1)); + + bits.push( + if *bit { + Bit { commitments, e: e_0, s: [s_1, s_0] } + } else { + Bit { commitments, e: e_1, s: [s_0, s_1] } + } + ); + + if last { + break; + } + } + + let proof = DLEqProof { bits, poks }; + debug_assert_eq!( + proof.reconstruct_keys(), + (generators.0.primary * f.0, generators.1.primary * f.1) + ); + (proof, f) + } + + /// Verify a cross-Group Discrete Log Equality statement, returning the points proven for + pub fn verify( + &self, + transcript: &mut T, + generators: (Generators, Generators) + ) -> Result<(G0, G1), DLEqError> where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { + let capacity = G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY); + if self.bits.len() != capacity.try_into().unwrap() { + return Err(DLEqError::InvalidProofLength); + } + + let keys = self.reconstruct_keys(); + Self::initialize_transcript(transcript, generators, keys); + if !( + self.poks.0.verify(transcript, generators.0.primary, keys.0) && + self.poks.1.verify(transcript, generators.1.primary, keys.1) + ) { + Err(DLEqError::InvalidProofOfKnowledge)?; + } + + for (i, bit) in self.bits.iter().enumerate() { + Self::transcript_bit(transcript, i, bit.commitments); + + if bit.e != Self::R_nonces( + transcript.clone(), + generators, + bit.s[0], + ( + bit.commitments.0 - generators.0.primary, + bit.commitments.1 - generators.1.primary + ), + Self::R_nonces( + transcript.clone(), + generators, + bit.s[1], + bit.commitments, + bit.e + ) + ) { + return Err(DLEqError::InvalidProof); + } + } + + Ok(keys) + } + + #[cfg(feature = "serialize")] + pub fn serialize(&self, w: &mut W) -> 
std::io::Result<()> { + for bit in &self.bits { + bit.serialize(w)?; + } + self.poks.0.serialize(w)?; + self.poks.1.serialize(w) + } + + #[cfg(feature = "serialize")] + pub fn deserialize(r: &mut R) -> std::io::Result> { + let capacity = G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY); + let mut bits = Vec::with_capacity(capacity.try_into().unwrap()); + for _ in 0 .. capacity { + bits.push(Bit::deserialize(r)?); + } + Ok(DLEqProof { bits, poks: (SchnorrPoK::deserialize(r)?, SchnorrPoK::deserialize(r)?) }) + } +} diff --git a/crypto/dleq/src/cross_group/scalar.rs b/crypto/dleq/src/cross_group/scalar.rs new file mode 100644 index 00000000..8d922719 --- /dev/null +++ b/crypto/dleq/src/cross_group/scalar.rs @@ -0,0 +1,34 @@ +use ff::PrimeFieldBits; + +/// Convert a uniform scalar into one usable on both fields, clearing the top bits as needed +pub fn scalar_normalize(scalar: F0) -> (F0, F1) { + let mutual_capacity = F0::CAPACITY.min(F1::CAPACITY); + + // The security of a mutual key is the security of the lower field. Accordingly, this bans a + // difference of more than 4 bits + #[cfg(feature = "secure_capacity_difference")] + assert!((F0::CAPACITY.max(F1::CAPACITY) - mutual_capacity) < 4); + + let mut res1 = F0::zero(); + let mut res2 = F1::zero(); + // Uses the bit view API to ensure a consistent endianess + let mut bits = scalar.to_le_bits(); + // Convert it to big endian + bits.reverse(); + for bit in bits.iter().skip(bits.len() - usize::try_from(mutual_capacity).unwrap()) { + res1 = res1.double(); + res2 = res2.double(); + if *bit { + res1 += F0::one(); + res2 += F1::one(); + } + } + + (res1, res2) +} + +/// Helper to convert a scalar between fields. 
Returns None if the scalar isn't mutually valid +pub fn scalar_convert(scalar: F0) -> Option { + let (valid, converted) = scalar_normalize(scalar); + Some(converted).filter(|_| scalar == valid) +} diff --git a/crypto/dleq/src/cross_group/schnorr.rs b/crypto/dleq/src/cross_group/schnorr.rs new file mode 100644 index 00000000..cbb7cfc8 --- /dev/null +++ b/crypto/dleq/src/cross_group/schnorr.rs @@ -0,0 +1,71 @@ +use rand_core::{RngCore, CryptoRng}; + +use transcript::Transcript; + +use group::{ff::Field, prime::PrimeGroup}; + +use crate::challenge; + +#[cfg(feature = "serialize")] +use std::io::{Read, Write}; +#[cfg(feature = "serialize")] +use ff::PrimeField; +#[cfg(feature = "serialize")] +use crate::{read_scalar, cross_group::read_point}; + +#[allow(non_snake_case)] +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) struct SchnorrPoK { + R: G, + s: G::Scalar +} + +impl SchnorrPoK { + // Not hram due to the lack of m + #[allow(non_snake_case)] + fn hra(transcript: &mut T, generator: G, R: G, A: G) -> G::Scalar { + transcript.domain_separate(b"schnorr_proof_of_knowledge"); + transcript.append_message(b"generator", generator.to_bytes().as_ref()); + transcript.append_message(b"nonce", R.to_bytes().as_ref()); + transcript.append_message(b"public_key", A.to_bytes().as_ref()); + challenge(transcript, b"challenge") + } + + pub(crate) fn prove( + rng: &mut R, + transcript: &mut T, + generator: G, + private_key: G::Scalar + ) -> SchnorrPoK { + let nonce = G::Scalar::random(rng); + #[allow(non_snake_case)] + let R = generator * nonce; + SchnorrPoK { + R, + s: nonce + (private_key * SchnorrPoK::hra(transcript, generator, R, generator * private_key)) + } + } + + #[must_use] + pub(crate) fn verify( + &self, + transcript: &mut T, + generator: G, + public_key: G + ) -> bool { + (generator * self.s) == ( + self.R + (public_key * Self::hra(transcript, generator, self.R, public_key)) + ) + } + + #[cfg(feature = "serialize")] + pub fn serialize(&self, w: &mut W) -> 
std::io::Result<()> { + w.write_all(self.R.to_bytes().as_ref())?; + w.write_all(self.s.to_repr().as_ref()) + } + + #[cfg(feature = "serialize")] + pub fn deserialize(r: &mut R) -> std::io::Result> { + Ok(SchnorrPoK { R: read_point(r)?, s: read_scalar(r)? }) + } +} diff --git a/crypto/dleq/src/lib.rs b/crypto/dleq/src/lib.rs new file mode 100644 index 00000000..cc15775e --- /dev/null +++ b/crypto/dleq/src/lib.rs @@ -0,0 +1,149 @@ +use thiserror::Error; +use rand_core::{RngCore, CryptoRng}; + +use transcript::Transcript; + +use ff::{Field, PrimeField}; +use group::prime::PrimeGroup; + +#[cfg(feature = "serialize")] +use std::io::{self, ErrorKind, Error, Read, Write}; + +#[cfg(feature = "cross_group")] +pub mod cross_group; + +#[cfg(test)] +mod tests; + +#[derive(Clone, Copy, PartialEq, Eq)] +pub struct Generators { + primary: G, + alt: G +} + +impl Generators { + pub fn new(primary: G, alt: G) -> Generators { + Generators { primary, alt } + } + + fn transcript(&self, transcript: &mut T) { + transcript.domain_separate(b"generators"); + transcript.append_message(b"primary", self.primary.to_bytes().as_ref()); + transcript.append_message(b"alternate", self.alt.to_bytes().as_ref()); + } +} + +pub(crate) fn challenge( + transcript: &mut T, + label: &'static [u8] +) -> F { + assert!(F::NUM_BITS <= 384); + + // From here, there are three ways to get a scalar under the ff/group API + // 1: Scalar::random(ChaCha12Rng::from_seed(self.transcript.rng_seed(b"challenge"))) + // 2: Grabbing a UInt library to perform reduction by the modulus, then determining endianess + // and loading it in + // 3: Iterating over each byte and manually doubling/adding. This is simplest + let challenge_bytes = transcript.challenge(label); + assert!(challenge_bytes.as_ref().len() == 64); + + let mut challenge = F::zero(); + for b in challenge_bytes.as_ref() { + for _ in 0 .. 
8 { + challenge = challenge.double(); + } + challenge += F::from(u64::from(*b)); + } + challenge +} + +#[cfg(feature = "serialize")] +fn read_scalar(r: &mut R) -> io::Result { + let mut repr = F::Repr::default(); + r.read_exact(repr.as_mut())?; + let scalar = F::from_repr(repr); + if scalar.is_none().into() { + Err(Error::new(ErrorKind::Other, "invalid scalar"))?; + } + Ok(scalar.unwrap()) +} + +#[derive(Error, Debug)] +pub enum DLEqError { + #[error("invalid proof")] + InvalidProof +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct DLEqProof { + c: G::Scalar, + s: G::Scalar +} + +#[allow(non_snake_case)] +impl DLEqProof { + fn challenge( + transcript: &mut T, + generators: Generators, + nonces: (G, G), + points: (G, G) + ) -> G::Scalar { + generators.transcript(transcript); + transcript.domain_separate(b"dleq"); + transcript.append_message(b"nonce_primary", nonces.0.to_bytes().as_ref()); + transcript.append_message(b"nonce_alternate", nonces.1.to_bytes().as_ref()); + transcript.append_message(b"point_primary", points.0.to_bytes().as_ref()); + transcript.append_message(b"point_alternate", points.1.to_bytes().as_ref()); + challenge(transcript, b"challenge") + } + + pub fn prove( + rng: &mut R, + transcript: &mut T, + generators: Generators, + scalar: G::Scalar + ) -> DLEqProof { + let r = G::Scalar::random(rng); + let c = Self::challenge( + transcript, + generators, + (generators.primary * r, generators.alt * r), + (generators.primary * scalar, generators.alt * scalar) + ); + let s = r + (c * scalar); + + DLEqProof { c, s } + } + + pub fn verify( + &self, + transcript: &mut T, + generators: Generators, + points: (G, G) + ) -> Result<(), DLEqError> { + if self.c != Self::challenge( + transcript, + generators, + ( + (generators.primary * self.s) - (points.0 * self.c), + (generators.alt * self.s) - (points.1 * self.c) + ), + points + ) { + Err(DLEqError::InvalidProof)?; + } + + Ok(()) + } + + #[cfg(feature = "serialize")] + pub fn serialize(&self, w: &mut W) -> 
io::Result<()> { + w.write_all(self.c.to_repr().as_ref())?; + w.write_all(self.s.to_repr().as_ref()) + } + + #[cfg(feature = "serialize")] + pub fn deserialize(r: &mut R) -> io::Result> { + Ok(DLEqProof { c: read_scalar(r)?, s: read_scalar(r)? }) + } +} diff --git a/crypto/dleq/src/tests/cross_group/mod.rs b/crypto/dleq/src/tests/cross_group/mod.rs new file mode 100644 index 00000000..fd4b3e9b --- /dev/null +++ b/crypto/dleq/src/tests/cross_group/mod.rs @@ -0,0 +1,54 @@ +mod scalar; +mod schnorr; + +use hex_literal::hex; +use rand_core::OsRng; + +use ff::Field; +use group::{Group, GroupEncoding}; + +use k256::{Scalar, ProjectivePoint}; +use dalek_ff_group::{EdwardsPoint, CompressedEdwardsY}; + +use transcript::RecommendedTranscript; + +use crate::{Generators, cross_group::DLEqProof}; + +#[test] +fn test_dleq() { + let transcript = || RecommendedTranscript::new(b"Cross-Group DLEq Proof Test"); + + let generators = ( + Generators::new( + ProjectivePoint::GENERATOR, + ProjectivePoint::from_bytes( + &(hex!("0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0").into()) + ).unwrap() + ), + + Generators::new( + EdwardsPoint::generator(), + CompressedEdwardsY::new( + hex!("8b655970153799af2aeadc9ff1add0ea6c7251d54154cfa92c173a0dd39c1f94") + ).decompress().unwrap() + ) + ); + + let key = Scalar::random(&mut OsRng); + let (proof, keys) = DLEqProof::prove(&mut OsRng, &mut transcript(), generators, key); + + let public_keys = proof.verify(&mut transcript(), generators).unwrap(); + assert_eq!(generators.0.primary * keys.0, public_keys.0); + assert_eq!(generators.1.primary * keys.1, public_keys.1); + + #[cfg(feature = "serialize")] + { + let mut buf = vec![]; + proof.serialize(&mut buf).unwrap(); + let deserialized = DLEqProof::::deserialize( + &mut std::io::Cursor::new(&buf) + ).unwrap(); + assert_eq!(proof, deserialized); + deserialized.verify(&mut transcript(), generators).unwrap(); + } +} diff --git a/crypto/dleq/src/tests/cross_group/scalar.rs 
b/crypto/dleq/src/tests/cross_group/scalar.rs new file mode 100644 index 00000000..30495bb3 --- /dev/null +++ b/crypto/dleq/src/tests/cross_group/scalar.rs @@ -0,0 +1,47 @@ +use rand_core::OsRng; + +use ff::{Field, PrimeField}; + +use k256::Scalar as K256Scalar; +use dalek_ff_group::Scalar as DalekScalar; + +use crate::cross_group::scalar::{scalar_normalize, scalar_convert}; + +#[test] +fn test_scalar() { + assert_eq!( + scalar_normalize::<_, DalekScalar>(K256Scalar::zero()), + (K256Scalar::zero(), DalekScalar::zero()) + ); + + assert_eq!( + scalar_normalize::<_, DalekScalar>(K256Scalar::one()), + (K256Scalar::one(), DalekScalar::one()) + ); + + let mut initial; + while { + initial = K256Scalar::random(&mut OsRng); + let (k, ed) = scalar_normalize::<_, DalekScalar>(initial); + + // The initial scalar should equal the new scalar with Ed25519's capacity + let mut initial_bytes = (&initial.to_repr()).to_vec(); + // Drop the first 4 bits to hit 252 + initial_bytes[0] = initial_bytes[0] & 0b00001111; + let k_bytes = (&k.to_repr()).to_vec(); + assert_eq!(initial_bytes, k_bytes); + + let mut ed_bytes = ed.to_repr().as_ref().to_vec(); + // Reverse to big endian + ed_bytes.reverse(); + assert_eq!(k_bytes, ed_bytes); + + // Verify conversion works as expected + assert_eq!(scalar_convert::<_, DalekScalar>(k), Some(ed)); + + // Run this test again if this secp256k1 scalar didn't have any bits cleared + initial == k + } {} + // Verify conversion returns None when the scalar isn't mutually valid + assert!(scalar_convert::<_, DalekScalar>(initial).is_none()); +} diff --git a/crypto/dleq/src/tests/cross_group/schnorr.rs b/crypto/dleq/src/tests/cross_group/schnorr.rs new file mode 100644 index 00000000..8298afda --- /dev/null +++ b/crypto/dleq/src/tests/cross_group/schnorr.rs @@ -0,0 +1,31 @@ +use rand_core::OsRng; + +use group::{ff::Field, prime::PrimeGroup}; + +use transcript::RecommendedTranscript; + +use crate::cross_group::schnorr::SchnorrPoK; + +fn test_schnorr() { + let 
private = G::Scalar::random(&mut OsRng); + + let transcript = RecommendedTranscript::new(b"Schnorr Test"); + assert!( + SchnorrPoK::prove( + &mut OsRng, + &mut transcript.clone(), + G::generator(), + private + ).verify(&mut transcript.clone(), G::generator(), G::generator() * private) + ); +} + +#[test] +fn test_secp256k1() { + test_schnorr::(); +} + +#[test] +fn test_ed25519() { + test_schnorr::(); +} diff --git a/crypto/dleq/src/tests/mod.rs b/crypto/dleq/src/tests/mod.rs new file mode 100644 index 00000000..119bbc6b --- /dev/null +++ b/crypto/dleq/src/tests/mod.rs @@ -0,0 +1,43 @@ +#[cfg(feature = "cross_group")] +mod cross_group; + +use hex_literal::hex; +use rand_core::OsRng; + +use ff::Field; +use group::GroupEncoding; + +use k256::{Scalar, ProjectivePoint}; + +use transcript::RecommendedTranscript; + +use crate::{Generators, DLEqProof}; + +#[test] +fn test_dleq() { + let transcript = || RecommendedTranscript::new(b"DLEq Proof Test"); + + let generators = Generators::new( + ProjectivePoint::GENERATOR, + ProjectivePoint::from_bytes( + &(hex!("0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0").into()) + ).unwrap() + ); + + let key = Scalar::random(&mut OsRng); + let proof = DLEqProof::prove(&mut OsRng, &mut transcript(), generators, key); + + let keys = (generators.primary * key, generators.alt * key); + proof.verify(&mut transcript(), generators, keys).unwrap(); + + #[cfg(feature = "serialize")] + { + let mut buf = vec![]; + proof.serialize(&mut buf).unwrap(); + let deserialized = DLEqProof::::deserialize( + &mut std::io::Cursor::new(&buf) + ).unwrap(); + assert_eq!(proof, deserialized); + deserialized.verify(&mut transcript(), generators, keys).unwrap(); + } +} From 7890827a481419f4a5f66e879a1614ef8a3d1f07 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 30 Jun 2022 09:30:24 -0400 Subject: [PATCH 064/105] Implement variable-sized windows into multiexp Closes https://github.com/serai-dex/serai/issues/17 by using the PrimeFieldBits API to 
do so. Should greatly speed up small batches, along with batches in the hundreds. Saves almost a full second on the cross-group DLEq proof. --- crypto/dleq/Cargo.toml | 4 +- crypto/dleq/src/cross_group/mod.rs | 26 ++--- crypto/frost/Cargo.toml | 5 +- crypto/frost/src/curve/dalek.rs | 2 - crypto/frost/src/curve/kp256.rs | 2 - crypto/frost/src/curve/mod.rs | 8 +- crypto/frost/src/key_gen.rs | 4 +- crypto/frost/src/schnorr.rs | 2 +- crypto/frost/src/tests/curve.rs | 7 +- crypto/multiexp/Cargo.toml | 7 ++ crypto/multiexp/src/batch.rs | 22 ++-- crypto/multiexp/src/lib.rs | 157 +++++++++++++++++++++++++---- crypto/multiexp/src/pippenger.rs | 66 +++++------- crypto/multiexp/src/straus.rs | 66 +++++------- crypto/multiexp/src/tests/mod.rs | 112 ++++++++++++++++++++ 15 files changed, 342 insertions(+), 148 deletions(-) create mode 100644 crypto/multiexp/src/tests/mod.rs diff --git a/crypto/dleq/Cargo.toml b/crypto/dleq/Cargo.toml index f8d26a25..de5338b7 100644 --- a/crypto/dleq/Cargo.toml +++ b/crypto/dleq/Cargo.toml @@ -10,10 +10,12 @@ edition = "2021" thiserror = "1" rand_core = "0.6" +transcript = { package = "flexible-transcript", path = "../transcript", version = "0.1" } + ff = "0.12" group = "0.12" -transcript = { package = "flexible-transcript", path = "../transcript", version = "0.1" } +multiexp = { path = "../multiexp" } [dev-dependencies] hex-literal = "0.3" diff --git a/crypto/dleq/src/cross_group/mod.rs b/crypto/dleq/src/cross_group/mod.rs index e8146c4b..498d5f9f 100644 --- a/crypto/dleq/src/cross_group/mod.rs +++ b/crypto/dleq/src/cross_group/mod.rs @@ -83,7 +83,8 @@ pub struct DLEqProof { poks: (SchnorrPoK, SchnorrPoK) } -impl DLEqProof { +impl DLEqProof + where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { fn initialize_transcript( transcript: &mut T, generators: (Generators, Generators), @@ -134,13 +135,17 @@ impl DLEqProof { } // TODO: Use multiexp here after https://github.com/serai-dex/serai/issues/17 - fn reconstruct_key(commitments: impl 
Iterator) -> G { + fn reconstruct_key( + commitments: impl Iterator + ) -> G where G::Scalar: PrimeFieldBits { let mut pow_2 = G::Scalar::one(); - commitments.fold(G::identity(), |key, commitment| { - let res = key + (commitment * pow_2); - pow_2 = pow_2.double(); - res - }) + multiexp::multiexp_vartime( + &commitments.map(|commitment| { + let res = (pow_2, commitment); + pow_2 = pow_2.double(); + res + }).collect::>() + ) } fn reconstruct_keys(&self) -> (G0, G1) { @@ -169,10 +174,7 @@ impl DLEqProof { transcript: &mut T, generators: (Generators, Generators), f: G0::Scalar - ) -> ( - Self, - (G0::Scalar, G1::Scalar) - ) where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { + ) -> (Self, (G0::Scalar, G1::Scalar)) { // At least one bit will be dropped from either field element, making it irrelevant which one // we get a random element in let f = scalar_normalize::<_, G1::Scalar>(f); @@ -262,7 +264,7 @@ impl DLEqProof { &self, transcript: &mut T, generators: (Generators, Generators) - ) -> Result<(G0, G1), DLEqError> where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { + ) -> Result<(G0, G1), DLEqError> { let capacity = G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY); if self.bits.len() != capacity.try_into().unwrap() { return Err(DLEqError::InvalidProofLength); diff --git a/crypto/frost/Cargo.toml b/crypto/frost/Cargo.toml index 73dbc52d..436c3966 100644 --- a/crypto/frost/Cargo.toml +++ b/crypto/frost/Cargo.toml @@ -16,11 +16,12 @@ hex = "0.4" sha2 = { version = "0.10", optional = true } +ff = "0.12" group = "0.12" elliptic-curve = { version = "0.12", features = ["hash2curve"], optional = true } -p256 = { version = "0.11", features = ["arithmetic", "hash2curve"], optional = true } -k256 = { version = "0.11", features = ["arithmetic", "hash2curve"], optional = true } +p256 = { version = "0.11", features = ["arithmetic", "bits", "hash2curve"], optional = true } +k256 = { version = "0.11", features = ["arithmetic", "bits", "hash2curve"], optional = 
true } dalek-ff-group = { path = "../dalek-ff-group", version = "0.1", optional = true } transcript = { package = "flexible-transcript", path = "../transcript", version = "0.1" } diff --git a/crypto/frost/src/curve/dalek.rs b/crypto/frost/src/curve/dalek.rs index 07515eee..40e6c252 100644 --- a/crypto/frost/src/curve/dalek.rs +++ b/crypto/frost/src/curve/dalek.rs @@ -35,8 +35,6 @@ macro_rules! dalek_curve { const GENERATOR: Self::G = $POINT; const GENERATOR_TABLE: Self::T = &$TABLE; - const LITTLE_ENDIAN: bool = true; - fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F { let mut seed = vec![0; 32]; rng.fill_bytes(&mut seed); diff --git a/crypto/frost/src/curve/kp256.rs b/crypto/frost/src/curve/kp256.rs index 278e4eaa..9b1874d8 100644 --- a/crypto/frost/src/curve/kp256.rs +++ b/crypto/frost/src/curve/kp256.rs @@ -29,8 +29,6 @@ macro_rules! kp_curve { const GENERATOR: Self::G = $lib::ProjectivePoint::GENERATOR; const GENERATOR_TABLE: Self::G = $lib::ProjectivePoint::GENERATOR; - const LITTLE_ENDIAN: bool = false; - fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F { let mut seed = vec![0; 32]; rng.fill_bytes(&mut seed); diff --git a/crypto/frost/src/curve/mod.rs b/crypto/frost/src/curve/mod.rs index 2de31a2a..e08e2faf 100644 --- a/crypto/frost/src/curve/mod.rs +++ b/crypto/frost/src/curve/mod.rs @@ -4,7 +4,8 @@ use thiserror::Error; use rand_core::{RngCore, CryptoRng}; -use group::{ff::PrimeField, Group, GroupOps, prime::PrimeGroup}; +use ff::{PrimeField, PrimeFieldBits}; +use group::{Group, GroupOps, prime::PrimeGroup}; #[cfg(any(test, feature = "dalek"))] mod dalek; @@ -40,7 +41,7 @@ pub enum CurveError { pub trait Curve: Clone + Copy + PartialEq + Eq + Debug { /// Scalar field element type // This is available via G::Scalar yet `C::G::Scalar` is ambiguous, forcing horrific accesses - type F: PrimeField; + type F: PrimeField + PrimeFieldBits; /// Group element type type G: Group + GroupOps + PrimeGroup; /// Precomputed table type @@ -57,9 +58,6 @@ pub 
trait Curve: Clone + Copy + PartialEq + Eq + Debug { /// If there isn't a precomputed table available, the generator itself should be used const GENERATOR_TABLE: Self::T; - /// If little endian is used for the scalar field's Repr - const LITTLE_ENDIAN: bool; - /// Securely generate a random nonce. H4 from the IETF draft fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F; diff --git a/crypto/frost/src/key_gen.rs b/crypto/frost/src/key_gen.rs index 4f2832e5..e5b0f76f 100644 --- a/crypto/frost/src/key_gen.rs +++ b/crypto/frost/src/key_gen.rs @@ -224,7 +224,7 @@ fn complete_r2( res }; - let mut batch = BatchVerifier::new(shares.len(), C::LITTLE_ENDIAN); + let mut batch = BatchVerifier::new(shares.len()); for (l, share) in &shares { if *l == params.i() { continue; @@ -254,7 +254,7 @@ fn complete_r2( // Calculate each user's verification share let mut verification_shares = HashMap::new(); for i in 1 ..= params.n() { - verification_shares.insert(i, multiexp_vartime(&exponential(i, &stripes), C::LITTLE_ENDIAN)); + verification_shares.insert(i, multiexp_vartime(&exponential(i, &stripes))); } // Removing this check would enable optimizing the above from t + (n * t) to t + ((n - 1) * t) debug_assert_eq!(C::GENERATOR_TABLE * secret_share, verification_shares[¶ms.i()]); diff --git a/crypto/frost/src/schnorr.rs b/crypto/frost/src/schnorr.rs index 9424fd28..af9ff808 100644 --- a/crypto/frost/src/schnorr.rs +++ b/crypto/frost/src/schnorr.rs @@ -46,7 +46,7 @@ pub(crate) fn batch_verify( triplets: &[(u16, C::G, C::F, SchnorrSignature)] ) -> Result<(), u16> { let mut values = [(C::F::one(), C::GENERATOR); 3]; - let mut batch = BatchVerifier::new(triplets.len(), C::LITTLE_ENDIAN); + let mut batch = BatchVerifier::new(triplets.len()); for triple in triplets { // s = r + ca // sG == R + cA diff --git a/crypto/frost/src/tests/curve.rs b/crypto/frost/src/tests/curve.rs index 48dd78de..092ee50f 100644 --- a/crypto/frost/src/tests/curve.rs +++ b/crypto/frost/src/tests/curve.rs @@ 
-21,7 +21,8 @@ pub fn test_curve(rng: &mut R) { // TODO: Test the Curve functions themselves // Test successful multiexp, with enough pairs to trigger its variety of algorithms - // TODO: This should probably be under multiexp + // Multiexp has its own tests, yet only against k256 and Ed25519 (which should be sufficient + // as-is to prove multiexp), and this doesn't hurt { let mut pairs = Vec::with_capacity(1000); let mut sum = C::G::identity(); @@ -30,8 +31,8 @@ pub fn test_curve(rng: &mut R) { pairs.push((C::F::random(&mut *rng), C::GENERATOR * C::F::random(&mut *rng))); sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0; } - assert_eq!(multiexp::multiexp(&pairs, C::LITTLE_ENDIAN), sum); - assert_eq!(multiexp::multiexp_vartime(&pairs, C::LITTLE_ENDIAN), sum); + assert_eq!(multiexp::multiexp(&pairs), sum); + assert_eq!(multiexp::multiexp_vartime(&pairs), sum); } } diff --git a/crypto/multiexp/Cargo.toml b/crypto/multiexp/Cargo.toml index c4c73690..0342f0ee 100644 --- a/crypto/multiexp/Cargo.toml +++ b/crypto/multiexp/Cargo.toml @@ -9,9 +9,16 @@ keywords = ["multiexp", "ff", "group"] edition = "2021" [dependencies] +ff = "0.12" group = "0.12" rand_core = { version = "0.6", optional = true } +[dev-dependencies] +rand_core = "0.6" + +k256 = { version = "0.11", features = ["bits"] } +dalek-ff-group = { path = "../dalek-ff-group" } + [features] batch = ["rand_core"] diff --git a/crypto/multiexp/src/batch.rs b/crypto/multiexp/src/batch.rs index 6962ea86..5b5d65fb 100644 --- a/crypto/multiexp/src/batch.rs +++ b/crypto/multiexp/src/batch.rs @@ -1,16 +1,17 @@ use rand_core::{RngCore, CryptoRng}; -use group::{ff::Field, Group}; +use ff::{Field, PrimeFieldBits}; +use group::Group; use crate::{multiexp, multiexp_vartime}; #[cfg(feature = "batch")] -pub struct BatchVerifier(Vec<(Id, Vec<(G::Scalar, G)>)>, bool); +pub struct BatchVerifier(Vec<(Id, Vec<(G::Scalar, G)>)>); #[cfg(feature = "batch")] -impl BatchVerifier { - pub fn new(capacity: usize, endian: bool) -> 
BatchVerifier { - BatchVerifier(Vec::with_capacity(capacity), endian) +impl BatchVerifier where ::Scalar: PrimeFieldBits { + pub fn new(capacity: usize) -> BatchVerifier { + BatchVerifier(Vec::with_capacity(capacity)) } pub fn queue< @@ -28,15 +29,13 @@ impl BatchVerifier { pub fn verify(&self) -> bool { multiexp( - &self.0.iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::>(), - self.1 + &self.0.iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::>() ).is_identity().into() } pub fn verify_vartime(&self) -> bool { multiexp_vartime( - &self.0.iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::>(), - self.1 + &self.0.iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::>() ).is_identity().into() } @@ -46,8 +45,7 @@ impl BatchVerifier { while slice.len() > 1 { let split = slice.len() / 2; if multiexp_vartime( - &slice[.. split].iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::>(), - self.1 + &slice[.. split].iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::>() ).is_identity().into() { slice = &slice[split ..]; } else { @@ -56,7 +54,7 @@ impl BatchVerifier { } slice.get(0).filter( - |(_, value)| !bool::from(multiexp_vartime(value, self.1).is_identity()) + |(_, value)| !bool::from(multiexp_vartime(value).is_identity()) ).map(|(id, _)| *id) } diff --git a/crypto/multiexp/src/lib.rs b/crypto/multiexp/src/lib.rs index 51651e64..ca1b6495 100644 --- a/crypto/multiexp/src/lib.rs +++ b/crypto/multiexp/src/lib.rs @@ -1,3 +1,4 @@ +use ff::PrimeFieldBits; use group::Group; mod straus; @@ -11,39 +12,151 @@ mod batch; #[cfg(feature = "batch")] pub use batch::BatchVerifier; -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -enum Algorithm { - Straus, - Pippenger +#[cfg(test)] +mod tests; + +pub(crate) fn prep_bits( + pairs: &[(G::Scalar, G)], + window: u8 +) -> Vec> where G::Scalar: PrimeFieldBits { + let w_usize = usize::from(window); + + let mut groupings = vec![]; + for pair in pairs { + let p = groupings.len(); + let bits = 
pair.0.to_le_bits(); + groupings.push(vec![0; (bits.len() + (w_usize - 1)) / w_usize]); + + for (i, bit) in bits.into_iter().enumerate() { + let bit = bit as u8; + debug_assert_eq!(bit | 1, 1); + groupings[p][i / w_usize] |= bit << (i % w_usize); + } + } + + groupings } -fn algorithm(pairs: usize) -> Algorithm { - // TODO: Replace this with an actual formula determining which will use less additions - // Right now, Straus is used until 600, instead of the far more accurate 300, as Pippenger - // operates per byte instead of per nibble, and therefore requires a much longer series to be - // performant - // Technically, 800 is dalek's number for when to use byte Pippenger, yet given Straus's own - // implementation limitations... - if pairs < 600 { - Algorithm::Straus +pub(crate) fn prep_tables( + pairs: &[(G::Scalar, G)], + window: u8 +) -> Vec> { + let mut tables = Vec::with_capacity(pairs.len()); + for pair in pairs { + let p = tables.len(); + tables.push(vec![G::identity(); 2_usize.pow(window.into())]); + let mut accum = G::identity(); + for i in 1 .. 
tables[p].len() { + accum += pair.1; + tables[p][i] = accum; + } + } + tables +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +enum Algorithm { + Straus(u8), + Pippenger(u8) +} + +/* +Release (with runs 20, so all of these are off by 20x): + +k256 +Straus 3 is more efficient at 5 with 678µs per +Straus 4 is more efficient at 10 with 530µs per +Straus 5 is more efficient at 35 with 467µs per + +Pippenger 5 is more efficient at 125 with 431µs per +Pippenger 6 is more efficient at 275 with 349µs per +Pippenger 7 is more efficient at 375 with 360µs per + +dalek +Straus 3 is more efficient at 5 with 519µs per +Straus 4 is more efficient at 10 with 376µs per +Straus 5 is more efficient at 170 with 330µs per + +Pippenger 5 is more efficient at 125 with 305µs per +Pippenger 6 is more efficient at 275 with 250µs per +Pippenger 7 is more efficient at 450 with 205µs per +Pippenger 8 is more efficient at 800 with 213µs per + +Debug (with runs 5, so...): + +k256 +Straus 3 is more efficient at 5 with 2532µs per +Straus 4 is more efficient at 10 with 1930µs per +Straus 5 is more efficient at 80 with 1632µs per + +Pippenger 5 is more efficient at 150 with 1441µs per +Pippenger 6 is more efficient at 300 with 1235µs per +Pippenger 7 is more efficient at 475 with 1182µs per +Pippenger 8 is more efficient at 625 with 1170µs per + +dalek: +Straus 3 is more efficient at 5 with 971µs per +Straus 4 is more efficient at 10 with 782µs per +Straus 5 is more efficient at 75 with 778µs per +Straus 6 is more efficient at 165 with 867µs per + +Pippenger 5 is more efficient at 125 with 677µs per +Pippenger 6 is more efficient at 250 with 655µs per +Pippenger 7 is more efficient at 475 with 500µs per +Pippenger 8 is more efficient at 875 with 499µs per +*/ +fn algorithm(len: usize) -> Algorithm { + #[cfg(not(debug_assertions))] + if len < 10 { + // Straus 2 never showed a performance benefit, even with just 2 elements + Algorithm::Straus(3) + } else if len < 20 { + Algorithm::Straus(4) + } 
else if len < 50 { + Algorithm::Straus(5) + } else if len < 100 { + Algorithm::Pippenger(4) + } else if len < 125 { + Algorithm::Pippenger(5) + } else if len < 275 { + Algorithm::Pippenger(6) + } else if len < 400 { + Algorithm::Pippenger(7) } else { - Algorithm::Pippenger + Algorithm::Pippenger(8) + } + + #[cfg(debug_assertions)] + if len < 10 { + Algorithm::Straus(3) + } else if len < 80 { + Algorithm::Straus(4) + } else if len < 100 { + Algorithm::Straus(5) + } else if len < 125 { + Algorithm::Pippenger(4) + } else if len < 275 { + Algorithm::Pippenger(5) + } else if len < 475 { + Algorithm::Pippenger(6) + } else if len < 750 { + Algorithm::Pippenger(7) + } else { + Algorithm::Pippenger(8) } } // Performs a multiexp, automatically selecting the optimal algorithm based on amount of pairs -// Takes in an iterator of scalars and points, with a boolean for if the scalars are little endian -// encoded in their Reprs or not -pub fn multiexp(pairs: &[(G::Scalar, G)], little: bool) -> G { +pub fn multiexp(pairs: &[(G::Scalar, G)]) -> G where G::Scalar: PrimeFieldBits { match algorithm(pairs.len()) { - Algorithm::Straus => straus(pairs, little), - Algorithm::Pippenger => pippenger(pairs, little) + Algorithm::Straus(window) => straus(pairs, window), + Algorithm::Pippenger(window) => pippenger(pairs, window) } } -pub fn multiexp_vartime(pairs: &[(G::Scalar, G)], little: bool) -> G { +pub fn multiexp_vartime(pairs: &[(G::Scalar, G)]) -> G where G::Scalar: PrimeFieldBits { match algorithm(pairs.len()) { - Algorithm::Straus => straus_vartime(pairs, little), - Algorithm::Pippenger => pippenger_vartime(pairs, little) + Algorithm::Straus(window) => straus_vartime(pairs, window), + Algorithm::Pippenger(window) => pippenger_vartime(pairs, window) } } diff --git a/crypto/multiexp/src/pippenger.rs b/crypto/multiexp/src/pippenger.rs index b812c922..cfc24f1b 100644 --- a/crypto/multiexp/src/pippenger.rs +++ b/crypto/multiexp/src/pippenger.rs @@ -1,42 +1,23 @@ -use 
group::{ff::PrimeField, Group}; +use ff::PrimeFieldBits; +use group::Group; -fn prep(pairs: &[(G::Scalar, G)], little: bool) -> (Vec>, Vec) { - let mut res = vec![]; - let mut points = vec![]; - for pair in pairs { - let p = res.len(); - res.push(vec![]); - { - let mut repr = pair.0.to_repr(); - let bytes = repr.as_mut(); - if !little { - bytes.reverse(); - } +use crate::prep_bits; - res[p].resize(bytes.len(), 0); - for i in 0 .. bytes.len() { - res[p][i] = bytes[i]; - } - } - - points.push(pair.1); - } - - (res, points) -} - -pub(crate) fn pippenger(pairs: &[(G::Scalar, G)], little: bool) -> G { - let (bytes, points) = prep(pairs, little); +pub(crate) fn pippenger( + pairs: &[(G::Scalar, G)], + window: u8 +) -> G where G::Scalar: PrimeFieldBits { + let bits = prep_bits(pairs, window); let mut res = G::identity(); - for n in (0 .. bytes[0].len()).rev() { - for _ in 0 .. 8 { + for n in (0 .. bits[0].len()).rev() { + for _ in 0 .. window { res = res.double(); } - let mut buckets = [G::identity(); 256]; - for p in 0 .. bytes.len() { - buckets[usize::from(bytes[p][n])] += points[p]; + let mut buckets = vec![G::identity(); 2_usize.pow(window.into())]; + for p in 0 .. bits.len() { + buckets[usize::from(bits[p][n])] += pairs[p].1; } let mut intermediate_sum = G::identity(); @@ -49,22 +30,25 @@ pub(crate) fn pippenger(pairs: &[(G::Scalar, G)], little: bool) -> G { res } -pub(crate) fn pippenger_vartime(pairs: &[(G::Scalar, G)], little: bool) -> G { - let (bytes, points) = prep(pairs, little); +pub(crate) fn pippenger_vartime( + pairs: &[(G::Scalar, G)], + window: u8 +) -> G where G::Scalar: PrimeFieldBits { + let bits = prep_bits(pairs, window); let mut res = G::identity(); - for n in (0 .. bytes[0].len()).rev() { - if n != (bytes[0].len() - 1) { - for _ in 0 .. 8 { + for n in (0 .. bits[0].len()).rev() { + if n != (bits[0].len() - 1) { + for _ in 0 .. window { res = res.double(); } } - let mut buckets = [G::identity(); 256]; - for p in 0 .. 
bytes.len() { - let nibble = usize::from(bytes[p][n]); + let mut buckets = vec![G::identity(); 2_usize.pow(window.into())]; + for p in 0 .. bits.len() { + let nibble = usize::from(bits[p][n]); if nibble != 0 { - buckets[nibble] += points[p]; + buckets[nibble] += pairs[p].1; } } diff --git a/crypto/multiexp/src/straus.rs b/crypto/multiexp/src/straus.rs index b8660f1b..e2955d94 100644 --- a/crypto/multiexp/src/straus.rs +++ b/crypto/multiexp/src/straus.rs @@ -1,66 +1,46 @@ -use group::{ff::PrimeField, Group}; +use ff::PrimeFieldBits; +use group::Group; -fn prep(pairs: &[(G::Scalar, G)], little: bool) -> (Vec>, Vec<[G; 16]>) { - let mut nibbles = vec![]; - let mut tables = vec![]; - for pair in pairs { - let p = nibbles.len(); - nibbles.push(vec![]); - { - let mut repr = pair.0.to_repr(); - let bytes = repr.as_mut(); - if !little { - bytes.reverse(); - } +use crate::{prep_bits, prep_tables}; - nibbles[p].resize(bytes.len() * 2, 0); - for i in 0 .. bytes.len() { - nibbles[p][i * 2] = bytes[i] & 0b1111; - nibbles[p][(i * 2) + 1] = (bytes[i] >> 4) & 0b1111; - } - } - - tables.push([G::identity(); 16]); - let mut accum = G::identity(); - for i in 1 .. 16 { - accum += pair.1; - tables[p][i] = accum; - } - } - - (nibbles, tables) -} - -pub(crate) fn straus(pairs: &[(G::Scalar, G)], little: bool) -> G { - let (nibbles, tables) = prep(pairs, little); +pub(crate) fn straus( + pairs: &[(G::Scalar, G)], + window: u8 +) -> G where G::Scalar: PrimeFieldBits { + let groupings = prep_bits(pairs, window); + let tables = prep_tables(pairs, window); let mut res = G::identity(); - for b in (0 .. nibbles[0].len()).rev() { - for _ in 0 .. 4 { + for b in (0 .. groupings[0].len()).rev() { + for _ in 0 .. window { res = res.double(); } for s in 0 .. 
tables.len() { - res += tables[s][usize::from(nibbles[s][b])]; + res += tables[s][usize::from(groupings[s][b])]; } } res } -pub(crate) fn straus_vartime(pairs: &[(G::Scalar, G)], little: bool) -> G { - let (nibbles, tables) = prep(pairs, little); +pub(crate) fn straus_vartime( + pairs: &[(G::Scalar, G)], + window: u8 +) -> G where G::Scalar: PrimeFieldBits { + let groupings = prep_bits(pairs, window); + let tables = prep_tables(pairs, window); let mut res = G::identity(); - for b in (0 .. nibbles[0].len()).rev() { - if b != (nibbles[0].len() - 1) { - for _ in 0 .. 4 { + for b in (0 .. groupings[0].len()).rev() { + if b != (groupings[0].len() - 1) { + for _ in 0 .. window { res = res.double(); } } for s in 0 .. tables.len() { - if nibbles[s][b] != 0 { - res += tables[s][usize::from(nibbles[s][b])]; + if groupings[s][b] != 0 { + res += tables[s][usize::from(groupings[s][b])]; } } } diff --git a/crypto/multiexp/src/tests/mod.rs b/crypto/multiexp/src/tests/mod.rs new file mode 100644 index 00000000..628c52c8 --- /dev/null +++ b/crypto/multiexp/src/tests/mod.rs @@ -0,0 +1,112 @@ +use std::time::Instant; + +use rand_core::OsRng; + +use ff::{Field, PrimeFieldBits}; +use group::Group; + +use k256::ProjectivePoint; +use dalek_ff_group::EdwardsPoint; + +use crate::{straus, pippenger, multiexp, multiexp_vartime}; + +#[allow(dead_code)] +fn benchmark_internal(straus_bool: bool) where G::Scalar: PrimeFieldBits { + let runs: usize = 20; + + let mut start = 0; + let mut increment: usize = 5; + let mut total: usize = 250; + let mut current = 2; + + if !straus_bool { + start = 100; + increment = 25; + total = 1000; + current = 4; + }; + + let mut pairs = Vec::with_capacity(total); + let mut sum = G::identity(); + + for _ in 0 .. start { + pairs.push((G::Scalar::random(&mut OsRng), G::generator() * G::Scalar::random(&mut OsRng))); + sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0; + } + + for _ in 0 .. (total / increment) { + for _ in 0 .. 
increment { + pairs.push((G::Scalar::random(&mut OsRng), G::generator() * G::Scalar::random(&mut OsRng))); + sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0; + } + + let now = Instant::now(); + for _ in 0 .. runs { + if straus_bool { + assert_eq!(straus(&pairs, current), sum); + } else { + assert_eq!(pippenger(&pairs, current), sum); + } + } + let current_per = now.elapsed().as_micros() / u128::try_from(pairs.len()).unwrap(); + + let now = Instant::now(); + for _ in 0 .. runs { + if straus_bool { + assert_eq!(straus(&pairs, current + 1), sum); + } else { + assert_eq!(pippenger(&pairs, current + 1), sum); + } + } + let next_per = now.elapsed().as_micros() / u128::try_from(pairs.len()).unwrap(); + + if next_per < current_per { + current += 1; + println!( + "{} {} is more efficient at {} with {}µs per", + if straus_bool { "Straus" } else { "Pippenger" }, current, pairs.len(), next_per + ); + if current >= 8 { + return; + } + } + } +} + +fn test_multiexp() where G::Scalar: PrimeFieldBits { + let mut pairs = Vec::with_capacity(1000); + let mut sum = G::identity(); + for _ in 0 .. 10 { + for _ in 0 .. 100 { + pairs.push((G::Scalar::random(&mut OsRng), G::generator() * G::Scalar::random(&mut OsRng))); + sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0; + } + assert_eq!(multiexp(&pairs), sum); + assert_eq!(multiexp_vartime(&pairs), sum); + } +} + +#[test] +fn test_secp256k1() { + test_multiexp::(); +} + +#[test] +fn test_ed25519() { + test_multiexp::(); +} + +#[test] +#[ignore] +fn benchmark() { + // Activate the processor's boost clock + for _ in 0 .. 
30 { + test_multiexp::(); + } + + benchmark_internal::(true); + benchmark_internal::(false); + + benchmark_internal::(true); + benchmark_internal::(false); +} From 4eafbe2a09065268ef39c72f2a82c9aec9274487 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 30 Jun 2022 11:23:13 -0400 Subject: [PATCH 065/105] Unify the cross-group DLEq challenges This does reduce the strength of the challenges to that of the weaker field, yet that doesn't have any impact on whether or not this is ZK due to the key being shared across fields. Saves ~8kb. --- crypto/dleq/src/cross_group/mod.rs | 23 ++++++++++++----------- crypto/dleq/src/cross_group/schnorr.rs | 2 +- crypto/dleq/src/lib.rs | 9 +++------ 3 files changed, 16 insertions(+), 18 deletions(-) diff --git a/crypto/dleq/src/cross_group/mod.rs b/crypto/dleq/src/cross_group/mod.rs index 498d5f9f..012d1138 100644 --- a/crypto/dleq/src/cross_group/mod.rs +++ b/crypto/dleq/src/cross_group/mod.rs @@ -8,7 +8,7 @@ use group::{ff::{Field, PrimeField, PrimeFieldBits}, prime::PrimeGroup}; use crate::{Generators, challenge}; pub mod scalar; -use scalar::scalar_normalize; +use scalar::{scalar_normalize, scalar_convert}; pub(crate) mod schnorr; use schnorr::SchnorrPoK; @@ -32,7 +32,7 @@ pub(crate) fn read_point(r: &mut R) -> std::io::Result { commitments: (G0, G1), - e: (G0::Scalar, G1::Scalar), + e: G0::Scalar, s: [(G0::Scalar, G1::Scalar); 2] } @@ -41,8 +41,7 @@ impl Bit { pub fn serialize(&self, w: &mut W) -> std::io::Result<()> { w.write_all(self.commitments.0.to_bytes().as_ref())?; w.write_all(self.commitments.1.to_bytes().as_ref())?; - w.write_all(self.e.0.to_repr().as_ref())?; - w.write_all(self.e.1.to_repr().as_ref())?; + w.write_all(self.e.to_repr().as_ref())?; for i in 0 .. 
2 { w.write_all(self.s[i].0.to_repr().as_ref())?; w.write_all(self.s[i].1.to_repr().as_ref())?; @@ -55,7 +54,7 @@ impl Bit { Ok( Bit { commitments: (read_point(r)?, read_point(r)?), - e: (read_scalar(r)?, read_scalar(r)?), + e: read_scalar(r)?, s: [ (read_scalar(r)?, read_scalar(r)?), (read_scalar(r)?, read_scalar(r)?) @@ -71,6 +70,8 @@ pub enum DLEqError { InvalidProofOfKnowledge, #[error("invalid proof length")] InvalidProofLength, + #[error("invalid challenge")] + InvalidChallenge, #[error("invalid proof")] InvalidProof } @@ -117,7 +118,7 @@ impl DLEqProof fn nonces(mut transcript: T, nonces: (G0, G1)) -> (G0::Scalar, G1::Scalar) { transcript.append_message(b"nonce_0", nonces.0.to_bytes().as_ref()); transcript.append_message(b"nonce_1", nonces.1.to_bytes().as_ref()); - (challenge(&mut transcript, b"challenge_G"), challenge(&mut transcript, b"challenge_H")) + scalar_normalize(challenge(&mut transcript)) } #[allow(non_snake_case)] @@ -134,7 +135,6 @@ impl DLEqProof ) } - // TODO: Use multiexp here after https://github.com/serai-dex/serai/issues/17 fn reconstruct_key( commitments: impl Iterator ) -> G where G::Scalar: PrimeFieldBits { @@ -240,9 +240,9 @@ impl DLEqProof bits.push( if *bit { - Bit { commitments, e: e_0, s: [s_1, s_0] } + Bit { commitments, e: e_0.0, s: [s_1, s_0] } } else { - Bit { commitments, e: e_1, s: [s_0, s_1] } + Bit { commitments, e: e_1.0, s: [s_0, s_1] } } ); @@ -282,7 +282,8 @@ impl DLEqProof for (i, bit) in self.bits.iter().enumerate() { Self::transcript_bit(transcript, i, bit.commitments); - if bit.e != Self::R_nonces( + let bit_e = (bit.e, scalar_convert(bit.e).ok_or(DLEqError::InvalidChallenge)?); + if bit_e != Self::R_nonces( transcript.clone(), generators, bit.s[0], @@ -295,7 +296,7 @@ impl DLEqProof generators, bit.s[1], bit.commitments, - bit.e + bit_e ) ) { return Err(DLEqError::InvalidProof); diff --git a/crypto/dleq/src/cross_group/schnorr.rs b/crypto/dleq/src/cross_group/schnorr.rs index cbb7cfc8..cbd60aa6 100644 --- 
a/crypto/dleq/src/cross_group/schnorr.rs +++ b/crypto/dleq/src/cross_group/schnorr.rs @@ -28,7 +28,7 @@ impl SchnorrPoK { transcript.append_message(b"generator", generator.to_bytes().as_ref()); transcript.append_message(b"nonce", R.to_bytes().as_ref()); transcript.append_message(b"public_key", A.to_bytes().as_ref()); - challenge(transcript, b"challenge") + challenge(transcript) } pub(crate) fn prove( diff --git a/crypto/dleq/src/lib.rs b/crypto/dleq/src/lib.rs index cc15775e..f960cdfe 100644 --- a/crypto/dleq/src/lib.rs +++ b/crypto/dleq/src/lib.rs @@ -33,10 +33,7 @@ impl Generators { } } -pub(crate) fn challenge( - transcript: &mut T, - label: &'static [u8] -) -> F { +pub(crate) fn challenge(transcript: &mut T) -> F { assert!(F::NUM_BITS <= 384); // From here, there are three ways to get a scalar under the ff/group API @@ -44,7 +41,7 @@ pub(crate) fn challenge( // 2: Grabbing a UInt library to perform reduction by the modulus, then determining endianess // and loading it in // 3: Iterating over each byte and manually doubling/adding. This is simplest - let challenge_bytes = transcript.challenge(label); + let challenge_bytes = transcript.challenge(b"challenge"); assert!(challenge_bytes.as_ref().len() == 64); let mut challenge = F::zero(); @@ -94,7 +91,7 @@ impl DLEqProof { transcript.append_message(b"nonce_alternate", nonces.1.to_bytes().as_ref()); transcript.append_message(b"point_primary", points.0.to_bytes().as_ref()); transcript.append_message(b"point_alternate", points.1.to_bytes().as_ref()); - challenge(transcript, b"challenge") + challenge(transcript) } pub fn prove( From 133c1222ad9820a4351c69e523de07155354d31c Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 30 Jun 2022 18:46:18 -0400 Subject: [PATCH 066/105] Remove C::F_len, C::G_len for F_len and G_len Relies on the ff/group API, instead of the custom Curve type. Also removes GENERATOR_TABLE, only used by dalek, as we should provide our own API for that over ff/group instead. 
This slows down the FROST tests, under debug, by about 0.2-0.3s. Ed25519 and Ristretto together take ~2.15 seconds now. --- crypto/frost/src/curve/dalek.rs | 19 +----------------- crypto/frost/src/curve/kp256.rs | 11 ----------- crypto/frost/src/curve/mod.rs | 32 +++++++++++-------------------- crypto/frost/src/key_gen.rs | 18 ++++++++--------- crypto/frost/src/lib.rs | 20 +++++++++---------- crypto/frost/src/schnorr.rs | 8 ++++---- crypto/frost/src/sign.rs | 10 +++++----- crypto/frost/src/tests/mod.rs | 2 +- crypto/frost/src/tests/schnorr.rs | 10 +++++----- 9 files changed, 46 insertions(+), 84 deletions(-) diff --git a/crypto/frost/src/curve/dalek.rs b/crypto/frost/src/curve/dalek.rs index 40e6c252..9c0bf65d 100644 --- a/crypto/frost/src/curve/dalek.rs +++ b/crypto/frost/src/curve/dalek.rs @@ -11,29 +11,24 @@ macro_rules! dalek_curve { $Curve: ident, $Hram: ident, $Point: ident, - $Table: ident, $POINT: ident, - $TABLE: ident, $ID: literal, $CONTEXT: literal, $chal: literal, $digest: literal, ) => { - use dalek_ff_group::{$Point, $Table, $POINT, $TABLE}; + use dalek_ff_group::{$Point, $POINT}; #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct $Curve; impl Curve for $Curve { type F = Scalar; type G = $Point; - type T = &'static $Table; const ID: &'static [u8] = $ID; - const GENERATOR: Self::G = $POINT; - const GENERATOR_TABLE: Self::T = &$TABLE; fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F { let mut seed = vec![0; 32]; @@ -58,14 +53,6 @@ macro_rules! 
dalek_curve { fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F { Scalar::from_hash(Sha512::new().chain_update($CONTEXT).chain_update(dst).chain_update(msg)) } - - fn F_len() -> usize { - 32 - } - - fn G_len() -> usize { - 32 - } } #[derive(Copy, Clone)] @@ -84,9 +71,7 @@ dalek_curve!( Ristretto, IetfRistrettoHram, RistrettoPoint, - RistrettoBasepointTable, RISTRETTO_BASEPOINT_POINT, - RISTRETTO_BASEPOINT_TABLE, b"ristretto", b"FROST-RISTRETTO255-SHA512-v5", b"chal", @@ -98,9 +83,7 @@ dalek_curve!( Ed25519, IetfEd25519Hram, EdwardsPoint, - EdwardsBasepointTable, ED25519_BASEPOINT_POINT, - ED25519_BASEPOINT_TABLE, b"edwards25519", b"", b"", diff --git a/crypto/frost/src/curve/kp256.rs b/crypto/frost/src/curve/kp256.rs index 9b1874d8..bb3e02ed 100644 --- a/crypto/frost/src/curve/kp256.rs +++ b/crypto/frost/src/curve/kp256.rs @@ -22,12 +22,9 @@ macro_rules! kp_curve { impl Curve for $Curve { type F = $lib::Scalar; type G = $lib::ProjectivePoint; - type T = $lib::ProjectivePoint; const ID: &'static [u8] = $ID; - const GENERATOR: Self::G = $lib::ProjectivePoint::GENERATOR; - const GENERATOR_TABLE: Self::G = $lib::ProjectivePoint::GENERATOR; fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F { let mut seed = vec![0; 32]; @@ -73,14 +70,6 @@ macro_rules! kp_curve { }).reduce(&modulus).unwrap().to_be_bytes()[16 ..] 
).unwrap() } - - fn F_len() -> usize { - 32 - } - - fn G_len() -> usize { - 33 - } } #[derive(Clone)] diff --git a/crypto/frost/src/curve/mod.rs b/crypto/frost/src/curve/mod.rs index e08e2faf..32b8fef0 100644 --- a/crypto/frost/src/curve/mod.rs +++ b/crypto/frost/src/curve/mod.rs @@ -1,11 +1,11 @@ -use core::{ops::Mul, fmt::Debug}; +use core::fmt::Debug; use thiserror::Error; use rand_core::{RngCore, CryptoRng}; use ff::{PrimeField, PrimeFieldBits}; -use group::{Group, GroupOps, prime::PrimeGroup}; +use group::{Group, GroupOps, GroupEncoding, prime::PrimeGroup}; #[cfg(any(test, feature = "dalek"))] mod dalek; @@ -44,20 +44,14 @@ pub trait Curve: Clone + Copy + PartialEq + Eq + Debug { type F: PrimeField + PrimeFieldBits; /// Group element type type G: Group + GroupOps + PrimeGroup; - /// Precomputed table type - type T: Mul; /// ID for this curve const ID: &'static [u8]; /// Generator for the group - // While group does provide this in its API, privacy coins will want to use a custom basepoint + // While group does provide this in its API, privacy coins may want to use a custom basepoint const GENERATOR: Self::G; - /// Table for the generator for the group - /// If there isn't a precomputed table available, the generator itself should be used - const GENERATOR_TABLE: Self::T; - /// Securely generate a random nonce. 
H4 from the IETF draft fn random_nonce(secret: Self::F, rng: &mut R) -> Self::F; @@ -83,20 +77,16 @@ pub trait Curve: Clone + Copy + PartialEq + Eq + Debug { // hash_msg and hash_binding_factor #[allow(non_snake_case)] fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F; +} - /// Constant size of a serialized scalar field element - // The alternative way to grab this would be either serializing a junk element and getting its - // length or doing a naive division of its BITS property by 8 and assuming a lack of padding - #[allow(non_snake_case)] - fn F_len() -> usize; +#[allow(non_snake_case)] +pub(crate) fn F_len() -> usize { + ::Repr::default().as_ref().len() +} - /// Constant size of a serialized group element - // We could grab the serialization as described above yet a naive developer may use a - // non-constant size encoding, proving yet another reason to force this to be a provided constant - // A naive developer could still provide a constant for a variable length encoding, yet at least - // that is on them - #[allow(non_snake_case)] - fn G_len() -> usize; +#[allow(non_snake_case)] +pub(crate) fn G_len() -> usize { + ::Repr::default().as_ref().len() } /// Field element from slice diff --git a/crypto/frost/src/key_gen.rs b/crypto/frost/src/key_gen.rs index e5b0f76f..84f4a590 100644 --- a/crypto/frost/src/key_gen.rs +++ b/crypto/frost/src/key_gen.rs @@ -7,7 +7,7 @@ use group::{ff::{Field, PrimeField}, GroupEncoding}; use multiexp::{multiexp_vartime, BatchVerifier}; use crate::{ - curve::{Curve, F_from_slice, G_from_slice}, + curve::{Curve, F_len, G_len, F_from_slice, G_from_slice}, FrostError, FrostParams, FrostKeys, schnorr::{self, SchnorrSignature}, validate_map @@ -35,13 +35,13 @@ fn generate_key_r1( let t = usize::from(params.t); let mut coefficients = Vec::with_capacity(t); let mut commitments = Vec::with_capacity(t); - let mut serialized = Vec::with_capacity((C::G_len() * t) + C::G_len() + C::F_len()); + let mut serialized = Vec::with_capacity((G_len::() * 
t) + G_len::() + F_len::()); for i in 0 .. t { // Step 1: Generate t random values to form a polynomial with coefficients.push(C::F::random(&mut *rng)); // Step 3: Generate public commitments - commitments.push(C::GENERATOR_TABLE * coefficients[i]); + commitments.push(C::GENERATOR * coefficients[i]); // Serialize them for publication serialized.extend(commitments[i].to_bytes().as_ref()); } @@ -59,7 +59,7 @@ fn generate_key_r1( challenge::( context, params.i(), - (C::GENERATOR_TABLE * r).to_bytes().as_ref(), + (C::GENERATOR * r).to_bytes().as_ref(), &serialized ) ).serialize() @@ -83,19 +83,19 @@ fn verify_r1( (params.i(), our_commitments) )?; - let commitments_len = usize::from(params.t()) * C::G_len(); + let commitments_len = usize::from(params.t()) * G_len::(); let mut commitments = HashMap::new(); #[allow(non_snake_case)] - let R_bytes = |l| &serialized[&l][commitments_len .. commitments_len + C::G_len()]; + let R_bytes = |l| &serialized[&l][commitments_len .. commitments_len + G_len::()]; #[allow(non_snake_case)] let R = |l| G_from_slice::(R_bytes(l)).map_err(|_| FrostError::InvalidProofOfKnowledge(l)); #[allow(non_snake_case)] let Am = |l| &serialized[&l][0 .. commitments_len]; let s = |l| F_from_slice::( - &serialized[&l][commitments_len + C::G_len() ..] + &serialized[&l][commitments_len + G_len::() ..] ).map_err(|_| FrostError::InvalidProofOfKnowledge(l)); let mut signatures = Vec::with_capacity(usize::from(params.n() - 1)); @@ -104,7 +104,7 @@ fn verify_r1( for c in 0 .. usize::from(params.t()) { these_commitments.push( G_from_slice::( - &serialized[&l][(c * C::G_len()) .. ((c + 1) * C::G_len())] + &serialized[&l][(c * G_len::()) .. ((c + 1) * G_len::())] ).map_err(|_| FrostError::InvalidCommitment(l.try_into().unwrap()))? 
); } @@ -257,7 +257,7 @@ fn complete_r2( verification_shares.insert(i, multiexp_vartime(&exponential(i, &stripes))); } // Removing this check would enable optimizing the above from t + (n * t) to t + ((n - 1) * t) - debug_assert_eq!(C::GENERATOR_TABLE * secret_share, verification_shares[¶ms.i()]); + debug_assert_eq!(C::GENERATOR * secret_share, verification_shares[¶ms.i()]); // TODO: Clear serialized and shares diff --git a/crypto/frost/src/lib.rs b/crypto/frost/src/lib.rs index ca64b96f..40363153 100644 --- a/crypto/frost/src/lib.rs +++ b/crypto/frost/src/lib.rs @@ -8,7 +8,7 @@ use group::{ff::{Field, PrimeField}, GroupEncoding}; mod schnorr; pub mod curve; -use curve::{Curve, F_from_slice, G_from_slice}; +use curve::{Curve, F_len, G_len, F_from_slice, G_from_slice}; pub mod key_gen; pub mod algorithm; pub mod sign; @@ -160,7 +160,7 @@ impl FrostKeys { // Enables schemes like Monero's subaddresses which have a per-subaddress offset and then a // one-time-key offset res.offset = Some(offset + res.offset.unwrap_or(C::F::zero())); - res.group_key += C::GENERATOR_TABLE * offset; + res.group_key += C::GENERATOR * offset; res } @@ -195,7 +195,7 @@ impl FrostKeys { verification_shares: self.verification_shares.iter().map( |(l, share)| ( *l, - (*share * lagrange::(*l, &included)) + (C::GENERATOR_TABLE * offset_share) + (*share * lagrange::(*l, &included)) + (C::GENERATOR * offset_share) ) ).collect(), included: included.to_vec(), @@ -203,7 +203,7 @@ impl FrostKeys { } pub fn serialized_len(n: u16) -> usize { - 8 + C::ID.len() + (3 * 2) + C::F_len() + C::G_len() + (usize::from(n) * C::G_len()) + 8 + C::ID.len() + (3 * 2) + F_len::() + G_len::() + (usize::from(n) * G_len::()) } pub fn serialize(&self) -> Vec { @@ -253,21 +253,21 @@ impl FrostKeys { let i = u16::from_be_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap()); cursor += 2; - let secret_share = F_from_slice::(&serialized[cursor .. 
(cursor + C::F_len())]) + let secret_share = F_from_slice::(&serialized[cursor .. (cursor + F_len::())]) .map_err(|_| FrostError::InternalError("invalid secret share".to_string()))?; - cursor += C::F_len(); - let group_key = G_from_slice::(&serialized[cursor .. (cursor + C::G_len())]) + cursor += F_len::(); + let group_key = G_from_slice::(&serialized[cursor .. (cursor + G_len::())]) .map_err(|_| FrostError::InternalError("invalid group key".to_string()))?; - cursor += C::G_len(); + cursor += G_len::(); let mut verification_shares = HashMap::new(); for l in 1 ..= n { verification_shares.insert( l, - G_from_slice::(&serialized[cursor .. (cursor + C::G_len())]) + G_from_slice::(&serialized[cursor .. (cursor + G_len::())]) .map_err(|_| FrostError::InternalError("invalid verification share".to_string()))? ); - cursor += C::G_len(); + cursor += G_len::(); } Ok( diff --git a/crypto/frost/src/schnorr.rs b/crypto/frost/src/schnorr.rs index af9ff808..dafc8251 100644 --- a/crypto/frost/src/schnorr.rs +++ b/crypto/frost/src/schnorr.rs @@ -4,7 +4,7 @@ use group::{ff::{Field, PrimeField}, GroupEncoding}; use multiexp::BatchVerifier; -use crate::Curve; +use crate::{Curve, F_len, G_len}; #[allow(non_snake_case)] #[derive(Clone, Copy, PartialEq, Eq, Debug)] @@ -15,7 +15,7 @@ pub struct SchnorrSignature { impl SchnorrSignature { pub fn serialize(&self) -> Vec { - let mut res = Vec::with_capacity(C::G_len() + C::F_len()); + let mut res = Vec::with_capacity(G_len::() + F_len::()); res.extend(self.R.to_bytes().as_ref()); res.extend(self.s.to_repr().as_ref()); res @@ -28,7 +28,7 @@ pub(crate) fn sign( challenge: C::F ) -> SchnorrSignature { SchnorrSignature { - R: C::GENERATOR_TABLE * nonce, + R: C::GENERATOR * nonce, s: nonce + (private_key * challenge) } } @@ -38,7 +38,7 @@ pub(crate) fn verify( challenge: C::F, signature: &SchnorrSignature ) -> bool { - (C::GENERATOR_TABLE * signature.s) == (signature.R + (public_key * challenge)) + (C::GENERATOR * signature.s) == (signature.R + 
(public_key * challenge)) } pub(crate) fn batch_verify( diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index c01dbe63..c05e4a89 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -8,7 +8,7 @@ use group::{ff::{Field, PrimeField}, GroupEncoding}; use transcript::Transcript; use crate::{ - curve::{Curve, F_from_slice, G_from_slice}, + curve::{Curve, G_len, F_from_slice, G_from_slice}, FrostError, FrostParams, FrostKeys, FrostView, algorithm::Algorithm, @@ -84,7 +84,7 @@ fn preprocess>( C::random_nonce(params.view().secret_share(), &mut *rng), C::random_nonce(params.view().secret_share(), &mut *rng) ]; - let commitments = [C::GENERATOR_TABLE * nonces[0], C::GENERATOR_TABLE * nonces[1]]; + let commitments = [C::GENERATOR * nonces[0], C::GENERATOR * nonces[1]]; let mut serialized = commitments[0].to_bytes().as_ref().to_vec(); serialized.extend(commitments[1].to_bytes().as_ref()); @@ -146,18 +146,18 @@ fn sign_with_share>( let commitments = commitments.remove(l).unwrap(); let mut read_commitment = |c, label| { - let commitment = &commitments[c .. (c + C::G_len())]; + let commitment = &commitments[c .. (c + G_len::())]; transcript.append_message(label, commitment); G_from_slice::(commitment).map_err(|_| FrostError::InvalidCommitment(*l)) }; #[allow(non_snake_case)] let mut read_D_E = || Ok( - [read_commitment(0, b"commitment_D")?, read_commitment(C::G_len(), b"commitment_E")?] + [read_commitment(0, b"commitment_D")?, read_commitment(G_len::(), b"commitment_E")?] 
); B.insert(*l, read_D_E()?); - addendums.insert(*l, commitments[(C::G_len() * 2) ..].to_vec()); + addendums.insert(*l, commitments[(G_len::() * 2) ..].to_vec()); } // Append the message to the transcript diff --git a/crypto/frost/src/tests/mod.rs b/crypto/frost/src/tests/mod.rs index 87f2bf83..3c982cbf 100644 --- a/crypto/frost/src/tests/mod.rs +++ b/crypto/frost/src/tests/mod.rs @@ -98,7 +98,7 @@ pub fn recover(keys: &HashMap>) -> C::F { C::F::zero(), |accum, (i, keys)| accum + (keys.secret_share() * lagrange::(*i, &included)) ); - assert_eq!(C::GENERATOR_TABLE * group_private, first.group_key(), "failed to recover keys"); + assert_eq!(C::GENERATOR * group_private, first.group_key(), "failed to recover keys"); group_private } diff --git a/crypto/frost/src/tests/schnorr.rs b/crypto/frost/src/tests/schnorr.rs index 6450a845..d27e5e5d 100644 --- a/crypto/frost/src/tests/schnorr.rs +++ b/crypto/frost/src/tests/schnorr.rs @@ -15,7 +15,7 @@ pub(crate) fn core_sign(rng: &mut R) { let challenge = C::F::random(rng); // Doesn't bother to craft an HRAM assert!( schnorr::verify::( - C::GENERATOR_TABLE * private_key, + C::GENERATOR * private_key, challenge, &schnorr::sign(private_key, nonce, challenge) ) @@ -28,9 +28,9 @@ pub(crate) fn core_sign(rng: &mut R) { pub(crate) fn core_verify(rng: &mut R) { assert!( !schnorr::verify::( - C::GENERATOR_TABLE * C::F::random(&mut *rng), + C::GENERATOR * C::F::random(&mut *rng), C::F::random(rng), - &SchnorrSignature { R: C::GENERATOR_TABLE * C::F::zero(), s: C::F::zero() } + &SchnorrSignature { R: C::GENERATOR * C::F::zero(), s: C::F::zero() } ) ); } @@ -48,7 +48,7 @@ pub(crate) fn core_batch_verify(rng: &mut R) { // Batch verify let triplets = (0 .. 
5).map( - |i| (u16::try_from(i + 1).unwrap(), C::GENERATOR_TABLE * keys[i], challenges[i], sigs[i]) + |i| (u16::try_from(i + 1).unwrap(), C::GENERATOR * keys[i], challenges[i], sigs[i]) ).collect::>(); schnorr::batch_verify(rng, &triplets).unwrap(); @@ -113,7 +113,7 @@ fn sign_with_offset(rng: &mut R) { for i in 1 ..= u16::try_from(keys.len()).unwrap() { keys.insert(i, Arc::new(keys[&i].offset(offset))); } - let offset_key = group_key + (C::GENERATOR_TABLE * offset); + let offset_key = group_key + (C::GENERATOR * offset); sign_core(rng, offset_key, &keys); } From 1c98f15d5b489ae44fa16ff5979147e3a260d0a7 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 1 Jul 2022 15:27:16 -0400 Subject: [PATCH 067/105] Make the cross-group DLEqProof prove constant time Instead of having if statements for the bits, it now has constant time ops. While there are still if statements guiding the proof itself, they aren't dependent on the data within. --- crypto/dleq/Cargo.toml | 2 ++ crypto/dleq/src/cross_group/mod.rs | 45 +++++++++++++----------------- 2 files changed, 22 insertions(+), 25 deletions(-) diff --git a/crypto/dleq/Cargo.toml b/crypto/dleq/Cargo.toml index de5338b7..27a806e8 100644 --- a/crypto/dleq/Cargo.toml +++ b/crypto/dleq/Cargo.toml @@ -10,6 +10,8 @@ edition = "2021" thiserror = "1" rand_core = "0.6" +subtle = "2.4" + transcript = { package = "flexible-transcript", path = "../transcript", version = "0.1" } ff = "0.12" diff --git a/crypto/dleq/src/cross_group/mod.rs b/crypto/dleq/src/cross_group/mod.rs index 012d1138..8026bafe 100644 --- a/crypto/dleq/src/cross_group/mod.rs +++ b/crypto/dleq/src/cross_group/mod.rs @@ -1,6 +1,8 @@ use thiserror::Error; use rand_core::{RngCore, CryptoRng}; +use subtle::{Choice, ConditionallySelectable}; + use transcript::Transcript; use group::{ff::{Field, PrimeField, PrimeFieldBits}, prime::PrimeGroup}; @@ -197,6 +199,9 @@ impl DLEqProof let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap(); let mut 
bits = Vec::with_capacity(capacity); for (i, bit) in raw_bits.iter().enumerate() { + let bit = *bit as u8; + debug_assert_eq!(bit | 1, 1); + let last = i == (capacity - 1); let blinding_key = ( Self::blinding_key(&mut *rng, &mut blinding_key_total.0, &mut pow_2.0, last), @@ -211,11 +216,8 @@ impl DLEqProof (generators.0.alt * blinding_key.0), (generators.1.alt * blinding_key.1) ); - // TODO: Not constant time - if *bit { - commitments.0 += generators.0.primary; - commitments.1 += generators.1.primary; - } + commitments.0 += generators.0.primary * G0::Scalar::from(bit.into()); + commitments.1 += generators.1.primary * G1::Scalar::from(bit.into()); Self::transcript_bit(transcript, i, commitments); let nonces = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); @@ -223,29 +225,22 @@ impl DLEqProof transcript.clone(), ((generators.0.alt * nonces.0), (generators.1.alt * nonces.1)) ); - let s_0 = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); + let mut s_0 = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); - let e_1 = Self::R_nonces( - transcript.clone(), - generators, - (s_0.0, s_0.1), - if *bit { - commitments - } else { - ((commitments.0 - generators.0.primary), (commitments.1 - generators.1.primary)) - }, - e_0 - ); - let s_1 = (nonces.0 + (e_1.0 * blinding_key.0), nonces.1 + (e_1.1 * blinding_key.1)); + let mut to_sign = commitments; + let bit = Choice::from(bit); + let inv_bit = (!bit).unwrap_u8(); + to_sign.0 -= generators.0.primary * G0::Scalar::from(inv_bit.into()); + to_sign.1 -= generators.1.primary * G1::Scalar::from(inv_bit.into()); + let e_1 = Self::R_nonces(transcript.clone(), generators, (s_0.0, s_0.1), to_sign, e_0); + let mut s_1 = (nonces.0 + (e_1.0 * blinding_key.0), nonces.1 + (e_1.1 * blinding_key.1)); - bits.push( - if *bit { - Bit { commitments, e: e_0.0, s: [s_1, s_0] } - } else { - Bit { commitments, e: e_1.0, s: [s_0, s_1] } - } - ); + let e = G0::Scalar::conditional_select(&e_1.0, &e_0.0, bit); + 
G0::Scalar::conditional_swap(&mut s_1.0, &mut s_0.0, bit); + G1::Scalar::conditional_swap(&mut s_1.1, &mut s_0.1, bit); + bits.push(Bit { commitments, e, s: [s_0, s_1] }); + // Break in order to not generate commitments for unused bits if last { break; } From 7e058f1c0895a0a88b34803566fb6b24f1499399 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 2 Jul 2022 02:45:26 -0400 Subject: [PATCH 068/105] Remove cross-group DLEq challenge bias as possible --- crypto/dleq/src/cross_group/mod.rs | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/crypto/dleq/src/cross_group/mod.rs b/crypto/dleq/src/cross_group/mod.rs index 8026bafe..7509ae6c 100644 --- a/crypto/dleq/src/cross_group/mod.rs +++ b/crypto/dleq/src/cross_group/mod.rs @@ -7,7 +7,7 @@ use transcript::Transcript; use group::{ff::{Field, PrimeField, PrimeFieldBits}, prime::PrimeGroup}; -use crate::{Generators, challenge}; +use crate::Generators; pub mod scalar; use scalar::{scalar_normalize, scalar_convert}; @@ -34,6 +34,11 @@ pub(crate) fn read_point(r: &mut R) -> std::io::Result { commitments: (G0, G1), + // Merged challenges have a slight security reduction, yet one already applied to the scalar + // being proven for, and this saves ~8kb. Alternatively, challenges could be redefined as a seed, + // present here, which is then hashed for each of the two challenges, remaining unbiased/unique + // while maintaining the bandwidth savings, yet also while adding 252 hashes for + // Secp256k1/Ed25519 e: G0::Scalar, s: [(G0::Scalar, G1::Scalar); 2] } @@ -116,11 +121,22 @@ impl DLEqProof blinding_key } + fn mutual_scalar_from_bytes(bytes: &[u8]) -> (G0::Scalar, G1::Scalar) { + let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap(); + debug_assert!((bytes.len() * 8) >= capacity); + + let mut accum = G0::Scalar::zero(); + for b in 0 .. 
capacity { + accum += G0::Scalar::from((bytes[b / 8] & (1 << (b % 8))).into()); + } + (accum, scalar_convert(accum).unwrap()) + } + #[allow(non_snake_case)] fn nonces(mut transcript: T, nonces: (G0, G1)) -> (G0::Scalar, G1::Scalar) { transcript.append_message(b"nonce_0", nonces.0.to_bytes().as_ref()); transcript.append_message(b"nonce_1", nonces.1.to_bytes().as_ref()); - scalar_normalize(challenge(&mut transcript)) + Self::mutual_scalar_from_bytes(transcript.challenge(b"challenge").as_ref()) } #[allow(non_snake_case)] From 2e3585421555d4f9feb1354f2e518bdb3a5ce82a Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 2 Jul 2022 02:46:40 -0400 Subject: [PATCH 069/105] Rewrite the cross-group DLEq API to not allow proving for biased scalars --- crypto/dleq/Cargo.toml | 5 ++ crypto/dleq/src/cross_group/mod.rs | 48 +++++++++--- crypto/dleq/src/tests/cross_group/mod.rs | 94 ++++++++++++++++++------ 3 files changed, 115 insertions(+), 32 deletions(-) diff --git a/crypto/dleq/Cargo.toml b/crypto/dleq/Cargo.toml index 27a806e8..6d9e80a7 100644 --- a/crypto/dleq/Cargo.toml +++ b/crypto/dleq/Cargo.toml @@ -10,6 +10,8 @@ edition = "2021" thiserror = "1" rand_core = "0.6" +digest = "0.10" + subtle = "2.4" transcript = { package = "flexible-transcript", path = "../transcript", version = "0.1" } @@ -21,6 +23,9 @@ multiexp = { path = "../multiexp" } [dev-dependencies] hex-literal = "0.3" + +blake2 = "0.10" + k256 = { version = "0.11", features = ["arithmetic", "bits"] } dalek-ff-group = { path = "../dalek-ff-group" } diff --git a/crypto/dleq/src/cross_group/mod.rs b/crypto/dleq/src/cross_group/mod.rs index 7509ae6c..687f5b41 100644 --- a/crypto/dleq/src/cross_group/mod.rs +++ b/crypto/dleq/src/cross_group/mod.rs @@ -1,6 +1,8 @@ use thiserror::Error; use rand_core::{RngCore, CryptoRng}; +use digest::Digest; + use subtle::{Choice, ConditionallySelectable}; use transcript::Transcript; @@ -182,21 +184,12 @@ impl DLEqProof transcript.append_message(b"commitment_1", 
commitments.1.to_bytes().as_ref()); } - /// Prove the cross-Group Discrete Log Equality for the points derived from the provided Scalar. - /// Since DLEq is proven for the same Scalar in both fields, and the provided Scalar may not be - /// valid in the other Scalar field, the Scalar is normalized as needed and the normalized forms - /// are returned. These are the actually equal discrete logarithms. The passed in Scalar is - /// solely to enable various forms of Scalar generation, such as deterministic schemes - pub fn prove( + fn prove_internal( rng: &mut R, transcript: &mut T, generators: (Generators, Generators), - f: G0::Scalar + f: (G0::Scalar, G1::Scalar) ) -> (Self, (G0::Scalar, G1::Scalar)) { - // At least one bit will be dropped from either field element, making it irrelevant which one - // we get a random element in - let f = scalar_normalize::<_, G1::Scalar>(f); - Self::initialize_transcript( transcript, generators, @@ -270,6 +263,39 @@ impl DLEqProof (proof, f) } + /// Prove the cross-Group Discrete Log Equality for the points derived from the scalar created as + /// the output of the passed in Digest. Given the non-standard requirements to achieve + /// uniformity, needing to be < 2^x instead of less than a prime moduli, this is the simplest way + /// to safely and securely generate a Scalar, without risk of failure, nor bias + /// It also ensures a lack of determinable relation between keys, guaranteeing security in the + /// currently expected use case for this, atomic swaps, where each swap leaks the key. 
Knowing + /// the relationship between keys would allow breaking all swaps after just one + pub fn prove( + rng: &mut R, + transcript: &mut T, + generators: (Generators, Generators), + digest: D + ) -> (Self, (G0::Scalar, G1::Scalar)) { + Self::prove_internal( + rng, + transcript, + generators, + Self::mutual_scalar_from_bytes(digest.finalize().as_ref()) + ) + } + + /// Prove the cross-Group Discrete Log Equality for the points derived from the scalar passed in, + /// failing if it's not mutually valid. This allows for rejection sampling externally derived + /// scalars until they're safely usable, as needed + pub fn prove_without_bias( + rng: &mut R, + transcript: &mut T, + generators: (Generators, Generators), + f0: G0::Scalar + ) -> Option<(Self, (G0::Scalar, G1::Scalar))> { + scalar_convert(f0).map(|f1| Self::prove_internal(rng, transcript, generators, (f0, f1))) + } + /// Verify a cross-Group Discrete Log Equality statement, returning the points proven for pub fn verify( &self, diff --git a/crypto/dleq/src/tests/cross_group/mod.rs b/crypto/dleq/src/tests/cross_group/mod.rs index fd4b3e9b..f39aa4ce 100644 --- a/crypto/dleq/src/tests/cross_group/mod.rs +++ b/crypto/dleq/src/tests/cross_group/mod.rs @@ -2,23 +2,26 @@ mod scalar; mod schnorr; use hex_literal::hex; -use rand_core::OsRng; +use rand_core::{RngCore, OsRng}; -use ff::Field; +use ff::{Field, PrimeField}; use group::{Group, GroupEncoding}; use k256::{Scalar, ProjectivePoint}; -use dalek_ff_group::{EdwardsPoint, CompressedEdwardsY}; +use dalek_ff_group::{self as dfg, EdwardsPoint, CompressedEdwardsY}; + +use blake2::{Digest, Blake2b512}; use transcript::RecommendedTranscript; use crate::{Generators, cross_group::DLEqProof}; -#[test] -fn test_dleq() { - let transcript = || RecommendedTranscript::new(b"Cross-Group DLEq Proof Test"); +fn transcript() -> RecommendedTranscript { + RecommendedTranscript::new(b"Cross-Group DLEq Proof Test") +} - let generators = ( +fn generators() -> (Generators, Generators) { + 
( Generators::new( ProjectivePoint::GENERATOR, ProjectivePoint::from_bytes( @@ -32,23 +35,72 @@ fn test_dleq() { hex!("8b655970153799af2aeadc9ff1add0ea6c7251d54154cfa92c173a0dd39c1f94") ).decompress().unwrap() ) + ) +} + +#[test] +fn test_rejection_sampling() { + let mut pow_2 = Scalar::one(); + for _ in 0 .. dfg::Scalar::CAPACITY { + pow_2 = pow_2.double(); + } + + assert!( + DLEqProof::prove_without_bias( + &mut OsRng, + &mut RecommendedTranscript::new(b""), + generators(), + pow_2 + ).is_none() ); +} - let key = Scalar::random(&mut OsRng); - let (proof, keys) = DLEqProof::prove(&mut OsRng, &mut transcript(), generators, key); +#[test] +fn test_dleq() { + let generators = generators(); - let public_keys = proof.verify(&mut transcript(), generators).unwrap(); - assert_eq!(generators.0.primary * keys.0, public_keys.0); - assert_eq!(generators.1.primary * keys.1, public_keys.1); + for i in 0 .. 2 { + let (proof, keys) = if i == 0 { + let mut seed = [0; 32]; + OsRng.fill_bytes(&mut seed); - #[cfg(feature = "serialize")] - { - let mut buf = vec![]; - proof.serialize(&mut buf).unwrap(); - let deserialized = DLEqProof::::deserialize( - &mut std::io::Cursor::new(&buf) - ).unwrap(); - assert_eq!(proof, deserialized); - deserialized.verify(&mut transcript(), generators).unwrap(); + DLEqProof::prove( + &mut OsRng, + &mut transcript(), + generators, + Blake2b512::new().chain_update(seed) + ) + } else { + let mut key; + let mut res; + while { + key = Scalar::random(&mut OsRng); + res = DLEqProof::prove_without_bias( + &mut OsRng, + &mut transcript(), + generators, + key + ); + res.is_none() + } {} + let res = res.unwrap(); + assert_eq!(key, res.1.0); + res + }; + + let public_keys = proof.verify(&mut transcript(), generators).unwrap(); + assert_eq!(generators.0.primary * keys.0, public_keys.0); + assert_eq!(generators.1.primary * keys.1, public_keys.1); + + #[cfg(feature = "serialize")] + { + let mut buf = vec![]; + proof.serialize(&mut buf).unwrap(); + let deserialized = 
DLEqProof::::deserialize( + &mut std::io::Cursor::new(&buf) + ).unwrap(); + assert_eq!(proof, deserialized); + deserialized.verify(&mut transcript(), generators).unwrap(); + } } } From ed569ea9c80d267feb326287535986ca92f0135b Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 2 Jul 2022 02:48:27 -0400 Subject: [PATCH 070/105] Make multiexp an optional, yet default, feature for DLEq --- crypto/dleq/Cargo.toml | 6 ++++-- crypto/dleq/src/cross_group/mod.rs | 16 +++++++++++++--- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/crypto/dleq/Cargo.toml b/crypto/dleq/Cargo.toml index 6d9e80a7..a0b8ac57 100644 --- a/crypto/dleq/Cargo.toml +++ b/crypto/dleq/Cargo.toml @@ -19,7 +19,7 @@ transcript = { package = "flexible-transcript", path = "../transcript", version ff = "0.12" group = "0.12" -multiexp = { path = "../multiexp" } +multiexp = { path = "../multiexp", optional = true } [dev-dependencies] hex-literal = "0.3" @@ -35,4 +35,6 @@ transcript = { package = "flexible-transcript", path = "../transcript", features serialize = [] cross_group = [] secure_capacity_difference = [] -default = ["secure_capacity_difference"] + +# These only apply to cross_group, yet are default to ensure its integrity and performance +default = ["secure_capacity_difference", "multiexp"] diff --git a/crypto/dleq/src/cross_group/mod.rs b/crypto/dleq/src/cross_group/mod.rs index 687f5b41..82e12ac2 100644 --- a/crypto/dleq/src/cross_group/mod.rs +++ b/crypto/dleq/src/cross_group/mod.rs @@ -12,7 +12,7 @@ use group::{ff::{Field, PrimeField, PrimeFieldBits}, prime::PrimeGroup}; use crate::Generators; pub mod scalar; -use scalar::{scalar_normalize, scalar_convert}; +use scalar::scalar_convert; pub(crate) mod schnorr; use schnorr::SchnorrPoK; @@ -159,13 +159,23 @@ impl DLEqProof commitments: impl Iterator ) -> G where G::Scalar: PrimeFieldBits { let mut pow_2 = G::Scalar::one(); - multiexp::multiexp_vartime( + #[cfg(feature = "multiexp")] + let res = multiexp::multiexp_vartime( 
&commitments.map(|commitment| { let res = (pow_2, commitment); pow_2 = pow_2.double(); res }).collect::>() - ) + ); + + #[cfg(not(feature = "multiexp"))] + let res = commitments.fold(G::identity(), |key, commitment| { + let res = key + (commitment * pow_2); + pow_2 = pow_2.double(); + res + }); + + res } fn reconstruct_keys(&self) -> (G0, G1) { From daadb438753219b032b74b069bcb43bc523b52df Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 2 Jul 2022 11:04:01 -0400 Subject: [PATCH 071/105] Minor doc updates --- crypto/dleq/src/tests/cross_group/mod.rs | 2 +- crypto/frost/src/sign.rs | 3 --- processor/Cargo.toml | 1 + 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/crypto/dleq/src/tests/cross_group/mod.rs b/crypto/dleq/src/tests/cross_group/mod.rs index f39aa4ce..93ffeb0f 100644 --- a/crypto/dleq/src/tests/cross_group/mod.rs +++ b/crypto/dleq/src/tests/cross_group/mod.rs @@ -56,7 +56,7 @@ fn test_rejection_sampling() { } #[test] -fn test_dleq() { +fn test_cross_group_dleq() { let generators = generators(); for i in 0 .. 2 { diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index c05e4a89..057ddc47 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -187,9 +187,6 @@ fn sign_with_share>( Ok((Package { B, binding, R, share: share.clone() }, share)) } -// This doesn't check the signing set is as expected and unexpected changes can cause false blames -// if legitimate participants are still using the original, expected, signing set. 
This library -// could be made more robust in that regard fn complete>( sign_params: &Params, sign: Package, diff --git a/processor/Cargo.toml b/processor/Cargo.toml index cf2eb7a3..c0a88e4f 100644 --- a/processor/Cargo.toml +++ b/processor/Cargo.toml @@ -5,6 +5,7 @@ description = "Multichain processor premised on canonicity to reach distributed license = "AGPL-3.0-only" authors = ["Luke Parker "] edition = "2021" +publish = false [dependencies] async-trait = "0.1" From a81a76da3b12e81045725bf30642e4d555f45e93 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 2 Jul 2022 14:08:04 -0400 Subject: [PATCH 072/105] Ensure multiexp never uses a zero-weight in its batch verifier --- crypto/multiexp/src/batch.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/crypto/multiexp/src/batch.rs b/crypto/multiexp/src/batch.rs index 5b5d65fb..81765563 100644 --- a/crypto/multiexp/src/batch.rs +++ b/crypto/multiexp/src/batch.rs @@ -22,7 +22,12 @@ impl BatchVerifier where ::Scalar: PrimeF let u = if self.0.len() == 0 { G::Scalar::one() } else { - G::Scalar::random(rng) + let mut weight = G::Scalar::random(&mut *rng); + // Ensure it's non-zero, as a zero scalar would cause this item to pass no matter what + while weight.is_zero().into() { + weight = G::Scalar::random(&mut *rng); + } + weight }; self.0.push((id, pairs.into_iter().map(|(scalar, point)| (scalar * u, point)).collect())); } From 3acfb5b7d2d5ad1a4f00b1cb8d5077bba7c4a03f Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 2 Jul 2022 14:22:17 -0400 Subject: [PATCH 073/105] Use a do-while in multiexp, first to please a friend, and then to annoy them It's also legitimately cleaner code. 
--- crypto/multiexp/src/batch.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/crypto/multiexp/src/batch.rs b/crypto/multiexp/src/batch.rs index 81765563..95a09df5 100644 --- a/crypto/multiexp/src/batch.rs +++ b/crypto/multiexp/src/batch.rs @@ -22,11 +22,12 @@ impl BatchVerifier where ::Scalar: PrimeF let u = if self.0.len() == 0 { G::Scalar::one() } else { - let mut weight = G::Scalar::random(&mut *rng); + let mut weight; // Ensure it's non-zero, as a zero scalar would cause this item to pass no matter what - while weight.is_zero().into() { + while { weight = G::Scalar::random(&mut *rng); - } + weight.is_zero().into() + } {} weight }; self.0.push((id, pairs.into_iter().map(|(scalar, point)| (scalar * u, point)).collect())); From bfe34ea6f8a8fbd941c38f401c51866b38ba8e11 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 5 Jul 2022 05:18:12 -0400 Subject: [PATCH 074/105] Make the cross-group DLEq bit components pow 2, not the commitments as a whole Few percent faster. Enables accumulating the current bit's point representation, whereas the blinding keys can't be accumulated. Also theoretically enables pre-computation of the bit points, removing hundreds of additions from the proof. When tested, this was less performant, possibly due to cache/heap allocation. 
--- crypto/dleq/src/cross_group/mod.rs | 58 +++++++++++------------------- 1 file changed, 20 insertions(+), 38 deletions(-) diff --git a/crypto/dleq/src/cross_group/mod.rs b/crypto/dleq/src/cross_group/mod.rs index 82e12ac2..ce00aef2 100644 --- a/crypto/dleq/src/cross_group/mod.rs +++ b/crypto/dleq/src/cross_group/mod.rs @@ -110,16 +110,14 @@ impl DLEqProof fn blinding_key( rng: &mut R, total: &mut F, - pow_2: &mut F, last: bool ) -> F { let blinding_key = if last { - -*total * pow_2.invert().unwrap() + -*total } else { F::random(&mut *rng) }; - *total += blinding_key * *pow_2; - *pow_2 = pow_2.double(); + *total += blinding_key; blinding_key } @@ -155,33 +153,10 @@ impl DLEqProof ) } - fn reconstruct_key( - commitments: impl Iterator - ) -> G where G::Scalar: PrimeFieldBits { - let mut pow_2 = G::Scalar::one(); - #[cfg(feature = "multiexp")] - let res = multiexp::multiexp_vartime( - &commitments.map(|commitment| { - let res = (pow_2, commitment); - pow_2 = pow_2.double(); - res - }).collect::>() - ); - - #[cfg(not(feature = "multiexp"))] - let res = commitments.fold(G::identity(), |key, commitment| { - let res = key + (commitment * pow_2); - pow_2 = pow_2.double(); - res - }); - - res - } - fn reconstruct_keys(&self) -> (G0, G1) { ( - Self::reconstruct_key(self.bits.iter().map(|bit| bit.commitments.0)), - Self::reconstruct_key(self.bits.iter().map(|bit| bit.commitments.1)) + self.bits.iter().map(|bit| bit.commitments.0).sum(), + self.bits.iter().map(|bit| bit.commitments.1).sum() ) } @@ -212,7 +187,7 @@ impl DLEqProof ); let mut blinding_key_total = (G0::Scalar::zero(), G1::Scalar::zero()); - let mut pow_2 = (G0::Scalar::one(), G1::Scalar::one()); + let mut pow_2 = (generators.0.primary, generators.1.primary); let raw_bits = f.0.to_le_bits(); let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap(); @@ -223,8 +198,8 @@ impl DLEqProof let last = i == (capacity - 1); let blinding_key = ( - Self::blinding_key(&mut *rng, &mut 
blinding_key_total.0, &mut pow_2.0, last), - Self::blinding_key(&mut *rng, &mut blinding_key_total.1, &mut pow_2.1, last) + Self::blinding_key(&mut *rng, &mut blinding_key_total.0, last), + Self::blinding_key(&mut *rng, &mut blinding_key_total.1, last) ); if last { debug_assert_eq!(blinding_key_total.0, G0::Scalar::zero()); @@ -235,8 +210,8 @@ impl DLEqProof (generators.0.alt * blinding_key.0), (generators.1.alt * blinding_key.1) ); - commitments.0 += generators.0.primary * G0::Scalar::from(bit.into()); - commitments.1 += generators.1.primary * G1::Scalar::from(bit.into()); + commitments.0 += pow_2.0 * G0::Scalar::from(bit.into()); + commitments.1 += pow_2.1 * G1::Scalar::from(bit.into()); Self::transcript_bit(transcript, i, commitments); let nonces = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); @@ -249,8 +224,8 @@ impl DLEqProof let mut to_sign = commitments; let bit = Choice::from(bit); let inv_bit = (!bit).unwrap_u8(); - to_sign.0 -= generators.0.primary * G0::Scalar::from(inv_bit.into()); - to_sign.1 -= generators.1.primary * G1::Scalar::from(inv_bit.into()); + to_sign.0 -= pow_2.0 * G0::Scalar::from(inv_bit.into()); + to_sign.1 -= pow_2.1 * G1::Scalar::from(inv_bit.into()); let e_1 = Self::R_nonces(transcript.clone(), generators, (s_0.0, s_0.1), to_sign, e_0); let mut s_1 = (nonces.0 + (e_1.0 * blinding_key.0), nonces.1 + (e_1.1 * blinding_key.1)); @@ -263,6 +238,9 @@ impl DLEqProof if last { break; } + + pow_2.0 = pow_2.0.double(); + pow_2.1 = pow_2.1.double(); } let proof = DLEqProof { bits, poks }; @@ -326,6 +304,7 @@ impl DLEqProof Err(DLEqError::InvalidProofOfKnowledge)?; } + let mut pow_2 = (generators.0.primary, generators.1.primary); for (i, bit) in self.bits.iter().enumerate() { Self::transcript_bit(transcript, i, bit.commitments); @@ -335,8 +314,8 @@ impl DLEqProof generators, bit.s[0], ( - bit.commitments.0 - generators.0.primary, - bit.commitments.1 - generators.1.primary + bit.commitments.0 - pow_2.0, + bit.commitments.1 - 
pow_2.1 ), Self::R_nonces( transcript.clone(), @@ -348,6 +327,9 @@ impl DLEqProof ) { return Err(DLEqError::InvalidProof); } + + pow_2.0 = pow_2.0.double(); + pow_2.1 = pow_2.1.double(); } Ok(keys) From d17c9587b597b58358dd2759ccf42a3d498ee706 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 5 Jul 2022 08:10:16 -0400 Subject: [PATCH 075/105] Fix mutual_scalar_from_bytes It didn't properly grab bits, nor did it double as needed. --- crypto/dleq/src/cross_group/mod.rs | 17 +++-------------- crypto/dleq/src/cross_group/scalar.rs | 23 +++++++++++++++++++---- 2 files changed, 22 insertions(+), 18 deletions(-) diff --git a/crypto/dleq/src/cross_group/mod.rs b/crypto/dleq/src/cross_group/mod.rs index ce00aef2..4a0ce530 100644 --- a/crypto/dleq/src/cross_group/mod.rs +++ b/crypto/dleq/src/cross_group/mod.rs @@ -12,7 +12,7 @@ use group::{ff::{Field, PrimeField, PrimeFieldBits}, prime::PrimeGroup}; use crate::Generators; pub mod scalar; -use scalar::scalar_convert; +use scalar::{scalar_convert, mutual_scalar_from_bytes}; pub(crate) mod schnorr; use schnorr::SchnorrPoK; @@ -121,22 +121,11 @@ impl DLEqProof blinding_key } - fn mutual_scalar_from_bytes(bytes: &[u8]) -> (G0::Scalar, G1::Scalar) { - let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap(); - debug_assert!((bytes.len() * 8) >= capacity); - - let mut accum = G0::Scalar::zero(); - for b in 0 .. 
capacity { - accum += G0::Scalar::from((bytes[b / 8] & (1 << (b % 8))).into()); - } - (accum, scalar_convert(accum).unwrap()) - } - #[allow(non_snake_case)] fn nonces(mut transcript: T, nonces: (G0, G1)) -> (G0::Scalar, G1::Scalar) { transcript.append_message(b"nonce_0", nonces.0.to_bytes().as_ref()); transcript.append_message(b"nonce_1", nonces.1.to_bytes().as_ref()); - Self::mutual_scalar_from_bytes(transcript.challenge(b"challenge").as_ref()) + mutual_scalar_from_bytes(transcript.challenge(b"challenge").as_ref()) } #[allow(non_snake_case)] @@ -268,7 +257,7 @@ impl DLEqProof rng, transcript, generators, - Self::mutual_scalar_from_bytes(digest.finalize().as_ref()) + mutual_scalar_from_bytes(digest.finalize().as_ref()) ) } diff --git a/crypto/dleq/src/cross_group/scalar.rs b/crypto/dleq/src/cross_group/scalar.rs index 8d922719..6df5dee7 100644 --- a/crypto/dleq/src/cross_group/scalar.rs +++ b/crypto/dleq/src/cross_group/scalar.rs @@ -18,10 +18,12 @@ pub fn scalar_normalize(scalar: F0) -> ( for bit in bits.iter().skip(bits.len() - usize::try_from(mutual_capacity).unwrap()) { res1 = res1.double(); res2 = res2.double(); - if *bit { - res1 += F0::one(); - res2 += F1::one(); - } + + let bit = *bit as u8; + debug_assert_eq!(bit | 1, 1); + + res1 += F0::from(bit.into()); + res2 += F1::from(bit.into()); } (res1, res2) @@ -32,3 +34,16 @@ pub fn scalar_convert(scalar: F0) -> Opt let (valid, converted) = scalar_normalize(scalar); Some(converted).filter(|_| scalar == valid) } + +/// Create a mutually valid scalar from bytes via bit truncation to not introduce bias +pub fn mutual_scalar_from_bytes(bytes: &[u8]) -> (F0, F1) { + let capacity = usize::try_from(F0::CAPACITY.min(F1::CAPACITY)).unwrap(); + debug_assert!((bytes.len() * 8) >= capacity); + + let mut accum = F0::zero(); + for b in 0 .. 
capacity { + accum = accum.double(); + accum += F0::from(((bytes[b / 8] >> (b % 8)) & 1).into()); + } + (accum, scalar_convert(accum).unwrap()) +} From 2ac5ea651c321167d834a366d5c8809218f7d945 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 5 Jul 2022 15:01:33 -0400 Subject: [PATCH 076/105] Use a ring per 2 bits instead of per bit Reduces proof size by 21.5% without notable computational complexity changes. I wouldn't be surprised if it has minor ones, yet I can't comment in which way they go without further review. Bit now verifies it can successfully complete the ring under debug, slightly increasing debug times. --- crypto/dleq/src/cross_group/mod.rs | 357 +++++++++++++++-------- crypto/dleq/src/tests/cross_group/mod.rs | 34 ++- 2 files changed, 268 insertions(+), 123 deletions(-) diff --git a/crypto/dleq/src/cross_group/mod.rs b/crypto/dleq/src/cross_group/mod.rs index 4a0ce530..ff28bca7 100644 --- a/crypto/dleq/src/cross_group/mod.rs +++ b/crypto/dleq/src/cross_group/mod.rs @@ -3,7 +3,7 @@ use rand_core::{RngCore, CryptoRng}; use digest::Digest; -use subtle::{Choice, ConditionallySelectable}; +use subtle::{ConstantTimeEq, ConditionallySelectable}; use transcript::Transcript; @@ -34,24 +34,168 @@ pub(crate) fn read_point(r: &mut R) -> std::io::Result { +pub struct Bits { commitments: (G0, G1), // Merged challenges have a slight security reduction, yet one already applied to the scalar // being proven for, and this saves ~8kb. 
Alternatively, challenges could be redefined as a seed, // present here, which is then hashed for each of the two challenges, remaining unbiased/unique // while maintaining the bandwidth savings, yet also while adding 252 hashes for // Secp256k1/Ed25519 - e: G0::Scalar, - s: [(G0::Scalar, G1::Scalar); 2] + e_0: G0::Scalar, + s: [(G0::Scalar, G1::Scalar); POSSIBLE_VALUES] } -impl Bit { +impl Bits + where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { + pub fn transcript(transcript: &mut T, i: usize, commitments: (G0, G1)) { + if i == 0 { + transcript.domain_separate(b"cross_group_dleq"); + } + transcript.append_message(b"bit_group", &u16::try_from(i).unwrap().to_le_bytes()); + transcript.append_message(b"commitment_0", commitments.0.to_bytes().as_ref()); + transcript.append_message(b"commitment_1", commitments.1.to_bytes().as_ref()); + } + + #[allow(non_snake_case)] + fn nonces(mut transcript: T, nonces: (G0, G1)) -> (G0::Scalar, G1::Scalar) { + transcript.append_message(b"nonce_0", nonces.0.to_bytes().as_ref()); + transcript.append_message(b"nonce_1", nonces.1.to_bytes().as_ref()); + mutual_scalar_from_bytes(transcript.challenge(b"challenge").as_ref()) + } + + #[allow(non_snake_case)] + fn R( + generators: (Generators, Generators), + s: (G0::Scalar, G1::Scalar), + A: (G0, G1), + e: (G0::Scalar, G1::Scalar) + ) -> (G0, G1) { + (((generators.0.alt * s.0) - (A.0 * e.0)), ((generators.1.alt * s.1) - (A.1 * e.1))) + } + + #[allow(non_snake_case)] + fn R_nonces( + transcript: T, + generators: (Generators, Generators), + s: (G0::Scalar, G1::Scalar), + A: (G0, G1), + e: (G0::Scalar, G1::Scalar) + ) -> (G0::Scalar, G1::Scalar) { + Self::nonces(transcript, Self::R(generators, s, A, e)) + } + + fn ring(pow_2: (G0, G1), commitments: (G0, G1)) -> [(G0, G1); POSSIBLE_VALUES] { + let mut res = [(G0::identity(), G1::identity()); POSSIBLE_VALUES]; + res[POSSIBLE_VALUES - 1] = commitments; + for i in (0 .. 
(POSSIBLE_VALUES - 1)).rev() { + res[i] = (res[i + 1].0 - pow_2.0, res[i + 1].1 - pow_2.1); + } + res + } + + pub fn prove( + rng: &mut R, + transcript: &mut T, + generators: (Generators, Generators), + i: usize, + pow_2: &mut (G0, G1), + bits: u8, + blinding_key: (G0::Scalar, G1::Scalar) + ) -> Bits { + // While it is possible to use larger values, it's not efficient to do so + // 2 + 2 == 2^2, yet 2 + 2 + 2 < 2^3 + debug_assert!((POSSIBLE_VALUES == 2) || (POSSIBLE_VALUES == 4)); + + let mut commitments = ( + (generators.0.alt * blinding_key.0), + (generators.1.alt * blinding_key.1) + ); + commitments.0 += pow_2.0 * G0::Scalar::from(bits.into()); + commitments.1 += pow_2.1 * G1::Scalar::from(bits.into()); + Self::transcript(transcript, i, commitments); + + let ring = Self::ring(*pow_2, commitments); + // Invert the index to get the raw blinding key's position in the ring + let actual = POSSIBLE_VALUES - 1 - usize::from(bits); + + let mut e_0 = G0::Scalar::zero(); + let mut s = [(G0::Scalar::zero(), G1::Scalar::zero()); POSSIBLE_VALUES]; + + let r = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); + #[allow(non_snake_case)] + let original_R = (generators.0.alt * r.0, generators.1.alt * r.1); + #[allow(non_snake_case)] + let mut R = original_R; + + for i in ((actual + 1) .. 
(actual + POSSIBLE_VALUES + 1)).map(|i| i % POSSIBLE_VALUES) { + let e = Self::nonces(transcript.clone(), R); + e_0 = G0::Scalar::conditional_select(&e_0, &e.0, usize::ct_eq(&i, &1)); + + // Solve for the real index + if i == actual { + s[i] = (r.0 + (e.0 * blinding_key.0), r.1 + (e.1 * blinding_key.1)); + debug_assert_eq!(Self::R(generators, s[i], ring[actual], e), original_R); + break; + // Generate a decoy response + } else { + s[i] = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); + } + + R = Self::R(generators, s[i], ring[i], e); + } + + pow_2.0 = pow_2.0.double(); + pow_2.1 = pow_2.1.double(); + if POSSIBLE_VALUES == 4 { + pow_2.0 = pow_2.0.double(); + pow_2.1 = pow_2.1.double(); + } + + Bits { commitments, e_0, s } + } + + pub fn verify( + &self, + transcript: &mut T, + generators: (Generators, Generators), + i: usize, + pow_2: &mut (G0, G1) + ) -> Result<(), DLEqError> { + debug_assert!((POSSIBLE_VALUES == 2) || (POSSIBLE_VALUES == 4)); + + Self::transcript(transcript, i, self.commitments); + + let ring = Self::ring(*pow_2, self.commitments); + let e_0 = (self.e_0, scalar_convert(self.e_0).ok_or(DLEqError::InvalidChallenge)?); + let mut e = None; + for i in (1 .. (POSSIBLE_VALUES + 1)).map(|i| i % POSSIBLE_VALUES) { + e = Some( + Self::R_nonces(transcript.clone(), generators, self.s[i], ring[i], e.unwrap_or(e_0)) + ); + } + + // Will panic if the above loop is never run somehow + // If e wasn't an Option, and instead initially set to e_0, it'd always pass + if e_0 != e.unwrap() { + return Err(DLEqError::InvalidProof); + } + + pow_2.0 = pow_2.0.double(); + pow_2.1 = pow_2.1.double(); + if POSSIBLE_VALUES == 4 { + pow_2.0 = pow_2.0.double(); + pow_2.1 = pow_2.1.double(); + } + + Ok(()) + } + #[cfg(feature = "serialize")] pub fn serialize(&self, w: &mut W) -> std::io::Result<()> { w.write_all(self.commitments.0.to_bytes().as_ref())?; w.write_all(self.commitments.1.to_bytes().as_ref())?; - w.write_all(self.e.to_repr().as_ref())?; - for i in 0 .. 
2 { + w.write_all(self.e_0.to_repr().as_ref())?; + for i in 0 .. POSSIBLE_VALUES { w.write_all(self.s[i].0.to_repr().as_ref())?; w.write_all(self.s[i].1.to_repr().as_ref())?; } @@ -59,17 +203,14 @@ impl Bit { } #[cfg(feature = "serialize")] - pub fn deserialize(r: &mut R) -> std::io::Result> { - Ok( - Bit { - commitments: (read_point(r)?, read_point(r)?), - e: read_scalar(r)?, - s: [ - (read_scalar(r)?, read_scalar(r)?), - (read_scalar(r)?, read_scalar(r)?) - ] - } - ) + pub fn deserialize(r: &mut R) -> std::io::Result> { + let commitments = (read_point(r)?, read_point(r)?); + let e_0 = read_scalar(r)?; + let mut s = [(G0::Scalar::zero(), G1::Scalar::zero()); POSSIBLE_VALUES]; + for i in 0 .. POSSIBLE_VALUES { + s[i] = (read_scalar(r)?, read_scalar(r)?); + } + Ok(Bits { commitments, e_0, s }) } } @@ -89,7 +230,8 @@ pub enum DLEqError { // anyone who wants it #[derive(Clone, PartialEq, Eq, Debug)] pub struct DLEqProof { - bits: Vec>, + bits: Vec>, + remainder: Option>, poks: (SchnorrPoK, SchnorrPoK) } @@ -121,43 +263,17 @@ impl DLEqProof blinding_key } - #[allow(non_snake_case)] - fn nonces(mut transcript: T, nonces: (G0, G1)) -> (G0::Scalar, G1::Scalar) { - transcript.append_message(b"nonce_0", nonces.0.to_bytes().as_ref()); - transcript.append_message(b"nonce_1", nonces.1.to_bytes().as_ref()); - mutual_scalar_from_bytes(transcript.challenge(b"challenge").as_ref()) - } - - #[allow(non_snake_case)] - fn R_nonces( - transcript: T, - generators: (Generators, Generators), - s: (G0::Scalar, G1::Scalar), - A: (G0, G1), - e: (G0::Scalar, G1::Scalar) - ) -> (G0::Scalar, G1::Scalar) { - Self::nonces( - transcript, - (((generators.0.alt * s.0) - (A.0 * e.0)), ((generators.1.alt * s.1) - (A.1 * e.1))) - ) - } - fn reconstruct_keys(&self) -> (G0, G1) { + let remainder = self.remainder + .as_ref() + .map(|bit| bit.commitments) + .unwrap_or((G0::identity(), G1::identity())); ( - self.bits.iter().map(|bit| bit.commitments.0).sum(), - self.bits.iter().map(|bit| 
bit.commitments.1).sum() + self.bits.iter().map(|bit| bit.commitments.0).sum::() + remainder.0, + self.bits.iter().map(|bit| bit.commitments.1).sum::() + remainder.1 ) } - fn transcript_bit(transcript: &mut T, i: usize, commitments: (G0, G1)) { - if i == 0 { - transcript.domain_separate(b"cross_group_dleq"); - } - transcript.append_message(b"bit", &u16::try_from(i).unwrap().to_le_bytes()); - transcript.append_message(b"commitment_0", commitments.0.to_bytes().as_ref()); - transcript.append_message(b"commitment_1", commitments.1.to_bytes().as_ref()); - } - fn prove_internal( rng: &mut R, transcript: &mut T, @@ -176,16 +292,7 @@ impl DLEqProof ); let mut blinding_key_total = (G0::Scalar::zero(), G1::Scalar::zero()); - let mut pow_2 = (generators.0.primary, generators.1.primary); - - let raw_bits = f.0.to_le_bits(); - let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap(); - let mut bits = Vec::with_capacity(capacity); - for (i, bit) in raw_bits.iter().enumerate() { - let bit = *bit as u8; - debug_assert_eq!(bit | 1, 1); - - let last = i == (capacity - 1); + let mut blinding_key = |rng: &mut R, last| { let blinding_key = ( Self::blinding_key(&mut *rng, &mut blinding_key_total.0, last), Self::blinding_key(&mut *rng, &mut blinding_key_total.1, last) @@ -194,45 +301,54 @@ impl DLEqProof debug_assert_eq!(blinding_key_total.0, G0::Scalar::zero()); debug_assert_eq!(blinding_key_total.1, G1::Scalar::zero()); } + blinding_key + }; - let mut commitments = ( - (generators.0.alt * blinding_key.0), - (generators.1.alt * blinding_key.1) - ); - commitments.0 += pow_2.0 * G0::Scalar::from(bit.into()); - commitments.1 += pow_2.1 * G1::Scalar::from(bit.into()); - Self::transcript_bit(transcript, i, commitments); + let mut pow_2 = (generators.0.primary, generators.1.primary); - let nonces = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); - let e_0 = Self::nonces( - transcript.clone(), - ((generators.0.alt * nonces.0), (generators.1.alt * 
nonces.1)) - ); - let mut s_0 = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); - - let mut to_sign = commitments; - let bit = Choice::from(bit); - let inv_bit = (!bit).unwrap_u8(); - to_sign.0 -= pow_2.0 * G0::Scalar::from(inv_bit.into()); - to_sign.1 -= pow_2.1 * G1::Scalar::from(inv_bit.into()); - let e_1 = Self::R_nonces(transcript.clone(), generators, (s_0.0, s_0.1), to_sign, e_0); - let mut s_1 = (nonces.0 + (e_1.0 * blinding_key.0), nonces.1 + (e_1.1 * blinding_key.1)); - - let e = G0::Scalar::conditional_select(&e_1.0, &e_0.0, bit); - G0::Scalar::conditional_swap(&mut s_1.0, &mut s_0.0, bit); - G1::Scalar::conditional_swap(&mut s_1.1, &mut s_0.1, bit); - bits.push(Bit { commitments, e, s: [s_0, s_1] }); - - // Break in order to not generate commitments for unused bits - if last { + let raw_bits = f.0.to_le_bits(); + let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap(); + let mut bits = Vec::with_capacity(capacity); + let mut these_bits: u8 = 0; + for (i, bit) in raw_bits.iter().enumerate() { + if i > ((capacity / 2) * 2) { break; } - pow_2.0 = pow_2.0.double(); - pow_2.1 = pow_2.1.double(); + let bit = *bit as u8; + debug_assert_eq!(bit | 1, 1); + + if (i % 2) == 0 { + these_bits = bit; + continue; + } else { + these_bits += bit << 1; + } + + let last = i == (capacity - 1); + let blinding_key = blinding_key(&mut *rng, last); + bits.push( + Bits::prove(&mut *rng, transcript, generators, i / 2, &mut pow_2, these_bits, blinding_key) + ); } - let proof = DLEqProof { bits, poks }; + let mut remainder = None; + if (capacity % 2) == 1 { + let blinding_key = blinding_key(&mut *rng, true); + remainder = Some( + Bits::prove( + &mut *rng, + transcript, + generators, + capacity / 2, + &mut pow_2, + these_bits, + blinding_key + ) + ); + } + + let proof = DLEqProof { bits, remainder, poks }; debug_assert_eq!( proof.reconstruct_keys(), (generators.0.primary * f.0, generators.1.primary * f.1) @@ -280,7 +396,11 @@ impl 
DLEqProof generators: (Generators, Generators) ) -> Result<(G0, G1), DLEqError> { let capacity = G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY); - if self.bits.len() != capacity.try_into().unwrap() { + if (self.bits.len() != (capacity / 2).try_into().unwrap()) || ( + // This shouldn't be possible, as deserialize ensures this is present for fields with this + // characteristic, and proofs locally generated will have it. Regardless, best to ensure + self.remainder.is_none() && ((capacity % 2) == 1) + ) { return Err(DLEqError::InvalidProofLength); } @@ -294,31 +414,11 @@ impl DLEqProof } let mut pow_2 = (generators.0.primary, generators.1.primary); - for (i, bit) in self.bits.iter().enumerate() { - Self::transcript_bit(transcript, i, bit.commitments); - - let bit_e = (bit.e, scalar_convert(bit.e).ok_or(DLEqError::InvalidChallenge)?); - if bit_e != Self::R_nonces( - transcript.clone(), - generators, - bit.s[0], - ( - bit.commitments.0 - pow_2.0, - bit.commitments.1 - pow_2.1 - ), - Self::R_nonces( - transcript.clone(), - generators, - bit.s[1], - bit.commitments, - bit_e - ) - ) { - return Err(DLEqError::InvalidProof); - } - - pow_2.0 = pow_2.0.double(); - pow_2.1 = pow_2.1.double(); + for (i, bits) in self.bits.iter().enumerate() { + bits.verify(transcript, generators, i, &mut pow_2)?; + } + if let Some(bit) = &self.remainder { + bit.verify(transcript, generators, self.bits.len(), &mut pow_2)?; } Ok(keys) @@ -329,6 +429,9 @@ impl DLEqProof for bit in &self.bits { bit.serialize(w)?; } + if let Some(bit) = &self.remainder { + bit.serialize(w)?; + } self.poks.0.serialize(w)?; self.poks.1.serialize(w) } @@ -337,9 +440,19 @@ impl DLEqProof pub fn deserialize(r: &mut R) -> std::io::Result> { let capacity = G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY); let mut bits = Vec::with_capacity(capacity.try_into().unwrap()); - for _ in 0 .. capacity { - bits.push(Bit::deserialize(r)?); + for _ in 0 .. 
(capacity / 2) { + bits.push(Bits::deserialize(r)?); } - Ok(DLEqProof { bits, poks: (SchnorrPoK::deserialize(r)?, SchnorrPoK::deserialize(r)?) }) + let mut remainder = None; + if (capacity % 2) == 1 { + remainder = Some(Bits::deserialize(r)?); + } + Ok( + DLEqProof { + bits, + remainder, + poks: (SchnorrPoK::deserialize(r)?, SchnorrPoK::deserialize(r)?) + } + ) } } diff --git a/crypto/dleq/src/tests/cross_group/mod.rs b/crypto/dleq/src/tests/cross_group/mod.rs index 93ffeb0f..9f3a1916 100644 --- a/crypto/dleq/src/tests/cross_group/mod.rs +++ b/crypto/dleq/src/tests/cross_group/mod.rs @@ -14,7 +14,7 @@ use blake2::{Digest, Blake2b512}; use transcript::RecommendedTranscript; -use crate::{Generators, cross_group::DLEqProof}; +use crate::{Generators, cross_group::{DLEqProof, scalar::mutual_scalar_from_bytes}}; fn transcript() -> RecommendedTranscript { RecommendedTranscript::new(b"Cross-Group DLEq Proof Test") @@ -104,3 +104,35 @@ fn test_cross_group_dleq() { } } } + +#[test] +fn test_remainder() { + // Uses Secp256k1 for both to achieve an odd capacity of 255 + assert_eq!(Scalar::CAPACITY, 255); + let generators = (generators().0, generators().0); + let keys = mutual_scalar_from_bytes(&[0xFF; 32]); + assert_eq!(keys.0, keys.1); + + let (proof, res) = DLEqProof::prove_without_bias( + &mut OsRng, + &mut transcript(), + generators, + keys.0 + ).unwrap(); + assert_eq!(keys, res); + + let public_keys = proof.verify(&mut transcript(), generators).unwrap(); + assert_eq!(generators.0.primary * keys.0, public_keys.0); + assert_eq!(generators.1.primary * keys.1, public_keys.1); + + #[cfg(feature = "serialize")] + { + let mut buf = vec![]; + proof.serialize(&mut buf).unwrap(); + let deserialized = DLEqProof::::deserialize( + &mut std::io::Cursor::new(&buf) + ).unwrap(); + assert_eq!(proof, deserialized); + deserialized.verify(&mut transcript(), generators).unwrap(); + } +} From 0ff5ee82922e5af34f2f8af923770fa7e335d93b Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 5 Jul 
2022 15:14:04 -0400 Subject: [PATCH 077/105] Correct e_0 to actually be e_0 --- crypto/dleq/src/cross_group/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crypto/dleq/src/cross_group/mod.rs b/crypto/dleq/src/cross_group/mod.rs index ff28bca7..d2a81763 100644 --- a/crypto/dleq/src/cross_group/mod.rs +++ b/crypto/dleq/src/cross_group/mod.rs @@ -129,7 +129,7 @@ impl Bits Bits Date: Tue, 5 Jul 2022 19:11:31 -0400 Subject: [PATCH 078/105] Add must_use to the BatchVerifier's verify -> bool functions --- crypto/multiexp/src/batch.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crypto/multiexp/src/batch.rs b/crypto/multiexp/src/batch.rs index 95a09df5..d9e8e74b 100644 --- a/crypto/multiexp/src/batch.rs +++ b/crypto/multiexp/src/batch.rs @@ -33,12 +33,14 @@ impl BatchVerifier where ::Scalar: PrimeF self.0.push((id, pairs.into_iter().map(|(scalar, point)| (scalar * u, point)).collect())); } + #[must_use] pub fn verify(&self) -> bool { multiexp( &self.0.iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::>() ).is_identity().into() } + #[must_use] pub fn verify_vartime(&self) -> bool { multiexp_vartime( &self.0.iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::>() From 26cee46950dddfd6d6f8b0180c852610e5581717 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 5 Jul 2022 19:10:30 -0400 Subject: [PATCH 079/105] Add a batch verified DLEq The batch verified one offers ~23% faster verification. While this massively refactors for modularity, I'm still not happy with the DLEq proofs at the top level, nor am I happy with the AOS signatures. I'll work on cleaning them up more later. 
--- crypto/dleq/Cargo.toml | 8 +- crypto/dleq/src/cross_group/bits.rs | 141 ++++++ crypto/dleq/src/cross_group/linear/aos.rs | 278 ++++++++++++ crypto/dleq/src/cross_group/linear/concise.rs | 217 +++++++++ .../dleq/src/cross_group/linear/efficient.rs | 182 ++++++++ crypto/dleq/src/cross_group/linear/mod.rs | 7 + crypto/dleq/src/cross_group/mod.rs | 411 ++---------------- .../src/tests/cross_group/linear/concise.rs | 98 +++++ .../src/tests/cross_group/linear/efficient.rs | 66 +++ .../dleq/src/tests/cross_group/linear/mod.rs | 2 + crypto/dleq/src/tests/cross_group/mod.rs | 96 +--- 11 files changed, 1031 insertions(+), 475 deletions(-) create mode 100644 crypto/dleq/src/cross_group/bits.rs create mode 100644 crypto/dleq/src/cross_group/linear/aos.rs create mode 100644 crypto/dleq/src/cross_group/linear/concise.rs create mode 100644 crypto/dleq/src/cross_group/linear/efficient.rs create mode 100644 crypto/dleq/src/cross_group/linear/mod.rs create mode 100644 crypto/dleq/src/tests/cross_group/linear/concise.rs create mode 100644 crypto/dleq/src/tests/cross_group/linear/efficient.rs create mode 100644 crypto/dleq/src/tests/cross_group/linear/mod.rs diff --git a/crypto/dleq/Cargo.toml b/crypto/dleq/Cargo.toml index a0b8ac57..5ef85242 100644 --- a/crypto/dleq/Cargo.toml +++ b/crypto/dleq/Cargo.toml @@ -19,7 +19,7 @@ transcript = { package = "flexible-transcript", path = "../transcript", version ff = "0.12" group = "0.12" -multiexp = { path = "../multiexp", optional = true } +multiexp = { path = "../multiexp", features = ["batch"], optional = true } [dev-dependencies] hex-literal = "0.3" @@ -33,8 +33,8 @@ transcript = { package = "flexible-transcript", path = "../transcript", features [features] serialize = [] -cross_group = [] +cross_group = ["multiexp"] secure_capacity_difference = [] -# These only apply to cross_group, yet are default to ensure its integrity and performance -default = ["secure_capacity_difference", "multiexp"] +# Only applies to cross_group, yet is 
default to ensure security +default = ["secure_capacity_difference"] diff --git a/crypto/dleq/src/cross_group/bits.rs b/crypto/dleq/src/cross_group/bits.rs new file mode 100644 index 00000000..474daa8b --- /dev/null +++ b/crypto/dleq/src/cross_group/bits.rs @@ -0,0 +1,141 @@ +use rand_core::{RngCore, CryptoRng}; + +use transcript::Transcript; + +use group::{ff::PrimeFieldBits, prime::PrimeGroup}; + +use crate::{Generators, cross_group::DLEqError}; + +#[cfg(feature = "serialize")] +use std::io::{Read, Write}; +#[cfg(feature = "serialize")] +use crate::cross_group::read_point; + +pub trait RingSignature: Sized { + type Context; + + const LEN: usize; + + fn prove( + rng: &mut R, + transcript: T, + generators: (Generators, Generators), + ring: &[(G0, G1)], + actual: usize, + blinding_key: (G0::Scalar, G1::Scalar) + ) -> Self; + + fn verify( + &self, + rng: &mut R, + transcript: T, + generators: (Generators, Generators), + context: &mut Self::Context, + ring: &[(G0, G1)] + ) -> Result<(), DLEqError>; + + #[cfg(feature = "serialize")] + fn serialize(&self, w: &mut W) -> std::io::Result<()>; + #[cfg(feature = "serialize")] + fn deserialize(r: &mut R) -> std::io::Result; +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) struct Bits> { + pub(crate) commitments: (G0, G1), + signature: RING +} + +impl> Bits + where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { + fn transcript(transcript: &mut T, i: usize, commitments: (G0, G1)) { + if i == 0 { + transcript.domain_separate(b"cross_group_dleq"); + } + transcript.append_message(b"bit_group", &u16::try_from(i).unwrap().to_le_bytes()); + transcript.append_message(b"commitment_0", commitments.0.to_bytes().as_ref()); + transcript.append_message(b"commitment_1", commitments.1.to_bytes().as_ref()); + } + + fn ring(pow_2: (G0, G1), commitments: (G0, G1)) -> Vec<(G0, G1)> { + let mut res = vec![(G0::identity(), G1::identity()); RING::LEN]; + res[RING::LEN - 1] = commitments; + for i in (0 .. 
(RING::LEN - 1)).rev() { + res[i] = (res[i + 1].0 - pow_2.0, res[i + 1].1 - pow_2.1); + } + res + } + + fn shift(pow_2: &mut (G0, G1)) { + pow_2.0 = pow_2.0.double(); + pow_2.1 = pow_2.1.double(); + if RING::LEN == 4 { + pow_2.0 = pow_2.0.double(); + pow_2.1 = pow_2.1.double(); + } + } + + pub(crate) fn prove( + rng: &mut R, + transcript: &mut T, + generators: (Generators, Generators), + i: usize, + pow_2: &mut (G0, G1), + bits: u8, + blinding_key: (G0::Scalar, G1::Scalar) + ) -> Self { + debug_assert!((RING::LEN == 2) || (RING::LEN == 4)); + + let mut commitments = ( + (generators.0.alt * blinding_key.0), + (generators.1.alt * blinding_key.1) + ); + commitments.0 += pow_2.0 * G0::Scalar::from(bits.into()); + commitments.1 += pow_2.1 * G1::Scalar::from(bits.into()); + Self::transcript(transcript, i, commitments); + + let ring = Self::ring(*pow_2, commitments); + // Invert the index to get the raw blinding key's position in the ring + let actual = RING::LEN - 1 - usize::from(bits); + let signature = RING::prove(rng, transcript.clone(), generators, &ring, actual, blinding_key); + + Self::shift(pow_2); + Bits { commitments, signature } + } + + pub(crate) fn verify( + &self, + rng: &mut R, + transcript: &mut T, + generators: (Generators, Generators), + context: &mut RING::Context, + i: usize, + pow_2: &mut (G0, G1) + ) -> Result<(), DLEqError> { + debug_assert!((RING::LEN == 2) || (RING::LEN == 4)); + + Self::transcript(transcript, i, self.commitments); + self.signature.verify( + rng, + transcript.clone(), + generators, + context, + &Self::ring(*pow_2, self.commitments) + )?; + + Self::shift(pow_2); + Ok(()) + } + + #[cfg(feature = "serialize")] + pub(crate) fn serialize(&self, w: &mut W) -> std::io::Result<()> { + w.write_all(self.commitments.0.to_bytes().as_ref())?; + w.write_all(self.commitments.1.to_bytes().as_ref())?; + self.signature.serialize(w) + } + + #[cfg(feature = "serialize")] + pub(crate) fn deserialize(r: &mut Re) -> std::io::Result { + Ok(Bits { 
commitments: (read_point(r)?, read_point(r)?), signature: RING::deserialize(r)? }) + } +} diff --git a/crypto/dleq/src/cross_group/linear/aos.rs b/crypto/dleq/src/cross_group/linear/aos.rs new file mode 100644 index 00000000..ada2309a --- /dev/null +++ b/crypto/dleq/src/cross_group/linear/aos.rs @@ -0,0 +1,278 @@ +use rand_core::{RngCore, CryptoRng}; + +use subtle::{ConstantTimeEq, ConditionallySelectable}; + +use transcript::Transcript; + +use group::{ff::{Field, PrimeFieldBits}, prime::PrimeGroup}; + +use multiexp::BatchVerifier; + +use crate::{ + Generators, + cross_group::{DLEqError, scalar::{scalar_convert, mutual_scalar_from_bytes}, bits::RingSignature} +}; + +#[cfg(feature = "serialize")] +use std::io::{Read, Write}; +#[cfg(feature = "serialize")] +use ff::PrimeField; +#[cfg(feature = "serialize")] +use crate::{read_scalar, cross_group::read_point}; + +#[allow(non_snake_case)] +fn nonces< + T: Transcript, + G0: PrimeGroup, + G1: PrimeGroup +>(mut transcript: T, nonces: (G0, G1)) -> (G0::Scalar, G1::Scalar) + where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { + transcript.append_message(b"nonce_0", nonces.0.to_bytes().as_ref()); + transcript.append_message(b"nonce_1", nonces.1.to_bytes().as_ref()); + mutual_scalar_from_bytes(transcript.challenge(b"challenge").as_ref()) +} + +#[allow(non_snake_case)] +fn calculate_R( + generators: (Generators, Generators), + s: (G0::Scalar, G1::Scalar), + A: (G0, G1), + e: (G0::Scalar, G1::Scalar) +) -> (G0, G1) { + (((generators.0.alt * s.0) - (A.0 * e.0)), ((generators.1.alt * s.1) - (A.1 * e.1))) +} + +#[allow(non_snake_case)] +fn R_nonces( + transcript: T, + generators: (Generators, Generators), + s: (G0::Scalar, G1::Scalar), + A: (G0, G1), + e: (G0::Scalar, G1::Scalar) +) -> (G0::Scalar, G1::Scalar) where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { + nonces(transcript, calculate_R(generators, s, A, e)) +} + +#[allow(non_snake_case)] +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct ClassicAos { 
+ // Merged challenges have a slight security reduction, yet one already applied to the scalar + // being proven for, and this saves ~8kb. Alternatively, challenges could be redefined as a seed, + // present here, which is then hashed for each of the two challenges, remaining unbiased/unique + // while maintaining the bandwidth savings, yet also while adding 252 hashes for + // Secp256k1/Ed25519 + e_0: G0::Scalar, + s: [(G0::Scalar, G1::Scalar); RING_LEN] +} + +impl< + G0: PrimeGroup, + G1: PrimeGroup, + const RING_LEN: usize +> RingSignature for ClassicAos + where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { + type Context = (); + + const LEN: usize = RING_LEN; + + fn prove( + rng: &mut R, + transcript: T, + generators: (Generators, Generators), + ring: &[(G0, G1)], + actual: usize, + blinding_key: (G0::Scalar, G1::Scalar) + ) -> Self { + // While it is possible to use larger values, it's not efficient to do so + // 2 + 2 == 2^2, yet 2 + 2 + 2 < 2^3 + debug_assert!((RING_LEN == 2) || (RING_LEN == 4)); + + let mut e_0 = G0::Scalar::zero(); + let mut s = [(G0::Scalar::zero(), G1::Scalar::zero()); RING_LEN]; + + let r = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); + #[allow(non_snake_case)] + let original_R = (generators.0.alt * r.0, generators.1.alt * r.1); + #[allow(non_snake_case)] + let mut R = original_R; + + for i in ((actual + 1) .. 
(actual + RING_LEN + 1)).map(|i| i % RING_LEN) { + let e = nonces(transcript.clone(), R); + e_0 = G0::Scalar::conditional_select(&e_0, &e.0, usize::ct_eq(&i, &0)); + + // Solve for the real index + if i == actual { + s[i] = (r.0 + (e.0 * blinding_key.0), r.1 + (e.1 * blinding_key.1)); + debug_assert_eq!(calculate_R(generators, s[i], ring[actual], e), original_R); + break; + // Generate a decoy response + } else { + s[i] = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); + } + + R = calculate_R(generators, s[i], ring[i], e); + } + + ClassicAos { e_0, s } + } + + fn verify( + &self, + _rng: &mut R, + transcript: T, + generators: (Generators, Generators), + _: &mut Self::Context, + ring: &[(G0, G1)] + ) -> Result<(), DLEqError> { + debug_assert!((RING_LEN == 2) || (RING_LEN == 4)); + + let e_0 = (self.e_0, scalar_convert(self.e_0).ok_or(DLEqError::InvalidChallenge)?); + let mut e = None; + for i in 0 .. RING_LEN { + e = Some(R_nonces(transcript.clone(), generators, self.s[i], ring[i], e.unwrap_or(e_0))); + } + + // Will panic if the above loop is never run somehow + // If e wasn't an Option, and instead initially set to e_0, it'd always pass + if e_0 != e.unwrap() { + Err(DLEqError::InvalidProof)?; + } + Ok(()) + } + + #[cfg(feature = "serialize")] + fn serialize(&self, w: &mut W) -> std::io::Result<()> { + w.write_all(self.e_0.to_repr().as_ref())?; + for i in 0 .. Self::LEN { + w.write_all(self.s[i].0.to_repr().as_ref())?; + w.write_all(self.s[i].1.to_repr().as_ref())?; + } + Ok(()) + } + + #[cfg(feature = "serialize")] + fn deserialize(r: &mut R) -> std::io::Result { + let e_0 = read_scalar(r)?; + let mut s = [(G0::Scalar::zero(), G1::Scalar::zero()); RING_LEN]; + for i in 0 .. 
Self::LEN { + s[i] = (read_scalar(r)?, read_scalar(r)?); + } + Ok(ClassicAos { e_0, s }) + } +} + +#[allow(non_snake_case)] +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct MultiexpAos { + R_0: (G0, G1), + s: [(G0::Scalar, G1::Scalar); 2] +} + +impl MultiexpAos { + #[allow(non_snake_case)] + fn R_batch( + generators: (Generators, Generators), + s: (G0::Scalar, G1::Scalar), + A: (G0, G1), + e: (G0::Scalar, G1::Scalar) + ) -> (Vec<(G0::Scalar, G0)>, Vec<(G1::Scalar, G1)>) { + (vec![(s.0, generators.0.alt), (-e.0, A.0)], vec![(s.1, generators.1.alt), (-e.1, A.1)]) + } +} + +impl RingSignature for MultiexpAos + where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { + type Context = (BatchVerifier<(), G0>, BatchVerifier<(), G1>); + + const LEN: usize = 2; + + fn prove( + rng: &mut R, + transcript: T, + generators: (Generators, Generators), + ring: &[(G0, G1)], + actual: usize, + blinding_key: (G0::Scalar, G1::Scalar) + ) -> Self { + #[allow(non_snake_case)] + let mut R_0 = (G0::identity(), G1::identity()); + let mut s = [(G0::Scalar::zero(), G1::Scalar::zero()); 2]; // Can't use Self::LEN due to 76200 + + let r = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); + #[allow(non_snake_case)] + let original_R = (generators.0.alt * r.0, generators.1.alt * r.1); + #[allow(non_snake_case)] + let mut R = original_R; + + for i in ((actual + 1) .. 
(actual + Self::LEN + 1)).map(|i| i % Self::LEN) { + if i == 0 { + R_0.0 = R.0; + R_0.1 = R.1; + } + + // Solve for the real index + let e = nonces(transcript.clone(), R); + if i == actual { + s[i] = (r.0 + (e.0 * blinding_key.0), r.1 + (e.1 * blinding_key.1)); + debug_assert_eq!(calculate_R(generators, s[i], ring[actual], e), original_R); + break; + // Generate a decoy response + } else { + s[i] = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); + } + + R = calculate_R(generators, s[i], ring[i], e); + } + + MultiexpAos { R_0, s } + } + + fn verify( + &self, + rng: &mut R, + transcript: T, + generators: (Generators, Generators), + batch: &mut Self::Context, + ring: &[(G0, G1)] + ) -> Result<(), DLEqError> { + let mut e = nonces(transcript.clone(), self.R_0); + for i in 0 .. (Self::LEN - 1) { + e = R_nonces(transcript.clone(), generators, self.s[i], ring[i], e); + } + + let mut statements = Self::R_batch( + generators, + *self.s.last().unwrap(), + *ring.last().unwrap(), + e + ); + statements.0.push((-G0::Scalar::one(), self.R_0.0)); + statements.1.push((-G1::Scalar::one(), self.R_0.1)); + batch.0.queue(&mut *rng, (), statements.0); + batch.1.queue(&mut *rng, (), statements.1); + + Ok(()) + } + + #[cfg(feature = "serialize")] + fn serialize(&self, w: &mut W) -> std::io::Result<()> { + w.write_all(self.R_0.0.to_bytes().as_ref())?; + w.write_all(self.R_0.1.to_bytes().as_ref())?; + for i in 0 .. Self::LEN { + w.write_all(self.s[i].0.to_repr().as_ref())?; + w.write_all(self.s[i].1.to_repr().as_ref())?; + } + Ok(()) + } + + #[cfg(feature = "serialize")] + fn deserialize(r: &mut R) -> std::io::Result { + #[allow(non_snake_case)] + let R_0 = (read_point(r)?, read_point(r)?); + let mut s = [(G0::Scalar::zero(), G1::Scalar::zero()); 2]; + for i in 0 .. 
Self::LEN { + s[i] = (read_scalar(r)?, read_scalar(r)?); + } + Ok(MultiexpAos { R_0, s }) + } +} diff --git a/crypto/dleq/src/cross_group/linear/concise.rs b/crypto/dleq/src/cross_group/linear/concise.rs new file mode 100644 index 00000000..21cf5652 --- /dev/null +++ b/crypto/dleq/src/cross_group/linear/concise.rs @@ -0,0 +1,217 @@ +use rand_core::{RngCore, CryptoRng}; + +use digest::Digest; + +use transcript::Transcript; + +use group::{ff::{Field, PrimeField, PrimeFieldBits}, prime::PrimeGroup}; + +use crate::{ + Generators, + cross_group::{ + DLEqError, DLEqProof, + scalar::{scalar_convert, mutual_scalar_from_bytes}, + schnorr::SchnorrPoK, + linear::aos::ClassicAos, + bits::Bits + } +}; + +#[cfg(feature = "serialize")] +use std::io::{Read, Write}; + +pub type ConciseDLEq = DLEqProof< + G0, + G1, + ClassicAos, + ClassicAos + >; + +impl ConciseDLEq + where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { + fn prove_internal( + rng: &mut R, + transcript: &mut T, + generators: (Generators, Generators), + f: (G0::Scalar, G1::Scalar) + ) -> (Self, (G0::Scalar, G1::Scalar)) { + Self::initialize_transcript( + transcript, + generators, + ((generators.0.primary * f.0), (generators.1.primary * f.1)) + ); + + let poks = ( + SchnorrPoK::::prove(rng, transcript, generators.0.primary, f.0), + SchnorrPoK::::prove(rng, transcript, generators.1.primary, f.1) + ); + + let mut blinding_key_total = (G0::Scalar::zero(), G1::Scalar::zero()); + let mut blinding_key = |rng: &mut R, last| { + let blinding_key = ( + Self::blinding_key(&mut *rng, &mut blinding_key_total.0, last), + Self::blinding_key(&mut *rng, &mut blinding_key_total.1, last) + ); + if last { + debug_assert_eq!(blinding_key_total.0, G0::Scalar::zero()); + debug_assert_eq!(blinding_key_total.1, G1::Scalar::zero()); + } + blinding_key + }; + + let mut pow_2 = (generators.0.primary, generators.1.primary); + + let raw_bits = f.0.to_le_bits(); + let capacity = 
usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap(); + let mut bits = Vec::with_capacity(capacity); + let mut these_bits: u8 = 0; + for (i, bit) in raw_bits.iter().enumerate() { + if i > ((capacity / 2) * 2) { + break; + } + + let bit = *bit as u8; + debug_assert_eq!(bit | 1, 1); + + if (i % 2) == 0 { + these_bits = bit; + continue; + } else { + these_bits += bit << 1; + } + + let last = i == (capacity - 1); + let blinding_key = blinding_key(&mut *rng, last); + bits.push( + Bits::prove(&mut *rng, transcript, generators, i / 2, &mut pow_2, these_bits, blinding_key) + ); + } + + let mut remainder = None; + if (capacity % 2) == 1 { + let blinding_key = blinding_key(&mut *rng, true); + remainder = Some( + Bits::prove( + &mut *rng, + transcript, + generators, + capacity / 2, + &mut pow_2, + these_bits, + blinding_key + ) + ); + } + + let proof = DLEqProof { bits, remainder, poks }; + debug_assert_eq!( + proof.reconstruct_keys(), + (generators.0.primary * f.0, generators.1.primary * f.1) + ); + (proof, f) + } + + /// Prove the cross-Group Discrete Log Equality for the points derived from the scalar created as + /// the output of the passed in Digest. Given the non-standard requirements to achieve + /// uniformity, needing to be < 2^x instead of less than a prime moduli, this is the simplest way + /// to safely and securely generate a Scalar, without risk of failure, nor bias + /// It also ensures a lack of determinable relation between keys, guaranteeing security in the + /// currently expected use case for this, atomic swaps, where each swap leaks the key. 
Knowing + /// the relationship between keys would allow breaking all swaps after just one + pub fn prove( + rng: &mut R, + transcript: &mut T, + generators: (Generators, Generators), + digest: D + ) -> (Self, (G0::Scalar, G1::Scalar)) { + Self::prove_internal( + rng, + transcript, + generators, + mutual_scalar_from_bytes(digest.finalize().as_ref()) + ) + } + + /// Prove the cross-Group Discrete Log Equality for the points derived from the scalar passed in, + /// failing if it's not mutually valid. This allows for rejection sampling externally derived + /// scalars until they're safely usable, as needed + pub fn prove_without_bias( + rng: &mut R, + transcript: &mut T, + generators: (Generators, Generators), + f0: G0::Scalar + ) -> Option<(Self, (G0::Scalar, G1::Scalar))> { + scalar_convert(f0).map(|f1| Self::prove_internal(rng, transcript, generators, (f0, f1))) + } + + /// Verify a cross-Group Discrete Log Equality statement, returning the points proven for + pub fn verify( + &self, + rng: &mut R, + transcript: &mut T, + generators: (Generators, Generators) + ) -> Result<(G0, G1), DLEqError> { + let capacity = G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY); + if (self.bits.len() != (capacity / 2).try_into().unwrap()) || ( + // These shouldn't be possible, as deserialize ensures this is present for fields with this + // characteristic, and proofs locally generated will have it. 
Regardless, best to ensure + (self.remainder.is_none() && ((capacity % 2) == 1)) || + (self.remainder.is_some() && ((capacity % 2) == 0)) + ) { + return Err(DLEqError::InvalidProofLength); + } + + let keys = self.reconstruct_keys(); + Self::initialize_transcript(transcript, generators, keys); + if !( + self.poks.0.verify(transcript, generators.0.primary, keys.0) && + self.poks.1.verify(transcript, generators.1.primary, keys.1) + ) { + Err(DLEqError::InvalidProofOfKnowledge)?; + } + + let mut pow_2 = (generators.0.primary, generators.1.primary); + for (i, bits) in self.bits.iter().enumerate() { + bits.verify(&mut *rng, transcript, generators, &mut (), i, &mut pow_2)?; + } + if let Some(bit) = &self.remainder { + bit.verify(&mut *rng, transcript, generators, &mut (), self.bits.len(), &mut pow_2)?; + } + + Ok(keys) + } + + #[cfg(feature = "serialize")] + pub fn serialize(&self, w: &mut W) -> std::io::Result<()> { + for bit in &self.bits { + bit.serialize(w)?; + } + if let Some(bit) = &self.remainder { + bit.serialize(w)?; + } + self.poks.0.serialize(w)?; + self.poks.1.serialize(w) + } + + #[cfg(feature = "serialize")] + pub fn deserialize(r: &mut R) -> std::io::Result { + let capacity = G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY); + let mut bits = Vec::with_capacity(capacity.try_into().unwrap()); + for _ in 0 .. (capacity / 2) { + bits.push(Bits::deserialize(r)?); + } + + let mut remainder = None; + if (capacity % 2) == 1 { + remainder = Some(Bits::deserialize(r)?); + } + + Ok( + DLEqProof { + bits, + remainder, + poks: (SchnorrPoK::deserialize(r)?, SchnorrPoK::deserialize(r)?) 
+ } + ) + } +} diff --git a/crypto/dleq/src/cross_group/linear/efficient.rs b/crypto/dleq/src/cross_group/linear/efficient.rs new file mode 100644 index 00000000..696744d6 --- /dev/null +++ b/crypto/dleq/src/cross_group/linear/efficient.rs @@ -0,0 +1,182 @@ +use rand_core::{RngCore, CryptoRng}; + +use digest::Digest; + +use transcript::Transcript; + +use group::{ff::{Field, PrimeField, PrimeFieldBits}, prime::PrimeGroup}; +use multiexp::BatchVerifier; + +use crate::{ + Generators, + cross_group::{ + DLEqError, DLEqProof, + scalar::{scalar_convert, mutual_scalar_from_bytes}, + schnorr::SchnorrPoK, + linear::aos::MultiexpAos, + bits::Bits + } +}; + +#[cfg(feature = "serialize")] +use std::io::{Read, Write}; + +pub type EfficientDLEq = DLEqProof, MultiexpAos>; + +impl EfficientDLEq + where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { + fn prove_internal( + rng: &mut R, + transcript: &mut T, + generators: (Generators, Generators), + f: (G0::Scalar, G1::Scalar) + ) -> (Self, (G0::Scalar, G1::Scalar)) { + Self::initialize_transcript( + transcript, + generators, + ((generators.0.primary * f.0), (generators.1.primary * f.1)) + ); + + let poks = ( + SchnorrPoK::::prove(rng, transcript, generators.0.primary, f.0), + SchnorrPoK::::prove(rng, transcript, generators.1.primary, f.1) + ); + + let mut blinding_key_total = (G0::Scalar::zero(), G1::Scalar::zero()); + let mut blinding_key = |rng: &mut R, last| { + let blinding_key = ( + Self::blinding_key(&mut *rng, &mut blinding_key_total.0, last), + Self::blinding_key(&mut *rng, &mut blinding_key_total.1, last) + ); + if last { + debug_assert_eq!(blinding_key_total.0, G0::Scalar::zero()); + debug_assert_eq!(blinding_key_total.1, G1::Scalar::zero()); + } + blinding_key + }; + + let mut pow_2 = (generators.0.primary, generators.1.primary); + + let raw_bits = f.0.to_le_bits(); + let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap(); + let mut bits = Vec::with_capacity(capacity); + for (i, 
bit) in raw_bits.iter().enumerate() { + let bit = *bit as u8; + debug_assert_eq!(bit | 1, 1); + + let last = i == (capacity - 1); + let blinding_key = blinding_key(&mut *rng, last); + bits.push( + Bits::prove(&mut *rng, transcript, generators, i, &mut pow_2, bit, blinding_key) + ); + + if last { + break; + } + } + + let proof = DLEqProof { bits, remainder: None, poks }; + debug_assert_eq!( + proof.reconstruct_keys(), + (generators.0.primary * f.0, generators.1.primary * f.1) + ); + (proof, f) + } + + /// Prove the cross-Group Discrete Log Equality for the points derived from the scalar created as + /// the output of the passed in Digest. Given the non-standard requirements to achieve + /// uniformity, needing to be < 2^x instead of less than a prime moduli, this is the simplest way + /// to safely and securely generate a Scalar, without risk of failure, nor bias + /// It also ensures a lack of determinable relation between keys, guaranteeing security in the + /// currently expected use case for this, atomic swaps, where each swap leaks the key. Knowing + /// the relationship between keys would allow breaking all swaps after just one + pub fn prove( + rng: &mut R, + transcript: &mut T, + generators: (Generators, Generators), + digest: D + ) -> (Self, (G0::Scalar, G1::Scalar)) { + Self::prove_internal( + rng, + transcript, + generators, + mutual_scalar_from_bytes(digest.finalize().as_ref()) + ) + } + + /// Prove the cross-Group Discrete Log Equality for the points derived from the scalar passed in, + /// failing if it's not mutually valid. 
This allows for rejection sampling externally derived + /// scalars until they're safely usable, as needed + pub fn prove_without_bias( + rng: &mut R, + transcript: &mut T, + generators: (Generators, Generators), + f0: G0::Scalar + ) -> Option<(Self, (G0::Scalar, G1::Scalar))> { + scalar_convert(f0).map(|f1| Self::prove_internal(rng, transcript, generators, (f0, f1))) + } + + /// Verify a cross-Group Discrete Log Equality statement, returning the points proven for + pub fn verify( + &self, + rng: &mut R, + transcript: &mut T, + generators: (Generators, Generators) + ) -> Result<(G0, G1), DLEqError> { + let capacity = G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY); + // The latter case shouldn't be possible yet would explicitly be invalid + if (self.bits.len() != capacity.try_into().unwrap()) || self.remainder.is_some() { + return Err(DLEqError::InvalidProofLength); + } + + let keys = self.reconstruct_keys(); + Self::initialize_transcript(transcript, generators, keys); + // TODO: Batch + if !( + self.poks.0.verify(transcript, generators.0.primary, keys.0) && + self.poks.1.verify(transcript, generators.1.primary, keys.1) + ) { + Err(DLEqError::InvalidProofOfKnowledge)?; + } + + let mut batch = ( + BatchVerifier::new(self.bits.len() * 3), + BatchVerifier::new(self.bits.len() * 3) + ); + let mut pow_2 = (generators.0.primary, generators.1.primary); + for (i, bits) in self.bits.iter().enumerate() { + bits.verify(&mut *rng, transcript, generators, &mut batch, i, &mut pow_2)?; + } + if (!batch.0.verify_vartime()) || (!batch.1.verify_vartime()) { + Err(DLEqError::InvalidProof)?; + } + + Ok(keys) + } + + #[cfg(feature = "serialize")] + pub fn serialize(&self, w: &mut W) -> std::io::Result<()> { + for bit in &self.bits { + bit.serialize(w)?; + } + self.poks.0.serialize(w)?; + self.poks.1.serialize(w) + } + + #[cfg(feature = "serialize")] + pub fn deserialize(r: &mut R) -> std::io::Result { + let capacity = G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY); + let mut bits = 
Vec::with_capacity(capacity.try_into().unwrap()); + for _ in 0 .. capacity { + bits.push(Bits::deserialize(r)?); + } + + Ok( + DLEqProof { + bits, + remainder: None, + poks: (SchnorrPoK::deserialize(r)?, SchnorrPoK::deserialize(r)?) + } + ) + } +} diff --git a/crypto/dleq/src/cross_group/linear/mod.rs b/crypto/dleq/src/cross_group/linear/mod.rs new file mode 100644 index 00000000..20322079 --- /dev/null +++ b/crypto/dleq/src/cross_group/linear/mod.rs @@ -0,0 +1,7 @@ +pub(crate) mod aos; + +mod concise; +pub use concise::ConciseDLEq; + +mod efficient; +pub use efficient::EfficientDLEq; diff --git a/crypto/dleq/src/cross_group/mod.rs b/crypto/dleq/src/cross_group/mod.rs index d2a81763..57231b93 100644 --- a/crypto/dleq/src/cross_group/mod.rs +++ b/crypto/dleq/src/cross_group/mod.rs @@ -1,26 +1,24 @@ use thiserror::Error; use rand_core::{RngCore, CryptoRng}; -use digest::Digest; - -use subtle::{ConstantTimeEq, ConditionallySelectable}; - use transcript::Transcript; -use group::{ff::{Field, PrimeField, PrimeFieldBits}, prime::PrimeGroup}; +use group::{ff::{PrimeField, PrimeFieldBits}, prime::PrimeGroup}; use crate::Generators; pub mod scalar; -use scalar::{scalar_convert, mutual_scalar_from_bytes}; pub(crate) mod schnorr; use schnorr::SchnorrPoK; +mod bits; +use bits::{RingSignature, Bits}; + +pub mod linear; + #[cfg(feature = "serialize")] -use std::io::{Read, Write}; -#[cfg(feature = "serialize")] -use crate::read_scalar; +use std::io::Read; #[cfg(feature = "serialize")] pub(crate) fn read_point(r: &mut R) -> std::io::Result { @@ -33,187 +31,6 @@ pub(crate) fn read_point(r: &mut R) -> std::io::Result { - commitments: (G0, G1), - // Merged challenges have a slight security reduction, yet one already applied to the scalar - // being proven for, and this saves ~8kb. 
Alternatively, challenges could be redefined as a seed, - // present here, which is then hashed for each of the two challenges, remaining unbiased/unique - // while maintaining the bandwidth savings, yet also while adding 252 hashes for - // Secp256k1/Ed25519 - e_0: G0::Scalar, - s: [(G0::Scalar, G1::Scalar); POSSIBLE_VALUES] -} - -impl Bits - where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { - pub fn transcript(transcript: &mut T, i: usize, commitments: (G0, G1)) { - if i == 0 { - transcript.domain_separate(b"cross_group_dleq"); - } - transcript.append_message(b"bit_group", &u16::try_from(i).unwrap().to_le_bytes()); - transcript.append_message(b"commitment_0", commitments.0.to_bytes().as_ref()); - transcript.append_message(b"commitment_1", commitments.1.to_bytes().as_ref()); - } - - #[allow(non_snake_case)] - fn nonces(mut transcript: T, nonces: (G0, G1)) -> (G0::Scalar, G1::Scalar) { - transcript.append_message(b"nonce_0", nonces.0.to_bytes().as_ref()); - transcript.append_message(b"nonce_1", nonces.1.to_bytes().as_ref()); - mutual_scalar_from_bytes(transcript.challenge(b"challenge").as_ref()) - } - - #[allow(non_snake_case)] - fn R( - generators: (Generators, Generators), - s: (G0::Scalar, G1::Scalar), - A: (G0, G1), - e: (G0::Scalar, G1::Scalar) - ) -> (G0, G1) { - (((generators.0.alt * s.0) - (A.0 * e.0)), ((generators.1.alt * s.1) - (A.1 * e.1))) - } - - #[allow(non_snake_case)] - fn R_nonces( - transcript: T, - generators: (Generators, Generators), - s: (G0::Scalar, G1::Scalar), - A: (G0, G1), - e: (G0::Scalar, G1::Scalar) - ) -> (G0::Scalar, G1::Scalar) { - Self::nonces(transcript, Self::R(generators, s, A, e)) - } - - fn ring(pow_2: (G0, G1), commitments: (G0, G1)) -> [(G0, G1); POSSIBLE_VALUES] { - let mut res = [(G0::identity(), G1::identity()); POSSIBLE_VALUES]; - res[POSSIBLE_VALUES - 1] = commitments; - for i in (0 .. 
(POSSIBLE_VALUES - 1)).rev() { - res[i] = (res[i + 1].0 - pow_2.0, res[i + 1].1 - pow_2.1); - } - res - } - - pub fn prove( - rng: &mut R, - transcript: &mut T, - generators: (Generators, Generators), - i: usize, - pow_2: &mut (G0, G1), - bits: u8, - blinding_key: (G0::Scalar, G1::Scalar) - ) -> Bits { - // While it is possible to use larger values, it's not efficient to do so - // 2 + 2 == 2^2, yet 2 + 2 + 2 < 2^3 - debug_assert!((POSSIBLE_VALUES == 2) || (POSSIBLE_VALUES == 4)); - - let mut commitments = ( - (generators.0.alt * blinding_key.0), - (generators.1.alt * blinding_key.1) - ); - commitments.0 += pow_2.0 * G0::Scalar::from(bits.into()); - commitments.1 += pow_2.1 * G1::Scalar::from(bits.into()); - Self::transcript(transcript, i, commitments); - - let ring = Self::ring(*pow_2, commitments); - // Invert the index to get the raw blinding key's position in the ring - let actual = POSSIBLE_VALUES - 1 - usize::from(bits); - - let mut e_0 = G0::Scalar::zero(); - let mut s = [(G0::Scalar::zero(), G1::Scalar::zero()); POSSIBLE_VALUES]; - - let r = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); - #[allow(non_snake_case)] - let original_R = (generators.0.alt * r.0, generators.1.alt * r.1); - #[allow(non_snake_case)] - let mut R = original_R; - - for i in ((actual + 1) .. 
(actual + POSSIBLE_VALUES + 1)).map(|i| i % POSSIBLE_VALUES) { - let e = Self::nonces(transcript.clone(), R); - e_0 = G0::Scalar::conditional_select(&e_0, &e.0, usize::ct_eq(&i, &0)); - - // Solve for the real index - if i == actual { - s[i] = (r.0 + (e.0 * blinding_key.0), r.1 + (e.1 * blinding_key.1)); - debug_assert_eq!(Self::R(generators, s[i], ring[actual], e), original_R); - break; - // Generate a decoy response - } else { - s[i] = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); - } - - R = Self::R(generators, s[i], ring[i], e); - } - - pow_2.0 = pow_2.0.double(); - pow_2.1 = pow_2.1.double(); - if POSSIBLE_VALUES == 4 { - pow_2.0 = pow_2.0.double(); - pow_2.1 = pow_2.1.double(); - } - - Bits { commitments, e_0, s } - } - - pub fn verify( - &self, - transcript: &mut T, - generators: (Generators, Generators), - i: usize, - pow_2: &mut (G0, G1) - ) -> Result<(), DLEqError> { - debug_assert!((POSSIBLE_VALUES == 2) || (POSSIBLE_VALUES == 4)); - - Self::transcript(transcript, i, self.commitments); - - let ring = Self::ring(*pow_2, self.commitments); - let e_0 = (self.e_0, scalar_convert(self.e_0).ok_or(DLEqError::InvalidChallenge)?); - let mut e = None; - for i in 0 .. POSSIBLE_VALUES { - e = Some( - Self::R_nonces(transcript.clone(), generators, self.s[i], ring[i], e.unwrap_or(e_0)) - ); - } - - // Will panic if the above loop is never run somehow - // If e wasn't an Option, and instead initially set to e_0, it'd always pass - if e_0 != e.unwrap() { - return Err(DLEqError::InvalidProof); - } - - pow_2.0 = pow_2.0.double(); - pow_2.1 = pow_2.1.double(); - if POSSIBLE_VALUES == 4 { - pow_2.0 = pow_2.0.double(); - pow_2.1 = pow_2.1.double(); - } - - Ok(()) - } - - #[cfg(feature = "serialize")] - pub fn serialize(&self, w: &mut W) -> std::io::Result<()> { - w.write_all(self.commitments.0.to_bytes().as_ref())?; - w.write_all(self.commitments.1.to_bytes().as_ref())?; - w.write_all(self.e_0.to_repr().as_ref())?; - for i in 0 .. 
POSSIBLE_VALUES { - w.write_all(self.s[i].0.to_repr().as_ref())?; - w.write_all(self.s[i].1.to_repr().as_ref())?; - } - Ok(()) - } - - #[cfg(feature = "serialize")] - pub fn deserialize(r: &mut R) -> std::io::Result> { - let commitments = (read_point(r)?, read_point(r)?); - let e_0 = read_scalar(r)?; - let mut s = [(G0::Scalar::zero(), G1::Scalar::zero()); POSSIBLE_VALUES]; - for i in 0 .. POSSIBLE_VALUES { - s[i] = (read_scalar(r)?, read_scalar(r)?); - } - Ok(Bits { commitments, e_0, s }) - } -} - #[derive(Error, PartialEq, Eq, Debug)] pub enum DLEqError { #[error("invalid proof of knowledge")] @@ -229,15 +46,24 @@ pub enum DLEqError { // Debug would be such a dump of data this likely isn't helpful, but at least it's available to // anyone who wants it #[derive(Clone, PartialEq, Eq, Debug)] -pub struct DLEqProof { - bits: Vec>, - remainder: Option>, +pub struct DLEqProof< + G0: PrimeGroup, + G1: PrimeGroup, + RING: RingSignature, + REM: RingSignature +> where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { + bits: Vec>, + remainder: Option>, poks: (SchnorrPoK, SchnorrPoK) } -impl DLEqProof - where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { - fn initialize_transcript( +impl< + G0: PrimeGroup, + G1: PrimeGroup, + RING: RingSignature, + REM: RingSignature +> DLEqProof where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { + pub(crate) fn initialize_transcript( transcript: &mut T, generators: (Generators, Generators), keys: (G0, G1) @@ -249,7 +75,7 @@ impl DLEqProof transcript.append_message(b"point_1", keys.1.to_bytes().as_ref()); } - fn blinding_key( + pub(crate) fn blinding_key( rng: &mut R, total: &mut F, last: bool @@ -264,195 +90,16 @@ impl DLEqProof } fn reconstruct_keys(&self) -> (G0, G1) { - let remainder = self.remainder - .as_ref() - .map(|bit| bit.commitments) - .unwrap_or((G0::identity(), G1::identity())); - ( - self.bits.iter().map(|bit| bit.commitments.0).sum::() + remainder.0, - self.bits.iter().map(|bit| 
bit.commitments.1).sum::() + remainder.1 - ) - } - - fn prove_internal( - rng: &mut R, - transcript: &mut T, - generators: (Generators, Generators), - f: (G0::Scalar, G1::Scalar) - ) -> (Self, (G0::Scalar, G1::Scalar)) { - Self::initialize_transcript( - transcript, - generators, - ((generators.0.primary * f.0), (generators.1.primary * f.1)) + let mut res = ( + self.bits.iter().map(|bit| bit.commitments.0).sum::(), + self.bits.iter().map(|bit| bit.commitments.1).sum::() ); - let poks = ( - SchnorrPoK::::prove(rng, transcript, generators.0.primary, f.0), - SchnorrPoK::::prove(rng, transcript, generators.1.primary, f.1) - ); - - let mut blinding_key_total = (G0::Scalar::zero(), G1::Scalar::zero()); - let mut blinding_key = |rng: &mut R, last| { - let blinding_key = ( - Self::blinding_key(&mut *rng, &mut blinding_key_total.0, last), - Self::blinding_key(&mut *rng, &mut blinding_key_total.1, last) - ); - if last { - debug_assert_eq!(blinding_key_total.0, G0::Scalar::zero()); - debug_assert_eq!(blinding_key_total.1, G1::Scalar::zero()); - } - blinding_key - }; - - let mut pow_2 = (generators.0.primary, generators.1.primary); - - let raw_bits = f.0.to_le_bits(); - let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap(); - let mut bits = Vec::with_capacity(capacity); - let mut these_bits: u8 = 0; - for (i, bit) in raw_bits.iter().enumerate() { - if i > ((capacity / 2) * 2) { - break; - } - - let bit = *bit as u8; - debug_assert_eq!(bit | 1, 1); - - if (i % 2) == 0 { - these_bits = bit; - continue; - } else { - these_bits += bit << 1; - } - - let last = i == (capacity - 1); - let blinding_key = blinding_key(&mut *rng, last); - bits.push( - Bits::prove(&mut *rng, transcript, generators, i / 2, &mut pow_2, these_bits, blinding_key) - ); - } - - let mut remainder = None; - if (capacity % 2) == 1 { - let blinding_key = blinding_key(&mut *rng, true); - remainder = Some( - Bits::prove( - &mut *rng, - transcript, - generators, - capacity / 2, - &mut 
pow_2, - these_bits, - blinding_key - ) - ); - } - - let proof = DLEqProof { bits, remainder, poks }; - debug_assert_eq!( - proof.reconstruct_keys(), - (generators.0.primary * f.0, generators.1.primary * f.1) - ); - (proof, f) - } - - /// Prove the cross-Group Discrete Log Equality for the points derived from the scalar created as - /// the output of the passed in Digest. Given the non-standard requirements to achieve - /// uniformity, needing to be < 2^x instead of less than a prime moduli, this is the simplest way - /// to safely and securely generate a Scalar, without risk of failure, nor bias - /// It also ensures a lack of determinable relation between keys, guaranteeing security in the - /// currently expected use case for this, atomic swaps, where each swap leaks the key. Knowing - /// the relationship between keys would allow breaking all swaps after just one - pub fn prove( - rng: &mut R, - transcript: &mut T, - generators: (Generators, Generators), - digest: D - ) -> (Self, (G0::Scalar, G1::Scalar)) { - Self::prove_internal( - rng, - transcript, - generators, - mutual_scalar_from_bytes(digest.finalize().as_ref()) - ) - } - - /// Prove the cross-Group Discrete Log Equality for the points derived from the scalar passed in, - /// failing if it's not mutually valid. 
This allows for rejection sampling externally derived - /// scalars until they're safely usable, as needed - pub fn prove_without_bias( - rng: &mut R, - transcript: &mut T, - generators: (Generators, Generators), - f0: G0::Scalar - ) -> Option<(Self, (G0::Scalar, G1::Scalar))> { - scalar_convert(f0).map(|f1| Self::prove_internal(rng, transcript, generators, (f0, f1))) - } - - /// Verify a cross-Group Discrete Log Equality statement, returning the points proven for - pub fn verify( - &self, - transcript: &mut T, - generators: (Generators, Generators) - ) -> Result<(G0, G1), DLEqError> { - let capacity = G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY); - if (self.bits.len() != (capacity / 2).try_into().unwrap()) || ( - // This shouldn't be possible, as deserialize ensures this is present for fields with this - // characteristic, and proofs locally generated will have it. Regardless, best to ensure - self.remainder.is_none() && ((capacity % 2) == 1) - ) { - return Err(DLEqError::InvalidProofLength); - } - - let keys = self.reconstruct_keys(); - Self::initialize_transcript(transcript, generators, keys); - if !( - self.poks.0.verify(transcript, generators.0.primary, keys.0) && - self.poks.1.verify(transcript, generators.1.primary, keys.1) - ) { - Err(DLEqError::InvalidProofOfKnowledge)?; - } - - let mut pow_2 = (generators.0.primary, generators.1.primary); - for (i, bits) in self.bits.iter().enumerate() { - bits.verify(transcript, generators, i, &mut pow_2)?; - } if let Some(bit) = &self.remainder { - bit.verify(transcript, generators, self.bits.len(), &mut pow_2)?; + res.0 += bit.commitments.0; + res.1 += bit.commitments.1; } - Ok(keys) - } - - #[cfg(feature = "serialize")] - pub fn serialize(&self, w: &mut W) -> std::io::Result<()> { - for bit in &self.bits { - bit.serialize(w)?; - } - if let Some(bit) = &self.remainder { - bit.serialize(w)?; - } - self.poks.0.serialize(w)?; - self.poks.1.serialize(w) - } - - #[cfg(feature = "serialize")] - pub fn deserialize(r: &mut 
R) -> std::io::Result> { - let capacity = G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY); - let mut bits = Vec::with_capacity(capacity.try_into().unwrap()); - for _ in 0 .. (capacity / 2) { - bits.push(Bits::deserialize(r)?); - } - let mut remainder = None; - if (capacity % 2) == 1 { - remainder = Some(Bits::deserialize(r)?); - } - Ok( - DLEqProof { - bits, - remainder, - poks: (SchnorrPoK::deserialize(r)?, SchnorrPoK::deserialize(r)?) - } - ) + res } } diff --git a/crypto/dleq/src/tests/cross_group/linear/concise.rs b/crypto/dleq/src/tests/cross_group/linear/concise.rs new file mode 100644 index 00000000..1fbe49a7 --- /dev/null +++ b/crypto/dleq/src/tests/cross_group/linear/concise.rs @@ -0,0 +1,98 @@ +use rand_core::{RngCore, OsRng}; + +use ff::{Field, PrimeField}; + +use k256::Scalar; +#[cfg(feature = "serialize")] +use k256::ProjectivePoint; +#[cfg(feature = "serialize")] +use dalek_ff_group::EdwardsPoint; + +use blake2::{Digest, Blake2b512}; + +use crate::{ + cross_group::{scalar::mutual_scalar_from_bytes, linear::ConciseDLEq}, + tests::cross_group::{transcript, generators} +}; + +#[test] +fn test_linear_concise_cross_group_dleq() { + let generators = generators(); + + for i in 0 .. 
1 { + let (proof, keys) = if i == 0 { + let mut seed = [0; 32]; + OsRng.fill_bytes(&mut seed); + + ConciseDLEq::prove( + &mut OsRng, + &mut transcript(), + generators, + Blake2b512::new().chain_update(seed) + ) + } else { + let mut key; + let mut res; + while { + key = Scalar::random(&mut OsRng); + res = ConciseDLEq::prove_without_bias( + &mut OsRng, + &mut transcript(), + generators, + key + ); + res.is_none() + } {} + let res = res.unwrap(); + assert_eq!(key, res.1.0); + res + }; + + let public_keys = proof.verify(&mut OsRng, &mut transcript(), generators).unwrap(); + assert_eq!(generators.0.primary * keys.0, public_keys.0); + assert_eq!(generators.1.primary * keys.1, public_keys.1); + + #[cfg(feature = "serialize")] + { + let mut buf = vec![]; + proof.serialize(&mut buf).unwrap(); + let deserialized = ConciseDLEq::::deserialize( + &mut std::io::Cursor::new(&buf) + ).unwrap(); + assert_eq!(proof, deserialized); + deserialized.verify(&mut OsRng, &mut transcript(), generators).unwrap(); + } + } +} + +#[test] +fn test_remainder() { + // Uses Secp256k1 for both to achieve an odd capacity of 255 + assert_eq!(Scalar::CAPACITY, 255); + let generators = (generators().0, generators().0); + let keys = mutual_scalar_from_bytes(&[0xFF; 32]); + assert_eq!(keys.0, keys.1); + + let (proof, res) = ConciseDLEq::prove_without_bias( + &mut OsRng, + &mut transcript(), + generators, + keys.0 + ).unwrap(); + assert_eq!(keys, res); + + let public_keys = proof.verify(&mut OsRng, &mut transcript(), generators).unwrap(); + assert_eq!(generators.0.primary * keys.0, public_keys.0); + assert_eq!(generators.1.primary * keys.1, public_keys.1); + + #[cfg(feature = "serialize")] + { + let mut buf = vec![]; + proof.serialize(&mut buf).unwrap(); + let deserialized = ConciseDLEq::::deserialize( + &mut std::io::Cursor::new(&buf) + ).unwrap(); + assert_eq!(proof, deserialized); + deserialized.verify(&mut OsRng, &mut transcript(), generators).unwrap(); + } +} diff --git 
a/crypto/dleq/src/tests/cross_group/linear/efficient.rs b/crypto/dleq/src/tests/cross_group/linear/efficient.rs new file mode 100644 index 00000000..bafc4902 --- /dev/null +++ b/crypto/dleq/src/tests/cross_group/linear/efficient.rs @@ -0,0 +1,66 @@ +use rand_core::{RngCore, OsRng}; + +use ff::Field; + +use k256::Scalar; +#[cfg(feature = "serialize")] +use k256::ProjectivePoint; +#[cfg(feature = "serialize")] +use dalek_ff_group::EdwardsPoint; + +use blake2::{Digest, Blake2b512}; + +use crate::{ + cross_group::linear::EfficientDLEq, + tests::cross_group::{transcript, generators} +}; + +#[test] +fn test_linear_efficient_cross_group_dleq() { + let generators = generators(); + + for i in 0 .. 1 { + let (proof, keys) = if i == 0 { + let mut seed = [0; 32]; + OsRng.fill_bytes(&mut seed); + + EfficientDLEq::prove( + &mut OsRng, + &mut transcript(), + generators, + Blake2b512::new().chain_update(seed) + ) + } else { + let mut key; + let mut res; + while { + key = Scalar::random(&mut OsRng); + res = EfficientDLEq::prove_without_bias( + &mut OsRng, + &mut transcript(), + generators, + key + ); + res.is_none() + } {} + let res = res.unwrap(); + assert_eq!(key, res.1.0); + res + }; + + let public_keys = proof.verify(&mut OsRng, &mut transcript(), generators).unwrap(); + assert_eq!(generators.0.primary * keys.0, public_keys.0); + assert_eq!(generators.1.primary * keys.1, public_keys.1); + + #[cfg(feature = "serialize")] + { + let mut buf = vec![]; + proof.serialize(&mut buf).unwrap(); + let deserialized = EfficientDLEq::::deserialize( + &mut std::io::Cursor::new(&buf) + ).unwrap(); + assert_eq!(proof, deserialized); + deserialized.verify(&mut OsRng, &mut transcript(), generators).unwrap(); + } + } +} diff --git a/crypto/dleq/src/tests/cross_group/linear/mod.rs b/crypto/dleq/src/tests/cross_group/linear/mod.rs new file mode 100644 index 00000000..5603b63d --- /dev/null +++ b/crypto/dleq/src/tests/cross_group/linear/mod.rs @@ -0,0 +1,2 @@ +mod concise; +mod efficient; diff --git 
a/crypto/dleq/src/tests/cross_group/mod.rs b/crypto/dleq/src/tests/cross_group/mod.rs index 9f3a1916..9557127d 100644 --- a/crypto/dleq/src/tests/cross_group/mod.rs +++ b/crypto/dleq/src/tests/cross_group/mod.rs @@ -2,7 +2,7 @@ mod scalar; mod schnorr; use hex_literal::hex; -use rand_core::{RngCore, OsRng}; +use rand_core::OsRng; use ff::{Field, PrimeField}; use group::{Group, GroupEncoding}; @@ -10,17 +10,17 @@ use group::{Group, GroupEncoding}; use k256::{Scalar, ProjectivePoint}; use dalek_ff_group::{self as dfg, EdwardsPoint, CompressedEdwardsY}; -use blake2::{Digest, Blake2b512}; - use transcript::RecommendedTranscript; -use crate::{Generators, cross_group::{DLEqProof, scalar::mutual_scalar_from_bytes}}; +use crate::{Generators, cross_group::linear::EfficientDLEq}; -fn transcript() -> RecommendedTranscript { +mod linear; + +pub(crate) fn transcript() -> RecommendedTranscript { RecommendedTranscript::new(b"Cross-Group DLEq Proof Test") } -fn generators() -> (Generators, Generators) { +pub(crate) fn generators() -> (Generators, Generators) { ( Generators::new( ProjectivePoint::GENERATOR, @@ -46,7 +46,7 @@ fn test_rejection_sampling() { } assert!( - DLEqProof::prove_without_bias( + EfficientDLEq::prove_without_bias( &mut OsRng, &mut RecommendedTranscript::new(b""), generators(), @@ -54,85 +54,3 @@ fn test_rejection_sampling() { ).is_none() ); } - -#[test] -fn test_cross_group_dleq() { - let generators = generators(); - - for i in 0 .. 
2 { - let (proof, keys) = if i == 0 { - let mut seed = [0; 32]; - OsRng.fill_bytes(&mut seed); - - DLEqProof::prove( - &mut OsRng, - &mut transcript(), - generators, - Blake2b512::new().chain_update(seed) - ) - } else { - let mut key; - let mut res; - while { - key = Scalar::random(&mut OsRng); - res = DLEqProof::prove_without_bias( - &mut OsRng, - &mut transcript(), - generators, - key - ); - res.is_none() - } {} - let res = res.unwrap(); - assert_eq!(key, res.1.0); - res - }; - - let public_keys = proof.verify(&mut transcript(), generators).unwrap(); - assert_eq!(generators.0.primary * keys.0, public_keys.0); - assert_eq!(generators.1.primary * keys.1, public_keys.1); - - #[cfg(feature = "serialize")] - { - let mut buf = vec![]; - proof.serialize(&mut buf).unwrap(); - let deserialized = DLEqProof::::deserialize( - &mut std::io::Cursor::new(&buf) - ).unwrap(); - assert_eq!(proof, deserialized); - deserialized.verify(&mut transcript(), generators).unwrap(); - } - } -} - -#[test] -fn test_remainder() { - // Uses Secp256k1 for both to achieve an odd capacity of 255 - assert_eq!(Scalar::CAPACITY, 255); - let generators = (generators().0, generators().0); - let keys = mutual_scalar_from_bytes(&[0xFF; 32]); - assert_eq!(keys.0, keys.1); - - let (proof, res) = DLEqProof::prove_without_bias( - &mut OsRng, - &mut transcript(), - generators, - keys.0 - ).unwrap(); - assert_eq!(keys, res); - - let public_keys = proof.verify(&mut transcript(), generators).unwrap(); - assert_eq!(generators.0.primary * keys.0, public_keys.0); - assert_eq!(generators.1.primary * keys.1, public_keys.1); - - #[cfg(feature = "serialize")] - { - let mut buf = vec![]; - proof.serialize(&mut buf).unwrap(); - let deserialized = DLEqProof::::deserialize( - &mut std::io::Cursor::new(&buf) - ).unwrap(); - assert_eq!(proof, deserialized); - deserialized.verify(&mut transcript(), generators).unwrap(); - } -} From fd817a69589f10868d1f6c3ef4cc97a55cfc1953 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 
7 Jul 2022 00:22:19 -0400 Subject: [PATCH 080/105] Fix multiexp for 0-length batches --- crypto/multiexp/src/lib.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/crypto/multiexp/src/lib.rs b/crypto/multiexp/src/lib.rs index ca1b6495..f4a08e07 100644 --- a/crypto/multiexp/src/lib.rs +++ b/crypto/multiexp/src/lib.rs @@ -56,6 +56,8 @@ pub(crate) fn prep_tables( #[derive(Clone, Copy, PartialEq, Eq, Debug)] enum Algorithm { + Null, + Single, Straus(u8), Pippenger(u8) } @@ -107,7 +109,11 @@ Pippenger 8 is more efficient at 875 with 499µs per */ fn algorithm(len: usize) -> Algorithm { #[cfg(not(debug_assertions))] - if len < 10 { + if len == 0 { + Algorithm::Null + } else if len == 1 { + Algorithm::Single + } else if len < 10 { // Straus 2 never showed a performance benefit, even with just 2 elements Algorithm::Straus(3) } else if len < 20 { @@ -149,6 +155,8 @@ fn algorithm(len: usize) -> Algorithm { // Performs a multiexp, automatically selecting the optimal algorithm based on amount of pairs pub fn multiexp(pairs: &[(G::Scalar, G)]) -> G where G::Scalar: PrimeFieldBits { match algorithm(pairs.len()) { + Algorithm::Null => Group::identity(), + Algorithm::Single => pairs[0].1 * pairs[0].0, Algorithm::Straus(window) => straus(pairs, window), Algorithm::Pippenger(window) => pippenger(pairs, window) } @@ -156,6 +164,8 @@ pub fn multiexp(pairs: &[(G::Scalar, G)]) -> G where G::Scalar: PrimeF pub fn multiexp_vartime(pairs: &[(G::Scalar, G)]) -> G where G::Scalar: PrimeFieldBits { match algorithm(pairs.len()) { + Algorithm::Null => Group::identity(), + Algorithm::Single => pairs[0].1 * pairs[0].0, Algorithm::Straus(window) => straus_vartime(pairs, window), Algorithm::Pippenger(window) => pippenger_vartime(pairs, window) } From 9f8d1aa220ec389a8516541afa6187f7db04b289 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 7 Jul 2022 00:26:34 -0400 Subject: [PATCH 081/105] Clean AOS signatures --- crypto/dleq/Cargo.toml | 2 - 
crypto/dleq/src/cross_group/linear/aos.rs | 305 ++++++++---------- .../dleq/src/tests/cross_group/linear/aos.rs | 65 ++++ 3 files changed, 191 insertions(+), 181 deletions(-) create mode 100644 crypto/dleq/src/tests/cross_group/linear/aos.rs diff --git a/crypto/dleq/Cargo.toml b/crypto/dleq/Cargo.toml index 5ef85242..8943544c 100644 --- a/crypto/dleq/Cargo.toml +++ b/crypto/dleq/Cargo.toml @@ -12,8 +12,6 @@ rand_core = "0.6" digest = "0.10" -subtle = "2.4" - transcript = { package = "flexible-transcript", path = "../transcript", version = "0.1" } ff = "0.12" diff --git a/crypto/dleq/src/cross_group/linear/aos.rs b/crypto/dleq/src/cross_group/linear/aos.rs index ada2309a..8d2a8c67 100644 --- a/crypto/dleq/src/cross_group/linear/aos.rs +++ b/crypto/dleq/src/cross_group/linear/aos.rs @@ -1,7 +1,5 @@ use rand_core::{RngCore, CryptoRng}; -use subtle::{ConstantTimeEq, ConditionallySelectable}; - use transcript::Transcript; use group::{ff::{Field, PrimeFieldBits}, prime::PrimeGroup}; @@ -10,7 +8,7 @@ use multiexp::BatchVerifier; use crate::{ Generators, - cross_group::{DLEqError, scalar::{scalar_convert, mutual_scalar_from_bytes}, bits::RingSignature} + cross_group::{DLEqError, scalar::{scalar_convert, mutual_scalar_from_bytes}} }; #[cfg(feature = "serialize")] @@ -20,48 +18,33 @@ use ff::PrimeField; #[cfg(feature = "serialize")] use crate::{read_scalar, cross_group::read_point}; -#[allow(non_snake_case)] -fn nonces< - T: Transcript, - G0: PrimeGroup, - G1: PrimeGroup ->(mut transcript: T, nonces: (G0, G1)) -> (G0::Scalar, G1::Scalar) - where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { - transcript.append_message(b"nonce_0", nonces.0.to_bytes().as_ref()); - transcript.append_message(b"nonce_1", nonces.1.to_bytes().as_ref()); - mutual_scalar_from_bytes(transcript.challenge(b"challenge").as_ref()) -} - -#[allow(non_snake_case)] -fn calculate_R( - generators: (Generators, Generators), - s: (G0::Scalar, G1::Scalar), - A: (G0, G1), - e: (G0::Scalar, G1::Scalar) -) 
-> (G0, G1) { - (((generators.0.alt * s.0) - (A.0 * e.0)), ((generators.1.alt * s.1) - (A.1 * e.1))) -} - -#[allow(non_snake_case)] -fn R_nonces( - transcript: T, - generators: (Generators, Generators), - s: (G0::Scalar, G1::Scalar), - A: (G0, G1), - e: (G0::Scalar, G1::Scalar) -) -> (G0::Scalar, G1::Scalar) where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { - nonces(transcript, calculate_R(generators, s, A, e)) -} - -#[allow(non_snake_case)] +#[allow(non_camel_case_types)] #[derive(Clone, PartialEq, Eq, Debug)] -pub struct ClassicAos { +pub(crate) enum Re { + R(G0, G1), // Merged challenges have a slight security reduction, yet one already applied to the scalar // being proven for, and this saves ~8kb. Alternatively, challenges could be redefined as a seed, // present here, which is then hashed for each of the two challenges, remaining unbiased/unique // while maintaining the bandwidth savings, yet also while adding 252 hashes for // Secp256k1/Ed25519 - e_0: G0::Scalar, + e(G0::Scalar) +} + +impl Re { + #[allow(non_snake_case)] + pub(crate) fn R_default() -> Re { + Re::R(G0::identity(), G1::identity()) + } + + pub(crate) fn e_default() -> Re { + Re::e(G0::Scalar::zero()) + } +} + +#[allow(non_snake_case)] +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) struct Aos { + Re_0: Re, s: [(G0::Scalar, G1::Scalar); RING_LEN] } @@ -69,106 +52,24 @@ impl< G0: PrimeGroup, G1: PrimeGroup, const RING_LEN: usize -> RingSignature for ClassicAos - where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { - type Context = (); +> Aos where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { + #[allow(non_snake_case)] + fn nonces(mut transcript: T, nonces: (G0, G1)) -> (G0::Scalar, G1::Scalar) { + transcript.append_message(b"nonce_0", nonces.0.to_bytes().as_ref()); + transcript.append_message(b"nonce_1", nonces.1.to_bytes().as_ref()); + mutual_scalar_from_bytes(transcript.challenge(b"challenge").as_ref()) + } - const LEN: usize = RING_LEN; - - fn prove( - 
rng: &mut R, - transcript: T, + #[allow(non_snake_case)] + fn R( generators: (Generators, Generators), - ring: &[(G0, G1)], - actual: usize, - blinding_key: (G0::Scalar, G1::Scalar) - ) -> Self { - // While it is possible to use larger values, it's not efficient to do so - // 2 + 2 == 2^2, yet 2 + 2 + 2 < 2^3 - debug_assert!((RING_LEN == 2) || (RING_LEN == 4)); - - let mut e_0 = G0::Scalar::zero(); - let mut s = [(G0::Scalar::zero(), G1::Scalar::zero()); RING_LEN]; - - let r = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); - #[allow(non_snake_case)] - let original_R = (generators.0.alt * r.0, generators.1.alt * r.1); - #[allow(non_snake_case)] - let mut R = original_R; - - for i in ((actual + 1) .. (actual + RING_LEN + 1)).map(|i| i % RING_LEN) { - let e = nonces(transcript.clone(), R); - e_0 = G0::Scalar::conditional_select(&e_0, &e.0, usize::ct_eq(&i, &0)); - - // Solve for the real index - if i == actual { - s[i] = (r.0 + (e.0 * blinding_key.0), r.1 + (e.1 * blinding_key.1)); - debug_assert_eq!(calculate_R(generators, s[i], ring[actual], e), original_R); - break; - // Generate a decoy response - } else { - s[i] = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); - } - - R = calculate_R(generators, s[i], ring[i], e); - } - - ClassicAos { e_0, s } + s: (G0::Scalar, G1::Scalar), + A: (G0, G1), + e: (G0::Scalar, G1::Scalar) + ) -> (G0, G1) { + (((generators.0.alt * s.0) - (A.0 * e.0)), ((generators.1.alt * s.1) - (A.1 * e.1))) } - fn verify( - &self, - _rng: &mut R, - transcript: T, - generators: (Generators, Generators), - _: &mut Self::Context, - ring: &[(G0, G1)] - ) -> Result<(), DLEqError> { - debug_assert!((RING_LEN == 2) || (RING_LEN == 4)); - - let e_0 = (self.e_0, scalar_convert(self.e_0).ok_or(DLEqError::InvalidChallenge)?); - let mut e = None; - for i in 0 .. 
RING_LEN { - e = Some(R_nonces(transcript.clone(), generators, self.s[i], ring[i], e.unwrap_or(e_0))); - } - - // Will panic if the above loop is never run somehow - // If e wasn't an Option, and instead initially set to e_0, it'd always pass - if e_0 != e.unwrap() { - Err(DLEqError::InvalidProof)?; - } - Ok(()) - } - - #[cfg(feature = "serialize")] - fn serialize(&self, w: &mut W) -> std::io::Result<()> { - w.write_all(self.e_0.to_repr().as_ref())?; - for i in 0 .. Self::LEN { - w.write_all(self.s[i].0.to_repr().as_ref())?; - w.write_all(self.s[i].1.to_repr().as_ref())?; - } - Ok(()) - } - - #[cfg(feature = "serialize")] - fn deserialize(r: &mut R) -> std::io::Result { - let e_0 = read_scalar(r)?; - let mut s = [(G0::Scalar::zero(), G1::Scalar::zero()); RING_LEN]; - for i in 0 .. Self::LEN { - s[i] = (read_scalar(r)?, read_scalar(r)?); - } - Ok(ClassicAos { e_0, s }) - } -} - -#[allow(non_snake_case)] -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct MultiexpAos { - R_0: (G0, G1), - s: [(G0::Scalar, G1::Scalar); 2] -} - -impl MultiexpAos { #[allow(non_snake_case)] fn R_batch( generators: (Generators, Generators), @@ -178,25 +79,34 @@ impl MultiexpAos { ) -> (Vec<(G0::Scalar, G0)>, Vec<(G1::Scalar, G1)>) { (vec![(s.0, generators.0.alt), (-e.0, A.0)], vec![(s.1, generators.1.alt), (-e.1, A.1)]) } -} -impl RingSignature for MultiexpAos - where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { - type Context = (BatchVerifier<(), G0>, BatchVerifier<(), G1>); + #[allow(non_snake_case)] + fn R_nonces( + transcript: T, + generators: (Generators, Generators), + s: (G0::Scalar, G1::Scalar), + A: (G0, G1), + e: (G0::Scalar, G1::Scalar) + ) -> (G0::Scalar, G1::Scalar) { + Self::nonces(transcript, Self::R(generators, s, A, e)) + } - const LEN: usize = 2; - - fn prove( + #[allow(non_snake_case)] + pub(crate) fn prove( rng: &mut R, transcript: T, generators: (Generators, Generators), ring: &[(G0, G1)], actual: usize, - blinding_key: (G0::Scalar, G1::Scalar) + 
blinding_key: (G0::Scalar, G1::Scalar), + mut Re_0: Re ) -> Self { - #[allow(non_snake_case)] - let mut R_0 = (G0::identity(), G1::identity()); - let mut s = [(G0::Scalar::zero(), G1::Scalar::zero()); 2]; // Can't use Self::LEN due to 76200 + // While it is possible to use larger values, it's not efficient to do so + // 2 + 2 == 2^2, yet 2 + 2 + 2 < 2^3 + debug_assert!((RING_LEN == 2) || (RING_LEN == 4)); + debug_assert_eq!(RING_LEN, ring.len()); + + let mut s = [(G0::Scalar::zero(), G1::Scalar::zero()); RING_LEN]; let r = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); #[allow(non_snake_case)] @@ -204,75 +114,112 @@ impl RingSignature for MultiexpAos { *R0_0 = R.0; *R1_0 = R.1 }, + Re::e(ref mut e_0) => *e_0 = e.0 + } } // Solve for the real index - let e = nonces(transcript.clone(), R); if i == actual { s[i] = (r.0 + (e.0 * blinding_key.0), r.1 + (e.1 * blinding_key.1)); - debug_assert_eq!(calculate_R(generators, s[i], ring[actual], e), original_R); + debug_assert_eq!(Self::R(generators, s[i], ring[actual], e), original_R); break; // Generate a decoy response } else { s[i] = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); } - R = calculate_R(generators, s[i], ring[i], e); + R = Self::R(generators, s[i], ring[i], e); } - MultiexpAos { R_0, s } + Aos { Re_0, s } } - fn verify( + // Assumes the ring has already been transcripted in some form. Critically insecure if it hasn't + pub(crate) fn verify( &self, rng: &mut R, transcript: T, generators: (Generators, Generators), - batch: &mut Self::Context, + batch: &mut (BatchVerifier<(), G0>, BatchVerifier<(), G1>), ring: &[(G0, G1)] ) -> Result<(), DLEqError> { - let mut e = nonces(transcript.clone(), self.R_0); - for i in 0 .. 
(Self::LEN - 1) { - e = R_nonces(transcript.clone(), generators, self.s[i], ring[i], e); - } + debug_assert!((RING_LEN == 2) || (RING_LEN == 4)); + debug_assert_eq!(RING_LEN, ring.len()); - let mut statements = Self::R_batch( - generators, - *self.s.last().unwrap(), - *ring.last().unwrap(), - e - ); - statements.0.push((-G0::Scalar::one(), self.R_0.0)); - statements.1.push((-G1::Scalar::one(), self.R_0.1)); - batch.0.queue(&mut *rng, (), statements.0); - batch.1.queue(&mut *rng, (), statements.1); + match self.Re_0 { + Re::R(R0_0, R1_0) => { + let mut e = Self::nonces(transcript.clone(), (R0_0, R1_0)); + for i in 0 .. (RING_LEN - 1) { + e = Self::R_nonces(transcript.clone(), generators, self.s[i], ring[i], e); + } + + let mut statements = Self::R_batch( + generators, + *self.s.last().unwrap(), + *ring.last().unwrap(), + e + ); + statements.0.push((-G0::Scalar::one(), R0_0)); + statements.1.push((-G1::Scalar::one(), R1_0)); + batch.0.queue(&mut *rng, (), statements.0); + batch.1.queue(&mut *rng, (), statements.1); + }, + + Re::e(e_0) => { + let e_0 = (e_0, scalar_convert(e_0).ok_or(DLEqError::InvalidChallenge)?); + let mut e = None; + for i in 0 .. RING_LEN { + e = Some( + Self::R_nonces(transcript.clone(), generators, self.s[i], ring[i], e.unwrap_or(e_0)) + ); + } + + // Will panic if the above loop is never run somehow + // If e wasn't an Option, and instead initially set to e_0, it'd always pass + if e_0 != e.unwrap() { + Err(DLEqError::InvalidProof)?; + } + } + } Ok(()) } #[cfg(feature = "serialize")] - fn serialize(&self, w: &mut W) -> std::io::Result<()> { - w.write_all(self.R_0.0.to_bytes().as_ref())?; - w.write_all(self.R_0.1.to_bytes().as_ref())?; - for i in 0 .. Self::LEN { + pub(crate) fn serialize(&self, w: &mut W) -> std::io::Result<()> { + match self.Re_0 { + Re::R(R0, R1) => { + w.write_all(R0.to_bytes().as_ref())?; + w.write_all(R1.to_bytes().as_ref())?; + }, + Re::e(e) => w.write_all(e.to_repr().as_ref())? + } + + for i in 0 .. 
RING_LEN { w.write_all(self.s[i].0.to_repr().as_ref())?; w.write_all(self.s[i].1.to_repr().as_ref())?; } + Ok(()) } #[cfg(feature = "serialize")] - fn deserialize(r: &mut R) -> std::io::Result { - #[allow(non_snake_case)] - let R_0 = (read_point(r)?, read_point(r)?); - let mut s = [(G0::Scalar::zero(), G1::Scalar::zero()); 2]; - for i in 0 .. Self::LEN { + pub(crate) fn deserialize(r: &mut R, mut Re_0: Re) -> std::io::Result { + match Re_0 { + Re::R(ref mut R0, ref mut R1) => { *R0 = read_point(r)?; *R1 = read_point(r)? }, + Re::e(ref mut e) => *e = read_scalar(r)? + } + + let mut s = [(G0::Scalar::zero(), G1::Scalar::zero()); RING_LEN]; + for i in 0 .. RING_LEN { s[i] = (read_scalar(r)?, read_scalar(r)?); } - Ok(MultiexpAos { R_0, s }) + + Ok(Aos { Re_0, s }) } } diff --git a/crypto/dleq/src/tests/cross_group/linear/aos.rs b/crypto/dleq/src/tests/cross_group/linear/aos.rs new file mode 100644 index 00000000..9662ab52 --- /dev/null +++ b/crypto/dleq/src/tests/cross_group/linear/aos.rs @@ -0,0 +1,65 @@ +use rand_core::OsRng; + +use group::{ff::Field, Group}; + +use multiexp::BatchVerifier; + +use crate::{ + cross_group::linear::aos::{Re, Aos}, + tests::cross_group::{G0, G1, transcript, generators} +}; + +#[cfg(feature = "serialize")] +fn test_aos_serialization(proof: Aos, Re_0: Re) { + let mut buf = vec![]; + proof.serialize(&mut buf).unwrap(); + let deserialized = Aos::deserialize(&mut std::io::Cursor::new(buf), Re_0).unwrap(); + assert_eq!(proof, deserialized); +} + +fn test_aos(default: Re) { + let generators = generators(); + + let mut ring_keys = [(::Scalar::zero(), ::Scalar::zero()); RING_LEN]; + let mut ring = [(G0::identity(), G1::identity()); RING_LEN]; + for i in 0 .. RING_LEN { + ring_keys[i] = ( + ::Scalar::random(&mut OsRng), + ::Scalar::random(&mut OsRng) + ); + ring[i] = (generators.0.alt * ring_keys[i].0, generators.1.alt * ring_keys[i].1); + } + + for actual in 0 .. 
RING_LEN { + let proof = Aos::<_, _, RING_LEN>::prove( + &mut OsRng, + transcript(), + generators, + &ring, + actual, + ring_keys[actual], + default.clone() + ); + + let mut batch = (BatchVerifier::new(0), BatchVerifier::new(0)); + proof.verify(&mut OsRng, transcript(), generators, &mut batch, &ring).unwrap(); + // For e, these should have nothing. For R, these should have 6 elements each which sum to 0 + assert!(batch.0.verify_vartime()); + assert!(batch.1.verify_vartime()); + + #[cfg(feature = "serialize")] + test_aos_serialization(proof, default.clone()); + } +} + +#[test] +fn test_aos_e() { + test_aos::<2>(Re::e_default()); + test_aos::<4>(Re::e_default()); +} + +#[test] +fn test_aos_R() { + // Batch verification appreciates the longer vectors, which means not batching bits + test_aos::<2>(Re::R_default()); +} From 7d80b6e85421745e82f54d3640e2b9d338d9249a Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 7 Jul 2022 02:40:04 -0400 Subject: [PATCH 082/105] Fix multiexp for debug as well Oversight on my end. 
--- crypto/multiexp/src/lib.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crypto/multiexp/src/lib.rs b/crypto/multiexp/src/lib.rs index f4a08e07..894e24ff 100644 --- a/crypto/multiexp/src/lib.rs +++ b/crypto/multiexp/src/lib.rs @@ -133,7 +133,11 @@ fn algorithm(len: usize) -> Algorithm { } #[cfg(debug_assertions)] - if len < 10 { + if len == 0 { + Algorithm::Null + } else if len == 1 { + Algorithm::Single + } else if len < 10 { Algorithm::Straus(3) } else if len < 80 { Algorithm::Straus(4) From 1a2e6dc5cf9ecac0279a8ed27bfea035e00cc72f Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 7 Jul 2022 07:30:10 -0400 Subject: [PATCH 083/105] Consolidate concise/efficient and clean --- .../dleq/src/cross_group/{linear => }/aos.rs | 4 + crypto/dleq/src/cross_group/bits.rs | 121 ++++---- crypto/dleq/src/cross_group/linear/concise.rs | 217 --------------- .../dleq/src/cross_group/linear/efficient.rs | 182 ------------ crypto/dleq/src/cross_group/linear/mod.rs | 7 - crypto/dleq/src/cross_group/mod.rs | 263 +++++++++++++++++- crypto/dleq/src/cross_group/schnorr.rs | 26 +- .../src/tests/cross_group/{linear => }/aos.rs | 5 +- .../src/tests/cross_group/linear/concise.rs | 98 ------- .../src/tests/cross_group/linear/efficient.rs | 66 ----- .../dleq/src/tests/cross_group/linear/mod.rs | 2 - crypto/dleq/src/tests/cross_group/mod.rs | 105 ++++++- crypto/dleq/src/tests/cross_group/schnorr.rs | 25 +- 13 files changed, 458 insertions(+), 663 deletions(-) rename crypto/dleq/src/cross_group/{linear => }/aos.rs (96%) delete mode 100644 crypto/dleq/src/cross_group/linear/concise.rs delete mode 100644 crypto/dleq/src/cross_group/linear/efficient.rs delete mode 100644 crypto/dleq/src/cross_group/linear/mod.rs rename crypto/dleq/src/tests/cross_group/{linear => }/aos.rs (92%) delete mode 100644 crypto/dleq/src/tests/cross_group/linear/concise.rs delete mode 100644 crypto/dleq/src/tests/cross_group/linear/efficient.rs delete mode 100644 
crypto/dleq/src/tests/cross_group/linear/mod.rs diff --git a/crypto/dleq/src/cross_group/linear/aos.rs b/crypto/dleq/src/cross_group/aos.rs similarity index 96% rename from crypto/dleq/src/cross_group/linear/aos.rs rename to crypto/dleq/src/cross_group/aos.rs index 8d2a8c67..fb468969 100644 --- a/crypto/dleq/src/cross_group/linear/aos.rs +++ b/crypto/dleq/src/cross_group/aos.rs @@ -55,6 +55,8 @@ impl< > Aos where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { #[allow(non_snake_case)] fn nonces(mut transcript: T, nonces: (G0, G1)) -> (G0::Scalar, G1::Scalar) { + transcript.domain_separate(b"aos_membership_proof"); + transcript.append_message(b"ring_len", &u8::try_from(RING_LEN).unwrap().to_le_bytes()); transcript.append_message(b"nonce_0", nonces.0.to_bytes().as_ref()); transcript.append_message(b"nonce_1", nonces.1.to_bytes().as_ref()); mutual_scalar_from_bytes(transcript.challenge(b"challenge").as_ref()) @@ -151,6 +153,7 @@ impl< debug_assert!((RING_LEN == 2) || (RING_LEN == 4)); debug_assert_eq!(RING_LEN, ring.len()); + #[allow(non_snake_case)] match self.Re_0 { Re::R(R0_0, R1_0) => { let mut e = Self::nonces(transcript.clone(), (R0_0, R1_0)); @@ -164,6 +167,7 @@ impl< *ring.last().unwrap(), e ); + // TODO: Make something else negative to speed up vartime statements.0.push((-G0::Scalar::one(), R0_0)); statements.1.push((-G1::Scalar::one(), R1_0)); batch.0.queue(&mut *rng, (), statements.0); diff --git a/crypto/dleq/src/cross_group/bits.rs b/crypto/dleq/src/cross_group/bits.rs index 474daa8b..06e66f58 100644 --- a/crypto/dleq/src/cross_group/bits.rs +++ b/crypto/dleq/src/cross_group/bits.rs @@ -3,73 +3,89 @@ use rand_core::{RngCore, CryptoRng}; use transcript::Transcript; use group::{ff::PrimeFieldBits, prime::PrimeGroup}; +use multiexp::BatchVerifier; -use crate::{Generators, cross_group::DLEqError}; +use crate::{Generators, cross_group::{DLEqError, aos::{Re, Aos}}}; #[cfg(feature = "serialize")] use std::io::{Read, Write}; #[cfg(feature = "serialize")] 
use crate::cross_group::read_point; -pub trait RingSignature: Sized { - type Context; +pub(crate) enum BitSignature { + ConciseLinear, + EfficientLinear +} - const LEN: usize; +impl BitSignature { + pub(crate) const fn to_u8(&self) -> u8 { + match self { + BitSignature::ConciseLinear => 0, + BitSignature::EfficientLinear => 1 + } + } - fn prove( - rng: &mut R, - transcript: T, - generators: (Generators, Generators), - ring: &[(G0, G1)], - actual: usize, - blinding_key: (G0::Scalar, G1::Scalar) - ) -> Self; + pub(crate) const fn from(algorithm: u8) -> BitSignature { + match algorithm { + 0 => BitSignature::ConciseLinear, + 1 => BitSignature::EfficientLinear, + _ => panic!("Unknown algorithm") + } + } - fn verify( - &self, - rng: &mut R, - transcript: T, - generators: (Generators, Generators), - context: &mut Self::Context, - ring: &[(G0, G1)] - ) -> Result<(), DLEqError>; + pub(crate) const fn bits(&self) -> usize { + match self { + BitSignature::ConciseLinear => 2, + BitSignature::EfficientLinear => 1 + } + } - #[cfg(feature = "serialize")] - fn serialize(&self, w: &mut W) -> std::io::Result<()>; - #[cfg(feature = "serialize")] - fn deserialize(r: &mut R) -> std::io::Result; + pub(crate) const fn ring_len(&self) -> usize { + 2_usize.pow(self.bits() as u32) + } + + fn aos_form(&self) -> Re { + match self { + BitSignature::ConciseLinear => Re::e_default(), + BitSignature::EfficientLinear => Re::R_default() + } + } } #[derive(Clone, PartialEq, Eq, Debug)] -pub(crate) struct Bits> { +pub(crate) struct Bits< + G0: PrimeGroup, + G1: PrimeGroup, + const SIGNATURE: u8, + const RING_LEN: usize +> { pub(crate) commitments: (G0, G1), - signature: RING + signature: Aos } -impl> Bits - where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { +impl< + G0: PrimeGroup, + G1: PrimeGroup, + const SIGNATURE: u8, + const RING_LEN: usize +> Bits where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { fn transcript(transcript: &mut T, i: usize, commitments: (G0, G1)) { - if 
i == 0 { - transcript.domain_separate(b"cross_group_dleq"); - } - transcript.append_message(b"bit_group", &u16::try_from(i).unwrap().to_le_bytes()); + transcript.domain_separate(b"bits"); + transcript.append_message(b"group", &u16::try_from(i).unwrap().to_le_bytes()); transcript.append_message(b"commitment_0", commitments.0.to_bytes().as_ref()); transcript.append_message(b"commitment_1", commitments.1.to_bytes().as_ref()); } fn ring(pow_2: (G0, G1), commitments: (G0, G1)) -> Vec<(G0, G1)> { - let mut res = vec![(G0::identity(), G1::identity()); RING::LEN]; - res[RING::LEN - 1] = commitments; - for i in (0 .. (RING::LEN - 1)).rev() { - res[i] = (res[i + 1].0 - pow_2.0, res[i + 1].1 - pow_2.1); + let mut res = vec![commitments; RING_LEN]; + for i in 1 .. RING_LEN { + res[i] = (res[i - 1].0 - pow_2.0, res[i - 1].1 - pow_2.1); } res } fn shift(pow_2: &mut (G0, G1)) { - pow_2.0 = pow_2.0.double(); - pow_2.1 = pow_2.1.double(); - if RING::LEN == 4 { + for _ in 0 .. BitSignature::from(SIGNATURE).bits() { pow_2.0 = pow_2.0.double(); pow_2.1 = pow_2.1.double(); } @@ -84,20 +100,24 @@ impl> Bits Self { - debug_assert!((RING::LEN == 2) || (RING::LEN == 4)); - let mut commitments = ( (generators.0.alt * blinding_key.0), (generators.1.alt * blinding_key.1) ); commitments.0 += pow_2.0 * G0::Scalar::from(bits.into()); commitments.1 += pow_2.1 * G1::Scalar::from(bits.into()); + Self::transcript(transcript, i, commitments); - let ring = Self::ring(*pow_2, commitments); - // Invert the index to get the raw blinding key's position in the ring - let actual = RING::LEN - 1 - usize::from(bits); - let signature = RING::prove(rng, transcript.clone(), generators, &ring, actual, blinding_key); + let signature = Aos::prove( + rng, + transcript.clone(), + generators, + &Self::ring(*pow_2, commitments), + usize::from(bits), + blinding_key, + BitSignature::from(SIGNATURE).aos_form() + ); Self::shift(pow_2); Bits { commitments, signature } @@ -108,18 +128,17 @@ impl> Bits, Generators), - 
context: &mut RING::Context, + batch: &mut (BatchVerifier<(), G0>, BatchVerifier<(), G1>), i: usize, pow_2: &mut (G0, G1) ) -> Result<(), DLEqError> { - debug_assert!((RING::LEN == 2) || (RING::LEN == 4)); - Self::transcript(transcript, i, self.commitments); + self.signature.verify( rng, transcript.clone(), generators, - context, + batch, &Self::ring(*pow_2, self.commitments) )?; @@ -135,7 +154,7 @@ impl> Bits(r: &mut Re) -> std::io::Result { - Ok(Bits { commitments: (read_point(r)?, read_point(r)?), signature: RING::deserialize(r)? }) + pub(crate) fn deserialize(r: &mut R) -> std::io::Result { + Ok(Bits { commitments: (read_point(r)?, read_point(r)?), signature: Aos::deserialize(r)? }) } } diff --git a/crypto/dleq/src/cross_group/linear/concise.rs b/crypto/dleq/src/cross_group/linear/concise.rs deleted file mode 100644 index 21cf5652..00000000 --- a/crypto/dleq/src/cross_group/linear/concise.rs +++ /dev/null @@ -1,217 +0,0 @@ -use rand_core::{RngCore, CryptoRng}; - -use digest::Digest; - -use transcript::Transcript; - -use group::{ff::{Field, PrimeField, PrimeFieldBits}, prime::PrimeGroup}; - -use crate::{ - Generators, - cross_group::{ - DLEqError, DLEqProof, - scalar::{scalar_convert, mutual_scalar_from_bytes}, - schnorr::SchnorrPoK, - linear::aos::ClassicAos, - bits::Bits - } -}; - -#[cfg(feature = "serialize")] -use std::io::{Read, Write}; - -pub type ConciseDLEq = DLEqProof< - G0, - G1, - ClassicAos, - ClassicAos - >; - -impl ConciseDLEq - where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { - fn prove_internal( - rng: &mut R, - transcript: &mut T, - generators: (Generators, Generators), - f: (G0::Scalar, G1::Scalar) - ) -> (Self, (G0::Scalar, G1::Scalar)) { - Self::initialize_transcript( - transcript, - generators, - ((generators.0.primary * f.0), (generators.1.primary * f.1)) - ); - - let poks = ( - SchnorrPoK::::prove(rng, transcript, generators.0.primary, f.0), - SchnorrPoK::::prove(rng, transcript, generators.1.primary, f.1) - ); - - let mut 
blinding_key_total = (G0::Scalar::zero(), G1::Scalar::zero()); - let mut blinding_key = |rng: &mut R, last| { - let blinding_key = ( - Self::blinding_key(&mut *rng, &mut blinding_key_total.0, last), - Self::blinding_key(&mut *rng, &mut blinding_key_total.1, last) - ); - if last { - debug_assert_eq!(blinding_key_total.0, G0::Scalar::zero()); - debug_assert_eq!(blinding_key_total.1, G1::Scalar::zero()); - } - blinding_key - }; - - let mut pow_2 = (generators.0.primary, generators.1.primary); - - let raw_bits = f.0.to_le_bits(); - let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap(); - let mut bits = Vec::with_capacity(capacity); - let mut these_bits: u8 = 0; - for (i, bit) in raw_bits.iter().enumerate() { - if i > ((capacity / 2) * 2) { - break; - } - - let bit = *bit as u8; - debug_assert_eq!(bit | 1, 1); - - if (i % 2) == 0 { - these_bits = bit; - continue; - } else { - these_bits += bit << 1; - } - - let last = i == (capacity - 1); - let blinding_key = blinding_key(&mut *rng, last); - bits.push( - Bits::prove(&mut *rng, transcript, generators, i / 2, &mut pow_2, these_bits, blinding_key) - ); - } - - let mut remainder = None; - if (capacity % 2) == 1 { - let blinding_key = blinding_key(&mut *rng, true); - remainder = Some( - Bits::prove( - &mut *rng, - transcript, - generators, - capacity / 2, - &mut pow_2, - these_bits, - blinding_key - ) - ); - } - - let proof = DLEqProof { bits, remainder, poks }; - debug_assert_eq!( - proof.reconstruct_keys(), - (generators.0.primary * f.0, generators.1.primary * f.1) - ); - (proof, f) - } - - /// Prove the cross-Group Discrete Log Equality for the points derived from the scalar created as - /// the output of the passed in Digest. 
Given the non-standard requirements to achieve - /// uniformity, needing to be < 2^x instead of less than a prime moduli, this is the simplest way - /// to safely and securely generate a Scalar, without risk of failure, nor bias - /// It also ensures a lack of determinable relation between keys, guaranteeing security in the - /// currently expected use case for this, atomic swaps, where each swap leaks the key. Knowing - /// the relationship between keys would allow breaking all swaps after just one - pub fn prove( - rng: &mut R, - transcript: &mut T, - generators: (Generators, Generators), - digest: D - ) -> (Self, (G0::Scalar, G1::Scalar)) { - Self::prove_internal( - rng, - transcript, - generators, - mutual_scalar_from_bytes(digest.finalize().as_ref()) - ) - } - - /// Prove the cross-Group Discrete Log Equality for the points derived from the scalar passed in, - /// failing if it's not mutually valid. This allows for rejection sampling externally derived - /// scalars until they're safely usable, as needed - pub fn prove_without_bias( - rng: &mut R, - transcript: &mut T, - generators: (Generators, Generators), - f0: G0::Scalar - ) -> Option<(Self, (G0::Scalar, G1::Scalar))> { - scalar_convert(f0).map(|f1| Self::prove_internal(rng, transcript, generators, (f0, f1))) - } - - /// Verify a cross-Group Discrete Log Equality statement, returning the points proven for - pub fn verify( - &self, - rng: &mut R, - transcript: &mut T, - generators: (Generators, Generators) - ) -> Result<(G0, G1), DLEqError> { - let capacity = G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY); - if (self.bits.len() != (capacity / 2).try_into().unwrap()) || ( - // These shouldn't be possible, as deserialize ensures this is present for fields with this - // characteristic, and proofs locally generated will have it. 
Regardless, best to ensure - (self.remainder.is_none() && ((capacity % 2) == 1)) || - (self.remainder.is_some() && ((capacity % 2) == 0)) - ) { - return Err(DLEqError::InvalidProofLength); - } - - let keys = self.reconstruct_keys(); - Self::initialize_transcript(transcript, generators, keys); - if !( - self.poks.0.verify(transcript, generators.0.primary, keys.0) && - self.poks.1.verify(transcript, generators.1.primary, keys.1) - ) { - Err(DLEqError::InvalidProofOfKnowledge)?; - } - - let mut pow_2 = (generators.0.primary, generators.1.primary); - for (i, bits) in self.bits.iter().enumerate() { - bits.verify(&mut *rng, transcript, generators, &mut (), i, &mut pow_2)?; - } - if let Some(bit) = &self.remainder { - bit.verify(&mut *rng, transcript, generators, &mut (), self.bits.len(), &mut pow_2)?; - } - - Ok(keys) - } - - #[cfg(feature = "serialize")] - pub fn serialize(&self, w: &mut W) -> std::io::Result<()> { - for bit in &self.bits { - bit.serialize(w)?; - } - if let Some(bit) = &self.remainder { - bit.serialize(w)?; - } - self.poks.0.serialize(w)?; - self.poks.1.serialize(w) - } - - #[cfg(feature = "serialize")] - pub fn deserialize(r: &mut R) -> std::io::Result { - let capacity = G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY); - let mut bits = Vec::with_capacity(capacity.try_into().unwrap()); - for _ in 0 .. (capacity / 2) { - bits.push(Bits::deserialize(r)?); - } - - let mut remainder = None; - if (capacity % 2) == 1 { - remainder = Some(Bits::deserialize(r)?); - } - - Ok( - DLEqProof { - bits, - remainder, - poks: (SchnorrPoK::deserialize(r)?, SchnorrPoK::deserialize(r)?) 
- } - ) - } -} diff --git a/crypto/dleq/src/cross_group/linear/efficient.rs b/crypto/dleq/src/cross_group/linear/efficient.rs deleted file mode 100644 index 696744d6..00000000 --- a/crypto/dleq/src/cross_group/linear/efficient.rs +++ /dev/null @@ -1,182 +0,0 @@ -use rand_core::{RngCore, CryptoRng}; - -use digest::Digest; - -use transcript::Transcript; - -use group::{ff::{Field, PrimeField, PrimeFieldBits}, prime::PrimeGroup}; -use multiexp::BatchVerifier; - -use crate::{ - Generators, - cross_group::{ - DLEqError, DLEqProof, - scalar::{scalar_convert, mutual_scalar_from_bytes}, - schnorr::SchnorrPoK, - linear::aos::MultiexpAos, - bits::Bits - } -}; - -#[cfg(feature = "serialize")] -use std::io::{Read, Write}; - -pub type EfficientDLEq = DLEqProof, MultiexpAos>; - -impl EfficientDLEq - where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { - fn prove_internal( - rng: &mut R, - transcript: &mut T, - generators: (Generators, Generators), - f: (G0::Scalar, G1::Scalar) - ) -> (Self, (G0::Scalar, G1::Scalar)) { - Self::initialize_transcript( - transcript, - generators, - ((generators.0.primary * f.0), (generators.1.primary * f.1)) - ); - - let poks = ( - SchnorrPoK::::prove(rng, transcript, generators.0.primary, f.0), - SchnorrPoK::::prove(rng, transcript, generators.1.primary, f.1) - ); - - let mut blinding_key_total = (G0::Scalar::zero(), G1::Scalar::zero()); - let mut blinding_key = |rng: &mut R, last| { - let blinding_key = ( - Self::blinding_key(&mut *rng, &mut blinding_key_total.0, last), - Self::blinding_key(&mut *rng, &mut blinding_key_total.1, last) - ); - if last { - debug_assert_eq!(blinding_key_total.0, G0::Scalar::zero()); - debug_assert_eq!(blinding_key_total.1, G1::Scalar::zero()); - } - blinding_key - }; - - let mut pow_2 = (generators.0.primary, generators.1.primary); - - let raw_bits = f.0.to_le_bits(); - let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap(); - let mut bits = Vec::with_capacity(capacity); - for 
(i, bit) in raw_bits.iter().enumerate() { - let bit = *bit as u8; - debug_assert_eq!(bit | 1, 1); - - let last = i == (capacity - 1); - let blinding_key = blinding_key(&mut *rng, last); - bits.push( - Bits::prove(&mut *rng, transcript, generators, i, &mut pow_2, bit, blinding_key) - ); - - if last { - break; - } - } - - let proof = DLEqProof { bits, remainder: None, poks }; - debug_assert_eq!( - proof.reconstruct_keys(), - (generators.0.primary * f.0, generators.1.primary * f.1) - ); - (proof, f) - } - - /// Prove the cross-Group Discrete Log Equality for the points derived from the scalar created as - /// the output of the passed in Digest. Given the non-standard requirements to achieve - /// uniformity, needing to be < 2^x instead of less than a prime moduli, this is the simplest way - /// to safely and securely generate a Scalar, without risk of failure, nor bias - /// It also ensures a lack of determinable relation between keys, guaranteeing security in the - /// currently expected use case for this, atomic swaps, where each swap leaks the key. Knowing - /// the relationship between keys would allow breaking all swaps after just one - pub fn prove( - rng: &mut R, - transcript: &mut T, - generators: (Generators, Generators), - digest: D - ) -> (Self, (G0::Scalar, G1::Scalar)) { - Self::prove_internal( - rng, - transcript, - generators, - mutual_scalar_from_bytes(digest.finalize().as_ref()) - ) - } - - /// Prove the cross-Group Discrete Log Equality for the points derived from the scalar passed in, - /// failing if it's not mutually valid. 
This allows for rejection sampling externally derived - /// scalars until they're safely usable, as needed - pub fn prove_without_bias( - rng: &mut R, - transcript: &mut T, - generators: (Generators, Generators), - f0: G0::Scalar - ) -> Option<(Self, (G0::Scalar, G1::Scalar))> { - scalar_convert(f0).map(|f1| Self::prove_internal(rng, transcript, generators, (f0, f1))) - } - - /// Verify a cross-Group Discrete Log Equality statement, returning the points proven for - pub fn verify( - &self, - rng: &mut R, - transcript: &mut T, - generators: (Generators, Generators) - ) -> Result<(G0, G1), DLEqError> { - let capacity = G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY); - // The latter case shouldn't be possible yet would explicitly be invalid - if (self.bits.len() != capacity.try_into().unwrap()) || self.remainder.is_some() { - return Err(DLEqError::InvalidProofLength); - } - - let keys = self.reconstruct_keys(); - Self::initialize_transcript(transcript, generators, keys); - // TODO: Batch - if !( - self.poks.0.verify(transcript, generators.0.primary, keys.0) && - self.poks.1.verify(transcript, generators.1.primary, keys.1) - ) { - Err(DLEqError::InvalidProofOfKnowledge)?; - } - - let mut batch = ( - BatchVerifier::new(self.bits.len() * 3), - BatchVerifier::new(self.bits.len() * 3) - ); - let mut pow_2 = (generators.0.primary, generators.1.primary); - for (i, bits) in self.bits.iter().enumerate() { - bits.verify(&mut *rng, transcript, generators, &mut batch, i, &mut pow_2)?; - } - if (!batch.0.verify_vartime()) || (!batch.1.verify_vartime()) { - Err(DLEqError::InvalidProof)?; - } - - Ok(keys) - } - - #[cfg(feature = "serialize")] - pub fn serialize(&self, w: &mut W) -> std::io::Result<()> { - for bit in &self.bits { - bit.serialize(w)?; - } - self.poks.0.serialize(w)?; - self.poks.1.serialize(w) - } - - #[cfg(feature = "serialize")] - pub fn deserialize(r: &mut R) -> std::io::Result { - let capacity = G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY); - let mut bits = 
Vec::with_capacity(capacity.try_into().unwrap()); - for _ in 0 .. capacity { - bits.push(Bits::deserialize(r)?); - } - - Ok( - DLEqProof { - bits, - remainder: None, - poks: (SchnorrPoK::deserialize(r)?, SchnorrPoK::deserialize(r)?) - } - ) - } -} diff --git a/crypto/dleq/src/cross_group/linear/mod.rs b/crypto/dleq/src/cross_group/linear/mod.rs deleted file mode 100644 index 20322079..00000000 --- a/crypto/dleq/src/cross_group/linear/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -pub(crate) mod aos; - -mod concise; -pub use concise::ConciseDLEq; - -mod efficient; -pub use efficient::EfficientDLEq; diff --git a/crypto/dleq/src/cross_group/mod.rs b/crypto/dleq/src/cross_group/mod.rs index 57231b93..a0a74c5a 100644 --- a/crypto/dleq/src/cross_group/mod.rs +++ b/crypto/dleq/src/cross_group/mod.rs @@ -1,24 +1,28 @@ use thiserror::Error; + use rand_core::{RngCore, CryptoRng}; +use digest::Digest; use transcript::Transcript; -use group::{ff::{PrimeField, PrimeFieldBits}, prime::PrimeGroup}; +use group::{ff::{Field, PrimeField, PrimeFieldBits}, prime::PrimeGroup}; +use multiexp::BatchVerifier; use crate::Generators; pub mod scalar; +use scalar::{scalar_convert, mutual_scalar_from_bytes}; pub(crate) mod schnorr; use schnorr::SchnorrPoK; -mod bits; -use bits::{RingSignature, Bits}; +pub(crate) mod aos; -pub mod linear; +mod bits; +use bits::{BitSignature, Bits}; #[cfg(feature = "serialize")] -use std::io::Read; +use std::io::{Read, Write}; #[cfg(feature = "serialize")] pub(crate) fn read_point(r: &mut R) -> std::io::Result { @@ -49,25 +53,48 @@ pub enum DLEqError { pub struct DLEqProof< G0: PrimeGroup, G1: PrimeGroup, - RING: RingSignature, - REM: RingSignature + const SIGNATURE: u8, + const RING_LEN: usize, + const REMAINDER_RING_LEN: usize > where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { - bits: Vec>, - remainder: Option>, + bits: Vec>, + remainder: Option>, poks: (SchnorrPoK, SchnorrPoK) } +pub type ConciseLinearDLEq = DLEqProof< + G0, + G1, + { 
BitSignature::ConciseLinear.to_u8() }, + { BitSignature::ConciseLinear.ring_len() }, + // There may not be a remainder, yet if there is, it'll be just one bit + // A ring for one bit has a RING_LEN of 2 + 2 +>; + + pub type EfficientLinearDLEq = DLEqProof< + G0, + G1, + { BitSignature::EfficientLinear.to_u8() }, + { BitSignature::EfficientLinear.ring_len() }, + 0 +>; + impl< G0: PrimeGroup, G1: PrimeGroup, - RING: RingSignature, - REM: RingSignature -> DLEqProof where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { - pub(crate) fn initialize_transcript( + const SIGNATURE: u8, + const RING_LEN: usize, + const REMAINDER_RING_LEN: usize +> DLEqProof where + G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { + + pub(crate) fn transcript( transcript: &mut T, generators: (Generators, Generators), keys: (G0, G1) ) { + transcript.domain_separate(b"cross_group_dleq"); generators.0.transcript(transcript); generators.1.transcript(transcript); transcript.domain_separate(b"points"); @@ -102,4 +129,214 @@ impl< res } + + fn prove_internal( + rng: &mut R, + transcript: &mut T, + generators: (Generators, Generators), + f: (G0::Scalar, G1::Scalar) + ) -> (Self, (G0::Scalar, G1::Scalar)) { + Self::transcript( + transcript, + generators, + ((generators.0.primary * f.0), (generators.1.primary * f.1)) + ); + + let poks = ( + SchnorrPoK::::prove(rng, transcript, generators.0.primary, f.0), + SchnorrPoK::::prove(rng, transcript, generators.1.primary, f.1) + ); + + let mut blinding_key_total = (G0::Scalar::zero(), G1::Scalar::zero()); + let mut blinding_key = |rng: &mut R, last| { + let blinding_key = ( + Self::blinding_key(&mut *rng, &mut blinding_key_total.0, last), + Self::blinding_key(&mut *rng, &mut blinding_key_total.1, last) + ); + if last { + debug_assert_eq!(blinding_key_total.0, G0::Scalar::zero()); + debug_assert_eq!(blinding_key_total.1, G1::Scalar::zero()); + } + blinding_key + }; + + let capacity = 
usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap(); + let bits_per_group = BitSignature::from(SIGNATURE).bits(); + + let mut pow_2 = (generators.0.primary, generators.1.primary); + + let raw_bits = f.0.to_le_bits(); + let mut bits = Vec::with_capacity(capacity); + let mut these_bits: u8 = 0; + for (i, bit) in raw_bits.iter().enumerate() { + if i == capacity { + break; + } + + let bit = *bit as u8; + debug_assert_eq!(bit | 1, 1); + + // Accumulate this bit + these_bits |= bit << (i % bits_per_group); + if (i % bits_per_group) == (bits_per_group - 1) { + let last = i == (capacity - 1); + let blinding_key = blinding_key(&mut *rng, last); + bits.push( + Bits::prove( + &mut *rng, + transcript, + generators, + i / bits_per_group, + &mut pow_2, + these_bits, + blinding_key + ) + ); + these_bits = 0; + } + } + debug_assert_eq!(bits.len(), capacity / bits_per_group); + + let mut remainder = None; + if capacity != ((capacity / bits_per_group) * bits_per_group) { + let blinding_key = blinding_key(&mut *rng, true); + remainder = Some( + Bits::prove( + &mut *rng, + transcript, + generators, + capacity / bits_per_group, + &mut pow_2, + these_bits, + blinding_key + ) + ); + } + + let proof = DLEqProof { bits, remainder, poks }; + debug_assert_eq!( + proof.reconstruct_keys(), + (generators.0.primary * f.0, generators.1.primary * f.1) + ); + (proof, f) + } + + /// Prove the cross-Group Discrete Log Equality for the points derived from the scalar created as + /// the output of the passed in Digest. Given the non-standard requirements to achieve + /// uniformity, needing to be < 2^x instead of less than a prime moduli, this is the simplest way + /// to safely and securely generate a Scalar, without risk of failure, nor bias + /// It also ensures a lack of determinable relation between keys, guaranteeing security in the + /// currently expected use case for this, atomic swaps, where each swap leaks the key. 
Knowing + /// the relationship between keys would allow breaking all swaps after just one + pub fn prove( + rng: &mut R, + transcript: &mut T, + generators: (Generators, Generators), + digest: D + ) -> (Self, (G0::Scalar, G1::Scalar)) { + Self::prove_internal( + rng, + transcript, + generators, + mutual_scalar_from_bytes(digest.finalize().as_ref()) + ) + } + + /// Prove the cross-Group Discrete Log Equality for the points derived from the scalar passed in, + /// failing if it's not mutually valid. This allows for rejection sampling externally derived + /// scalars until they're safely usable, as needed + pub fn prove_without_bias( + rng: &mut R, + transcript: &mut T, + generators: (Generators, Generators), + f0: G0::Scalar + ) -> Option<(Self, (G0::Scalar, G1::Scalar))> { + scalar_convert(f0).map(|f1| Self::prove_internal(rng, transcript, generators, (f0, f1))) + } + + /// Verify a cross-Group Discrete Log Equality statement, returning the points proven for + pub fn verify( + &self, + rng: &mut R, + transcript: &mut T, + generators: (Generators, Generators) + ) -> Result<(G0, G1), DLEqError> { + let capacity = usize::try_from( + G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY) + ).unwrap(); + let bits_per_group = BitSignature::from(SIGNATURE).bits(); + let has_remainder = (capacity % bits_per_group) != 0; + + // These shouldn't be possible, as locally created and deserialized proofs should be properly + // formed in these regards, yet it doesn't hurt to check and would be problematic if true + if (self.bits.len() != (capacity / bits_per_group)) || ( + (self.remainder.is_none() && has_remainder) || (self.remainder.is_some() && !has_remainder) + ) { + return Err(DLEqError::InvalidProofLength); + } + + let keys = self.reconstruct_keys(); + Self::transcript(transcript, generators, keys); + + let batch_capacity = match BitSignature::from(SIGNATURE) { + BitSignature::ConciseLinear => 3, + BitSignature::EfficientLinear => (self.bits.len() + 1) * 3 + }; + let mut batch = 
(BatchVerifier::new(batch_capacity), BatchVerifier::new(batch_capacity)); + + self.poks.0.verify(&mut *rng, transcript, generators.0.primary, keys.0, &mut batch.0); + self.poks.1.verify(&mut *rng, transcript, generators.1.primary, keys.1, &mut batch.1); + + let mut pow_2 = (generators.0.primary, generators.1.primary); + for (i, bits) in self.bits.iter().enumerate() { + bits.verify(&mut *rng, transcript, generators, &mut batch, i, &mut pow_2)?; + } + if let Some(bit) = &self.remainder { + bit.verify(&mut *rng, transcript, generators, &mut batch, self.bits.len(), &mut pow_2)?; + } + + if (!batch.0.verify_vartime()) || (!batch.1.verify_vartime()) { + Err(DLEqError::InvalidProof)?; + } + + Ok(keys) + } + + #[cfg(feature = "serialize")] + pub fn serialize(&self, w: &mut W) -> std::io::Result<()> { + for bit in &self.bits { + bit.serialize(w)?; + } + if let Some(bit) = &self.remainder { + bit.serialize(w)?; + } + self.poks.0.serialize(w)?; + self.poks.1.serialize(w) + } + + #[cfg(feature = "serialize")] + pub fn deserialize(r: &mut R) -> std::io::Result { + let capacity = usize::try_from( + G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY) + ).unwrap(); + let bits_per_group = BitSignature::from(SIGNATURE).bits(); + + let mut bits = Vec::with_capacity(capacity / bits_per_group); + for _ in 0 .. (capacity / bits_per_group) { + bits.push(Bits::deserialize(r)?); + } + + let mut remainder = None; + if (capacity % bits_per_group) != 0 { + remainder = Some(Bits::deserialize(r)?); + } + + Ok( + DLEqProof { + bits, + remainder, + poks: (SchnorrPoK::deserialize(r)?, SchnorrPoK::deserialize(r)?) 
+ } + ) + } } diff --git a/crypto/dleq/src/cross_group/schnorr.rs b/crypto/dleq/src/cross_group/schnorr.rs index cbd60aa6..c996f971 100644 --- a/crypto/dleq/src/cross_group/schnorr.rs +++ b/crypto/dleq/src/cross_group/schnorr.rs @@ -2,7 +2,8 @@ use rand_core::{RngCore, CryptoRng}; use transcript::Transcript; -use group::{ff::Field, prime::PrimeGroup}; +use group::{ff::{Field, PrimeFieldBits}, prime::PrimeGroup}; +use multiexp::BatchVerifier; use crate::challenge; @@ -20,7 +21,7 @@ pub(crate) struct SchnorrPoK { s: G::Scalar } -impl SchnorrPoK { +impl SchnorrPoK where G::Scalar: PrimeFieldBits { // Not hram due to the lack of m #[allow(non_snake_case)] fn hra(transcript: &mut T, generator: G, R: G, A: G) -> G::Scalar { @@ -46,16 +47,23 @@ impl SchnorrPoK { } } - #[must_use] - pub(crate) fn verify( + pub(crate) fn verify( &self, + rng: &mut R, transcript: &mut T, generator: G, - public_key: G - ) -> bool { - (generator * self.s) == ( - self.R + (public_key * Self::hra(transcript, generator, self.R, public_key)) - ) + public_key: G, + batch: &mut BatchVerifier<(), G> + ) { + batch.queue( + rng, + (), + [ + (-self.s, generator), + (G::Scalar::one(), self.R), + (Self::hra(transcript, generator, self.R, public_key), public_key) + ] + ); } #[cfg(feature = "serialize")] diff --git a/crypto/dleq/src/tests/cross_group/linear/aos.rs b/crypto/dleq/src/tests/cross_group/aos.rs similarity index 92% rename from crypto/dleq/src/tests/cross_group/linear/aos.rs rename to crypto/dleq/src/tests/cross_group/aos.rs index 9662ab52..efd37026 100644 --- a/crypto/dleq/src/tests/cross_group/linear/aos.rs +++ b/crypto/dleq/src/tests/cross_group/aos.rs @@ -5,7 +5,7 @@ use group::{ff::Field, Group}; use multiexp::BatchVerifier; use crate::{ - cross_group::linear::aos::{Re, Aos}, + cross_group::aos::{Re, Aos}, tests::cross_group::{G0, G1, transcript, generators} }; @@ -21,6 +21,8 @@ fn test_aos(default: Re) { let generators = generators(); let mut ring_keys = [(::Scalar::zero(), 
::Scalar::zero()); RING_LEN]; + // Side-effect of G0 being a type-alias with identity() deprecated + #[allow(deprecated)] let mut ring = [(G0::identity(), G1::identity()); RING_LEN]; for i in 0 .. RING_LEN { ring_keys[i] = ( @@ -58,6 +60,7 @@ fn test_aos_e() { test_aos::<4>(Re::e_default()); } +#[allow(non_snake_case)] #[test] fn test_aos_R() { // Batch verification appreciates the longer vectors, which means not batching bits diff --git a/crypto/dleq/src/tests/cross_group/linear/concise.rs b/crypto/dleq/src/tests/cross_group/linear/concise.rs deleted file mode 100644 index 1fbe49a7..00000000 --- a/crypto/dleq/src/tests/cross_group/linear/concise.rs +++ /dev/null @@ -1,98 +0,0 @@ -use rand_core::{RngCore, OsRng}; - -use ff::{Field, PrimeField}; - -use k256::Scalar; -#[cfg(feature = "serialize")] -use k256::ProjectivePoint; -#[cfg(feature = "serialize")] -use dalek_ff_group::EdwardsPoint; - -use blake2::{Digest, Blake2b512}; - -use crate::{ - cross_group::{scalar::mutual_scalar_from_bytes, linear::ConciseDLEq}, - tests::cross_group::{transcript, generators} -}; - -#[test] -fn test_linear_concise_cross_group_dleq() { - let generators = generators(); - - for i in 0 .. 
1 { - let (proof, keys) = if i == 0 { - let mut seed = [0; 32]; - OsRng.fill_bytes(&mut seed); - - ConciseDLEq::prove( - &mut OsRng, - &mut transcript(), - generators, - Blake2b512::new().chain_update(seed) - ) - } else { - let mut key; - let mut res; - while { - key = Scalar::random(&mut OsRng); - res = ConciseDLEq::prove_without_bias( - &mut OsRng, - &mut transcript(), - generators, - key - ); - res.is_none() - } {} - let res = res.unwrap(); - assert_eq!(key, res.1.0); - res - }; - - let public_keys = proof.verify(&mut OsRng, &mut transcript(), generators).unwrap(); - assert_eq!(generators.0.primary * keys.0, public_keys.0); - assert_eq!(generators.1.primary * keys.1, public_keys.1); - - #[cfg(feature = "serialize")] - { - let mut buf = vec![]; - proof.serialize(&mut buf).unwrap(); - let deserialized = ConciseDLEq::::deserialize( - &mut std::io::Cursor::new(&buf) - ).unwrap(); - assert_eq!(proof, deserialized); - deserialized.verify(&mut OsRng, &mut transcript(), generators).unwrap(); - } - } -} - -#[test] -fn test_remainder() { - // Uses Secp256k1 for both to achieve an odd capacity of 255 - assert_eq!(Scalar::CAPACITY, 255); - let generators = (generators().0, generators().0); - let keys = mutual_scalar_from_bytes(&[0xFF; 32]); - assert_eq!(keys.0, keys.1); - - let (proof, res) = ConciseDLEq::prove_without_bias( - &mut OsRng, - &mut transcript(), - generators, - keys.0 - ).unwrap(); - assert_eq!(keys, res); - - let public_keys = proof.verify(&mut OsRng, &mut transcript(), generators).unwrap(); - assert_eq!(generators.0.primary * keys.0, public_keys.0); - assert_eq!(generators.1.primary * keys.1, public_keys.1); - - #[cfg(feature = "serialize")] - { - let mut buf = vec![]; - proof.serialize(&mut buf).unwrap(); - let deserialized = ConciseDLEq::::deserialize( - &mut std::io::Cursor::new(&buf) - ).unwrap(); - assert_eq!(proof, deserialized); - deserialized.verify(&mut OsRng, &mut transcript(), generators).unwrap(); - } -} diff --git 
a/crypto/dleq/src/tests/cross_group/linear/efficient.rs b/crypto/dleq/src/tests/cross_group/linear/efficient.rs deleted file mode 100644 index bafc4902..00000000 --- a/crypto/dleq/src/tests/cross_group/linear/efficient.rs +++ /dev/null @@ -1,66 +0,0 @@ -use rand_core::{RngCore, OsRng}; - -use ff::Field; - -use k256::Scalar; -#[cfg(feature = "serialize")] -use k256::ProjectivePoint; -#[cfg(feature = "serialize")] -use dalek_ff_group::EdwardsPoint; - -use blake2::{Digest, Blake2b512}; - -use crate::{ - cross_group::linear::EfficientDLEq, - tests::cross_group::{transcript, generators} -}; - -#[test] -fn test_linear_efficient_cross_group_dleq() { - let generators = generators(); - - for i in 0 .. 1 { - let (proof, keys) = if i == 0 { - let mut seed = [0; 32]; - OsRng.fill_bytes(&mut seed); - - EfficientDLEq::prove( - &mut OsRng, - &mut transcript(), - generators, - Blake2b512::new().chain_update(seed) - ) - } else { - let mut key; - let mut res; - while { - key = Scalar::random(&mut OsRng); - res = EfficientDLEq::prove_without_bias( - &mut OsRng, - &mut transcript(), - generators, - key - ); - res.is_none() - } {} - let res = res.unwrap(); - assert_eq!(key, res.1.0); - res - }; - - let public_keys = proof.verify(&mut OsRng, &mut transcript(), generators).unwrap(); - assert_eq!(generators.0.primary * keys.0, public_keys.0); - assert_eq!(generators.1.primary * keys.1, public_keys.1); - - #[cfg(feature = "serialize")] - { - let mut buf = vec![]; - proof.serialize(&mut buf).unwrap(); - let deserialized = EfficientDLEq::::deserialize( - &mut std::io::Cursor::new(&buf) - ).unwrap(); - assert_eq!(proof, deserialized); - deserialized.verify(&mut OsRng, &mut transcript(), generators).unwrap(); - } - } -} diff --git a/crypto/dleq/src/tests/cross_group/linear/mod.rs b/crypto/dleq/src/tests/cross_group/linear/mod.rs deleted file mode 100644 index 5603b63d..00000000 --- a/crypto/dleq/src/tests/cross_group/linear/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -mod concise; -mod efficient; 
diff --git a/crypto/dleq/src/tests/cross_group/mod.rs b/crypto/dleq/src/tests/cross_group/mod.rs index 9557127d..38ef0341 100644 --- a/crypto/dleq/src/tests/cross_group/mod.rs +++ b/crypto/dleq/src/tests/cross_group/mod.rs @@ -1,26 +1,33 @@ -mod scalar; -mod schnorr; - use hex_literal::hex; -use rand_core::OsRng; +use rand_core::{RngCore, OsRng}; use ff::{Field, PrimeField}; use group::{Group, GroupEncoding}; +use blake2::{Digest, Blake2b512}; + use k256::{Scalar, ProjectivePoint}; use dalek_ff_group::{self as dfg, EdwardsPoint, CompressedEdwardsY}; use transcript::RecommendedTranscript; -use crate::{Generators, cross_group::linear::EfficientDLEq}; +use crate::{ + Generators, + cross_group::{scalar::mutual_scalar_from_bytes, EfficientLinearDLEq, ConciseLinearDLEq} +}; -mod linear; +mod scalar; +mod schnorr; +mod aos; + +type G0 = ProjectivePoint; +type G1 = EdwardsPoint; pub(crate) fn transcript() -> RecommendedTranscript { RecommendedTranscript::new(b"Cross-Group DLEq Proof Test") } -pub(crate) fn generators() -> (Generators, Generators) { +pub(crate) fn generators() -> (Generators, Generators) { ( Generators::new( ProjectivePoint::GENERATOR, @@ -38,6 +45,66 @@ pub(crate) fn generators() -> (Generators, Generators { + let public_keys = $proof.verify(&mut OsRng, &mut transcript(), $generators).unwrap(); + assert_eq!($generators.0.primary * $keys.0, public_keys.0); + assert_eq!($generators.1.primary * $keys.1, public_keys.1); + + #[cfg(feature = "serialize")] + { + let mut buf = vec![]; + $proof.serialize(&mut buf).unwrap(); + let deserialized = $type::::deserialize(&mut std::io::Cursor::new(&buf)).unwrap(); + assert_eq!(proof, deserialized); + } + } +} + +macro_rules! test_dleq { + ($name: ident, $type: ident) => { + #[test] + fn $name() { + let generators = generators(); + + for i in 0 .. 
1 { + let (proof, keys) = if i == 0 { + let mut seed = [0; 32]; + OsRng.fill_bytes(&mut seed); + + $type::prove( + &mut OsRng, + &mut transcript(), + generators, + Blake2b512::new().chain_update(seed) + ) + } else { + let mut key; + let mut res; + while { + key = Scalar::random(&mut OsRng); + res = $type::prove_without_bias( + &mut OsRng, + &mut transcript(), + generators, + key + ); + res.is_none() + } {} + let res = res.unwrap(); + assert_eq!(key, res.1.0); + res + }; + + verify_and_deserialize!($type, proof, generators, keys); + } + } + } +} + +test_dleq!(test_efficient_linear_dleq, EfficientLinearDLEq); +test_dleq!(test_concise_linear_dleq, ConciseLinearDLEq); + #[test] fn test_rejection_sampling() { let mut pow_2 = Scalar::one(); @@ -46,7 +113,8 @@ fn test_rejection_sampling() { } assert!( - EfficientDLEq::prove_without_bias( + // Either would work + EfficientLinearDLEq::prove_without_bias( &mut OsRng, &mut RecommendedTranscript::new(b""), generators(), @@ -54,3 +122,24 @@ fn test_rejection_sampling() { ).is_none() ); } + +#[test] +fn test_remainder() { + // Uses Secp256k1 for both to achieve an odd capacity of 255 + assert_eq!(Scalar::CAPACITY, 255); + let generators = (generators().0, generators().0); + // This will ignore any unused bits, ensuring every remaining one is set + let keys = mutual_scalar_from_bytes(&[0xFF; 32]); + assert_eq!(keys.0 + Scalar::one(), Scalar::from(2u64).pow_vartime(&[255])); + assert_eq!(keys.0, keys.1); + + let (proof, res) = ConciseLinearDLEq::prove_without_bias( + &mut OsRng, + &mut transcript(), + generators, + keys.0 + ).unwrap(); + assert_eq!(keys, res); + + verify_and_deserialize!(ConciseLinearDLEq, proof, generators, keys); +} diff --git a/crypto/dleq/src/tests/cross_group/schnorr.rs b/crypto/dleq/src/tests/cross_group/schnorr.rs index 8298afda..857044db 100644 --- a/crypto/dleq/src/tests/cross_group/schnorr.rs +++ b/crypto/dleq/src/tests/cross_group/schnorr.rs @@ -1,23 +1,30 @@ use rand_core::OsRng; -use 
group::{ff::Field, prime::PrimeGroup}; +use group::{ff::{Field, PrimeFieldBits}, prime::PrimeGroup}; +use multiexp::BatchVerifier; use transcript::RecommendedTranscript; use crate::cross_group::schnorr::SchnorrPoK; -fn test_schnorr() { +fn test_schnorr() where G::Scalar: PrimeFieldBits { let private = G::Scalar::random(&mut OsRng); let transcript = RecommendedTranscript::new(b"Schnorr Test"); - assert!( - SchnorrPoK::prove( - &mut OsRng, - &mut transcript.clone(), - G::generator(), - private - ).verify(&mut transcript.clone(), G::generator(), G::generator() * private) + let mut batch = BatchVerifier::new(3); + SchnorrPoK::prove( + &mut OsRng, + &mut transcript.clone(), + G::generator(), + private + ).verify( + &mut OsRng, + &mut transcript.clone(), + G::generator(), + G::generator() * private, + &mut batch ); + assert!(batch.verify_vartime()); } #[test] From 44e0a41ca1f01d32b5cae76d080ae1691a41d038 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 7 Jul 2022 08:26:59 -0400 Subject: [PATCH 084/105] Add Classic/Compromise DLEqs and a benchmark Formatted results from my laptop: EfficientLinear had a average prove time of 188ms EfficientLinear had a average verify time of 126ms CompromiseLinear had a average prove time of 176ms CompromiseLinear had a average verify time of 141ms ConciseLinear had a average prove time of 191ms ConciseLinear had a average verify time of 160ms ClassicLinear had a average prove time of 214ms ClassicLinear had a average verify time of 159ms There is a decent error margin here. Concise is a drop-in replacement for Classic, in practice *not* theory. Efficient is optimal for performance, yet largest. Compromise is a middleground. 
--- crypto/dleq/src/cross_group/bits.rs | 24 ++++++--- crypto/dleq/src/cross_group/mod.rs | 52 +++++++++++++------- crypto/dleq/src/tests/cross_group/mod.rs | 62 ++++++++++++++++++++---- crypto/multiexp/src/tests/mod.rs | 2 +- 4 files changed, 105 insertions(+), 35 deletions(-) diff --git a/crypto/dleq/src/cross_group/bits.rs b/crypto/dleq/src/cross_group/bits.rs index 06e66f58..ac77de97 100644 --- a/crypto/dleq/src/cross_group/bits.rs +++ b/crypto/dleq/src/cross_group/bits.rs @@ -13,30 +13,38 @@ use std::io::{Read, Write}; use crate::cross_group::read_point; pub(crate) enum BitSignature { + ClassicLinear, ConciseLinear, - EfficientLinear + EfficientLinear, + CompromiseLinear } impl BitSignature { pub(crate) const fn to_u8(&self) -> u8 { match self { - BitSignature::ConciseLinear => 0, - BitSignature::EfficientLinear => 1 + BitSignature::ClassicLinear => 0, + BitSignature::ConciseLinear => 1, + BitSignature::EfficientLinear => 2, + BitSignature::CompromiseLinear => 3 } } pub(crate) const fn from(algorithm: u8) -> BitSignature { match algorithm { - 0 => BitSignature::ConciseLinear, - 1 => BitSignature::EfficientLinear, + 0 => BitSignature::ClassicLinear, + 1 => BitSignature::ConciseLinear, + 2 => BitSignature::EfficientLinear, + 3 => BitSignature::CompromiseLinear, _ => panic!("Unknown algorithm") } } pub(crate) const fn bits(&self) -> usize { match self { + BitSignature::ClassicLinear => 1, BitSignature::ConciseLinear => 2, - BitSignature::EfficientLinear => 1 + BitSignature::EfficientLinear => 1, + BitSignature::CompromiseLinear => 2 } } @@ -46,8 +54,10 @@ impl BitSignature { fn aos_form(&self) -> Re { match self { + BitSignature::ClassicLinear => Re::e_default(), BitSignature::ConciseLinear => Re::e_default(), - BitSignature::EfficientLinear => Re::R_default() + BitSignature::EfficientLinear => Re::R_default(), + BitSignature::CompromiseLinear => Re::R_default() } } } diff --git a/crypto/dleq/src/cross_group/mod.rs b/crypto/dleq/src/cross_group/mod.rs index 
a0a74c5a..65692585 100644 --- a/crypto/dleq/src/cross_group/mod.rs +++ b/crypto/dleq/src/cross_group/mod.rs @@ -62,23 +62,39 @@ pub struct DLEqProof< poks: (SchnorrPoK, SchnorrPoK) } -pub type ConciseLinearDLEq = DLEqProof< - G0, - G1, - { BitSignature::ConciseLinear.to_u8() }, - { BitSignature::ConciseLinear.ring_len() }, - // There may not be a remainder, yet if there is, it'll be just one bit - // A ring for one bit has a RING_LEN of 2 - 2 ->; +macro_rules! dleq { + ($name: ident, $signature: expr, $remainder: expr) => { + pub type $name = DLEqProof< + G0, + G1, + { $signature.to_u8() }, + { $signature.ring_len() }, + // There may not be a remainder, yet if there is one, it'll be just one bit + // A ring for one bit has a RING_LEN of 2 + { if $remainder { 2 } else { 0 } } + >; + } +} - pub type EfficientLinearDLEq = DLEqProof< - G0, - G1, - { BitSignature::EfficientLinear.to_u8() }, - { BitSignature::EfficientLinear.ring_len() }, - 0 ->; +// Proves for 1-bit at a time with the signature form (e, s), as originally described in MRL-0010. +// Uses a merged challenge, unlike MRL-0010, for the ring signature, saving an element from each +// bit and removing a hash while slightly reducing challenge security. This security reduction is +// already applied to the scalar being proven for, a result of the requirement it's mutually valid +// over both scalar fields, hence its application here as well. 
This is mainly here as a point of +// reference for the following DLEq proofs, all which use merged challenges +dleq!(ClassicLinearDLEq, BitSignature::ClassicLinear, false); + +// Proves for 2-bits at a time to save 3/7 elements of every other bit +dleq!(ConciseLinearDLEq, BitSignature::ConciseLinear, true); + +// Uses AOS signatures of the form R, s, to enable the final step of the ring signature to be +// batch verified, at the cost of adding an additional element per bit +dleq!(EfficientLinearDLEq, BitSignature::EfficientLinear, false); + +// Proves for 2-bits at a time while using the R, s form. This saves 3/7 elements of every other +// bit, while adding 1 element to every bit, and is more efficient than ConciseLinear yet less +// efficient than EfficientLinear due to having more ring signature steps which aren't batched +dleq!(CompromiseLinearDLEq, BitSignature::CompromiseLinear, true); impl< G0: PrimeGroup, @@ -279,8 +295,10 @@ impl< Self::transcript(transcript, generators, keys); let batch_capacity = match BitSignature::from(SIGNATURE) { + BitSignature::ClassicLinear => 3, BitSignature::ConciseLinear => 3, - BitSignature::EfficientLinear => (self.bits.len() + 1) * 3 + BitSignature::EfficientLinear => (self.bits.len() + 1) * 3, + BitSignature::CompromiseLinear => (self.bits.len() + 1) * 3 }; let mut batch = (BatchVerifier::new(batch_capacity), BatchVerifier::new(batch_capacity)); diff --git a/crypto/dleq/src/tests/cross_group/mod.rs b/crypto/dleq/src/tests/cross_group/mod.rs index 38ef0341..a6cd4f73 100644 --- a/crypto/dleq/src/tests/cross_group/mod.rs +++ b/crypto/dleq/src/tests/cross_group/mod.rs @@ -13,7 +13,10 @@ use transcript::RecommendedTranscript; use crate::{ Generators, - cross_group::{scalar::mutual_scalar_from_bytes, EfficientLinearDLEq, ConciseLinearDLEq} + cross_group::{ + scalar::mutual_scalar_from_bytes, + ClassicLinearDLEq, EfficientLinearDLEq, ConciseLinearDLEq, CompromiseLinearDLEq + } }; mod scalar; @@ -62,7 +65,39 @@ macro_rules! 
verify_and_deserialize { } macro_rules! test_dleq { - ($name: ident, $type: ident) => { + ($str: expr, $benchmark: ident, $name: ident, $type: ident) => { + #[ignore] + #[test] + fn $benchmark() { + println!("Benchmarking with Secp256k1/Ed25519"); + let generators = generators(); + + let mut seed = [0; 32]; + OsRng.fill_bytes(&mut seed); + let key = Blake2b512::new().chain_update(seed); + + let runs = 200; + let mut proofs = Vec::with_capacity(usize::try_from(runs).unwrap()); + let time = std::time::Instant::now(); + for _ in 0 .. runs { + proofs.push($type::prove(&mut OsRng, &mut transcript(), generators, key.clone()).0); + } + println!("{} had a average prove time of {}ms", $str, time.elapsed().as_millis() / runs); + + let time = std::time::Instant::now(); + for proof in &proofs { + proof.verify(&mut OsRng, &mut transcript(), generators).unwrap(); + } + println!("{} had a average verify time of {}ms", $str, time.elapsed().as_millis() / runs); + + #[cfg(feature = "serialize")] + { + let mut buf = vec![]; + proofs[0].serialize(&mut buf); + println!("{} had a proof size of {} bytes", $str, buf.len()); + } + } + #[test] fn $name() { let generators = generators(); @@ -83,12 +118,7 @@ macro_rules! test_dleq { let mut res; while { key = Scalar::random(&mut OsRng); - res = $type::prove_without_bias( - &mut OsRng, - &mut transcript(), - generators, - key - ); + res = $type::prove_without_bias(&mut OsRng, &mut transcript(), generators, key); res.is_none() } {} let res = res.unwrap(); @@ -102,8 +132,20 @@ macro_rules! 
test_dleq { } } -test_dleq!(test_efficient_linear_dleq, EfficientLinearDLEq); -test_dleq!(test_concise_linear_dleq, ConciseLinearDLEq); +test_dleq!("ClassicLinear", benchmark_classic_linear, test_classic_linear, ClassicLinearDLEq); +test_dleq!("ConciseLinear", benchmark_concise_linear, test_concise_linear, ConciseLinearDLEq); +test_dleq!( + "EfficientLinear", + benchmark_efficient_linear, + test_efficient_linear, + EfficientLinearDLEq +); +test_dleq!( + "CompromiseLinear", + benchmark_compromise_linear, + test_compromise_linear, + CompromiseLinearDLEq +); #[test] fn test_rejection_sampling() { diff --git a/crypto/multiexp/src/tests/mod.rs b/crypto/multiexp/src/tests/mod.rs index 628c52c8..45e968a7 100644 --- a/crypto/multiexp/src/tests/mod.rs +++ b/crypto/multiexp/src/tests/mod.rs @@ -96,8 +96,8 @@ fn test_ed25519() { test_multiexp::(); } -#[test] #[ignore] +#[test] fn benchmark() { // Activate the processor's boost clock for _ in 0 .. 30 { From c3a0e0375ddae760c61f6b36a8b7895c12a7975f Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 7 Jul 2022 08:27:20 -0400 Subject: [PATCH 085/105] Save an inversion on AOS verification Incredibly minor, just nagged me --- crypto/dleq/src/cross_group/aos.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/crypto/dleq/src/cross_group/aos.rs b/crypto/dleq/src/cross_group/aos.rs index fb468969..926bcb64 100644 --- a/crypto/dleq/src/cross_group/aos.rs +++ b/crypto/dleq/src/cross_group/aos.rs @@ -79,7 +79,7 @@ impl< A: (G0, G1), e: (G0::Scalar, G1::Scalar) ) -> (Vec<(G0::Scalar, G0)>, Vec<(G1::Scalar, G1)>) { - (vec![(s.0, generators.0.alt), (-e.0, A.0)], vec![(s.1, generators.1.alt), (-e.1, A.1)]) + (vec![(-s.0, generators.0.alt), (e.0, A.0)], vec![(-s.1, generators.1.alt), (e.1, A.1)]) } #[allow(non_snake_case)] @@ -167,9 +167,8 @@ impl< *ring.last().unwrap(), e ); - // TODO: Make something else negative to speed up vartime - statements.0.push((-G0::Scalar::one(), R0_0)); - 
statements.1.push((-G1::Scalar::one(), R1_0)); + statements.0.push((G0::Scalar::one(), R0_0)); + statements.1.push((G1::Scalar::one(), R1_0)); batch.0.queue(&mut *rng, (), statements.0); batch.1.queue(&mut *rng, (), statements.1); }, From 4dbf50243be6c65e04d78eb9bfd5ed21056dccd5 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 7 Jul 2022 08:46:11 -0400 Subject: [PATCH 086/105] Fix serialization This enabled getting the proof sizes, which are: - ConciseLinear had a proof size of 44607 bytes - CompromiseLinear had a proof size of 48765 bytes - ClassicLinear had a proof size of 56829 bytes - EfficientLinear had a proof size of 65145 byte --- crypto/dleq/src/cross_group/aos.rs | 2 ++ crypto/dleq/src/cross_group/bits.rs | 7 ++++++- crypto/dleq/src/cross_group/mod.rs | 7 +++++-- crypto/dleq/src/tests/cross_group/aos.rs | 1 + crypto/dleq/src/tests/cross_group/mod.rs | 19 ++++++++++++------- 5 files changed, 26 insertions(+), 10 deletions(-) diff --git a/crypto/dleq/src/cross_group/aos.rs b/crypto/dleq/src/cross_group/aos.rs index 926bcb64..b73cb8b2 100644 --- a/crypto/dleq/src/cross_group/aos.rs +++ b/crypto/dleq/src/cross_group/aos.rs @@ -195,6 +195,7 @@ impl< #[cfg(feature = "serialize")] pub(crate) fn serialize(&self, w: &mut W) -> std::io::Result<()> { + #[allow(non_snake_case)] match self.Re_0 { Re::R(R0, R1) => { w.write_all(R0.to_bytes().as_ref())?; @@ -211,6 +212,7 @@ impl< Ok(()) } + #[allow(non_snake_case)] #[cfg(feature = "serialize")] pub(crate) fn deserialize(r: &mut R, mut Re_0: Re) -> std::io::Result { match Re_0 { diff --git a/crypto/dleq/src/cross_group/bits.rs b/crypto/dleq/src/cross_group/bits.rs index ac77de97..5f55d181 100644 --- a/crypto/dleq/src/cross_group/bits.rs +++ b/crypto/dleq/src/cross_group/bits.rs @@ -165,6 +165,11 @@ impl< #[cfg(feature = "serialize")] pub(crate) fn deserialize(r: &mut R) -> std::io::Result { - Ok(Bits { commitments: (read_point(r)?, read_point(r)?), signature: Aos::deserialize(r)? 
}) + Ok( + Bits { + commitments: (read_point(r)?, read_point(r)?), + signature: Aos::deserialize(r, BitSignature::from(SIGNATURE).aos_form())? + } + ) } } diff --git a/crypto/dleq/src/cross_group/mod.rs b/crypto/dleq/src/cross_group/mod.rs index 65692585..932f7242 100644 --- a/crypto/dleq/src/cross_group/mod.rs +++ b/crypto/dleq/src/cross_group/mod.rs @@ -63,7 +63,7 @@ pub struct DLEqProof< } macro_rules! dleq { - ($name: ident, $signature: expr, $remainder: expr) => { + ($name: ident, $signature: expr, $remainder: literal) => { pub type $name = DLEqProof< G0, G1, @@ -81,10 +81,12 @@ macro_rules! dleq { // bit and removing a hash while slightly reducing challenge security. This security reduction is // already applied to the scalar being proven for, a result of the requirement it's mutually valid // over both scalar fields, hence its application here as well. This is mainly here as a point of -// reference for the following DLEq proofs, all which use merged challenges +// reference for the following DLEq proofs, all which use merged challenges, and isn't performant +// in comparison to the others dleq!(ClassicLinearDLEq, BitSignature::ClassicLinear, false); // Proves for 2-bits at a time to save 3/7 elements of every other bit +// <9% smaller than CompromiseLinear, yet ~12% slower dleq!(ConciseLinearDLEq, BitSignature::ConciseLinear, true); // Uses AOS signatures of the form R, s, to enable the final step of the ring signature to be @@ -94,6 +96,7 @@ dleq!(EfficientLinearDLEq, BitSignature::EfficientLinear, false); // Proves for 2-bits at a time while using the R, s form. 
This saves 3/7 elements of every other // bit, while adding 1 element to every bit, and is more efficient than ConciseLinear yet less // efficient than EfficientLinear due to having more ring signature steps which aren't batched +// >25% smaller than EfficientLinear and just 11% slower, making it the recommended option dleq!(CompromiseLinearDLEq, BitSignature::CompromiseLinear, true); impl< diff --git a/crypto/dleq/src/tests/cross_group/aos.rs b/crypto/dleq/src/tests/cross_group/aos.rs index efd37026..0ec83836 100644 --- a/crypto/dleq/src/tests/cross_group/aos.rs +++ b/crypto/dleq/src/tests/cross_group/aos.rs @@ -9,6 +9,7 @@ use crate::{ tests::cross_group::{G0, G1, transcript, generators} }; +#[allow(non_snake_case)] #[cfg(feature = "serialize")] fn test_aos_serialization(proof: Aos, Re_0: Re) { let mut buf = vec![]; diff --git a/crypto/dleq/src/tests/cross_group/mod.rs b/crypto/dleq/src/tests/cross_group/mod.rs index a6cd4f73..c201f497 100644 --- a/crypto/dleq/src/tests/cross_group/mod.rs +++ b/crypto/dleq/src/tests/cross_group/mod.rs @@ -49,7 +49,7 @@ pub(crate) fn generators() -> (Generators, Generators) { } macro_rules! verify_and_deserialize { - ($type: ident, $proof: ident, $generators: ident, $keys: ident) => { + ($type: ty, $proof: ident, $generators: ident, $keys: ident) => { let public_keys = $proof.verify(&mut OsRng, &mut transcript(), $generators).unwrap(); assert_eq!($generators.0.primary * $keys.0, public_keys.0); assert_eq!($generators.1.primary * $keys.1, public_keys.1); @@ -58,14 +58,14 @@ macro_rules! verify_and_deserialize { { let mut buf = vec![]; $proof.serialize(&mut buf).unwrap(); - let deserialized = $type::::deserialize(&mut std::io::Cursor::new(&buf)).unwrap(); - assert_eq!(proof, deserialized); + let deserialized = <$type>::deserialize(&mut std::io::Cursor::new(&buf)).unwrap(); + assert_eq!($proof, deserialized); } } } macro_rules! 
test_dleq { - ($str: expr, $benchmark: ident, $name: ident, $type: ident) => { + ($str: literal, $benchmark: ident, $name: ident, $type: ident) => { #[ignore] #[test] fn $benchmark() { @@ -93,7 +93,7 @@ macro_rules! test_dleq { #[cfg(feature = "serialize")] { let mut buf = vec![]; - proofs[0].serialize(&mut buf); + proofs[0].serialize(&mut buf).unwrap(); println!("{} had a proof size of {} bytes", $str, buf.len()); } } @@ -126,7 +126,7 @@ macro_rules! test_dleq { res }; - verify_and_deserialize!($type, proof, generators, keys); + verify_and_deserialize!($type::, proof, generators, keys); } } } @@ -183,5 +183,10 @@ fn test_remainder() { ).unwrap(); assert_eq!(keys, res); - verify_and_deserialize!(ConciseLinearDLEq, proof, generators, keys); + verify_and_deserialize!( + ConciseLinearDLEq::, + proof, + generators, + keys + ); } From 72afcf1f063b92bee8021755a7d9da68950758b1 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 7 Jul 2022 08:51:17 -0400 Subject: [PATCH 087/105] Mark cross_group as experimental While all of Serai can be argued as experimental, the DLEq proof is especially so, as it's lacking any formal proofs over its theory. Also adds doc(hidden) to the generic DLEqProof, now prefixed with __. 
--- crypto/dleq/Cargo.toml | 2 +- crypto/dleq/src/cross_group/mod.rs | 13 ++++++++----- crypto/dleq/src/lib.rs | 2 +- crypto/dleq/src/tests/mod.rs | 2 +- 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/crypto/dleq/Cargo.toml b/crypto/dleq/Cargo.toml index 8943544c..7216c338 100644 --- a/crypto/dleq/Cargo.toml +++ b/crypto/dleq/Cargo.toml @@ -31,7 +31,7 @@ transcript = { package = "flexible-transcript", path = "../transcript", features [features] serialize = [] -cross_group = ["multiexp"] +experimental_cross_group = ["multiexp"] secure_capacity_difference = [] # Only applies to cross_group, yet is default to ensure security diff --git a/crypto/dleq/src/cross_group/mod.rs b/crypto/dleq/src/cross_group/mod.rs index 932f7242..370b6c8e 100644 --- a/crypto/dleq/src/cross_group/mod.rs +++ b/crypto/dleq/src/cross_group/mod.rs @@ -47,10 +47,13 @@ pub enum DLEqError { InvalidProof } +// This should never be directly instantiated and uses a u8 to represent internal values +// Any external usage is likely invalid +#[doc(hidden)] // Debug would be such a dump of data this likely isn't helpful, but at least it's available to // anyone who wants it #[derive(Clone, PartialEq, Eq, Debug)] -pub struct DLEqProof< +pub struct __DLEqProof< G0: PrimeGroup, G1: PrimeGroup, const SIGNATURE: u8, @@ -64,7 +67,7 @@ pub struct DLEqProof< macro_rules! 
dleq { ($name: ident, $signature: expr, $remainder: literal) => { - pub type $name = DLEqProof< + pub type $name = __DLEqProof< G0, G1, { $signature.to_u8() }, @@ -105,7 +108,7 @@ impl< const SIGNATURE: u8, const RING_LEN: usize, const REMAINDER_RING_LEN: usize -> DLEqProof where +> __DLEqProof where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits { pub(crate) fn transcript( @@ -232,7 +235,7 @@ impl< ); } - let proof = DLEqProof { bits, remainder, poks }; + let proof = __DLEqProof { bits, remainder, poks }; debug_assert_eq!( proof.reconstruct_keys(), (generators.0.primary * f.0, generators.1.primary * f.1) @@ -353,7 +356,7 @@ impl< } Ok( - DLEqProof { + __DLEqProof { bits, remainder, poks: (SchnorrPoK::deserialize(r)?, SchnorrPoK::deserialize(r)?) diff --git a/crypto/dleq/src/lib.rs b/crypto/dleq/src/lib.rs index f960cdfe..176de0b4 100644 --- a/crypto/dleq/src/lib.rs +++ b/crypto/dleq/src/lib.rs @@ -9,7 +9,7 @@ use group::prime::PrimeGroup; #[cfg(feature = "serialize")] use std::io::{self, ErrorKind, Error, Read, Write}; -#[cfg(feature = "cross_group")] +#[cfg(feature = "experimental_cross_group")] pub mod cross_group; #[cfg(test)] diff --git a/crypto/dleq/src/tests/mod.rs b/crypto/dleq/src/tests/mod.rs index 119bbc6b..1fe2172f 100644 --- a/crypto/dleq/src/tests/mod.rs +++ b/crypto/dleq/src/tests/mod.rs @@ -1,4 +1,4 @@ -#[cfg(feature = "cross_group")] +#[cfg(feature = "experimental_cross_group")] mod cross_group; use hex_literal::hex; From 7cbdcc8ae6763a36d739234047a606dfc2a108c7 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 7 Jul 2022 09:34:35 -0400 Subject: [PATCH 088/105] Add a comprehensive README to dleq --- crypto/dleq/README.md | 62 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 57 insertions(+), 5 deletions(-) diff --git a/crypto/dleq/README.md b/crypto/dleq/README.md index 77465a4e..ac5cc190 100644 --- a/crypto/dleq/README.md +++ b/crypto/dleq/README.md @@ -2,9 +2,61 @@ Implementation of discrete log equality both within a group 
and across groups, the latter being extremely experimental, for curves implementing the ff/group -APIs. This library has not undergone auditing. +APIs. This library has not undergone auditing and the cross-group DLEq proof has +no formal proofs available. -The cross-group DLEq is the one described in -https://web.getmonero.org/resources/research-lab/pubs/MRL-0010.pdf, augmented -with a pair of Schnorr Proof of Knowledges in order to correct for a mistake -present in the paper. +### Cross-Group DLEq + +The present cross-group DLEq is based off +[MRL-0010](https://web.getmonero.org/resources/research-lab/pubs/MRL-0010.pdf), +which isn't computationally correct as while it proves both keys have the same +discrete-log value for the G'/H' component, yet doesn't prove a lack of a G/H +component. Accordingly, it was augmented with a pair of Schnorr Proof of +Knowledges, proving a known G'/H' component, guaranteeing a lack of a G/H +component (assuming an unknown relation between G/H and G'/H'). + +The challenges for the ring signatures were also merged, removing one-element +from each bit's proof with only a slight reduction to challenge security (as +instead of being uniform over each scalar field, they're uniform over the +mutual bit capacity of each scalar field). This reduction is identical to the +one applied to the proved-for scalar, and accordingly should not reduce overall +security. It does create a lack of domain separation, yet that shouldn't be an +issue. + +The following variants are available: + +- `ClassicLinear`. This is only for reference purposes, being the above + described proof, with no further optimizations. + +- `ConciseLinear`. This proves for 2 bits at a time, not increasing the + signature size for both bits yet decreasing the amount of + commitments/challenges in total. + +- `EfficientLinear`. This provides ring signatures in the form ((R_G, R_H), s), + instead of (e, s), and accordingly enables a batch verification of their final + step. 
It is the most performant, and also the largest, option. + +- `CompromiseLinear`. This provides signatures in the form ((R_G, R_H), s) AND + proves for 2-bits at a time. While this increases the amount of steps in + verifying the ring signatures, which aren't batch verified, and decreases the + amount of items batched (an operation which grows in efficiency with + quantity), it strikes a balance between speed and size. + +The following numbers are from benchmarks performed with Secp256k1/Ed25519 on a +Intel i7-118567: + +| Algorithm | Size | Performance | +|--------------------|-------------------------|-------------------| +| `ClassicLinear` | 56829 bytes (+27%) | 157ms (0%) | +| `ConciseLinear` | 44607 bytes (Reference) | 156ms (Reference) | +| `EfficientLinear` | 65145 bytes (+46%) | 122ms (-22%) | +| `CompromiseLinear` | 48765 bytes (+9%) | 137ms (-12%) | + +CompromiseLinear is the best choce by only being marginally sub-optimal +regarding size, yet still achieving most of the desired performance +improvements. That said, neither the original postulation (which had flaws) nor +any construction here has been proven nor audited. Accordingly, they are solely +experimental, and none are recommended. + +All proofs are suffixed Linear in the hope a logarithmic proof makes itself +available, which would likely immediately become the most efficient option. 
From b69337a3a67f0c780f084a674810947f4fbcd7d9 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 7 Jul 2022 09:52:10 -0400 Subject: [PATCH 089/105] Tweak DLEq README and rename the experimental_cross_group feature to just experimental --- crypto/dleq/Cargo.toml | 2 +- crypto/dleq/README.md | 33 +++++++++++++++++---------------- crypto/dleq/src/lib.rs | 2 +- crypto/dleq/src/tests/mod.rs | 2 +- 4 files changed, 20 insertions(+), 19 deletions(-) diff --git a/crypto/dleq/Cargo.toml b/crypto/dleq/Cargo.toml index 7216c338..e1a018d1 100644 --- a/crypto/dleq/Cargo.toml +++ b/crypto/dleq/Cargo.toml @@ -31,7 +31,7 @@ transcript = { package = "flexible-transcript", path = "../transcript", features [features] serialize = [] -experimental_cross_group = ["multiexp"] +experimental = ["multiexp"] secure_capacity_difference = [] # Only applies to cross_group, yet is default to ensure security diff --git a/crypto/dleq/README.md b/crypto/dleq/README.md index ac5cc190..356c89c2 100644 --- a/crypto/dleq/README.md +++ b/crypto/dleq/README.md @@ -1,19 +1,19 @@ # Discrete Log Equality -Implementation of discrete log equality both within a group and across groups, -the latter being extremely experimental, for curves implementing the ff/group -APIs. This library has not undergone auditing and the cross-group DLEq proof has -no formal proofs available. +Implementation of discrete log equality proofs for curves implementing +`ff`/`group`. There is also a highly experimental cross-group DLEq proof, under +the `experimental` feature, which has no formal proofs available yet is +available here regardless. This library has NOT undergone auditing. ### Cross-Group DLEq The present cross-group DLEq is based off [MRL-0010](https://web.getmonero.org/resources/research-lab/pubs/MRL-0010.pdf), which isn't computationally correct as while it proves both keys have the same -discrete-log value for the G'/H' component, yet doesn't prove a lack of a G/H -component. 
Accordingly, it was augmented with a pair of Schnorr Proof of -Knowledges, proving a known G'/H' component, guaranteeing a lack of a G/H -component (assuming an unknown relation between G/H and G'/H'). +discrete logarithm for their `G'`/`H'` component, it doesn't prove a lack of a +`G`/`H` component. Accordingly, it was augmented with a pair of Schnorr Proof of +Knowledges, proving a known `G'`/`H'` component, guaranteeing a lack of a +`G`/`H` component (assuming an unknown relation between `G`/`H` and `G'`/`H'`). The challenges for the ring signatures were also merged, removing one-element from each bit's proof with only a slight reduction to challenge security (as @@ -32,18 +32,19 @@ The following variants are available: signature size for both bits yet decreasing the amount of commitments/challenges in total. -- `EfficientLinear`. This provides ring signatures in the form ((R_G, R_H), s), - instead of (e, s), and accordingly enables a batch verification of their final - step. It is the most performant, and also the largest, option. +- `EfficientLinear`. This provides ring signatures in the form + `((R_G, R_H), s)`, instead of `(e, s)`, and accordingly enables a batch + verification of their final step. It is the most performant, and also the + largest, option. -- `CompromiseLinear`. This provides signatures in the form ((R_G, R_H), s) AND +- `CompromiseLinear`. This provides signatures in the form `((R_G, R_H), s)` AND proves for 2-bits at a time. While this increases the amount of steps in verifying the ring signatures, which aren't batch verified, and decreases the amount of items batched (an operation which grows in efficiency with quantity), it strikes a balance between speed and size. 
-The following numbers are from benchmarks performed with Secp256k1/Ed25519 on a -Intel i7-118567: +The following numbers are from benchmarks performed with k256/curve25519_dalek +on a Intel i7-118567: | Algorithm | Size | Performance | |--------------------|-------------------------|-------------------| @@ -52,11 +53,11 @@ Intel i7-118567: | `EfficientLinear` | 65145 bytes (+46%) | 122ms (-22%) | | `CompromiseLinear` | 48765 bytes (+9%) | 137ms (-12%) | -CompromiseLinear is the best choce by only being marginally sub-optimal +`CompromiseLinear` is the best choice by only being marginally sub-optimal regarding size, yet still achieving most of the desired performance improvements. That said, neither the original postulation (which had flaws) nor any construction here has been proven nor audited. Accordingly, they are solely experimental, and none are recommended. -All proofs are suffixed Linear in the hope a logarithmic proof makes itself +All proofs are suffixed "Linear" in the hope a logarithmic proof makes itself available, which would likely immediately become the most efficient option. 
diff --git a/crypto/dleq/src/lib.rs b/crypto/dleq/src/lib.rs index 176de0b4..5a619d12 100644 --- a/crypto/dleq/src/lib.rs +++ b/crypto/dleq/src/lib.rs @@ -9,7 +9,7 @@ use group::prime::PrimeGroup; #[cfg(feature = "serialize")] use std::io::{self, ErrorKind, Error, Read, Write}; -#[cfg(feature = "experimental_cross_group")] +#[cfg(feature = "experimental")] pub mod cross_group; #[cfg(test)] diff --git a/crypto/dleq/src/tests/mod.rs b/crypto/dleq/src/tests/mod.rs index 1fe2172f..781c4840 100644 --- a/crypto/dleq/src/tests/mod.rs +++ b/crypto/dleq/src/tests/mod.rs @@ -1,4 +1,4 @@ -#[cfg(feature = "experimental_cross_group")] +#[cfg(feature = "experimental")] mod cross_group; use hex_literal::hex; From 7d13be5797f39f71a864408683b6d7742adff9a3 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 7 Jul 2022 09:53:31 -0400 Subject: [PATCH 090/105] Update Monero submodule --- coins/monero/c/monero | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coins/monero/c/monero b/coins/monero/c/monero index ab18fea3..424e4de1 160000 --- a/coins/monero/c/monero +++ b/coins/monero/c/monero @@ -1 +1 @@ -Subproject commit ab18fea3500841fc312630d49ed6840b3aedb34d +Subproject commit 424e4de16b98506170db7b0d7d87a79ccf541744 From 6c7645806342bf737133a1ccda903ffca3bf3c31 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 7 Jul 2022 14:13:24 -0400 Subject: [PATCH 091/105] Statically link Monero Closes https://github.com/serai-dex/serai/issues/11. 
--- coins/monero/Cargo.toml | 3 ++ coins/monero/build.rs | 86 +++++++++++++++++++++++++---------------- 2 files changed, 55 insertions(+), 34 deletions(-) diff --git a/coins/monero/Cargo.toml b/coins/monero/Cargo.toml index d2f40f8c..f62145a7 100644 --- a/coins/monero/Cargo.toml +++ b/coins/monero/Cargo.toml @@ -6,6 +6,9 @@ license = "MIT" authors = ["Luke Parker "] edition = "2021" +[build-dependencies] +cc = "1.0" + [dependencies] hex-literal = "0.3" lazy_static = "1" diff --git a/coins/monero/build.rs b/coins/monero/build.rs index 807e80ef..d2b6f66d 100644 --- a/coins/monero/build.rs +++ b/coins/monero/build.rs @@ -1,6 +1,4 @@ -use std::process::Command; -use std::env; -use std::path::Path; +use std::{env, path::Path, process::Command}; fn main() { if !Command::new("git").args(&["submodule", "update", "--init", "--recursive"]).status().unwrap().success() { @@ -19,11 +17,6 @@ fn main() { // TODO: Move this signaling file into OUT_DIR once Monero is built statically successfully println!("cargo:rerun-if-changed=c/.build/monero"); if !Path::new("c/.build/monero").exists() { - if !Command::new("cmake").args(&["cmake", "-DCMAKE_BUILD_TYPE=Release", "-DBUILD_SHARED_LIBS=1", "."]) - .current_dir(&Path::new("c/monero")).status().unwrap().success() { - panic!("cmake failed to generate Monero's build scripts"); - } - if !Command::new("make").arg(format!("-j{}", &env::var("THREADS").unwrap_or("2".to_string()))) .current_dir(&Path::new("c/monero")).status().unwrap().success() { panic!("make failed to build Monero. 
Please check your dependencies"); @@ -44,47 +37,72 @@ fn main() { &env::consts::DLL_EXTENSION ) ).exists() { + let mut paths = vec![ + "c/monero/build/release/contrib/epee/src/libepee.a".to_string(), + "c/monero/build/release/external/easylogging++/libeasylogging.a".to_string(), + "c/monero/build/release/external/randomx/librandomx.a".to_string() + ]; + for (folder, lib) in [ + ("common", "common"), ("crypto", "cncrypto"), + ("crypto/wallet", "wallet-crypto"), + ("cryptonote_basic", "cryptonote_basic"), + ("cryptonote_basic", "cryptonote_format_utils_basic"), + ("", "version"), ("device", "device"), ("ringct", "ringct_basic"), ("ringct", "ringct") ] { - if !Command::new("cp").args(&[ - &format!( - "c/monero/src/{}/{}{}.{}", + paths.push( + format!( + "c/monero/build/release/src/{}/{}{}.a", folder, &env::consts::DLL_PREFIX, - lib, - &env::consts::DLL_EXTENSION - ), - out_dir - ]).status().unwrap().success() { - panic!("Failed to cp {}", lib); + lib + ) + ); + } + + for path in paths { + if !Command::new("cp").args(&[&path, out_dir]).status().unwrap().success() { + panic!("Failed to cp {}", path); } } } println!("cargo:rerun-if-changed=c/wrapper.cpp"); - if !Command::new("g++").args(&[ - "-O3", "-Wall", "-shared", "-std=c++14", "-fPIC", - "-Imonero/contrib/epee/include", "-Imonero/src", - "wrapper.cpp", "-o", &format!( - "{}/{}wrapper.{}", - out_dir, - &env::consts::DLL_PREFIX, - &env::consts::DLL_EXTENSION - ), - &format!("-L{}", out_dir), - "-ldevice", "-lringct_basic", "-lringct" - ]).current_dir(&Path::new("c")).status().unwrap().success() { - panic!("g++ failed to build the wrapper"); + if !Path::new(&format!("{}/{}wrapper.a", out_dir, &env::consts::DLL_PREFIX)).exists() { + cc::Build::new() + .file("c/wrapper.cpp") + .cpp(true) + .warnings(false) + .include("c/monero/contrib/epee/include") + .include("c/monero/src") + .compile("wrapper"); } println!("cargo:rustc-link-search={}", out_dir); - println!("cargo:rustc-link-lib=cncrypto"); - 
println!("cargo:rustc-link-lib=device"); - println!("cargo:rustc-link-lib=ringct_basic"); - println!("cargo:rustc-link-lib=ringct"); println!("cargo:rustc-link-lib=wrapper"); + println!("cargo:rustc-link-lib=ringct"); + println!("cargo:rustc-link-lib=ringct_basic"); + println!("cargo:rustc-link-lib=device"); + println!("cargo:rustc-link-lib=cryptonote_basic"); + println!("cargo:rustc-link-lib=cncrypto"); + println!("cargo:rustc-link-lib=cryptonote_format_utils_basic"); + println!("cargo:rustc-link-lib=version"); + println!("cargo:rustc-link-lib=wallet-crypto"); + println!("cargo:rustc-link-lib=easylogging"); + println!("cargo:rustc-link-lib=epee"); + println!("cargo:rustc-link-lib=common"); + println!("cargo:rustc-link-lib=randomx"); + println!("cargo:rustc-link-lib=unbound"); + println!("cargo:rustc-link-lib=sodium"); + println!("cargo:rustc-link-lib=boost_system"); + println!("cargo:rustc-link-lib=boost_thread"); + println!("cargo:rustc-link-lib=boost_filesystem"); + println!("cargo:rustc-link-lib=hidapi-hidraw"); + println!("cargo:rustc-link-lib=stdc++"); + + println!("cargo:rustc-link-arg=-zmuldefs"); } From f7106f09ed6efde2bd87afe6133e347b57b29af4 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 7 Jul 2022 14:28:42 -0400 Subject: [PATCH 092/105] Add further must_uses --- coins/monero/src/ringct/bulletproofs.rs | 1 + coins/monero/src/ringct/clsag/multisig.rs | 2 ++ crypto/frost/src/algorithm.rs | 4 ++++ crypto/frost/src/schnorr.rs | 1 + 4 files changed, 8 insertions(+) diff --git a/coins/monero/src/ringct/bulletproofs.rs b/coins/monero/src/ringct/bulletproofs.rs index e6a258d9..46077f2e 100644 --- a/coins/monero/src/ringct/bulletproofs.rs +++ b/coins/monero/src/ringct/bulletproofs.rs @@ -76,6 +76,7 @@ impl Bulletproofs { Ok(res) } + #[must_use] pub fn verify(&self, rng: &mut R, commitments: &[EdwardsPoint]) -> bool { if commitments.len() > 16 { return false; diff --git a/coins/monero/src/ringct/clsag/multisig.rs b/coins/monero/src/ringct/clsag/multisig.rs 
index 77adc0b1..19fff6a7 100644 --- a/coins/monero/src/ringct/clsag/multisig.rs +++ b/coins/monero/src/ringct/clsag/multisig.rs @@ -227,6 +227,7 @@ impl Algorithm for ClsagMultisig { share } + #[must_use] fn verify( &self, _: dfg::EdwardsPoint, @@ -247,6 +248,7 @@ impl Algorithm for ClsagMultisig { return None; } + #[must_use] fn verify_share( &self, verification_share: dfg::EdwardsPoint, diff --git a/crypto/frost/src/algorithm.rs b/crypto/frost/src/algorithm.rs index 70f6cf92..a85bab11 100644 --- a/crypto/frost/src/algorithm.rs +++ b/crypto/frost/src/algorithm.rs @@ -46,10 +46,12 @@ pub trait Algorithm: Clone { ) -> C::F; /// Verify a signature + #[must_use] fn verify(&self, group_key: C::G, nonce: C::G, sum: C::F) -> Option; /// Verify a specific share given as a response. Used to determine blame if signature /// verification fails + #[must_use] fn verify_share( &self, verification_share: C::G, @@ -145,6 +147,7 @@ impl> Algorithm for Schnorr { schnorr::sign::(params.secret_share(), nonce, c).s } + #[must_use] fn verify(&self, group_key: C::G, nonce: C::G, sum: C::F) -> Option { let sig = SchnorrSignature { R: nonce, s: sum }; if schnorr::verify::(group_key, self.c.unwrap(), &sig) { @@ -154,6 +157,7 @@ impl> Algorithm for Schnorr { } } + #[must_use] fn verify_share( &self, verification_share: C::G, diff --git a/crypto/frost/src/schnorr.rs b/crypto/frost/src/schnorr.rs index dafc8251..6bc63768 100644 --- a/crypto/frost/src/schnorr.rs +++ b/crypto/frost/src/schnorr.rs @@ -33,6 +33,7 @@ pub(crate) fn sign( } } +#[must_use] pub(crate) fn verify( public_key: C::G, challenge: C::F, From a4c2f71610a6c2c518a5a4e26927b857ecc6e874 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 7 Jul 2022 14:28:53 -0400 Subject: [PATCH 093/105] Correct DLEq README column title --- crypto/dleq/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/dleq/README.md b/crypto/dleq/README.md index 356c89c2..6725f837 100644 --- a/crypto/dleq/README.md +++ 
b/crypto/dleq/README.md @@ -46,7 +46,7 @@ The following variants are available: The following numbers are from benchmarks performed with k256/curve25519_dalek on a Intel i7-118567: -| Algorithm | Size | Performance | +| Algorithm | Size | Verification Time | |--------------------|-------------------------|-------------------| | `ClassicLinear` | 56829 bytes (+27%) | 157ms (0%) | | `ConciseLinear` | 44607 bytes (Reference) | 156ms (Reference) | From 41eaa1b124b5fff93dad6386afa367eb5f0b7d94 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 8 Jul 2022 15:30:56 -0400 Subject: [PATCH 094/105] Increase constant-time properties of from_repr/from_bytes It's still not perfect, as it's Option -> CtOption which requires an unwrap_or, but... --- crypto/dalek-ff-group/src/lib.rs | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/crypto/dalek-ff-group/src/lib.rs b/crypto/dalek-ff-group/src/lib.rs index e2ed5e75..f96d0210 100644 --- a/crypto/dalek-ff-group/src/lib.rs +++ b/crypto/dalek-ff-group/src/lib.rs @@ -32,6 +32,12 @@ use dalek::{ use ff::{Field, PrimeField, FieldBits, PrimeFieldBits}; use group::{Group, GroupEncoding, prime::PrimeGroup}; +fn choice(value: bool) -> Choice { + let bit = value as u8; + debug_assert_eq!(bit | 1, 1); + Choice::from(bit) +} + macro_rules! 
deref_borrow { ($Source: ident, $Target: ident) => { impl Deref for $Source { @@ -160,7 +166,7 @@ impl Field for Scalar { fn square(&self) -> Self { *self * self } fn double(&self) -> Self { *self + self } fn invert(&self) -> CtOption { - CtOption::new(Self(self.0.invert()), Choice::from(1 as u8)) + CtOption::new(Self(self.0.invert()), self.is_zero()) } fn sqrt(&self) -> CtOption { unimplemented!() } fn is_zero(&self) -> Choice { self.0.ct_eq(&DScalar::zero()) } @@ -177,11 +183,9 @@ impl PrimeField for Scalar { const NUM_BITS: u32 = 253; const CAPACITY: u32 = 252; fn from_repr(bytes: [u8; 32]) -> CtOption { - let scalar = DScalar::from_canonical_bytes(bytes).map(|x| Scalar(x)); - CtOption::new( - scalar.unwrap_or(Scalar::zero()), - Choice::from(if scalar.is_some() { 1 } else { 0 }) - ) + let scalar = DScalar::from_canonical_bytes(bytes); + // TODO: This unwrap_or isn't constant time, yet do we have an alternative? + CtOption::new(Scalar(scalar.unwrap_or(DScalar::zero())), choice(scalar.is_some())) } fn to_repr(&self) -> [u8; 32] { self.0.to_bytes() } @@ -237,6 +241,8 @@ macro_rules! dalek_group { impl Group for $Point { type Scalar = Scalar; + // Ideally, this would be cryptographically secure, yet that's not a bound on the trait + // k256 also does this fn random(rng: impl RngCore) -> Self { &$BASEPOINT_TABLE * Scalar::random(rng) } fn identity() -> Self { Self($DPoint::identity()) } fn generator() -> Self { $BASEPOINT_POINT } @@ -248,12 +254,10 @@ macro_rules! 
dalek_group { type Repr = [u8; 32]; fn from_bytes(bytes: &Self::Repr) -> CtOption { - if let Some(point) = $DCompressed(*bytes).decompress() { - if $torsion_free(point) { - return CtOption::new($Point(point), Choice::from(1)); - } - } - CtOption::new($Point::identity(), Choice::from(0)) + let decompressed = $DCompressed(*bytes).decompress(); + // TODO: Same note on unwrap_or as above + let point = decompressed.unwrap_or($DPoint::identity()); + CtOption::new($Point(point), choice(decompressed.is_some()) & choice($torsion_free(point))) } fn from_bytes_unchecked(bytes: &Self::Repr) -> CtOption { From f50fd76b31d5f339a87595989f61ed3588394706 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 8 Jul 2022 16:05:17 -0400 Subject: [PATCH 095/105] Fix dalek_ff_group invert --- crypto/dalek-ff-group/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/dalek-ff-group/src/lib.rs b/crypto/dalek-ff-group/src/lib.rs index f96d0210..96a68501 100644 --- a/crypto/dalek-ff-group/src/lib.rs +++ b/crypto/dalek-ff-group/src/lib.rs @@ -166,7 +166,7 @@ impl Field for Scalar { fn square(&self) -> Self { *self * self } fn double(&self) -> Self { *self + self } fn invert(&self) -> CtOption { - CtOption::new(Self(self.0.invert()), self.is_zero()) + CtOption::new(Self(self.0.invert()), !self.is_zero()) } fn sqrt(&self) -> CtOption { unimplemented!() } fn is_zero(&self) -> Choice { self.0.ct_eq(&DScalar::zero()) } From c5f75568cda68ec3a0766d4cf41f9febfc861b73 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 8 Jul 2022 16:23:26 -0400 Subject: [PATCH 096/105] Add READMEs to Serai and Monero --- README.md | 22 ++++++++++++++++++++++ coins/monero/README.md | 7 +++++++ crypto/dalek-ff-group/README.md | 3 ++- 3 files changed, 31 insertions(+), 1 deletion(-) create mode 100644 README.md create mode 100644 coins/monero/README.md diff --git a/README.md b/README.md new file mode 100644 index 00000000..9c565a77 --- /dev/null +++ b/README.md @@ -0,0 +1,22 @@ +# Serai + 
+Serai is a new DEX, built from the ground up, initially planning on listing +Bitcoin, Ethereum, Monero, DAI, and USDC, offering a liquidity pool trading +experience. Funds are stored in an economically secured threshold multisig +wallet. + +### Layout + +- `docs` - Documentation on the Serai protocol. + +- `coins` - Various coin libraries intended for usage in Serai yet also by the + wider community. This means they will always support the functionality Serai + needs, yet won't disadvantage other use cases when possible. + +- `crypto` - A series of composable cryptographic libraries built around the + `ff`/`group` APIs achieving a variety of tasks. These range from generic + infrastructure, to our IETF-compliant FROST implementation, to a DLEq proof as + needed for Bitcoin-Monero atomic swaps. + +- `processor` - A generic chain processor to process data for Serai and process + events from Serai, executing transactions as expected and needed. diff --git a/coins/monero/README.md b/coins/monero/README.md new file mode 100644 index 00000000..50fc1700 --- /dev/null +++ b/coins/monero/README.md @@ -0,0 +1,7 @@ +# monero-serai + +A modern Monero transaction library intended for usage in wallets. It prides +itself on accuracy, correctness, and removing common pit falls developers may +face. + +Threshold multisignature support is available via the `multisig` feature. diff --git a/crypto/dalek-ff-group/README.md b/crypto/dalek-ff-group/README.md index b5b600b7..730de5a7 100644 --- a/crypto/dalek-ff-group/README.md +++ b/crypto/dalek-ff-group/README.md @@ -1,5 +1,6 @@ # Dalek FF/Group -ff/group bindings around curve25519-dalek with a random function based around a more modern rand_core. +ff/group bindings around curve25519-dalek with a from_hash/random function based +around modern dependencies. Some functions currently remain unimplemented. 
From f8760ae021d9e99eeb1c43cb85526a1e1a77a42f Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 9 Jul 2022 00:37:39 -0400 Subject: [PATCH 097/105] Document the transcript library --- crypto/transcript/README.md | 3 +++ crypto/transcript/src/lib.rs | 13 +++++++++++++ 2 files changed, 16 insertions(+) diff --git a/crypto/transcript/README.md b/crypto/transcript/README.md index 92777a52..6081c0dd 100644 --- a/crypto/transcript/README.md +++ b/crypto/transcript/README.md @@ -6,6 +6,9 @@ Flexible Transcript is a crate offering: provided hash function. - `MerlinTranscript`, a wrapper of `merlin` into the trait (available via the `merlin` feature). +- `RecommendedTranscript`, a transcript recommended for usage in applications. + Currently, this is `DigestTranscript` (available via the + `recommended` feature). The trait was created while working on an IETF draft which defined an incredibly simple transcript format. Extensions of the protocol would quickly require a diff --git a/crypto/transcript/src/lib.rs b/crypto/transcript/src/lib.rs index c11dd38e..eff02b5a 100644 --- a/crypto/transcript/src/lib.rs +++ b/crypto/transcript/src/lib.rs @@ -10,9 +10,19 @@ use digest::{typenum::type_operators::IsGreaterOrEqual, consts::U256, Digest, Ou pub trait Transcript { type Challenge: Clone + Send + Sync + AsRef<[u8]>; + /// Apply a domain separator to the transcript fn domain_separate(&mut self, label: &'static [u8]); + + /// Append a message to the transcript fn append_message(&mut self, label: &'static [u8], message: &[u8]); + + /// Produce a challenge. This MUST update the transcript as it does so, preventing the same + /// challenge from being generated multiple times fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge; + + /// Produce a RNG seed. Helper function for parties needing to generate random data from an + /// agreed upon state. 
Internally calls the challenge function for the needed bytes, converting + /// them to the seed format rand_core expects fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32]; } @@ -36,9 +46,12 @@ impl DigestTranscriptMember { } } +/// A trait defining Digests with at least a 256-byte output size, assuming at least a 128-bit +/// level of security accordingly pub trait SecureDigest: Clone + Digest {} impl SecureDigest for D where D::OutputSize: IsGreaterOrEqual {} +/// A simple transcript format constructed around the specified hash algorithm #[derive(Clone, Debug)] pub struct DigestTranscript(D); From 5942492519f7f701dd901bf17e87456d720652e6 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 9 Jul 2022 00:38:19 -0400 Subject: [PATCH 098/105] Support transcripts with 32-byte challenges in the DLEq crate --- crypto/dleq/src/lib.rs | 17 +++++++++++------ crypto/dleq/src/tests/cross_group/mod.rs | 2 +- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/crypto/dleq/src/lib.rs b/crypto/dleq/src/lib.rs index 5a619d12..1c7069ca 100644 --- a/crypto/dleq/src/lib.rs +++ b/crypto/dleq/src/lib.rs @@ -34,22 +34,27 @@ impl Generators { } pub(crate) fn challenge(transcript: &mut T) -> F { - assert!(F::NUM_BITS <= 384); - // From here, there are three ways to get a scalar under the ff/group API // 1: Scalar::random(ChaCha12Rng::from_seed(self.transcript.rng_seed(b"challenge"))) // 2: Grabbing a UInt library to perform reduction by the modulus, then determining endianess // and loading it in // 3: Iterating over each byte and manually doubling/adding. 
This is simplest - let challenge_bytes = transcript.challenge(b"challenge"); - assert!(challenge_bytes.as_ref().len() == 64); + + // Get a wide amount of bytes to safely reduce without bias + let target = ((usize::try_from(F::NUM_BITS).unwrap() + 7) / 8) * 2; + let mut challenge_bytes = transcript.challenge(b"challenge").as_ref().to_vec(); + while challenge_bytes.len() < target { + // Secure given transcripts updating on challenge + challenge_bytes.extend(transcript.challenge(b"challenge_extension").as_ref()); + } + challenge_bytes.truncate(target); let mut challenge = F::zero(); - for b in challenge_bytes.as_ref() { + for b in challenge_bytes { for _ in 0 .. 8 { challenge = challenge.double(); } - challenge += F::from(u64::from(*b)); + challenge += F::from(u64::from(b)); } challenge } diff --git a/crypto/dleq/src/tests/cross_group/mod.rs b/crypto/dleq/src/tests/cross_group/mod.rs index c201f497..9e7043f6 100644 --- a/crypto/dleq/src/tests/cross_group/mod.rs +++ b/crypto/dleq/src/tests/cross_group/mod.rs @@ -158,7 +158,7 @@ fn test_rejection_sampling() { // Either would work EfficientLinearDLEq::prove_without_bias( &mut OsRng, - &mut RecommendedTranscript::new(b""), + &mut transcript(), generators(), pow_2 ).is_none() From a4cd1755a5c14474875b0f87cf1ef788e28cade0 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 9 Jul 2022 02:01:22 -0400 Subject: [PATCH 099/105] Document dalek-ff-group Removes from_canonical_bytes, which is offered by from_repr, and from_bytes_mod_order, which frequently leads to security issues. Removes the pointless Compressed type. Adds From u8/u16/u32 as they're pleasant. 
--- crypto/dalek-ff-group/src/lib.rs | 54 ++++++++++++-------------------- 1 file changed, 20 insertions(+), 34 deletions(-) diff --git a/crypto/dalek-ff-group/src/lib.rs b/crypto/dalek-ff-group/src/lib.rs index 96a68501..66146d1b 100644 --- a/crypto/dalek-ff-group/src/lib.rs +++ b/crypto/dalek-ff-group/src/lib.rs @@ -32,6 +32,7 @@ use dalek::{ use ff::{Field, PrimeField, FieldBits, PrimeFieldBits}; use group::{Group, GroupEncoding, prime::PrimeGroup}; +// Convert a boolean to a Choice in a *presumably* constant time manner fn choice(value: bool) -> Choice { let bit = value as u8; debug_assert_eq!(bit | 1, 1); @@ -119,24 +120,19 @@ macro_rules! math { } } +/// Wrapper around the dalek Scalar type #[derive(Clone, Copy, PartialEq, Eq, Debug, Default)] pub struct Scalar(pub DScalar); deref_borrow!(Scalar, DScalar); math!(Scalar, Scalar, Scalar); impl Scalar { - pub fn from_canonical_bytes(bytes: [u8; 32]) -> Option { - DScalar::from_canonical_bytes(bytes).map(|x| Self(x)) - } - - pub fn from_bytes_mod_order(bytes: [u8; 32]) -> Scalar { - Self(DScalar::from_bytes_mod_order(bytes)) - } - + /// Perform wide reduction on a 64-byte array to create a Scalar without bias pub fn from_bytes_mod_order_wide(bytes: &[u8; 64]) -> Scalar { Self(DScalar::from_bytes_mod_order_wide(bytes)) } + /// Derive a Scalar without bias from a digest via wide reduction pub fn from_hash>(hash: D) -> Scalar { let mut output = [0u8; 64]; output.copy_from_slice(&hash.finalize()); @@ -174,6 +170,18 @@ impl Field for Scalar { fn pow_vartime>(&self, _exp: S) -> Self { unimplemented!() } } +impl From for Scalar { + fn from(a: u8) -> Scalar { Self(DScalar::from(a)) } +} + +impl From for Scalar { + fn from(a: u16) -> Scalar { Self(DScalar::from(a)) } +} + +impl From for Scalar { + fn from(a: u32) -> Scalar { Self(DScalar::from(a)) } +} + impl From for Scalar { fn from(a: u64) -> Scalar { Self(DScalar::from(a)) } } @@ -205,7 +213,7 @@ impl PrimeFieldBits for Scalar { fn char_le_bits() -> FieldBits { let 
mut bytes = (Scalar::zero() - Scalar::one()).to_repr(); bytes[0] += 1; - debug_assert_eq!(Scalar::from_bytes_mod_order(bytes), Scalar::zero()); + debug_assert_eq!(DScalar::from_bytes_mod_order(bytes), DScalar::zero()); bytes.into() } } @@ -219,12 +227,12 @@ macro_rules! dalek_group { $Table: ident, $DTable: ident, - $Compressed: ident, $DCompressed: ident, $BASEPOINT_POINT: ident, $BASEPOINT_TABLE: ident ) => { + /// Wrapper around the dalek Point type. For Ed25519, this is restricted to the prime subgroup #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct $Point(pub $DPoint); deref_borrow!($Point, $DPoint); @@ -271,28 +279,8 @@ macro_rules! dalek_group { impl PrimeGroup for $Point {} - pub struct $Compressed(pub $DCompressed); - deref_borrow!($Compressed, $DCompressed); - impl $Compressed { - pub fn new(y: [u8; 32]) -> $Compressed { - Self($DCompressed(y)) - } - - pub fn decompress(&self) -> Option<$Point> { - self.0.decompress().map(|x| $Point(x)) - } - - pub fn to_bytes(&self) -> [u8; 32] { - self.0.to_bytes() - } - } - - impl $Point { - pub fn compress(&self) -> $Compressed { - $Compressed(self.0.compress()) - } - } - + /// Wrapper around the dalek Table type, offering efficient multiplication against the + /// basepoint pub struct $Table(pub $DTable); deref_borrow!($Table, $DTable); pub const $BASEPOINT_TABLE: $Table = $Table(constants::$BASEPOINT_TABLE); @@ -312,7 +300,6 @@ dalek_group!( EdwardsBasepointTable, DEdwardsBasepointTable, - CompressedEdwardsY, DCompressedEdwards, ED25519_BASEPOINT_POINT, @@ -327,7 +314,6 @@ dalek_group!( RistrettoBasepointTable, DRistrettoBasepointTable, - CompressedRistretto, DCompressedRistretto, RISTRETTO_BASEPOINT_POINT, From 53267a46c815fd4a1c374a6a5ae9456720bf8342 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 9 Jul 2022 18:53:52 -0400 Subject: [PATCH 100/105] Return Timelocked instead of (Timelock, Vec) Timelocked doesn't expose the Vec yet requires calling not_locked(), unlocked(Timelock), or ignore_timelock(). 
--- coins/monero/src/transaction.rs | 13 +++++++++++++ coins/monero/src/wallet/scan.rs | 30 +++++++++++++++++++++++++++--- coins/monero/tests/send.rs | 4 ++-- processor/build.rs | 3 +++ processor/src/coin/monero.rs | 13 +++---------- 5 files changed, 48 insertions(+), 15 deletions(-) create mode 100644 processor/build.rs diff --git a/coins/monero/src/transaction.rs b/coins/monero/src/transaction.rs index 32c68750..9c3ea5bc 100644 --- a/coins/monero/src/transaction.rs +++ b/coins/monero/src/transaction.rs @@ -1,3 +1,5 @@ +use core::cmp::Ordering; + use curve25519_dalek::edwards::EdwardsPoint; use crate::{hash, serialize::*, ringct::{RctPrunable, RctSignatures}}; @@ -131,6 +133,17 @@ impl Timelock { } } +impl PartialOrd for Timelock { + fn partial_cmp(&self, other: &Self) -> Option { + match (self, other) { + (Timelock::None, _) => Some(Ordering::Less), + (Timelock::Block(a), Timelock::Block(b)) => a.partial_cmp(b), + (Timelock::Time(a), Timelock::Time(b)) => a.partial_cmp(b), + _ => None + } + } +} + #[derive(Clone, PartialEq, Debug)] pub struct TransactionPrefix { pub version: u64, diff --git a/coins/monero/src/wallet/scan.rs b/coins/monero/src/wallet/scan.rs index c813169e..8ca7797f 100644 --- a/coins/monero/src/wallet/scan.rs +++ b/coins/monero/src/wallet/scan.rs @@ -24,6 +24,30 @@ pub struct SpendableOutput { pub commitment: Commitment } +pub struct Timelocked(Timelock, Vec); +impl Timelocked { + pub fn timelock(&self) -> Timelock { + self.0 + } + + pub fn not_locked(&self) -> Vec { + if self.0 == Timelock::None { + return self.1.clone(); + } + vec![] + } + + /// Returns None if the Timelocks aren't comparable. 
Returns Some(vec![]) if none are unlocked + pub fn unlocked(&self, timelock: Timelock) -> Option> { + // If the Timelocks are comparable, return the outputs if they're now unlocked + self.0.partial_cmp(&timelock).filter(|_| self.0 <= timelock).map(|_| self.1.clone()) + } + + pub fn ignore_timelock(&self) -> Vec { + self.1.clone() + } +} + impl SpendableOutput { pub fn serialize(&self) -> Vec { let mut res = Vec::with_capacity(32 + 1 + 32 + 32 + 40); @@ -57,7 +81,7 @@ impl Transaction { &self, view: ViewPair, guaranteed: bool - ) -> (Vec, Timelock) { + ) -> Timelocked { let mut extra = vec![]; write_varint(&u64::try_from(self.prefix.extra.len()).unwrap(), &mut extra).unwrap(); extra.extend(&self.prefix.extra); @@ -75,7 +99,7 @@ impl Transaction { pubkeys = m_pubkeys.iter().map(|key| key.point.decompress()).filter_map(|key| key).collect(); } else { - return (vec![], self.prefix.timelock); + return Timelocked(self.prefix.timelock, vec![]); }; let mut res = vec![]; @@ -132,6 +156,6 @@ impl Transaction { } } - (res, self.prefix.timelock) + Timelocked(self.prefix.timelock, res) } } diff --git a/coins/monero/tests/send.rs b/coins/monero/tests/send.rs index a3585ce0..c875a023 100644 --- a/coins/monero/tests/send.rs +++ b/coins/monero/tests/send.rs @@ -91,7 +91,7 @@ async fn send_core(test: usize, multisig: bool) { // Grab the largest output available let output = { - let mut outputs = tx.as_ref().unwrap().scan(view_pair, false).0; + let mut outputs = tx.as_ref().unwrap().scan(view_pair, false).ignore_timelock(); outputs.sort_by(|x, y| x.commitment.amount.cmp(&y.commitment.amount).reverse()); outputs.swap_remove(0) }; @@ -116,7 +116,7 @@ async fn send_core(test: usize, multisig: bool) { for i in (start + 1) .. 
(start + 9) { let tx = rpc.get_block_transactions(i).await.unwrap().swap_remove(0); - let output = tx.scan(view_pair, false).0.swap_remove(0); + let output = tx.scan(view_pair, false).ignore_timelock().swap_remove(0); amount += output.commitment.amount; outputs.push(output); } diff --git a/processor/build.rs b/processor/build.rs new file mode 100644 index 00000000..a8fb5e40 --- /dev/null +++ b/processor/build.rs @@ -0,0 +1,3 @@ +fn main() { + println!("cargo:rustc-link-arg=-zmuldefs"); +} diff --git a/processor/src/coin/monero.rs b/processor/src/coin/monero.rs index 980dedde..192650cd 100644 --- a/processor/src/coin/monero.rs +++ b/processor/src/coin/monero.rs @@ -9,7 +9,7 @@ use transcript::RecommendedTranscript; use frost::{curve::Ed25519, FrostKeys}; use monero_serai::{ - transaction::{Timelock, Transaction}, + transaction::Transaction, rpc::Rpc, wallet::{ ViewPair, address::{Network, AddressType, Address}, @@ -126,14 +126,7 @@ impl Coin for Monero { async fn get_outputs(&self, block: &Self::Block, key: dfg::EdwardsPoint) -> Vec { block .iter() - .flat_map(|tx| { - let (outputs, timelock) = tx.scan(self.view_pair(key), true); - if timelock == Timelock::None { - outputs - } else { - vec![] - } - }) + .flat_map(|tx| tx.scan(self.view_pair(key), true).not_locked()) .map(Output::from) .collect() } @@ -215,7 +208,7 @@ impl Coin for Monero { let outputs = self.rpc .get_block_transactions_possible(height).await.unwrap() - .swap_remove(0).scan(self.empty_view_pair(), false).0; + .swap_remove(0).scan(self.empty_view_pair(), false).ignore_timelock(); let amount = outputs[0].commitment.amount; let fee = 1000000000; // TODO From 854fca3806aff228881f85528d4cc144637e1d3a Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 9 Jul 2022 21:51:39 -0400 Subject: [PATCH 101/105] Close https://github.com/serai-dex/serai/issues/30. An extremely minimal subset of Monero is now all that's built, and I'm sufficiently happy with it. 
--- coins/monero/Cargo.toml | 9 ++- coins/monero/build.rs | 110 +++++++++++++------------------------ coins/monero/c/wrapper.cpp | 70 ++++++++++++++++------- coins/monero/src/lib.rs | 27 +++++++++ processor/build.rs | 3 - 5 files changed, 121 insertions(+), 98 deletions(-) delete mode 100644 processor/build.rs diff --git a/coins/monero/Cargo.toml b/coins/monero/Cargo.toml index f62145a7..f262a3ae 100644 --- a/coins/monero/Cargo.toml +++ b/coins/monero/Cargo.toml @@ -19,6 +19,8 @@ rand_chacha = { version = "0.3", optional = true } rand = "0.8" rand_distr = "0.4" +subtle = "2.4" + tiny-keccak = { version = "2", features = ["keccak"] } blake2 = { version = "0.10", optional = true } @@ -31,13 +33,14 @@ transcript = { package = "flexible-transcript", path = "../../crypto/transcript" frost = { package = "modular-frost", path = "../../crypto/frost", features = ["ed25519"], optional = true } dleq = { path = "../../crypto/dleq", features = ["serialize"], optional = true } -base58-monero = "1" -monero = "0.16" - hex = "0.4" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" + +base58-monero = "1" monero-epee-bin-serde = "1.0" +monero = "0.16" + reqwest = { version = "0.11", features = ["json"] } [features] diff --git a/coins/monero/build.rs b/coins/monero/build.rs index d2b6f66d..dc5943b1 100644 --- a/coins/monero/build.rs +++ b/coins/monero/build.rs @@ -14,7 +14,6 @@ fn main() { // Use a file to signal if Monero was already built, as that should never be rebuilt // If the signaling file was deleted, run this script again to rebuild Monero though - // TODO: Move this signaling file into OUT_DIR once Monero is built statically successfully println!("cargo:rerun-if-changed=c/.build/monero"); if !Path::new("c/.build/monero").exists() { if !Command::new("make").arg(format!("-j{}", &env::var("THREADS").unwrap_or("2".to_string()))) @@ -28,81 +27,46 @@ fn main() { } } - println!("cargo:rerun-if-env-changed=OUT_DIR"); - if !Path::new( - &format!( - 
"{}/{}cncrypto.{}", - out_dir, - &env::consts::DLL_PREFIX, - &env::consts::DLL_EXTENSION - ) - ).exists() { - let mut paths = vec![ - "c/monero/build/release/contrib/epee/src/libepee.a".to_string(), - "c/monero/build/release/external/easylogging++/libeasylogging.a".to_string(), - "c/monero/build/release/external/randomx/librandomx.a".to_string() - ]; - - for (folder, lib) in [ - ("common", "common"), - ("crypto", "cncrypto"), - ("crypto/wallet", "wallet-crypto"), - ("cryptonote_basic", "cryptonote_basic"), - ("cryptonote_basic", "cryptonote_format_utils_basic"), - ("", "version"), - ("device", "device"), - ("ringct", "ringct_basic"), - ("ringct", "ringct") - ] { - paths.push( - format!( - "c/monero/build/release/src/{}/{}{}.a", - folder, - &env::consts::DLL_PREFIX, - lib - ) - ); - } - - for path in paths { - if !Command::new("cp").args(&[&path, out_dir]).status().unwrap().success() { - panic!("Failed to cp {}", path); - } - } - } - println!("cargo:rerun-if-changed=c/wrapper.cpp"); - if !Path::new(&format!("{}/{}wrapper.a", out_dir, &env::consts::DLL_PREFIX)).exists() { - cc::Build::new() - .file("c/wrapper.cpp") - .cpp(true) - .warnings(false) - .include("c/monero/contrib/epee/include") - .include("c/monero/src") - .compile("wrapper"); - } + cc::Build::new() + .static_flag(true) + .warnings(false) + .extra_warnings(false) + .flag("-Wno-deprecated-declarations") + + .include("c/monero/external/supercop/include") + .include("c/monero/contrib/epee/include") + .include("c/monero/src") + .include("c/monero/build/release/generated_include") + + .define("AUTO_INITIALIZE_EASYLOGGINGPP", None) + .include("c/monero/external/easylogging++") + .file("c/monero/external/easylogging++/easylogging++.cc") + + .file("c/monero/src/common/aligned.c") + .file("c/monero/src/common/perf_timer.cpp") + + .include("c/monero/src/crypto") + .file("c/monero/src/crypto/crypto-ops-data.c") + .file("c/monero/src/crypto/crypto-ops.c") + .file("c/monero/src/crypto/keccak.c") + 
.file("c/monero/src/crypto/hash.c") + + .include("c/monero/src/device") + .file("c/monero/src/device/device_default.cpp") + + .include("c/monero/src/ringct") + .file("c/monero/src/ringct/rctCryptoOps.c") + .file("c/monero/src/ringct/rctTypes.cpp") + .file("c/monero/src/ringct/rctOps.cpp") + .file("c/monero/src/ringct/multiexp.cc") + .file("c/monero/src/ringct/bulletproofs.cc") + .file("c/monero/src/ringct/rctSigs.cpp") + + .file("c/wrapper.cpp") + .compile("wrapper"); println!("cargo:rustc-link-search={}", out_dir); println!("cargo:rustc-link-lib=wrapper"); - println!("cargo:rustc-link-lib=ringct"); - println!("cargo:rustc-link-lib=ringct_basic"); - println!("cargo:rustc-link-lib=device"); - println!("cargo:rustc-link-lib=cryptonote_basic"); - println!("cargo:rustc-link-lib=cncrypto"); - println!("cargo:rustc-link-lib=cryptonote_format_utils_basic"); - println!("cargo:rustc-link-lib=version"); - println!("cargo:rustc-link-lib=wallet-crypto"); - println!("cargo:rustc-link-lib=easylogging"); - println!("cargo:rustc-link-lib=epee"); - println!("cargo:rustc-link-lib=common"); - println!("cargo:rustc-link-lib=randomx"); - println!("cargo:rustc-link-lib=unbound"); - println!("cargo:rustc-link-lib=sodium"); - println!("cargo:rustc-link-lib=boost_system"); - println!("cargo:rustc-link-lib=boost_thread"); - println!("cargo:rustc-link-lib=boost_filesystem"); - println!("cargo:rustc-link-lib=hidapi-hidraw"); println!("cargo:rustc-link-lib=stdc++"); - - println!("cargo:rustc-link-arg=-zmuldefs"); } diff --git a/coins/monero/c/wrapper.cpp b/coins/monero/c/wrapper.cpp index e99a363a..7ed31ac8 100644 --- a/coins/monero/c/wrapper.cpp +++ b/coins/monero/c/wrapper.cpp @@ -6,36 +6,50 @@ #include "ringct/rctSigs.h" typedef std::lock_guard lock; -std::mutex rng_mutex; +std::mutex rng_mutex; uint8_t rng_entropy[64]; -void rng(uint8_t* seed) { - // Set the first half to the seed - memcpy(rng_entropy, seed, 32); - // Set the second half to the hash of a DST to ensure a lack of collisions 
- crypto::cn_fast_hash("RNG_entropy_seed", 16, (char*) &rng_entropy[32]); -} extern "C" { - void generate_random_bytes_not_thread_safe(size_t n, uint8_t* value) { + void rng(uint8_t* seed) { + // Set the first half to the seed + memcpy(rng_entropy, seed, 32); + // Set the second half to the hash of a DST to ensure a lack of collisions + crypto::cn_fast_hash("RNG_entropy_seed", 16, (char*) &rng_entropy[32]); + } +} + +extern "C" void monero_wide_reduce(uint8_t* value); +namespace crypto { + void generate_random_bytes_not_thread_safe(size_t n, void* value) { size_t written = 0; while (written != n) { uint8_t hash[32]; crypto::cn_fast_hash(rng_entropy, 64, (char*) hash); // Step the RNG by setting the latter half to the most recent result - // Does not leak the RNG, even if the values are leaked (which they are expected to be) due to - // the first half remaining constant and undisclosed + // Does not leak the RNG, even if the values are leaked (which they are + // expected to be) due to the first half remaining constant and + // undisclosed memcpy(&rng_entropy[32], hash, 32); size_t next = n - written; if (next > 32) { next = 32; } - memcpy(&value[written], hash, next); + memcpy(&((uint8_t*) value)[written], hash, next); written += next; } } + void random32_unbiased(unsigned char *bytes) { + uint8_t value[64]; + generate_random_bytes_not_thread_safe(64, value); + monero_wide_reduce(value); + memcpy(bytes, value, 32); + } +} + +extern "C" { void c_hash_to_point(uint8_t* point) { rct::key key_point; ge_p3 e_p3; @@ -62,16 +76,24 @@ extern "C" { std::stringstream ss; binary_archive ba(ss); ::serialization::serialize(ba, bp); - uint8_t* res = (uint8_t*) calloc(ss.str().size(), 1); // malloc would also work + uint8_t* res = (uint8_t*) calloc(ss.str().size(), 1); memcpy(res, ss.str().data(), ss.str().size()); return res; } - bool c_verify_bp(uint8_t* seed, uint s_len, uint8_t* s, uint8_t c_len, uint8_t* c) { - // BPs are batch verified which use RNG based challenges to 
ensure individual integrity - // That's why this must also have control over RNG, to prevent interrupting multisig signing - // while not using known seeds. Considering this doesn't actually define a batch, - // and it's only verifying a single BP, it'd probably be fine, but... + bool c_verify_bp( + uint8_t* seed, + uint s_len, + uint8_t* s, + uint8_t c_len, + uint8_t* c + ) { + // BPs are batch verified which use RNG based weights to ensure individual + // integrity + // That's why this must also have control over RNG, to prevent interrupting + // multisig signing while not using known seeds. Considering this doesn't + // actually define a batch, and it's only verifying a single BP, + // it'd probably be fine, but... lock guard(rng_mutex); rng(seed); @@ -94,7 +116,15 @@ extern "C" { try { return rct::bulletproof_VERIFY(bp); } catch(...) { return false; } } - bool c_verify_clsag(uint s_len, uint8_t* s, uint8_t k_len, uint8_t* k, uint8_t* I, uint8_t* p, uint8_t* m) { + bool c_verify_clsag( + uint s_len, + uint8_t* s, + uint8_t k_len, + uint8_t* k, + uint8_t* I, + uint8_t* p, + uint8_t* m + ) { rct::clsag clsag; std::stringstream ss; std::string str; @@ -121,6 +151,8 @@ extern "C" { rct::key msg; memcpy(msg.bytes, m, 32); - try { return verRctCLSAGSimple(msg, clsag, keys, pseudo_out); } catch(...) { return false; } + try { + return verRctCLSAGSimple(msg, clsag, keys, pseudo_out); + } catch(...) { return false; } } } diff --git a/coins/monero/src/lib.rs b/coins/monero/src/lib.rs index 7425282d..8237b4f6 100644 --- a/coins/monero/src/lib.rs +++ b/coins/monero/src/lib.rs @@ -1,6 +1,10 @@ +use std::slice; + use lazy_static::lazy_static; use rand_core::{RngCore, CryptoRng}; +use subtle::ConstantTimeEq; + use tiny_keccak::{Hasher, Keccak}; use curve25519_dalek::{ @@ -32,6 +36,29 @@ lazy_static! { static ref H_TABLE: EdwardsBasepointTable = EdwardsBasepointTable::create(&*H); } +// Function from libsodium our subsection of Monero relies on. 
Implementing it here means we don't +// need to link against libsodium +#[no_mangle] +unsafe extern "C" fn crypto_verify_32(a: *const u8, b: *const u8) -> isize { + isize::from( + slice::from_raw_parts(a, 32).ct_eq(slice::from_raw_parts(b, 32)).unwrap_u8() + ) - 1 +} + +// Offer a wide reduction to C. Our seeded RNG prevented Monero from defining an unbiased scalar +// generation function, and in order to not use Monero code (which would require propagating its +// license), the function was rewritten. It was rewritten with wide reduction, instead of rejection +// sampling however, hence the need for this function +#[no_mangle] +unsafe extern "C" fn monero_wide_reduce(value: *mut u8) { + let res = Scalar::from_bytes_mod_order_wide( + std::slice::from_raw_parts(value, 64).try_into().unwrap() + ); + for (i, b) in res.to_bytes().iter().enumerate() { + value.add(i).write(*b); + } +} + #[allow(non_snake_case)] #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub struct Commitment { diff --git a/processor/build.rs b/processor/build.rs deleted file mode 100644 index a8fb5e40..00000000 --- a/processor/build.rs +++ /dev/null @@ -1,3 +0,0 @@ -fn main() { - println!("cargo:rustc-link-arg=-zmuldefs"); -} From 6ce506a79d27ac110e1e7332911092b222ed136a Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 10 Jul 2022 15:20:42 -0400 Subject: [PATCH 102/105] Add an implementation of Ed25519 FieldElements --- crypto/dalek-ff-group/Cargo.toml | 1 + crypto/dalek-ff-group/src/field.rs | 178 +++++++++++++++++++++++++++++ crypto/dalek-ff-group/src/lib.rs | 40 ++++--- 3 files changed, 203 insertions(+), 16 deletions(-) create mode 100644 crypto/dalek-ff-group/src/field.rs diff --git a/crypto/dalek-ff-group/Cargo.toml b/crypto/dalek-ff-group/Cargo.toml index 3b78578f..a0625ca1 100644 --- a/crypto/dalek-ff-group/Cargo.toml +++ b/crypto/dalek-ff-group/Cargo.toml @@ -17,4 +17,5 @@ subtle = "2.4" ff = "0.12" group = "0.12" +crypto-bigint = "0.4" curve25519-dalek = "3.2" diff --git 
a/crypto/dalek-ff-group/src/field.rs b/crypto/dalek-ff-group/src/field.rs new file mode 100644 index 00000000..2e7db7ca --- /dev/null +++ b/crypto/dalek-ff-group/src/field.rs @@ -0,0 +1,178 @@ +use core::ops::{Add, AddAssign, Sub, SubAssign, Neg, Mul, MulAssign}; + +use rand_core::RngCore; + +use subtle::{Choice, CtOption, ConstantTimeEq, ConditionallySelectable}; +use crypto_bigint::{Encoding, U256, U512}; + +use ff::{Field, PrimeField, FieldBits, PrimeFieldBits}; + +use crate::{choice, from_wrapper, from_uint}; + +const FIELD_MODULUS: U256 = U256::from_be_hex( + "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed" +); + +#[derive(Clone, Copy, PartialEq, Eq, Debug, Default)] +pub struct FieldElement(U256); + +pub const SQRT_M1: FieldElement = FieldElement( + U256::from_be_hex("2b8324804fc1df0b2b4d00993dfbd7a72f431806ad2fe478c4ee1b274a0ea0b0") +); + +macro_rules! math { + ($Op: ident, $op_fn: ident, $Assign: ident, $assign_fn: ident, $function: expr) => { + impl $Op for FieldElement { + type Output = Self; + fn $op_fn(self, other: FieldElement) -> Self::Output { + Self($function(&self.0, &other.0, &FIELD_MODULUS)) + } + } + impl $Assign for FieldElement { + fn $assign_fn(&mut self, other: FieldElement) { + self.0 = $function(&self.0, &other.0, &FIELD_MODULUS); + } + } + impl<'a> $Op<&'a FieldElement> for FieldElement { + type Output = Self; + fn $op_fn(self, other: &'a FieldElement) -> Self::Output { + Self($function(&self.0, &other.0, &FIELD_MODULUS)) + } + } + impl<'a> $Assign<&'a FieldElement> for FieldElement { + fn $assign_fn(&mut self, other: &'a FieldElement) { + self.0 = $function(&self.0, &other.0, &FIELD_MODULUS); + } + } + } +} +math!(Add, add, AddAssign, add_assign, U256::add_mod); +math!(Sub, sub, SubAssign, sub_assign, U256::sub_mod); +math!( + Mul, mul, + MulAssign, mul_assign, + |a, b, _: &U256| { + #[allow(non_snake_case)] + let WIDE_MODULUS: U512 = U512::from((U256::ZERO, FIELD_MODULUS)); + 
debug_assert_eq!(FIELD_MODULUS.to_le_bytes()[..], WIDE_MODULUS.to_le_bytes()[.. 32]); + + let wide = U256::mul_wide(a, b); + U256::from_le_slice( + &U512::from((wide.1, wide.0)).reduce(&WIDE_MODULUS).unwrap().to_le_bytes()[.. 32] + ) + } +); + +impl Neg for FieldElement { + type Output = Self; + fn neg(self) -> Self::Output { Self(self.0.neg_mod(&FIELD_MODULUS)) } +} + +impl ConstantTimeEq for FieldElement { + fn ct_eq(&self, other: &Self) -> Choice { self.0.ct_eq(&other.0) } +} + +impl ConditionallySelectable for FieldElement { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + FieldElement(U256::conditional_select(&a.0, &b.0, choice)) + } +} + +impl Field for FieldElement { + fn random(mut rng: impl RngCore) -> Self { + let mut bytes = [0; 64]; + rng.fill_bytes(&mut bytes); + + #[allow(non_snake_case)] + let WIDE_MODULUS: U512 = U512::from((U256::ZERO, FIELD_MODULUS)); + debug_assert_eq!(FIELD_MODULUS.to_le_bytes()[..], WIDE_MODULUS.to_le_bytes()[.. 32]); + + FieldElement( + U256::from_le_slice( + &U512::from_be_bytes(bytes).reduce(&WIDE_MODULUS).unwrap().to_le_bytes()[.. 
32] + ) + ) + } + + fn zero() -> Self { Self(U256::ZERO) } + fn one() -> Self { Self(U256::ONE) } + fn square(&self) -> Self { *self * self } + fn double(&self) -> Self { *self + self } + + fn invert(&self) -> CtOption { + CtOption::new(self.pow(-FieldElement(U256::from(2u64))), !self.is_zero()) + } + + fn sqrt(&self) -> CtOption { + let c1 = SQRT_M1; + let c2 = FIELD_MODULUS.saturating_add(&U256::from(3u8)).checked_div(&U256::from(8u8)).unwrap(); + + let tv1 = self.pow(FieldElement(c2)); + let tv2 = tv1 * c1; + let res = Self::conditional_select(&tv2, &tv1, tv1.square().ct_eq(self)); + debug_assert_eq!(res * res, *self); + CtOption::new(Self::conditional_select(&tv2, &tv1, tv1.square().ct_eq(self)), 1.into()) + } + + fn is_zero(&self) -> Choice { self.0.ct_eq(&U256::ZERO) } + fn cube(&self) -> Self { *self * self * self } + fn pow_vartime>(&self, _exp: S) -> Self { unimplemented!() } +} + +from_uint!(FieldElement, U256); + +impl PrimeField for FieldElement { + type Repr = [u8; 32]; + const NUM_BITS: u32 = 255; + const CAPACITY: u32 = 254; + fn from_repr(bytes: [u8; 32]) -> CtOption { + let res = Self(U256::from_le_bytes(bytes)); + CtOption::new(res, res.0.add_mod(&U256::ZERO, &FIELD_MODULUS).ct_eq(&res.0)) + } + fn to_repr(&self) -> [u8; 32] { self.0.to_le_bytes() } + + const S: u32 = 2; + fn is_odd(&self) -> Choice { unimplemented!() } + fn multiplicative_generator() -> Self { 2u64.into() } + fn root_of_unity() -> Self { + FieldElement( + U256::from_be_hex("2b8324804fc1df0b2b4d00993dfbd7a72f431806ad2fe478c4ee1b274a0ea0b0") + ) + } +} + +impl PrimeFieldBits for FieldElement { + type ReprBits = [u8; 32]; + + fn to_le_bits(&self) -> FieldBits { + self.to_repr().into() + } + + fn char_le_bits() -> FieldBits { + FIELD_MODULUS.to_le_bytes().into() + } +} + +impl FieldElement { + pub fn from_square(value: [u8; 32]) -> FieldElement { + let value = U256::from_le_bytes(value); + FieldElement(value) * FieldElement(value) + } + + pub fn pow(&self, other: FieldElement) -> 
FieldElement { + let mut res = FieldElement(U256::ONE); + let mut m = *self; + for bit in other.to_le_bits() { + res *= FieldElement::conditional_select(&FieldElement(U256::ONE), &m, choice(bit)); + m *= m; + } + res + } +} + +#[test] +fn test_mul() { + assert_eq!(FieldElement(FIELD_MODULUS) * FieldElement::one(), FieldElement::zero()); + assert_eq!(FieldElement(FIELD_MODULUS) * FieldElement::one().double(), FieldElement::zero()); + assert_eq!(FieldElement(SQRT_M1).square(), -FieldElement::one()); +} diff --git a/crypto/dalek-ff-group/src/lib.rs b/crypto/dalek-ff-group/src/lib.rs index 66146d1b..1e3d12b2 100644 --- a/crypto/dalek-ff-group/src/lib.rs +++ b/crypto/dalek-ff-group/src/lib.rs @@ -32,6 +32,8 @@ use dalek::{ use ff::{Field, PrimeField, FieldBits, PrimeFieldBits}; use group::{Group, GroupEncoding, prime::PrimeGroup}; +pub mod field; + // Convert a boolean to a Choice in a *presumably* constant time manner fn choice(value: bool) -> Choice { let bit = value as u8; @@ -120,11 +122,33 @@ macro_rules! math { } } +#[doc(hidden)] +#[macro_export] +macro_rules! from_wrapper { + ($wrapper: ident, $inner: ident, $uint: ident) => { + impl From<$uint> for $wrapper { + fn from(a: $uint) -> $wrapper { Self($inner::from(a)) } + } + } +} + +#[doc(hidden)] +#[macro_export] +macro_rules! 
from_uint { + ($wrapper: ident, $inner: ident) => { + from_wrapper!($wrapper, $inner, u8); + from_wrapper!($wrapper, $inner, u16); + from_wrapper!($wrapper, $inner, u32); + from_wrapper!($wrapper, $inner, u64); + } +} + /// Wrapper around the dalek Scalar type #[derive(Clone, Copy, PartialEq, Eq, Debug, Default)] pub struct Scalar(pub DScalar); deref_borrow!(Scalar, DScalar); math!(Scalar, Scalar, Scalar); +from_uint!(Scalar, DScalar); impl Scalar { /// Perform wide reduction on a 64-byte array to create a Scalar without bias @@ -170,22 +194,6 @@ impl Field for Scalar { fn pow_vartime>(&self, _exp: S) -> Self { unimplemented!() } } -impl From for Scalar { - fn from(a: u8) -> Scalar { Self(DScalar::from(a)) } -} - -impl From for Scalar { - fn from(a: u16) -> Scalar { Self(DScalar::from(a)) } -} - -impl From for Scalar { - fn from(a: u32) -> Scalar { Self(DScalar::from(a)) } -} - -impl From for Scalar { - fn from(a: u64) -> Scalar { Self(DScalar::from(a)) } -} - impl PrimeField for Scalar { type Repr = [u8; 32]; const NUM_BITS: u32 = 253; From 953a873338e98427e03473c20fb891625dc94426 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 10 Jul 2022 16:11:55 -0400 Subject: [PATCH 103/105] Implement hash_to_point in Rust Closes https://github.com/serai-dex/serai/issues/32. 
--- coins/monero/Cargo.toml | 6 +- coins/monero/src/lib.rs | 17 ------ coins/monero/src/ringct/clsag/mod.rs | 14 ++--- coins/monero/src/ringct/clsag/multisig.rs | 5 +- coins/monero/src/ringct/hash_to_point.rs | 67 +++++++++++++++++++++++ coins/monero/src/ringct/mod.rs | 11 +++- coins/monero/src/tests/clsag.rs | 6 +- coins/monero/src/tests/hash_to_point.rs | 13 +++++ coins/monero/src/tests/mod.rs | 1 + coins/monero/src/wallet/send/mod.rs | 6 +- 10 files changed, 108 insertions(+), 38 deletions(-) create mode 100644 coins/monero/src/ringct/hash_to_point.rs create mode 100644 coins/monero/src/tests/hash_to_point.rs diff --git a/coins/monero/Cargo.toml b/coins/monero/Cargo.toml index f262a3ae..10001ea5 100644 --- a/coins/monero/Cargo.toml +++ b/coins/monero/Cargo.toml @@ -26,9 +26,9 @@ blake2 = { version = "0.10", optional = true } curve25519-dalek = { version = "3", features = ["std"] } -group = { version = "0.12", optional = true } +group = { version = "0.12" } +dalek-ff-group = { path = "../../crypto/dalek-ff-group" } -dalek-ff-group = { path = "../../crypto/dalek-ff-group", optional = true } transcript = { package = "flexible-transcript", path = "../../crypto/transcript", features = ["recommended"], optional = true } frost = { package = "modular-frost", path = "../../crypto/frost", features = ["ed25519"], optional = true } dleq = { path = "../../crypto/dleq", features = ["serialize"], optional = true } @@ -45,7 +45,7 @@ reqwest = { version = "0.11", features = ["json"] } [features] experimental = [] -multisig = ["rand_chacha", "blake2", "group", "dalek-ff-group", "transcript", "frost", "dleq"] +multisig = ["rand_chacha", "blake2", "transcript", "frost", "dleq"] [dev-dependencies] sha2 = "0.10" diff --git a/coins/monero/src/lib.rs b/coins/monero/src/lib.rs index 8237b4f6..f92da959 100644 --- a/coins/monero/src/lib.rs +++ b/coins/monero/src/lib.rs @@ -98,20 +98,3 @@ pub fn hash(data: &[u8]) -> [u8; 32] { pub fn hash_to_scalar(data: &[u8]) -> Scalar { 
Scalar::from_bytes_mod_order(hash(&data)) } - -pub fn hash_to_point(point: &EdwardsPoint) -> EdwardsPoint { - let mut bytes = point.compress().to_bytes(); - unsafe { - #[link(name = "wrapper")] - extern "C" { - fn c_hash_to_point(point: *const u8); - } - - c_hash_to_point(bytes.as_mut_ptr()); - } - CompressedEdwardsY::from_slice(&bytes).decompress().unwrap() -} - -pub fn generate_key_image(secret: &Scalar) -> EdwardsPoint { - secret * hash_to_point(&(secret * &ED25519_BASEPOINT_TABLE)) -} diff --git a/coins/monero/src/ringct/clsag/mod.rs b/coins/monero/src/ringct/clsag/mod.rs index 80a50300..41f68c96 100644 --- a/coins/monero/src/ringct/clsag/mod.rs +++ b/coins/monero/src/ringct/clsag/mod.rs @@ -12,11 +12,11 @@ use curve25519_dalek::{ }; use crate::{ - Commitment, + Commitment, random_scalar, hash_to_scalar, + transaction::RING_LEN, wallet::decoys::Decoys, - random_scalar, hash_to_scalar, hash_to_point, - serialize::*, - transaction::RING_LEN + ringct::hash_to_point, + serialize::* }; #[cfg(feature = "multisig")] @@ -170,7 +170,7 @@ fn core( let c_c = mu_C * c; let L = (&s[i] * &ED25519_BASEPOINT_TABLE) + (c_p * P[i]) + (c_c * C[i]); - let PH = hash_to_point(&P[i]); + let PH = hash_to_point(P[i]); // Shouldn't be an issue as all of the variables in this vartime statement are public let R = (s[i] * PH) + images_precomp.vartime_multiscalar_mul(&[c_p, c_c]); @@ -208,7 +208,7 @@ impl Clsag { let pseudo_out = Commitment::new(mask, input.commitment.amount).calculate(); let z = input.commitment.mask - mask; - let H = hash_to_point(&input.decoys.ring[r][0]); + let H = hash_to_point(input.decoys.ring[r][0]); let D = H * z; let mut s = Vec::with_capacity(input.decoys.ring.len()); for _ in 0 .. 
input.decoys.ring.len() { @@ -254,7 +254,7 @@ impl Clsag { mask, &msg, &nonce * &ED25519_BASEPOINT_TABLE, - nonce * hash_to_point(&inputs[i].2.decoys.ring[usize::from(inputs[i].2.decoys.i)][0]) + nonce * hash_to_point(inputs[i].2.decoys.ring[usize::from(inputs[i].2.decoys.i)][0]) ); clsag.s[usize::from(inputs[i].2.decoys.i)] = nonce - ((p * inputs[i].0) + c); diff --git a/coins/monero/src/ringct/clsag/multisig.rs b/coins/monero/src/ringct/clsag/multisig.rs index 19fff6a7..8365ff6d 100644 --- a/coins/monero/src/ringct/clsag/multisig.rs +++ b/coins/monero/src/ringct/clsag/multisig.rs @@ -18,9 +18,8 @@ use frost::{curve::Ed25519, FrostError, FrostView, algorithm::Algorithm}; use dalek_ff_group as dfg; use crate::{ - hash_to_point, frost::{MultisigError, write_dleq, read_dleq}, - ringct::clsag::{ClsagInput, Clsag} + ringct::{hash_to_point, clsag::{ClsagInput, Clsag}} }; impl ClsagInput { @@ -129,7 +128,7 @@ impl Algorithm for ClsagMultisig { view: &FrostView, nonces: &[dfg::Scalar; 2] ) -> Vec { - self.H = hash_to_point(&view.group_key().0); + self.H = hash_to_point(view.group_key().0); let mut serialized = Vec::with_capacity(ClsagMultisig::serialized_len()); serialized.extend((view.secret_share().0 * self.H).compress().to_bytes()); diff --git a/coins/monero/src/ringct/hash_to_point.rs b/coins/monero/src/ringct/hash_to_point.rs new file mode 100644 index 00000000..946307c4 --- /dev/null +++ b/coins/monero/src/ringct/hash_to_point.rs @@ -0,0 +1,67 @@ +use subtle::ConditionallySelectable; + +use curve25519_dalek::edwards::{CompressedEdwardsY, EdwardsPoint}; + +use group::ff::{Field, PrimeField}; +use dalek_ff_group::field::FieldElement; + +use crate::hash; + +pub fn hash_to_point(point: EdwardsPoint) -> EdwardsPoint { + let mut bytes = point.compress().to_bytes(); + unsafe { + #[link(name = "wrapper")] + extern "C" { + fn c_hash_to_point(point: *const u8); + } + + c_hash_to_point(bytes.as_mut_ptr()); + } + CompressedEdwardsY::from_slice(&bytes).decompress().unwrap() +} + 
+// This works without issue. It's also 140 times slower (@ 3.5ms), and despite checking it passes +// for all branches, there still could be *some* discrepancy somewhere. There's no reason to use it +// unless we're trying to purge that section of the C static library, which we aren't right now +#[allow(dead_code)] +pub(crate) fn rust_hash_to_point(key: EdwardsPoint) -> EdwardsPoint { + #[allow(non_snake_case)] + let A = FieldElement::from(486662u64); + + let v = FieldElement::from_square(hash(&key.compress().to_bytes())).double(); + let w = v + FieldElement::one(); + let x = w.square() + (-A.square() * v); + + // This isn't the complete X, yet its initial value + // We don't calculate the full X, and instead solely calculate Y, letting dalek reconstruct X + // While inefficient, it solves API boundaries and reduces the amount of work done here + #[allow(non_snake_case)] + let X = { + let u = w; + let v = x; + let v3 = v * v * v; + let uv3 = u * v3; + let v7 = v3 * v3 * v; + let uv7 = u * v7; + uv3 * uv7.pow((-FieldElement::from(5u8)) * FieldElement::from(8u8).invert().unwrap()) + }; + let x = X.square() * x; + + let y = w - x; + let non_zero_0 = !y.is_zero(); + let y_if_non_zero_0 = w + x; + let sign = non_zero_0 & (!y_if_non_zero_0.is_zero()); + + let mut z = -A; + z *= FieldElement::conditional_select(&v, &FieldElement::from(1u8), sign); + #[allow(non_snake_case)] + let Z = z + w; + #[allow(non_snake_case)] + let mut Y = z - w; + + Y = Y * Z.invert().unwrap(); + let mut bytes = Y.to_repr(); + bytes[31] |= sign.unwrap_u8() << 7; + + CompressedEdwardsY(bytes).decompress().unwrap().mul_by_cofactor() +} diff --git a/coins/monero/src/ringct/mod.rs b/coins/monero/src/ringct/mod.rs index dbfc0fad..4935265e 100644 --- a/coins/monero/src/ringct/mod.rs +++ b/coins/monero/src/ringct/mod.rs @@ -1,13 +1,20 @@ -use curve25519_dalek::edwards::EdwardsPoint; +use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint}; + +pub(crate) mod 
hash_to_point; +pub use hash_to_point::hash_to_point; -pub mod bulletproofs; pub mod clsag; +pub mod bulletproofs; use crate::{ serialize::*, ringct::{clsag::Clsag, bulletproofs::Bulletproofs} }; +pub fn generate_key_image(secret: Scalar) -> EdwardsPoint { + secret * hash_to_point(&secret * &ED25519_BASEPOINT_TABLE) +} + #[derive(Clone, PartialEq, Debug)] pub struct RctBase { pub fee: u64, diff --git a/coins/monero/src/tests/clsag.rs b/coins/monero/src/tests/clsag.rs index 66644a50..17cb9940 100644 --- a/coins/monero/src/tests/clsag.rs +++ b/coins/monero/src/tests/clsag.rs @@ -12,9 +12,9 @@ use frost::curve::Ed25519; use crate::{ Commitment, - random_scalar, generate_key_image, + random_scalar, wallet::Decoys, - ringct::clsag::{ClsagInput, Clsag} + ringct::{generate_key_image, clsag::{ClsagInput, Clsag}} }; #[cfg(feature = "multisig")] use crate::{frost::MultisigError, ringct::clsag::{ClsagDetails, ClsagMultisig}}; @@ -48,7 +48,7 @@ fn clsag() { ring.push([&dest * &ED25519_BASEPOINT_TABLE, Commitment::new(mask, amount).calculate()]); } - let image = generate_key_image(&secrets[0]); + let image = generate_key_image(secrets[0]); let (clsag, pseudo_out) = Clsag::sign( &mut OsRng, &vec![( diff --git a/coins/monero/src/tests/hash_to_point.rs b/coins/monero/src/tests/hash_to_point.rs new file mode 100644 index 00000000..b04bbb9f --- /dev/null +++ b/coins/monero/src/tests/hash_to_point.rs @@ -0,0 +1,13 @@ +use rand::rngs::OsRng; + +use curve25519_dalek::constants::ED25519_BASEPOINT_TABLE; + +use crate::{random_scalar, ringct::hash_to_point::{hash_to_point, rust_hash_to_point}}; + +#[test] +fn test_hash_to_point() { + for _ in 0 .. 
200 { + let point = &random_scalar(&mut OsRng) * &ED25519_BASEPOINT_TABLE; + assert_eq!(rust_hash_to_point(point), hash_to_point(point)); + } +} diff --git a/coins/monero/src/tests/mod.rs b/coins/monero/src/tests/mod.rs index d9b85f0c..0ef934c8 100644 --- a/coins/monero/src/tests/mod.rs +++ b/coins/monero/src/tests/mod.rs @@ -1,2 +1,3 @@ +mod hash_to_point; mod clsag; mod address; diff --git a/coins/monero/src/wallet/send/mod.rs b/coins/monero/src/wallet/send/mod.rs index cf9ab33f..16cdca6c 100644 --- a/coins/monero/src/wallet/send/mod.rs +++ b/coins/monero/src/wallet/send/mod.rs @@ -17,8 +17,8 @@ use frost::FrostError; use crate::{ Commitment, random_scalar, - generate_key_image, ringct::{ + generate_key_image, clsag::{ClsagError, ClsagInput, Clsag}, bulletproofs::{MAX_OUTPUTS, Bulletproofs}, RctBase, RctPrunable, RctSignatures @@ -126,7 +126,7 @@ async fn prepare_inputs( for (i, input) in inputs.iter().enumerate() { signable.push(( spend + input.key_offset, - generate_key_image(&(spend + input.key_offset)), + generate_key_image(spend + input.key_offset), ClsagInput::new( input.commitment, decoys[i].clone() @@ -337,7 +337,7 @@ impl SignableTransaction { Err(TransactionError::WrongPrivateKey)?; } - images.push(generate_key_image(&offset)); + images.push(generate_key_image(offset)); } images.sort_by(key_image_sort); From cf28967754bc6b554c9b7ab7c7f08af6f59b0140 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 10 Jul 2022 16:48:08 -0400 Subject: [PATCH 104/105] Consolidate macros in dalek-ff-group --- crypto/dalek-ff-group/src/field.rs | 56 +++--------- crypto/dalek-ff-group/src/lib.rs | 131 ++++++++++++++++------------- 2 files changed, 81 insertions(+), 106 deletions(-) diff --git a/crypto/dalek-ff-group/src/field.rs b/crypto/dalek-ff-group/src/field.rs index 2e7db7ca..63a6a817 100644 --- a/crypto/dalek-ff-group/src/field.rs +++ b/crypto/dalek-ff-group/src/field.rs @@ -7,7 +7,7 @@ use crypto_bigint::{Encoding, U256, U512}; use ff::{Field, PrimeField, 
FieldBits, PrimeFieldBits}; -use crate::{choice, from_wrapper, from_uint}; +use crate::{choice, constant_time, math_op, math, from_wrapper, from_uint}; const FIELD_MODULUS: U256 = U256::from_be_hex( "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed" @@ -20,64 +20,30 @@ pub const SQRT_M1: FieldElement = FieldElement( U256::from_be_hex("2b8324804fc1df0b2b4d00993dfbd7a72f431806ad2fe478c4ee1b274a0ea0b0") ); -macro_rules! math { - ($Op: ident, $op_fn: ident, $Assign: ident, $assign_fn: ident, $function: expr) => { - impl $Op for FieldElement { - type Output = Self; - fn $op_fn(self, other: FieldElement) -> Self::Output { - Self($function(&self.0, &other.0, &FIELD_MODULUS)) - } - } - impl $Assign for FieldElement { - fn $assign_fn(&mut self, other: FieldElement) { - self.0 = $function(&self.0, &other.0, &FIELD_MODULUS); - } - } - impl<'a> $Op<&'a FieldElement> for FieldElement { - type Output = Self; - fn $op_fn(self, other: &'a FieldElement) -> Self::Output { - Self($function(&self.0, &other.0, &FIELD_MODULUS)) - } - } - impl<'a> $Assign<&'a FieldElement> for FieldElement { - fn $assign_fn(&mut self, other: &'a FieldElement) { - self.0 = $function(&self.0, &other.0, &FIELD_MODULUS); - } - } - } -} -math!(Add, add, AddAssign, add_assign, U256::add_mod); -math!(Sub, sub, SubAssign, sub_assign, U256::sub_mod); +constant_time!(FieldElement, U256); math!( - Mul, mul, - MulAssign, mul_assign, - |a, b, _: &U256| { + FieldElement, + FieldElement, + |x, y| U256::add_mod(&x, &y, &FIELD_MODULUS), + |x, y| U256::sub_mod(&x, &y, &FIELD_MODULUS), + |x, y| { #[allow(non_snake_case)] let WIDE_MODULUS: U512 = U512::from((U256::ZERO, FIELD_MODULUS)); debug_assert_eq!(FIELD_MODULUS.to_le_bytes()[..], WIDE_MODULUS.to_le_bytes()[.. 32]); - let wide = U256::mul_wide(a, b); + let wide = U256::mul_wide(&x, &y); U256::from_le_slice( &U512::from((wide.1, wide.0)).reduce(&WIDE_MODULUS).unwrap().to_le_bytes()[.. 
32] ) } ); +from_uint!(FieldElement, U256); impl Neg for FieldElement { type Output = Self; fn neg(self) -> Self::Output { Self(self.0.neg_mod(&FIELD_MODULUS)) } } -impl ConstantTimeEq for FieldElement { - fn ct_eq(&self, other: &Self) -> Choice { self.0.ct_eq(&other.0) } -} - -impl ConditionallySelectable for FieldElement { - fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { - FieldElement(U256::conditional_select(&a.0, &b.0, choice)) - } -} - impl Field for FieldElement { fn random(mut rng: impl RngCore) -> Self { let mut bytes = [0; 64]; @@ -119,8 +85,6 @@ impl Field for FieldElement { fn pow_vartime>(&self, _exp: S) -> Self { unimplemented!() } } -from_uint!(FieldElement, U256); - impl PrimeField for FieldElement { type Repr = [u8; 32]; const NUM_BITS: u32 = 255; @@ -174,5 +138,5 @@ impl FieldElement { fn test_mul() { assert_eq!(FieldElement(FIELD_MODULUS) * FieldElement::one(), FieldElement::zero()); assert_eq!(FieldElement(FIELD_MODULUS) * FieldElement::one().double(), FieldElement::zero()); - assert_eq!(FieldElement(SQRT_M1).square(), -FieldElement::one()); + assert_eq!(SQRT_M1.square(), -FieldElement::one()); } diff --git a/crypto/dalek-ff-group/src/lib.rs b/crypto/dalek-ff-group/src/lib.rs index 1e3d12b2..590485d7 100644 --- a/crypto/dalek-ff-group/src/lib.rs +++ b/crypto/dalek-ff-group/src/lib.rs @@ -6,10 +6,12 @@ use core::{ iter::{Iterator, Sum} }; +use subtle::{ConstantTimeEq, ConditionallySelectable}; + use rand_core::RngCore; use digest::{consts::U64, Digest}; -use subtle::{Choice, CtOption, ConstantTimeEq, ConditionallySelectable}; +use subtle::{Choice, CtOption}; pub use curve25519_dalek as dalek; @@ -65,60 +67,77 @@ macro_rules! deref_borrow { } } +#[doc(hidden)] +#[macro_export] +macro_rules! 
constant_time { + ($Value: ident, $Inner: ident) => { + impl ConstantTimeEq for $Value { + fn ct_eq(&self, other: &Self) -> Choice { self.0.ct_eq(&other.0) } + } + + impl ConditionallySelectable for $Value { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + $Value($Inner::conditional_select(&a.0, &b.0, choice)) + } + } + } +} + +#[doc(hidden)] +#[macro_export] +macro_rules! math_op { + ( + $Value: ident, + $Other: ident, + $Op: ident, + $op_fn: ident, + $Assign: ident, + $assign_fn: ident, + $function: expr + ) => { + impl $Op<$Other> for $Value { + type Output = $Value; + fn $op_fn(self, other: $Other) -> Self::Output { + Self($function(self.0, other.0)) + } + } + impl $Assign<$Other> for $Value { + fn $assign_fn(&mut self, other: $Other) { + self.0 = $function(self.0, other.0); + } + } + impl<'a> $Op<&'a $Other> for $Value { + type Output = $Value; + fn $op_fn(self, other: &'a $Other) -> Self::Output { + Self($function(self.0, other.0)) + } + } + impl<'a> $Assign<&'a $Other> for $Value { + fn $assign_fn(&mut self, other: &'a $Other) { + self.0 = $function(self.0, other.0); + } + } + } +} + +#[doc(hidden)] +#[macro_export] macro_rules! 
math { - ($Value: ident, $Factor: ident, $Product: ident) => { - impl Add<$Value> for $Value { - type Output = Self; - fn add(self, other: $Value) -> Self::Output { Self(self.0 + other.0) } - } - impl AddAssign for $Value { - fn add_assign(&mut self, other: $Value) { self.0 += other.0 } - } + ($Value: ident, $Factor: ident, $add: expr, $sub: expr, $mul: expr) => { + math_op!($Value, $Value, Add, add, AddAssign, add_assign, $add); + math_op!($Value, $Value, Sub, sub, SubAssign, sub_assign, $sub); + math_op!($Value, $Factor, Mul, mul, MulAssign, mul_assign, $mul); + } +} - impl<'a> Add<&'a $Value> for $Value { - type Output = Self; - fn add(self, other: &'a $Value) -> Self::Output { Self(self.0 + other.0) } - } - impl<'a> AddAssign<&'a $Value> for $Value { - fn add_assign(&mut self, other: &'a $Value) { self.0 += other.0 } - } - - impl Sub<$Value> for $Value { - type Output = Self; - fn sub(self, other: $Value) -> Self::Output { Self(self.0 - other.0) } - } - impl SubAssign for $Value { - fn sub_assign(&mut self, other: $Value) { self.0 -= other.0 } - } - - impl<'a> Sub<&'a $Value> for $Value { - type Output = Self; - fn sub(self, other: &'a $Value) -> Self::Output { Self(self.0 - other.0) } - } - impl<'a> SubAssign<&'a $Value> for $Value { - fn sub_assign(&mut self, other: &'a $Value) { self.0 -= other.0 } - } +macro_rules! 
math_neg { + ($Value: ident, $Factor: ident, $add: expr, $sub: expr, $mul: expr) => { + math!($Value, $Factor, $add, $sub, $mul); impl Neg for $Value { type Output = Self; fn neg(self) -> Self::Output { Self(-self.0) } } - - impl Mul<$Factor> for $Value { - type Output = $Product; - fn mul(self, other: $Factor) -> Self::Output { Self(self.0 * other.0) } - } - impl MulAssign<$Factor> for $Value { - fn mul_assign(&mut self, other: $Factor) { self.0 *= other.0 } - } - - impl<'a> Mul<&'a $Factor> for $Value { - type Output = Self; - fn mul(self, b: &'a $Factor) -> $Product { Self(b.0 * self.0) } - } - impl<'a> MulAssign<&'a $Factor> for $Value { - fn mul_assign(&mut self, other: &'a $Factor) { self.0 *= other.0 } - } } } @@ -147,7 +166,8 @@ macro_rules! from_uint { #[derive(Clone, Copy, PartialEq, Eq, Debug, Default)] pub struct Scalar(pub DScalar); deref_borrow!(Scalar, DScalar); -math!(Scalar, Scalar, Scalar); +constant_time!(Scalar, DScalar); +math_neg!(Scalar, Scalar, DScalar::add, DScalar::sub, DScalar::mul); from_uint!(Scalar, DScalar); impl Scalar { @@ -164,16 +184,6 @@ impl Scalar { } } -impl ConstantTimeEq for Scalar { - fn ct_eq(&self, other: &Self) -> Choice { self.0.ct_eq(&other.0) } -} - -impl ConditionallySelectable for Scalar { - fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { - Scalar(DScalar::conditional_select(a, b, choice)) - } -} - impl Field for Scalar { fn random(mut rng: impl RngCore) -> Self { let mut r = [0; 64]; @@ -244,7 +254,8 @@ macro_rules! 
dalek_group { #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct $Point(pub $DPoint); deref_borrow!($Point, $DPoint); - math!($Point, Scalar, $Point); + constant_time!($Point, $DPoint); + math_neg!($Point, Scalar, $DPoint::add, $DPoint::sub, $DPoint::mul); pub const $BASEPOINT_POINT: $Point = $Point(constants::$BASEPOINT_POINT); From 5eb61f3a8778766f520887529e0ad1efa608d1c6 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 12 Jul 2022 01:28:01 -0400 Subject: [PATCH 105/105] Fix https://github.com/serai-dex/serai/issues/14. --- coins/monero/src/frost.rs | 25 ++-- coins/monero/src/ringct/clsag/multisig.rs | 66 ++++------ coins/monero/src/tests/clsag.rs | 3 +- coins/monero/src/wallet/send/multisig.rs | 12 +- coins/monero/tests/send.rs | 2 +- crypto/frost/Cargo.toml | 2 + crypto/frost/src/algorithm.rs | 43 ++++--- crypto/frost/src/sign.rs | 147 ++++++++++++++++------ crypto/frost/src/tests/vectors.rs | 2 +- crypto/transcript/src/lib.rs | 15 ++- crypto/transcript/src/merlin.rs | 4 + 11 files changed, 198 insertions(+), 123 deletions(-) diff --git a/coins/monero/src/frost.rs b/coins/monero/src/frost.rs index ea36be25..82b61a65 100644 --- a/coins/monero/src/frost.rs +++ b/coins/monero/src/frost.rs @@ -7,7 +7,7 @@ use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint}; use group::{Group, GroupEncoding}; -use transcript::RecommendedTranscript; +use transcript::{Transcript, RecommendedTranscript}; use dalek_ff_group as dfg; use dleq::{Generators, DLEqProof}; @@ -21,6 +21,10 @@ pub enum MultisigError { InvalidKeyImage(u16) } +fn transcript() -> RecommendedTranscript { + RecommendedTranscript::new(b"monero_key_image_dleq") +} + #[allow(non_snake_case)] pub(crate) fn write_dleq( rng: &mut R, @@ -35,7 +39,7 @@ pub(crate) fn write_dleq( // the proper order if they want to reach consensus // It'd be a poor API to have CLSAG define a new transcript solely to pass here, just to try to // merge later in some form, when it should instead just merge xH (as it does) - 
&mut RecommendedTranscript::new(b"DLEq Proof"), + &mut transcript(), Generators::new(dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(H)), dfg::Scalar(x) ).serialize(&mut res).unwrap(); @@ -45,16 +49,15 @@ pub(crate) fn write_dleq( #[allow(non_snake_case)] pub(crate) fn read_dleq( serialized: &[u8], - start: usize, H: EdwardsPoint, l: u16, xG: dfg::EdwardsPoint ) -> Result { - if serialized.len() < start + 96 { + if serialized.len() != 96 { Err(MultisigError::InvalidDLEqProof(l))?; } - let bytes = (&serialized[(start + 0) .. (start + 32)]).try_into().unwrap(); + let bytes = (&serialized[.. 32]).try_into().unwrap(); // dfg ensures the point is torsion free let xH = Option::::from( dfg::EdwardsPoint::from_bytes(&bytes)).ok_or(MultisigError::InvalidDLEqProof(l) @@ -64,13 +67,13 @@ pub(crate) fn read_dleq( Err(MultisigError::InvalidDLEqProof(l))?; } - let proof = DLEqProof::::deserialize( - &mut Cursor::new(&serialized[(start + 32) .. (start + 96)]) + DLEqProof::::deserialize( + &mut Cursor::new(&serialized[32 ..]) + ).map_err(|_| MultisigError::InvalidDLEqProof(l))?.verify( + &mut transcript(), + Generators::new(dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(H)), + (xG, xH) ).map_err(|_| MultisigError::InvalidDLEqProof(l))?; - let mut transcript = RecommendedTranscript::new(b"DLEq Proof"); - proof.verify(&mut transcript, Generators::new(dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(H)), (xG, xH)) - .map_err(|_| MultisigError::InvalidDLEqProof(l))?; - Ok(xH) } diff --git a/coins/monero/src/ringct/clsag/multisig.rs b/coins/monero/src/ringct/clsag/multisig.rs index 8365ff6d..5d5748ea 100644 --- a/coins/monero/src/ringct/clsag/multisig.rs +++ b/coins/monero/src/ringct/clsag/multisig.rs @@ -6,7 +6,7 @@ use rand_chacha::ChaCha12Rng; use curve25519_dalek::{ constants::ED25519_BASEPOINT_TABLE, - traits::Identity, + traits::{Identity, IsIdentity}, scalar::Scalar, edwards::EdwardsPoint }; @@ -76,7 +76,6 @@ pub struct ClsagMultisig { H: EdwardsPoint, // Merged here as 
CLSAG needs it, passing it would be a mess, yet having it beforehand requires a round image: EdwardsPoint, - AH: (dfg::EdwardsPoint, dfg::EdwardsPoint), details: Arc>>, @@ -87,15 +86,15 @@ pub struct ClsagMultisig { impl ClsagMultisig { pub fn new( transcript: RecommendedTranscript, + output_key: EdwardsPoint, details: Arc>> ) -> Result { Ok( ClsagMultisig { transcript, - H: EdwardsPoint::identity(), + H: hash_to_point(output_key), image: EdwardsPoint::identity(), - AH: (dfg::EdwardsPoint::identity(), dfg::EdwardsPoint::identity()), details, @@ -106,7 +105,7 @@ impl ClsagMultisig { } pub fn serialized_len() -> usize { - 3 * (32 + 64) + 32 + (2 * 32) } fn input(&self) -> ClsagInput { @@ -122,22 +121,18 @@ impl Algorithm for ClsagMultisig { type Transcript = RecommendedTranscript; type Signature = (Clsag, EdwardsPoint); + fn nonces(&self) -> Vec> { + vec![vec![dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)]] + } + fn preprocess_addendum( &mut self, rng: &mut R, - view: &FrostView, - nonces: &[dfg::Scalar; 2] + view: &FrostView ) -> Vec { - self.H = hash_to_point(view.group_key().0); - - let mut serialized = Vec::with_capacity(ClsagMultisig::serialized_len()); + let mut serialized = Vec::with_capacity(Self::serialized_len()); serialized.extend((view.secret_share().0 * self.H).compress().to_bytes()); serialized.extend(write_dleq(rng, self.H, view.secret_share().0)); - - serialized.extend((nonces[0].0 * self.H).compress().to_bytes()); - serialized.extend(write_dleq(rng, self.H, nonces[0].0)); - serialized.extend((nonces[1].0 * self.H).compress().to_bytes()); - serialized.extend(write_dleq(rng, self.H, nonces[1].0)); serialized } @@ -145,42 +140,27 @@ impl Algorithm for ClsagMultisig { &mut self, view: &FrostView, l: u16, - commitments: &[dfg::EdwardsPoint; 2], serialized: &[u8] ) -> Result<(), FrostError> { - if serialized.len() != ClsagMultisig::serialized_len() { + if serialized.len() != Self::serialized_len() { // Not an optimal error but... 
Err(FrostError::InvalidCommitment(l))?; } - if self.AH.0.is_identity().into() { + if self.image.is_identity().into() { self.transcript.domain_separate(b"CLSAG"); self.input().transcript(&mut self.transcript); self.transcript.append_message(b"mask", &self.mask().to_bytes()); } - // Uses the same format FROST does for the expected commitments (nonce * G where this is nonce * H) - // The following technically shouldn't need to be committed to, as we've committed to equivalents, - // yet it doesn't hurt and may resolve some unknown issues self.transcript.append_message(b"participant", &l.to_be_bytes()); - - let mut cursor = 0; - self.transcript.append_message(b"image_share", &serialized[cursor .. (cursor + 32)]); + self.transcript.append_message(b"key_image_share", &serialized[.. 32]); self.image += read_dleq( serialized, - cursor, self.H, l, view.verification_share(l) ).map_err(|_| FrostError::InvalidCommitment(l))?.0; - cursor += 96; - - self.transcript.append_message(b"commitment_D_H", &serialized[cursor .. (cursor + 32)]); - self.AH.0 += read_dleq(serialized, cursor, self.H, l, commitments[0]).map_err(|_| FrostError::InvalidCommitment(l))?; - cursor += 96; - - self.transcript.append_message(b"commitment_E_H", &serialized[cursor .. 
(cursor + 32)]); - self.AH.1 += read_dleq(serialized, cursor, self.H, l, commitments[1]).map_err(|_| FrostError::InvalidCommitment(l))?; Ok(()) } @@ -192,14 +172,10 @@ impl Algorithm for ClsagMultisig { fn sign_share( &mut self, view: &FrostView, - nonce_sum: dfg::EdwardsPoint, - b: dfg::Scalar, - nonce: dfg::Scalar, + nonce_sums: &[Vec], + nonces: &[dfg::Scalar], msg: &[u8] ) -> dfg::Scalar { - // Apply the binding factor to the H variant of the nonce - self.AH.0 += self.AH.1 * b; - // Use the transcript to get a seeded random number generator // The transcript contains private data, preventing passive adversaries from recreating this // process even if they have access to commitments (specifically, the ring index being signed @@ -216,12 +192,12 @@ impl Algorithm for ClsagMultisig { &self.input(), self.mask(), &self.msg.as_ref().unwrap(), - nonce_sum.0, - self.AH.0.0 + nonce_sums[0][0].0, + nonce_sums[0][1].0 ); self.interim = Some(Interim { p, c, clsag, pseudo_out }); - let share = dfg::Scalar(nonce.0 - (p * view.secret_share().0)); + let share = dfg::Scalar(nonces[0].0 - (p * view.secret_share().0)); share } @@ -230,7 +206,7 @@ impl Algorithm for ClsagMultisig { fn verify( &self, _: dfg::EdwardsPoint, - _: dfg::EdwardsPoint, + _: &[Vec], sum: dfg::Scalar ) -> Option { let interim = self.interim.as_ref().unwrap(); @@ -251,12 +227,12 @@ impl Algorithm for ClsagMultisig { fn verify_share( &self, verification_share: dfg::EdwardsPoint, - nonce: dfg::EdwardsPoint, + nonces: &[Vec], share: dfg::Scalar, ) -> bool { let interim = self.interim.as_ref().unwrap(); return (&share.0 * &ED25519_BASEPOINT_TABLE) == ( - nonce.0 - (interim.p * verification_share.0) + nonces[0][0].0 - (interim.p * verification_share.0) ); } } diff --git a/coins/monero/src/tests/clsag.rs b/coins/monero/src/tests/clsag.rs index 17cb9940..d48d4c4a 100644 --- a/coins/monero/src/tests/clsag.rs +++ b/coins/monero/src/tests/clsag.rs @@ -6,7 +6,7 @@ use rand::{RngCore, rngs::OsRng}; use 
curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar}; #[cfg(feature = "multisig")] -use transcript::RecommendedTranscript; +use transcript::{Transcript, RecommendedTranscript}; #[cfg(feature = "multisig")] use frost::curve::Ed25519; @@ -102,6 +102,7 @@ fn clsag_multisig() -> Result<(), MultisigError> { &mut OsRng, ClsagMultisig::new( RecommendedTranscript::new(b"Monero Serai CLSAG Test"), + keys[&1].group_key().0, Arc::new(RwLock::new(Some( ClsagDetails::new( ClsagInput::new( diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index 1bf30d96..33dee744 100644 --- a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -112,6 +112,7 @@ impl SignableTransaction { AlgorithmMachine::new( ClsagMultisig::new( transcript.clone(), + input.key, inputs[i].clone() ).map_err(|e| TransactionError::MultisigError(e))?, Arc::new(offset), @@ -159,7 +160,10 @@ impl PreprocessMachine for TransactionMachine { rng: &mut R ) -> (TransactionSignMachine, Vec) { // Iterate over each CLSAG calling preprocess - let mut serialized = Vec::with_capacity(self.clsags.len() * (64 + ClsagMultisig::serialized_len())); + let mut serialized = Vec::with_capacity( + // D_{G, H}, E_{G, H}, DLEqs, key image addendum + self.clsags.len() * ((2 * (32 + 32)) + (2 * (32 + 32)) + ClsagMultisig::serialized_len()) + ); let clsags = self.clsags.drain(..).map(|clsag| { let (clsag, preprocess) = clsag.preprocess(rng); serialized.extend(&preprocess); @@ -224,8 +228,8 @@ impl SignMachine for TransactionSignMachine { } } - // FROST commitments, image, H commitments, and their proofs - let clsag_len = 64 + ClsagMultisig::serialized_len(); + // FROST commitments and their DLEqs, and the image and its DLEq + let clsag_len = (2 * (32 + 32)) + (2 * (32 + 32)) + ClsagMultisig::serialized_len(); for (l, commitments) in &commitments { if commitments.len() != (self.clsags.len() * clsag_len) { 
Err(FrostError::InvalidCommitment(*l))?; @@ -246,7 +250,7 @@ impl SignMachine for TransactionSignMachine { for c in 0 .. self.clsags.len() { for (l, preprocess) in &commitments[c] { images[c] += CompressedEdwardsY( - preprocess[64 .. 96].try_into().map_err(|_| FrostError::InvalidCommitment(*l))? + preprocess[(clsag_len - 96) .. (clsag_len - 64)].try_into().map_err(|_| FrostError::InvalidCommitment(*l))? ).decompress().ok_or(FrostError::InvalidCommitment(*l))?; } } diff --git a/coins/monero/tests/send.rs b/coins/monero/tests/send.rs index c875a023..44b68a7a 100644 --- a/coins/monero/tests/send.rs +++ b/coins/monero/tests/send.rs @@ -14,7 +14,7 @@ use curve25519_dalek::constants::ED25519_BASEPOINT_TABLE; #[cfg(feature = "multisig")] use dalek_ff_group::Scalar; #[cfg(feature = "multisig")] -use transcript::RecommendedTranscript; +use transcript::{Transcript, RecommendedTranscript}; #[cfg(feature = "multisig")] use frost::{curve::Ed25519, tests::{THRESHOLD, key_gen, sign}}; diff --git a/crypto/frost/Cargo.toml b/crypto/frost/Cargo.toml index 436c3966..e68c166b 100644 --- a/crypto/frost/Cargo.toml +++ b/crypto/frost/Cargo.toml @@ -28,6 +28,8 @@ transcript = { package = "flexible-transcript", path = "../transcript", version multiexp = { path = "../multiexp", version = "0.1", features = ["batch"] } +dleq = { package = "dleq", path = "../dleq", version = "0.1", features = ["serialize"] } + [dev-dependencies] rand = "0.8" diff --git a/crypto/frost/src/algorithm.rs b/crypto/frost/src/algorithm.rs index a85bab11..12f48e52 100644 --- a/crypto/frost/src/algorithm.rs +++ b/crypto/frost/src/algorithm.rs @@ -13,14 +13,18 @@ pub trait Algorithm: Clone { /// The resulting type of the signatures this algorithm will produce type Signature: Clone + PartialEq + Debug; + /// Obtain a mutable borrow of the underlying transcript fn transcript(&mut self) -> &mut Self::Transcript; + /// Obtain the list of nonces to generate, as specified by the basepoints to create commitments + /// against 
per-nonce. These are not committed to by FROST on the underlying transcript + fn nonces(&self) -> Vec>; + /// Generate an addendum to FROST"s preprocessing stage fn preprocess_addendum( &mut self, rng: &mut R, params: &FrostView, - nonces: &[C::F; 2], ) -> Vec; /// Proccess the addendum for the specified participant. Guaranteed to be ordered @@ -28,7 +32,6 @@ pub trait Algorithm: Clone { &mut self, params: &FrostView, l: u16, - commitments: &[C::G; 2], serialized: &[u8], ) -> Result<(), FrostError>; @@ -39,15 +42,14 @@ pub trait Algorithm: Clone { fn sign_share( &mut self, params: &FrostView, - nonce_sum: C::G, - binding: C::F, - nonce: C::F, + nonce_sums: &[Vec], + nonces: &[C::F], msg: &[u8], ) -> C::F; /// Verify a signature #[must_use] - fn verify(&self, group_key: C::G, nonce: C::G, sum: C::F) -> Option; + fn verify(&self, group_key: C::G, nonces: &[Vec], sum: C::F) -> Option; /// Verify a specific share given as a response. Used to determine blame if signature /// verification fails @@ -55,7 +57,7 @@ pub trait Algorithm: Clone { fn verify_share( &self, verification_share: C::G, - nonce: C::G, + nonces: &[Vec], share: C::F, ) -> bool; } @@ -66,6 +68,10 @@ pub struct IetfTranscript(Vec); impl Transcript for IetfTranscript { type Challenge = Vec; + fn new(_: &'static [u8]) -> IetfTranscript { + unimplemented!("IetfTranscript should not be used with multiple nonce protocols"); + } + fn domain_separate(&mut self, _: &[u8]) {} fn append_message(&mut self, _: &'static [u8], message: &[u8]) { @@ -115,11 +121,14 @@ impl> Algorithm for Schnorr { &mut self.transcript } + fn nonces(&self) -> Vec> { + vec![vec![C::GENERATOR]] + } + fn preprocess_addendum( &mut self, _: &mut R, _: &FrostView, - _: &[C::F; 2], ) -> Vec { vec![] } @@ -128,7 +137,6 @@ impl> Algorithm for Schnorr { &mut self, _: &FrostView, _: u16, - _: &[C::G; 2], _: &[u8], ) -> Result<(), FrostError> { Ok(()) @@ -137,19 +145,18 @@ impl> Algorithm for Schnorr { fn sign_share( &mut self, params: &FrostView, - 
nonce_sum: C::G, - _: C::F, - nonce: C::F, + nonce_sums: &[Vec], + nonces: &[C::F], msg: &[u8], ) -> C::F { - let c = H::hram(&nonce_sum, ¶ms.group_key(), msg); + let c = H::hram(&nonce_sums[0][0], ¶ms.group_key(), msg); self.c = Some(c); - schnorr::sign::(params.secret_share(), nonce, c).s + schnorr::sign::(params.secret_share(), nonces[0], c).s } #[must_use] - fn verify(&self, group_key: C::G, nonce: C::G, sum: C::F) -> Option { - let sig = SchnorrSignature { R: nonce, s: sum }; + fn verify(&self, group_key: C::G, nonces: &[Vec], sum: C::F) -> Option { + let sig = SchnorrSignature { R: nonces[0][0], s: sum }; if schnorr::verify::(group_key, self.c.unwrap(), &sig) { Some(sig) } else { @@ -161,13 +168,13 @@ impl> Algorithm for Schnorr { fn verify_share( &self, verification_share: C::G, - nonce: C::G, + nonces: &[Vec], share: C::F, ) -> bool { schnorr::verify::( verification_share, self.c.unwrap(), - &SchnorrSignature { R: nonce, s: share} + &SchnorrSignature { R: nonces[0][0], s: share} ) } } diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index 057ddc47..eab8a035 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -3,12 +3,14 @@ use std::{sync::Arc, collections::HashMap}; use rand_core::{RngCore, CryptoRng}; -use group::{ff::{Field, PrimeField}, GroupEncoding}; +use group::{ff::{Field, PrimeField}, Group, GroupEncoding}; use transcript::Transcript; +use dleq::{Generators, DLEqProof}; + use crate::{ - curve::{Curve, G_len, F_from_slice, G_from_slice}, + curve::{Curve, F_len, G_len, F_from_slice, G_from_slice}, FrostError, FrostParams, FrostKeys, FrostView, algorithm::Algorithm, @@ -69,8 +71,12 @@ impl> Params { } } +fn nonce_transcript() -> T { + T::new(b"FROST_nonce_dleq") +} + pub(crate) struct PreprocessPackage { - pub(crate) nonces: [C::F; 2], + pub(crate) nonces: Vec<[C::F; 2]>, pub(crate) serialized: Vec, } @@ -80,30 +86,53 @@ fn preprocess>( rng: &mut R, params: &mut Params, ) -> PreprocessPackage { - let nonces = [ - 
C::random_nonce(params.view().secret_share(), &mut *rng), - C::random_nonce(params.view().secret_share(), &mut *rng) - ]; - let commitments = [C::GENERATOR * nonces[0], C::GENERATOR * nonces[1]]; - let mut serialized = commitments[0].to_bytes().as_ref().to_vec(); - serialized.extend(commitments[1].to_bytes().as_ref()); + let mut serialized = Vec::with_capacity(2 * G_len::()); + let nonces = params.algorithm.nonces().iter().cloned().map( + |mut generators| { + let nonces = [ + C::random_nonce(params.view().secret_share(), &mut *rng), + C::random_nonce(params.view().secret_share(), &mut *rng) + ]; - serialized.extend( - ¶ms.algorithm.preprocess_addendum( - rng, - ¶ms.view, - &nonces - ) - ); + let commit = |generator: C::G| { + let commitments = [generator * nonces[0], generator * nonces[1]]; + [commitments[0].to_bytes().as_ref(), commitments[1].to_bytes().as_ref()].concat().to_vec() + }; + + let first = generators.remove(0); + serialized.extend(commit(first)); + + // Iterate over the rest + for generator in generators.iter() { + serialized.extend(commit(*generator)); + // Provide a DLEq to verify these commitments are for the same nonce + // TODO: Provide a single DLEq. See https://github.com/serai-dex/serai/issues/34 + for nonce in nonces { + DLEqProof::prove( + &mut *rng, + // Uses an independent transcript as each signer must do this now, yet we validate them + // sequentially by the global order. 
Avoids needing to clone the transcript around + &mut nonce_transcript::(), + Generators::new(first, *generator), + nonce + ).serialize(&mut serialized).unwrap(); + } + } + + nonces + } + ).collect::>(); + + serialized.extend(¶ms.algorithm.preprocess_addendum(rng, ¶ms.view)); PreprocessPackage { nonces, serialized } } #[allow(non_snake_case)] struct Package { - B: HashMap, + B: HashMap>>, binding: C::F, - R: C::G, + Rs: Vec>, share: Vec } @@ -137,27 +166,59 @@ fn sign_with_share>( let mut B = HashMap::::with_capacity(params.view.included.len()); // Get the binding factor + let nonces = params.algorithm.nonces(); let mut addendums = HashMap::new(); let binding = { let transcript = params.algorithm.transcript(); // Parse the commitments for l in ¶ms.view.included { transcript.append_message(b"participant", &l.to_be_bytes()); + let serialized = commitments.remove(l).unwrap(); - let commitments = commitments.remove(l).unwrap(); let mut read_commitment = |c, label| { - let commitment = &commitments[c .. (c + G_len::())]; + let commitment = &serialized[c .. (c + G_len::())]; transcript.append_message(label, commitment); G_from_slice::(commitment).map_err(|_| FrostError::InvalidCommitment(*l)) }; + // While this doesn't note which nonce/basepoint this is for, those are expected to be + // static. Beyond that, they're committed to in the DLEq proof transcripts, ensuring + // consistency. While this is suboptimal, it maintains IETF compliance, and Algorithm is + // documented accordingly #[allow(non_snake_case)] - let mut read_D_E = || Ok( - [read_commitment(0, b"commitment_D")?, read_commitment(G_len::(), b"commitment_E")?] - ); + let mut read_D_E = |c| Ok([ + read_commitment(c, b"commitment_D")?, + read_commitment(c + G_len::(), b"commitment_E")? 
+ ]); - B.insert(*l, read_D_E()?); - addendums.insert(*l, commitments[(G_len::() * 2) ..].to_vec()); + let mut c = 0; + let mut commitments = Vec::with_capacity(nonces.len()); + for (n, nonce_generators) in nonces.clone().iter_mut().enumerate() { + commitments.push(Vec::with_capacity(nonce_generators.len())); + + let first = nonce_generators.remove(0); + commitments[n].push(read_D_E(c)?); + c += 2 * G_len::(); + + let mut c = 2 * G_len::(); + for generator in nonce_generators { + commitments[n].push(read_D_E(c)?); + c += 2 * G_len::(); + for de in 0 .. 2 { + DLEqProof::deserialize( + &mut std::io::Cursor::new(&serialized[c .. (c + (2 * F_len::()))]) + ).map_err(|_| FrostError::InvalidCommitment(*l))?.verify( + &mut nonce_transcript::(), + Generators::new(first, *generator), + (commitments[n][0][de], commitments[n][commitments[n].len() - 1][de]) + ).map_err(|_| FrostError::InvalidCommitment(*l))?; + c += 2 * F_len::(); + } + } + + addendums.insert(*l, serialized[c ..].to_vec()); + } + B.insert(*l, commitments); } // Append the message to the transcript @@ -169,22 +230,32 @@ fn sign_with_share>( // Process the addendums for l in ¶ms.view.included { - params.algorithm.process_addendum(¶ms.view, *l, &B[l], &addendums[l])?; + params.algorithm.process_addendum(¶ms.view, *l, &addendums[l])?; } #[allow(non_snake_case)] - let R = { - B.values().map(|B| B[0]).sum::() + (B.values().map(|B| B[1]).sum::() * binding) - }; + let mut Rs = Vec::with_capacity(nonces.len()); + for n in 0 .. nonces.len() { + Rs.push(vec![C::G::identity(); nonces[n].len()]); + #[allow(non_snake_case)] + for g in 0 .. 
nonces[n].len() { + Rs[n][g] = { + B.values().map(|B| B[n][g][0]).sum::() + + (B.values().map(|B| B[n][g][1]).sum::() * binding) + }; + } + } + let share = params.algorithm.sign_share( ¶ms.view, - R, - binding, - our_preprocess.nonces[0] + (our_preprocess.nonces[1] * binding), + &Rs, + &our_preprocess.nonces.iter().map( + |nonces| nonces[0] + (nonces[1] * binding) + ).collect::>(), msg ).to_repr().as_ref().to_vec(); - Ok((Package { B, binding, R, share: share.clone() }, share)) + Ok((Package { B, binding, Rs, share: share.clone() }, share)) } fn complete>( @@ -206,7 +277,7 @@ fn complete>( // Perform signature validation instead of individual share validation // For the success route, which should be much more frequent, this should be faster // It also acts as an integrity check of this library's signing function - let res = sign_params.algorithm.verify(sign_params.view.group_key, sign.R, sum); + let res = sign_params.algorithm.verify(sign_params.view.group_key, &sign.Rs, sum); if let Some(res) = res { return Ok(res); } @@ -216,7 +287,11 @@ fn complete>( for l in &sign_params.view.included { if !sign_params.algorithm.verify_share( sign_params.view.verification_share(*l), - sign.B[l][0] + (sign.B[l][1] * sign.binding), + &sign.B[l].iter().map( + |nonces| nonces.iter().map( + |commitments| commitments[0] + (commitments[1] * sign.binding) + ).collect() + ).collect::>(), responses[l] ) { Err(FrostError::InvalidShare(*l))?; diff --git a/crypto/frost/src/tests/vectors.rs b/crypto/frost/src/tests/vectors.rs index 7a5d1af5..7fc2458c 100644 --- a/crypto/frost/src/tests/vectors.rs +++ b/crypto/frost/src/tests/vectors.rs @@ -105,7 +105,7 @@ pub fn test_with_vectors< serialized.extend((C::GENERATOR * nonces[1]).to_bytes().as_ref()); let (machine, serialized) = machine.unsafe_override_preprocess( - PreprocessPackage { nonces, serialized: serialized.clone() } + PreprocessPackage { nonces: vec![nonces], serialized: serialized.clone() } ); commitments.insert(i, serialized); diff 
--git a/crypto/transcript/src/lib.rs b/crypto/transcript/src/lib.rs index eff02b5a..72663b93 100644 --- a/crypto/transcript/src/lib.rs +++ b/crypto/transcript/src/lib.rs @@ -10,6 +10,9 @@ use digest::{typenum::type_operators::IsGreaterOrEqual, consts::U256, Digest, Ou pub trait Transcript { type Challenge: Clone + Send + Sync + AsRef<[u8]>; + /// Create a new transcript with the specified name + fn new(name: &'static [u8]) -> Self; + /// Apply a domain separator to the transcript fn domain_separate(&mut self, label: &'static [u8]); @@ -62,17 +65,17 @@ impl DigestTranscript { self.0.update(u64::try_from(value.len()).unwrap().to_le_bytes()); self.0.update(value); } - - pub fn new(name: &'static [u8]) -> Self { - let mut res = DigestTranscript(D::new()); - res.append(DigestTranscriptMember::Name, name); - res - } } impl Transcript for DigestTranscript { type Challenge = Output; + fn new(name: &'static [u8]) -> Self { + let mut res = DigestTranscript(D::new()); + res.append(DigestTranscriptMember::Name, name); + res + } + fn domain_separate(&mut self, label: &[u8]) { self.append(DigestTranscriptMember::Domain, label); } diff --git a/crypto/transcript/src/merlin.rs b/crypto/transcript/src/merlin.rs index d0c60cc9..882fea81 100644 --- a/crypto/transcript/src/merlin.rs +++ b/crypto/transcript/src/merlin.rs @@ -17,6 +17,10 @@ impl Transcript for MerlinTranscript { // this wrapper should be secure with this setting type Challenge = [u8; 64]; + fn new(name: &'static [u8]) -> Self { + MerlinTranscript(merlin::Transcript::new(name)) + } + fn domain_separate(&mut self, label: &'static [u8]) { self.append_message(b"dom-sep", label); }