Merge pull request #255 from serai-dex/crypto-tweaks

Crypto audit/tweaks
This commit is contained in:
Luke Parker
2023-03-16 16:43:27 -04:00
committed by GitHub
73 changed files with 2473 additions and 953 deletions

113
Cargo.lock generated
View File

@@ -1032,9 +1032,10 @@ dependencies = [
"elliptic-curve", "elliptic-curve",
"ff", "ff",
"ff-group-tests", "ff-group-tests",
"flexible-transcript",
"group", "group",
"hex", "hex",
"k256", "k256 0.12.0",
"minimal-ed448", "minimal-ed448",
"p256", "p256",
"rand_core 0.6.4", "rand_core 0.6.4",
@@ -1153,7 +1154,7 @@ dependencies = [
"digest 0.10.6", "digest 0.10.6",
"getrandom 0.2.8", "getrandom 0.2.8",
"hmac 0.12.1", "hmac 0.12.1",
"k256", "k256 0.11.6",
"lazy_static", "lazy_static",
"serde", "serde",
"sha2 0.10.6", "sha2 0.10.6",
@@ -1644,6 +1645,7 @@ dependencies = [
"ff-group-tests", "ff-group-tests",
"group", "group",
"rand_core 0.6.4", "rand_core 0.6.4",
"sha2 0.9.9",
"subtle", "subtle",
"zeroize", "zeroize",
] ]
@@ -1948,12 +1950,10 @@ dependencies = [
"ciphersuite", "ciphersuite",
"dleq", "dleq",
"flexible-transcript", "flexible-transcript",
"group",
"hex",
"multiexp", "multiexp",
"rand_core 0.6.4", "rand_core 0.6.4",
"schnorr-signatures", "schnorr-signatures",
"subtle", "serde",
"thiserror", "thiserror",
"zeroize", "zeroize",
] ]
@@ -1969,7 +1969,7 @@ dependencies = [
"flexible-transcript", "flexible-transcript",
"group", "group",
"hex-literal", "hex-literal",
"k256", "k256 0.12.0",
"multiexp", "multiexp",
"rand_core 0.6.4", "rand_core 0.6.4",
"thiserror", "thiserror",
@@ -2036,7 +2036,19 @@ dependencies = [
"der", "der",
"elliptic-curve", "elliptic-curve",
"rfc6979", "rfc6979",
"signature", "signature 1.6.4",
]
[[package]]
name = "ecdsa"
version = "0.15.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "12844141594ad74185a926d030f3b605f6a903b4e3fec351f3ea338ac5b7637e"
dependencies = [
"der",
"elliptic-curve",
"rfc6979",
"signature 2.0.0",
] ]
[[package]] [[package]]
@@ -2045,7 +2057,7 @@ version = "1.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7"
dependencies = [ dependencies = [
"signature", "signature 1.6.4",
] ]
[[package]] [[package]]
@@ -2242,11 +2254,12 @@ dependencies = [
"ethers-solc", "ethers-solc",
"eyre", "eyre",
"group", "group",
"k256", "k256 0.12.0",
"modular-frost", "modular-frost",
"rand_core 0.6.4", "rand_core 0.6.4",
"serde", "serde",
"serde_json", "serde_json",
"sha2 0.10.6",
"sha3", "sha3",
"thiserror", "thiserror",
"tokio", "tokio",
@@ -2370,7 +2383,7 @@ dependencies = [
"ethabi", "ethabi",
"generic-array 0.14.6", "generic-array 0.14.6",
"hex", "hex",
"k256", "k256 0.11.6",
"once_cell", "once_cell",
"open-fastrlp", "open-fastrlp",
"proc-macro2", "proc-macro2",
@@ -2585,8 +2598,9 @@ name = "ff-group-tests"
version = "0.12.0" version = "0.12.0"
dependencies = [ dependencies = [
"group", "group",
"k256", "k256 0.12.0",
"p256", "p256",
"rand_core 0.6.4",
] ]
[[package]] [[package]]
@@ -2681,6 +2695,7 @@ dependencies = [
"blake2", "blake2",
"digest 0.10.6", "digest 0.10.6",
"merlin 3.0.0", "merlin 3.0.0",
"sha2 0.10.6",
] ]
[[package]] [[package]]
@@ -2845,7 +2860,7 @@ dependencies = [
"frame-metadata", "frame-metadata",
"frame-support-procedural", "frame-support-procedural",
"impl-trait-for-tuples", "impl-trait-for-tuples",
"k256", "k256 0.11.6",
"log", "log",
"once_cell", "once_cell",
"parity-scale-codec", "parity-scale-codec",
@@ -3335,15 +3350,6 @@ version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0"
[[package]]
name = "hkdf"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437"
dependencies = [
"hmac 0.12.1",
]
[[package]] [[package]]
name = "hmac" name = "hmac"
version = "0.8.1" version = "0.8.1"
@@ -3973,12 +3979,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b"
dependencies = [ dependencies = [
"cfg-if", "cfg-if",
"ecdsa", "ecdsa 0.14.8",
"elliptic-curve", "elliptic-curve",
"sha2 0.10.6", "sha2 0.10.6",
"sha3", "sha3",
] ]
[[package]]
name = "k256"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "92a55e0ff3b72c262bcf041d9e97f1b84492b68f1c1a384de2323d3dc9403397"
dependencies = [
"cfg-if",
"ecdsa 0.15.1",
"elliptic-curve",
"once_cell",
"sha2 0.10.6",
"signature 2.0.0",
]
[[package]] [[package]]
name = "keccak" name = "keccak"
version = "0.1.3" version = "0.1.3"
@@ -4865,7 +4885,6 @@ version = "0.1.2"
dependencies = [ dependencies = [
"crypto-bigint", "crypto-bigint",
"dalek-ff-group", "dalek-ff-group",
"digest 0.10.6",
"ff", "ff",
"ff-group-tests", "ff-group-tests",
"generic-array 0.14.6", "generic-array 0.14.6",
@@ -4935,16 +4954,13 @@ dependencies = [
name = "modular-frost" name = "modular-frost"
version = "0.5.0" version = "0.5.0"
dependencies = [ dependencies = [
"chacha20 0.9.0",
"ciphersuite", "ciphersuite",
"dalek-ff-group", "dalek-ff-group",
"digest 0.10.6", "digest 0.10.6",
"dkg", "dkg",
"dleq", "dleq",
"flexible-transcript", "flexible-transcript",
"group",
"hex", "hex",
"hkdf",
"minimal-ed448", "minimal-ed448",
"multiexp", "multiexp",
"rand_chacha 0.3.1", "rand_chacha 0.3.1",
@@ -5105,7 +5121,7 @@ dependencies = [
"dalek-ff-group", "dalek-ff-group",
"ff", "ff",
"group", "group",
"k256", "k256 0.12.0",
"rand_core 0.6.4", "rand_core 0.6.4",
"zeroize", "zeroize",
] ]
@@ -5561,23 +5577,13 @@ checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee"
[[package]] [[package]]
name = "p256" name = "p256"
version = "0.11.1" version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" checksum = "49c124b3cbce43bcbac68c58ec181d98ed6cc7e6d0aa7c3ba97b2563410b0e55"
dependencies = [ dependencies = [
"ecdsa", "ecdsa 0.15.1",
"elliptic-curve",
"sha2 0.10.6",
]
[[package]]
name = "p384"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa"
dependencies = [
"ecdsa",
"elliptic-curve", "elliptic-curve",
"primeorder",
"sha2 0.10.6", "sha2 0.10.6",
] ]
@@ -6187,6 +6193,15 @@ dependencies = [
"syn", "syn",
] ]
[[package]]
name = "primeorder"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b54f7131b3dba65a2f414cf5bd25b66d4682e4608610668eae785750ba4c5b2"
dependencies = [
"elliptic-curve",
]
[[package]] [[package]]
name = "primitive-types" name = "primitive-types"
version = "0.12.1" version = "0.12.1"
@@ -7937,13 +7952,13 @@ dependencies = [
name = "schnorr-signatures" name = "schnorr-signatures"
version = "0.2.0" version = "0.2.0"
dependencies = [ dependencies = [
"blake2",
"ciphersuite", "ciphersuite",
"dalek-ff-group", "dalek-ff-group",
"digest 0.10.6", "flexible-transcript",
"group", "hex",
"multiexp", "multiexp",
"rand_core 0.6.4", "rand_core 0.6.4",
"sha2 0.10.6",
"zeroize", "zeroize",
] ]
@@ -8423,6 +8438,16 @@ dependencies = [
"rand_core 0.6.4", "rand_core 0.6.4",
] ]
[[package]]
name = "signature"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fe458c98333f9c8152221191a77e2a44e8325d0193484af2e9421a53019e57d"
dependencies = [
"digest 0.10.6",
"rand_core 0.6.4",
]
[[package]] [[package]]
name = "simba" name = "simba"
version = "0.8.0" version = "0.8.0"

View File

@@ -19,10 +19,11 @@ rand_core = "0.6"
serde_json = "1.0" serde_json = "1.0"
serde = "1.0" serde = "1.0"
sha2 = "0.10"
sha3 = "0.10" sha3 = "0.10"
group = "0.12" group = "0.12"
k256 = { version = "0.11", features = ["arithmetic", "keccak256", "ecdsa"] } k256 = { version = "0.12", features = ["arithmetic", "ecdsa"] }
frost = { package = "modular-frost", path = "../../crypto/frost", features = ["secp256k1", "tests"] } frost = { package = "modular-frost", path = "../../crypto/frost", features = ["secp256k1", "tests"] }
eyre = "0.6" eyre = "0.6"

View File

@@ -2,7 +2,7 @@ use std::{convert::TryFrom, sync::Arc, time::Duration};
use rand_core::OsRng; use rand_core::OsRng;
use k256::{elliptic_curve::bigint::ArrayEncoding, U256}; use ::k256::{elliptic_curve::bigint::ArrayEncoding, U256};
use ethers::{ use ethers::{
prelude::*, prelude::*,
@@ -11,7 +11,8 @@ use ethers::{
use frost::{ use frost::{
curve::Secp256k1, curve::Secp256k1,
algorithm::Schnorr as Algo, Participant,
algorithm::IetfSchnorr,
tests::{key_gen, algorithm_machines, sign}, tests::{key_gen, algorithm_machines, sign},
}; };
@@ -44,14 +45,14 @@ async fn test_ecrecover_hack() {
let chain_id = U256::from(chain_id); let chain_id = U256::from(chain_id);
let keys = key_gen::<_, Secp256k1>(&mut OsRng); let keys = key_gen::<_, Secp256k1>(&mut OsRng);
let group_key = keys[&1].group_key(); let group_key = keys[&Participant::new(1).unwrap()].group_key();
const MESSAGE: &[u8] = b"Hello, World!"; const MESSAGE: &[u8] = b"Hello, World!";
let hashed_message = keccak256(MESSAGE); let hashed_message = keccak256(MESSAGE);
let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat(); let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
let algo = Algo::<Secp256k1, crypto::EthereumHram>::new(); let algo = IetfSchnorr::<Secp256k1, crypto::EthereumHram>::ietf();
let sig = sign( let sig = sign(
&mut OsRng, &mut OsRng,
algo.clone(), algo.clone(),

View File

@@ -1,51 +1,57 @@
use ethereum_serai::crypto::*;
use frost::curve::Secp256k1;
use k256::{ use k256::{
elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, sec1::ToEncodedPoint}, elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, sec1::ToEncodedPoint},
ProjectivePoint, Scalar, U256, ProjectivePoint, Scalar, U256,
}; };
use frost::{curve::Secp256k1, Participant};
use ethereum_serai::crypto::*;
#[test] #[test]
fn test_ecrecover() { fn test_ecrecover() {
use k256::ecdsa::{
recoverable::Signature,
signature::{Signer, Verifier},
SigningKey, VerifyingKey,
};
use rand_core::OsRng; use rand_core::OsRng;
use sha2::Sha256;
use sha3::{Digest, Keccak256};
use k256::ecdsa::{hazmat::SignPrimitive, signature::DigestVerifier, SigningKey, VerifyingKey};
let private = SigningKey::random(&mut OsRng); let private = SigningKey::random(&mut OsRng);
let public = VerifyingKey::from(&private); let public = VerifyingKey::from(&private);
const MESSAGE: &[u8] = b"Hello, World!"; const MESSAGE: &[u8] = b"Hello, World!";
let sig: Signature = private.sign(MESSAGE); let (sig, recovery_id) = private
public.verify(MESSAGE, &sig).unwrap(); .as_nonzero_scalar()
.try_sign_prehashed_rfc6979::<Sha256>(Keccak256::digest(MESSAGE), b"")
.unwrap();
#[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
{
assert_eq!(public.verify_digest(Keccak256::new_with_prefix(MESSAGE), &sig).unwrap(), ());
}
assert_eq!( assert_eq!(
ecrecover(hash_to_scalar(MESSAGE), sig.as_ref()[64], *sig.r(), *sig.s()).unwrap(), ecrecover(hash_to_scalar(MESSAGE), recovery_id.unwrap().is_y_odd().into(), *sig.r(), *sig.s())
address(&ProjectivePoint::from(public)) .unwrap(),
address(&ProjectivePoint::from(public.as_affine()))
); );
} }
#[test] #[test]
fn test_signing() { fn test_signing() {
use frost::{ use frost::{
algorithm::Schnorr, algorithm::IetfSchnorr,
tests::{algorithm_machines, key_gen, sign}, tests::{algorithm_machines, key_gen, sign},
}; };
use rand_core::OsRng; use rand_core::OsRng;
let keys = key_gen::<_, Secp256k1>(&mut OsRng); let keys = key_gen::<_, Secp256k1>(&mut OsRng);
let _group_key = keys[&1].group_key(); let _group_key = keys[&Participant::new(1).unwrap()].group_key();
const MESSAGE: &[u8] = b"Hello, World!"; const MESSAGE: &[u8] = b"Hello, World!";
let algo = Schnorr::<Secp256k1, EthereumHram>::new(); let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let _sig = sign( let _sig = sign(
&mut OsRng, &mut OsRng,
algo, algo,
keys.clone(), keys.clone(),
algorithm_machines(&mut OsRng, Schnorr::<Secp256k1, EthereumHram>::new(), &keys), algorithm_machines(&mut OsRng, IetfSchnorr::<Secp256k1, EthereumHram>::ietf(), &keys),
MESSAGE, MESSAGE,
); );
} }
@@ -53,13 +59,13 @@ fn test_signing() {
#[test] #[test]
fn test_ecrecover_hack() { fn test_ecrecover_hack() {
use frost::{ use frost::{
algorithm::Schnorr, algorithm::IetfSchnorr,
tests::{algorithm_machines, key_gen, sign}, tests::{algorithm_machines, key_gen, sign},
}; };
use rand_core::OsRng; use rand_core::OsRng;
let keys = key_gen::<_, Secp256k1>(&mut OsRng); let keys = key_gen::<_, Secp256k1>(&mut OsRng);
let group_key = keys[&1].group_key(); let group_key = keys[&Participant::new(1).unwrap()].group_key();
let group_key_encoded = group_key.to_encoded_point(true); let group_key_encoded = group_key.to_encoded_point(true);
let group_key_compressed = group_key_encoded.as_ref(); let group_key_compressed = group_key_encoded.as_ref();
let group_key_x = Scalar::from_uint_reduced(U256::from_be_slice(&group_key_compressed[1 .. 33])); let group_key_x = Scalar::from_uint_reduced(U256::from_be_slice(&group_key_compressed[1 .. 33]));
@@ -70,7 +76,7 @@ fn test_ecrecover_hack() {
let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat(); let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
let algo = Schnorr::<Secp256k1, EthereumHram>::new(); let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let sig = sign( let sig = sign(
&mut OsRng, &mut OsRng,
algo.clone(), algo.clone(),

View File

@@ -12,9 +12,6 @@ all-features = true
rustdoc-args = ["--cfg", "docsrs"] rustdoc-args = ["--cfg", "docsrs"]
[dependencies] [dependencies]
futures = "0.3"
hex-literal = "0.3"
lazy_static = "1" lazy_static = "1"
thiserror = "1" thiserror = "1"
crc = "3" crc = "3"
@@ -24,12 +21,12 @@ rand_chacha = { version = "0.3", optional = true }
rand = "0.8" rand = "0.8"
rand_distr = "0.4" rand_distr = "0.4"
zeroize = { version = "1.5", features = ["zeroize_derive"] } zeroize = { version = "^1.5", features = ["zeroize_derive"] }
subtle = "2.4" subtle = "^2.4"
sha3 = "0.10" sha3 = "0.10"
curve25519-dalek = { version = "3", features = ["std"] } curve25519-dalek = { version = "^3.2", features = ["std"] }
group = "0.12" group = "0.12"
dalek-ff-group = { path = "../../crypto/dalek-ff-group", version = "0.1" } dalek-ff-group = { path = "../../crypto/dalek-ff-group", version = "0.1" }
@@ -56,6 +53,8 @@ dalek-ff-group = { path = "../../crypto/dalek-ff-group", version = "0.1" }
monero-generators = { path = "generators", version = "0.1" } monero-generators = { path = "generators", version = "0.1" }
[dev-dependencies] [dev-dependencies]
hex-literal = "0.3"
tokio = { version = "1", features = ["full"] } tokio = { version = "1", features = ["full"] }
monero-rpc = "0.3" monero-rpc = "0.3"

View File

@@ -14,7 +14,7 @@ rustdoc-args = ["--cfg", "docsrs"]
[dependencies] [dependencies]
lazy_static = "1" lazy_static = "1"
subtle = "2.4" subtle = "^2.4"
sha3 = "0.10" sha3 = "0.10"

View File

@@ -23,7 +23,7 @@ use dleq::DLEqProof;
use frost::{ use frost::{
dkg::lagrange, dkg::lagrange,
curve::Ed25519, curve::Ed25519,
FrostError, ThresholdKeys, ThresholdView, Participant, FrostError, ThresholdKeys, ThresholdView,
algorithm::{WriteAddendum, Algorithm}, algorithm::{WriteAddendum, Algorithm},
}; };
@@ -145,8 +145,8 @@ pub(crate) fn add_key_image_share(
image: &mut EdwardsPoint, image: &mut EdwardsPoint,
generator: EdwardsPoint, generator: EdwardsPoint,
offset: Scalar, offset: Scalar,
included: &[u16], included: &[Participant],
participant: u16, participant: Participant,
share: EdwardsPoint, share: EdwardsPoint,
) { ) {
if image.is_identity() { if image.is_identity() {
@@ -202,7 +202,7 @@ impl Algorithm<Ed25519> for ClsagMultisig {
fn process_addendum( fn process_addendum(
&mut self, &mut self,
view: &ThresholdView<Ed25519>, view: &ThresholdView<Ed25519>,
l: u16, l: Participant,
addendum: ClsagAddendum, addendum: ClsagAddendum,
) -> Result<(), FrostError> { ) -> Result<(), FrostError> {
if self.image.is_identity() { if self.image.is_identity() {
@@ -211,7 +211,7 @@ impl Algorithm<Ed25519> for ClsagMultisig {
self.transcript.append_message(b"mask", self.mask().to_bytes()); self.transcript.append_message(b"mask", self.mask().to_bytes());
} }
self.transcript.append_message(b"participant", l.to_be_bytes()); self.transcript.append_message(b"participant", l.to_bytes());
addendum addendum
.dleq .dleq

View File

@@ -24,7 +24,10 @@ use crate::{
use crate::ringct::clsag::{ClsagDetails, ClsagMultisig}; use crate::ringct::clsag::{ClsagDetails, ClsagMultisig};
#[cfg(feature = "multisig")] #[cfg(feature = "multisig")]
use frost::tests::{key_gen, algorithm_machines, sign}; use frost::{
Participant,
tests::{key_gen, algorithm_machines, sign},
};
const RING_LEN: u64 = 11; const RING_LEN: u64 = 11;
const AMOUNT: u64 = 1337; const AMOUNT: u64 = 1337;
@@ -93,7 +96,7 @@ fn clsag_multisig() {
mask = random_scalar(&mut OsRng); mask = random_scalar(&mut OsRng);
amount = OsRng.next_u64(); amount = OsRng.next_u64();
} else { } else {
dest = keys[&1].group_key().0; dest = keys[&Participant::new(1).unwrap()].group_key().0;
mask = randomness; mask = randomness;
amount = AMOUNT; amount = AMOUNT;
} }
@@ -103,7 +106,7 @@ fn clsag_multisig() {
let mask_sum = random_scalar(&mut OsRng); let mask_sum = random_scalar(&mut OsRng);
let algorithm = ClsagMultisig::new( let algorithm = ClsagMultisig::new(
RecommendedTranscript::new(b"Monero Serai CLSAG Test"), RecommendedTranscript::new(b"Monero Serai CLSAG Test"),
keys[&1].group_key().0, keys[&Participant::new(1).unwrap()].group_key().0,
Arc::new(RwLock::new(Some(ClsagDetails::new( Arc::new(RwLock::new(Some(ClsagDetails::new(
ClsagInput::new( ClsagInput::new(
Commitment::new(randomness, AMOUNT), Commitment::new(randomness, AMOUNT),

View File

@@ -16,7 +16,7 @@ use dalek_ff_group as dfg;
use transcript::{Transcript, RecommendedTranscript}; use transcript::{Transcript, RecommendedTranscript};
use frost::{ use frost::{
curve::Ed25519, curve::Ed25519,
FrostError, ThresholdKeys, Participant, FrostError, ThresholdKeys,
sign::{ sign::{
Writable, Preprocess, CachedPreprocess, SignatureShare, PreprocessMachine, SignMachine, Writable, Preprocess, CachedPreprocess, SignatureShare, PreprocessMachine, SignMachine,
SignatureMachine, AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine, SignatureMachine, AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine,
@@ -39,7 +39,7 @@ use crate::{
/// FROST signing machine to produce a signed transaction. /// FROST signing machine to produce a signed transaction.
pub struct TransactionMachine { pub struct TransactionMachine {
signable: SignableTransaction, signable: SignableTransaction,
i: u16, i: Participant,
transcript: RecommendedTranscript, transcript: RecommendedTranscript,
decoys: Vec<Decoys>, decoys: Vec<Decoys>,
@@ -52,7 +52,7 @@ pub struct TransactionMachine {
pub struct TransactionSignMachine { pub struct TransactionSignMachine {
signable: SignableTransaction, signable: SignableTransaction,
i: u16, i: Participant,
transcript: RecommendedTranscript, transcript: RecommendedTranscript,
decoys: Vec<Decoys>, decoys: Vec<Decoys>,
@@ -251,7 +251,7 @@ impl SignMachine<Transaction> for TransactionSignMachine {
fn sign( fn sign(
mut self, mut self,
mut commitments: HashMap<u16, Self::Preprocess>, mut commitments: HashMap<Participant, Self::Preprocess>,
msg: &[u8], msg: &[u8],
) -> Result<(TransactionSignatureMachine, Self::SignatureShare), FrostError> { ) -> Result<(TransactionSignatureMachine, Self::SignatureShare), FrostError> {
if !msg.is_empty() { if !msg.is_empty() {
@@ -278,7 +278,7 @@ impl SignMachine<Transaction> for TransactionSignMachine {
// While each CLSAG will do this as they need to for security, they have their own // While each CLSAG will do this as they need to for security, they have their own
// transcripts cloned from this TX's initial premise's transcript. For our TX // transcripts cloned from this TX's initial premise's transcript. For our TX
// transcript to have the CLSAG data for entropy, it'll have to be added ourselves here // transcript to have the CLSAG data for entropy, it'll have to be added ourselves here
self.transcript.append_message(b"participant", (*l).to_be_bytes()); self.transcript.append_message(b"participant", (*l).to_bytes());
let preprocess = if *l == self.i { let preprocess = if *l == self.i {
self.our_preprocess[c].clone() self.our_preprocess[c].clone()
@@ -404,7 +404,7 @@ impl SignatureMachine<Transaction> for TransactionSignatureMachine {
fn complete( fn complete(
mut self, mut self,
shares: HashMap<u16, Self::SignatureShare>, shares: HashMap<Participant, Self::SignatureShare>,
) -> Result<Transaction, FrostError> { ) -> Result<Transaction, FrostError> {
let mut tx = self.tx; let mut tx = self.tx;
match tx.rct_signatures.prunable { match tx.rct_signatures.prunable {

View File

@@ -151,6 +151,7 @@ macro_rules! test {
#[cfg(feature = "multisig")] #[cfg(feature = "multisig")]
use frost::{ use frost::{
curve::Ed25519, curve::Ed25519,
Participant,
tests::{THRESHOLD, key_gen}, tests::{THRESHOLD, key_gen},
}; };
@@ -185,7 +186,7 @@ macro_rules! test {
#[cfg(not(feature = "multisig"))] #[cfg(not(feature = "multisig"))]
panic!("Multisig branch called without the multisig feature"); panic!("Multisig branch called without the multisig feature");
#[cfg(feature = "multisig")] #[cfg(feature = "multisig")]
keys[&1].group_key().0 keys[&Participant::new(1).unwrap()].group_key().0
}; };
let rpc = rpc().await; let rpc = rpc().await;
@@ -221,7 +222,7 @@ macro_rules! test {
#[cfg(feature = "multisig")] #[cfg(feature = "multisig")]
{ {
let mut machines = HashMap::new(); let mut machines = HashMap::new();
for i in 1 ..= THRESHOLD { for i in (1 ..= THRESHOLD).map(|i| Participant::new(i).unwrap()) {
machines.insert( machines.insert(
i, i,
tx tx

View File

@@ -13,7 +13,7 @@ all-features = true
rustdoc-args = ["--cfg", "docsrs"] rustdoc-args = ["--cfg", "docsrs"]
[dependencies] [dependencies]
zeroize = "1.5" zeroize = "^1.5"
[features] [features]
# Commented for now as it requires nightly and we don't use nightly # Commented for now as it requires nightly and we don't use nightly

View File

@@ -15,10 +15,11 @@ rustdoc-args = ["--cfg", "docsrs"]
[dependencies] [dependencies]
rand_core = "0.6" rand_core = "0.6"
zeroize = { version = "1.5", features = ["zeroize_derive"] } zeroize = { version = "^1.5", features = ["zeroize_derive"] }
subtle = "2" subtle = "^2.4"
digest = "0.10" digest = "0.10"
transcript = { package = "flexible-transcript", path = "../transcript", version = "0.2" }
sha2 = { version = "0.10", optional = true } sha2 = { version = "0.10", optional = true }
sha3 = { version = "0.10", optional = true } sha3 = { version = "0.10", optional = true }
@@ -28,8 +29,8 @@ group = "0.12"
dalek-ff-group = { path = "../dalek-ff-group", version = "^0.1.2", optional = true } dalek-ff-group = { path = "../dalek-ff-group", version = "^0.1.2", optional = true }
elliptic-curve = { version = "0.12", features = ["hash2curve"], optional = true } elliptic-curve = { version = "0.12", features = ["hash2curve"], optional = true }
p256 = { version = "0.11", features = ["arithmetic", "bits", "hash2curve"], optional = true } p256 = { version = "0.12", features = ["arithmetic", "bits", "hash2curve"], optional = true }
k256 = { version = "0.11", features = ["arithmetic", "bits", "hash2curve"], optional = true } k256 = { version = "0.12", features = ["arithmetic", "bits", "hash2curve"], optional = true }
minimal-ed448 = { path = "../ed448", version = "^0.1.2", optional = true } minimal-ed448 = { path = "../ed448", version = "^0.1.2", optional = true }

View File

@@ -27,8 +27,8 @@ The domain-separation tag is naively prefixed to the message.
### Ed448 ### Ed448
Ed448 is offered via [minimal-ed448](https://crates.io/crates/minimal-ed448), an Ed448 is offered via [minimal-ed448](https://crates.io/crates/minimal-ed448), an
explicitly not recommended, unaudited Ed448 implementation, limited to its explicitly not recommended, unaudited, incomplete Ed448 implementation, limited
prime-order subgroup. to its prime-order subgroup.
Its `hash_to_F` is the wide reduction of SHAKE256, with a 114-byte output, as Its `hash_to_F` is the wide reduction of SHAKE256, with a 114-byte output, as
used in [RFC-8032](https://www.rfc-editor.org/rfc/rfc8032). The used in [RFC-8032](https://www.rfc-editor.org/rfc/rfc8032). The

View File

@@ -17,8 +17,6 @@ macro_rules! dalek_curve {
) => { ) => {
use dalek_ff_group::$Point; use dalek_ff_group::$Point;
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct $Ciphersuite;
impl Ciphersuite for $Ciphersuite { impl Ciphersuite for $Ciphersuite {
type F = Scalar; type F = Scalar;
type G = $Point; type G = $Point;
@@ -37,12 +35,20 @@ macro_rules! dalek_curve {
}; };
} }
/// Ciphersuite for Ristretto.
///
/// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition
/// between the two. This means `dst: b"abc", data: b"def"`, will produce the same scalar as
/// `dst: "abcdef", data: b""`. Please use carefully, not letting dsts be substrings of each other.
#[cfg(any(test, feature = "ristretto"))]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct Ristretto;
#[cfg(any(test, feature = "ristretto"))] #[cfg(any(test, feature = "ristretto"))]
dalek_curve!("ristretto", Ristretto, RistrettoPoint, b"ristretto"); dalek_curve!("ristretto", Ristretto, RistrettoPoint, b"ristretto");
#[cfg(any(test, feature = "ristretto"))] #[cfg(any(test, feature = "ristretto"))]
#[test] #[test]
fn test_ristretto() { fn test_ristretto() {
ff_group_tests::group::test_prime_group_bits::<RistrettoPoint>(); ff_group_tests::group::test_prime_group_bits::<_, RistrettoPoint>(&mut rand_core::OsRng);
assert_eq!( assert_eq!(
Ristretto::hash_to_F( Ristretto::hash_to_F(
@@ -60,12 +66,20 @@ fn test_ristretto() {
); );
} }
/// Ciphersuite for Ed25519.
///
/// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition
/// between the two. This means `dst: b"abc", data: b"def"`, will produce the same scalar as
/// `dst: "abcdef", data: b""`. Please use carefully, not letting dsts be substrings of each other.
#[cfg(feature = "ed25519")]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct Ed25519;
#[cfg(feature = "ed25519")] #[cfg(feature = "ed25519")]
dalek_curve!("ed25519", Ed25519, EdwardsPoint, b"edwards25519"); dalek_curve!("ed25519", Ed25519, EdwardsPoint, b"edwards25519");
#[cfg(feature = "ed25519")] #[cfg(feature = "ed25519")]
#[test] #[test]
fn test_ed25519() { fn test_ed25519() {
ff_group_tests::group::test_prime_group_bits::<EdwardsPoint>(); ff_group_tests::group::test_prime_group_bits::<_, EdwardsPoint>(&mut rand_core::OsRng);
// Ideally, a test vector from RFC-8032 (not FROST) would be here // Ideally, a test vector from RFC-8032 (not FROST) would be here
// Unfortunately, the IETF draft doesn't provide any vectors for the derived challenges // Unfortunately, the IETF draft doesn't provide any vectors for the derived challenges

View File

@@ -11,7 +11,7 @@ use minimal_ed448::{scalar::Scalar, point::Point};
use crate::Ciphersuite; use crate::Ciphersuite;
// Re-define Shake256 as a traditional Digest to meet API expectations /// Shake256, fixed to a 114-byte output, as used by Ed448.
#[derive(Clone, Default)] #[derive(Clone, Default)]
pub struct Shake256_114(Shake256); pub struct Shake256_114(Shake256);
impl BlockSizeUser for Shake256_114 { impl BlockSizeUser for Shake256_114 {
@@ -48,6 +48,11 @@ impl FixedOutput for Shake256_114 {
} }
impl HashMarker for Shake256_114 {} impl HashMarker for Shake256_114 {}
/// Ciphersuite for Ed448.
///
/// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition
/// between the two. This means `dst: b"abc", data: b"def"`, will produce the same scalar as
/// `dst: "abcdef", data: b""`. Please use carefully, not letting dsts be substrings of each other.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)] #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct Ed448; pub struct Ed448;
impl Ciphersuite for Ed448 { impl Ciphersuite for Ed448 {

View File

@@ -1,12 +1,12 @@
use zeroize::Zeroize; use zeroize::Zeroize;
use sha2::{Digest, Sha256}; use sha2::Sha256;
use group::ff::{Field, PrimeField}; use group::ff::{Field, PrimeField};
use elliptic_curve::{ use elliptic_curve::{
generic_array::GenericArray, generic_array::GenericArray,
bigint::{Encoding, U384}, bigint::{CheckedAdd, Encoding, U384},
hash2curve::{Expander, ExpandMsg, ExpandMsgXmd}, hash2curve::{Expander, ExpandMsg, ExpandMsgXmd},
}; };
@@ -20,8 +20,6 @@ macro_rules! kp_curve {
$Ciphersuite: ident, $Ciphersuite: ident,
$ID: literal $ID: literal
) => { ) => {
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct $Ciphersuite;
impl Ciphersuite for $Ciphersuite { impl Ciphersuite for $Ciphersuite {
type F = $lib::Scalar; type F = $lib::Scalar;
type G = $lib::ProjectivePoint; type G = $lib::ProjectivePoint;
@@ -34,19 +32,44 @@ macro_rules! kp_curve {
} }
fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F { fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F {
let mut dst = dst;
let oversize = Sha256::digest([b"H2C-OVERSIZE-DST-".as_ref(), dst].concat());
if dst.len() > 255 {
dst = oversize.as_ref();
}
// While one of these two libraries does support directly hashing to the Scalar field, the // While one of these two libraries does support directly hashing to the Scalar field, the
// other doesn't. While that's probably an oversight, this is a universally working method // other doesn't. While that's probably an oversight, this is a universally working method
let mut modulus = [0; 48];
modulus[16 ..].copy_from_slice(&(Self::F::zero() - Self::F::one()).to_bytes());
let modulus = U384::from_be_slice(&modulus).wrapping_add(&U384::ONE);
let mut unreduced = U384::from_be_bytes({ // This method is from
// https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html
// Specifically, Section 5
// While that draft, overall, is intended for hashing to curves, that necessitates
// detailing how to hash to a finite field. The draft comments that its mechanism for
// doing so, which it uses to derive field elements, is also applicable to the scalar field
// The hash_to_field function is intended to provide unbiased values
// In order to do so, a wide reduction from an extra k bits is applied, minimizing bias to
// 2^-k
// k is intended to be the bits of security of the suite, which is 128 for secp256k1 and
// P-256
const K: usize = 128;
// L is the amount of bytes of material which should be used in the wide reduction
// The 256 is for the bit-length of the primes, rounded up to the nearest byte threshold
// This is a simplification of the formula from the end of section 5
const L: usize = (256 + K) / 8; // 48
// In order to perform this reduction, we need to use 48-byte numbers
// First, convert the modulus to a 48-byte number
// This is done by getting -1 as bytes, parsing it into a U384, and then adding back one
let mut modulus = [0; L];
// The byte repr of scalars will be 32 big-endian bytes
// Set the lower 32 bytes of our 48-byte array accordingly
modulus[16 ..].copy_from_slice(&(Self::F::zero() - Self::F::one()).to_bytes());
// Use a checked_add + unwrap since this addition cannot fail (being a 32-byte value with
// 48-bytes of space)
// While a non-panicking saturating_add/wrapping_add could be used, they'd likely be less
// performant
let modulus = U384::from_be_slice(&modulus).checked_add(&U384::ONE).unwrap();
// The defined P-256 and secp256k1 ciphersuites both use expand_message_xmd
let mut wide = U384::from_be_bytes({
let mut bytes = [0; 48]; let mut bytes = [0; 48];
ExpandMsgXmd::<Sha256>::expand_message(&[msg], dst, 48).unwrap().fill_bytes(&mut bytes); ExpandMsgXmd::<Sha256>::expand_message(&[msg], dst, 48).unwrap().fill_bytes(&mut bytes);
bytes bytes
@@ -55,9 +78,12 @@ macro_rules! kp_curve {
.unwrap() .unwrap()
.to_be_bytes(); .to_be_bytes();
let mut array = *GenericArray::from_slice(&unreduced[16 ..]); // Now that this has been reduced back to a 32-byte value, grab the lower 32-bytes
let mut array = *GenericArray::from_slice(&wide[16 ..]);
let res = $lib::Scalar::from_repr(array).unwrap(); let res = $lib::Scalar::from_repr(array).unwrap();
unreduced.zeroize();
// Zeroize the temp values we can due to the possibility hash_to_F is being used for nonces
wide.zeroize();
array.zeroize(); array.zeroize();
res res
} }
@@ -65,15 +91,36 @@ macro_rules! kp_curve {
}; };
} }
#[cfg(test)]
fn test_oversize_dst<C: Ciphersuite>() {
use sha2::Digest;
// The draft specifies DSTs >255 bytes should be hashed into a 32-byte DST
let oversize_dst = [0x00; 256];
let actual_dst = Sha256::digest([b"H2C-OVERSIZE-DST-".as_ref(), &oversize_dst].concat());
// Test the hash_to_F function handles this
// If it didn't, these would return different values
assert_eq!(C::hash_to_F(&oversize_dst, &[]), C::hash_to_F(&actual_dst, &[]));
}
/// Ciphersuite for Secp256k1.
///
/// hash_to_F is implemented via the IETF draft for hash to curve's hash_to_field (v16).
#[cfg(feature = "secp256k1")]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct Secp256k1;
#[cfg(feature = "secp256k1")] #[cfg(feature = "secp256k1")]
kp_curve!("secp256k1", k256, Secp256k1, b"secp256k1"); kp_curve!("secp256k1", k256, Secp256k1, b"secp256k1");
#[cfg(feature = "secp256k1")] #[cfg(feature = "secp256k1")]
#[test] #[test]
fn test_secp256k1() { fn test_secp256k1() {
ff_group_tests::group::test_prime_group_bits::<k256::ProjectivePoint>(); ff_group_tests::group::test_prime_group_bits::<_, k256::ProjectivePoint>(&mut rand_core::OsRng);
// Ideally, a test vector from hash to field (not FROST) would be here // Ideally, a test vector from hash_to_field (not FROST) would be here
// Unfortunately, the IETF draft only provides vectors for field elements, not scalars // Unfortunately, the IETF draft only provides vectors for field elements, not scalars
// Vectors have been requested in
// https://github.com/cfrg/draft-irtf-cfrg-hash-to-curve/issues/343
assert_eq!( assert_eq!(
Secp256k1::hash_to_F( Secp256k1::hash_to_F(
b"FROST-secp256k1-SHA256-v11nonce", b"FROST-secp256k1-SHA256-v11nonce",
@@ -90,14 +137,22 @@ fn test_secp256k1() {
.collect::<Vec<_>>(), .collect::<Vec<_>>(),
hex::decode("acc83278035223c1ba464e2d11bfacfc872b2b23e1041cf5f6130da21e4d8068").unwrap() hex::decode("acc83278035223c1ba464e2d11bfacfc872b2b23e1041cf5f6130da21e4d8068").unwrap()
); );
test_oversize_dst::<Secp256k1>();
} }
/// Ciphersuite for P-256.
///
/// hash_to_F is implemented via the IETF draft for hash to curve's hash_to_field (v16).
#[cfg(feature = "p256")]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct P256;
#[cfg(feature = "p256")] #[cfg(feature = "p256")]
kp_curve!("p256", p256, P256, b"P-256"); kp_curve!("p256", p256, P256, b"P-256");
#[cfg(feature = "p256")] #[cfg(feature = "p256")]
#[test] #[test]
fn test_p256() { fn test_p256() {
ff_group_tests::group::test_prime_group_bits::<p256::ProjectivePoint>(); ff_group_tests::group::test_prime_group_bits::<_, p256::ProjectivePoint>(&mut rand_core::OsRng);
assert_eq!( assert_eq!(
P256::hash_to_F( P256::hash_to_F(
@@ -115,4 +170,6 @@ f4e8cf80aec3f888d997900ac7e3e349944b5a6b47649fc32186d2f1238103c6\
.collect::<Vec<_>>(), .collect::<Vec<_>>(),
hex::decode("f871dfcf6bcd199342651adc361b92c941cb6a0d8c8c1a3b91d79e2c1bf3722d").unwrap() hex::decode("f871dfcf6bcd199342651adc361b92c941cb6a0d8c8c1a3b91d79e2c1bf3722d").unwrap()
); );
test_oversize_dst::<P256>();
} }

View File

@@ -11,8 +11,10 @@ use rand_core::{RngCore, CryptoRng};
use zeroize::Zeroize; use zeroize::Zeroize;
use subtle::ConstantTimeEq; use subtle::ConstantTimeEq;
use digest::{core_api::BlockSizeUser, Digest}; use digest::{core_api::BlockSizeUser, Digest, HashMarker};
use transcript::SecureDigest;
pub use group;
use group::{ use group::{
ff::{Field, PrimeField, PrimeFieldBits}, ff::{Field, PrimeField, PrimeFieldBits},
Group, GroupOps, Group, GroupOps,
@@ -41,7 +43,9 @@ mod ed448;
pub use ed448::*; pub use ed448::*;
/// Unified trait defining a ciphersuite around an elliptic curve. /// Unified trait defining a ciphersuite around an elliptic curve.
pub trait Ciphersuite: Clone + Copy + PartialEq + Eq + Debug + Zeroize { pub trait Ciphersuite:
'static + Send + Sync + Clone + Copy + PartialEq + Eq + Debug + Zeroize
{
/// Scalar field element type. /// Scalar field element type.
// This is available via G::Scalar yet `C::G::Scalar` is ambiguous, forcing horrific accesses // This is available via G::Scalar yet `C::G::Scalar` is ambiguous, forcing horrific accesses
type F: PrimeField + PrimeFieldBits + Zeroize; type F: PrimeField + PrimeFieldBits + Zeroize;
@@ -49,7 +53,7 @@ pub trait Ciphersuite: Clone + Copy + PartialEq + Eq + Debug + Zeroize {
type G: Group<Scalar = Self::F> + GroupOps + PrimeGroup + Zeroize + ConstantTimeEq; type G: Group<Scalar = Self::F> + GroupOps + PrimeGroup + Zeroize + ConstantTimeEq;
/// Hash algorithm used with this curve. /// Hash algorithm used with this curve.
// Requires BlockSizeUser so it can be used within Hkdf which requies that. // Requires BlockSizeUser so it can be used within Hkdf which requies that.
type H: Clone + BlockSizeUser + Digest; type H: Send + Clone + BlockSizeUser + Digest + HashMarker + SecureDigest;
/// ID for this curve. /// ID for this curve.
const ID: &'static [u8]; const ID: &'static [u8];
@@ -90,9 +94,7 @@ pub trait Ciphersuite: Clone + Copy + PartialEq + Eq + Debug + Zeroize {
// ff mandates this is canonical // ff mandates this is canonical
let res = Option::<Self::F>::from(Self::F::from_repr(encoding)) let res = Option::<Self::F>::from(Self::F::from_repr(encoding))
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "non-canonical scalar")); .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "non-canonical scalar"));
for b in encoding.as_mut() { encoding.as_mut().zeroize();
b.zeroize();
}
res res
} }

View File

@@ -16,14 +16,19 @@ rustdoc-args = ["--cfg", "docsrs"]
rand_core = "0.6" rand_core = "0.6"
digest = "0.10" digest = "0.10"
zeroize = { version = "1.5", features = ["zeroize_derive"] } zeroize = { version = "^1.5", features = ["zeroize_derive"] }
subtle = "2.4" subtle = "^2.4"
ff = "0.12" ff = { version = "0.12", features = ["bits"] }
group = "0.12" group = "0.12"
crypto-bigint = "0.4" crypto-bigint = "0.4"
curve25519-dalek = "3.2"
sha2 = "0.9"
curve25519-dalek = "^3.2"
[dev-dependencies] [dev-dependencies]
ff-group-tests = { path = "../ff-group-tests" } ff-group-tests = { path = "../ff-group-tests" }
[features]
black_box = []

View File

@@ -1,5 +1,6 @@
use core::ops::{Add, AddAssign, Sub, SubAssign, Neg, Mul, MulAssign}; use core::ops::{DerefMut, Add, AddAssign, Sub, SubAssign, Neg, Mul, MulAssign};
use zeroize::Zeroize;
use rand_core::RngCore; use rand_core::RngCore;
use subtle::{ use subtle::{
@@ -9,34 +10,73 @@ use subtle::{
use crypto_bigint::{Integer, Encoding, U256, U512}; use crypto_bigint::{Integer, Encoding, U256, U512};
use ff::{Field, PrimeField, FieldBits, PrimeFieldBits}; use group::ff::{Field, PrimeField, FieldBits, PrimeFieldBits};
use crate::{constant_time, math, from_uint}; use crate::{u8_from_bool, constant_time, math, from_uint};
const MODULUS: U256 = // 2^255 - 19
U256::from_be_hex("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed"); // Uses saturating_sub because checked_sub isn't available at compile time
const MODULUS: U256 = U256::from_u8(1).shl_vartime(255).saturating_sub(&U256::from_u8(19));
const WIDE_MODULUS: U512 = U512::from_be_hex(concat!( const WIDE_MODULUS: U512 = U256::ZERO.concat(&MODULUS);
"0000000000000000000000000000000000000000000000000000000000000000",
"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed"
));
#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)] #[derive(Clone, Copy, PartialEq, Eq, Default, Debug)]
pub struct FieldElement(U256); pub struct FieldElement(U256);
pub const MOD_3_8: FieldElement = /*
FieldElement(MODULUS.saturating_add(&U256::from_u8(3)).wrapping_div(&U256::from_u8(8))); The following is a valid const definition of sqrt(-1) yet exceeds the const_eval_limit by 24x.
Accordingly, it'd only be usable on a nightly compiler with the following crate attributes:
#![feature(const_eval_limit)]
#![const_eval_limit = "24000000"]
pub const MOD_5_8: FieldElement = FieldElement(MOD_3_8.0.saturating_sub(&U256::ONE)); const SQRT_M1: FieldElement = {
// Formula from RFC-8032 (modp_sqrt_m1/sqrt8k5 z)
// 2 ** ((MODULUS - 1) // 4) % MODULUS
let base = U256::from_u8(2);
let exp = MODULUS.saturating_sub(&U256::from_u8(1)).wrapping_div(&U256::from_u8(4));
pub const EDWARDS_D: FieldElement = FieldElement(U256::from_be_hex( const fn mul(x: U256, y: U256) -> U256 {
"52036cee2b6ffe738cc740797779e89800700a4d4141d8ab75eb4dca135978a3", let wide = U256::mul_wide(&x, &y);
)); let wide = U256::concat(&wide.1, &wide.0);
wide.wrapping_rem(&WIDE_MODULUS).split().1
}
pub const SQRT_M1: FieldElement = FieldElement(U256::from_be_hex( // Perform the pow via multiply and square
let mut res = U256::ONE;
// Iterate from highest bit to lowest bit
let mut bit = 255;
loop {
if bit != 255 {
res = mul(res, res);
}
// Reverse from little endian to big endian
if exp.bit_vartime(bit) == 1 {
res = mul(res, base);
}
if bit == 0 {
break;
}
bit -= 1;
}
FieldElement(res)
};
*/
// Use a constant since we can't calculate it at compile-time without a nightly compiler
// Even without const_eval_limit, it'd take ~30s to calculate, which isn't worth it
const SQRT_M1: FieldElement = FieldElement(U256::from_be_hex(
"2b8324804fc1df0b2b4d00993dfbd7a72f431806ad2fe478c4ee1b274a0ea0b0", "2b8324804fc1df0b2b4d00993dfbd7a72f431806ad2fe478c4ee1b274a0ea0b0",
)); ));
// Constant useful in calculating square roots (RFC-8032 sqrt8k5's exponent used to calculate y)
const MOD_3_8: FieldElement =
FieldElement(MODULUS.saturating_add(&U256::from_u8(3)).wrapping_div(&U256::from_u8(8)));
// Constant useful in sqrt_ratio_i (sqrt(u / v))
const MOD_5_8: FieldElement = FieldElement(MOD_3_8.0.saturating_sub(&U256::ONE));
fn reduce(x: U512) -> U256 { fn reduce(x: U512) -> U256 {
U256::from_le_slice(&x.reduce(&WIDE_MODULUS).unwrap().to_le_bytes()[.. 32]) U256::from_le_slice(&x.reduce(&WIDE_MODULUS).unwrap().to_le_bytes()[.. 32])
} }
@@ -93,6 +133,7 @@ impl Field for FieldElement {
CtOption::new(self.pow(NEG_2), !self.is_zero()) CtOption::new(self.pow(NEG_2), !self.is_zero())
} }
// RFC-8032 sqrt8k5
fn sqrt(&self) -> CtOption<Self> { fn sqrt(&self) -> CtOption<Self> {
let tv1 = self.pow(MOD_3_8); let tv1 = self.pow(MOD_3_8);
let tv2 = tv1 * SQRT_M1; let tv2 = tv1 * SQRT_M1;
@@ -113,14 +154,20 @@ impl PrimeField for FieldElement {
self.0.to_le_bytes() self.0.to_le_bytes()
} }
// This was set per the specification in the ff crate docs
// The number of leading zero bits in the little-endian bit representation of (modulus - 1)
const S: u32 = 2; const S: u32 = 2;
fn is_odd(&self) -> Choice { fn is_odd(&self) -> Choice {
self.0.is_odd() self.0.is_odd()
} }
fn multiplicative_generator() -> Self { fn multiplicative_generator() -> Self {
// This was calculated with the method from the ff crate docs
// SageMath GF(modulus).primitive_element()
2u64.into() 2u64.into()
} }
fn root_of_unity() -> Self { fn root_of_unity() -> Self {
// This was calculated via the formula from the ff crate docs
// Self::multiplicative_generator() ** ((modulus - 1) >> Self::S)
FieldElement(U256::from_be_hex( FieldElement(U256::from_be_hex(
"2b8324804fc1df0b2b4d00993dfbd7a72f431806ad2fe478c4ee1b274a0ea0b0", "2b8324804fc1df0b2b4d00993dfbd7a72f431806ad2fe478c4ee1b274a0ea0b0",
)) ))
@@ -154,10 +201,11 @@ impl FieldElement {
let mut res = FieldElement::one(); let mut res = FieldElement::one();
let mut bits = 0; let mut bits = 0;
for (i, bit) in other.to_le_bits().iter().rev().enumerate() { for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() {
bits <<= 1; bits <<= 1;
let bit = u8::from(*bit); let mut bit = u8_from_bool(bit.deref_mut());
bits |= bit; bits |= bit;
bit.zeroize();
if ((i + 1) % 4) == 0 { if ((i + 1) % 4) == 0 {
if i != 3 { if i != 3 {
@@ -172,28 +220,68 @@ impl FieldElement {
res res
} }
/// The square root of u/v, as used for Ed25519 point decoding (RFC 8032 5.1.3) and within
/// Ristretto (5.1 Extracting an Inverse Square Root).
///
/// The result is only a valid square root if the Choice is true.
/// RFC 8032 simply fails if there isn't a square root, leaving any return value undefined.
/// Ristretto explicitly returns 0 or sqrt((SQRT_M1 * u) / v).
pub fn sqrt_ratio_i(u: FieldElement, v: FieldElement) -> (Choice, FieldElement) { pub fn sqrt_ratio_i(u: FieldElement, v: FieldElement) -> (Choice, FieldElement) {
let i = SQRT_M1; let i = SQRT_M1;
let v3 = v.square() * v; let v3 = v.square() * v;
let v7 = v3.square() * v; let v7 = v3.square() * v;
// Candidate root
let mut r = (u * v3) * (u * v7).pow(MOD_5_8); let mut r = (u * v3) * (u * v7).pow(MOD_5_8);
// 8032 3.1
let check = v * r.square(); let check = v * r.square();
let correct_sign = check.ct_eq(&u); let correct_sign = check.ct_eq(&u);
let flipped_sign = check.ct_eq(&(-u)); // 8032 3.2 conditional
let flipped_sign_i = check.ct_eq(&((-u) * i)); let neg_u = -u;
let flipped_sign = check.ct_eq(&neg_u);
// Ristretto Step 5
let flipped_sign_i = check.ct_eq(&(neg_u * i));
// 3.2 set
r.conditional_assign(&(r * i), flipped_sign | flipped_sign_i); r.conditional_assign(&(r * i), flipped_sign | flipped_sign_i);
let r_is_negative = r.is_odd(); // Always return the even root, per Ristretto
r.conditional_negate(r_is_negative); // This doesn't break Ed25519 point decoding as that doesn't expect these steps to return a
// specific root
// Ed25519 points include a dedicated sign bit to determine which root to use, so at worst
// this is a pointless inefficiency
r.conditional_negate(r.is_odd());
(correct_sign | flipped_sign, r) (correct_sign | flipped_sign, r)
} }
} }
#[test] #[test]
fn test_field() { fn test_wide_modulus() {
ff_group_tests::prime_field::test_prime_field_bits::<FieldElement>(); let mut wide = [0; 64];
wide[.. 32].copy_from_slice(&MODULUS.to_le_bytes());
assert_eq!(wide, WIDE_MODULUS.to_le_bytes());
}
#[test]
fn test_sqrt_m1() {
// Test equivalence against the known constant value
const SQRT_M1_MAGIC: U256 =
U256::from_be_hex("2b8324804fc1df0b2b4d00993dfbd7a72f431806ad2fe478c4ee1b274a0ea0b0");
assert_eq!(SQRT_M1.0, SQRT_M1_MAGIC);
// Also test equivalence against the result of the formula from RFC-8032 (modp_sqrt_m1/sqrt8k5 z)
// 2 ** ((MODULUS - 1) // 4) % MODULUS
assert_eq!(
SQRT_M1,
FieldElement::from(2u8).pow(FieldElement(
(FieldElement::zero() - FieldElement::one()).0.wrapping_div(&U256::from(4u8))
))
);
}
#[test]
fn test_field() {
ff_group_tests::prime_field::test_prime_field_bits::<_, FieldElement>(&mut rand_core::OsRng);
} }

View File

@@ -2,9 +2,10 @@
#![no_std] #![no_std]
use core::{ use core::{
ops::{Deref, Add, AddAssign, Sub, SubAssign, Neg, Mul, MulAssign},
borrow::Borrow, borrow::Borrow,
ops::{Deref, DerefMut, Add, AddAssign, Sub, SubAssign, Neg, Mul, MulAssign},
iter::{Iterator, Sum}, iter::{Iterator, Sum},
hash::{Hash, Hasher},
}; };
use zeroize::Zeroize; use zeroize::Zeroize;
@@ -32,14 +33,40 @@ use dalek::{
}, },
}; };
use ff::{Field, PrimeField, FieldBits, PrimeFieldBits}; use group::{
use group::{Group, GroupEncoding, prime::PrimeGroup}; ff::{Field, PrimeField, FieldBits, PrimeFieldBits},
Group, GroupEncoding,
prime::PrimeGroup,
};
pub mod field; pub mod field;
// Feature gated due to MSRV requirements
#[cfg(feature = "black_box")]
pub(crate) fn black_box<T>(val: T) -> T {
core::hint::black_box(val)
}
#[cfg(not(feature = "black_box"))]
pub(crate) fn black_box<T>(val: T) -> T {
val
}
fn u8_from_bool(bit_ref: &mut bool) -> u8 {
let bit_ref = black_box(bit_ref);
let mut bit = black_box(*bit_ref);
let res = black_box(bit as u8);
bit.zeroize();
debug_assert!((res | 1) == 1);
bit_ref.zeroize();
res
}
// Convert a boolean to a Choice in a *presumably* constant time manner // Convert a boolean to a Choice in a *presumably* constant time manner
fn choice(value: bool) -> Choice { fn choice(mut value: bool) -> Choice {
Choice::from(u8::from(value)) Choice::from(u8_from_bool(&mut value))
} }
macro_rules! deref_borrow { macro_rules! deref_borrow {
@@ -177,6 +204,7 @@ constant_time!(Scalar, DScalar);
math_neg!(Scalar, Scalar, DScalar::add, DScalar::sub, DScalar::mul); math_neg!(Scalar, Scalar, DScalar::add, DScalar::sub, DScalar::mul);
from_uint!(Scalar, DScalar); from_uint!(Scalar, DScalar);
// Ed25519 order/scalar modulus
const MODULUS: U256 = const MODULUS: U256 =
U256::from_be_hex("1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed"); U256::from_be_hex("1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed");
@@ -190,10 +218,11 @@ impl Scalar {
let mut res = Scalar::one(); let mut res = Scalar::one();
let mut bits = 0; let mut bits = 0;
for (i, bit) in other.to_le_bits().iter().rev().enumerate() { for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() {
bits <<= 1; bits <<= 1;
let bit = u8::from(*bit); let mut bit = u8_from_bool(bit.deref_mut());
bits |= bit; bits |= bit;
bit.zeroize();
if ((i + 1) % 4) == 0 { if ((i + 1) % 4) == 0 {
if i != 3 { if i != 3 {
@@ -272,19 +301,36 @@ impl PrimeField for Scalar {
self.0.to_bytes() self.0.to_bytes()
} }
// This was set per the specification in the ff crate docs
// The number of leading zero bits in the little-endian bit representation of (modulus - 1)
const S: u32 = 2; const S: u32 = 2;
fn is_odd(&self) -> Choice { fn is_odd(&self) -> Choice {
choice(self.to_le_bits()[0]) // This is probably overkill? Yet it's better safe than sorry since this is a complete
// decomposition of the scalar
let mut bits = self.to_le_bits();
let res = choice(bits[0]);
// This shouldn't need mut since it should be a mutable reference
// Per the bitvec docs, writing through a derefence requires mut, writing through one of its
// methods does not
// We do not use one of its methods to ensure we write via zeroize
for mut bit in bits.iter_mut() {
bit.deref_mut().zeroize();
}
res
} }
fn multiplicative_generator() -> Self { fn multiplicative_generator() -> Self {
// This was calculated with the method from the ff crate docs
// SageMath GF(modulus).primitive_element()
2u64.into() 2u64.into()
} }
fn root_of_unity() -> Self { fn root_of_unity() -> Self {
const ROOT: [u8; 32] = [ // This was calculated via the formula from the ff crate docs
// Self::multiplicative_generator() ** ((modulus - 1) >> Self::S)
Scalar::from_repr([
212, 7, 190, 235, 223, 117, 135, 190, 254, 131, 206, 66, 83, 86, 240, 14, 122, 194, 193, 171, 212, 7, 190, 235, 223, 117, 135, 190, 254, 131, 206, 66, 83, 86, 240, 14, 122, 194, 193, 171,
96, 109, 61, 125, 231, 129, 121, 224, 16, 115, 74, 9, 96, 109, 61, 125, 231, 129, 121, 224, 16, 115, 74, 9,
]; ])
Scalar::from_repr(ROOT).unwrap() .unwrap()
} }
} }
@@ -347,11 +393,12 @@ macro_rules! dalek_group {
type Scalar = Scalar; type Scalar = Scalar;
fn random(mut rng: impl RngCore) -> Self { fn random(mut rng: impl RngCore) -> Self {
loop { loop {
let mut bytes = field::FieldElement::random(&mut rng).to_repr(); let mut bytes = [0; 64];
bytes[31] |= u8::try_from(rng.next_u32() % 2).unwrap() << 7; rng.fill_bytes(&mut bytes);
let opt = Self::from_bytes(&bytes); let point = $Point($DPoint::hash_from_bytes::<sha2::Sha512>(&bytes));
if opt.is_some().into() { // Ban identity, per the trait specification
return opt.unwrap(); if !bool::from(point.is_identity()) {
return point;
} }
} }
} }
@@ -402,6 +449,17 @@ macro_rules! dalek_group {
$Point(&b.0 * &self.0) $Point(&b.0 * &self.0)
} }
} }
// Support being used as a key in a table
// While it is expensive as a key, due to the field operations required, there's frequently
// use cases for public key -> value lookups
#[allow(unknown_lints, renamed_and_removed_lints)]
#[allow(clippy::derived_hash_with_manual_eq, clippy::derive_hash_xor_eq)]
impl Hash for $Point {
fn hash<H: Hasher>(&self, state: &mut H) {
self.to_bytes().hash(state);
}
}
}; };
} }
@@ -433,12 +491,17 @@ dalek_group!(
RISTRETTO_BASEPOINT_TABLE RISTRETTO_BASEPOINT_TABLE
); );
#[test]
fn test_scalar_modulus() {
assert_eq!(MODULUS.to_le_bytes(), curve25519_dalek::constants::BASEPOINT_ORDER.to_bytes());
}
#[test] #[test]
fn test_ed25519_group() { fn test_ed25519_group() {
ff_group_tests::group::test_prime_group_bits::<EdwardsPoint>(); ff_group_tests::group::test_prime_group_bits::<_, EdwardsPoint>(&mut rand_core::OsRng);
} }
#[test] #[test]
fn test_ristretto_group() { fn test_ristretto_group() {
ff_group_tests::group::test_prime_group_bits::<RistrettoPoint>(); ff_group_tests::group::test_prime_group_bits::<_, RistrettoPoint>(&mut rand_core::OsRng);
} }

View File

@@ -17,17 +17,15 @@ thiserror = "1"
rand_core = "0.6" rand_core = "0.6"
zeroize = { version = "1.5", features = ["zeroize_derive"] } zeroize = { version = "^1.5", features = ["zeroize_derive"] }
subtle = "2"
hex = "0.4" serde = { version = "1", features = ["derive"], optional = true }
transcript = { package = "flexible-transcript", path = "../transcript", version = "0.2", features = ["recommended"] } transcript = { package = "flexible-transcript", path = "../transcript", version = "0.2", features = ["recommended"] }
chacha20 = { version = "0.9", features = ["zeroize"] } chacha20 = { version = "0.9", features = ["zeroize"] }
group = "0.12"
multiexp = { path = "../multiexp", version = "0.2", features = ["batch"] }
ciphersuite = { path = "../ciphersuite", version = "0.1", features = ["std"] } ciphersuite = { path = "../ciphersuite", version = "0.1", features = ["std"] }
multiexp = { path = "../multiexp", version = "0.2", features = ["batch"] }
schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "0.2" } schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "0.2" }
dleq = { path = "../dleq", version = "0.2", features = ["serialize"] } dleq = { path = "../dleq", version = "0.2", features = ["serialize"] }
@@ -36,4 +34,5 @@ dleq = { path = "../dleq", version = "0.2", features = ["serialize"] }
ciphersuite = { path = "../ciphersuite", version = "0.1", features = ["std", "ristretto"] } ciphersuite = { path = "../ciphersuite", version = "0.1", features = ["std", "ristretto"] }
[features] [features]
serde = ["dep:serde"]
tests = [] tests = []

View File

@@ -1,6 +1,5 @@
use core::fmt::Debug; use core::{ops::Deref, fmt};
use std::{ use std::{
ops::Deref,
io::{self, Read, Write}, io::{self, Read, Write},
collections::HashMap, collections::HashMap,
}; };
@@ -18,15 +17,14 @@ use chacha20::{
use transcript::{Transcript, RecommendedTranscript}; use transcript::{Transcript, RecommendedTranscript};
#[cfg(test)] #[cfg(test)]
use group::ff::Field; use ciphersuite::group::ff::Field;
use group::GroupEncoding; use ciphersuite::{group::GroupEncoding, Ciphersuite};
use ciphersuite::Ciphersuite;
use multiexp::BatchVerifier; use multiexp::BatchVerifier;
use schnorr::SchnorrSignature; use schnorr::SchnorrSignature;
use dleq::DLEqProof; use dleq::DLEqProof;
use crate::ThresholdParams; use crate::{Participant, ThresholdParams};
pub trait ReadWrite: Sized { pub trait ReadWrite: Sized {
fn read<R: Read>(reader: &mut R, params: ThresholdParams) -> io::Result<Self>; fn read<R: Read>(reader: &mut R, params: ThresholdParams) -> io::Result<Self>;
@@ -39,8 +37,8 @@ pub trait ReadWrite: Sized {
} }
} }
pub trait Message: Clone + PartialEq + Eq + Debug + Zeroize + ReadWrite {} pub trait Message: Clone + PartialEq + Eq + fmt::Debug + Zeroize + ReadWrite {}
impl<M: Clone + PartialEq + Eq + Debug + Zeroize + ReadWrite> Message for M {} impl<M: Clone + PartialEq + Eq + fmt::Debug + Zeroize + ReadWrite> Message for M {}
/// Wraps a message with a key to use for encryption in the future. /// Wraps a message with a key to use for encryption in the future.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)] #[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
@@ -66,7 +64,7 @@ impl<C: Ciphersuite, M: Message> EncryptionKeyMessage<C, M> {
buf buf
} }
// Used by tests #[cfg(any(test, feature = "tests"))]
pub(crate) fn enc_key(&self) -> C::G { pub(crate) fn enc_key(&self) -> C::G {
self.enc_key self.enc_key
} }
@@ -96,11 +94,15 @@ fn ecdh<C: Ciphersuite>(private: &Zeroizing<C::F>, public: C::G) -> Zeroizing<C:
Zeroizing::new(public * private.deref()) Zeroizing::new(public * private.deref())
} }
fn cipher<C: Ciphersuite>(dst: &'static [u8], ecdh: &Zeroizing<C::G>) -> ChaCha20 { // Each ecdh must be distinct. Reuse of an ecdh for multiple ciphers will cause the messages to be
// leaked.
fn cipher<C: Ciphersuite>(context: &str, ecdh: &Zeroizing<C::G>) -> ChaCha20 {
// Ideally, we'd box this transcript with ZAlloc, yet that's only possible on nightly // Ideally, we'd box this transcript with ZAlloc, yet that's only possible on nightly
// TODO: https://github.com/serai-dex/serai/issues/151 // TODO: https://github.com/serai-dex/serai/issues/151
let mut transcript = RecommendedTranscript::new(b"DKG Encryption v0.2"); let mut transcript = RecommendedTranscript::new(b"DKG Encryption v0.2");
transcript.domain_separate(dst); transcript.append_message(b"context", context.as_bytes());
transcript.domain_separate(b"encryption_key");
let mut ecdh = ecdh.to_bytes(); let mut ecdh = ecdh.to_bytes();
transcript.append_message(b"shared_key", ecdh.as_ref()); transcript.append_message(b"shared_key", ecdh.as_ref());
@@ -113,25 +115,25 @@ fn cipher<C: Ciphersuite>(dst: &'static [u8], ecdh: &Zeroizing<C::G>) -> ChaCha2
key.copy_from_slice(&challenge[.. 32]); key.copy_from_slice(&challenge[.. 32]);
zeroize(challenge.as_mut()); zeroize(challenge.as_mut());
// The RecommendedTranscript isn't vulnerable to length extension attacks, yet if it was, // Since the key is single-use, it doesn't matter what we use for the IV
// it'd make sense to clone it (and fork it) just to hedge against that // The isssue is key + IV reuse. If we never reuse the key, we can't have the opportunity to
// reuse a nonce
// Use a static IV in acknowledgement of this
let mut iv = Cc20Iv::default(); let mut iv = Cc20Iv::default();
let mut challenge = transcript.challenge(b"iv"); // The \0 is to satisfy the length requirement (12), not to be null terminated
iv.copy_from_slice(&challenge[.. 12]); iv.copy_from_slice(b"DKG IV v0.2\0");
zeroize(challenge.as_mut());
// Same commentary as the transcript regarding ZAlloc // ChaCha20 has the same commentary as the transcript regarding ZAlloc
// TODO: https://github.com/serai-dex/serai/issues/151 // TODO: https://github.com/serai-dex/serai/issues/151
let res = ChaCha20::new(&key, &iv); let res = ChaCha20::new(&key, &iv);
zeroize(key.as_mut()); zeroize(key.as_mut());
zeroize(iv.as_mut());
res res
} }
fn encrypt<R: RngCore + CryptoRng, C: Ciphersuite, E: Encryptable>( fn encrypt<R: RngCore + CryptoRng, C: Ciphersuite, E: Encryptable>(
rng: &mut R, rng: &mut R,
dst: &'static [u8], context: &str,
from: u16, from: Participant,
to: C::G, to: C::G,
mut msg: Zeroizing<E>, mut msg: Zeroizing<E>,
) -> EncryptedMessage<C, E> { ) -> EncryptedMessage<C, E> {
@@ -144,8 +146,10 @@ fn encrypt<R: RngCore + CryptoRng, C: Ciphersuite, E: Encryptable>(
last.as_mut().zeroize(); last.as_mut().zeroize();
*/ */
// Generate a new key for this message, satisfying cipher's requirement of distinct keys per
// message, and enabling revealing this message without revealing any others
let key = Zeroizing::new(C::random_nonzero_F(rng)); let key = Zeroizing::new(C::random_nonzero_F(rng));
cipher::<C>(dst, &ecdh::<C>(&key, to)).apply_keystream(msg.as_mut().as_mut()); cipher::<C>(context, &ecdh::<C>(&key, to)).apply_keystream(msg.as_mut().as_mut());
let pub_key = C::generator() * key.deref(); let pub_key = C::generator() * key.deref();
let nonce = Zeroizing::new(C::random_nonzero_F(rng)); let nonce = Zeroizing::new(C::random_nonzero_F(rng));
@@ -155,7 +159,7 @@ fn encrypt<R: RngCore + CryptoRng, C: Ciphersuite, E: Encryptable>(
pop: SchnorrSignature::sign( pop: SchnorrSignature::sign(
&key, &key,
nonce, nonce,
pop_challenge::<C>(pub_nonce, pub_key, from, msg.deref().as_ref()), pop_challenge::<C>(context, pub_nonce, pub_key, from, msg.deref().as_ref()),
), ),
msg, msg,
} }
@@ -188,7 +192,12 @@ impl<C: Ciphersuite, E: Encryptable> EncryptedMessage<C, E> {
} }
#[cfg(test)] #[cfg(test)]
pub(crate) fn invalidate_msg<R: RngCore + CryptoRng>(&mut self, rng: &mut R, from: u16) { pub(crate) fn invalidate_msg<R: RngCore + CryptoRng>(
&mut self,
rng: &mut R,
context: &str,
from: Participant,
) {
// Invalidate the message by specifying a new key/Schnorr PoP // Invalidate the message by specifying a new key/Schnorr PoP
// This will cause all initial checks to pass, yet a decrypt to gibberish // This will cause all initial checks to pass, yet a decrypt to gibberish
let key = Zeroizing::new(C::random_nonzero_F(rng)); let key = Zeroizing::new(C::random_nonzero_F(rng));
@@ -199,7 +208,7 @@ impl<C: Ciphersuite, E: Encryptable> EncryptedMessage<C, E> {
self.pop = SchnorrSignature::sign( self.pop = SchnorrSignature::sign(
&key, &key,
nonce, nonce,
pop_challenge::<C>(pub_nonce, pub_key, from, self.msg.deref().as_ref()), pop_challenge::<C>(context, pub_nonce, pub_key, from, self.msg.deref().as_ref()),
); );
} }
@@ -208,11 +217,11 @@ impl<C: Ciphersuite, E: Encryptable> EncryptedMessage<C, E> {
pub(crate) fn invalidate_share_serialization<R: RngCore + CryptoRng>( pub(crate) fn invalidate_share_serialization<R: RngCore + CryptoRng>(
&mut self, &mut self,
rng: &mut R, rng: &mut R,
dst: &'static [u8], context: &str,
from: u16, from: Participant,
to: C::G, to: C::G,
) { ) {
use group::ff::PrimeField; use ciphersuite::group::ff::PrimeField;
let mut repr = <C::F as PrimeField>::Repr::default(); let mut repr = <C::F as PrimeField>::Repr::default();
for b in repr.as_mut().iter_mut() { for b in repr.as_mut().iter_mut() {
@@ -224,7 +233,7 @@ impl<C: Ciphersuite, E: Encryptable> EncryptedMessage<C, E> {
assert!(!bool::from(C::F::from_repr(repr).is_some())); assert!(!bool::from(C::F::from_repr(repr).is_some()));
self.msg.as_mut().as_mut().copy_from_slice(repr.as_ref()); self.msg.as_mut().as_mut().copy_from_slice(repr.as_ref());
*self = encrypt(rng, dst, from, to, self.msg.clone()); *self = encrypt(rng, context, from, to, self.msg.clone());
} }
// Assumes the encrypted message is a secret share. // Assumes the encrypted message is a secret share.
@@ -232,16 +241,16 @@ impl<C: Ciphersuite, E: Encryptable> EncryptedMessage<C, E> {
pub(crate) fn invalidate_share_value<R: RngCore + CryptoRng>( pub(crate) fn invalidate_share_value<R: RngCore + CryptoRng>(
&mut self, &mut self,
rng: &mut R, rng: &mut R,
dst: &'static [u8], context: &str,
from: u16, from: Participant,
to: C::G, to: C::G,
) { ) {
use group::ff::PrimeField; use ciphersuite::group::ff::PrimeField;
// Assumes the share isn't randomly 1 // Assumes the share isn't randomly 1
let repr = C::F::one().to_repr(); let repr = C::F::one().to_repr();
self.msg.as_mut().as_mut().copy_from_slice(repr.as_ref()); self.msg.as_mut().as_mut().copy_from_slice(repr.as_ref());
*self = encrypt(rng, dst, from, to, self.msg.clone()); *self = encrypt(rng, context, from, to, self.msg.clone());
} }
} }
@@ -288,12 +297,22 @@ impl<C: Ciphersuite> EncryptionKeyProof<C> {
// This doesn't need to take the msg. It just doesn't hurt as an extra layer. // This doesn't need to take the msg. It just doesn't hurt as an extra layer.
// This still doesn't mean the DKG offers an authenticated channel. The per-message keys have no // This still doesn't mean the DKG offers an authenticated channel. The per-message keys have no
// root of trust other than their existence in the assumed-to-exist external authenticated channel. // root of trust other than their existence in the assumed-to-exist external authenticated channel.
fn pop_challenge<C: Ciphersuite>(nonce: C::G, key: C::G, sender: u16, msg: &[u8]) -> C::F { fn pop_challenge<C: Ciphersuite>(
context: &str,
nonce: C::G,
key: C::G,
sender: Participant,
msg: &[u8],
) -> C::F {
let mut transcript = RecommendedTranscript::new(b"DKG Encryption Key Proof of Possession v0.2"); let mut transcript = RecommendedTranscript::new(b"DKG Encryption Key Proof of Possession v0.2");
transcript.append_message(b"context", context.as_bytes());
transcript.domain_separate(b"proof_of_possession");
transcript.append_message(b"nonce", nonce.to_bytes()); transcript.append_message(b"nonce", nonce.to_bytes());
transcript.append_message(b"key", key.to_bytes()); transcript.append_message(b"key", key.to_bytes());
// This is sufficient to prevent the attack this is meant to stop // This is sufficient to prevent the attack this is meant to stop
transcript.append_message(b"sender", sender.to_le_bytes()); transcript.append_message(b"sender", sender.to_bytes());
// This, as written above, doesn't hurt // This, as written above, doesn't hurt
transcript.append_message(b"message", msg); transcript.append_message(b"message", msg);
// While this is a PoK and a PoP, it's called a PoP here since the important part is its owner // While this is a PoK and a PoP, it's called a PoP here since the important part is its owner
@@ -302,8 +321,10 @@ fn pop_challenge<C: Ciphersuite>(nonce: C::G, key: C::G, sender: u16, msg: &[u8]
C::hash_to_F(b"DKG-encryption-proof_of_possession", &transcript.challenge(b"schnorr")) C::hash_to_F(b"DKG-encryption-proof_of_possession", &transcript.challenge(b"schnorr"))
} }
fn encryption_key_transcript() -> RecommendedTranscript { fn encryption_key_transcript(context: &str) -> RecommendedTranscript {
RecommendedTranscript::new(b"DKG Encryption Key Correctness Proof v0.2") let mut transcript = RecommendedTranscript::new(b"DKG Encryption Key Correctness Proof v0.2");
transcript.append_message(b"context", context.as_bytes());
transcript
} }
#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)] #[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]
@@ -317,11 +338,23 @@ pub(crate) enum DecryptionError {
// A simple box for managing encryption. // A simple box for managing encryption.
#[derive(Clone)] #[derive(Clone)]
pub(crate) struct Encryption<C: Ciphersuite> { pub(crate) struct Encryption<C: Ciphersuite> {
dst: &'static [u8], context: String,
i: u16, i: Participant,
enc_key: Zeroizing<C::F>, enc_key: Zeroizing<C::F>,
enc_pub_key: C::G, enc_pub_key: C::G,
enc_keys: HashMap<u16, C::G>, enc_keys: HashMap<Participant, C::G>,
}
impl<C: Ciphersuite> fmt::Debug for Encryption<C> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt
.debug_struct("Encryption")
.field("context", &self.context)
.field("i", &self.i)
.field("enc_pub_key", &self.enc_pub_key)
.field("enc_keys", &self.enc_keys)
.finish_non_exhaustive()
}
} }
impl<C: Ciphersuite> Zeroize for Encryption<C> { impl<C: Ciphersuite> Zeroize for Encryption<C> {
@@ -335,10 +368,10 @@ impl<C: Ciphersuite> Zeroize for Encryption<C> {
} }
impl<C: Ciphersuite> Encryption<C> { impl<C: Ciphersuite> Encryption<C> {
pub(crate) fn new<R: RngCore + CryptoRng>(dst: &'static [u8], i: u16, rng: &mut R) -> Self { pub(crate) fn new<R: RngCore + CryptoRng>(context: String, i: Participant, rng: &mut R) -> Self {
let enc_key = Zeroizing::new(C::random_nonzero_F(rng)); let enc_key = Zeroizing::new(C::random_nonzero_F(rng));
Self { Self {
dst, context,
i, i,
enc_pub_key: C::generator() * enc_key.deref(), enc_pub_key: C::generator() * enc_key.deref(),
enc_key, enc_key,
@@ -352,7 +385,7 @@ impl<C: Ciphersuite> Encryption<C> {
pub(crate) fn register<M: Message>( pub(crate) fn register<M: Message>(
&mut self, &mut self,
participant: u16, participant: Participant,
msg: EncryptionKeyMessage<C, M>, msg: EncryptionKeyMessage<C, M>,
) -> M { ) -> M {
if self.enc_keys.contains_key(&participant) { if self.enc_keys.contains_key(&participant) {
@@ -365,10 +398,10 @@ impl<C: Ciphersuite> Encryption<C> {
pub(crate) fn encrypt<R: RngCore + CryptoRng, E: Encryptable>( pub(crate) fn encrypt<R: RngCore + CryptoRng, E: Encryptable>(
&self, &self,
rng: &mut R, rng: &mut R,
participant: u16, participant: Participant,
msg: Zeroizing<E>, msg: Zeroizing<E>,
) -> EncryptedMessage<C, E> { ) -> EncryptedMessage<C, E> {
encrypt(rng, self.dst, self.i, self.enc_keys[&participant], msg) encrypt(rng, &self.context, self.i, self.enc_keys[&participant], msg)
} }
pub(crate) fn decrypt<R: RngCore + CryptoRng, I: Copy + Zeroize, E: Encryptable>( pub(crate) fn decrypt<R: RngCore + CryptoRng, I: Copy + Zeroize, E: Encryptable>(
@@ -378,7 +411,7 @@ impl<C: Ciphersuite> Encryption<C> {
// Uses a distinct batch ID so if this batch verifier is reused, we know its the PoP aspect // Uses a distinct batch ID so if this batch verifier is reused, we know its the PoP aspect
// which failed, and therefore to use None for the blame // which failed, and therefore to use None for the blame
batch_id: I, batch_id: I,
from: u16, from: Participant,
mut msg: EncryptedMessage<C, E>, mut msg: EncryptedMessage<C, E>,
) -> (Zeroizing<E>, EncryptionKeyProof<C>) { ) -> (Zeroizing<E>, EncryptionKeyProof<C>) {
msg.pop.batch_verify( msg.pop.batch_verify(
@@ -386,18 +419,18 @@ impl<C: Ciphersuite> Encryption<C> {
batch, batch,
batch_id, batch_id,
msg.key, msg.key,
pop_challenge::<C>(msg.pop.R, msg.key, from, msg.msg.deref().as_ref()), pop_challenge::<C>(&self.context, msg.pop.R, msg.key, from, msg.msg.deref().as_ref()),
); );
let key = ecdh::<C>(&self.enc_key, msg.key); let key = ecdh::<C>(&self.enc_key, msg.key);
cipher::<C>(self.dst, &key).apply_keystream(msg.msg.as_mut().as_mut()); cipher::<C>(&self.context, &key).apply_keystream(msg.msg.as_mut().as_mut());
( (
msg.msg, msg.msg,
EncryptionKeyProof { EncryptionKeyProof {
key, key,
dleq: DLEqProof::prove( dleq: DLEqProof::prove(
rng, rng,
&mut encryption_key_transcript(), &mut encryption_key_transcript(&self.context),
&[C::generator(), msg.key], &[C::generator(), msg.key],
&self.enc_key, &self.enc_key,
), ),
@@ -409,16 +442,16 @@ impl<C: Ciphersuite> Encryption<C> {
// Returns None if the key was wrong. // Returns None if the key was wrong.
pub(crate) fn decrypt_with_proof<E: Encryptable>( pub(crate) fn decrypt_with_proof<E: Encryptable>(
&self, &self,
from: u16, from: Participant,
decryptor: u16, decryptor: Participant,
mut msg: EncryptedMessage<C, E>, mut msg: EncryptedMessage<C, E>,
// There's no encryption key proof if the accusation is of an invalid signature // There's no encryption key proof if the accusation is of an invalid signature
proof: Option<EncryptionKeyProof<C>>, proof: Option<EncryptionKeyProof<C>>,
) -> Result<Zeroizing<E>, DecryptionError> { ) -> Result<Zeroizing<E>, DecryptionError> {
if !msg if !msg.pop.verify(
.pop msg.key,
.verify(msg.key, pop_challenge::<C>(msg.pop.R, msg.key, from, msg.msg.deref().as_ref())) pop_challenge::<C>(&self.context, msg.pop.R, msg.key, from, msg.msg.deref().as_ref()),
{ ) {
Err(DecryptionError::InvalidSignature)?; Err(DecryptionError::InvalidSignature)?;
} }
@@ -427,13 +460,13 @@ impl<C: Ciphersuite> Encryption<C> {
proof proof
.dleq .dleq
.verify( .verify(
&mut encryption_key_transcript(), &mut encryption_key_transcript(&self.context),
&[C::generator(), msg.key], &[C::generator(), msg.key],
&[self.enc_keys[&decryptor], *proof.key], &[self.enc_keys[&decryptor], *proof.key],
) )
.map_err(|_| DecryptionError::InvalidProof)?; .map_err(|_| DecryptionError::InvalidProof)?;
cipher::<C>(self.dst, &proof.key).apply_keystream(msg.msg.as_mut().as_mut()); cipher::<C>(&self.context, &proof.key).apply_keystream(msg.msg.as_mut().as_mut());
Ok(msg.msg) Ok(msg.msg)
} else { } else {
Err(DecryptionError::InvalidProof) Err(DecryptionError::InvalidProof)

View File

@@ -1,8 +1,4 @@
use core::{ use core::{marker::PhantomData, ops::Deref, fmt};
marker::PhantomData,
ops::Deref,
fmt::{Debug, Formatter},
};
use std::{ use std::{
io::{self, Read, Write}, io::{self, Read, Write},
collections::HashMap, collections::HashMap,
@@ -14,17 +10,19 @@ use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
use transcript::{Transcript, RecommendedTranscript}; use transcript::{Transcript, RecommendedTranscript};
use group::{ use ciphersuite::{
group::{
ff::{Field, PrimeField}, ff::{Field, PrimeField},
Group, GroupEncoding, Group, GroupEncoding,
},
Ciphersuite,
}; };
use ciphersuite::Ciphersuite;
use multiexp::{multiexp_vartime, BatchVerifier}; use multiexp::{multiexp_vartime, BatchVerifier};
use schnorr::SchnorrSignature; use schnorr::SchnorrSignature;
use crate::{ use crate::{
DkgError, ThresholdParams, ThresholdCore, validate_map, Participant, DkgError, ThresholdParams, ThresholdCore, validate_map,
encryption::{ encryption::{
ReadWrite, EncryptionKeyMessage, EncryptedMessage, Encryption, EncryptionKeyProof, ReadWrite, EncryptionKeyMessage, EncryptedMessage, Encryption, EncryptionKeyProof,
DecryptionError, DecryptionError,
@@ -34,11 +32,11 @@ use crate::{
type FrostError<C> = DkgError<EncryptionKeyProof<C>>; type FrostError<C> = DkgError<EncryptionKeyProof<C>>;
#[allow(non_snake_case)] #[allow(non_snake_case)]
fn challenge<C: Ciphersuite>(context: &str, l: u16, R: &[u8], Am: &[u8]) -> C::F { fn challenge<C: Ciphersuite>(context: &str, l: Participant, R: &[u8], Am: &[u8]) -> C::F {
let mut transcript = RecommendedTranscript::new(b"DKG FROST v0.2"); let mut transcript = RecommendedTranscript::new(b"DKG FROST v0.2");
transcript.domain_separate(b"schnorr_proof_of_knowledge"); transcript.domain_separate(b"schnorr_proof_of_knowledge");
transcript.append_message(b"context", context.as_bytes()); transcript.append_message(b"context", context.as_bytes());
transcript.append_message(b"participant", l.to_le_bytes()); transcript.append_message(b"participant", l.to_bytes());
transcript.append_message(b"nonce", R); transcript.append_message(b"nonce", R);
transcript.append_message(b"commitments", Am); transcript.append_message(b"commitments", Am);
C::hash_to_F(b"DKG-FROST-proof_of_knowledge-0", &transcript.challenge(b"schnorr")) C::hash_to_F(b"DKG-FROST-proof_of_knowledge-0", &transcript.challenge(b"schnorr"))
@@ -85,6 +83,7 @@ impl<C: Ciphersuite> ReadWrite for Commitments<C> {
} }
/// State machine to begin the key generation protocol. /// State machine to begin the key generation protocol.
#[derive(Debug, Zeroize)]
pub struct KeyGenMachine<C: Ciphersuite> { pub struct KeyGenMachine<C: Ciphersuite> {
params: ThresholdParams, params: ThresholdParams,
context: String, context: String,
@@ -132,7 +131,7 @@ impl<C: Ciphersuite> KeyGenMachine<C> {
); );
// Additionally create an encryption mechanism to protect the secret shares // Additionally create an encryption mechanism to protect the secret shares
let encryption = Encryption::new(b"FROST", self.params.i, rng); let encryption = Encryption::new(self.context.clone(), self.params.i, rng);
// Step 4: Broadcast // Step 4: Broadcast
let msg = let msg =
@@ -150,8 +149,13 @@ impl<C: Ciphersuite> KeyGenMachine<C> {
} }
} }
fn polynomial<F: PrimeField + Zeroize>(coefficients: &[Zeroizing<F>], l: u16) -> Zeroizing<F> { fn polynomial<F: PrimeField + Zeroize>(
let l = F::from(u64::from(l)); coefficients: &[Zeroizing<F>],
l: Participant,
) -> Zeroizing<F> {
let l = F::from(u64::from(u16::from(l)));
// This should never be reached since Participant is explicitly non-zero
assert!(l != F::zero(), "zero participant passed to polynomial");
let mut share = Zeroizing::new(F::zero()); let mut share = Zeroizing::new(F::zero());
for (idx, coefficient) in coefficients.iter().rev().enumerate() { for (idx, coefficient) in coefficients.iter().rev().enumerate() {
*share += coefficient.deref(); *share += coefficient.deref();
@@ -181,8 +185,8 @@ impl<F: PrimeField> AsMut<[u8]> for SecretShare<F> {
self.0.as_mut() self.0.as_mut()
} }
} }
impl<F: PrimeField> Debug for SecretShare<F> { impl<F: PrimeField> fmt::Debug for SecretShare<F> {
fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), core::fmt::Error> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("SecretShare").finish_non_exhaustive() fmt.debug_struct("SecretShare").finish_non_exhaustive()
} }
} }
@@ -193,7 +197,7 @@ impl<F: PrimeField> Zeroize for SecretShare<F> {
} }
// Still manually implement ZeroizeOnDrop to ensure these don't stick around. // Still manually implement ZeroizeOnDrop to ensure these don't stick around.
// We could replace Zeroizing<M> with a bound M: ZeroizeOnDrop. // We could replace Zeroizing<M> with a bound M: ZeroizeOnDrop.
// Doing so would potentially fail to highlight thr expected behavior with these and remove a layer // Doing so would potentially fail to highlight the expected behavior with these and remove a layer
// of depth. // of depth.
impl<F: PrimeField> Drop for SecretShare<F> { impl<F: PrimeField> Drop for SecretShare<F> {
fn drop(&mut self) { fn drop(&mut self) {
@@ -224,17 +228,33 @@ pub struct SecretShareMachine<C: Ciphersuite> {
encryption: Encryption<C>, encryption: Encryption<C>,
} }
impl<C: Ciphersuite> fmt::Debug for SecretShareMachine<C> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt
.debug_struct("SecretShareMachine")
.field("params", &self.params)
.field("context", &self.context)
.field("our_commitments", &self.our_commitments)
.field("encryption", &self.encryption)
.finish_non_exhaustive()
}
}
impl<C: Ciphersuite> SecretShareMachine<C> { impl<C: Ciphersuite> SecretShareMachine<C> {
/// Verify the data from the previous round (canonicity, PoKs, message authenticity) /// Verify the data from the previous round (canonicity, PoKs, message authenticity)
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
fn verify_r1<R: RngCore + CryptoRng>( fn verify_r1<R: RngCore + CryptoRng>(
&mut self, &mut self,
rng: &mut R, rng: &mut R,
mut commitments: HashMap<u16, EncryptionKeyMessage<C, Commitments<C>>>, mut commitments: HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
) -> Result<HashMap<u16, Vec<C::G>>, FrostError<C>> { ) -> Result<HashMap<Participant, Vec<C::G>>, FrostError<C>> {
validate_map(&commitments, &(1 ..= self.params.n()).collect::<Vec<_>>(), self.params.i())?; validate_map(
&commitments,
&(1 ..= self.params.n()).map(Participant).collect::<Vec<_>>(),
self.params.i(),
)?;
let mut batch = BatchVerifier::<u16, C::G>::new(commitments.len()); let mut batch = BatchVerifier::<Participant, C::G>::new(commitments.len());
let mut commitments = commitments let mut commitments = commitments
.drain() .drain()
.map(|(l, msg)| { .map(|(l, msg)| {
@@ -254,7 +274,7 @@ impl<C: Ciphersuite> SecretShareMachine<C> {
}) })
.collect::<HashMap<_, _>>(); .collect::<HashMap<_, _>>();
batch.verify_with_vartime_blame().map_err(FrostError::InvalidProofOfKnowledge)?; batch.verify_vartime_with_vartime_blame().map_err(FrostError::InvalidProofOfKnowledge)?;
commitments.insert(self.params.i, self.our_commitments.drain(..).collect()); commitments.insert(self.params.i, self.our_commitments.drain(..).collect());
Ok(commitments) Ok(commitments)
@@ -268,14 +288,16 @@ impl<C: Ciphersuite> SecretShareMachine<C> {
pub fn generate_secret_shares<R: RngCore + CryptoRng>( pub fn generate_secret_shares<R: RngCore + CryptoRng>(
mut self, mut self,
rng: &mut R, rng: &mut R,
commitments: HashMap<u16, EncryptionKeyMessage<C, Commitments<C>>>, commitments: HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
) -> Result<(KeyMachine<C>, HashMap<u16, EncryptedMessage<C, SecretShare<C::F>>>), FrostError<C>> ) -> Result<
{ (KeyMachine<C>, HashMap<Participant, EncryptedMessage<C, SecretShare<C::F>>>),
FrostError<C>,
> {
let commitments = self.verify_r1(&mut *rng, commitments)?; let commitments = self.verify_r1(&mut *rng, commitments)?;
// Step 1: Generate secret shares for all other parties // Step 1: Generate secret shares for all other parties
let mut res = HashMap::new(); let mut res = HashMap::new();
for l in 1 ..= self.params.n() { for l in (1 ..= self.params.n()).map(Participant) {
// Don't insert our own shares to the byte buffer which is meant to be sent around // Don't insert our own shares to the byte buffer which is meant to be sent around
// An app developer could accidentally send it. Best to keep this black boxed // An app developer could accidentally send it. Best to keep this black boxed
if l == self.params.i() { if l == self.params.i() {
@@ -307,10 +329,21 @@ impl<C: Ciphersuite> SecretShareMachine<C> {
pub struct KeyMachine<C: Ciphersuite> { pub struct KeyMachine<C: Ciphersuite> {
params: ThresholdParams, params: ThresholdParams,
secret: Zeroizing<C::F>, secret: Zeroizing<C::F>,
commitments: HashMap<u16, Vec<C::G>>, commitments: HashMap<Participant, Vec<C::G>>,
encryption: Encryption<C>, encryption: Encryption<C>,
} }
impl<C: Ciphersuite> fmt::Debug for KeyMachine<C> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt
.debug_struct("KeyMachine")
.field("params", &self.params)
.field("commitments", &self.commitments)
.field("encryption", &self.encryption)
.finish_non_exhaustive()
}
}
impl<C: Ciphersuite> Zeroize for KeyMachine<C> { impl<C: Ciphersuite> Zeroize for KeyMachine<C> {
fn zeroize(&mut self) { fn zeroize(&mut self) {
self.params.zeroize(); self.params.zeroize();
@@ -325,8 +358,8 @@ impl<C: Ciphersuite> Zeroize for KeyMachine<C> {
// Calculate the exponent for a given participant and apply it to a series of commitments // Calculate the exponent for a given participant and apply it to a series of commitments
// Initially used with the actual commitments to verify the secret share, later used with // Initially used with the actual commitments to verify the secret share, later used with
// stripes to generate the verification shares // stripes to generate the verification shares
fn exponential<C: Ciphersuite>(i: u16, values: &[C::G]) -> Vec<(C::F, C::G)> { fn exponential<C: Ciphersuite>(i: Participant, values: &[C::G]) -> Vec<(C::F, C::G)> {
let i = C::F::from(i.into()); let i = C::F::from(u16::from(i).into());
let mut res = Vec::with_capacity(values.len()); let mut res = Vec::with_capacity(values.len());
(0 .. values.len()).fold(C::F::one(), |exp, l| { (0 .. values.len()).fold(C::F::one(), |exp, l| {
res.push((exp, values[l])); res.push((exp, values[l]));
@@ -336,7 +369,7 @@ fn exponential<C: Ciphersuite>(i: u16, values: &[C::G]) -> Vec<(C::F, C::G)> {
} }
fn share_verification_statements<C: Ciphersuite>( fn share_verification_statements<C: Ciphersuite>(
target: u16, target: Participant,
commitments: &[C::G], commitments: &[C::G],
mut share: Zeroizing<C::F>, mut share: Zeroizing<C::F>,
) -> Vec<(C::F, C::G)> { ) -> Vec<(C::F, C::G)> {
@@ -358,8 +391,8 @@ fn share_verification_statements<C: Ciphersuite>(
#[derive(Clone, Copy, Hash, Debug, Zeroize)] #[derive(Clone, Copy, Hash, Debug, Zeroize)]
enum BatchId { enum BatchId {
Decryption(u16), Decryption(Participant),
Share(u16), Share(Participant),
} }
impl<C: Ciphersuite> KeyMachine<C> { impl<C: Ciphersuite> KeyMachine<C> {
@@ -369,9 +402,13 @@ impl<C: Ciphersuite> KeyMachine<C> {
pub fn calculate_share<R: RngCore + CryptoRng>( pub fn calculate_share<R: RngCore + CryptoRng>(
mut self, mut self,
rng: &mut R, rng: &mut R,
mut shares: HashMap<u16, EncryptedMessage<C, SecretShare<C::F>>>, mut shares: HashMap<Participant, EncryptedMessage<C, SecretShare<C::F>>>,
) -> Result<BlameMachine<C>, FrostError<C>> { ) -> Result<BlameMachine<C>, FrostError<C>> {
validate_map(&shares, &(1 ..= self.params.n()).collect::<Vec<_>>(), self.params.i())?; validate_map(
&shares,
&(1 ..= self.params.n()).map(Participant).collect::<Vec<_>>(),
self.params.i(),
)?;
let mut batch = BatchVerifier::new(shares.len()); let mut batch = BatchVerifier::new(shares.len());
let mut blames = HashMap::new(); let mut blames = HashMap::new();
@@ -411,7 +448,7 @@ impl<C: Ciphersuite> KeyMachine<C> {
// Calculate each user's verification share // Calculate each user's verification share
let mut verification_shares = HashMap::new(); let mut verification_shares = HashMap::new();
for i in 1 ..= self.params.n() { for i in (1 ..= self.params.n()).map(Participant) {
verification_shares.insert( verification_shares.insert(
i, i,
if i == self.params.i() { if i == self.params.i() {
@@ -437,11 +474,21 @@ impl<C: Ciphersuite> KeyMachine<C> {
} }
pub struct BlameMachine<C: Ciphersuite> { pub struct BlameMachine<C: Ciphersuite> {
commitments: HashMap<u16, Vec<C::G>>, commitments: HashMap<Participant, Vec<C::G>>,
encryption: Encryption<C>, encryption: Encryption<C>,
result: ThresholdCore<C>, result: ThresholdCore<C>,
} }
impl<C: Ciphersuite> fmt::Debug for BlameMachine<C> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt
.debug_struct("BlameMachine")
.field("commitments", &self.commitments)
.field("encryption", &self.encryption)
.finish_non_exhaustive()
}
}
impl<C: Ciphersuite> Zeroize for BlameMachine<C> { impl<C: Ciphersuite> Zeroize for BlameMachine<C> {
fn zeroize(&mut self) { fn zeroize(&mut self) {
for (_, commitments) in self.commitments.iter_mut() { for (_, commitments) in self.commitments.iter_mut() {
@@ -468,11 +515,11 @@ impl<C: Ciphersuite> BlameMachine<C> {
fn blame_internal( fn blame_internal(
&self, &self,
sender: u16, sender: Participant,
recipient: u16, recipient: Participant,
msg: EncryptedMessage<C, SecretShare<C::F>>, msg: EncryptedMessage<C, SecretShare<C::F>>,
proof: Option<EncryptionKeyProof<C>>, proof: Option<EncryptionKeyProof<C>>,
) -> u16 { ) -> Participant {
let share_bytes = match self.encryption.decrypt_with_proof(sender, recipient, msg, proof) { let share_bytes = match self.encryption.decrypt_with_proof(sender, recipient, msg, proof) {
Ok(share_bytes) => share_bytes, Ok(share_bytes) => share_bytes,
// If there's an invalid signature, the sender did not send a properly formed message // If there's an invalid signature, the sender did not send a properly formed message
@@ -517,17 +564,17 @@ impl<C: Ciphersuite> BlameMachine<C> {
/// order to prevent multiple instances of blame over a single incident. /// order to prevent multiple instances of blame over a single incident.
pub fn blame( pub fn blame(
self, self,
sender: u16, sender: Participant,
recipient: u16, recipient: Participant,
msg: EncryptedMessage<C, SecretShare<C::F>>, msg: EncryptedMessage<C, SecretShare<C::F>>,
proof: Option<EncryptionKeyProof<C>>, proof: Option<EncryptionKeyProof<C>>,
) -> (AdditionalBlameMachine<C>, u16) { ) -> (AdditionalBlameMachine<C>, Participant) {
let faulty = self.blame_internal(sender, recipient, msg, proof); let faulty = self.blame_internal(sender, recipient, msg, proof);
(AdditionalBlameMachine(self), faulty) (AdditionalBlameMachine(self), faulty)
} }
} }
#[derive(Zeroize)] #[derive(Debug, Zeroize)]
pub struct AdditionalBlameMachine<C: Ciphersuite>(BlameMachine<C>); pub struct AdditionalBlameMachine<C: Ciphersuite>(BlameMachine<C>);
impl<C: Ciphersuite> AdditionalBlameMachine<C> { impl<C: Ciphersuite> AdditionalBlameMachine<C> {
/// Given an accusation of fault, determine the faulty party (either the sender, who sent an /// Given an accusation of fault, determine the faulty party (either the sender, who sent an
@@ -542,11 +589,11 @@ impl<C: Ciphersuite> AdditionalBlameMachine<C> {
/// over a single incident. /// over a single incident.
pub fn blame( pub fn blame(
self, self,
sender: u16, sender: Participant,
recipient: u16, recipient: Participant,
msg: EncryptedMessage<C, SecretShare<C::F>>, msg: EncryptedMessage<C, SecretShare<C::F>>,
proof: Option<EncryptionKeyProof<C>>, proof: Option<EncryptionKeyProof<C>>,
) -> u16 { ) -> Participant {
self.0.blame_internal(sender, recipient, msg, proof) self.0.blame_internal(sender, recipient, msg, proof)
} }
} }

View File

@@ -7,23 +7,25 @@
//! provided. //! provided.
use core::{ use core::{
fmt::{Debug, Formatter}, fmt::{self, Debug},
ops::Deref, ops::Deref,
}; };
use std::{io::Read, sync::Arc, collections::HashMap}; use std::{io, sync::Arc, collections::HashMap};
use thiserror::Error; use thiserror::Error;
use zeroize::{Zeroize, Zeroizing}; use zeroize::{Zeroize, Zeroizing};
use group::{ use ciphersuite::{
group::{
ff::{Field, PrimeField}, ff::{Field, PrimeField},
GroupEncoding, GroupEncoding,
},
Ciphersuite,
}; };
use ciphersuite::Ciphersuite; /// Encryption types and utilities used to secure DKG messages.
pub mod encryption;
mod encryption;
/// The distributed key generation protocol described in the /// The distributed key generation protocol described in the
/// [FROST paper](https://eprint.iacr.org/2020/852). /// [FROST paper](https://eprint.iacr.org/2020/852).
@@ -36,29 +38,60 @@ pub mod promote;
#[cfg(any(test, feature = "tests"))] #[cfg(any(test, feature = "tests"))]
pub mod tests; pub mod tests;
/// The ID of a participant, defined as a non-zero u16.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Zeroize)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Participant(pub(crate) u16);
impl Participant {
pub fn new(i: u16) -> Option<Participant> {
if i == 0 {
None
} else {
Some(Participant(i))
}
}
#[allow(clippy::wrong_self_convention)]
pub fn to_bytes(&self) -> [u8; 2] {
self.0.to_le_bytes()
}
}
impl From<Participant> for u16 {
fn from(participant: Participant) -> u16 {
participant.0
}
}
impl fmt::Display for Participant {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.0)
}
}
/// Various errors possible during key generation/signing. /// Various errors possible during key generation/signing.
#[derive(Clone, PartialEq, Eq, Debug, Error)] #[derive(Clone, PartialEq, Eq, Debug, Error)]
pub enum DkgError<B: Clone + PartialEq + Eq + Debug> { pub enum DkgError<B: Clone + PartialEq + Eq + Debug> {
#[error("a parameter was 0 (required {0}, participants {1})")] #[error("a parameter was 0 (threshold {0}, participants {1})")]
ZeroParameter(u16, u16), ZeroParameter(u16, u16),
#[error("invalid amount of required participants (max {1}, got {0})")] #[error("invalid amount of required participants (max {1}, got {0})")]
InvalidRequiredQuantity(u16, u16), InvalidRequiredQuantity(u16, u16),
#[error("invalid participant index (0 < index <= {0}, yet index is {1})")] #[error("invalid participant (0 < participant <= {0}, yet participant is {1})")]
InvalidParticipantIndex(u16, u16), InvalidParticipant(u16, Participant),
#[error("invalid signing set")] #[error("invalid signing set")]
InvalidSigningSet, InvalidSigningSet,
#[error("invalid participant quantity (expected {0}, got {1})")] #[error("invalid participant quantity (expected {0}, got {1})")]
InvalidParticipantQuantity(usize, usize), InvalidParticipantQuantity(usize, usize),
#[error("duplicated participant index ({0})")] #[error("duplicated participant ({0})")]
DuplicatedIndex(u16), DuplicatedParticipant(Participant),
#[error("missing participant {0}")] #[error("missing participant {0}")]
MissingParticipant(u16), MissingParticipant(Participant),
#[error("invalid proof of knowledge (participant {0})")] #[error("invalid proof of knowledge (participant {0})")]
InvalidProofOfKnowledge(u16), InvalidProofOfKnowledge(Participant),
#[error("invalid share (participant {participant}, blame {blame})")] #[error("invalid share (participant {participant}, blame {blame})")]
InvalidShare { participant: u16, blame: Option<B> }, InvalidShare { participant: Participant, blame: Option<B> },
#[error("internal error ({0})")] #[error("internal error ({0})")]
InternalError(&'static str), InternalError(&'static str),
@@ -66,9 +99,9 @@ pub enum DkgError<B: Clone + PartialEq + Eq + Debug> {
// Validate a map of values to have the expected included participants // Validate a map of values to have the expected included participants
pub(crate) fn validate_map<T, B: Clone + PartialEq + Eq + Debug>( pub(crate) fn validate_map<T, B: Clone + PartialEq + Eq + Debug>(
map: &HashMap<u16, T>, map: &HashMap<Participant, T>,
included: &[u16], included: &[Participant],
ours: u16, ours: Participant,
) -> Result<(), DkgError<B>> { ) -> Result<(), DkgError<B>> {
if (map.len() + 1) != included.len() { if (map.len() + 1) != included.len() {
Err(DkgError::InvalidParticipantQuantity(included.len(), map.len() + 1))?; Err(DkgError::InvalidParticipantQuantity(included.len(), map.len() + 1))?;
@@ -77,7 +110,7 @@ pub(crate) fn validate_map<T, B: Clone + PartialEq + Eq + Debug>(
for included in included { for included in included {
if *included == ours { if *included == ours {
if map.contains_key(included) { if map.contains_key(included) {
Err(DkgError::DuplicatedIndex(*included))?; Err(DkgError::DuplicatedParticipant(*included))?;
} }
continue; continue;
} }
@@ -93,17 +126,18 @@ pub(crate) fn validate_map<T, B: Clone + PartialEq + Eq + Debug>(
/// Parameters for a multisig. /// Parameters for a multisig.
// These fields should not be made public as they should be static // These fields should not be made public as they should be static
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)] #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct ThresholdParams { pub struct ThresholdParams {
/// Participants needed to sign on behalf of the group. /// Participants needed to sign on behalf of the group.
t: u16, t: u16,
/// Amount of participants. /// Amount of participants.
n: u16, n: u16,
/// Index of the participant being acted for. /// Index of the participant being acted for.
i: u16, i: Participant,
} }
impl ThresholdParams { impl ThresholdParams {
pub fn new(t: u16, n: u16, i: u16) -> Result<ThresholdParams, DkgError<()>> { pub fn new(t: u16, n: u16, i: Participant) -> Result<ThresholdParams, DkgError<()>> {
if (t == 0) || (n == 0) { if (t == 0) || (n == 0) {
Err(DkgError::ZeroParameter(t, n))?; Err(DkgError::ZeroParameter(t, n))?;
} }
@@ -113,8 +147,8 @@ impl ThresholdParams {
if t > n { if t > n {
Err(DkgError::InvalidRequiredQuantity(t, n))?; Err(DkgError::InvalidRequiredQuantity(t, n))?;
} }
if (i == 0) || (i > n) { if u16::from(i) > n {
Err(DkgError::InvalidParticipantIndex(n, i))?; Err(DkgError::InvalidParticipant(n, i))?;
} }
Ok(ThresholdParams { t, n, i }) Ok(ThresholdParams { t, n, i })
@@ -126,13 +160,15 @@ impl ThresholdParams {
pub fn n(&self) -> u16 { pub fn n(&self) -> u16 {
self.n self.n
} }
pub fn i(&self) -> u16 { pub fn i(&self) -> Participant {
self.i self.i
} }
} }
/// Calculate the lagrange coefficient for a signing set. /// Calculate the lagrange coefficient for a signing set.
pub fn lagrange<F: PrimeField>(i: u16, included: &[u16]) -> F { pub fn lagrange<F: PrimeField>(i: Participant, included: &[Participant]) -> F {
let i_f = F::from(u64::from(u16::from(i)));
let mut num = F::one(); let mut num = F::one();
let mut denom = F::one(); let mut denom = F::one();
for l in included { for l in included {
@@ -140,9 +176,9 @@ pub fn lagrange<F: PrimeField>(i: u16, included: &[u16]) -> F {
continue; continue;
} }
let share = F::from(u64::try_from(*l).unwrap()); let share = F::from(u64::from(u16::from(*l)));
num *= share; num *= share;
denom *= share - F::from(u64::try_from(i).unwrap()); denom *= share - i_f;
} }
// Safe as this will only be 0 if we're part of the above loop // Safe as this will only be 0 if we're part of the above loop
@@ -162,11 +198,11 @@ pub struct ThresholdCore<C: Ciphersuite> {
/// Group key. /// Group key.
group_key: C::G, group_key: C::G,
/// Verification shares. /// Verification shares.
verification_shares: HashMap<u16, C::G>, verification_shares: HashMap<Participant, C::G>,
} }
impl<C: Ciphersuite> Debug for ThresholdCore<C> { impl<C: Ciphersuite> fmt::Debug for ThresholdCore<C> {
fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), core::fmt::Error> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt fmt
.debug_struct("ThresholdCore") .debug_struct("ThresholdCore")
.field("params", &self.params) .field("params", &self.params)
@@ -191,16 +227,9 @@ impl<C: Ciphersuite> ThresholdCore<C> {
pub(crate) fn new( pub(crate) fn new(
params: ThresholdParams, params: ThresholdParams,
secret_share: Zeroizing<C::F>, secret_share: Zeroizing<C::F>,
verification_shares: HashMap<u16, C::G>, verification_shares: HashMap<Participant, C::G>,
) -> ThresholdCore<C> { ) -> ThresholdCore<C> {
debug_assert!(validate_map::<_, ()>( let t = (1 ..= params.t).map(Participant).collect::<Vec<_>>();
&verification_shares,
&(0 ..= params.n).collect::<Vec<_>>(),
0
)
.is_ok());
let t = (1 ..= params.t).collect::<Vec<_>>();
ThresholdCore { ThresholdCore {
params, params,
secret_share, secret_share,
@@ -220,32 +249,40 @@ impl<C: Ciphersuite> ThresholdCore<C> {
self.group_key self.group_key
} }
pub(crate) fn verification_shares(&self) -> HashMap<u16, C::G> { pub(crate) fn verification_shares(&self) -> HashMap<Participant, C::G> {
self.verification_shares.clone() self.verification_shares.clone()
} }
pub fn serialize(&self) -> Vec<u8> { pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
let mut serialized = vec![]; writer.write_all(&u32::try_from(C::ID.len()).unwrap().to_le_bytes())?;
serialized.extend(u32::try_from(C::ID.len()).unwrap().to_be_bytes()); writer.write_all(C::ID)?;
serialized.extend(C::ID); writer.write_all(&self.params.t.to_le_bytes())?;
serialized.extend(self.params.t.to_be_bytes()); writer.write_all(&self.params.n.to_le_bytes())?;
serialized.extend(self.params.n.to_be_bytes()); writer.write_all(&self.params.i.to_bytes())?;
serialized.extend(self.params.i.to_be_bytes()); let mut share_bytes = self.secret_share.to_repr();
serialized.extend(self.secret_share.to_repr().as_ref()); writer.write_all(share_bytes.as_ref())?;
share_bytes.as_mut().zeroize();
for l in 1 ..= self.params.n { for l in 1 ..= self.params.n {
serialized.extend(self.verification_shares[&l].to_bytes().as_ref()); writer
.write_all(self.verification_shares[&Participant::new(l).unwrap()].to_bytes().as_ref())?;
} }
Ok(())
}
pub fn serialize(&self) -> Zeroizing<Vec<u8>> {
let mut serialized = Zeroizing::new(vec![]);
self.write::<Vec<u8>>(serialized.as_mut()).unwrap();
serialized serialized
} }
pub fn deserialize<R: Read>(reader: &mut R) -> Result<ThresholdCore<C>, DkgError<()>> { pub fn read<R: io::Read>(reader: &mut R) -> Result<ThresholdCore<C>, DkgError<()>> {
{ {
let missing = DkgError::InternalError("ThresholdCore serialization is missing its curve"); let missing = DkgError::InternalError("ThresholdCore serialization is missing its curve");
let different = DkgError::InternalError("deserializing ThresholdCore for another curve"); let different = DkgError::InternalError("deserializing ThresholdCore for another curve");
let mut id_len = [0; 4]; let mut id_len = [0; 4];
reader.read_exact(&mut id_len).map_err(|_| missing.clone())?; reader.read_exact(&mut id_len).map_err(|_| missing.clone())?;
if u32::try_from(C::ID.len()).unwrap().to_be_bytes() != id_len { if u32::try_from(C::ID.len()).unwrap().to_le_bytes() != id_len {
Err(different.clone())?; Err(different.clone())?;
} }
@@ -262,9 +299,14 @@ impl<C: Ciphersuite> ThresholdCore<C> {
reader reader
.read_exact(&mut value) .read_exact(&mut value)
.map_err(|_| DkgError::InternalError("missing participant quantities"))?; .map_err(|_| DkgError::InternalError("missing participant quantities"))?;
Ok(u16::from_be_bytes(value)) Ok(u16::from_le_bytes(value))
}; };
(read_u16()?, read_u16()?, read_u16()?) (
read_u16()?,
read_u16()?,
Participant::new(read_u16()?)
.ok_or(DkgError::InternalError("invalid participant index"))?,
)
}; };
let secret_share = Zeroizing::new( let secret_share = Zeroizing::new(
@@ -272,7 +314,7 @@ impl<C: Ciphersuite> ThresholdCore<C> {
); );
let mut verification_shares = HashMap::new(); let mut verification_shares = HashMap::new();
for l in 1 ..= n { for l in (1 ..= n).map(Participant) {
verification_shares.insert( verification_shares.insert(
l, l,
<C as Ciphersuite>::read_G(reader) <C as Ciphersuite>::read_G(reader)
@@ -306,10 +348,23 @@ pub struct ThresholdKeys<C: Ciphersuite> {
pub struct ThresholdView<C: Ciphersuite> { pub struct ThresholdView<C: Ciphersuite> {
offset: C::F, offset: C::F,
group_key: C::G, group_key: C::G,
included: Vec<u16>, included: Vec<Participant>,
secret_share: Zeroizing<C::F>, secret_share: Zeroizing<C::F>,
original_verification_shares: HashMap<u16, C::G>, original_verification_shares: HashMap<Participant, C::G>,
verification_shares: HashMap<u16, C::G>, verification_shares: HashMap<Participant, C::G>,
}
impl<C: Ciphersuite> fmt::Debug for ThresholdView<C> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt
.debug_struct("ThresholdView")
.field("offset", &self.offset)
.field("group_key", &self.group_key)
.field("included", &self.included)
.field("original_verification_shares", &self.original_verification_shares)
.field("verification_shares", &self.verification_shares)
.finish_non_exhaustive()
}
} }
impl<C: Ciphersuite> Zeroize for ThresholdView<C> { impl<C: Ciphersuite> Zeroize for ThresholdView<C> {
@@ -335,6 +390,7 @@ impl<C: Ciphersuite> ThresholdKeys<C> {
/// Offset the keys by a given scalar to allow for account and privacy schemes. /// Offset the keys by a given scalar to allow for account and privacy schemes.
/// This offset is ephemeral and will not be included when these keys are serialized. /// This offset is ephemeral and will not be included when these keys are serialized.
/// Keys offset multiple times will form a new offset of their sum. /// Keys offset multiple times will form a new offset of their sum.
#[must_use]
pub fn offset(&self, offset: C::F) -> ThresholdKeys<C> { pub fn offset(&self, offset: C::F) -> ThresholdKeys<C> {
let mut res = self.clone(); let mut res = self.clone();
// Carry any existing offset // Carry any existing offset
@@ -363,39 +419,43 @@ impl<C: Ciphersuite> ThresholdKeys<C> {
} }
/// Returns all participants' verification shares without any offsetting. /// Returns all participants' verification shares without any offsetting.
pub(crate) fn verification_shares(&self) -> HashMap<u16, C::G> { pub(crate) fn verification_shares(&self) -> HashMap<Participant, C::G> {
self.core.verification_shares() self.core.verification_shares()
} }
pub fn serialize(&self) -> Vec<u8> { pub fn serialize(&self) -> Zeroizing<Vec<u8>> {
self.core.serialize() self.core.serialize()
} }
pub fn view(&self, included: &[u16]) -> Result<ThresholdView<C>, DkgError<()>> { pub fn view(&self, mut included: Vec<Participant>) -> Result<ThresholdView<C>, DkgError<()>> {
if (included.len() < self.params().t.into()) || (usize::from(self.params().n) < included.len()) if (included.len() < self.params().t.into()) || (usize::from(self.params().n) < included.len())
{ {
Err(DkgError::InvalidSigningSet)?; Err(DkgError::InvalidSigningSet)?;
} }
included.sort();
let offset_share = self.offset.unwrap_or_else(C::F::zero) * let mut secret_share =
C::F::from(included.len().try_into().unwrap()).invert().unwrap(); Zeroizing::new(lagrange::<C::F>(self.params().i, &included) * self.secret_share().deref());
let offset_verification_share = C::generator() * offset_share;
let mut verification_shares = self.verification_shares();
for (i, share) in verification_shares.iter_mut() {
*share *= lagrange::<C::F>(*i, &included);
}
// The offset is included by adding it to the participant with the lowest ID
let offset = self.offset.unwrap_or_else(C::F::zero);
if included[0] == self.params().i() {
*secret_share += offset;
}
*verification_shares.get_mut(&included[0]).unwrap() += C::generator() * offset;
Ok(ThresholdView { Ok(ThresholdView {
offset: self.offset.unwrap_or_else(C::F::zero), offset,
group_key: self.group_key(), group_key: self.group_key(),
secret_share: Zeroizing::new( secret_share,
(lagrange::<C::F>(self.params().i, included) * self.secret_share().deref()) + offset_share,
),
original_verification_shares: self.verification_shares(), original_verification_shares: self.verification_shares(),
verification_shares: self verification_shares,
.verification_shares() included,
.iter()
.map(|(l, share)| {
(*l, (*share * lagrange::<C::F>(*l, included)) + offset_verification_share)
})
.collect(),
included: included.to_vec(),
}) })
} }
} }
@@ -409,7 +469,7 @@ impl<C: Ciphersuite> ThresholdView<C> {
self.group_key self.group_key
} }
pub fn included(&self) -> &[u16] { pub fn included(&self) -> &[Participant] {
&self.included &self.included
} }
@@ -417,11 +477,11 @@ impl<C: Ciphersuite> ThresholdView<C> {
&self.secret_share &self.secret_share
} }
pub fn original_verification_share(&self, l: u16) -> C::G { pub fn original_verification_share(&self, l: Participant) -> C::G {
self.original_verification_shares[&l] self.original_verification_shares[&l]
} }
pub fn verification_share(&self, l: u16) -> C::G { pub fn verification_share(&self, l: Participant) -> C::G {
self.verification_shares[&l] self.verification_shares[&l]
} }
} }

View File

@@ -7,14 +7,12 @@ use std::{
use rand_core::{RngCore, CryptoRng}; use rand_core::{RngCore, CryptoRng};
use group::GroupEncoding; use ciphersuite::{group::GroupEncoding, Ciphersuite};
use ciphersuite::Ciphersuite;
use transcript::{Transcript, RecommendedTranscript}; use transcript::{Transcript, RecommendedTranscript};
use dleq::DLEqProof; use dleq::DLEqProof;
use crate::{DkgError, ThresholdCore, ThresholdKeys, validate_map}; use crate::{Participant, DkgError, ThresholdCore, ThresholdKeys, validate_map};
/// Promote a set of keys to another Ciphersuite definition. /// Promote a set of keys to another Ciphersuite definition.
pub trait CiphersuitePromote<C2: Ciphersuite> { pub trait CiphersuitePromote<C2: Ciphersuite> {
@@ -27,10 +25,10 @@ pub trait CiphersuitePromote<C2: Ciphersuite> {
fn promote(self) -> ThresholdKeys<C2>; fn promote(self) -> ThresholdKeys<C2>;
} }
fn transcript<G: GroupEncoding>(key: G, i: u16) -> RecommendedTranscript { fn transcript<G: GroupEncoding>(key: G, i: Participant) -> RecommendedTranscript {
let mut transcript = RecommendedTranscript::new(b"DKG Generator Promotion v0.2"); let mut transcript = RecommendedTranscript::new(b"DKG Generator Promotion v0.2");
transcript.append_message(b"group_key", key.to_bytes()); transcript.append_message(b"group_key", key.to_bytes());
transcript.append_message(b"participant", i.to_be_bytes()); transcript.append_message(b"participant", i.to_bytes());
transcript transcript
} }
@@ -61,9 +59,10 @@ impl<C: Ciphersuite> GeneratorProof<C> {
} }
} }
/// Promote a set of keys from one curve to another, where the elliptic curve is the same. /// Promote a set of keys from one generator to another, where the elliptic curve is the same.
/// Since the Ciphersuite trait additionally specifies a generator, this provides an O(n) way to /// Since the Ciphersuite trait additionally specifies a generator, this provides an O(n) way to
/// update the generator used with keys. The key generation protocol itself is exponential. /// update the generator used with keys. This outperforms the key generation protocol which is
// exponential.
pub struct GeneratorPromotion<C1: Ciphersuite, C2: Ciphersuite> { pub struct GeneratorPromotion<C1: Ciphersuite, C2: Ciphersuite> {
base: ThresholdKeys<C1>, base: ThresholdKeys<C1>,
proof: GeneratorProof<C1>, proof: GeneratorProof<C1>,
@@ -74,7 +73,7 @@ impl<C1: Ciphersuite, C2: Ciphersuite> GeneratorPromotion<C1, C2>
where where
C2: Ciphersuite<F = C1::F, G = C1::G>, C2: Ciphersuite<F = C1::F, G = C1::G>,
{ {
/// Begin promoting keys from one curve to another. Returns a proof this share was properly /// Begin promoting keys from one generator to another. Returns a proof this share was properly
/// promoted. /// promoted.
pub fn promote<R: RngCore + CryptoRng>( pub fn promote<R: RngCore + CryptoRng>(
rng: &mut R, rng: &mut R,
@@ -97,10 +96,10 @@ where
/// Complete promotion by taking in the proofs from all other participants. /// Complete promotion by taking in the proofs from all other participants.
pub fn complete( pub fn complete(
self, self,
proofs: &HashMap<u16, GeneratorProof<C1>>, proofs: &HashMap<Participant, GeneratorProof<C1>>,
) -> Result<ThresholdKeys<C2>, DkgError<()>> { ) -> Result<ThresholdKeys<C2>, DkgError<()>> {
let params = self.base.params(); let params = self.base.params();
validate_map(proofs, &(1 ..= params.n).collect::<Vec<_>>(), params.i)?; validate_map(proofs, &(1 ..= params.n).map(Participant).collect::<Vec<_>>(), params.i)?;
let original_shares = self.base.verification_shares(); let original_shares = self.base.verification_shares();

View File

@@ -3,7 +3,7 @@ use std::collections::HashMap;
use rand_core::{RngCore, CryptoRng}; use rand_core::{RngCore, CryptoRng};
use crate::{ use crate::{
Ciphersuite, ThresholdParams, ThresholdCore, Ciphersuite, Participant, ThresholdParams, ThresholdCore,
frost::{KeyGenMachine, SecretShare, KeyMachine}, frost::{KeyGenMachine, SecretShare, KeyMachine},
encryption::{EncryptionKeyMessage, EncryptedMessage}, encryption::{EncryptionKeyMessage, EncryptedMessage},
tests::{THRESHOLD, PARTICIPANTS, clone_without}, tests::{THRESHOLD, PARTICIPANTS, clone_without},
@@ -11,30 +11,31 @@ use crate::{
// Needed so rustfmt doesn't fail to format on line length issues // Needed so rustfmt doesn't fail to format on line length issues
type FrostEncryptedMessage<C> = EncryptedMessage<C, SecretShare<<C as Ciphersuite>::F>>; type FrostEncryptedMessage<C> = EncryptedMessage<C, SecretShare<<C as Ciphersuite>::F>>;
type FrostSecretShares<C> = HashMap<u16, FrostEncryptedMessage<C>>; type FrostSecretShares<C> = HashMap<Participant, FrostEncryptedMessage<C>>;
const CONTEXT: &str = "DKG Test Key Generation";
// Commit, then return enc key and shares // Commit, then return enc key and shares
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
fn commit_enc_keys_and_shares<R: RngCore + CryptoRng, C: Ciphersuite>( fn commit_enc_keys_and_shares<R: RngCore + CryptoRng, C: Ciphersuite>(
rng: &mut R, rng: &mut R,
) -> (HashMap<u16, KeyMachine<C>>, HashMap<u16, C::G>, HashMap<u16, FrostSecretShares<C>>) { ) -> (
HashMap<Participant, KeyMachine<C>>,
HashMap<Participant, C::G>,
HashMap<Participant, FrostSecretShares<C>>,
) {
let mut machines = HashMap::new(); let mut machines = HashMap::new();
let mut commitments = HashMap::new(); let mut commitments = HashMap::new();
let mut enc_keys = HashMap::new(); let mut enc_keys = HashMap::new();
for i in 1 ..= PARTICIPANTS { for i in (1 ..= PARTICIPANTS).map(Participant) {
let machine = KeyGenMachine::<C>::new( let params = ThresholdParams::new(THRESHOLD, PARTICIPANTS, i).unwrap();
ThresholdParams::new(THRESHOLD, PARTICIPANTS, i).unwrap(), let machine = KeyGenMachine::<C>::new(params, CONTEXT.to_string());
"DKG Test Key Generation".to_string(),
);
let (machine, these_commitments) = machine.generate_coefficients(rng); let (machine, these_commitments) = machine.generate_coefficients(rng);
machines.insert(i, machine); machines.insert(i, machine);
commitments.insert( commitments.insert(
i, i,
EncryptionKeyMessage::read::<&[u8]>( EncryptionKeyMessage::read::<&[u8]>(&mut these_commitments.serialize().as_ref(), params)
&mut these_commitments.serialize().as_ref(),
ThresholdParams { t: THRESHOLD, n: PARTICIPANTS, i: 1 },
)
.unwrap(), .unwrap(),
); );
enc_keys.insert(i, commitments[&i].enc_key()); enc_keys.insert(i, commitments[&i].enc_key());
@@ -53,7 +54,8 @@ fn commit_enc_keys_and_shares<R: RngCore + CryptoRng, C: Ciphersuite>(
l, l,
EncryptedMessage::read::<&[u8]>( EncryptedMessage::read::<&[u8]>(
&mut share.serialize().as_ref(), &mut share.serialize().as_ref(),
ThresholdParams { t: THRESHOLD, n: PARTICIPANTS, i: 1 }, // Only t/n actually matters, so hardcode i to 1 here
ThresholdParams { t: THRESHOLD, n: PARTICIPANTS, i: Participant(1) },
) )
.unwrap(), .unwrap(),
) )
@@ -68,8 +70,8 @@ fn commit_enc_keys_and_shares<R: RngCore + CryptoRng, C: Ciphersuite>(
} }
fn generate_secret_shares<C: Ciphersuite>( fn generate_secret_shares<C: Ciphersuite>(
shares: &HashMap<u16, FrostSecretShares<C>>, shares: &HashMap<Participant, FrostSecretShares<C>>,
recipient: u16, recipient: Participant,
) -> FrostSecretShares<C> { ) -> FrostSecretShares<C> {
let mut our_secret_shares = HashMap::new(); let mut our_secret_shares = HashMap::new();
for (i, shares) in shares { for (i, shares) in shares {
@@ -84,7 +86,7 @@ fn generate_secret_shares<C: Ciphersuite>(
/// Fully perform the FROST key generation algorithm. /// Fully perform the FROST key generation algorithm.
pub fn frost_gen<R: RngCore + CryptoRng, C: Ciphersuite>( pub fn frost_gen<R: RngCore + CryptoRng, C: Ciphersuite>(
rng: &mut R, rng: &mut R,
) -> HashMap<u16, ThresholdCore<C>> { ) -> HashMap<Participant, ThresholdCore<C>> {
let (mut machines, _, secret_shares) = commit_enc_keys_and_shares::<_, C>(rng); let (mut machines, _, secret_shares) = commit_enc_keys_and_shares::<_, C>(rng);
let mut verification_shares = None; let mut verification_shares = None;
@@ -122,16 +124,19 @@ mod literal {
use super::*; use super::*;
const ONE: Participant = Participant(1);
const TWO: Participant = Participant(2);
fn test_blame( fn test_blame(
machines: Vec<BlameMachine<Ristretto>>, machines: Vec<BlameMachine<Ristretto>>,
msg: FrostEncryptedMessage<Ristretto>, msg: FrostEncryptedMessage<Ristretto>,
blame: Option<EncryptionKeyProof<Ristretto>>, blame: Option<EncryptionKeyProof<Ristretto>>,
) { ) {
for machine in machines { for machine in machines {
let (additional, blamed) = machine.blame(1, 2, msg.clone(), blame.clone()); let (additional, blamed) = machine.blame(ONE, TWO, msg.clone(), blame.clone());
assert_eq!(blamed, 1); assert_eq!(blamed, ONE);
// Verify additional blame also works // Verify additional blame also works
assert_eq!(additional.blame(1, 2, msg.clone(), blame.clone()), 1); assert_eq!(additional.blame(ONE, TWO, msg.clone(), blame.clone()), ONE);
} }
} }
@@ -142,7 +147,7 @@ mod literal {
commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng); commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
// Mutate the PoP of the encrypted message from 1 to 2 // Mutate the PoP of the encrypted message from 1 to 2
secret_shares.get_mut(&1).unwrap().get_mut(&2).unwrap().invalidate_pop(); secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_pop();
let mut blame = None; let mut blame = None;
let machines = machines let machines = machines
@@ -150,8 +155,8 @@ mod literal {
.filter_map(|(i, machine)| { .filter_map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i); let our_secret_shares = generate_secret_shares(&secret_shares, i);
let machine = machine.calculate_share(&mut OsRng, our_secret_shares); let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
if i == 2 { if i == TWO {
assert_eq!(machine.err(), Some(DkgError::InvalidShare { participant: 1, blame: None })); assert_eq!(machine.err(), Some(DkgError::InvalidShare { participant: ONE, blame: None }));
// Explicitly declare we have a blame object, which happens to be None since invalid PoP // Explicitly declare we have a blame object, which happens to be None since invalid PoP
// is self-explainable // is self-explainable
blame = Some(None); blame = Some(None);
@@ -162,7 +167,7 @@ mod literal {
}) })
.collect::<Vec<_>>(); .collect::<Vec<_>>();
test_blame(machines, secret_shares[&1][&2].clone(), blame.unwrap()); test_blame(machines, secret_shares[&ONE][&TWO].clone(), blame.unwrap());
} }
#[test] #[test]
@@ -176,7 +181,12 @@ mod literal {
// We then malleate 1's blame proof, so 1 ends up malicious // We then malleate 1's blame proof, so 1 ends up malicious
// Doesn't simply invalidate the PoP as that won't have a blame statement // Doesn't simply invalidate the PoP as that won't have a blame statement
// By mutating the encrypted data, we do ensure a blame statement is created // By mutating the encrypted data, we do ensure a blame statement is created
secret_shares.get_mut(&2).unwrap().get_mut(&1).unwrap().invalidate_msg(&mut OsRng, 2); secret_shares
.get_mut(&TWO)
.unwrap()
.get_mut(&ONE)
.unwrap()
.invalidate_msg(&mut OsRng, CONTEXT, TWO);
let mut blame = None; let mut blame = None;
let machines = machines let machines = machines
@@ -184,9 +194,9 @@ mod literal {
.filter_map(|(i, machine)| { .filter_map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i); let our_secret_shares = generate_secret_shares(&secret_shares, i);
let machine = machine.calculate_share(&mut OsRng, our_secret_shares); let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
if i == 1 { if i == ONE {
blame = Some(match machine.err() { blame = Some(match machine.err() {
Some(DkgError::InvalidShare { participant: 2, blame: Some(blame) }) => Some(blame), Some(DkgError::InvalidShare { participant: TWO, blame: Some(blame) }) => Some(blame),
_ => panic!(), _ => panic!(),
}); });
None None
@@ -197,7 +207,7 @@ mod literal {
.collect::<Vec<_>>(); .collect::<Vec<_>>();
blame.as_mut().unwrap().as_mut().unwrap().invalidate_key(); blame.as_mut().unwrap().as_mut().unwrap().invalidate_key();
test_blame(machines, secret_shares[&2][&1].clone(), blame.unwrap()); test_blame(machines, secret_shares[&TWO][&ONE].clone(), blame.unwrap());
} }
// This should be largely equivalent to the prior test // This should be largely equivalent to the prior test
@@ -206,7 +216,12 @@ mod literal {
let (mut machines, _, mut secret_shares) = let (mut machines, _, mut secret_shares) =
commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng); commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
secret_shares.get_mut(&2).unwrap().get_mut(&1).unwrap().invalidate_msg(&mut OsRng, 2); secret_shares
.get_mut(&TWO)
.unwrap()
.get_mut(&ONE)
.unwrap()
.invalidate_msg(&mut OsRng, CONTEXT, TWO);
let mut blame = None; let mut blame = None;
let machines = machines let machines = machines
@@ -214,9 +229,9 @@ mod literal {
.filter_map(|(i, machine)| { .filter_map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i); let our_secret_shares = generate_secret_shares(&secret_shares, i);
let machine = machine.calculate_share(&mut OsRng, our_secret_shares); let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
if i == 1 { if i == ONE {
blame = Some(match machine.err() { blame = Some(match machine.err() {
Some(DkgError::InvalidShare { participant: 2, blame: Some(blame) }) => Some(blame), Some(DkgError::InvalidShare { participant: TWO, blame: Some(blame) }) => Some(blame),
_ => panic!(), _ => panic!(),
}); });
None None
@@ -227,7 +242,7 @@ mod literal {
.collect::<Vec<_>>(); .collect::<Vec<_>>();
blame.as_mut().unwrap().as_mut().unwrap().invalidate_dleq(); blame.as_mut().unwrap().as_mut().unwrap().invalidate_dleq();
test_blame(machines, secret_shares[&2][&1].clone(), blame.unwrap()); test_blame(machines, secret_shares[&TWO][&ONE].clone(), blame.unwrap());
} }
#[test] #[test]
@@ -235,11 +250,11 @@ mod literal {
let (mut machines, enc_keys, mut secret_shares) = let (mut machines, enc_keys, mut secret_shares) =
commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng); commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
secret_shares.get_mut(&1).unwrap().get_mut(&2).unwrap().invalidate_share_serialization( secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_share_serialization(
&mut OsRng, &mut OsRng,
b"FROST", CONTEXT,
1, ONE,
enc_keys[&2], enc_keys[&TWO],
); );
let mut blame = None; let mut blame = None;
@@ -248,9 +263,9 @@ mod literal {
.filter_map(|(i, machine)| { .filter_map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i); let our_secret_shares = generate_secret_shares(&secret_shares, i);
let machine = machine.calculate_share(&mut OsRng, our_secret_shares); let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
if i == 2 { if i == TWO {
blame = Some(match machine.err() { blame = Some(match machine.err() {
Some(DkgError::InvalidShare { participant: 1, blame: Some(blame) }) => Some(blame), Some(DkgError::InvalidShare { participant: ONE, blame: Some(blame) }) => Some(blame),
_ => panic!(), _ => panic!(),
}); });
None None
@@ -260,7 +275,7 @@ mod literal {
}) })
.collect::<Vec<_>>(); .collect::<Vec<_>>();
test_blame(machines, secret_shares[&1][&2].clone(), blame.unwrap()); test_blame(machines, secret_shares[&ONE][&TWO].clone(), blame.unwrap());
} }
#[test] #[test]
@@ -268,11 +283,11 @@ mod literal {
let (mut machines, enc_keys, mut secret_shares) = let (mut machines, enc_keys, mut secret_shares) =
commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng); commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
secret_shares.get_mut(&1).unwrap().get_mut(&2).unwrap().invalidate_share_value( secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_share_value(
&mut OsRng, &mut OsRng,
b"FROST", CONTEXT,
1, ONE,
enc_keys[&2], enc_keys[&TWO],
); );
let mut blame = None; let mut blame = None;
@@ -281,9 +296,9 @@ mod literal {
.filter_map(|(i, machine)| { .filter_map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i); let our_secret_shares = generate_secret_shares(&secret_shares, i);
let machine = machine.calculate_share(&mut OsRng, our_secret_shares); let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
if i == 2 { if i == TWO {
blame = Some(match machine.err() { blame = Some(match machine.err() {
Some(DkgError::InvalidShare { participant: 1, blame: Some(blame) }) => Some(blame), Some(DkgError::InvalidShare { participant: ONE, blame: Some(blame) }) => Some(blame),
_ => panic!(), _ => panic!(),
}); });
None None
@@ -293,6 +308,6 @@ mod literal {
}) })
.collect::<Vec<_>>(); .collect::<Vec<_>>();
test_blame(machines, secret_shares[&1][&2].clone(), blame.unwrap()); test_blame(machines, secret_shares[&ONE][&TWO].clone(), blame.unwrap());
} }
} }

View File

@@ -3,11 +3,9 @@ use std::collections::HashMap;
use rand_core::{RngCore, CryptoRng}; use rand_core::{RngCore, CryptoRng};
use group::ff::Field; use ciphersuite::{group::ff::Field, Ciphersuite};
use ciphersuite::Ciphersuite; use crate::{Participant, ThresholdCore, ThresholdKeys, lagrange};
use crate::{ThresholdCore, ThresholdKeys, lagrange};
/// FROST generation test. /// FROST generation test.
pub mod frost; pub mod frost;
@@ -33,7 +31,7 @@ pub fn clone_without<K: Clone + std::cmp::Eq + std::hash::Hash, V: Clone>(
} }
/// Recover the secret from a collection of keys. /// Recover the secret from a collection of keys.
pub fn recover_key<C: Ciphersuite>(keys: &HashMap<u16, ThresholdKeys<C>>) -> C::F { pub fn recover_key<C: Ciphersuite>(keys: &HashMap<Participant, ThresholdKeys<C>>) -> C::F {
let first = keys.values().next().expect("no keys provided"); let first = keys.values().next().expect("no keys provided");
assert!(keys.len() >= first.params().t().into(), "not enough keys provided"); assert!(keys.len() >= first.params().t().into(), "not enough keys provided");
let included = keys.keys().cloned().collect::<Vec<_>>(); let included = keys.keys().cloned().collect::<Vec<_>>();
@@ -48,18 +46,18 @@ pub fn recover_key<C: Ciphersuite>(keys: &HashMap<u16, ThresholdKeys<C>>) -> C::
/// Generate threshold keys for tests. /// Generate threshold keys for tests.
pub fn key_gen<R: RngCore + CryptoRng, C: Ciphersuite>( pub fn key_gen<R: RngCore + CryptoRng, C: Ciphersuite>(
rng: &mut R, rng: &mut R,
) -> HashMap<u16, ThresholdKeys<C>> { ) -> HashMap<Participant, ThresholdKeys<C>> {
let res = frost_gen(rng) let res = frost_gen(rng)
.drain() .drain()
.map(|(i, core)| { .map(|(i, core)| {
assert_eq!( assert_eq!(
&ThresholdCore::<C>::deserialize::<&[u8]>(&mut core.serialize().as_ref()).unwrap(), &ThresholdCore::<C>::read::<&[u8]>(&mut core.serialize().as_ref()).unwrap(),
&core &core
); );
(i, ThresholdKeys::new(core)) (i, ThresholdKeys::new(core))
}) })
.collect(); .collect();
assert_eq!(C::generator() * recover_key(&res), res[&1].group_key()); assert_eq!(C::generator() * recover_key(&res), res[&Participant(1)].group_key());
res res
} }

View File

@@ -5,9 +5,7 @@ use rand_core::{RngCore, CryptoRng};
use zeroize::Zeroize; use zeroize::Zeroize;
use group::Group; use ciphersuite::{group::Group, Ciphersuite};
use ciphersuite::Ciphersuite;
use crate::{ use crate::{
promote::{GeneratorPromotion, GeneratorProof}, promote::{GeneratorPromotion, GeneratorProof},

View File

@@ -12,10 +12,10 @@ all-features = true
rustdoc-args = ["--cfg", "docsrs"] rustdoc-args = ["--cfg", "docsrs"]
[dependencies] [dependencies]
thiserror = "1" thiserror = { version = "1", optional = true }
rand_core = "0.6" rand_core = "0.6"
zeroize = { version = "1.3", features = ["zeroize_derive"] } zeroize = { version = "^1.5", features = ["zeroize_derive"] }
digest = "0.10" digest = "0.10"
@@ -31,7 +31,7 @@ hex-literal = "0.3"
blake2 = "0.10" blake2 = "0.10"
k256 = { version = "0.11", features = ["arithmetic", "bits"] } k256 = { version = "0.12", features = ["arithmetic", "bits"] }
dalek-ff-group = { path = "../dalek-ff-group" } dalek-ff-group = { path = "../dalek-ff-group" }
transcript = { package = "flexible-transcript", path = "../transcript", features = ["recommended"] } transcript = { package = "flexible-transcript", path = "../transcript", features = ["recommended"] }
@@ -39,8 +39,14 @@ transcript = { package = "flexible-transcript", path = "../transcript", features
[features] [features]
std = [] std = []
serialize = ["std"] serialize = ["std"]
experimental = ["std", "multiexp"]
# Needed for cross-group DLEqs
black_box = []
secure_capacity_difference = [] secure_capacity_difference = []
experimental = ["std", "thiserror", "multiexp"]
# Only applies to experimental, yet is default to ensure security # Only applies to experimental, yet is default to ensure security
# experimental doesn't mandate it itself in case two curves with extreme
# capacity differences are desired to be used together, in which case the user
# must specify experimental without default features
default = ["secure_capacity_difference"] default = ["secure_capacity_difference"]

View File

@@ -1,4 +1,6 @@
use core::ops::Deref; use core::ops::{Deref, DerefMut};
#[cfg(feature = "serialize")]
use std::io::{Read, Write};
use thiserror::Error; use thiserror::Error;
@@ -27,8 +29,28 @@ pub(crate) mod aos;
mod bits; mod bits;
use bits::{BitSignature, Bits}; use bits::{BitSignature, Bits};
#[cfg(feature = "serialize")] // Feature gated due to MSRV requirements
use std::io::{Read, Write}; #[cfg(feature = "black_box")]
pub(crate) fn black_box<T>(val: T) -> T {
core::hint::black_box(val)
}
#[cfg(not(feature = "black_box"))]
pub(crate) fn black_box<T>(val: T) -> T {
val
}
fn u8_from_bool(bit_ref: &mut bool) -> u8 {
let bit_ref = black_box(bit_ref);
let mut bit = black_box(*bit_ref);
let res = black_box(bit as u8);
bit.zeroize();
debug_assert!((res | 1) == 1);
bit_ref.zeroize();
res
}
#[cfg(feature = "serialize")] #[cfg(feature = "serialize")]
pub(crate) fn read_point<R: Read, G: PrimeGroup>(r: &mut R) -> std::io::Result<G> { pub(crate) fn read_point<R: Read, G: PrimeGroup>(r: &mut R) -> std::io::Result<G> {
@@ -224,15 +246,13 @@ where
let mut these_bits: u8 = 0; let mut these_bits: u8 = 0;
// Needed to zero out the bits // Needed to zero out the bits
#[allow(unused_assignments)] #[allow(unused_assignments)]
for (i, mut raw_bit) in raw_bits.iter_mut().enumerate() { for (i, mut bit) in raw_bits.iter_mut().enumerate() {
if i == capacity { if i == capacity {
break; break;
} }
let mut bit = u8::from(*raw_bit);
*raw_bit = false;
// Accumulate this bit // Accumulate this bit
let mut bit = u8_from_bool(bit.deref_mut());
these_bits |= bit << (i % bits_per_group); these_bits |= bit << (i % bits_per_group);
bit.zeroize(); bit.zeroize();

View File

@@ -1,21 +1,26 @@
use core::ops::DerefMut;
use ff::PrimeFieldBits; use ff::PrimeFieldBits;
use zeroize::Zeroize; use zeroize::Zeroize;
use crate::cross_group::u8_from_bool;
/// Convert a uniform scalar into one usable on both fields, clearing the top bits as needed. /// Convert a uniform scalar into one usable on both fields, clearing the top bits as needed.
pub fn scalar_normalize<F0: PrimeFieldBits + Zeroize, F1: PrimeFieldBits>( pub fn scalar_normalize<F0: PrimeFieldBits + Zeroize, F1: PrimeFieldBits>(
mut scalar: F0, mut scalar: F0,
) -> (F0, F1) { ) -> (F0, F1) {
let mutual_capacity = F0::CAPACITY.min(F1::CAPACITY); let mutual_capacity = F0::CAPACITY.min(F1::CAPACITY);
// The security of a mutual key is the security of the lower field. Accordingly, this bans a // A mutual key is only as secure as its weakest group
// difference of more than 4 bits // Accordingly, this bans a capacity difference of more than 4 bits to prevent a curve generally
// offering n-bits of security from being forced into a situation with much fewer bits
#[cfg(feature = "secure_capacity_difference")] #[cfg(feature = "secure_capacity_difference")]
assert!((F0::CAPACITY.max(F1::CAPACITY) - mutual_capacity) < 4); assert!((F0::CAPACITY.max(F1::CAPACITY) - mutual_capacity) <= 4);
let mut res1 = F0::zero(); let mut res1 = F0::zero();
let mut res2 = F1::zero(); let mut res2 = F1::zero();
// Uses the bit view API to ensure a consistent endianess // Uses the bits API to ensure a consistent endianess
let mut bits = scalar.to_le_bits(); let mut bits = scalar.to_le_bits();
scalar.zeroize(); scalar.zeroize();
// Convert it to big endian // Convert it to big endian
@@ -24,9 +29,9 @@ pub fn scalar_normalize<F0: PrimeFieldBits + Zeroize, F1: PrimeFieldBits>(
let mut skip = bits.len() - usize::try_from(mutual_capacity).unwrap(); let mut skip = bits.len() - usize::try_from(mutual_capacity).unwrap();
// Needed to zero out the bits // Needed to zero out the bits
#[allow(unused_assignments)] #[allow(unused_assignments)]
for mut raw_bit in bits.iter_mut() { for mut bit in bits.iter_mut() {
if skip > 0 { if skip > 0 {
*raw_bit = false; bit.deref_mut().zeroize();
skip -= 1; skip -= 1;
continue; continue;
} }
@@ -34,9 +39,7 @@ pub fn scalar_normalize<F0: PrimeFieldBits + Zeroize, F1: PrimeFieldBits>(
res1 = res1.double(); res1 = res1.double();
res2 = res2.double(); res2 = res2.double();
let mut bit = u8::from(*raw_bit); let mut bit = u8_from_bool(bit.deref_mut());
*raw_bit = false;
res1 += F0::from(bit.into()); res1 += F0::from(bit.into());
res2 += F1::from(bit.into()); res2 += F1::from(bit.into());
bit.zeroize(); bit.zeroize();

View File

@@ -21,6 +21,7 @@ pub mod cross_group;
#[cfg(test)] #[cfg(test)]
mod tests; mod tests;
// Produce a non-biased challenge from the transcript in the specified field
pub(crate) fn challenge<T: Transcript, F: PrimeField>(transcript: &mut T) -> F { pub(crate) fn challenge<T: Transcript, F: PrimeField>(transcript: &mut T) -> F {
// From here, there are three ways to get a scalar under the ff/group API // From here, there are three ways to get a scalar under the ff/group API
// 1: Scalar::random(ChaCha20Rng::from_seed(self.transcript.rng_seed(b"challenge"))) // 1: Scalar::random(ChaCha20Rng::from_seed(self.transcript.rng_seed(b"challenge")))
@@ -80,6 +81,7 @@ pub(crate) fn challenge<T: Transcript, F: PrimeField>(transcript: &mut T) -> F {
challenge challenge
} }
// Helper function to read a scalar
#[cfg(feature = "serialize")] #[cfg(feature = "serialize")]
fn read_scalar<R: Read, F: PrimeField>(r: &mut R) -> io::Result<F> { fn read_scalar<R: Read, F: PrimeField>(r: &mut R) -> io::Result<F> {
let mut repr = F::Repr::default(); let mut repr = F::Repr::default();
@@ -91,11 +93,13 @@ fn read_scalar<R: Read, F: PrimeField>(r: &mut R) -> io::Result<F> {
Ok(scalar.unwrap()) Ok(scalar.unwrap())
} }
#[derive(Debug)] /// Error for DLEq proofs.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum DLEqError { pub enum DLEqError {
InvalidProof, InvalidProof,
} }
/// A proof that points have the same discrete logarithm across generators.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)] #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct DLEqProof<G: PrimeGroup> { pub struct DLEqProof<G: PrimeGroup> {
c: G::Scalar, c: G::Scalar,
@@ -110,6 +114,8 @@ impl<G: PrimeGroup> DLEqProof<G> {
transcript.append_message(b"point", point.to_bytes()); transcript.append_message(b"point", point.to_bytes());
} }
/// Prove that the points created by `scalar * G`, for each specified generator, share a discrete
/// logarithm.
pub fn prove<R: RngCore + CryptoRng, T: Transcript>( pub fn prove<R: RngCore + CryptoRng, T: Transcript>(
rng: &mut R, rng: &mut R,
transcript: &mut T, transcript: &mut T,
@@ -134,6 +140,22 @@ impl<G: PrimeGroup> DLEqProof<G> {
DLEqProof { c, s } DLEqProof { c, s }
} }
// Transcript a specific generator/nonce/point (G/R/A), as used when verifying a proof.
// This takes in the generator/point, and then the challenge and solution to calculate the nonce.
fn verify_statement<T: Transcript>(
transcript: &mut T,
generator: G,
point: G,
c: G::Scalar,
s: G::Scalar,
) {
// s = r + ca
// sG - cA = R
// R, A
Self::transcript(transcript, generator, (generator * s) - (point * c), point);
}
/// Verify the specified points share a discrete logarithm across the specified generators.
pub fn verify<T: Transcript>( pub fn verify<T: Transcript>(
&self, &self,
transcript: &mut T, transcript: &mut T,
@@ -146,10 +168,7 @@ impl<G: PrimeGroup> DLEqProof<G> {
transcript.domain_separate(b"dleq"); transcript.domain_separate(b"dleq");
for (generator, point) in generators.iter().zip(points) { for (generator, point) in generators.iter().zip(points) {
// s = r + ca Self::verify_statement(transcript, *generator, *point, self.c, self.s);
// sG - cA = R
// R, A
Self::transcript(transcript, *generator, (*generator * self.s) - (*point * self.c), *point);
} }
if self.c != challenge(transcript) { if self.c != challenge(transcript) {
@@ -159,17 +178,20 @@ impl<G: PrimeGroup> DLEqProof<G> {
Ok(()) Ok(())
} }
/// Write a DLEq proof to something implementing Write.
#[cfg(feature = "serialize")] #[cfg(feature = "serialize")]
pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> { pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
w.write_all(self.c.to_repr().as_ref())?; w.write_all(self.c.to_repr().as_ref())?;
w.write_all(self.s.to_repr().as_ref()) w.write_all(self.s.to_repr().as_ref())
} }
/// Read a DLEq proof from something implementing Read.
#[cfg(feature = "serialize")] #[cfg(feature = "serialize")]
pub fn read<R: Read>(r: &mut R) -> io::Result<DLEqProof<G>> { pub fn read<R: Read>(r: &mut R) -> io::Result<DLEqProof<G>> {
Ok(DLEqProof { c: read_scalar(r)?, s: read_scalar(r)? }) Ok(DLEqProof { c: read_scalar(r)?, s: read_scalar(r)? })
} }
/// Serialize a DLEq proof to a `Vec<u8>`.
#[cfg(feature = "serialize")] #[cfg(feature = "serialize")]
pub fn serialize(&self) -> Vec<u8> { pub fn serialize(&self) -> Vec<u8> {
let mut res = vec![]; let mut res = vec![];
@@ -178,6 +200,9 @@ impl<G: PrimeGroup> DLEqProof<G> {
} }
} }
/// A proof that multiple series of points each have a single discrete logarithm across generators.
/// This is effectively n distinct DLEq proofs, one for each discrete logarithm and its points
/// across some generators, yet with a smaller overall proof size.
#[cfg(feature = "std")] #[cfg(feature = "std")]
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)] #[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct MultiDLEqProof<G: PrimeGroup> { pub struct MultiDLEqProof<G: PrimeGroup> {
@@ -188,6 +213,9 @@ pub struct MultiDLEqProof<G: PrimeGroup> {
#[cfg(feature = "std")] #[cfg(feature = "std")]
#[allow(non_snake_case)] #[allow(non_snake_case)]
impl<G: PrimeGroup> MultiDLEqProof<G> { impl<G: PrimeGroup> MultiDLEqProof<G> {
/// Prove for each scalar that the series of points created by multiplying it against its
/// matching generators share a discrete logarithm.
/// This function panics if `generators.len() != scalars.len()`.
pub fn prove<R: RngCore + CryptoRng, T: Transcript>( pub fn prove<R: RngCore + CryptoRng, T: Transcript>(
rng: &mut R, rng: &mut R,
transcript: &mut T, transcript: &mut T,
@@ -197,7 +225,13 @@ impl<G: PrimeGroup> MultiDLEqProof<G> {
where where
G::Scalar: Zeroize, G::Scalar: Zeroize,
{ {
transcript.domain_separate(b"multi-dleq"); assert_eq!(
generators.len(),
scalars.len(),
"amount of series of generators doesn't match the amount of scalars"
);
transcript.domain_separate(b"multi_dleq");
let mut nonces = vec![]; let mut nonces = vec![];
for (i, (scalar, generators)) in scalars.iter().zip(generators).enumerate() { for (i, (scalar, generators)) in scalars.iter().zip(generators).enumerate() {
@@ -226,6 +260,8 @@ impl<G: PrimeGroup> MultiDLEqProof<G> {
MultiDLEqProof { c, s } MultiDLEqProof { c, s }
} }
/// Verify each series of points share a discrete logarithm against their matching series of
/// generators.
pub fn verify<T: Transcript>( pub fn verify<T: Transcript>(
&self, &self,
transcript: &mut T, transcript: &mut T,
@@ -239,7 +275,7 @@ impl<G: PrimeGroup> MultiDLEqProof<G> {
Err(DLEqError::InvalidProof)?; Err(DLEqError::InvalidProof)?;
} }
transcript.domain_separate(b"multi-dleq"); transcript.domain_separate(b"multi_dleq");
for (i, (generators, points)) in generators.iter().zip(points).enumerate() { for (i, (generators, points)) in generators.iter().zip(points).enumerate() {
if points.len() != generators.len() { if points.len() != generators.len() {
Err(DLEqError::InvalidProof)?; Err(DLEqError::InvalidProof)?;
@@ -247,12 +283,7 @@ impl<G: PrimeGroup> MultiDLEqProof<G> {
transcript.append_message(b"discrete_logarithm", i.to_le_bytes()); transcript.append_message(b"discrete_logarithm", i.to_le_bytes());
for (generator, point) in generators.iter().zip(points) { for (generator, point) in generators.iter().zip(points) {
DLEqProof::transcript( DLEqProof::verify_statement(transcript, *generator, *point, self.c, self.s[i]);
transcript,
*generator,
(*generator * self.s[i]) - (*point * self.c),
*point,
);
} }
} }
@@ -263,6 +294,7 @@ impl<G: PrimeGroup> MultiDLEqProof<G> {
Ok(()) Ok(())
} }
/// Write a multi-DLEq proof to something implementing Write.
#[cfg(feature = "serialize")] #[cfg(feature = "serialize")]
pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> { pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
w.write_all(self.c.to_repr().as_ref())?; w.write_all(self.c.to_repr().as_ref())?;
@@ -272,6 +304,7 @@ impl<G: PrimeGroup> MultiDLEqProof<G> {
Ok(()) Ok(())
} }
/// Read a multi-DLEq proof from something implementing Read.
#[cfg(feature = "serialize")] #[cfg(feature = "serialize")]
pub fn read<R: Read>(r: &mut R, discrete_logs: usize) -> io::Result<MultiDLEqProof<G>> { pub fn read<R: Read>(r: &mut R, discrete_logs: usize) -> io::Result<MultiDLEqProof<G>> {
let c = read_scalar(r)?; let c = read_scalar(r)?;
@@ -282,6 +315,7 @@ impl<G: PrimeGroup> MultiDLEqProof<G> {
Ok(MultiDLEqProof { c, s }) Ok(MultiDLEqProof { c, s })
} }
/// Serialize a multi-DLEq proof to a `Vec<u8>`.
#[cfg(feature = "serialize")] #[cfg(feature = "serialize")]
pub fn serialize(&self) -> Vec<u8> { pub fn serialize(&self) -> Vec<u8> {
let mut res = vec![]; let mut res = vec![];

View File

@@ -16,12 +16,11 @@ rustdoc-args = ["--cfg", "docsrs"]
lazy_static = "1" lazy_static = "1"
rand_core = "0.6" rand_core = "0.6"
digest = "0.10"
zeroize = { version = "1.5", features = ["zeroize_derive"] } zeroize = { version = "^1.5", features = ["zeroize_derive"] }
subtle = "2.4" subtle = "^2.4"
ff = "0.12" ff = { version = "0.12", features = ["bits"] }
group = "0.12" group = "0.12"
generic-array = "0.14" generic-array = "0.14"
@@ -33,3 +32,6 @@ dalek-ff-group = { path = "../dalek-ff-group", version = "^0.1.2" }
hex = "0.4" hex = "0.4"
ff-group-tests = { path = "../ff-group-tests" } ff-group-tests = { path = "../ff-group-tests" }
[features]
black_box = []

View File

@@ -1,23 +1,49 @@
use zeroize::Zeroize;
// Feature gated due to MSRV requirements
#[cfg(feature = "black_box")]
pub(crate) fn black_box<T>(val: T) -> T {
core::hint::black_box(val)
}
#[cfg(not(feature = "black_box"))]
pub(crate) fn black_box<T>(val: T) -> T {
val
}
pub(crate) fn u8_from_bool(bit_ref: &mut bool) -> u8 {
let bit_ref = black_box(bit_ref);
let mut bit = black_box(*bit_ref);
let res = black_box(bit as u8);
bit.zeroize();
debug_assert!((res | 1) == 1);
bit_ref.zeroize();
res
}
#[doc(hidden)] #[doc(hidden)]
#[macro_export] #[macro_export]
macro_rules! field { macro_rules! field {
($FieldName: ident, $MODULUS: ident, $WIDE_MODULUS: ident, $NUM_BITS: literal) => { ($FieldName: ident, $MODULUS: ident, $WIDE_MODULUS: ident, $NUM_BITS: literal) => {
use core::ops::{Add, AddAssign, Neg, Sub, SubAssign, Mul, MulAssign}; use core::ops::{DerefMut, Add, AddAssign, Neg, Sub, SubAssign, Mul, MulAssign};
use rand_core::RngCore;
use subtle::{Choice, CtOption, ConstantTimeEq, ConstantTimeLess, ConditionallySelectable}; use subtle::{Choice, CtOption, ConstantTimeEq, ConstantTimeLess, ConditionallySelectable};
use rand_core::RngCore;
use generic_array::{typenum::U57, GenericArray}; use generic_array::{typenum::U57, GenericArray};
use crypto_bigint::{Integer, Encoding}; use crypto_bigint::{Integer, Encoding};
use ff::{Field, PrimeField, FieldBits, PrimeFieldBits}; use group::ff::{Field, PrimeField, FieldBits, PrimeFieldBits};
// Needed to publish for some reason? Yet not actually needed // Needed to publish for some reason? Yet not actually needed
#[allow(unused_imports)] #[allow(unused_imports)]
use dalek_ff_group::{from_wrapper, math_op}; use dalek_ff_group::{from_wrapper, math_op};
use dalek_ff_group::{constant_time, from_uint, math}; use dalek_ff_group::{constant_time, from_uint, math};
use $crate::backend::u8_from_bool;
fn reduce(x: U1024) -> U512 { fn reduce(x: U1024) -> U512 {
U512::from_le_slice(&x.reduce(&$WIDE_MODULUS).unwrap().to_le_bytes()[.. 64]) U512::from_le_slice(&x.reduce(&$WIDE_MODULUS).unwrap().to_le_bytes()[.. 64])
} }
@@ -59,10 +85,11 @@ macro_rules! field {
let mut res = Self(U512::ONE); let mut res = Self(U512::ONE);
let mut bits = 0; let mut bits = 0;
for (i, bit) in other.to_le_bits().iter().rev().enumerate() { for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() {
bits <<= 1; bits <<= 1;
let bit = u8::from(*bit); let mut bit = u8_from_bool(bit.deref_mut());
bits |= bit; bits |= bit;
bit.zeroize();
if ((i + 1) % 4) == 0 { if ((i + 1) % 4) == 0 {
if i != 3 { if i != 3 {

View File

@@ -32,5 +32,5 @@ field!(FieldElement, MODULUS, WIDE_MODULUS, 448);
#[test] #[test]
fn test_field() { fn test_field() {
// TODO: Move to test_prime_field_bits once the impl is finished // TODO: Move to test_prime_field_bits once the impl is finished
ff_group_tests::prime_field::test_prime_field::<FieldElement>(); ff_group_tests::prime_field::test_prime_field::<_, FieldElement>(&mut rand_core::OsRng);
} }

View File

@@ -1,5 +1,5 @@
use core::{ use core::{
ops::{Add, AddAssign, Neg, Sub, SubAssign, Mul, MulAssign}, ops::{DerefMut, Add, AddAssign, Neg, Sub, SubAssign, Mul, MulAssign},
iter::Sum, iter::Sum,
}; };
@@ -12,10 +12,14 @@ use subtle::{Choice, CtOption, ConstantTimeEq, ConditionallySelectable, Conditio
use crypto_bigint::U512; use crypto_bigint::U512;
use ff::{Field, PrimeField, PrimeFieldBits}; use group::{
use group::{Group, GroupEncoding, prime::PrimeGroup}; ff::{Field, PrimeField, PrimeFieldBits},
Group, GroupEncoding,
prime::PrimeGroup,
};
use crate::{ use crate::{
backend::u8_from_bool,
scalar::{Scalar, MODULUS as SCALAR_MODULUS}, scalar::{Scalar, MODULUS as SCALAR_MODULUS},
field::{FieldElement, MODULUS as FIELD_MODULUS, Q_4}, field::{FieldElement, MODULUS as FIELD_MODULUS, Q_4},
}; };
@@ -215,7 +219,7 @@ impl<'a> Sum<&'a Point> for Point {
impl Mul<Scalar> for Point { impl Mul<Scalar> for Point {
type Output = Point; type Output = Point;
fn mul(self, other: Scalar) -> Point { fn mul(self, mut other: Scalar) -> Point {
// Precompute the optimal amount that's a multiple of 2 // Precompute the optimal amount that's a multiple of 2
let mut table = [Point::identity(); 16]; let mut table = [Point::identity(); 16];
table[1] = self; table[1] = self;
@@ -225,10 +229,11 @@ impl Mul<Scalar> for Point {
let mut res = Self::identity(); let mut res = Self::identity();
let mut bits = 0; let mut bits = 0;
for (i, bit) in other.to_le_bits().iter().rev().enumerate() { for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() {
bits <<= 1; bits <<= 1;
let bit = u8::from(*bit); let mut bit = u8_from_bool(bit.deref_mut());
bits |= bit; bits |= bit;
bit.zeroize();
if ((i + 1) % 4) == 0 { if ((i + 1) % 4) == 0 {
if i != 3 { if i != 3 {
@@ -240,6 +245,7 @@ impl Mul<Scalar> for Point {
bits = 0; bits = 0;
} }
} }
other.zeroize();
res res
} }
} }
@@ -323,6 +329,7 @@ fn test_group() {
test_sub::<Point>(); test_sub::<Point>();
test_mul::<Point>(); test_mul::<Point>();
test_order::<Point>(); test_order::<Point>();
test_random::<_, Point>(&mut rand_core::OsRng);
test_encoding::<Point>(); test_encoding::<Point>();
} }

View File

@@ -35,5 +35,5 @@ impl Scalar {
#[test] #[test]
fn test_scalar_field() { fn test_scalar_field() {
// TODO: Move to test_prime_field_bits once the impl is finished // TODO: Move to test_prime_field_bits once the impl is finished
ff_group_tests::prime_field::test_prime_field::<Scalar>(); ff_group_tests::prime_field::test_prime_field::<_, Scalar>(&mut rand_core::OsRng);
} }

View File

@@ -13,8 +13,9 @@ all-features = true
rustdoc-args = ["--cfg", "docsrs"] rustdoc-args = ["--cfg", "docsrs"]
[dependencies] [dependencies]
rand_core = "0.6"
group = "0.12" group = "0.12"
[dev-dependencies] [dev-dependencies]
k256 = { version = "0.11", features = ["bits"] } k256 = { version = "0.12", features = ["bits"] }
p256 = { version = "0.11", features = ["bits"] } p256 = { version = "0.12", features = ["bits"] }

View File

@@ -1,3 +1,4 @@
use rand_core::RngCore;
use group::ff::Field; use group::ff::Field;
/// Perform basic tests on equality. /// Perform basic tests on equality.
@@ -106,8 +107,27 @@ pub fn test_cube<F: Field>() {
assert_eq!(two.cube(), two * two * two, "2^3 != 8"); assert_eq!(two.cube(), two * two * two, "2^3 != 8");
} }
/// Test random.
pub fn test_random<R: RngCore, F: Field>(rng: &mut R) {
let a = F::random(&mut *rng);
// Run up to 128 times so small fields, which may occasionally return the same element twice,
// are statistically unlikely to fail
// Field of order 1 will always fail this test due to lack of distinct elements to sample
// from
let mut pass = false;
for _ in 0 .. 128 {
let b = F::random(&mut *rng);
// This test passes if a distinct element is returned at least once
if b != a {
pass = true;
}
}
assert!(pass, "random always returned the same value");
}
/// Run all tests on fields implementing Field. /// Run all tests on fields implementing Field.
pub fn test_field<F: Field>() { pub fn test_field<R: RngCore, F: Field>(rng: &mut R) {
test_eq::<F>(); test_eq::<F>();
test_conditional_select::<F>(); test_conditional_select::<F>();
test_add::<F>(); test_add::<F>();
@@ -119,4 +139,5 @@ pub fn test_field<F: Field>() {
test_sqrt::<F>(); test_sqrt::<F>();
test_is_zero::<F>(); test_is_zero::<F>();
test_cube::<F>(); test_cube::<F>();
test_random::<R, F>(rng);
} }

View File

@@ -1,3 +1,4 @@
use rand_core::RngCore;
use group::{ use group::{
ff::{Field, PrimeFieldBits}, ff::{Field, PrimeFieldBits},
Group, Group,
@@ -10,7 +11,7 @@ use crate::prime_field::{test_prime_field, test_prime_field_bits};
pub fn test_eq<G: Group>() { pub fn test_eq<G: Group>() {
assert_eq!(G::identity(), G::identity(), "identity != identity"); assert_eq!(G::identity(), G::identity(), "identity != identity");
assert_eq!(G::generator(), G::generator(), "generator != generator"); assert_eq!(G::generator(), G::generator(), "generator != generator");
assert!(G::identity() != G::generator(), "identity != generator"); assert!(G::identity() != G::generator(), "identity == generator");
} }
/// Test identity. /// Test identity.
@@ -69,6 +70,11 @@ pub fn test_sum<G: Group>() {
G::generator().double(), G::generator().double(),
"[generator, generator].sum() != two" "[generator, generator].sum() != two"
); );
assert_eq!(
[G::generator().double(), G::generator()].iter().sum::<G>(),
G::generator().double() + G::generator(),
"[generator.double(), generator].sum() != three"
);
} }
/// Test negation. /// Test negation.
@@ -107,9 +113,31 @@ pub fn test_order<G: Group>() {
assert_eq!(minus_one + G::generator(), G::identity(), "((modulus - 1) * G) + G wasn't identity"); assert_eq!(minus_one + G::generator(), G::identity(), "((modulus - 1) * G) + G wasn't identity");
} }
/// Test random.
pub fn test_random<R: RngCore, G: Group>(rng: &mut R) {
let a = G::random(&mut *rng);
assert!(!bool::from(a.is_identity()), "random returned identity");
// Run up to 128 times so small groups, which may occasionally return the same element twice,
// are statistically unlikely to fail
// Groups of order <= 2 will always fail this test due to lack of distinct elements to sample
// from
let mut pass = false;
for _ in 0 .. 128 {
let b = G::random(&mut *rng);
assert!(!bool::from(b.is_identity()), "random returned identity");
// This test passes if a distinct element is returned at least once
if b != a {
pass = true;
}
}
assert!(pass, "random always returned the same value");
}
/// Run all tests on groups implementing Group. /// Run all tests on groups implementing Group.
pub fn test_group<G: Group>() { pub fn test_group<R: RngCore, G: Group>(rng: &mut R) {
test_prime_field::<G::Scalar>(); test_prime_field::<R, G::Scalar>(rng);
test_eq::<G>(); test_eq::<G>();
test_identity::<G>(); test_identity::<G>();
@@ -121,6 +149,7 @@ pub fn test_group<G: Group>() {
test_sub::<G>(); test_sub::<G>();
test_mul::<G>(); test_mul::<G>();
test_order::<G>(); test_order::<G>();
test_random::<R, G>(rng);
} }
/// Test encoding and decoding of group elements. /// Test encoding and decoding of group elements.
@@ -142,27 +171,35 @@ pub fn test_encoding<G: PrimeGroup>() {
} }
/// Run all tests on groups implementing PrimeGroup (Group + GroupEncoding). /// Run all tests on groups implementing PrimeGroup (Group + GroupEncoding).
pub fn test_prime_group<G: PrimeGroup>() { pub fn test_prime_group<R: RngCore, G: PrimeGroup>(rng: &mut R) {
test_group::<G>(); test_group::<R, G>(rng);
test_encoding::<G>(); test_encoding::<G>();
} }
/// Run all tests offered by this crate on the group. /// Run all tests offered by this crate on the group.
pub fn test_prime_group_bits<G: PrimeGroup>() pub fn test_prime_group_bits<R: RngCore, G: PrimeGroup>(rng: &mut R)
where where
G::Scalar: PrimeFieldBits, G::Scalar: PrimeFieldBits,
{ {
test_prime_field_bits::<G::Scalar>(); test_prime_field_bits::<R, G::Scalar>(rng);
test_prime_group::<G>(); test_prime_group::<R, G>(rng);
}
// Run these tests against k256/p256
// This ensures that these tests are well formed and won't error for valid implementations,
// assuming the validity of k256/p256
// While k256 and p256 may be malformed in a way which coincides with a faulty test, this is
// considered unlikely
// The other option, not running against any libraries, would leave faulty tests completely
// undetected
#[test]
fn test_k256() {
test_prime_group_bits::<_, k256::ProjectivePoint>(&mut rand_core::OsRng);
} }
#[test] #[test]
fn test_k256_group_encoding() { fn test_p256() {
test_prime_group_bits::<k256::ProjectivePoint>(); test_prime_group_bits::<_, p256::ProjectivePoint>(&mut rand_core::OsRng);
}
#[test]
fn test_p256_group_encoding() {
test_prime_group_bits::<p256::ProjectivePoint>();
} }

View File

@@ -1,3 +1,4 @@
use rand_core::RngCore;
use group::ff::{PrimeField, PrimeFieldBits}; use group::ff::{PrimeField, PrimeFieldBits};
use crate::field::test_field; use crate::field::test_field;
@@ -29,6 +30,16 @@ pub fn test_is_odd<F: PrimeField>() {
assert_eq!(F::one().is_odd().unwrap_u8(), 1, "1 was even"); assert_eq!(F::one().is_odd().unwrap_u8(), 1, "1 was even");
assert_eq!(F::one().is_even().unwrap_u8(), 0, "1 wasn't odd"); assert_eq!(F::one().is_even().unwrap_u8(), 0, "1 wasn't odd");
// Make sure an odd value added to an odd value is even
let two = F::one().double();
assert_eq!(two.is_odd().unwrap_u8(), 0, "2 was odd");
assert_eq!(two.is_even().unwrap_u8(), 1, "2 wasn't even");
// Make sure an even value added to an even value is even
let four = two.double();
assert_eq!(four.is_odd().unwrap_u8(), 0, "4 was odd");
assert_eq!(four.is_even().unwrap_u8(), 1, "4 wasn't even");
let neg_one = -F::one(); let neg_one = -F::one();
assert_eq!(neg_one.is_odd().unwrap_u8(), 0, "-1 was odd"); assert_eq!(neg_one.is_odd().unwrap_u8(), 0, "-1 was odd");
assert_eq!(neg_one.is_even().unwrap_u8(), 1, "-1 wasn't even"); assert_eq!(neg_one.is_even().unwrap_u8(), 1, "-1 wasn't even");
@@ -49,16 +60,39 @@ pub fn test_encoding<F: PrimeField>() {
F::from_repr_vartime(repr).unwrap(), F::from_repr_vartime(repr).unwrap(),
"{msg} couldn't be encoded and decoded", "{msg} couldn't be encoded and decoded",
); );
assert_eq!(
bytes.as_ref(),
F::from_repr(repr).unwrap().to_repr().as_ref(),
"canonical encoding decoded produced distinct encoding"
);
}; };
test(F::zero(), "0"); test(F::zero(), "0");
test(F::one(), "1"); test(F::one(), "1");
test(F::one() + F::one(), "2"); test(F::one() + F::one(), "2");
test(-F::one(), "-1"); test(-F::one(), "-1");
// Also check if a non-canonical encoding is possible
let mut high = (F::zero() - F::one()).to_repr();
let mut possible_non_canon = false;
for byte in high.as_mut() {
// The fact a bit isn't set in the highest possible value suggests there's unused bits
// If there's unused bits, mark the possibility of a non-canonical encoding and set the bits
if *byte != 255 {
possible_non_canon = true;
*byte = 255;
break;
}
}
// Any non-canonical encoding should fail to be read
if possible_non_canon {
assert!(!bool::from(F::from_repr(high).is_some()));
}
} }
/// Run all tests on fields implementing PrimeField. /// Run all tests on fields implementing PrimeField.
pub fn test_prime_field<F: PrimeField>() { pub fn test_prime_field<R: RngCore, F: PrimeField>(rng: &mut R) {
test_field::<F>(); test_field::<R, F>(rng);
test_zero::<F>(); test_zero::<F>();
test_one::<F>(); test_one::<F>();
@@ -265,6 +299,7 @@ pub fn test_root_of_unity<F: PrimeFieldBits>() {
} }
bit = bit.double(); bit = bit.double();
} }
assert!(bool::from(t.is_odd()), "t wasn't odd");
assert_eq!(pow(F::multiplicative_generator(), t), F::root_of_unity(), "incorrect root of unity"); assert_eq!(pow(F::multiplicative_generator(), t), F::root_of_unity(), "incorrect root of unity");
assert_eq!( assert_eq!(
@@ -275,8 +310,8 @@ pub fn test_root_of_unity<F: PrimeFieldBits>() {
} }
/// Run all tests on fields implementing PrimeFieldBits. /// Run all tests on fields implementing PrimeFieldBits.
pub fn test_prime_field_bits<F: PrimeFieldBits>() { pub fn test_prime_field_bits<R: RngCore, F: PrimeFieldBits>(rng: &mut R) {
test_prime_field::<F>(); test_prime_field::<R, F>(rng);
test_to_le_bits::<F>(); test_to_le_bits::<F>();
test_char_le_bits::<F>(); test_char_le_bits::<F>();

View File

@@ -18,25 +18,19 @@ thiserror = "1"
rand_core = "0.6" rand_core = "0.6"
rand_chacha = "0.3" rand_chacha = "0.3"
zeroize = { version = "1.5", features = ["zeroize_derive"] } zeroize = { version = "^1.5", features = ["zeroize_derive"] }
subtle = "2" subtle = "^2.4"
hex = "0.4" hex = { version = "0.4", optional = true }
digest = "0.10" digest = "0.10"
transcript = { package = "flexible-transcript", path = "../transcript", version = "0.2", features = ["recommended"] }
hkdf = "0.12"
chacha20 = { version = "0.9", features = ["zeroize"] }
group = "0.12"
dalek-ff-group = { path = "../dalek-ff-group", version = "^0.1.2", optional = true } dalek-ff-group = { path = "../dalek-ff-group", version = "^0.1.2", optional = true }
minimal-ed448 = { path = "../ed448", version = "^0.1.2", optional = true } minimal-ed448 = { path = "../ed448", version = "^0.1.2", optional = true }
ciphersuite = { path = "../ciphersuite", version = "0.1", features = ["std"] } ciphersuite = { path = "../ciphersuite", version = "0.1", features = ["std"] }
transcript = { package = "flexible-transcript", path = "../transcript", version = "0.2", features = ["recommended"] }
multiexp = { path = "../multiexp", version = "0.2", features = ["batch"] } multiexp = { path = "../multiexp", version = "0.2", features = ["batch"] }
schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "0.2" } schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "0.2" }
@@ -45,6 +39,7 @@ dleq = { path = "../dleq", version = "0.2", features = ["serialize"] }
dkg = { path = "../dkg", version = "0.2" } dkg = { path = "../dkg", version = "0.2" }
[dev-dependencies] [dev-dependencies]
hex = "0.4"
serde_json = "1" serde_json = "1"
dkg = { path = "../dkg", version = "0.2", features = ["tests"] } dkg = { path = "../dkg", version = "0.2", features = ["tests"] }
@@ -58,4 +53,4 @@ p256 = ["ciphersuite/p256"]
ed448 = ["minimal-ed448", "ciphersuite/ed448"] ed448 = ["minimal-ed448", "ciphersuite/ed448"]
tests = ["dkg/tests"] tests = ["hex", "dkg/tests"]

View File

@@ -6,7 +6,7 @@ use rand_core::{RngCore, CryptoRng};
use transcript::Transcript; use transcript::Transcript;
use crate::{Curve, FrostError, ThresholdKeys, ThresholdView}; use crate::{Participant, ThresholdKeys, ThresholdView, Curve, FrostError};
pub use schnorr::SchnorrSignature; pub use schnorr::SchnorrSignature;
/// Write an addendum to a writer. /// Write an addendum to a writer.
@@ -21,11 +21,11 @@ impl WriteAddendum for () {
} }
/// Trait alias for the requirements to be used as an addendum. /// Trait alias for the requirements to be used as an addendum.
pub trait Addendum: Clone + PartialEq + Debug + WriteAddendum {} pub trait Addendum: Send + Clone + PartialEq + Debug + WriteAddendum {}
impl<A: Clone + PartialEq + Debug + WriteAddendum> Addendum for A {} impl<A: Send + Clone + PartialEq + Debug + WriteAddendum> Addendum for A {}
/// Algorithm trait usable by the FROST signing machine to produce signatures.. /// Algorithm trait usable by the FROST signing machine to produce signatures..
pub trait Algorithm<C: Curve>: Clone { pub trait Algorithm<C: Curve>: Send + Clone {
/// The transcript format this algorithm uses. This likely should NOT be the IETF-compatible /// The transcript format this algorithm uses. This likely should NOT be the IETF-compatible
/// transcript included in this crate. /// transcript included in this crate.
type Transcript: Clone + Debug + Transcript; type Transcript: Clone + Debug + Transcript;
@@ -38,7 +38,7 @@ pub trait Algorithm<C: Curve>: Clone {
fn transcript(&mut self) -> &mut Self::Transcript; fn transcript(&mut self) -> &mut Self::Transcript;
/// Obtain the list of nonces to generate, as specified by the generators to create commitments /// Obtain the list of nonces to generate, as specified by the generators to create commitments
/// against per-nonce /// against per-nonce.
fn nonces(&self) -> Vec<Vec<C::G>>; fn nonces(&self) -> Vec<Vec<C::G>>;
/// Generate an addendum to FROST"s preprocessing stage. /// Generate an addendum to FROST"s preprocessing stage.
@@ -55,7 +55,7 @@ pub trait Algorithm<C: Curve>: Clone {
fn process_addendum( fn process_addendum(
&mut self, &mut self,
params: &ThresholdView<C>, params: &ThresholdView<C>,
l: u16, l: Participant,
reader: Self::Addendum, reader: Self::Addendum,
) -> Result<(), FrostError>; ) -> Result<(), FrostError>;
@@ -87,10 +87,13 @@ pub trait Algorithm<C: Curve>: Clone {
) -> Result<Vec<(C::F, C::G)>, ()>; ) -> Result<Vec<(C::F, C::G)>, ()>;
} }
mod sealed {
pub use super::*;
/// IETF-compliant transcript. This is incredibly naive and should not be used within larger /// IETF-compliant transcript. This is incredibly naive and should not be used within larger
/// protocols. /// protocols.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct IetfTranscript(Vec<u8>); pub struct IetfTranscript(pub(crate) Vec<u8>);
impl Transcript for IetfTranscript { impl Transcript for IetfTranscript {
type Challenge = Vec<u8>; type Challenge = Vec<u8>;
@@ -113,37 +116,53 @@ impl Transcript for IetfTranscript {
unimplemented!() unimplemented!()
} }
} }
}
pub(crate) use sealed::IetfTranscript;
/// HRAm usable by the included Schnorr signature algorithm to generate challenges. /// HRAm usable by the included Schnorr signature algorithm to generate challenges.
pub trait Hram<C: Curve>: Clone { pub trait Hram<C: Curve>: Send + Clone {
/// HRAm function to generate a challenge. /// HRAm function to generate a challenge.
/// H2 from the IETF draft, despite having a different argument set (not being pre-formatted). /// H2 from the IETF draft, despite having a different argument set (not being pre-formatted).
#[allow(non_snake_case)] #[allow(non_snake_case)]
fn hram(R: &C::G, A: &C::G, m: &[u8]) -> C::F; fn hram(R: &C::G, A: &C::G, m: &[u8]) -> C::F;
} }
/// IETF-compliant Schnorr signature algorithm ((R, s) where s = r + cx). /// Schnorr signature algorithm ((R, s) where s = r + cx).
#[derive(Clone)] #[derive(Clone)]
pub struct Schnorr<C: Curve, H: Hram<C>> { pub struct Schnorr<C: Curve, T: Clone + Debug + Transcript, H: Hram<C>> {
transcript: IetfTranscript, transcript: T,
c: Option<C::F>, c: Option<C::F>,
_hram: PhantomData<H>, _hram: PhantomData<H>,
} }
impl<C: Curve, H: Hram<C>> Default for Schnorr<C, H> { /// IETF-compliant Schnorr signature algorithm.
fn default() -> Self { ///
Self::new() /// This algorithm specifically uses the transcript format defined in the FROST IETF draft.
/// It's a naive transcript format not viable for usage in larger protocols, yet is presented here
/// in order to provide compatibility.
///
/// Usage of this with key offsets will break the intended compatibility as the IETF draft does not
/// specify a protocol for offsets.
pub type IetfSchnorr<C, H> = Schnorr<C, IetfTranscript, H>;
impl<C: Curve, T: Clone + Debug + Transcript, H: Hram<C>> Schnorr<C, T, H> {
/// Construct a Schnorr algorithm continuing the specified transcript.
pub fn new(transcript: T) -> Schnorr<C, T, H> {
Schnorr { transcript, c: None, _hram: PhantomData }
} }
} }
impl<C: Curve, H: Hram<C>> Schnorr<C, H> { impl<C: Curve, H: Hram<C>> IetfSchnorr<C, H> {
pub fn new() -> Schnorr<C, H> { /// Construct a IETF-compatible Schnorr algorithm.
Schnorr { transcript: IetfTranscript(vec![]), c: None, _hram: PhantomData } ///
/// Please see the `IetfSchnorr` documentation for the full details of this.
pub fn ietf() -> IetfSchnorr<C, H> {
Schnorr::new(IetfTranscript(vec![]))
} }
} }
impl<C: Curve, H: Hram<C>> Algorithm<C> for Schnorr<C, H> { impl<C: Curve, T: Clone + Debug + Transcript, H: Hram<C>> Algorithm<C> for Schnorr<C, T, H> {
type Transcript = IetfTranscript; type Transcript = T;
type Addendum = (); type Addendum = ();
type Signature = SchnorrSignature<C>; type Signature = SchnorrSignature<C>;
@@ -161,7 +180,12 @@ impl<C: Curve, H: Hram<C>> Algorithm<C> for Schnorr<C, H> {
Ok(()) Ok(())
} }
fn process_addendum(&mut self, _: &ThresholdView<C>, _: u16, _: ()) -> Result<(), FrostError> { fn process_addendum(
&mut self,
_: &ThresholdView<C>,
_: Participant,
_: (),
) -> Result<(), FrostError> {
Ok(()) Ok(())
} }

View File

@@ -1,10 +1,7 @@
use digest::Digest; use digest::Digest;
use group::GroupEncoding;
use minimal_ed448::{Scalar, Point}; use minimal_ed448::{Scalar, Point};
pub use ciphersuite::{group::GroupEncoding, Shake256_114, Ed448};
pub use ciphersuite::{Shake256_114, Ed448};
use crate::{curve::Curve, algorithm::Hram}; use crate::{curve::Curve, algorithm::Hram};

View File

@@ -1,6 +1,4 @@
use group::GroupEncoding; use ciphersuite::{group::GroupEncoding, Ciphersuite};
use ciphersuite::Ciphersuite;
use crate::{curve::Curve, algorithm::Hram}; use crate::{curve::Curve, algorithm::Hram};

View File

@@ -8,13 +8,14 @@ use subtle::ConstantTimeEq;
use digest::{Digest, Output}; use digest::{Digest, Output};
use group::{ pub use ciphersuite::{
group::{
ff::{Field, PrimeField}, ff::{Field, PrimeField},
Group, Group,
},
Ciphersuite,
}; };
pub use ciphersuite::Ciphersuite;
#[cfg(any(feature = "ristretto", feature = "ed25519"))] #[cfg(any(feature = "ristretto", feature = "ed25519"))]
mod dalek; mod dalek;
#[cfg(feature = "ristretto")] #[cfg(feature = "ristretto")]
@@ -77,6 +78,12 @@ pub trait Curve: Ciphersuite {
let mut repr = secret.to_repr(); let mut repr = secret.to_repr();
// Perform rejection sampling until we reach a non-zero nonce
// While the IETF spec doesn't explicitly require this, generating a zero nonce will produce
// commitments which will be rejected for being zero (and if they were used, leak the secret
// share)
// Rejection sampling here will prevent an honest participant from ever generating 'malicious'
// values and ensure safety
let mut res; let mut res;
while { while {
seed.extend(repr.as_ref()); seed.extend(repr.as_ref());
@@ -86,10 +93,7 @@ pub trait Curve: Ciphersuite {
seed = Zeroizing::new(vec![0; 32]); seed = Zeroizing::new(vec![0; 32]);
rng.fill_bytes(&mut seed); rng.fill_bytes(&mut seed);
} }
repr.as_mut().zeroize();
for i in repr.as_mut() {
i.zeroize();
}
res res
} }

View File

@@ -19,7 +19,7 @@ use std::collections::HashMap;
use thiserror::Error; use thiserror::Error;
/// Distributed key generation protocol. /// Distributed key generation protocol.
pub use dkg::{self, ThresholdParams, ThresholdCore, ThresholdKeys, ThresholdView}; pub use dkg::{self, Participant, ThresholdParams, ThresholdCore, ThresholdKeys, ThresholdView};
/// Curve trait and provided curves/HRAMs, forming various ciphersuites. /// Curve trait and provided curves/HRAMs, forming various ciphersuites.
pub mod curve; pub mod curve;
@@ -38,21 +38,21 @@ pub mod tests;
/// Various errors possible during signing. /// Various errors possible during signing.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)] #[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]
pub enum FrostError { pub enum FrostError {
#[error("invalid participant index (0 < index <= {0}, yet index is {1})")] #[error("invalid participant (0 < participant <= {0}, yet participant is {1})")]
InvalidParticipantIndex(u16, u16), InvalidParticipant(u16, Participant),
#[error("invalid signing set ({0})")] #[error("invalid signing set ({0})")]
InvalidSigningSet(&'static str), InvalidSigningSet(&'static str),
#[error("invalid participant quantity (expected {0}, got {1})")] #[error("invalid participant quantity (expected {0}, got {1})")]
InvalidParticipantQuantity(usize, usize), InvalidParticipantQuantity(usize, usize),
#[error("duplicated participant index ({0})")] #[error("duplicated participant ({0})")]
DuplicatedIndex(u16), DuplicatedParticipant(Participant),
#[error("missing participant {0}")] #[error("missing participant {0}")]
MissingParticipant(u16), MissingParticipant(Participant),
#[error("invalid preprocess (participant {0})")] #[error("invalid preprocess (participant {0})")]
InvalidPreprocess(u16), InvalidPreprocess(Participant),
#[error("invalid share (participant {0})")] #[error("invalid share (participant {0})")]
InvalidShare(u16), InvalidShare(Participant),
#[error("internal error ({0})")] #[error("internal error ({0})")]
InternalError(&'static str), InternalError(&'static str),
@@ -60,9 +60,9 @@ pub enum FrostError {
// Validate a map of values to have the expected included participants // Validate a map of values to have the expected included participants
pub fn validate_map<T>( pub fn validate_map<T>(
map: &HashMap<u16, T>, map: &HashMap<Participant, T>,
included: &[u16], included: &[Participant],
ours: u16, ours: Participant,
) -> Result<(), FrostError> { ) -> Result<(), FrostError> {
if (map.len() + 1) != included.len() { if (map.len() + 1) != included.len() {
Err(FrostError::InvalidParticipantQuantity(included.len(), map.len() + 1))?; Err(FrostError::InvalidParticipantQuantity(included.len(), map.len() + 1))?;
@@ -71,7 +71,7 @@ pub fn validate_map<T>(
for included in included { for included in included {
if *included == ours { if *included == ours {
if map.contains_key(included) { if map.contains_key(included) {
Err(FrostError::DuplicatedIndex(*included))?; Err(FrostError::DuplicatedParticipant(*included))?;
} }
continue; continue;
} }

View File

@@ -3,10 +3,11 @@
// Then there is a signature (a modified Chaum Pedersen proof) using multiple nonces at once // Then there is a signature (a modified Chaum Pedersen proof) using multiple nonces at once
// //
// Accordingly, in order for this library to be robust, it supports generating an arbitrary amount // Accordingly, in order for this library to be robust, it supports generating an arbitrary amount
// of nonces, each against an arbitrary list of basepoints // of nonces, each against an arbitrary list of generators
// //
// Each nonce remains of the form (d, e) and made into a proper nonce with d + (e * b) // Each nonce remains of the form (d, e) and made into a proper nonce with d + (e * b)
// When multiple D, E pairs are provided, a DLEq proof is also provided to confirm their integrity // When representations across multiple generators are provided, a DLEq proof is also provided to
// confirm their integrity
use core::ops::Deref; use core::ops::Deref;
use std::{ use std::{
@@ -20,12 +21,12 @@ use zeroize::{Zeroize, Zeroizing};
use transcript::Transcript; use transcript::Transcript;
use group::{ff::PrimeField, Group, GroupEncoding}; use ciphersuite::group::{ff::PrimeField, Group, GroupEncoding};
use multiexp::multiexp_vartime; use multiexp::multiexp_vartime;
use dleq::MultiDLEqProof; use dleq::MultiDLEqProof;
use crate::curve::Curve; use crate::{curve::Curve, Participant};
// Transcript used to aggregate binomial nonces for usage within a single DLEq proof. // Transcript used to aggregate binomial nonces for usage within a single DLEq proof.
fn aggregation_transcript<T: Transcript>(context: &[u8]) -> T { fn aggregation_transcript<T: Transcript>(context: &[u8]) -> T {
@@ -72,11 +73,12 @@ impl<C: Curve> GeneratorCommitments<C> {
#[derive(Clone, PartialEq, Eq)] #[derive(Clone, PartialEq, Eq)]
pub(crate) struct NonceCommitments<C: Curve> { pub(crate) struct NonceCommitments<C: Curve> {
// Called generators as these commitments are indexed by generator later on // Called generators as these commitments are indexed by generator later on
// So to get the commitments for the first generator, it'd be commitments.generators[0]
pub(crate) generators: Vec<GeneratorCommitments<C>>, pub(crate) generators: Vec<GeneratorCommitments<C>>,
} }
impl<C: Curve> NonceCommitments<C> { impl<C: Curve> NonceCommitments<C> {
pub(crate) fn new<R: RngCore + CryptoRng, T: Transcript>( pub(crate) fn new<R: RngCore + CryptoRng>(
rng: &mut R, rng: &mut R,
secret_share: &Zeroizing<C::F>, secret_share: &Zeroizing<C::F>,
generators: &[C::G], generators: &[C::G],
@@ -97,10 +99,7 @@ impl<C: Curve> NonceCommitments<C> {
(nonce, NonceCommitments { generators: commitments }) (nonce, NonceCommitments { generators: commitments })
} }
fn read<R: Read, T: Transcript>( fn read<R: Read>(reader: &mut R, generators: &[C::G]) -> io::Result<NonceCommitments<C>> {
reader: &mut R,
generators: &[C::G],
) -> io::Result<NonceCommitments<C>> {
Ok(NonceCommitments { Ok(NonceCommitments {
generators: (0 .. generators.len()) generators: (0 .. generators.len())
.map(|_| GeneratorCommitments::read(reader)) .map(|_| GeneratorCommitments::read(reader))
@@ -130,9 +129,11 @@ impl<C: Curve> NonceCommitments<C> {
} }
} }
/// Commitments for all the nonces across all their generators.
#[derive(Clone, PartialEq, Eq)] #[derive(Clone, PartialEq, Eq)]
pub(crate) struct Commitments<C: Curve> { pub(crate) struct Commitments<C: Curve> {
// Called nonces as these commitments are indexed by nonce // Called nonces as these commitments are indexed by nonce
// So to get the commitments for the first nonce, it'd be commitments.nonces[0]
pub(crate) nonces: Vec<NonceCommitments<C>>, pub(crate) nonces: Vec<NonceCommitments<C>>,
// DLEq Proof proving that each set of commitments were generated using a single pair of discrete // DLEq Proof proving that each set of commitments were generated using a single pair of discrete
// logarithms // logarithms
@@ -153,7 +154,7 @@ impl<C: Curve> Commitments<C> {
let mut dleq_nonces = vec![]; let mut dleq_nonces = vec![];
for generators in planned_nonces { for generators in planned_nonces {
let (nonce, these_commitments): (Nonce<C>, _) = let (nonce, these_commitments): (Nonce<C>, _) =
NonceCommitments::new::<_, T>(&mut *rng, secret_share, generators); NonceCommitments::new(&mut *rng, secret_share, generators);
if generators.len() > 1 { if generators.len() > 1 {
dleq_generators.push(generators.clone()); dleq_generators.push(generators.clone());
@@ -201,7 +202,7 @@ impl<C: Curve> Commitments<C> {
context: &[u8], context: &[u8],
) -> io::Result<Self> { ) -> io::Result<Self> {
let nonces = (0 .. generators.len()) let nonces = (0 .. generators.len())
.map(|i| NonceCommitments::read::<_, T>(reader, &generators[i])) .map(|i| NonceCommitments::read(reader, &generators[i]))
.collect::<Result<Vec<NonceCommitments<C>>, _>>()?; .collect::<Result<Vec<NonceCommitments<C>>, _>>()?;
let mut dleq_generators = vec![]; let mut dleq_generators = vec![];
@@ -247,17 +248,17 @@ pub(crate) struct IndividualBinding<C: Curve> {
binding_factors: Option<Vec<C::F>>, binding_factors: Option<Vec<C::F>>,
} }
pub(crate) struct BindingFactor<C: Curve>(pub(crate) HashMap<u16, IndividualBinding<C>>); pub(crate) struct BindingFactor<C: Curve>(pub(crate) HashMap<Participant, IndividualBinding<C>>);
impl<C: Curve> BindingFactor<C> { impl<C: Curve> BindingFactor<C> {
pub(crate) fn insert(&mut self, i: u16, commitments: Commitments<C>) { pub(crate) fn insert(&mut self, i: Participant, commitments: Commitments<C>) {
self.0.insert(i, IndividualBinding { commitments, binding_factors: None }); self.0.insert(i, IndividualBinding { commitments, binding_factors: None });
} }
pub(crate) fn calculate_binding_factors<T: Clone + Transcript>(&mut self, transcript: &mut T) { pub(crate) fn calculate_binding_factors<T: Clone + Transcript>(&mut self, transcript: &mut T) {
for (l, binding) in self.0.iter_mut() { for (l, binding) in self.0.iter_mut() {
let mut transcript = transcript.clone(); let mut transcript = transcript.clone();
transcript.append_message(b"participant", C::F::from(u64::from(*l)).to_repr()); transcript.append_message(b"participant", C::F::from(u64::from(u16::from(*l))).to_repr());
// It *should* be perfectly fine to reuse a binding factor for multiple nonces // It *should* be perfectly fine to reuse a binding factor for multiple nonces
// This generates a binding factor per nonce just to ensure it never comes up as a question // This generates a binding factor per nonce just to ensure it never comes up as a question
binding.binding_factors = Some( binding.binding_factors = Some(
@@ -268,12 +269,12 @@ impl<C: Curve> BindingFactor<C> {
} }
} }
pub(crate) fn binding_factors(&self, i: u16) -> &[C::F] { pub(crate) fn binding_factors(&self, i: Participant) -> &[C::F] {
self.0[&i].binding_factors.as_ref().unwrap() self.0[&i].binding_factors.as_ref().unwrap()
} }
// Get the bound nonces for a specific party // Get the bound nonces for a specific party
pub(crate) fn bound(&self, l: u16) -> Vec<Vec<C::G>> { pub(crate) fn bound(&self, l: Participant) -> Vec<Vec<C::G>> {
let mut res = vec![]; let mut res = vec![];
for (i, (nonce, rho)) in for (i, (nonce, rho)) in
self.0[&l].commitments.nonces.iter().zip(self.binding_factors(l).iter()).enumerate() self.0[&l].commitments.nonces.iter().zip(self.binding_factors(l).iter()).enumerate()

View File

@@ -11,15 +11,12 @@ use zeroize::{Zeroize, Zeroizing};
use transcript::Transcript; use transcript::Transcript;
use group::{ use ciphersuite::group::{ff::PrimeField, GroupEncoding};
ff::{Field, PrimeField},
GroupEncoding,
};
use multiexp::BatchVerifier; use multiexp::BatchVerifier;
use crate::{ use crate::{
curve::Curve, curve::Curve,
FrostError, ThresholdParams, ThresholdKeys, ThresholdView, Participant, FrostError, ThresholdParams, ThresholdKeys, ThresholdView,
algorithm::{WriteAddendum, Addendum, Algorithm}, algorithm::{WriteAddendum, Addendum, Algorithm},
validate_map, validate_map,
}; };
@@ -89,7 +86,7 @@ impl<C: Curve, A: Addendum> Writable for Preprocess<C, A> {
pub struct CachedPreprocess(pub Zeroizing<[u8; 32]>); pub struct CachedPreprocess(pub Zeroizing<[u8; 32]>);
/// Trait for the initial state machine of a two-round signing protocol. /// Trait for the initial state machine of a two-round signing protocol.
pub trait PreprocessMachine { pub trait PreprocessMachine: Send {
/// Preprocess message for this machine. /// Preprocess message for this machine.
type Preprocess: Clone + PartialEq + Writable; type Preprocess: Clone + PartialEq + Writable;
/// Signature produced by this machine. /// Signature produced by this machine.
@@ -198,12 +195,14 @@ impl<C: Curve> Writable for SignatureShare<C> {
#[cfg(any(test, feature = "tests"))] #[cfg(any(test, feature = "tests"))]
impl<C: Curve> SignatureShare<C> { impl<C: Curve> SignatureShare<C> {
pub(crate) fn invalidate(&mut self) { pub(crate) fn invalidate(&mut self) {
use ciphersuite::group::ff::Field;
self.0 += C::F::one(); self.0 += C::F::one();
} }
} }
/// Trait for the second machine of a two-round signing protocol. /// Trait for the second machine of a two-round signing protocol.
pub trait SignMachine<S>: Sized { pub trait SignMachine<S>: Send + Sized {
/// Params used to instantiate this machine which can be used to rebuild from a cache. /// Params used to instantiate this machine which can be used to rebuild from a cache.
type Params: Clone; type Params: Clone;
/// Keys used for signing operations. /// Keys used for signing operations.
@@ -221,8 +220,8 @@ pub trait SignMachine<S>: Sized {
/// security as your private key share. /// security as your private key share.
fn cache(self) -> CachedPreprocess; fn cache(self) -> CachedPreprocess;
/// Create a sign machine from a cached preprocess. After this, the preprocess should be fully /// Create a sign machine from a cached preprocess. After this, the preprocess must be deleted so
/// deleted, as it must never be reused. It is /// it's never reused. Any reuse would cause the signer to leak their secret share.
fn from_cache( fn from_cache(
params: Self::Params, params: Self::Params,
keys: Self::Keys, keys: Self::Keys,
@@ -239,7 +238,7 @@ pub trait SignMachine<S>: Sized {
/// become the signing set for this session. /// become the signing set for this session.
fn sign( fn sign(
self, self,
commitments: HashMap<u16, Self::Preprocess>, commitments: HashMap<Participant, Self::Preprocess>,
msg: &[u8], msg: &[u8],
) -> Result<(Self::SignatureMachine, Self::SignatureShare), FrostError>; ) -> Result<(Self::SignatureMachine, Self::SignatureShare), FrostError>;
} }
@@ -291,7 +290,7 @@ impl<C: Curve, A: Algorithm<C>> SignMachine<A::Signature> for AlgorithmSignMachi
fn sign( fn sign(
mut self, mut self,
mut preprocesses: HashMap<u16, Preprocess<C, A::Addendum>>, mut preprocesses: HashMap<Participant, Preprocess<C, A::Addendum>>,
msg: &[u8], msg: &[u8],
) -> Result<(Self::SignatureMachine, SignatureShare<C>), FrostError> { ) -> Result<(Self::SignatureMachine, SignatureShare<C>), FrostError> {
let multisig_params = self.params.multisig_params(); let multisig_params = self.params.multisig_params();
@@ -307,22 +306,18 @@ impl<C: Curve, A: Algorithm<C>> SignMachine<A::Signature> for AlgorithmSignMachi
if included.len() < usize::from(multisig_params.t()) { if included.len() < usize::from(multisig_params.t()) {
Err(FrostError::InvalidSigningSet("not enough signers"))?; Err(FrostError::InvalidSigningSet("not enough signers"))?;
} }
// Invalid index
if included[0] == 0 {
Err(FrostError::InvalidParticipantIndex(included[0], multisig_params.n()))?;
}
// OOB index // OOB index
if included[included.len() - 1] > multisig_params.n() { if u16::from(included[included.len() - 1]) > multisig_params.n() {
Err(FrostError::InvalidParticipantIndex(included[included.len() - 1], multisig_params.n()))?; Err(FrostError::InvalidParticipant(multisig_params.n(), included[included.len() - 1]))?;
} }
// Same signer included multiple times // Same signer included multiple times
for i in 0 .. (included.len() - 1) { for i in 0 .. (included.len() - 1) {
if included[i] == included[i + 1] { if included[i] == included[i + 1] {
Err(FrostError::DuplicatedIndex(included[i]))?; Err(FrostError::DuplicatedParticipant(included[i]))?;
} }
} }
let view = self.params.keys.view(&included).unwrap(); let view = self.params.keys.view(included.clone()).unwrap();
validate_map(&preprocesses, &included, multisig_params.i())?; validate_map(&preprocesses, &included, multisig_params.i())?;
{ {
@@ -332,7 +327,7 @@ impl<C: Curve, A: Algorithm<C>> SignMachine<A::Signature> for AlgorithmSignMachi
let nonces = self.params.algorithm.nonces(); let nonces = self.params.algorithm.nonces();
#[allow(non_snake_case)] #[allow(non_snake_case)]
let mut B = BindingFactor(HashMap::<u16, _>::with_capacity(included.len())); let mut B = BindingFactor(HashMap::<Participant, _>::with_capacity(included.len()));
{ {
// Parse the preprocesses // Parse the preprocesses
for l in &included { for l in &included {
@@ -341,7 +336,7 @@ impl<C: Curve, A: Algorithm<C>> SignMachine<A::Signature> for AlgorithmSignMachi
.params .params
.algorithm .algorithm
.transcript() .transcript()
.append_message(b"participant", C::F::from(u64::from(*l)).to_repr()); .append_message(b"participant", C::F::from(u64::from(u16::from(*l))).to_repr());
} }
if *l == self.params.keys.params().i() { if *l == self.params.keys.params().i() {
@@ -440,7 +435,7 @@ impl<C: Curve, A: Algorithm<C>> SignMachine<A::Signature> for AlgorithmSignMachi
} }
/// Trait for the final machine of a two-round signing protocol. /// Trait for the final machine of a two-round signing protocol.
pub trait SignatureMachine<S> { pub trait SignatureMachine<S>: Send {
/// SignatureShare message for this machine. /// SignatureShare message for this machine.
type SignatureShare: Clone + PartialEq + Writable; type SignatureShare: Clone + PartialEq + Writable;
@@ -449,7 +444,7 @@ pub trait SignatureMachine<S> {
/// Complete signing. /// Complete signing.
/// Takes in everyone elses' shares. Returns the signature. /// Takes in everyone elses' shares. Returns the signature.
fn complete(self, shares: HashMap<u16, Self::SignatureShare>) -> Result<S, FrostError>; fn complete(self, shares: HashMap<Participant, Self::SignatureShare>) -> Result<S, FrostError>;
} }
/// Final step of the state machine for the signing process. /// Final step of the state machine for the signing process.
@@ -472,7 +467,7 @@ impl<C: Curve, A: Algorithm<C>> SignatureMachine<A::Signature> for AlgorithmSign
fn complete( fn complete(
self, self,
mut shares: HashMap<u16, SignatureShare<C>>, mut shares: HashMap<Participant, SignatureShare<C>>,
) -> Result<A::Signature, FrostError> { ) -> Result<A::Signature, FrostError> {
let params = self.params.multisig_params(); let params = self.params.multisig_params();
validate_map(&shares, self.view.included(), params.i())?; validate_map(&shares, self.view.included(), params.i())?;

View File

@@ -9,6 +9,14 @@ use crate::{
tests::vectors::{Vectors, test_with_vectors}, tests::vectors::{Vectors, test_with_vectors},
}; };
// This is a vector from RFC 8032 to sanity check the HRAM is properly implemented
// The RFC 8032 Ed448 HRAM is much more complex than the other HRAMs, hence why it's helpful to
// have additional testing for it
// Additionally, FROST, despite being supposed to use the RFC 8032 HRAMs, originally applied
// Ed25519's HRAM to both Ed25519 and Ed448
// This test was useful when proposing the corrections to the spec to demonstrate the correctness
// the new algorithm/vectors
// While we could test all Ed448 vectors here, this is sufficient for sanity
#[test] #[test]
fn ed448_8032_vector() { fn ed448_8032_vector() {
let context = hex::decode("666f6f").unwrap(); let context = hex::decode("666f6f").unwrap();

View File

@@ -5,11 +5,15 @@ use rand_core::{RngCore, CryptoRng};
pub use dkg::tests::{key_gen, recover_key}; pub use dkg::tests::{key_gen, recover_key};
use crate::{ use crate::{
Curve, ThresholdKeys, Curve, Participant, ThresholdKeys, FrostError,
algorithm::Algorithm, algorithm::{Algorithm, Hram, IetfSchnorr},
sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine}, sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine},
}; };
/// Tests for the nonce handling code.
pub mod nonces;
use nonces::{test_multi_nonce, test_invalid_commitment, test_invalid_dleq_proof};
/// Vectorized test suite to ensure consistency. /// Vectorized test suite to ensure consistency.
pub mod vectors; pub mod vectors;
@@ -36,11 +40,14 @@ pub fn clone_without<K: Clone + std::cmp::Eq + std::hash::Hash, V: Clone>(
pub fn algorithm_machines<R: RngCore, C: Curve, A: Algorithm<C>>( pub fn algorithm_machines<R: RngCore, C: Curve, A: Algorithm<C>>(
rng: &mut R, rng: &mut R,
algorithm: A, algorithm: A,
keys: &HashMap<u16, ThresholdKeys<C>>, keys: &HashMap<Participant, ThresholdKeys<C>>,
) -> HashMap<u16, AlgorithmMachine<C, A>> { ) -> HashMap<Participant, AlgorithmMachine<C, A>> {
let mut included = vec![]; let mut included = vec![];
while included.len() < usize::from(keys[&1].params().t()) { while included.len() < usize::from(keys[&Participant::new(1).unwrap()].params().t()) {
let n = u16::try_from((rng.next_u64() % u64::try_from(keys.len()).unwrap()) + 1).unwrap(); let n = Participant::new(
u16::try_from((rng.next_u64() % u64::try_from(keys.len()).unwrap()) + 1).unwrap(),
)
.unwrap();
if included.contains(&n) { if included.contains(&n) {
continue; continue;
} }
@@ -59,21 +66,16 @@ pub fn algorithm_machines<R: RngCore, C: Curve, A: Algorithm<C>>(
.collect() .collect()
} }
// Run the commit step and generate signature shares // Run the preprocess step
#[allow(clippy::type_complexity)] pub(crate) fn preprocess<
pub(crate) fn commit_and_shares<
R: RngCore + CryptoRng, R: RngCore + CryptoRng,
M: PreprocessMachine, M: PreprocessMachine,
F: FnMut(&mut R, &mut HashMap<u16, M::SignMachine>), F: FnMut(&mut R, &mut HashMap<Participant, M::SignMachine>),
>( >(
rng: &mut R, rng: &mut R,
mut machines: HashMap<u16, M>, mut machines: HashMap<Participant, M>,
mut cache: F, mut cache: F,
msg: &[u8], ) -> (HashMap<Participant, M::SignMachine>, HashMap<Participant, M::Preprocess>) {
) -> (
HashMap<u16, <M::SignMachine as SignMachine<M::Signature>>::SignatureMachine>,
HashMap<u16, <M::SignMachine as SignMachine<M::Signature>>::SignatureShare>,
) {
let mut commitments = HashMap::new(); let mut commitments = HashMap::new();
let mut machines = machines let mut machines = machines
.drain() .drain()
@@ -90,6 +92,26 @@ pub(crate) fn commit_and_shares<
cache(rng, &mut machines); cache(rng, &mut machines);
(machines, commitments)
}
// Run the preprocess and generate signature shares
#[allow(clippy::type_complexity)]
pub(crate) fn preprocess_and_shares<
R: RngCore + CryptoRng,
M: PreprocessMachine,
F: FnMut(&mut R, &mut HashMap<Participant, M::SignMachine>),
>(
rng: &mut R,
machines: HashMap<Participant, M>,
cache: F,
msg: &[u8],
) -> (
HashMap<Participant, <M::SignMachine as SignMachine<M::Signature>>::SignatureMachine>,
HashMap<Participant, <M::SignMachine as SignMachine<M::Signature>>::SignatureShare>,
) {
let (mut machines, commitments) = preprocess(rng, machines, cache);
let mut shares = HashMap::new(); let mut shares = HashMap::new();
let machines = machines let machines = machines
.drain() .drain()
@@ -110,14 +132,14 @@ pub(crate) fn commit_and_shares<
fn sign_internal< fn sign_internal<
R: RngCore + CryptoRng, R: RngCore + CryptoRng,
M: PreprocessMachine, M: PreprocessMachine,
F: FnMut(&mut R, &mut HashMap<u16, M::SignMachine>), F: FnMut(&mut R, &mut HashMap<Participant, M::SignMachine>),
>( >(
rng: &mut R, rng: &mut R,
machines: HashMap<u16, M>, machines: HashMap<Participant, M>,
cache: F, cache: F,
msg: &[u8], msg: &[u8],
) -> M::Signature { ) -> M::Signature {
let (mut machines, shares) = commit_and_shares(rng, machines, cache, msg); let (mut machines, shares) = preprocess_and_shares(rng, machines, cache, msg);
let mut signature = None; let mut signature = None;
for (i, machine) in machines.drain() { for (i, machine) in machines.drain() {
@@ -135,7 +157,7 @@ fn sign_internal<
/// caching. /// caching.
pub fn sign_without_caching<R: RngCore + CryptoRng, M: PreprocessMachine>( pub fn sign_without_caching<R: RngCore + CryptoRng, M: PreprocessMachine>(
rng: &mut R, rng: &mut R,
machines: HashMap<u16, M>, machines: HashMap<Participant, M>,
msg: &[u8], msg: &[u8],
) -> M::Signature { ) -> M::Signature {
sign_internal(rng, machines, |_, _| {}, msg) sign_internal(rng, machines, |_, _| {}, msg)
@@ -146,8 +168,8 @@ pub fn sign_without_caching<R: RngCore + CryptoRng, M: PreprocessMachine>(
pub fn sign<R: RngCore + CryptoRng, M: PreprocessMachine>( pub fn sign<R: RngCore + CryptoRng, M: PreprocessMachine>(
rng: &mut R, rng: &mut R,
params: <M::SignMachine as SignMachine<M::Signature>>::Params, params: <M::SignMachine as SignMachine<M::Signature>>::Params,
mut keys: HashMap<u16, <M::SignMachine as SignMachine<M::Signature>>::Keys>, mut keys: HashMap<Participant, <M::SignMachine as SignMachine<M::Signature>>::Keys>,
machines: HashMap<u16, M>, machines: HashMap<Participant, M>,
msg: &[u8], msg: &[u8],
) -> M::Signature { ) -> M::Signature {
sign_internal( sign_internal(
@@ -169,3 +191,67 @@ pub fn sign<R: RngCore + CryptoRng, M: PreprocessMachine>(
msg, msg,
) )
} }
/// Test a basic Schnorr signature.
pub fn test_schnorr<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(rng: &mut R) {
const MSG: &[u8] = b"Hello, World!";
let keys = key_gen(&mut *rng);
let machines = algorithm_machines(&mut *rng, IetfSchnorr::<C, H>::ietf(), &keys);
let sig = sign(&mut *rng, IetfSchnorr::<C, H>::ietf(), keys.clone(), machines, MSG);
let group_key = keys[&Participant::new(1).unwrap()].group_key();
assert!(sig.verify(group_key, H::hram(&sig.R, &group_key, MSG)));
}
// Test an offset Schnorr signature.
pub fn test_offset_schnorr<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(rng: &mut R) {
const MSG: &[u8] = b"Hello, World!";
let mut keys = key_gen(&mut *rng);
let group_key = keys[&Participant::new(1).unwrap()].group_key();
let offset = C::F::from(5);
let offset_key = group_key + (C::generator() * offset);
for (_, keys) in keys.iter_mut() {
*keys = keys.offset(offset);
assert_eq!(keys.group_key(), offset_key);
}
let machines = algorithm_machines(&mut *rng, IetfSchnorr::<C, H>::ietf(), &keys);
let sig = sign(&mut *rng, IetfSchnorr::<C, H>::ietf(), keys.clone(), machines, MSG);
let group_key = keys[&Participant::new(1).unwrap()].group_key();
assert!(sig.verify(offset_key, H::hram(&sig.R, &group_key, MSG)));
}
// Test blame for an invalid Schnorr signature share.
pub fn test_schnorr_blame<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(rng: &mut R) {
const MSG: &[u8] = b"Hello, World!";
let keys = key_gen(&mut *rng);
let machines = algorithm_machines(&mut *rng, IetfSchnorr::<C, H>::ietf(), &keys);
let (mut machines, shares) = preprocess_and_shares(&mut *rng, machines, |_, _| {}, MSG);
for (i, machine) in machines.drain() {
let mut shares = clone_without(&shares, &i);
// Select a random participant to give an invalid share
let participants = shares.keys().collect::<Vec<_>>();
let faulty = *participants
[usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()];
shares.get_mut(&faulty).unwrap().invalidate();
assert_eq!(machine.complete(shares).err(), Some(FrostError::InvalidShare(faulty)));
}
}
// Run a variety of tests against a ciphersuite.
pub fn test_ciphersuite<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(rng: &mut R) {
test_schnorr::<R, C, H>(rng);
test_offset_schnorr::<R, C, H>(rng);
test_schnorr_blame::<R, C, H>(rng);
test_multi_nonce::<R, C>(rng);
test_invalid_commitment::<R, C>(rng);
test_invalid_dleq_proof::<R, C>(rng);
}

View File

@@ -0,0 +1,236 @@
use std::io::{self, Read};
use zeroize::Zeroizing;
use rand_core::{RngCore, CryptoRng, SeedableRng};
use rand_chacha::ChaCha20Rng;
use transcript::{Transcript, RecommendedTranscript};
use ciphersuite::group::{ff::Field, Group, GroupEncoding};
use dleq::MultiDLEqProof;
pub use dkg::tests::{key_gen, recover_key};
use crate::{
Curve, Participant, ThresholdView, ThresholdKeys, FrostError,
algorithm::Algorithm,
sign::{Writable, SignMachine},
tests::{algorithm_machines, preprocess, sign},
};
// Test algorithm which uses multiple nonces across multiple generators, to exercise the
// library's nonce handling beyond the single-nonce IETF case.
#[derive(Clone)]
struct MultiNonce<C: Curve> {
  // Transcript used both as the Algorithm transcript and to derive deterministic nonce weights
  transcript: RecommendedTranscript,
  // Nonce sums cached by sign_share so verify can confirm it receives the same values;
  // None until sign_share runs
  nonces: Option<Vec<Vec<C::G>>>,
}
impl<C: Curve> MultiNonce<C> {
  // Construct a fresh MultiNonce algorithm with an empty nonce cache.
  fn new() -> MultiNonce<C> {
    let transcript = RecommendedTranscript::new(b"FROST MultiNonce Algorithm Test");
    MultiNonce { transcript, nonces: None }
  }
}
// Generator sets for the two nonces, built with a known relationship:
// the first set is [G, 2G], the second [G, 3G, 4G].
fn nonces<C: Curve>() -> Vec<Vec<C::G>> {
  let g = C::generator();
  vec![vec![g, g.double()], vec![g, g * C::F::from(3), g * C::F::from(4)]]
}
// Check a set of nonce commitment sums matches the generator relationships from nonces::<C>().
fn verify_nonces<C: Curve>(nonces: &[Vec<C::G>]) {
  // Each nonce should be a series of commitments, over some generators, which share a discrete
  // log. Since they share a discrete log, their only distinction should be the generator.
  // The generators above were created with a known relationship, so verify it holds here to
  // make sure these commitments are well formed.
  assert_eq!(nonces.len(), 2);

  let first = &nonces[0];
  assert_eq!(first.len(), 2);
  assert_eq!(first[0].double(), first[1]);

  let second = &nonces[1];
  assert_eq!(second.len(), 3);
  assert_eq!(second[0] * C::F::from(3), second[1]);
  assert_eq!(second[0] * C::F::from(4), second[2]);

  // The two nonces must use distinct discrete logs
  assert!(first[0] != second[0]);
}
impl<C: Curve> Algorithm<C> for MultiNonce<C> {
  type Transcript = RecommendedTranscript;
  // This test algorithm has no addendum and no meaningful signature output
  type Addendum = ();
  type Signature = ();
  fn transcript(&mut self) -> &mut Self::Transcript {
    &mut self.transcript
  }
  // Request the multi-generator nonce layout defined by the free function nonces::<C>()
  fn nonces(&self) -> Vec<Vec<C::G>> {
    nonces::<C>()
  }
  // No addendum is used, so preprocess/read/process are all no-ops
  fn preprocess_addendum<R: RngCore + CryptoRng>(&mut self, _: &mut R, _: &ThresholdKeys<C>) {}
  fn read_addendum<R: Read>(&self, _: &mut R) -> io::Result<Self::Addendum> {
    Ok(())
  }
  fn process_addendum(
    &mut self,
    _: &ThresholdView<C>,
    _: Participant,
    _: (),
  ) -> Result<(), FrostError> {
    Ok(())
  }
  // "Signs" by checking the nonce sums are well formed and returning a transcript-weighted sum
  // of this signer's nonces, so verify can later relate shares back to the nonce sums
  fn sign_share(
    &mut self,
    _: &ThresholdView<C>,
    nonce_sums: &[Vec<C::G>],
    nonces: Vec<Zeroizing<C::F>>,
    _: &[u8],
  ) -> C::F {
    // Verify the nonce sums are as expected
    verify_nonces::<C>(nonce_sums);
    // Verify we actually have two nonces and that they're distinct
    assert_eq!(nonces.len(), 2);
    assert!(nonces[0] != nonces[1]);
    // Save the nonce sums for later so we can check they're consistent with the call to verify
    assert!(self.nonces.is_none());
    self.nonces = Some(nonce_sums.to_vec());
    // Sum the nonces so we can later check they actually have a relationship to nonce_sums
    let mut res = C::F::zero();
    // Weight each nonce
    // This is probably overkill, since their unweighted forms would practically still require
    // some level of crafting to pass a naive sum via malleability, yet this makes it more robust
    // Transcript the nonce sums first so the weights are bound to them
    for nonce in nonce_sums {
      self.transcript.domain_separate(b"nonce");
      for commitment in nonce {
        self.transcript.append_message(b"commitment", commitment.to_bytes());
      }
    }
    // Derive the weights from the transcript; verify re-derives the same weights from the same
    // transcript state, so the ordering here must match verify's exactly
    let mut rng = ChaCha20Rng::from_seed(self.transcript.clone().rng_seed(b"weight"));
    for nonce in nonces {
      res += *nonce * C::F::random(&mut rng);
    }
    res
  }
  #[must_use]
  fn verify(&self, _: C::G, nonces: &[Vec<C::G>], sum: C::F) -> Option<Self::Signature> {
    // The nonce sums passed here must be well formed and match what sign_share saw
    verify_nonces::<C>(nonces);
    assert_eq!(&self.nonces.clone().unwrap(), nonces);
    // Make sure the nonce sums actually relate to the nonces
    // Re-derive the same weights sign_share used and check the weighted commitment sum matches
    // the weighted scalar sum times the generator
    let mut res = C::G::identity();
    let mut rng = ChaCha20Rng::from_seed(self.transcript.clone().rng_seed(b"weight"));
    for nonce in nonces {
      res += nonce[0] * C::F::random(&mut rng);
    }
    assert_eq!(res, C::generator() * sum);
    Some(())
  }
  // Share verification should never be reached in these tests; panic if it is
  fn verify_share(&self, _: C::G, _: &[Vec<C::G>], _: C::F) -> Result<Vec<(C::F, C::G)>, ()> {
    panic!("share verification triggered");
  }
}
/// Test a multi-nonce, multi-generator algorithm.
// Specifically verifies this library can:
// 1) Generate multiple nonces
// 2) Provide the group nonces (nonce_sums) across multiple generators, still with the same
//    discrete log
// 3) Provide algorithms with nonces which match the group nonces
pub fn test_multi_nonce<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
  let keys = key_gen::<R, C>(&mut *rng);
  let machines = algorithm_machines(&mut *rng, MultiNonce::<C>::new(), &keys);
  // An empty message suffices; MultiNonce performs its checks via internal assertions
  sign(&mut *rng, MultiNonce::<C>::new(), keys.clone(), machines, &[]);
}
/// Test malleating a commitment for a nonce across generators causes the preprocess to error.
pub fn test_invalid_commitment<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
  let keys = key_gen::<R, C>(&mut *rng);
  let machines = algorithm_machines(&mut *rng, MultiNonce::<C>::new(), &keys);
  let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {});
  // Select a random participant to give an invalid commitment
  let participants = preprocesses.keys().collect::<Vec<_>>();
  let faulty = *participants
    [usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()];
  // Grab their preprocess
  let mut preprocess = preprocesses.remove(&faulty).unwrap();
  // Mutate one of the commitments
  // Randomly pick one of the two nonces, one of its generators, and one of that generator's two
  // commitments, then replace it with a random group element
  let nonce =
    preprocess.commitments.nonces.get_mut(usize::try_from(rng.next_u64()).unwrap() % 2).unwrap();
  let generators_len = nonce.generators.len();
  *nonce
    .generators
    .get_mut(usize::try_from(rng.next_u64()).unwrap() % generators_len)
    .unwrap()
    .0
    .get_mut(usize::try_from(rng.next_u64()).unwrap() % 2)
    .unwrap() = C::G::random(&mut *rng);
  // The commitments are validated at time of deserialization (read_preprocess)
  // Accordingly, serialize it and read it again to make sure that errors
  assert!(machines
    .iter()
    .next()
    .unwrap()
    .1
    .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref())
    .is_err());
}
/// Test malleating the DLEq proof for a preprocess causes it to error.
pub fn test_invalid_dleq_proof<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
  let keys = key_gen::<R, C>(&mut *rng);
  let machines = algorithm_machines(&mut *rng, MultiNonce::<C>::new(), &keys);
  let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {});
  // Select a random participant to give an invalid DLEq proof
  let participants = preprocesses.keys().collect::<Vec<_>>();
  let faulty = *participants
    [usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()];
  // Invalidate it by replacing it with a completely different proof
  // The proof is well formed, just over freshly random discrete logs unrelated to the nonces
  let dlogs = [Zeroizing::new(C::F::random(&mut *rng)), Zeroizing::new(C::F::random(&mut *rng))];
  let mut preprocess = preprocesses.remove(&faulty).unwrap();
  preprocess.commitments.dleq = Some(MultiDLEqProof::prove(
    &mut *rng,
    &mut RecommendedTranscript::new(b"Invalid DLEq Proof"),
    &nonces::<C>(),
    &dlogs,
  ));
  // read_preprocess validates the proof on deserialization, so this must error
  assert!(machines
    .iter()
    .next()
    .unwrap()
    .1
    .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref())
    .is_err());
  // Also test None for a proof will cause an error
  preprocess.commitments.dleq = None;
  assert!(machines
    .iter()
    .next()
    .unwrap()
    .1
    .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref())
    .is_err());
}

View File

@@ -5,21 +5,21 @@ use std::collections::HashMap;
use std::str::FromStr; use std::str::FromStr;
use zeroize::Zeroizing; use zeroize::Zeroizing;
use rand_core::{RngCore, CryptoRng};
use group::{ff::PrimeField, GroupEncoding}; use rand_core::{RngCore, CryptoRng, SeedableRng};
use rand_chacha::ChaCha20Rng;
use dkg::tests::key_gen; use ciphersuite::group::{ff::PrimeField, GroupEncoding};
use crate::{ use crate::{
curve::Curve, curve::Curve,
ThresholdCore, ThresholdKeys, FrostError, Participant, ThresholdCore, ThresholdKeys,
algorithm::{Schnorr, Hram}, algorithm::{IetfTranscript, Hram, IetfSchnorr},
sign::{ sign::{
Nonce, GeneratorCommitments, NonceCommitments, Commitments, Writable, Preprocess, SignMachine, Writable, Nonce, GeneratorCommitments, NonceCommitments, Commitments, Preprocess,
SignatureMachine, AlgorithmMachine, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine,
}, },
tests::{clone_without, recover_key, algorithm_machines, commit_and_shares, sign}, tests::{clone_without, recover_key, test_ciphersuite},
}; };
pub struct Vectors { pub struct Vectors {
@@ -30,14 +30,20 @@ pub struct Vectors {
pub shares: Vec<String>, pub shares: Vec<String>,
pub msg: String, pub msg: String,
pub included: Vec<u16>, pub included: Vec<Participant>,
pub nonce_randomness: Vec<[String; 2]>,
pub nonces: Vec<[String; 2]>, pub nonces: Vec<[String; 2]>,
pub commitments: Vec<[String; 2]>,
pub sig_shares: Vec<String>, pub sig_shares: Vec<String>,
pub sig: String, pub sig: String,
} }
// Vectors are expected to be formatted per the IETF proof of concept
// The included vectors are directly from
// https://github.com/cfrg/draft-irtf-cfrg-frost/tree/draft-irtf-cfrg-frost-11/poc
#[cfg(test)] #[cfg(test)]
impl From<serde_json::Value> for Vectors { impl From<serde_json::Value> for Vectors {
fn from(value: serde_json::Value) -> Vectors { fn from(value: serde_json::Value) -> Vectors {
@@ -58,14 +64,34 @@ impl From<serde_json::Value> for Vectors {
included: to_str(&value["round_one_outputs"]["participant_list"]) included: to_str(&value["round_one_outputs"]["participant_list"])
.split(',') .split(',')
.map(u16::from_str) .map(u16::from_str)
.collect::<Result<_, _>>() .collect::<Result<Vec<_>, _>>()
.unwrap(), .unwrap()
.iter()
.map(|i| Participant::new(*i).unwrap())
.collect(),
nonce_randomness: value["round_one_outputs"]["participants"]
.as_object()
.unwrap()
.values()
.map(|value| {
[to_str(&value["hiding_nonce_randomness"]), to_str(&value["binding_nonce_randomness"])]
})
.collect(),
nonces: value["round_one_outputs"]["participants"] nonces: value["round_one_outputs"]["participants"]
.as_object() .as_object()
.unwrap() .unwrap()
.values() .values()
.map(|value| [to_str(&value["hiding_nonce"]), to_str(&value["binding_nonce"])]) .map(|value| [to_str(&value["hiding_nonce"]), to_str(&value["binding_nonce"])])
.collect(), .collect(),
commitments: value["round_one_outputs"]["participants"]
.as_object()
.unwrap()
.values()
.map(|value| {
[to_str(&value["hiding_nonce_commitment"]), to_str(&value["binding_nonce_commitment"])]
})
.collect(),
sig_shares: value["round_two_outputs"]["participants"] sig_shares: value["round_two_outputs"]["participants"]
.as_object() .as_object()
@@ -80,7 +106,7 @@ impl From<serde_json::Value> for Vectors {
} }
// Load these vectors into ThresholdKeys using a custom serialization it'll deserialize // Load these vectors into ThresholdKeys using a custom serialization it'll deserialize
fn vectors_to_multisig_keys<C: Curve>(vectors: &Vectors) -> HashMap<u16, ThresholdKeys<C>> { fn vectors_to_multisig_keys<C: Curve>(vectors: &Vectors) -> HashMap<Participant, ThresholdKeys<C>> {
let shares = vectors let shares = vectors
.shares .shares
.iter() .iter()
@@ -92,23 +118,24 @@ fn vectors_to_multisig_keys<C: Curve>(vectors: &Vectors) -> HashMap<u16, Thresho
for i in 1 ..= u16::try_from(shares.len()).unwrap() { for i in 1 ..= u16::try_from(shares.len()).unwrap() {
// Manually re-implement the serialization for ThresholdCore to import this data // Manually re-implement the serialization for ThresholdCore to import this data
let mut serialized = vec![]; let mut serialized = vec![];
serialized.extend(u32::try_from(C::ID.len()).unwrap().to_be_bytes()); serialized.extend(u32::try_from(C::ID.len()).unwrap().to_le_bytes());
serialized.extend(C::ID); serialized.extend(C::ID);
serialized.extend(vectors.threshold.to_be_bytes()); serialized.extend(vectors.threshold.to_le_bytes());
serialized.extend(u16::try_from(shares.len()).unwrap().to_be_bytes()); serialized.extend(u16::try_from(shares.len()).unwrap().to_le_bytes());
serialized.extend(i.to_be_bytes()); serialized.extend(i.to_le_bytes());
serialized.extend(shares[usize::from(i) - 1].to_repr().as_ref()); serialized.extend(shares[usize::from(i) - 1].to_repr().as_ref());
for share in &verification_shares { for share in &verification_shares {
serialized.extend(share.to_bytes().as_ref()); serialized.extend(share.to_bytes().as_ref());
} }
let these_keys = ThresholdCore::<C>::deserialize::<&[u8]>(&mut serialized.as_ref()).unwrap(); let these_keys = ThresholdCore::<C>::read::<&[u8]>(&mut serialized.as_ref()).unwrap();
assert_eq!(these_keys.params().t(), vectors.threshold); assert_eq!(these_keys.params().t(), vectors.threshold);
assert_eq!(usize::from(these_keys.params().n()), shares.len()); assert_eq!(usize::from(these_keys.params().n()), shares.len());
assert_eq!(these_keys.params().i(), i); let participant = Participant::new(i).unwrap();
assert_eq!(these_keys.params().i(), participant);
assert_eq!(these_keys.secret_share().deref(), &shares[usize::from(i - 1)]); assert_eq!(these_keys.secret_share().deref(), &shares[usize::from(i - 1)]);
assert_eq!(hex::encode(these_keys.group_key().to_bytes().as_ref()), vectors.group_key); assert_eq!(hex::encode(these_keys.group_key().to_bytes().as_ref()), vectors.group_key);
keys.insert(i, ThresholdKeys::new(these_keys)); keys.insert(participant, ThresholdKeys::new(these_keys));
} }
keys keys
@@ -118,40 +145,14 @@ pub fn test_with_vectors<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(
rng: &mut R, rng: &mut R,
vectors: Vectors, vectors: Vectors,
) { ) {
// Test a basic Schnorr signature test_ciphersuite::<R, C, H>(rng);
{
let keys = key_gen(&mut *rng);
let machines = algorithm_machines(&mut *rng, Schnorr::<C, H>::new(), &keys);
const MSG: &[u8] = b"Hello, World!";
let sig = sign(&mut *rng, Schnorr::<C, H>::new(), keys.clone(), machines, MSG);
assert!(sig.verify(keys[&1].group_key(), H::hram(&sig.R, &keys[&1].group_key(), MSG)));
}
// Test blame on an invalid Schnorr signature share
{
let keys = key_gen(&mut *rng);
let machines = algorithm_machines(&mut *rng, Schnorr::<C, H>::new(), &keys);
const MSG: &[u8] = b"Hello, World!";
let (mut machines, mut shares) = commit_and_shares(&mut *rng, machines, |_, _| {}, MSG);
let faulty = *shares.keys().next().unwrap();
shares.get_mut(&faulty).unwrap().invalidate();
for (i, machine) in machines.drain() {
if i == faulty {
continue;
}
assert_eq!(
machine.complete(clone_without(&shares, &i)).err(),
Some(FrostError::InvalidShare(faulty))
);
}
}
// Test against the vectors // Test against the vectors
let keys = vectors_to_multisig_keys::<C>(&vectors); let keys = vectors_to_multisig_keys::<C>(&vectors);
{
let group_key = let group_key =
<C as Curve>::read_G::<&[u8]>(&mut hex::decode(&vectors.group_key).unwrap().as_ref()).unwrap(); <C as Curve>::read_G::<&[u8]>(&mut hex::decode(&vectors.group_key).unwrap().as_ref())
.unwrap();
let secret = let secret =
C::read_F::<&[u8]>(&mut hex::decode(&vectors.group_secret).unwrap().as_ref()).unwrap(); C::read_F::<&[u8]>(&mut hex::decode(&vectors.group_secret).unwrap().as_ref()).unwrap();
assert_eq!(C::generator() * secret, group_key); assert_eq!(C::generator() * secret, group_key);
@@ -159,26 +160,34 @@ pub fn test_with_vectors<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(
let mut machines = vec![]; let mut machines = vec![];
for i in &vectors.included { for i in &vectors.included {
machines.push((i, AlgorithmMachine::new(Schnorr::<C, H>::new(), keys[i].clone()).unwrap())); machines
.push((i, AlgorithmMachine::new(IetfSchnorr::<C, H>::ietf(), keys[i].clone()).unwrap()));
} }
let mut commitments = HashMap::new(); let mut commitments = HashMap::new();
let mut c = 0;
let mut machines = machines let mut machines = machines
.drain(..) .drain(..)
.map(|(i, machine)| { .enumerate()
.map(|(c, (i, machine))| {
let nonce = |i| { let nonce = |i| {
Zeroizing::new( Zeroizing::new(
C::read_F::<&[u8]>(&mut hex::decode(&vectors.nonces[c][i]).unwrap().as_ref()).unwrap(), C::read_F::<&[u8]>(&mut hex::decode(&vectors.nonces[c][i]).unwrap().as_ref()).unwrap(),
) )
}; };
let nonces = [nonce(0), nonce(1)]; let nonces = [nonce(0), nonce(1)];
c += 1;
let these_commitments = let these_commitments =
[C::generator() * nonces[0].deref(), C::generator() * nonces[1].deref()]; [C::generator() * nonces[0].deref(), C::generator() * nonces[1].deref()];
let machine = machine.unsafe_override_preprocess(
vec![Nonce(nonces)], assert_eq!(
Preprocess { these_commitments[0].to_bytes().as_ref(),
hex::decode(&vectors.commitments[c][0]).unwrap()
);
assert_eq!(
these_commitments[1].to_bytes().as_ref(),
hex::decode(&vectors.commitments[c][1]).unwrap()
);
let preprocess = Preprocess {
commitments: Commitments { commitments: Commitments {
nonces: vec![NonceCommitments { nonces: vec![NonceCommitments {
generators: vec![GeneratorCommitments(these_commitments)], generators: vec![GeneratorCommitments(these_commitments)],
@@ -186,9 +195,16 @@ pub fn test_with_vectors<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(
dleq: None, dleq: None,
}, },
addendum: (), addendum: (),
}, };
// FROST doesn't specify how to serialize these together, yet this is sane
// (and the simplest option)
assert_eq!(
preprocess.serialize(),
hex::decode(vectors.commitments[c][0].clone() + &vectors.commitments[c][1]).unwrap()
); );
let machine = machine.unsafe_override_preprocess(vec![Nonce(nonces)], preprocess);
commitments.insert( commitments.insert(
*i, *i,
machine machine
@@ -207,12 +223,13 @@ pub fn test_with_vectors<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(
.collect::<Vec<_>>(); .collect::<Vec<_>>();
let mut shares = HashMap::new(); let mut shares = HashMap::new();
c = 0;
let mut machines = machines let mut machines = machines
.drain(..) .drain(..)
.map(|(i, machine)| { .enumerate()
let (machine, share) = .map(|(c, (i, machine))| {
machine.sign(clone_without(&commitments, i), &hex::decode(&vectors.msg).unwrap()).unwrap(); let (machine, share) = machine
.sign(clone_without(&commitments, i), &hex::decode(&vectors.msg).unwrap())
.unwrap();
let share = { let share = {
let mut buf = vec![]; let mut buf = vec![];
@@ -220,7 +237,6 @@ pub fn test_with_vectors<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(
buf buf
}; };
assert_eq!(share, hex::decode(&vectors.sig_shares[c]).unwrap()); assert_eq!(share, hex::decode(&vectors.sig_shares[c]).unwrap());
c += 1;
shares.insert(*i, machine.read_share::<&[u8]>(&mut share.as_ref()).unwrap()); shares.insert(*i, machine.read_share::<&[u8]>(&mut share.as_ref()).unwrap());
(i, machine) (i, machine)
@@ -234,3 +250,122 @@ pub fn test_with_vectors<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(
assert_eq!(hex::encode(serialized), vectors.sig); assert_eq!(hex::encode(serialized), vectors.sig);
} }
} }
// The above code didn't test the nonce generation due to the infeasibility of doing so against
// the current codebase
// A transparent RNG which has a fixed output
// Each call to fill_bytes consumes and emits the next queued 32-byte value; all other RngCore
// methods are intentionally unimplemented since the tests only use fill_bytes
struct TransparentRng(Vec<[u8; 32]>);
impl RngCore for TransparentRng {
  fn next_u32(&mut self) -> u32 {
    unimplemented!()
  }
  fn next_u64(&mut self) -> u64 {
    unimplemented!()
  }
  fn fill_bytes(&mut self, dest: &mut [u8]) {
    // Pops the front entry; panics if empty or if dest isn't exactly 32 bytes, which is
    // acceptable in a test-only RNG
    dest.copy_from_slice(&self.0.remove(0))
  }
  fn try_fill_bytes(&mut self, _: &mut [u8]) -> Result<(), rand_core::Error> {
    unimplemented!()
  }
}
// CryptoRng requires the output not reveal any info about any other outputs
// Since this only will produce one output, this is actually met, even though it'd be fine to
// fake it as this is a test
impl CryptoRng for TransparentRng {}
// Test C::random_nonce matches the expected vectors
for (i, l) in vectors.included.iter().enumerate() {
let l = usize::from(u16::from(*l));
// Shares are a zero-indexed array of all participants, hence l - 1
let share = Zeroizing::new(
C::read_F::<&[u8]>(&mut hex::decode(&vectors.shares[l - 1]).unwrap().as_ref()).unwrap(),
);
let randomness = vectors.nonce_randomness[i]
.iter()
.map(|randomness| hex::decode(randomness).unwrap().try_into().unwrap())
.collect::<Vec<_>>();
let nonces = vectors.nonces[i]
.iter()
.map(|nonce| {
Zeroizing::new(C::read_F::<&[u8]>(&mut hex::decode(nonce).unwrap().as_ref()).unwrap())
})
.collect::<Vec<_>>();
for (randomness, nonce) in randomness.iter().zip(&nonces) {
// Nonces are only present for participating signers, hence i
assert_eq!(C::random_nonce(&share, &mut TransparentRng(vec![*randomness])), *nonce);
}
// Also test it at the Commitments level
let (generated_nonces, commitments) = Commitments::<C>::new::<_, IetfTranscript>(
&mut TransparentRng(randomness),
&share,
&[vec![C::generator()]],
&[],
);
assert_eq!(generated_nonces.len(), 1);
assert_eq!(generated_nonces[0].0, [nonces[0].clone(), nonces[1].clone()]);
let mut commitments_bytes = vec![];
commitments.write(&mut commitments_bytes).unwrap();
assert_eq!(
commitments_bytes,
hex::decode(vectors.commitments[i][0].clone() + &vectors.commitments[i][1]).unwrap()
);
}
// This doesn't verify C::random_nonce is called correctly, where the code should call it with
// the output from a ChaCha20 stream
// Create a known ChaCha20 stream to verify it ends up at random_nonce properly
{
let mut chacha_seed = [0; 32];
rng.fill_bytes(&mut chacha_seed);
let mut ours = ChaCha20Rng::from_seed(chacha_seed);
let frosts = ours.clone();
// The machines should generate a seed, and then use that seed in a ChaCha20 RNG for nonces
let mut preprocess_seed = [0; 32];
ours.fill_bytes(&mut preprocess_seed);
let mut ours = ChaCha20Rng::from_seed(preprocess_seed);
// Get the randomness which will be used
let mut randomness = ([0; 32], [0; 32]);
ours.fill_bytes(&mut randomness.0);
ours.fill_bytes(&mut randomness.1);
// Create the machines
let mut machines = vec![];
for i in &vectors.included {
machines
.push((i, AlgorithmMachine::new(IetfSchnorr::<C, H>::ietf(), keys[i].clone()).unwrap()));
}
for (i, machine) in machines.drain(..) {
let (_, preprocess) = machine.preprocess(&mut frosts.clone());
// Calculate the expected nonces
let mut expected = (C::generator() *
C::random_nonce(keys[i].secret_share(), &mut TransparentRng(vec![randomness.0])).deref())
.to_bytes()
.as_ref()
.to_vec();
expected.extend(
(C::generator() *
C::random_nonce(keys[i].secret_share(), &mut TransparentRng(vec![randomness.1]))
.deref())
.to_bytes()
.as_ref(),
);
// Ensure they match
assert_eq!(preprocess.serialize(), expected);
}
}
}

View File

@@ -13,7 +13,7 @@ all-features = true
rustdoc-args = ["--cfg", "docsrs"] rustdoc-args = ["--cfg", "docsrs"]
[dependencies] [dependencies]
zeroize = { version = "1.5", features = ["zeroize_derive"] } zeroize = { version = "^1.5", features = ["zeroize_derive"] }
ff = "0.12" ff = "0.12"
group = "0.12" group = "0.12"
@@ -23,8 +23,9 @@ rand_core = { version = "0.6", optional = true }
[dev-dependencies] [dev-dependencies]
rand_core = "0.6" rand_core = "0.6"
k256 = { version = "0.11", features = ["bits"] } k256 = { version = "0.12", features = ["bits"] }
dalek-ff-group = { path = "../dalek-ff-group" } dalek-ff-group = { path = "../dalek-ff-group" }
[features] [features]
black_box = []
batch = ["rand_core"] batch = ["rand_core"]

View File

@@ -1,16 +1,32 @@
use rand_core::{RngCore, CryptoRng}; use rand_core::{RngCore, CryptoRng};
use zeroize::Zeroize; use zeroize::{Zeroize, Zeroizing};
use ff::{Field, PrimeFieldBits}; use ff::{Field, PrimeFieldBits};
use group::Group; use group::Group;
use crate::{multiexp, multiexp_vartime}; use crate::{multiexp, multiexp_vartime};
// Flatten the contained statements to a single Vec.
// Wrapped in Zeroizing in case any of the included statements contain private values.
#[allow(clippy::type_complexity)]
fn flat<Id: Copy + Zeroize, G: Group + Zeroize>(
slice: &[(Id, Vec<(G::Scalar, G)>)],
) -> Zeroizing<Vec<(G::Scalar, G)>>
where
<G as Group>::Scalar: PrimeFieldBits + Zeroize,
{
Zeroizing::new(slice.iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::<Vec<_>>())
}
/// A batch verifier intended to verify a series of statements are each equivalent to zero. /// A batch verifier intended to verify a series of statements are each equivalent to zero.
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
#[derive(Clone, Zeroize)] #[derive(Clone, Zeroize)]
pub struct BatchVerifier<Id: Copy + Zeroize, G: Group + Zeroize>(Vec<(Id, Vec<(G::Scalar, G)>)>); pub struct BatchVerifier<Id: Copy + Zeroize, G: Group + Zeroize>(
Zeroizing<Vec<(Id, Vec<(G::Scalar, G)>)>>,
)
where
<G as Group>::Scalar: PrimeFieldBits + Zeroize;
impl<Id: Copy + Zeroize, G: Group + Zeroize> BatchVerifier<Id, G> impl<Id: Copy + Zeroize, G: Group + Zeroize> BatchVerifier<Id, G>
where where
@@ -19,7 +35,7 @@ where
/// Create a new batch verifier, expected to verify the following amount of statements. /// Create a new batch verifier, expected to verify the following amount of statements.
/// This is a size hint and is not required to be accurate. /// This is a size hint and is not required to be accurate.
pub fn new(capacity: usize) -> BatchVerifier<Id, G> { pub fn new(capacity: usize) -> BatchVerifier<Id, G> {
BatchVerifier(Vec::with_capacity(capacity)) BatchVerifier(Zeroizing::new(Vec::with_capacity(capacity)))
} }
/// Queue a statement for batch verification. /// Queue a statement for batch verification.
@@ -71,31 +87,20 @@ where
} {} } {}
weight weight
}; };
self.0.push((id, pairs.into_iter().map(|(scalar, point)| (scalar * u, point)).collect())); self.0.push((id, pairs.into_iter().map(|(scalar, point)| (scalar * u, point)).collect()));
} }
/// Perform batch verification, returning a boolean of if the statements equaled zero. /// Perform batch verification, returning a boolean of if the statements equaled zero.
#[must_use] #[must_use]
pub fn verify_core(&self) -> bool { pub fn verify(&self) -> bool {
let mut flat = self.0.iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::<Vec<_>>(); multiexp(&flat(&self.0)).is_identity().into()
let res = multiexp(&flat).is_identity().into();
flat.zeroize();
res
}
/// Perform batch verification, zeroizing the statements verified.
pub fn verify(mut self) -> bool {
let res = self.verify_core();
self.zeroize();
res
} }
/// Perform batch verification in variable time. /// Perform batch verification in variable time.
#[must_use] #[must_use]
pub fn verify_vartime(&self) -> bool { pub fn verify_vartime(&self) -> bool {
multiexp_vartime(&self.0.iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::<Vec<_>>()) multiexp_vartime(&flat(&self.0)).is_identity().into()
.is_identity()
.into()
} }
/// Perform a binary search to identify which statement does not equal 0, returning None if all /// Perform a binary search to identify which statement does not equal 0, returning None if all
@@ -106,12 +111,7 @@ where
let mut slice = self.0.as_slice(); let mut slice = self.0.as_slice();
while slice.len() > 1 { while slice.len() > 1 {
let split = slice.len() / 2; let split = slice.len() / 2;
if multiexp_vartime( if multiexp_vartime(&flat(&slice[.. split])).is_identity().into() {
&slice[.. split].iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::<Vec<_>>(),
)
.is_identity()
.into()
{
slice = &slice[split ..]; slice = &slice[split ..];
} else { } else {
slice = &slice[.. split]; slice = &slice[.. split];
@@ -126,10 +126,12 @@ where
/// Perform constant time batch verification, and if verification fails, identify one faulty /// Perform constant time batch verification, and if verification fails, identify one faulty
/// statement in variable time. /// statement in variable time.
pub fn verify_with_vartime_blame(mut self) -> Result<(), Id> { pub fn verify_with_vartime_blame(&self) -> Result<(), Id> {
let res = if self.verify_core() { Ok(()) } else { Err(self.blame_vartime().unwrap()) }; if self.verify() {
self.zeroize(); Ok(())
res } else {
Err(self.blame_vartime().unwrap())
}
} }
/// Perform variable time batch verification, and if verification fails, identify one faulty /// Perform variable time batch verification, and if verification fails, identify one faulty

View File

@@ -1,5 +1,7 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
use core::ops::DerefMut;
use zeroize::Zeroize; use zeroize::Zeroize;
use ff::PrimeFieldBits; use ff::PrimeFieldBits;
@@ -19,6 +21,31 @@ pub use batch::BatchVerifier;
#[cfg(test)] #[cfg(test)]
mod tests; mod tests;
// Feature gated due to MSRV requirements
// When the feature is enabled, defer to core::hint::black_box to hinder compiler optimization
// of the wrapped value
#[cfg(feature = "black_box")]
pub(crate) fn black_box<T>(val: T) -> T {
  core::hint::black_box(val)
}
// Without the feature, this is a plain identity function offering no optimization barrier
#[cfg(not(feature = "black_box"))]
pub(crate) fn black_box<T>(val: T) -> T {
  val
}
// Convert a bool to a u8 (0 or 1), routing the value through black_box at each step.
// NOTE(review): presumably intended to hinder compiler optimizations which could introduce
// timing side channels — confirm against the crate's threat model.
// Zeroizes the caller's bool as a side effect.
fn u8_from_bool(bit_ref: &mut bool) -> u8 {
  let bit_ref = black_box(bit_ref);
  let mut bit = black_box(*bit_ref);
  let res = black_box(bit as u8);
  bit.zeroize();
  // A bool can only cast to 0 or 1, so res | 1 must be 1
  debug_assert!((res | 1) == 1);
  bit_ref.zeroize();
  res
}
// Convert scalars to `window`-sized bit groups, as needed to index a table
// This algorithm works for `window <= 8`
pub(crate) fn prep_bits<G: Group>(pairs: &[(G::Scalar, G)], window: u8) -> Vec<Vec<u8>> pub(crate) fn prep_bits<G: Group>(pairs: &[(G::Scalar, G)], window: u8) -> Vec<Vec<u8>>
where where
G::Scalar: PrimeFieldBits, G::Scalar: PrimeFieldBits,
@@ -31,11 +58,8 @@ where
let mut bits = pair.0.to_le_bits(); let mut bits = pair.0.to_le_bits();
groupings.push(vec![0; (bits.len() + (w_usize - 1)) / w_usize]); groupings.push(vec![0; (bits.len() + (w_usize - 1)) / w_usize]);
#[allow(unused_assignments)] for (i, mut bit) in bits.iter_mut().enumerate() {
for (i, mut raw_bit) in bits.iter_mut().enumerate() { let mut bit = u8_from_bool(bit.deref_mut());
let mut bit = u8::from(*raw_bit);
*raw_bit = false;
groupings[p][i / w_usize] |= bit << (i % w_usize); groupings[p][i / w_usize] |= bit << (i % w_usize);
bit.zeroize(); bit.zeroize();
} }
@@ -44,20 +68,6 @@ where
groupings groupings
} }
pub(crate) fn prep_tables<G: Group>(pairs: &[(G::Scalar, G)], window: u8) -> Vec<Vec<G>> {
let mut tables = Vec::with_capacity(pairs.len());
for pair in pairs {
let p = tables.len();
tables.push(vec![G::identity(); 2_usize.pow(window.into())]);
let mut accum = G::identity();
for i in 1 .. tables[p].len() {
accum += pair.1;
tables[p][i] = accum;
}
}
tables
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)] #[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Algorithm { enum Algorithm {
Null, Null,
@@ -169,6 +179,7 @@ where
match algorithm(pairs.len()) { match algorithm(pairs.len()) {
Algorithm::Null => Group::identity(), Algorithm::Null => Group::identity(),
Algorithm::Single => pairs[0].1 * pairs[0].0, Algorithm::Single => pairs[0].1 * pairs[0].0,
// These functions panic if called without any pairs
Algorithm::Straus(window) => straus(pairs, window), Algorithm::Straus(window) => straus(pairs, window),
Algorithm::Pippenger(window) => pippenger(pairs, window), Algorithm::Pippenger(window) => pippenger(pairs, window),
} }

View File

@@ -5,6 +5,8 @@ use group::Group;
use crate::prep_bits; use crate::prep_bits;
// Pippenger's algorithm for multiexponentation, as published in the SIAM Journal on Computing
// DOI: 10.1137/0209022
pub(crate) fn pippenger<G: Group>(pairs: &[(G::Scalar, G)], window: u8) -> G pub(crate) fn pippenger<G: Group>(pairs: &[(G::Scalar, G)], window: u8) -> G
where where
G::Scalar: PrimeFieldBits, G::Scalar: PrimeFieldBits,
@@ -13,9 +15,11 @@ where
let mut res = G::identity(); let mut res = G::identity();
for n in (0 .. bits[0].len()).rev() { for n in (0 .. bits[0].len()).rev() {
if n != (bits[0].len() - 1) {
for _ in 0 .. window { for _ in 0 .. window {
res = res.double(); res = res.double();
} }
}
let mut buckets = vec![G::identity(); 2_usize.pow(window.into())]; let mut buckets = vec![G::identity(); 2_usize.pow(window.into())];
for p in 0 .. bits.len() { for p in 0 .. bits.len() {
@@ -47,20 +51,34 @@ where
} }
} }
let mut buckets = vec![G::identity(); 2_usize.pow(window.into())]; // Use None to represent identity since is_none is likely faster than is_identity
let mut buckets = vec![None; 2_usize.pow(window.into())];
for p in 0 .. bits.len() { for p in 0 .. bits.len() {
let nibble = usize::from(bits[p][n]); let nibble = usize::from(bits[p][n]);
if nibble != 0 { if nibble != 0 {
buckets[nibble] += pairs[p].1; if let Some(bucket) = buckets[nibble].as_mut() {
*bucket += pairs[p].1;
} else {
buckets[nibble] = Some(pairs[p].1);
}
} }
} }
let mut intermediate_sum = G::identity(); let mut intermediate_sum = None;
for b in (1 .. buckets.len()).rev() { for b in (1 .. buckets.len()).rev() {
intermediate_sum += buckets[b]; if let Some(bucket) = buckets[b].as_ref() {
if let Some(intermediate_sum) = intermediate_sum.as_mut() {
*intermediate_sum += bucket;
} else {
intermediate_sum = Some(*bucket);
}
}
if let Some(intermediate_sum) = intermediate_sum.as_ref() {
res += intermediate_sum; res += intermediate_sum;
} }
} }
}
res res
} }

View File

@@ -3,8 +3,25 @@ use zeroize::Zeroize;
use ff::PrimeFieldBits; use ff::PrimeFieldBits;
use group::Group; use group::Group;
use crate::{prep_bits, prep_tables}; use crate::prep_bits;
// Create tables for every included point of size 2^window
fn prep_tables<G: Group>(pairs: &[(G::Scalar, G)], window: u8) -> Vec<Vec<G>> {
let mut tables = Vec::with_capacity(pairs.len());
for pair in pairs {
let p = tables.len();
tables.push(vec![G::identity(); 2_usize.pow(window.into())]);
let mut accum = G::identity();
for i in 1 .. tables[p].len() {
accum += pair.1;
tables[p][i] = accum;
}
}
tables
}
// Straus's algorithm for multiexponentation, as published in The American Mathematical Monthly
// DOI: 10.2307/2310929
pub(crate) fn straus<G: Group>(pairs: &[(G::Scalar, G)], window: u8) -> G pub(crate) fn straus<G: Group>(pairs: &[(G::Scalar, G)], window: u8) -> G
where where
G::Scalar: PrimeFieldBits + Zeroize, G::Scalar: PrimeFieldBits + Zeroize,
@@ -14,9 +31,11 @@ where
let mut res = G::identity(); let mut res = G::identity();
for b in (0 .. groupings[0].len()).rev() { for b in (0 .. groupings[0].len()).rev() {
if b != (groupings[0].len() - 1) {
for _ in 0 .. window { for _ in 0 .. window {
res = res.double(); res = res.double();
} }
}
for s in 0 .. tables.len() { for s in 0 .. tables.len() {
res += tables[s][usize::from(groupings[s][b])]; res += tables[s][usize::from(groupings[s][b])];
@@ -34,20 +53,24 @@ where
let groupings = prep_bits(pairs, window); let groupings = prep_bits(pairs, window);
let tables = prep_tables(pairs, window); let tables = prep_tables(pairs, window);
let mut res = G::identity(); let mut res: Option<G> = None;
for b in (0 .. groupings[0].len()).rev() { for b in (0 .. groupings[0].len()).rev() {
if b != (groupings[0].len() - 1) { if b != (groupings[0].len() - 1) {
for _ in 0 .. window { for _ in 0 .. window {
res = res.double(); res = res.map(|res| res.double());
} }
} }
for s in 0 .. tables.len() { for s in 0 .. tables.len() {
if groupings[s][b] != 0 { if groupings[s][b] != 0 {
res += tables[s][usize::from(groupings[s][b])]; if let Some(res) = res.as_mut() {
*res += tables[s][usize::from(groupings[s][b])];
} else {
res = Some(tables[s][usize::from(groupings[s][b])]);
}
} }
} }
} }
res res.unwrap_or_else(G::identity)
} }

View File

@@ -0,0 +1,94 @@
use rand_core::OsRng;
use zeroize::Zeroize;
use rand_core::RngCore;
use ff::{Field, PrimeFieldBits};
use group::Group;
use crate::BatchVerifier;
pub(crate) fn test_batch<G: Group + Zeroize>()
where
G::Scalar: PrimeFieldBits + Zeroize,
{
let valid = |batch: BatchVerifier<_, G>| {
assert!(batch.verify());
assert!(batch.verify_vartime());
assert_eq!(batch.blame_vartime(), None);
assert_eq!(batch.verify_with_vartime_blame(), Ok(()));
assert_eq!(batch.verify_vartime_with_vartime_blame(), Ok(()));
};
let invalid = |batch: BatchVerifier<_, G>, id| {
assert!(!batch.verify());
assert!(!batch.verify_vartime());
assert_eq!(batch.blame_vartime(), Some(id));
assert_eq!(batch.verify_with_vartime_blame(), Err(id));
assert_eq!(batch.verify_vartime_with_vartime_blame(), Err(id));
};
// Test an empty batch
let batch = BatchVerifier::new(0);
valid(batch);
// Test a batch with one set of statements
let valid_statements =
vec![(-G::Scalar::one(), G::generator()), (G::Scalar::one(), G::generator())];
let mut batch = BatchVerifier::new(1);
batch.queue(&mut OsRng, 0, valid_statements.clone());
valid(batch);
// Test a batch with an invalid set of statements fails properly
let invalid_statements = vec![(-G::Scalar::one(), G::generator())];
let mut batch = BatchVerifier::new(1);
batch.queue(&mut OsRng, 0, invalid_statements.clone());
invalid(batch, 0);
// Test blame can properly identify faulty participants
// Run with 17 statements, rotating which one is faulty
for i in 0 .. 17 {
let mut batch = BatchVerifier::new(17);
for j in 0 .. 17 {
batch.queue(
&mut OsRng,
j,
if i == j { invalid_statements.clone() } else { valid_statements.clone() },
);
}
invalid(batch, i);
}
// Test blame always identifies the left-most invalid statement
for i in 1 .. 32 {
for j in 1 .. i {
let mut batch = BatchVerifier::new(j);
let mut leftmost = None;
// Create j statements
for k in 0 .. j {
batch.queue(
&mut OsRng,
k,
// The usage of i / 10 makes this less likely to add invalid elements, and increases
// the space between them
// For high i values, yet low j values, this will make it likely that random elements
// are at/near the end
if ((OsRng.next_u64() % u64::try_from(1 + (i / 4)).unwrap()) == 0) ||
(leftmost.is_none() && (k == (j - 1)))
{
if leftmost.is_none() {
leftmost = Some(k);
}
invalid_statements.clone()
} else {
valid_statements.clone()
},
);
}
invalid(batch, leftmost.unwrap());
}
}
}

View File

@@ -10,7 +10,12 @@ use group::Group;
use k256::ProjectivePoint; use k256::ProjectivePoint;
use dalek_ff_group::EdwardsPoint; use dalek_ff_group::EdwardsPoint;
use crate::{straus, pippenger, multiexp, multiexp_vartime}; use crate::{straus, straus_vartime, pippenger, pippenger_vartime, multiexp, multiexp_vartime};
#[cfg(feature = "batch")]
mod batch;
#[cfg(feature = "batch")]
use batch::test_batch;
#[allow(dead_code)] #[allow(dead_code)]
fn benchmark_internal<G: Group>(straus_bool: bool) fn benchmark_internal<G: Group>(straus_bool: bool)
@@ -85,26 +90,59 @@ fn test_multiexp<G: Group>()
where where
G::Scalar: PrimeFieldBits + Zeroize, G::Scalar: PrimeFieldBits + Zeroize,
{ {
let test = |pairs: &[_], sum| {
// These should automatically determine the best algorithm
assert_eq!(multiexp(pairs), sum);
assert_eq!(multiexp_vartime(pairs), sum);
// Also explicitly test straus/pippenger for each bit size
if !pairs.is_empty() {
for window in 1 .. 8 {
assert_eq!(straus(pairs, window), sum);
assert_eq!(straus_vartime(pairs, window), sum);
assert_eq!(pippenger(pairs, window), sum);
assert_eq!(pippenger_vartime(pairs, window), sum);
}
}
};
// Test an empty multiexp is identity
test(&[], G::identity());
// Test an multiexp of identity/zero elements is identity
test(&[(G::Scalar::zero(), G::generator())], G::identity());
test(&[(G::Scalar::one(), G::identity())], G::identity());
// Test a variety of multiexp sizes
let mut pairs = Vec::with_capacity(1000); let mut pairs = Vec::with_capacity(1000);
let mut sum = G::identity(); let mut sum = G::identity();
for _ in 0 .. 10 { for _ in 0 .. 10 {
// Test a multiexp of a single item
// On successive loop iterations, this will test a multiexp with an odd number of pairs
pairs.push((G::Scalar::random(&mut OsRng), G::generator() * G::Scalar::random(&mut OsRng)));
sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0;
test(&pairs, sum);
for _ in 0 .. 100 { for _ in 0 .. 100 {
pairs.push((G::Scalar::random(&mut OsRng), G::generator() * G::Scalar::random(&mut OsRng))); pairs.push((G::Scalar::random(&mut OsRng), G::generator() * G::Scalar::random(&mut OsRng)));
sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0; sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0;
} }
assert_eq!(multiexp(&pairs), sum); test(&pairs, sum);
assert_eq!(multiexp_vartime(&pairs), sum);
} }
} }
#[test] #[test]
fn test_secp256k1() { fn test_secp256k1() {
test_multiexp::<ProjectivePoint>(); test_multiexp::<ProjectivePoint>();
#[cfg(feature = "batch")]
test_batch::<ProjectivePoint>();
} }
#[test] #[test]
fn test_ed25519() { fn test_ed25519() {
test_multiexp::<EdwardsPoint>(); test_multiexp::<EdwardsPoint>();
#[cfg(feature = "batch")]
test_batch::<EdwardsPoint>();
} }
#[ignore] #[ignore]

View File

@@ -15,16 +15,15 @@ rustdoc-args = ["--cfg", "docsrs"]
[dependencies] [dependencies]
rand_core = "0.6" rand_core = "0.6"
zeroize = { version = "1.5", features = ["zeroize_derive"] } zeroize = { version = "^1.5", features = ["zeroize_derive"] }
digest = "0.10" transcript = { package = "flexible-transcript", path = "../transcript", version = "0.2" }
group = "0.12"
ciphersuite = { path = "../ciphersuite", version = "0.1" } ciphersuite = { path = "../ciphersuite", version = "0.1" }
multiexp = { path = "../multiexp", version = "0.2", features = ["batch"] } multiexp = { path = "../multiexp", version = "0.2", features = ["batch"] }
[dev-dependencies] [dev-dependencies]
blake2 = "0.10" hex = "0.4"
sha2 = "0.10"
dalek-ff-group = { path = "../dalek-ff-group", version = "^0.1.2" } dalek-ff-group = { path = "../dalek-ff-group", version = "^0.1.2" }
ciphersuite = { path = "../ciphersuite", version = "0.1", features = ["ristretto"] } ciphersuite = { path = "../ciphersuite", version = "0.1", features = ["ed25519"] }

View File

@@ -2,63 +2,66 @@ use std::io::{self, Read, Write};
use zeroize::Zeroize; use zeroize::Zeroize;
use digest::Digest; use transcript::{Transcript, SecureDigest, DigestTranscript};
use group::{ use ciphersuite::{
group::{
ff::{Field, PrimeField}, ff::{Field, PrimeField},
Group, GroupEncoding, Group, GroupEncoding,
prime::PrimeGroup, },
Ciphersuite,
}; };
use multiexp::multiexp_vartime; use multiexp::multiexp_vartime;
use ciphersuite::Ciphersuite;
use crate::SchnorrSignature; use crate::SchnorrSignature;
fn digest<D: Digest>() -> D { // Returns a unbiased scalar weight to use on a signature in order to prevent malleability
D::new_with_prefix(b"Schnorr Aggregate") fn weight<D: Send + Clone + SecureDigest, F: PrimeField>(digest: &mut DigestTranscript<D>) -> F {
} let mut bytes = digest.challenge(b"aggregation_weight");
// A secure challenge will include the nonce and whatever message
// Depending on the environment, a secure challenge *may* not include the public key, even if
// the modern consensus is it should
// Accordingly, transcript both here, even if ideally only the latter would need to be
fn digest_accumulate<D: Digest, G: PrimeGroup>(digest: &mut D, key: G, challenge: G::Scalar) {
digest.update(key.to_bytes().as_ref());
digest.update(challenge.to_repr().as_ref());
}
// Performs a big-endian modular reduction of the hash value
// This is used by the below aggregator to prevent mutability
// Only an 128-bit scalar is needed to offer 128-bits of security against malleability per
// https://cr.yp.to/badbatch/badbatch-20120919.pdf
// Accordingly, while a 256-bit hash used here with a 256-bit ECC will have bias, it shouldn't be
// an issue
fn scalar_from_digest<D: Digest, F: PrimeField>(digest: D) -> F {
let bytes = digest.finalize();
debug_assert_eq!(bytes.len() % 8, 0); debug_assert_eq!(bytes.len() % 8, 0);
// This should be guaranteed thanks to SecureDigest
debug_assert!(bytes.len() >= 32);
let mut res = F::zero(); let mut res = F::zero();
let mut i = 0; let mut i = 0;
while i < bytes.len() {
if i != 0 { // Derive a scalar from enough bits of entropy that bias is < 2^128
for _ in 0 .. 8 { // This can't be const due to its usage of a generic
// Also due to the usize::try_from, yet that could be replaced with an `as`
// The + 7 forces it to round up
#[allow(non_snake_case)]
let BYTES: usize = usize::try_from(((F::NUM_BITS + 128) + 7) / 8).unwrap();
let mut remaining = BYTES;
// We load bits in as u64s
const WORD_LEN_IN_BITS: usize = 64;
const WORD_LEN_IN_BYTES: usize = WORD_LEN_IN_BITS / 8;
let mut first = true;
while i < remaining {
// Shift over the already loaded bits
if !first {
for _ in 0 .. WORD_LEN_IN_BITS {
res += res; res += res;
} }
} }
res += F::from(u64::from_be_bytes(bytes[i .. (i + 8)].try_into().unwrap())); first = false;
i += 8;
// Add the next 64 bits
res += F::from(u64::from_be_bytes(bytes[i .. (i + WORD_LEN_IN_BYTES)].try_into().unwrap()));
i += WORD_LEN_IN_BYTES;
// If we've exhausted this challenge, get another
if i == bytes.len() {
bytes = digest.challenge(b"aggregation_weight_continued");
remaining -= i;
i = 0;
}
} }
res res
} }
fn digest_yield<D: Digest, F: PrimeField>(digest: D, i: usize) -> F {
scalar_from_digest(digest.chain_update(
u32::try_from(i).expect("more than 4 billion signatures in aggregate").to_le_bytes(),
))
}
/// Aggregate Schnorr signature as defined in <https://eprint.iacr.org/2021/350>. /// Aggregate Schnorr signature as defined in <https://eprint.iacr.org/2021/350>.
#[allow(non_snake_case)] #[allow(non_snake_case)]
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)] #[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
@@ -82,7 +85,7 @@ impl<C: Ciphersuite> SchnorrAggregate<C> {
Ok(SchnorrAggregate { Rs, s: C::read_F(reader)? }) Ok(SchnorrAggregate { Rs, s: C::read_F(reader)? })
} }
/// Write a SchnorrAggregate to something implementing Read. /// Write a SchnorrAggregate to something implementing Write.
pub fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> { pub fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all( writer.write_all(
&u32::try_from(self.Rs.len()) &u32::try_from(self.Rs.len())
@@ -96,7 +99,7 @@ impl<C: Ciphersuite> SchnorrAggregate<C> {
writer.write_all(self.s.to_repr().as_ref()) writer.write_all(self.s.to_repr().as_ref())
} }
/// Serialize a SchnorrAggregate, returning a Vec<u8>. /// Serialize a SchnorrAggregate, returning a `Vec<u8>`.
pub fn serialize(&self) -> Vec<u8> { pub fn serialize(&self) -> Vec<u8> {
let mut buf = vec![]; let mut buf = vec![];
self.write(&mut buf).unwrap(); self.write(&mut buf).unwrap();
@@ -104,20 +107,28 @@ impl<C: Ciphersuite> SchnorrAggregate<C> {
} }
/// Perform signature verification. /// Perform signature verification.
///
/// Challenges must be properly crafted, which means being binding to the public key, nonce, and
/// any message. Failure to do so will let a malicious adversary to forge signatures for
/// different keys/messages.
///
/// The DST used here must prevent a collision with whatever hash function produced the
/// challenges.
#[must_use] #[must_use]
pub fn verify<D: Clone + Digest>(&self, keys_and_challenges: &[(C::G, C::F)]) -> bool { pub fn verify(&self, dst: &'static [u8], keys_and_challenges: &[(C::G, C::F)]) -> bool {
if self.Rs.len() != keys_and_challenges.len() { if self.Rs.len() != keys_and_challenges.len() {
return false; return false;
} }
let mut digest = digest::<D>(); let mut digest = DigestTranscript::<C::H>::new(dst);
for (key, challenge) in keys_and_challenges { digest.domain_separate(b"signatures");
digest_accumulate(&mut digest, *key, *challenge); for (_, challenge) in keys_and_challenges {
digest.append_message(b"challenge", challenge.to_repr());
} }
let mut pairs = Vec::with_capacity((2 * keys_and_challenges.len()) + 1); let mut pairs = Vec::with_capacity((2 * keys_and_challenges.len()) + 1);
for (i, (key, challenge)) in keys_and_challenges.iter().enumerate() { for (i, (key, challenge)) in keys_and_challenges.iter().enumerate() {
let z = digest_yield(digest.clone(), i); let z = weight(&mut digest);
pairs.push((z, self.Rs[i])); pairs.push((z, self.Rs[i]));
pairs.push((z * challenge, *key)); pairs.push((z * challenge, *key));
} }
@@ -128,31 +139,30 @@ impl<C: Ciphersuite> SchnorrAggregate<C> {
#[allow(non_snake_case)] #[allow(non_snake_case)]
#[derive(Clone, Debug, Zeroize)] #[derive(Clone, Debug, Zeroize)]
pub struct SchnorrAggregator<D: Clone + Digest, C: Ciphersuite> { pub struct SchnorrAggregator<C: Ciphersuite> {
digest: D, digest: DigestTranscript<C::H>,
sigs: Vec<SchnorrSignature<C>>, sigs: Vec<SchnorrSignature<C>>,
} }
impl<D: Clone + Digest, C: Ciphersuite> Default for SchnorrAggregator<D, C> { impl<C: Ciphersuite> SchnorrAggregator<C> {
fn default() -> Self {
Self { digest: digest(), sigs: vec![] }
}
}
impl<D: Clone + Digest, C: Ciphersuite> SchnorrAggregator<D, C> {
/// Create a new aggregator. /// Create a new aggregator.
pub fn new() -> Self { ///
Self::default() /// The DST used here must prevent a collision with whatever hash function produced the
/// challenges.
pub fn new(dst: &'static [u8]) -> Self {
let mut res = Self { digest: DigestTranscript::<C::H>::new(dst), sigs: vec![] };
res.digest.domain_separate(b"signatures");
res
} }
/// Aggregate a signature. /// Aggregate a signature.
pub fn aggregate(&mut self, public_key: C::G, challenge: C::F, sig: SchnorrSignature<C>) { pub fn aggregate(&mut self, challenge: C::F, sig: SchnorrSignature<C>) {
digest_accumulate(&mut self.digest, public_key, challenge); self.digest.append_message(b"challenge", challenge.to_repr());
self.sigs.push(sig); self.sigs.push(sig);
} }
/// Complete aggregation, returning None if none were aggregated. /// Complete aggregation, returning None if none were aggregated.
pub fn complete(self) -> Option<SchnorrAggregate<C>> { pub fn complete(mut self) -> Option<SchnorrAggregate<C>> {
if self.sigs.is_empty() { if self.sigs.is_empty() {
return None; return None;
} }
@@ -161,7 +171,7 @@ impl<D: Clone + Digest, C: Ciphersuite> SchnorrAggregator<D, C> {
SchnorrAggregate { Rs: Vec::with_capacity(self.sigs.len()), s: C::F::zero() }; SchnorrAggregate { Rs: Vec::with_capacity(self.sigs.len()), s: C::F::zero() };
for i in 0 .. self.sigs.len() { for i in 0 .. self.sigs.len() {
aggregate.Rs.push(self.sigs[i].R); aggregate.Rs.push(self.sigs[i].R);
aggregate.s += self.sigs[i].s * digest_yield::<_, C::F>(self.digest.clone(), i); aggregate.s += self.sigs[i].s * weight::<_, C::F>(&mut self.digest);
} }
Some(aggregate) Some(aggregate)
} }

View File

@@ -5,21 +5,28 @@ use rand_core::{RngCore, CryptoRng};
use zeroize::{Zeroize, Zeroizing}; use zeroize::{Zeroize, Zeroizing};
use group::{ use ciphersuite::{
group::{
ff::{Field, PrimeField}, ff::{Field, PrimeField},
Group, GroupEncoding, Group, GroupEncoding,
},
Ciphersuite,
}; };
use multiexp::{multiexp_vartime, BatchVerifier}; use multiexp::{multiexp_vartime, BatchVerifier};
use ciphersuite::Ciphersuite;
pub mod aggregate; pub mod aggregate;
#[cfg(test)] #[cfg(test)]
mod tests; mod tests;
/// A Schnorr signature of the form (R, s) where s = r + cx. /// A Schnorr signature of the form (R, s) where s = r + cx.
///
/// These are intended to be strict. It is generic over Ciphersuite which is for PrimeGroups,
/// and mandates canonical encodings in its read function.
///
/// RFC 8032 has an alternative verification formula, 8R = 8s - 8cX, which is intended to handle
/// torsioned nonces/public keys. Due to this library's strict requirements, such signatures will
/// not be verifiable with this library.
#[allow(non_snake_case)] #[allow(non_snake_case)]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)] #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct SchnorrSignature<C: Ciphersuite> { pub struct SchnorrSignature<C: Ciphersuite> {
@@ -39,7 +46,7 @@ impl<C: Ciphersuite> SchnorrSignature<C> {
writer.write_all(self.s.to_repr().as_ref()) writer.write_all(self.s.to_repr().as_ref())
} }
/// Serialize a SchnorrSignature, returning a Vec<u8>. /// Serialize a SchnorrSignature, returning a `Vec<u8>`.
pub fn serialize(&self) -> Vec<u8> { pub fn serialize(&self) -> Vec<u8> {
let mut buf = vec![]; let mut buf = vec![];
self.write(&mut buf).unwrap(); self.write(&mut buf).unwrap();
@@ -47,6 +54,10 @@ impl<C: Ciphersuite> SchnorrSignature<C> {
} }
/// Sign a Schnorr signature with the given nonce for the specified challenge. /// Sign a Schnorr signature with the given nonce for the specified challenge.
///
/// This challenge must be properly crafted, which means being binding to the public key, nonce,
/// and any message. Failure to do so will let a malicious adversary to forge signatures for
/// different keys/messages.
pub fn sign( pub fn sign(
private_key: &Zeroizing<C::F>, private_key: &Zeroizing<C::F>,
nonce: Zeroizing<C::F>, nonce: Zeroizing<C::F>,
@@ -76,12 +87,20 @@ impl<C: Ciphersuite> SchnorrSignature<C> {
} }
/// Verify a Schnorr signature for the given key with the specified challenge. /// Verify a Schnorr signature for the given key with the specified challenge.
///
/// This challenge must be properly crafted, which means being binding to the public key, nonce,
/// and any message. Failure to do so will let a malicious adversary to forge signatures for
/// different keys/messages.
#[must_use] #[must_use]
pub fn verify(&self, public_key: C::G, challenge: C::F) -> bool { pub fn verify(&self, public_key: C::G, challenge: C::F) -> bool {
multiexp_vartime(&self.batch_statements(public_key, challenge)).is_identity().into() multiexp_vartime(&self.batch_statements(public_key, challenge)).is_identity().into()
} }
/// Queue a signature for batch verification. /// Queue a signature for batch verification.
///
/// This challenge must be properly crafted, which means being binding to the public key, nonce,
/// and any message. Failure to do so will let a malicious adversary to forge signatures for
/// different keys/messages.
pub fn batch_verify<R: RngCore + CryptoRng, I: Copy + Zeroize>( pub fn batch_verify<R: RngCore + CryptoRng, I: Copy + Zeroize>(
&self, &self,
rng: &mut R, rng: &mut R,

View File

@@ -1,22 +1,21 @@
use core::ops::Deref; use core::ops::Deref;
use zeroize::Zeroizing;
use rand_core::OsRng; use rand_core::OsRng;
use zeroize::Zeroizing; use ciphersuite::{
group::{ff::Field, Group},
use blake2::{digest::typenum::U32, Blake2b}; Ciphersuite, Ed25519,
type Blake2b256 = Blake2b<U32>; };
use group::{ff::Field, Group};
use multiexp::BatchVerifier; use multiexp::BatchVerifier;
use ciphersuite::{Ciphersuite, Ristretto};
use crate::{ use crate::{
SchnorrSignature, SchnorrSignature,
aggregate::{SchnorrAggregator, SchnorrAggregate}, aggregate::{SchnorrAggregator, SchnorrAggregate},
}; };
mod rfc8032;
pub(crate) fn sign<C: Ciphersuite>() { pub(crate) fn sign<C: Ciphersuite>() {
let private_key = Zeroizing::new(C::random_nonzero_F(&mut OsRng)); let private_key = Zeroizing::new(C::random_nonzero_F(&mut OsRng));
let nonce = Zeroizing::new(C::random_nonzero_F(&mut OsRng)); let nonce = Zeroizing::new(C::random_nonzero_F(&mut OsRng));
@@ -54,7 +53,7 @@ pub(crate) fn batch_verify<C: Ciphersuite>() {
for (i, sig) in sigs.iter().enumerate() { for (i, sig) in sigs.iter().enumerate() {
sig.batch_verify(&mut OsRng, &mut batch, i, C::generator() * keys[i].deref(), challenges[i]); sig.batch_verify(&mut OsRng, &mut batch, i, C::generator() * keys[i].deref(), challenges[i]);
} }
batch.verify_with_vartime_blame().unwrap(); batch.verify_vartime_with_vartime_blame().unwrap();
} }
// Shift 1 from s from one to another and verify it fails // Shift 1 from s from one to another and verify it fails
@@ -70,7 +69,7 @@ pub(crate) fn batch_verify<C: Ciphersuite>() {
} }
sig.batch_verify(&mut OsRng, &mut batch, i, C::generator() * keys[i].deref(), challenges[i]); sig.batch_verify(&mut OsRng, &mut batch, i, C::generator() * keys[i].deref(), challenges[i]);
} }
if let Err(blame) = batch.verify_with_vartime_blame() { if let Err(blame) = batch.verify_vartime_with_vartime_blame() {
assert!((blame == 1) || (blame == 2)); assert!((blame == 1) || (blame == 2));
} else { } else {
panic!("Batch verification considered malleated signatures valid"); panic!("Batch verification considered malleated signatures valid");
@@ -79,15 +78,17 @@ pub(crate) fn batch_verify<C: Ciphersuite>() {
} }
pub(crate) fn aggregate<C: Ciphersuite>() { pub(crate) fn aggregate<C: Ciphersuite>() {
const DST: &[u8] = b"Schnorr Aggregator Test";
// Create 5 signatures // Create 5 signatures
let mut keys = vec![]; let mut keys = vec![];
let mut challenges = vec![]; let mut challenges = vec![];
let mut aggregator = SchnorrAggregator::<Blake2b256, C>::new(); let mut aggregator = SchnorrAggregator::<C>::new(DST);
for i in 0 .. 5 { for i in 0 .. 5 {
keys.push(Zeroizing::new(C::random_nonzero_F(&mut OsRng))); keys.push(Zeroizing::new(C::random_nonzero_F(&mut OsRng)));
// In practice, this MUST be a secure challenge binding to the nonce, key, and any message
challenges.push(C::random_nonzero_F(&mut OsRng)); challenges.push(C::random_nonzero_F(&mut OsRng));
aggregator.aggregate( aggregator.aggregate(
C::generator() * keys[i].deref(),
challenges[i], challenges[i],
SchnorrSignature::<C>::sign( SchnorrSignature::<C>::sign(
&keys[i], &keys[i],
@@ -100,20 +101,21 @@ pub(crate) fn aggregate<C: Ciphersuite>() {
let aggregate = aggregator.complete().unwrap(); let aggregate = aggregator.complete().unwrap();
let aggregate = let aggregate =
SchnorrAggregate::<C>::read::<&[u8]>(&mut aggregate.serialize().as_ref()).unwrap(); SchnorrAggregate::<C>::read::<&[u8]>(&mut aggregate.serialize().as_ref()).unwrap();
assert!(aggregate.verify::<Blake2b256>( assert!(aggregate.verify(
DST,
keys keys
.iter() .iter()
.map(|key| C::generator() * key.deref()) .map(|key| C::generator() * key.deref())
.zip(challenges.iter().cloned()) .zip(challenges.iter().cloned())
.collect::<Vec<_>>() .collect::<Vec<_>>()
.as_ref() .as_ref(),
)); ));
} }
#[test] #[test]
fn test() { fn test() {
sign::<Ristretto>(); sign::<Ed25519>();
verify::<Ristretto>(); verify::<Ed25519>();
batch_verify::<Ristretto>(); batch_verify::<Ed25519>();
aggregate::<Ristretto>(); aggregate::<Ed25519>();
} }

View File

@@ -0,0 +1,59 @@
// RFC 8032 Ed25519 test vectors
// The s = r + cx format modernly used for Schnorr signatures was popularized by EdDSA
// While not all RFC 8032 signatures will work with this library, any canonical ones will, and
// these vectors are canonical
use sha2::{Digest, Sha512};
use dalek_ff_group::Scalar;
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ed25519};
use crate::SchnorrSignature;
// Public key, message, signature
#[rustfmt::skip]
const VECTORS: [(&str, &str, &str); 5] = [
(
"d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a",
"",
"e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b"
),
(
"3d4017c3e843895a92b70aa74d1b7ebc9c982ccf2ec4968cc0cd55f12af4660c",
"72",
"92a009a9f0d4cab8720e820b5f642540a2b27b5416503f8fb3762223ebdb69da085ac1e43e15996e458f3613d0f11d8c387b2eaeb4302aeeb00d291612bb0c00"
),
(
"fc51cd8e6218a1a38da47ed00230f0580816ed13ba3303ac5deb911548908025",
"af82",
"6291d657deec24024827e69c3abe01a30ce548a284743a445e3680d7db5ac3ac18ff9b538d16f290ae67f760984dc6594a7c15e9716ed28dc027beceea1ec40a"
),
(
"278117fc144c72340f67d0f2316e8386ceffbf2b2428c9c51fef7c597f1d426e",
"08b8b2b733424243760fe426a4b54908632110a66c2f6591eabd3345e3e4eb98fa6e264bf09efe12ee50f8f54e9f77b1e355f6c50544e23fb1433ddf73be84d879de7c0046dc4996d9e773f4bc9efe5738829adb26c81b37c93a1b270b20329d658675fc6ea534e0810a4432826bf58c941efb65d57a338bbd2e26640f89ffbc1a858efcb8550ee3a5e1998bd177e93a7363c344fe6b199ee5d02e82d522c4feba15452f80288a821a579116ec6dad2b3b310da903401aa62100ab5d1a36553e06203b33890cc9b832f79ef80560ccb9a39ce767967ed628c6ad573cb116dbefefd75499da96bd68a8a97b928a8bbc103b6621fcde2beca1231d206be6cd9ec7aff6f6c94fcd7204ed3455c68c83f4a41da4af2b74ef5c53f1d8ac70bdcb7ed185ce81bd84359d44254d95629e9855a94a7c1958d1f8ada5d0532ed8a5aa3fb2d17ba70eb6248e594e1a2297acbbb39d502f1a8c6eb6f1ce22b3de1a1f40cc24554119a831a9aad6079cad88425de6bde1a9187ebb6092cf67bf2b13fd65f27088d78b7e883c8759d2c4f5c65adb7553878ad575f9fad878e80a0c9ba63bcbcc2732e69485bbc9c90bfbd62481d9089beccf80cfe2df16a2cf65bd92dd597b0707e0917af48bbb75fed413d238f5555a7a569d80c3414a8d0859dc65a46128bab27af87a71314f318c782b23ebfe808b82b0ce26401d2e22f04d83d1255dc51addd3b75a2b1ae0784504df543af8969be3ea7082ff7fc9888c144da2af58429ec96031dbcad3dad9af0dcbaaaf268cb8fcffead94f3c7ca495e056a9b47acdb751fb73e666c6c655ade8297297d07ad1ba5e43f1bca32301651339e22904cc8c42f58c30c04aafdb038dda0847dd988dcda6f3bfd15c4b4c4525004aa06eeff8ca61783aacec57fb3d1f92b0fe2fd1a85f6724517b65e614ad6808d6f6ee34dff7310fdc82aebfd904b01e1dc54b2927094b2db68d6f903b68401adebf5a7e08d78ff4ef5d63653a65040cf9bfd4aca7984a74d37145986780fc0b16ac451649de6188a7dbdf191f64b5fc5e2ab47b57f7f7276cd419c17a3ca8e1b939ae49e488acba6b965610b5480109c8b17b80e1b7b750dfc7598d5d5011fd2dcc5600a32ef5b52a1ecc820e308aa342721aac0943bf6686b64b2579376504ccc493d97e6aed3fb0f9cd71a43dd497f01f17c0e2cb3797aa2a2f256656168e6c496afc5fb93246f6b1116398a346f1a641f3b041e989f7914f90cc2c7fff357876e506b50d334ba77c225bc307ba537152f3f1610e4eafe595f6d9d90d11faa933a15ef1369546868a7f3a45a96768d40fd9d03412c091c6315cf4fde7cb68606937380db2eaaa707b4c4185c32eddcdd306705e4dc1ffc872eeee475a64dfac86aba41c0618983f8741c5e
f68d3a101e8a3b8cac60c905c15fc910840b94c00a0b9d0",
"0aab4c900501b3e24d7cdf4663326a3a87df5e4843b2cbdb67cbf6e460fec350aa5371b1508f9f4528ecea23c436d94b5e8fcd4f681e30a6ac00a9704a188a03"
),
(
"ec172b93ad5e563bf4932c70e1245034c35467ef2efd4d64ebf819683467e2bf",
"ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f",
"dc2a4459e7369633a52b1bf277839a00201009a3efbf3ecb69bea2186c26b58909351fc9ac90b3ecfdfbc7c66431e0303dca179c138ac17ad9bef1177331a704"
),
];
#[test]
fn test_rfc8032() {
for vector in VECTORS {
let key = Ed25519::read_G::<&[u8]>(&mut hex::decode(vector.0).unwrap().as_ref()).unwrap();
let sig =
SchnorrSignature::<Ed25519>::read::<&[u8]>(&mut hex::decode(vector.2).unwrap().as_ref())
.unwrap();
let hram = Sha512::new_with_prefix(
&[sig.R.to_bytes().as_ref(), &key.to_bytes(), &hex::decode(vector.1).unwrap()].concat(),
);
assert!(sig.verify(key, Scalar::from_hash(hram)));
}
}

View File

@@ -18,6 +18,11 @@ digest = "0.10"
blake2 = { version = "0.10", optional = true } blake2 = { version = "0.10", optional = true }
merlin = { version = "3", optional = true } merlin = { version = "3", optional = true }
[dev-dependencies]
sha2 = "0.10"
blake2 = "0.10"
[features] [features]
recommended = ["blake2"] recommended = ["blake2"]
merlin = ["dep:merlin"] merlin = ["dep:merlin"]
tests = []

View File

@@ -6,10 +6,18 @@ mod merlin;
#[cfg(feature = "merlin")] #[cfg(feature = "merlin")]
pub use crate::merlin::MerlinTranscript; pub use crate::merlin::MerlinTranscript;
use digest::{typenum::type_operators::IsGreaterOrEqual, consts::U256, Digest, Output, HashMarker}; #[cfg(any(test, feature = "tests"))]
pub mod tests;
pub trait Transcript { use digest::{
type Challenge: Clone + Send + Sync + AsRef<[u8]>; typenum::{
consts::U32, marker_traits::NonZero, type_operators::IsGreaterOrEqual, operator_aliases::GrEq,
},
Digest, Output, HashMarker,
};
pub trait Transcript: Send + Clone {
type Challenge: Send + Sync + Clone + AsRef<[u8]>;
/// Create a new transcript with the specified name. /// Create a new transcript with the specified name.
fn new(name: &'static [u8]) -> Self; fn new(name: &'static [u8]) -> Self;
@@ -20,13 +28,19 @@ pub trait Transcript {
/// Append a message to the transcript. /// Append a message to the transcript.
fn append_message<M: AsRef<[u8]>>(&mut self, label: &'static [u8], message: M); fn append_message<M: AsRef<[u8]>>(&mut self, label: &'static [u8], message: M);
/// Produce a challenge. This MUST update the transcript as it does so, preventing the same /// Produce a challenge.
/// challenge from being generated multiple times. ///
/// Implementors MUST update the transcript as it does so, preventing the same challenge from
/// being generated multiple times.
fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge; fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge;
/// Produce a RNG seed. Helper function for parties needing to generate random data from an /// Produce a RNG seed.
/// agreed upon state. Internally calls the challenge function for the needed bytes, converting ///
/// them to the seed format rand_core expects. /// Helper function for parties needing to generate random data from an agreed upon state.
///
/// Implementors MAY internally call the challenge function for the needed bytes, and accordingly
/// produce a transcript conflict between two transcripts, one which called challenge(label) and
/// one which called rng_seed(label) at the same point.
fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32]; fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32];
} }
@@ -36,6 +50,8 @@ enum DigestTranscriptMember {
Label, Label,
Value, Value,
Challenge, Challenge,
Continued,
Challenged,
} }
impl DigestTranscriptMember { impl DigestTranscriptMember {
@@ -46,20 +62,30 @@ impl DigestTranscriptMember {
DigestTranscriptMember::Label => 2, DigestTranscriptMember::Label => 2,
DigestTranscriptMember::Value => 3, DigestTranscriptMember::Value => 3,
DigestTranscriptMember::Challenge => 4, DigestTranscriptMember::Challenge => 4,
DigestTranscriptMember::Continued => 5,
DigestTranscriptMember::Challenged => 6,
} }
} }
} }
/// A trait defining cryptographic Digests with at least a 256-byte output size, assuming at least /// A trait defining cryptographic Digests with at least a 256-bit output size, assuming at least a
/// a 128-bit level of security accordingly. /// 128-bit level of security accordingly.
pub trait SecureDigest: Digest + HashMarker {} pub trait SecureDigest: Digest + HashMarker {}
impl<D: Digest + HashMarker> SecureDigest for D where D::OutputSize: IsGreaterOrEqual<U256> {} impl<D: Digest + HashMarker> SecureDigest for D
where
// This just lets us perform the comparison
D::OutputSize: IsGreaterOrEqual<U32>,
// Perform the comparison and make sure it's true (not zero), meaning D::OutputSize is >= U32
// This should be U32 as it's length in bytes, not bits
GrEq<D::OutputSize, U32>: NonZero,
{
}
/// A simple transcript format constructed around the specified hash algorithm. /// A simple transcript format constructed around the specified hash algorithm.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct DigestTranscript<D: Clone + SecureDigest>(D); pub struct DigestTranscript<D: Send + Clone + SecureDigest>(D);
impl<D: Clone + SecureDigest> DigestTranscript<D> { impl<D: Send + Clone + SecureDigest> DigestTranscript<D> {
fn append(&mut self, kind: DigestTranscriptMember, value: &[u8]) { fn append(&mut self, kind: DigestTranscriptMember, value: &[u8]) {
self.0.update([kind.as_u8()]); self.0.update([kind.as_u8()]);
// Assumes messages don't exceed 16 exabytes // Assumes messages don't exceed 16 exabytes
@@ -68,7 +94,7 @@ impl<D: Clone + SecureDigest> DigestTranscript<D> {
} }
} }
impl<D: Clone + SecureDigest> Transcript for DigestTranscript<D> { impl<D: Send + Clone + SecureDigest> Transcript for DigestTranscript<D> {
type Challenge = Output<D>; type Challenge = Output<D>;
fn new(name: &'static [u8]) -> Self { fn new(name: &'static [u8]) -> Self {
@@ -88,7 +114,13 @@ impl<D: Clone + SecureDigest> Transcript for DigestTranscript<D> {
fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge { fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge {
self.append(DigestTranscriptMember::Challenge, label); self.append(DigestTranscriptMember::Challenge, label);
self.0.clone().finalize() let mut cloned = self.0.clone();
// Explicitly fork these transcripts to prevent length extension attacks from being possible
// (at least, without the additional ability to remove a byte from a finalized hash)
self.0.update([DigestTranscriptMember::Continued.as_u8()]);
cloned.update([DigestTranscriptMember::Challenged.as_u8()]);
cloned.finalize()
} }
fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32] { fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32] {

View File

@@ -2,8 +2,18 @@ use core::fmt::{Debug, Formatter};
use crate::Transcript; use crate::Transcript;
/// A wrapper around a Merlin transcript which satisfiees the Transcript API.
///
/// Challenges are fixed to 64 bytes, despite Merlin supporting variable length challenges.
///
/// This implementation is intended to remain in the spirit of Merlin more than it's intended to be
/// in the spirit of the provided DigestTranscript. While DigestTranscript uses flags for each of
/// its different field types, the domain_separate function simply appends a message with a label
/// of "dom-sep", Merlin's preferred domain separation label. Since this could introduce transcript
/// conflicts between a domain separation and a message with a label of "dom-sep", the
/// append_message function uses an assertion to prevent such labels.
#[derive(Clone)] #[derive(Clone)]
pub struct MerlinTranscript(pub merlin::Transcript); pub struct MerlinTranscript(merlin::Transcript);
// Merlin doesn't implement Debug so provide a stub which won't panic // Merlin doesn't implement Debug so provide a stub which won't panic
impl Debug for MerlinTranscript { impl Debug for MerlinTranscript {
fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), core::fmt::Error> { fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), core::fmt::Error> {
@@ -12,11 +22,10 @@ impl Debug for MerlinTranscript {
} }
impl Transcript for MerlinTranscript { impl Transcript for MerlinTranscript {
// Uses a challenge length of 64 bytes to support wide reduction on generated scalars // Uses a challenge length of 64 bytes to support wide reduction on commonly used EC scalars
// From a security level standpoint, this should just be 32 bytes // From a security level standpoint (Merlin targets 128-bits), this should just be 32 bytes
// From a Merlin standpoint, this should be variable per call // From a Merlin standpoint, this should be variable per call
// From a practical standpoint, this is a demo file not planned to be used and anything using // From a practical standpoint, this should be practical
// this wrapper should be secure with this setting
type Challenge = [u8; 64]; type Challenge = [u8; 64];
fn new(name: &'static [u8]) -> Self { fn new(name: &'static [u8]) -> Self {
@@ -24,10 +33,14 @@ impl Transcript for MerlinTranscript {
} }
fn domain_separate(&mut self, label: &'static [u8]) { fn domain_separate(&mut self, label: &'static [u8]) {
self.append_message(b"dom-sep", label); self.0.append_message(b"dom-sep", label);
} }
fn append_message<M: AsRef<[u8]>>(&mut self, label: &'static [u8], message: M) { fn append_message<M: AsRef<[u8]>>(&mut self, label: &'static [u8], message: M) {
assert!(
label != "dom-sep".as_bytes(),
"\"dom-sep\" is reserved for the domain_separate function",
);
self.0.append_message(label, message.as_ref()); self.0.append_message(label, message.as_ref());
} }

View File

@@ -0,0 +1,102 @@
use crate::Transcript;
/// Generic consistency tests for any `Transcript` implementation.
///
/// Verifies that every input which should affect the transcript state (name,
/// domain separator, message label, message value, challenge label, RNG seed
/// label) actually changes the produced challenges/seeds, and that producing a
/// challenge or seed advances the transcript.
pub fn test_transcript<T: Transcript>()
where
  T::Challenge: PartialEq,
{
  // Ensure distinct names cause distinct challenges
  {
    let mut t1 = T::new(b"1");
    let mut t2 = T::new(b"2");
    assert!(t1.challenge(b"c") != t2.challenge(b"c"));
  }

  // Ensure names can't lead into labels
  // (name b"12" + label b"c" must differ from name b"1" + label b"2c")
  {
    let mut t1 = T::new(b"12");
    let c1 = t1.challenge(b"c");
    let mut t2 = T::new(b"1");
    let c2 = t2.challenge(b"2c");
    assert!(c1 != c2);
  }

  // Helpers: a fresh transcript, and a challenge drawn from a transcript
  let t = || T::new(b"name");
  let c = |mut t: T| t.challenge(b"c");

  // Ensure domain separators do something
  {
    let mut t1 = t();
    t1.domain_separate(b"d");
    assert!(c(t1) != c(t()));
  }

  // Ensure distinct domain separators create distinct challenges
  {
    let mut t1 = t();
    let mut t2 = t();
    t1.domain_separate(b"d1");
    t2.domain_separate(b"d2");
    assert!(c(t1) != c(t2));
  }

  // Ensure distinct messages create distinct challenges
  {
    // By value (same label, differing message values)
    {
      let mut t1 = t();
      let mut t2 = t();
      t1.append_message(b"msg", b"a");
      t2.append_message(b"msg", b"b");
      assert!(c(t1) != c(t2));
    }

    // By label (differing labels, same message value)
    {
      let mut t1 = t();
      let mut t2 = t();
      t1.append_message(b"a", b"val");
      t2.append_message(b"b", b"val");
      assert!(c(t1) != c(t2));
    }
  }

  // Ensure challenges advance the transcript
  // (two identical challenge calls must not return the same challenge)
  {
    let mut t = t();
    let c1 = t.challenge(b"c");
    let c2 = t.challenge(b"c");
    assert!(c1 != c2);
  }

  // Ensure distinct challenge labels produce distinct challenges
  assert!(t().challenge(b"a") != t().challenge(b"b"));

  // Ensure RNG seed calls advance the transcript
  {
    let mut t = t();
    let s1 = t.rng_seed(b"s");
    let s2 = t.rng_seed(b"s");
    assert!(s1 != s2);
  }

  // Ensure distinct RNG seed labels produce distinct seeds
  assert!(t().rng_seed(b"a") != t().rng_seed(b"b"));
}
// Run the generic transcript tests against DigestTranscript, backed by both a
// fixed-size (SHA-256) and a wide (BLAKE2b-512) hash.
#[test]
fn test_digest() {
  test_transcript::<crate::DigestTranscript<sha2::Sha256>>();
  test_transcript::<crate::DigestTranscript<blake2::Blake2b512>>();
}
// Run the generic transcript tests against RecommendedTranscript, which is
// only compiled when the "recommended" feature is enabled.
#[cfg(feature = "recommended")]
#[test]
fn test_recommended() {
  test_transcript::<crate::RecommendedTranscript>();
}
// Run the generic transcript tests against the Merlin wrapper, which is only
// compiled when the "merlin" feature is enabled.
#[cfg(feature = "merlin")]
#[test]
fn test_merlin() {
  test_transcript::<crate::MerlinTranscript>();
}

View File

@@ -39,10 +39,8 @@ elements instead of `2n`.
Finally, to support additive offset signing schemes (accounts, stealth Finally, to support additive offset signing schemes (accounts, stealth
addresses, randomization), it's possible to specify a scalar offset for keys. addresses, randomization), it's possible to specify a scalar offset for keys.
The public key signed for is also offset by this value. During the signing The public key signed for is also offset by this value. During the signing
process, the offset is explicitly transcripted. Then, the offset is divided by process, the offset is explicitly transcripted. Then, the offset is added to the
`p`, the amount of participating signers, and each signer adds it to their participant with the lowest ID.
post-interpolation key share. This maintains a leaderless protocol while still
being correct.
# Caching # Caching

View File

@@ -16,23 +16,25 @@ rustdoc-args = ["--cfg", "docsrs"]
[dependencies] [dependencies]
# Macros # Macros
async-trait = "0.1" async-trait = "0.1"
zeroize = "1.5" zeroize = "^1.5"
thiserror = "1" thiserror = "1"
rand_core = "0.6" rand_core = "0.6"
# Cryptography # Cryptography
group = "0.12" transcript = { package = "flexible-transcript", path = "../crypto/transcript", features = ["recommended"] }
curve25519-dalek = { version = "3", features = ["std"] }
dalek-ff-group = { path = "../crypto/dalek-ff-group" }
transcript = { package = "flexible-transcript", path = "../crypto/transcript" } group = "0.12"
frost = { package = "modular-frost", path = "../crypto/frost", features = ["secp256k1", "ed25519"] } frost = { package = "modular-frost", path = "../crypto/frost", features = ["secp256k1", "ed25519"] }
# Monero # Monero
curve25519-dalek = { version = "3", features = ["std"] }
dalek-ff-group = { path = "../crypto/dalek-ff-group", features = ["black_box"] }
monero-serai = { path = "../coins/monero", features = ["multisig"] } monero-serai = { path = "../coins/monero", features = ["multisig"] }
# Bitcoin
bitcoin-serai = { path = "../coins/bitcoin" } bitcoin-serai = { path = "../coins/bitcoin" }
k256 = { version = "0.11", features = ["arithmetic"] } k256 = { version = "0.12", features = ["arithmetic"] }
bitcoin = "0.29" bitcoin = "0.29"
hex = "0.4" hex = "0.4"
secp256k1 = { version = "0.24", features = ["global-context", "rand-std"] } secp256k1 = { version = "0.24", features = ["global-context", "rand-std"] }

View File

@@ -3,7 +3,7 @@ use std::{marker::Send, collections::HashMap};
use async_trait::async_trait; use async_trait::async_trait;
use thiserror::Error; use thiserror::Error;
use frost::{curve::Ciphersuite, FrostError}; use frost::{curve::Ciphersuite, Participant, FrostError};
mod coin; mod coin;
use coin::{CoinError, Coin}; use coin::{CoinError, Coin};
@@ -18,7 +18,7 @@ pub enum NetworkError {}
#[async_trait] #[async_trait]
pub trait Network: Send { pub trait Network: Send {
async fn round(&mut self, data: Vec<u8>) -> Result<HashMap<u16, Vec<u8>>, NetworkError>; async fn round(&mut self, data: Vec<u8>) -> Result<HashMap<Participant, Vec<u8>>, NetworkError>;
} }
#[derive(Clone, Error, Debug)] #[derive(Clone, Error, Debug)]