Remove dleq, dkg-promote, dkg-pedpop per #597

Does not move them to a new repository at this time.
Luke Parker
2025-08-23 15:07:40 -04:00
parent 8c366107ae
commit 33faa53b56
30 changed files with 0 additions and 4059 deletions


@@ -1,37 +0,0 @@
[package]
name = "dkg-pedpop"
version = "0.6.0"
description = "The PedPoP distributed key generation protocol"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/pedpop"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
rust-version = "1.80"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
thiserror = { version = "2", default-features = false, features = ["std"] }
zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../../transcript", version = "^0.3.3", default-features = false, features = ["std", "recommended"] }
chacha20 = { version = "0.9", default-features = false, features = ["std", "zeroize"] }
multiexp = { path = "../../multiexp", version = "0.4", default-features = false, features = ["std"] }
ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../../schnorr", version = "^0.5.1", default-features = false, features = ["std"] }
dleq = { path = "../../dleq", version = "^0.4.1", default-features = false, features = ["std", "serialize"] }
dkg = { path = "../", version = "0.6", default-features = false, features = ["std"] }
[dev-dependencies]
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
dalek-ff-group = { path = "../../dalek-ff-group", default-features = false }


@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2021-2025 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -1,12 +0,0 @@
# Distributed Key Generation - PedPoP
This implements the PedPoP distributed key generation protocol for the
[`dkg`](https://docs.rs/dkg) crate's types.
This crate was originally part of the `dkg` crate, which was
[audited by Cypher Stack in March 2023](
https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf
), culminating in commit
[669d2dbffc1dafb82a09d9419ea182667115df06](
https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06
). Any subsequent changes have not undergone auditing.
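Below is a minimal sketch of one participant's view of the protocol, using the types this crate defines. It assumes messages are exchanged externally (commitments over an authenticated broadcast channel, encrypted shares over authenticated point-to-point channels), so the received messages are taken as arguments up front purely for brevity; `run_participant` and its argument names are illustrative, not part of the crate.

use std::collections::HashMap;
use rand_core::OsRng;
use ciphersuite::Ciphersuite;
use dalek_ff_group::Ristretto;
use dkg_pedpop::*;

fn run_participant(
  params: ThresholdParams,
  // Must be unique among multisigs and agreed upon out-of-band
  context: [u8; 32],
  // Everyone else's round-one commitments, received over the authenticated channel
  their_commitments: HashMap<Participant, EncryptionKeyMessage<Ristretto, Commitments<Ristretto>>>,
  // The encrypted secret shares the other participants sent to us
  our_shares: HashMap<Participant, EncryptedMessage<Ristretto, SecretShare<<Ristretto as Ciphersuite>::F>>>,
) -> Result<ThresholdKeys<Ristretto>, PedPoPError<Ristretto>> {
  // Round one: generate coefficients, broadcasting our commitments to all parties
  let (machine, _our_commitments) =
    KeyGenMachine::<Ristretto>::new(params, context).generate_coefficients(&mut OsRng);
  // Round two: verify everyone's commitments, sending each encrypted share to its recipient
  let (machine, _shares_to_send) = machine.generate_secret_shares(&mut OsRng, their_commitments)?;
  // Round three: calculate our share, completing only after confirming success with all parties
  Ok(machine.calculate_share(&mut OsRng, our_shares)?.complete())
}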


@@ -1,506 +0,0 @@
use core::{ops::Deref, fmt};
use std::{io, collections::HashMap};
use thiserror::Error;
use zeroize::{Zeroize, Zeroizing};
use rand_core::{RngCore, CryptoRng};
use chacha20::{
cipher::{crypto_common::KeyIvInit, StreamCipher},
Key as Cc20Key, Nonce as Cc20Iv, ChaCha20,
};
use transcript::{Transcript, RecommendedTranscript};
#[cfg(test)]
use ciphersuite::group::ff::Field;
use ciphersuite::{group::GroupEncoding, Ciphersuite};
use multiexp::BatchVerifier;
use schnorr::SchnorrSignature;
use dleq::DLEqProof;
use dkg::{Participant, ThresholdParams};
mod sealed {
use super::*;
pub trait ReadWrite: Sized {
fn read<R: io::Read>(reader: &mut R, params: ThresholdParams) -> io::Result<Self>;
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;
fn serialize(&self) -> Vec<u8> {
let mut buf = vec![];
self.write(&mut buf).unwrap();
buf
}
}
pub trait Message: Clone + PartialEq + Eq + fmt::Debug + Zeroize + ReadWrite {}
impl<M: Clone + PartialEq + Eq + fmt::Debug + Zeroize + ReadWrite> Message for M {}
pub trait Encryptable: Clone + AsRef<[u8]> + AsMut<[u8]> + Zeroize + ReadWrite {}
impl<E: Clone + AsRef<[u8]> + AsMut<[u8]> + Zeroize + ReadWrite> Encryptable for E {}
}
pub(crate) use sealed::*;
/// Wraps a message with a key to use for encryption in the future.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct EncryptionKeyMessage<C: Ciphersuite, M: Message> {
msg: M,
enc_key: C::G,
}
// Doesn't impl ReadWrite so that doesn't need to be imported
impl<C: Ciphersuite, M: Message> EncryptionKeyMessage<C, M> {
pub fn read<R: io::Read>(reader: &mut R, params: ThresholdParams) -> io::Result<Self> {
Ok(Self { msg: M::read(reader, params)?, enc_key: C::read_G(reader)? })
}
pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
self.msg.write(writer)?;
writer.write_all(self.enc_key.to_bytes().as_ref())
}
pub fn serialize(&self) -> Vec<u8> {
let mut buf = vec![];
self.write(&mut buf).unwrap();
buf
}
#[cfg(test)]
pub(crate) fn enc_key(&self) -> C::G {
self.enc_key
}
}
/// An encrypted message, with a per-message encryption key enabling revealing specific messages
/// without side effects.
#[derive(Clone, Zeroize)]
pub struct EncryptedMessage<C: Ciphersuite, E: Encryptable> {
key: C::G,
// Also include a proof-of-possession for the key.
// If this proof-of-possession wasn't here, Eve could observe Alice encrypt to Bob with key X,
// then send Bob a message also claiming to use X.
// While Eve's message would fail to meaningfully decrypt, Bob would then use this to create a
// blame argument against Eve. When they do, they'd reveal bX, revealing Alice's message to Bob.
// This is a massive side effect which could break some protocols, in the worst case.
// While Eve can still reuse their own keys, causing Bob to leak all messages by revealing for
// any single one, that's effectively Eve revealing themselves, and not considered relevant.
pop: SchnorrSignature<C>,
msg: Zeroizing<E>,
}
fn ecdh<C: Ciphersuite>(private: &Zeroizing<C::F>, public: C::G) -> Zeroizing<C::G> {
Zeroizing::new(public * private.deref())
}
// Each ecdh must be distinct. Reuse of an ecdh for multiple ciphers will cause the messages to be
// leaked.
fn cipher<C: Ciphersuite>(context: [u8; 32], ecdh: &Zeroizing<C::G>) -> ChaCha20 {
// Ideally, we'd box this transcript with ZAlloc, yet that's only possible on nightly
// TODO: https://github.com/serai-dex/serai/issues/151
let mut transcript = RecommendedTranscript::new(b"DKG Encryption v0.2");
transcript.append_message(b"context", context);
transcript.domain_separate(b"encryption_key");
let mut ecdh = ecdh.to_bytes();
transcript.append_message(b"shared_key", ecdh.as_ref());
ecdh.as_mut().zeroize();
let zeroize = |buf: &mut [u8]| buf.zeroize();
let mut key = Cc20Key::default();
let mut challenge = transcript.challenge(b"key");
key.copy_from_slice(&challenge[.. 32]);
zeroize(challenge.as_mut());
// Since the key is single-use, it doesn't matter what we use for the IV
// The issue is key + IV reuse. If we never reuse the key, we can't have the opportunity to
// reuse a nonce
// Use a static IV in acknowledgement of this
let mut iv = Cc20Iv::default();
// The \0 is to satisfy the length requirement (12), not to be null terminated
iv.copy_from_slice(b"DKG IV v0.2\0");
// ChaCha20 has the same commentary as the transcript regarding ZAlloc
// TODO: https://github.com/serai-dex/serai/issues/151
let res = ChaCha20::new(&key, &iv);
zeroize(key.as_mut());
res
}
fn encrypt<R: RngCore + CryptoRng, C: Ciphersuite, E: Encryptable>(
rng: &mut R,
context: [u8; 32],
from: Participant,
to: C::G,
mut msg: Zeroizing<E>,
) -> EncryptedMessage<C, E> {
/*
The following code could be used to replace the requirement on an RNG here.
It's just currently not an issue to require taking in an RNG here.
let last = self.last_enc_key.to_bytes();
self.last_enc_key = C::hash_to_F(b"encryption_base", last.as_ref());
let key = C::hash_to_F(b"encryption_key", last.as_ref());
last.as_mut().zeroize();
*/
// Generate a new key for this message, satisfying cipher's requirement of distinct keys per
// message, and enabling revealing this message without revealing any others
let key = Zeroizing::new(C::random_nonzero_F(rng));
cipher::<C>(context, &ecdh::<C>(&key, to)).apply_keystream(msg.as_mut().as_mut());
let pub_key = C::generator() * key.deref();
let nonce = Zeroizing::new(C::random_nonzero_F(rng));
let pub_nonce = C::generator() * nonce.deref();
EncryptedMessage {
key: pub_key,
pop: SchnorrSignature::sign(
&key,
nonce,
pop_challenge::<C>(context, pub_nonce, pub_key, from, msg.deref().as_ref()),
),
msg,
}
}
impl<C: Ciphersuite, E: Encryptable> EncryptedMessage<C, E> {
pub fn read<R: io::Read>(reader: &mut R, params: ThresholdParams) -> io::Result<Self> {
Ok(Self {
key: C::read_G(reader)?,
pop: SchnorrSignature::<C>::read(reader)?,
msg: Zeroizing::new(E::read(reader, params)?),
})
}
pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all(self.key.to_bytes().as_ref())?;
self.pop.write(writer)?;
self.msg.write(writer)
}
pub fn serialize(&self) -> Vec<u8> {
let mut buf = vec![];
self.write(&mut buf).unwrap();
buf
}
#[cfg(test)]
pub(crate) fn invalidate_pop(&mut self) {
self.pop.s += C::F::ONE;
}
#[cfg(test)]
pub(crate) fn invalidate_msg<R: RngCore + CryptoRng>(
&mut self,
rng: &mut R,
context: [u8; 32],
from: Participant,
) {
// Invalidate the message by specifying a new key/Schnorr PoP
// This will cause all initial checks to pass, yet a decrypt to gibberish
let key = Zeroizing::new(C::random_nonzero_F(rng));
let pub_key = C::generator() * key.deref();
let nonce = Zeroizing::new(C::random_nonzero_F(rng));
let pub_nonce = C::generator() * nonce.deref();
self.key = pub_key;
self.pop = SchnorrSignature::sign(
&key,
nonce,
pop_challenge::<C>(context, pub_nonce, pub_key, from, self.msg.deref().as_ref()),
);
}
// Assumes the encrypted message is a secret share.
#[cfg(test)]
pub(crate) fn invalidate_share_serialization<R: RngCore + CryptoRng>(
&mut self,
rng: &mut R,
context: [u8; 32],
from: Participant,
to: C::G,
) {
use ciphersuite::group::ff::PrimeField;
let mut repr = <C::F as PrimeField>::Repr::default();
for b in repr.as_mut() {
*b = 255;
}
// Tries to guarantee the above assumption.
assert_eq!(repr.as_ref().len(), self.msg.as_ref().len());
// Checks that this isn't over a field where this is somehow valid
assert!(!bool::from(C::F::from_repr(repr).is_some()));
self.msg.as_mut().as_mut().copy_from_slice(repr.as_ref());
*self = encrypt(rng, context, from, to, self.msg.clone());
}
// Assumes the encrypted message is a secret share.
#[cfg(test)]
pub(crate) fn invalidate_share_value<R: RngCore + CryptoRng>(
&mut self,
rng: &mut R,
context: [u8; 32],
from: Participant,
to: C::G,
) {
use ciphersuite::group::ff::PrimeField;
// Assumes the share isn't randomly 1
let repr = C::F::ONE.to_repr();
self.msg.as_mut().as_mut().copy_from_slice(repr.as_ref());
*self = encrypt(rng, context, from, to, self.msg.clone());
}
}
/// A proof that the provided encryption key is a legitimately derived shared key for some message.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct EncryptionKeyProof<C: Ciphersuite> {
key: Zeroizing<C::G>,
dleq: DLEqProof<C::G>,
}
impl<C: Ciphersuite> EncryptionKeyProof<C> {
pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
Ok(Self { key: Zeroizing::new(C::read_G(reader)?), dleq: DLEqProof::read(reader)? })
}
pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all(self.key.to_bytes().as_ref())?;
self.dleq.write(writer)
}
pub fn serialize(&self) -> Vec<u8> {
let mut buf = vec![];
self.write(&mut buf).unwrap();
buf
}
#[cfg(test)]
pub(crate) fn invalidate_key(&mut self) {
*self.key += C::generator();
}
#[cfg(test)]
pub(crate) fn invalidate_dleq(&mut self) {
let mut buf = vec![];
self.dleq.write(&mut buf).unwrap();
// Adds one to c since this is serialized c, s
// Adding one to c will leave a validly serialized c
// Adding one to s may leave an invalidly serialized s
buf[0] = buf[0].wrapping_add(1);
self.dleq = DLEqProof::read::<&[u8]>(&mut buf.as_ref()).unwrap();
}
}
// This doesn't need to take the msg. It just doesn't hurt as an extra layer.
// This still doesn't mean the DKG offers an authenticated channel. The per-message keys have no
// root of trust other than their existence in the assumed-to-exist external authenticated channel.
fn pop_challenge<C: Ciphersuite>(
context: [u8; 32],
nonce: C::G,
key: C::G,
sender: Participant,
msg: &[u8],
) -> C::F {
let mut transcript = RecommendedTranscript::new(b"DKG Encryption Key Proof of Possession v0.2");
transcript.append_message(b"context", context);
transcript.domain_separate(b"proof_of_possession");
transcript.append_message(b"nonce", nonce.to_bytes());
transcript.append_message(b"key", key.to_bytes());
// This is sufficient to prevent the attack this is meant to stop
transcript.append_message(b"sender", sender.to_bytes());
// This, as written above, doesn't hurt
transcript.append_message(b"message", msg);
// While this is a PoK and a PoP, it's called a PoP here since the important part is its owner
// Elsewhere, where we use the term PoK, the important part is that it isn't some inverse, with
// a discrete log unknown to anyone, breaking the system
C::hash_to_F(b"DKG-encryption-proof_of_possession", &transcript.challenge(b"schnorr"))
}
fn encryption_key_transcript(context: [u8; 32]) -> RecommendedTranscript {
let mut transcript = RecommendedTranscript::new(b"DKG Encryption Key Correctness Proof v0.2");
transcript.append_message(b"context", context);
transcript
}
#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]
pub(crate) enum DecryptionError {
#[error("accused provided an invalid signature")]
InvalidSignature,
#[error("accuser provided an invalid decryption key")]
InvalidProof,
}
// A simple box for managing decryption.
#[derive(Clone, Debug)]
pub(crate) struct Decryption<C: Ciphersuite> {
context: [u8; 32],
enc_keys: HashMap<Participant, C::G>,
}
impl<C: Ciphersuite> Decryption<C> {
pub(crate) fn new(context: [u8; 32]) -> Self {
Self { context, enc_keys: HashMap::new() }
}
pub(crate) fn register<M: Message>(
&mut self,
participant: Participant,
msg: EncryptionKeyMessage<C, M>,
) -> M {
assert!(
!self.enc_keys.contains_key(&participant),
"Re-registering encryption key for a participant"
);
self.enc_keys.insert(participant, msg.enc_key);
msg.msg
}
// Given a message, and the intended decryptor, and a proof for its key, decrypt the message.
// Returns None if the key was wrong.
pub(crate) fn decrypt_with_proof<E: Encryptable>(
&self,
from: Participant,
decryptor: Participant,
mut msg: EncryptedMessage<C, E>,
// There's no encryption key proof if the accusation is of an invalid signature
proof: Option<EncryptionKeyProof<C>>,
) -> Result<Zeroizing<E>, DecryptionError> {
if !msg.pop.verify(
msg.key,
pop_challenge::<C>(self.context, msg.pop.R, msg.key, from, msg.msg.deref().as_ref()),
) {
Err(DecryptionError::InvalidSignature)?;
}
if let Some(proof) = proof {
// Verify this is the decryption key for this message
proof
.dleq
.verify(
&mut encryption_key_transcript(self.context),
&[C::generator(), msg.key],
&[self.enc_keys[&decryptor], *proof.key],
)
.map_err(|_| DecryptionError::InvalidProof)?;
cipher::<C>(self.context, &proof.key).apply_keystream(msg.msg.as_mut().as_mut());
Ok(msg.msg)
} else {
Err(DecryptionError::InvalidProof)
}
}
}
// A simple box for managing encryption.
#[derive(Clone)]
pub(crate) struct Encryption<C: Ciphersuite> {
context: [u8; 32],
i: Participant,
enc_key: Zeroizing<C::F>,
enc_pub_key: C::G,
decryption: Decryption<C>,
}
impl<C: Ciphersuite> fmt::Debug for Encryption<C> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt
.debug_struct("Encryption")
.field("context", &self.context)
.field("i", &self.i)
.field("enc_pub_key", &self.enc_pub_key)
.field("decryption", &self.decryption)
.finish_non_exhaustive()
}
}
impl<C: Ciphersuite> Zeroize for Encryption<C> {
fn zeroize(&mut self) {
self.enc_key.zeroize();
self.enc_pub_key.zeroize();
for (_, mut value) in self.decryption.enc_keys.drain() {
value.zeroize();
}
}
}
impl<C: Ciphersuite> Encryption<C> {
pub(crate) fn new<R: RngCore + CryptoRng>(
context: [u8; 32],
i: Participant,
rng: &mut R,
) -> Self {
let enc_key = Zeroizing::new(C::random_nonzero_F(rng));
Self {
context,
i,
enc_pub_key: C::generator() * enc_key.deref(),
enc_key,
decryption: Decryption::new(context),
}
}
pub(crate) fn registration<M: Message>(&self, msg: M) -> EncryptionKeyMessage<C, M> {
EncryptionKeyMessage { msg, enc_key: self.enc_pub_key }
}
pub(crate) fn register<M: Message>(
&mut self,
participant: Participant,
msg: EncryptionKeyMessage<C, M>,
) -> M {
self.decryption.register(participant, msg)
}
pub(crate) fn encrypt<R: RngCore + CryptoRng, E: Encryptable>(
&self,
rng: &mut R,
participant: Participant,
msg: Zeroizing<E>,
) -> EncryptedMessage<C, E> {
encrypt(rng, self.context, self.i, self.decryption.enc_keys[&participant], msg)
}
pub(crate) fn decrypt<R: RngCore + CryptoRng, I: Copy + Zeroize, E: Encryptable>(
&self,
rng: &mut R,
batch: &mut BatchVerifier<I, C::G>,
// Uses a distinct batch ID so if this batch verifier is reused, we know it's the PoP aspect
// which failed, and therefore to use None for the blame
batch_id: I,
from: Participant,
mut msg: EncryptedMessage<C, E>,
) -> (Zeroizing<E>, EncryptionKeyProof<C>) {
msg.pop.batch_verify(
rng,
batch,
batch_id,
msg.key,
pop_challenge::<C>(self.context, msg.pop.R, msg.key, from, msg.msg.deref().as_ref()),
);
let key = ecdh::<C>(&self.enc_key, msg.key);
cipher::<C>(self.context, &key).apply_keystream(msg.msg.as_mut().as_mut());
(
msg.msg,
EncryptionKeyProof {
key,
dleq: DLEqProof::prove(
rng,
&mut encryption_key_transcript(self.context),
&[C::generator(), msg.key],
&self.enc_key,
),
},
)
}
pub(crate) fn into_decryption(self) -> Decryption<C> {
self.decryption
}
}


@@ -1,683 +0,0 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
// This crate requires `dleq` which doesn't support no-std via std-shims
// #![cfg_attr(not(feature = "std"), no_std)]
use core::{marker::PhantomData, ops::Deref, fmt};
use std::{
io::{self, Read, Write},
collections::HashMap,
};
use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
use rand_core::{RngCore, CryptoRng};
use transcript::{Transcript, RecommendedTranscript};
use multiexp::{multiexp_vartime, BatchVerifier};
use ciphersuite::{
group::{
ff::{Field, PrimeField},
Group, GroupEncoding,
},
Ciphersuite,
};
use schnorr::SchnorrSignature;
pub use dkg::*;
mod encryption;
pub use encryption::*;
#[cfg(test)]
mod tests;
/// Errors possible during key generation.
#[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)]
pub enum PedPoPError<C: Ciphersuite> {
/// An incorrect amount of participants was provided.
#[error("incorrect amount of participants (expected {expected}, found {found})")]
IncorrectAmountOfParticipants { expected: usize, found: usize },
/// An invalid proof of knowledge was provided.
#[error("invalid proof of knowledge (participant {0})")]
InvalidCommitments(Participant),
/// An invalid DKG share was provided.
#[error("invalid share (participant {participant}, blame {blame})")]
InvalidShare { participant: Participant, blame: Option<EncryptionKeyProof<C>> },
/// A participant was missing.
#[error("missing participant {0}")]
MissingParticipant(Participant),
/// An error propagated from the underlying `dkg` crate.
#[error("error from dkg ({0})")]
DkgError(DkgError),
}
// Validate a map of values to have the expected included participants
fn validate_map<T, C: Ciphersuite>(
map: &HashMap<Participant, T>,
included: &[Participant],
ours: Participant,
) -> Result<(), PedPoPError<C>> {
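// The map is expected to contain every included participant except ourselves, hence the +1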
if (map.len() + 1) != included.len() {
Err(PedPoPError::IncorrectAmountOfParticipants {
expected: included.len(),
found: map.len() + 1,
})?;
}
for included in included {
if *included == ours {
if map.contains_key(included) {
Err(PedPoPError::DkgError(DkgError::DuplicatedParticipant(*included)))?;
}
continue;
}
if !map.contains_key(included) {
Err(PedPoPError::MissingParticipant(*included))?;
}
}
Ok(())
}
#[allow(non_snake_case)]
fn challenge<C: Ciphersuite>(context: [u8; 32], l: Participant, R: &[u8], Am: &[u8]) -> C::F {
let mut transcript = RecommendedTranscript::new(b"DKG PedPoP v0.2");
transcript.domain_separate(b"schnorr_proof_of_knowledge");
transcript.append_message(b"context", context);
transcript.append_message(b"participant", l.to_bytes());
transcript.append_message(b"nonce", R);
transcript.append_message(b"commitments", Am);
C::hash_to_F(b"DKG-PedPoP-proof_of_knowledge-0", &transcript.challenge(b"schnorr"))
}
/// The commitments message, intended to be broadcast to all other parties.
///
/// Every participant should only provide one set of commitments to all parties. If any
/// participant sends multiple sets of commitments, they are faulty and should be presumed
/// malicious. As this library does not handle networking, it is unable to detect if any
/// participant is so faulty. That responsibility lies with the caller.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct Commitments<C: Ciphersuite> {
commitments: Vec<C::G>,
cached_msg: Vec<u8>,
sig: SchnorrSignature<C>,
}
impl<C: Ciphersuite> ReadWrite for Commitments<C> {
fn read<R: Read>(reader: &mut R, params: ThresholdParams) -> io::Result<Self> {
let mut commitments = Vec::with_capacity(params.t().into());
let mut cached_msg = vec![];
#[allow(non_snake_case)]
let mut read_G = || -> io::Result<C::G> {
let mut buf = <C::G as GroupEncoding>::Repr::default();
reader.read_exact(buf.as_mut())?;
let point = C::read_G(&mut buf.as_ref())?;
cached_msg.extend(buf.as_ref());
Ok(point)
};
for _ in 0 .. params.t() {
commitments.push(read_G()?);
}
Ok(Commitments { commitments, cached_msg, sig: SchnorrSignature::read(reader)? })
}
fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all(&self.cached_msg)?;
self.sig.write(writer)
}
}
/// State machine to begin the key generation protocol.
#[derive(Debug, Zeroize)]
pub struct KeyGenMachine<C: Ciphersuite> {
params: ThresholdParams,
context: [u8; 32],
_curve: PhantomData<C>,
}
impl<C: Ciphersuite> KeyGenMachine<C> {
/// Create a new machine to generate a key.
///
/// The context should be unique among multisigs.
pub fn new(params: ThresholdParams, context: [u8; 32]) -> KeyGenMachine<C> {
KeyGenMachine { params, context, _curve: PhantomData }
}
/// Start generating a key according to the PedPoP DKG specification present in the FROST paper.
///
/// Returns a commitments message to be sent to all parties over an authenticated channel. If any
/// party submits multiple sets of commitments, they MUST be treated as malicious.
pub fn generate_coefficients<R: RngCore + CryptoRng>(
self,
rng: &mut R,
) -> (SecretShareMachine<C>, EncryptionKeyMessage<C, Commitments<C>>) {
let t = usize::from(self.params.t());
let mut coefficients = Vec::with_capacity(t);
let mut commitments = Vec::with_capacity(t);
let mut cached_msg = vec![];
for i in 0 .. t {
// Step 1: Generate t random values to form a polynomial with
coefficients.push(Zeroizing::new(C::random_nonzero_F(&mut *rng)));
// Step 3: Generate public commitments
commitments.push(C::generator() * coefficients[i].deref());
cached_msg.extend(commitments[i].to_bytes().as_ref());
}
// Step 2: Provide a proof of knowledge
let r = Zeroizing::new(C::random_nonzero_F(rng));
let nonce = C::generator() * r.deref();
let sig = SchnorrSignature::<C>::sign(
&coefficients[0],
// This could be deterministic as the PoK is a singleton never opened up to cooperative
// discussion
// There's no reason to spend the time and effort to make this deterministic besides a
// general obsession with canonicity and determinism though
r,
challenge::<C>(self.context, self.params.i(), nonce.to_bytes().as_ref(), &cached_msg),
);
// Additionally create an encryption mechanism to protect the secret shares
let encryption = Encryption::new(self.context, self.params.i(), rng);
// Step 4: Broadcast
let msg =
encryption.registration(Commitments { commitments: commitments.clone(), cached_msg, sig });
(
SecretShareMachine {
params: self.params,
context: self.context,
coefficients,
our_commitments: commitments,
encryption,
},
msg,
)
}
}
fn polynomial<F: PrimeField + Zeroize>(
coefficients: &[Zeroizing<F>],
l: Participant,
) -> Zeroizing<F> {
let l = F::from(u64::from(u16::from(l)));
// This should never be reached since Participant is explicitly non-zero
assert!(l != F::ZERO, "zero participant passed to polynomial");
let mut share = Zeroizing::new(F::ZERO);
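// Evaluate the polynomial at l via Horner's method, iterating from the highest-degree
// coefficient down to the constant term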
for (idx, coefficient) in coefficients.iter().rev().enumerate() {
*share += coefficient.deref();
if idx != (coefficients.len() - 1) {
*share *= l;
}
}
share
}
/// The secret share message, to be sent to the party it's intended for over an authenticated
/// channel.
///
/// If any participant sends multiple secret shares to another participant, they are faulty.
// This should presumably be written as SecretShare(Zeroizing<F::Repr>).
// It's unfortunately not possible as F::Repr doesn't have Zeroize as a bound.
// The encryption system also explicitly uses Zeroizing<M> so it can ensure anything being
// encrypted is within Zeroizing. Accordingly, internally having Zeroizing would be redundant.
#[derive(Clone, PartialEq, Eq)]
pub struct SecretShare<F: PrimeField>(F::Repr);
impl<F: PrimeField> AsRef<[u8]> for SecretShare<F> {
fn as_ref(&self) -> &[u8] {
self.0.as_ref()
}
}
impl<F: PrimeField> AsMut<[u8]> for SecretShare<F> {
fn as_mut(&mut self) -> &mut [u8] {
self.0.as_mut()
}
}
impl<F: PrimeField> fmt::Debug for SecretShare<F> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("SecretShare").finish_non_exhaustive()
}
}
impl<F: PrimeField> Zeroize for SecretShare<F> {
fn zeroize(&mut self) {
self.0.as_mut().zeroize()
}
}
// Still manually implement ZeroizeOnDrop to ensure these don't stick around.
// We could replace Zeroizing<M> with a bound M: ZeroizeOnDrop.
// Doing so would potentially fail to highlight the expected behavior with these and remove a layer
// of depth.
impl<F: PrimeField> Drop for SecretShare<F> {
fn drop(&mut self) {
self.zeroize();
}
}
impl<F: PrimeField> ZeroizeOnDrop for SecretShare<F> {}
impl<F: PrimeField> ReadWrite for SecretShare<F> {
fn read<R: Read>(reader: &mut R, _: ThresholdParams) -> io::Result<Self> {
let mut repr = F::Repr::default();
reader.read_exact(repr.as_mut())?;
Ok(SecretShare(repr))
}
fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all(self.0.as_ref())
}
}
/// Advancement of the key generation state machine.
#[derive(Zeroize)]
pub struct SecretShareMachine<C: Ciphersuite> {
params: ThresholdParams,
context: [u8; 32],
coefficients: Vec<Zeroizing<C::F>>,
our_commitments: Vec<C::G>,
encryption: Encryption<C>,
}
impl<C: Ciphersuite> fmt::Debug for SecretShareMachine<C> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt
.debug_struct("SecretShareMachine")
.field("params", &self.params)
.field("context", &self.context)
.field("our_commitments", &self.our_commitments)
.field("encryption", &self.encryption)
.finish_non_exhaustive()
}
}
impl<C: Ciphersuite> SecretShareMachine<C> {
/// Verify the data from the previous round (canonicity, PoKs, message authenticity)
#[allow(clippy::type_complexity)]
fn verify_r1<R: RngCore + CryptoRng>(
&mut self,
rng: &mut R,
mut commitment_msgs: HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
) -> Result<HashMap<Participant, Vec<C::G>>, PedPoPError<C>> {
validate_map(
&commitment_msgs,
&self.params.all_participant_indexes().collect::<Vec<_>>(),
self.params.i(),
)?;
let mut batch = BatchVerifier::<Participant, C::G>::new(commitment_msgs.len());
let mut commitments = HashMap::new();
for l in self.params.all_participant_indexes() {
let Some(msg) = commitment_msgs.remove(&l) else { continue };
let mut msg = self.encryption.register(l, msg);
if msg.commitments.len() != self.params.t().into() {
Err(PedPoPError::InvalidCommitments(l))?;
}
// Step 5: Validate each proof of knowledge
// This is solely the prep step for the latter batch verification
msg.sig.batch_verify(
rng,
&mut batch,
l,
msg.commitments[0],
challenge::<C>(self.context, l, msg.sig.R.to_bytes().as_ref(), &msg.cached_msg),
);
commitments.insert(l, msg.commitments.drain(..).collect::<Vec<_>>());
}
batch.verify_vartime_with_vartime_blame().map_err(PedPoPError::InvalidCommitments)?;
commitments.insert(self.params.i(), self.our_commitments.drain(..).collect());
Ok(commitments)
}
/// Continue generating a key.
///
/// Takes in everyone else's commitments. Returns a HashMap of encrypted secret shares to be sent
/// over authenticated channels to their relevant counterparties.
///
/// If any participant sends multiple secret shares to another participant, they are faulty.
#[allow(clippy::type_complexity)]
pub fn generate_secret_shares<R: RngCore + CryptoRng>(
mut self,
rng: &mut R,
commitments: HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
) -> Result<
(KeyMachine<C>, HashMap<Participant, EncryptedMessage<C, SecretShare<C::F>>>),
PedPoPError<C>,
> {
let commitments = self.verify_r1(&mut *rng, commitments)?;
// Step 1: Generate secret shares for all other parties
let mut res = HashMap::new();
for l in self.params.all_participant_indexes() {
// Don't insert our own shares to the byte buffer which is meant to be sent around
// An app developer could accidentally send it. Best to keep this black boxed
if l == self.params.i() {
continue;
}
let mut share = polynomial(&self.coefficients, l);
let share_bytes = Zeroizing::new(SecretShare::<C::F>(share.to_repr()));
share.zeroize();
res.insert(l, self.encryption.encrypt(rng, l, share_bytes));
}
// Calculate our own share
let share = polynomial(&self.coefficients, self.params.i());
self.coefficients.zeroize();
Ok((
KeyMachine { params: self.params, secret: share, commitments, encryption: self.encryption },
res,
))
}
}
/// Advancement of the secret share state machine.
///
/// This machine will 'complete' the protocol, from a local perspective. In order to be secure,
/// the parties must confirm having successfully completed the protocol (an effort out of scope
/// for this library), which is modeled by one more state transition (BlameMachine).
pub struct KeyMachine<C: Ciphersuite> {
params: ThresholdParams,
secret: Zeroizing<C::F>,
commitments: HashMap<Participant, Vec<C::G>>,
encryption: Encryption<C>,
}
impl<C: Ciphersuite> fmt::Debug for KeyMachine<C> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt
.debug_struct("KeyMachine")
.field("params", &self.params)
.field("commitments", &self.commitments)
.field("encryption", &self.encryption)
.finish_non_exhaustive()
}
}
impl<C: Ciphersuite> Zeroize for KeyMachine<C> {
fn zeroize(&mut self) {
self.params.zeroize();
self.secret.zeroize();
for commitments in self.commitments.values_mut() {
commitments.zeroize();
}
self.encryption.zeroize();
}
}
// Calculate the exponent for a given participant and apply it to a series of commitments
// Initially used with the actual commitments to verify the secret share, later used with
// stripes to generate the verification shares
fn exponential<C: Ciphersuite>(i: Participant, values: &[C::G]) -> Vec<(C::F, C::G)> {
let i = C::F::from(u16::from(i).into());
let mut res = Vec::with_capacity(values.len());
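// Pair each value with the ascending powers of i: (i^0, values[0]), (i^1, values[1]), ..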
(0 .. values.len()).fold(C::F::ONE, |exp, l| {
res.push((exp, values[l]));
exp * i
});
res
}
fn share_verification_statements<C: Ciphersuite>(
target: Participant,
commitments: &[C::G],
mut share: Zeroizing<C::F>,
) -> Vec<(C::F, C::G)> {
// This can be insecurely linearized from n * t to just n using the below sums for a given
// stripe. Doing so uses naive addition which is subject to malleability. The only way to
// ensure that malleability isn't present is to use this n * t algorithm, which runs
// per sender and not as an aggregate of all senders, which also enables blame
let mut values = exponential::<C>(target, commitments);
// Perform the share multiplication outside of the multiexp to minimize stack copying
// While the multiexp BatchVerifier does zeroize its flattened multiexp, and itself, it still
// converts whatever we give to an iterator and then builds a Vec internally, welcoming copies
let neg_share_pub = C::generator() * -*share;
share.zeroize();
values.push((C::F::ONE, neg_share_pub));
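// A multiexp over these pairs evaluates f(target) * G - share * G, for the committed
// polynomial f, which is the identity if and only if the share matches the commitments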
values
}
#[derive(Clone, Copy, Hash, Debug, Zeroize)]
enum BatchId {
Decryption(Participant),
Share(Participant),
}
impl<C: Ciphersuite> KeyMachine<C> {
/// Calculate our share given the shares sent to us.
///
/// Returns a BlameMachine usable to determine if faults in the protocol occurred.
///
/// This will error on, and return a blame proof for, the first-observed case of faulty behavior.
pub fn calculate_share<R: RngCore + CryptoRng>(
mut self,
rng: &mut R,
mut shares: HashMap<Participant, EncryptedMessage<C, SecretShare<C::F>>>,
) -> Result<BlameMachine<C>, PedPoPError<C>> {
validate_map(
&shares,
&self.params.all_participant_indexes().collect::<Vec<_>>(),
self.params.i(),
)?;
let mut batch = BatchVerifier::new(shares.len());
let mut blames = HashMap::new();
for (l, share_bytes) in shares.drain() {
let (mut share_bytes, blame) =
self.encryption.decrypt(rng, &mut batch, BatchId::Decryption(l), l, share_bytes);
let share =
Zeroizing::new(Option::<C::F>::from(C::F::from_repr(share_bytes.0)).ok_or_else(|| {
PedPoPError::InvalidShare { participant: l, blame: Some(blame.clone()) }
})?);
share_bytes.zeroize();
*self.secret += share.deref();
blames.insert(l, blame);
batch.queue(
rng,
BatchId::Share(l),
share_verification_statements::<C>(self.params.i(), &self.commitments[&l], share),
);
}
batch.verify_with_vartime_blame().map_err(|id| {
let (l, blame) = match id {
BatchId::Decryption(l) => (l, None),
BatchId::Share(l) => (l, Some(blames.remove(&l).unwrap())),
};
PedPoPError::InvalidShare { participant: l, blame }
})?;
// Stripe commitments per t and sum them in advance. Calculating verification shares relies on
// these sums so preprocessing them is a massive speedup
// If these weren't just sums, yet the tables used in multiexp, this would be further optimized
// As of right now, each multiexp will regenerate them
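// stripes[j] is the sum of every participant's j-th commitment, so exponential(i, &stripes)
// yields the statements for the summed polynomial evaluated at i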
let mut stripes = Vec::with_capacity(usize::from(self.params.t()));
for t in 0 .. usize::from(self.params.t()) {
stripes.push(self.commitments.values().map(|commitments| commitments[t]).sum());
}
// Calculate each user's verification share
let mut verification_shares = HashMap::new();
for i in self.params.all_participant_indexes() {
verification_shares.insert(
i,
if i == self.params.i() {
C::generator() * self.secret.deref()
} else {
multiexp_vartime(&exponential::<C>(i, &stripes))
},
);
}
let KeyMachine { commitments, encryption, params, secret } = self;
Ok(BlameMachine {
commitments,
encryption: encryption.into_decryption(),
result: Some(
ThresholdKeys::new(params, Interpolation::Lagrange, secret, verification_shares)
.map_err(PedPoPError::DkgError)?,
),
})
}
}
/// A machine capable of handling blame proofs.
pub struct BlameMachine<C: Ciphersuite> {
commitments: HashMap<Participant, Vec<C::G>>,
encryption: Decryption<C>,
result: Option<ThresholdKeys<C>>,
}
impl<C: Ciphersuite> fmt::Debug for BlameMachine<C> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt
.debug_struct("BlameMachine")
.field("commitments", &self.commitments)
.field("encryption", &self.encryption)
.finish_non_exhaustive()
}
}
impl<C: Ciphersuite> Zeroize for BlameMachine<C> {
fn zeroize(&mut self) {
for commitments in self.commitments.values_mut() {
commitments.zeroize();
}
self.result.zeroize();
}
}
impl<C: Ciphersuite> BlameMachine<C> {
/// Mark the protocol as having been successfully completed, returning the generated keys.
/// This should only be called after having confirmed, with all participants, successful
/// completion.
///
/// Confirming successful completion is not necessarily as simple as everyone reporting their
/// completion. Everyone must also receive everyone's report of completion, entering into the
/// territory of consensus protocols. This library does not handle that nor does it provide any
/// tooling to do so. This function is solely intended to force users to acknowledge they're
/// completing the protocol, not processing any blame.
pub fn complete(self) -> ThresholdKeys<C> {
self.result.unwrap()
}
fn blame_internal(
&self,
sender: Participant,
recipient: Participant,
msg: EncryptedMessage<C, SecretShare<C::F>>,
proof: Option<EncryptionKeyProof<C>>,
) -> Participant {
let share_bytes = match self.encryption.decrypt_with_proof(sender, recipient, msg, proof) {
Ok(share_bytes) => share_bytes,
// If there's an invalid signature, the sender did not send a properly formed message
Err(DecryptionError::InvalidSignature) => return sender,
// Decryption will fail if the provided ECDH key wasn't correct for the given message
Err(DecryptionError::InvalidProof) => return recipient,
};
let Some(share) = Option::<C::F>::from(C::F::from_repr(share_bytes.0)) else {
// If this isn't a valid scalar, the sender is faulty
return sender;
};
// If this isn't a valid share, the sender is faulty
if !bool::from(
multiexp_vartime(&share_verification_statements::<C>(
recipient,
&self.commitments[&sender],
Zeroizing::new(share),
))
.is_identity(),
) {
return sender;
}
// The share was canonical and valid
recipient
}
/// Given an accusation of fault, determine the faulty party (either the sender, who sent an
/// invalid secret share, or the receiver, who claimed a valid secret share was invalid). No
/// matter which, prevent completion of the machine, forcing an abort of the protocol.
///
/// The message should be a copy of the encrypted secret share from the accused sender to the
/// accusing recipient. This message must have been authenticated as actually having come from
/// the sender in question.
///
/// In order to enable detecting multiple faults, an `AdditionalBlameMachine` is returned, which
/// can be used to determine further blame. These machines will process the same blame statements
/// multiple times, always identifying blame. It is the caller's job to ensure they're unique in
/// order to prevent multiple instances of blame over a single incident.
pub fn blame(
self,
sender: Participant,
recipient: Participant,
msg: EncryptedMessage<C, SecretShare<C::F>>,
proof: Option<EncryptionKeyProof<C>>,
) -> (AdditionalBlameMachine<C>, Participant) {
let faulty = self.blame_internal(sender, recipient, msg, proof);
(AdditionalBlameMachine(self), faulty)
}
}
/// A machine capable of handling an arbitrary amount of additional blame proofs.
#[derive(Debug, Zeroize)]
pub struct AdditionalBlameMachine<C: Ciphersuite>(BlameMachine<C>);
impl<C: Ciphersuite> AdditionalBlameMachine<C> {
/// Create an AdditionalBlameMachine capable of evaluating Blame regardless of whether the caller
/// was a member of the DKG protocol.
///
/// Takes in the parameters for the DKG protocol and all of the participant's commitment
/// messages.
///
/// This constructor assumes the full validity of the commitment messages. They must be fully
/// authenticated as having come from the supposed party and verified as valid. Usage of invalid
/// commitments is considered undefined behavior, and may cause everything from inaccurate blame
/// to panics.
pub fn new(
context: [u8; 32],
n: u16,
mut commitment_msgs: HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
) -> Result<Self, PedPoPError<C>> {
let mut commitments = HashMap::new();
let mut encryption = Decryption::new(context);
for i in 1 ..= n {
let i = Participant::new(i).unwrap();
let Some(msg) = commitment_msgs.remove(&i) else { Err(PedPoPError::MissingParticipant(i))? };
commitments.insert(i, encryption.register(i, msg).commitments);
}
Ok(AdditionalBlameMachine(BlameMachine { commitments, encryption, result: None }))
}
/// Given an accusation of fault, determine the faulty party (either the sender, who sent an
/// invalid secret share, or the receiver, who claimed a valid secret share was invalid).
///
/// The message should be a copy of the encrypted secret share from the accused sender to the
/// accusing recipient. This message must have been authenticated as actually having come from
/// the sender in question.
///
/// This will process the same blame statement multiple times, always identifying blame. It is
/// the caller's job to ensure they're unique in order to prevent multiple instances of blame
/// over a single incident.
pub fn blame(
&self,
sender: Participant,
recipient: Participant,
msg: EncryptedMessage<C, SecretShare<C::F>>,
proof: Option<EncryptionKeyProof<C>>,
) -> Participant {
self.0.blame_internal(sender, recipient, msg, proof)
}
}


@@ -1,346 +0,0 @@
use std::collections::HashMap;
use rand_core::{RngCore, CryptoRng, OsRng};
use dalek_ff_group::Ristretto;
use ciphersuite::Ciphersuite;
use crate::*;
const THRESHOLD: u16 = 3;
const PARTICIPANTS: u16 = 5;
/// Clone a map without a specific value.
fn clone_without<K: Clone + core::cmp::Eq + core::hash::Hash, V: Clone>(
map: &HashMap<K, V>,
without: &K,
) -> HashMap<K, V> {
let mut res = map.clone();
res.remove(without).unwrap();
res
}
type PedPoPEncryptedMessage<C> = EncryptedMessage<C, SecretShare<<C as Ciphersuite>::F>>;
type PedPoPSecretShares<C> = HashMap<Participant, PedPoPEncryptedMessage<C>>;
const CONTEXT: [u8; 32] = *b"DKG Test Key Generation         ";
// Commit, then return commitment messages, enc keys, and shares
#[allow(clippy::type_complexity)]
fn commit_enc_keys_and_shares<R: RngCore + CryptoRng, C: Ciphersuite>(
rng: &mut R,
) -> (
HashMap<Participant, KeyMachine<C>>,
HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
HashMap<Participant, C::G>,
HashMap<Participant, PedPoPSecretShares<C>>,
) {
let mut machines = HashMap::new();
let mut commitments = HashMap::new();
let mut enc_keys = HashMap::new();
for i in (1 ..= PARTICIPANTS).map(|i| Participant::new(i).unwrap()) {
let params = ThresholdParams::new(THRESHOLD, PARTICIPANTS, i).unwrap();
let machine = KeyGenMachine::<C>::new(params, CONTEXT);
let (machine, these_commitments) = machine.generate_coefficients(rng);
machines.insert(i, machine);
commitments.insert(
i,
EncryptionKeyMessage::read::<&[u8]>(&mut these_commitments.serialize().as_ref(), params)
.unwrap(),
);
enc_keys.insert(i, commitments[&i].enc_key());
}
let mut secret_shares = HashMap::new();
let machines = machines
.drain()
.map(|(l, machine)| {
let (machine, mut shares) =
machine.generate_secret_shares(rng, clone_without(&commitments, &l)).unwrap();
let shares = shares
.drain()
.map(|(l, share)| {
(
l,
EncryptedMessage::read::<&[u8]>(
&mut share.serialize().as_ref(),
// Only t/n actually matters, so hardcode i to 1 here
ThresholdParams::new(THRESHOLD, PARTICIPANTS, Participant::new(1).unwrap()).unwrap(),
)
.unwrap(),
)
})
.collect::<HashMap<_, _>>();
secret_shares.insert(l, shares);
(l, machine)
})
.collect::<HashMap<_, _>>();
(machines, commitments, enc_keys, secret_shares)
}
fn generate_secret_shares<C: Ciphersuite>(
shares: &HashMap<Participant, PedPoPSecretShares<C>>,
recipient: Participant,
) -> PedPoPSecretShares<C> {
let mut our_secret_shares = HashMap::new();
for (i, shares) in shares {
if recipient == *i {
continue;
}
our_secret_shares.insert(*i, shares[&recipient].clone());
}
our_secret_shares
}
/// Fully perform the PedPoP key generation algorithm.
fn pedpop_gen<R: RngCore + CryptoRng, C: Ciphersuite>(
rng: &mut R,
) -> HashMap<Participant, ThresholdKeys<C>> {
let (mut machines, _, _, secret_shares) = commit_enc_keys_and_shares::<_, C>(rng);
let mut verification_shares = None;
let mut group_key = None;
machines
.drain()
.map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i);
let these_keys = machine.calculate_share(rng, our_secret_shares).unwrap().complete();
// Verify the verification_shares are agreed upon
if verification_shares.is_none() {
verification_shares = Some(
these_keys
.params()
.all_participant_indexes()
.map(|i| (i, these_keys.original_verification_share(i)))
.collect::<HashMap<_, _>>(),
);
}
assert_eq!(
verification_shares.as_ref().unwrap(),
&these_keys
.params()
.all_participant_indexes()
.map(|i| (i, these_keys.original_verification_share(i)))
.collect::<HashMap<_, _>>()
);
// Verify the group keys are agreed upon
if group_key.is_none() {
group_key = Some(these_keys.group_key());
}
assert_eq!(group_key.unwrap(), these_keys.group_key());
(i, these_keys)
})
.collect::<HashMap<_, _>>()
}
const ONE: Participant = Participant::new(1).unwrap();
const TWO: Participant = Participant::new(2).unwrap();
#[test]
fn test_pedpop() {
let _ = core::hint::black_box(pedpop_gen::<_, Ristretto>(&mut OsRng));
}
fn test_blame(
commitment_msgs: &HashMap<Participant, EncryptionKeyMessage<Ristretto, Commitments<Ristretto>>>,
machines: Vec<BlameMachine<Ristretto>>,
msg: &PedPoPEncryptedMessage<Ristretto>,
blame: &Option<EncryptionKeyProof<Ristretto>>,
) {
for machine in machines {
let (additional, blamed) = machine.blame(ONE, TWO, msg.clone(), blame.clone());
assert_eq!(blamed, ONE);
// Verify additional blame also works
assert_eq!(additional.blame(ONE, TWO, msg.clone(), blame.clone()), ONE);
// Verify machines constructed with AdditionalBlameMachine::new work
assert_eq!(
AdditionalBlameMachine::new(CONTEXT, PARTICIPANTS, commitment_msgs.clone()).unwrap().blame(
ONE,
TWO,
msg.clone(),
blame.clone()
),
ONE,
);
}
}
// TODO: Write a macro which expands to the following
#[test]
fn invalid_encryption_pop_blame() {
let (mut machines, commitment_msgs, _, mut secret_shares) =
commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
// Mutate the PoP of the encrypted message from 1 to 2
secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_pop();
let mut blame = None;
let machines = machines
.drain()
.filter_map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i);
let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
if i == TWO {
assert_eq!(
machine.err(),
Some(PedPoPError::InvalidShare { participant: ONE, blame: None })
);
// Explicitly declare we have a blame object, which happens to be None since invalid PoP
// is self-explanatory
blame = Some(None);
None
} else {
Some(machine.unwrap())
}
})
.collect::<Vec<_>>();
test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap());
}
#[test]
fn invalid_ecdh_blame() {
let (mut machines, commitment_msgs, _, mut secret_shares) =
commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
// Mutate the share to trigger a blame event
// Mutates from 2 to 1, as 1 is expected to end up malicious for test_blame to pass
// While 2 is the malicious one here, this is so 1 is the one to create the blame proof
// We then malleate 1's blame proof, so 1 ends up malicious
// Doesn't simply invalidate the PoP as that won't have a blame statement
// By mutating the encrypted data, we do ensure a blame statement is created
secret_shares
.get_mut(&TWO)
.unwrap()
.get_mut(&ONE)
.unwrap()
.invalidate_msg(&mut OsRng, CONTEXT, TWO);
let mut blame = None;
let machines = machines
.drain()
.filter_map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i);
let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
if i == ONE {
blame = Some(match machine.err() {
Some(PedPoPError::InvalidShare { participant: TWO, blame: Some(blame) }) => Some(blame),
_ => panic!(),
});
None
} else {
Some(machine.unwrap())
}
})
.collect::<Vec<_>>();
blame.as_mut().unwrap().as_mut().unwrap().invalidate_key();
test_blame(&commitment_msgs, machines, &secret_shares[&TWO][&ONE].clone(), &blame.unwrap());
}
// This should be largely equivalent to the prior test
#[test]
fn invalid_dleq_blame() {
let (mut machines, commitment_msgs, _, mut secret_shares) =
commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
secret_shares
.get_mut(&TWO)
.unwrap()
.get_mut(&ONE)
.unwrap()
.invalidate_msg(&mut OsRng, CONTEXT, TWO);
let mut blame = None;
let machines = machines
.drain()
.filter_map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i);
let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
if i == ONE {
blame = Some(match machine.err() {
Some(PedPoPError::InvalidShare { participant: TWO, blame: Some(blame) }) => Some(blame),
_ => panic!(),
});
None
} else {
Some(machine.unwrap())
}
})
.collect::<Vec<_>>();
blame.as_mut().unwrap().as_mut().unwrap().invalidate_dleq();
test_blame(&commitment_msgs, machines, &secret_shares[&TWO][&ONE].clone(), &blame.unwrap());
}
#[test]
fn invalid_share_serialization_blame() {
let (mut machines, commitment_msgs, enc_keys, mut secret_shares) =
commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_share_serialization(
&mut OsRng,
CONTEXT,
ONE,
enc_keys[&TWO],
);
let mut blame = None;
let machines = machines
.drain()
.filter_map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i);
let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
if i == TWO {
blame = Some(match machine.err() {
Some(PedPoPError::InvalidShare { participant: ONE, blame: Some(blame) }) => Some(blame),
_ => panic!(),
});
None
} else {
Some(machine.unwrap())
}
})
.collect::<Vec<_>>();
test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap());
}
#[test]
fn invalid_share_value_blame() {
let (mut machines, commitment_msgs, enc_keys, mut secret_shares) =
commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_share_value(
&mut OsRng,
CONTEXT,
ONE,
enc_keys[&TWO],
);
let mut blame = None;
let machines = machines
.drain()
.filter_map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i);
let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
if i == TWO {
blame = Some(match machine.err() {
Some(PedPoPError::InvalidShare { participant: ONE, blame: Some(blame) }) => Some(blame),
_ => panic!(),
});
None
} else {
Some(machine.unwrap())
}
})
.collect::<Vec<_>>();
test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap());
}


@@ -1,34 +0,0 @@
[package]
name = "dkg-promote"
version = "0.6.1"
description = "Promotions for keys from the dkg crate"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/promote"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
rust-version = "1.80"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
thiserror = { version = "2", default-features = false, features = ["std"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../../transcript", version = "^0.3.2", default-features = false, features = ["std", "recommended"] }
ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false, features = ["std"] }
dleq = { path = "../../dleq", version = "^0.4.1", default-features = false, features = ["std", "serialize"] }
dkg = { path = "../", version = "0.6.1", default-features = false, features = ["std"] }
[dev-dependencies]
zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] }
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
dalek-ff-group = { path = "../../dalek-ff-group" }
dkg-recovery = { path = "../recovery", default-features = false, features = ["std"] }


@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2021-2025 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -1,13 +0,0 @@
# Distributed Key Generation - Promote
This crate implements 'promotions' for keys from the
[`dkg`](https://docs.rs/dkg) crate. A promotion takes a set of keys and maps it
to a different `Ciphersuite`.
This crate was originally part of the `dkg` crate, which was
[audited by Cypher Stack in March 2023](
https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf
), culminating in commit
[669d2dbffc1dafb82a09d9419ea182667115df06](
https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06
). Any subsequent changes have not undergone auditing.
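Below is a minimal sketch of the flow, using the types this crate defines. Collecting the other participants' proofs over an authenticated channel is assumed to happen externally, so they're taken as an argument; `promote_keys` is an illustrative wrapper, not part of the crate.

use std::collections::HashMap;
use rand_core::OsRng;
use ciphersuite::Ciphersuite;
use dkg_promote::*;

fn promote_keys<C1, C2>(
  base: ThresholdKeys<C1>,
  // The other participants' proofs, received over an authenticated channel
  proofs: HashMap<Participant, GeneratorProof<C1>>,
) -> Result<ThresholdKeys<C2>, PromotionError>
where
  C1: Ciphersuite,
  // The target Ciphersuite must share the field and group, differing solely in its generator
  C2: Ciphersuite<F = C1::F, G = C1::G>,
{
  // Create our proof of valid promotion, which must be sent to all other participants
  let (promotion, _our_proof) = GeneratorPromotion::<C1, C2>::promote(&mut OsRng, base);
  // Complete the promotion with everyone else's proofs
  promotion.complete(&proofs)
}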


@@ -1,168 +0,0 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
// This crate requires `dleq` which doesn't support no-std via std-shims
// #![cfg_attr(not(feature = "std"), no_std)]
use core::{marker::PhantomData, ops::Deref};
use std::{
io::{self, Read, Write},
collections::HashMap,
};
use rand_core::{RngCore, CryptoRng};
use ciphersuite::{group::GroupEncoding, Ciphersuite};
use transcript::{Transcript, RecommendedTranscript};
use dleq::DLEqProof;
pub use dkg::*;
#[cfg(test)]
mod tests;
/// Errors encountered when promoting keys.
#[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)]
pub enum PromotionError {
/// Invalid participant identifier.
#[error("invalid participant (1 <= participant <= {n}, yet participant is {participant})")]
InvalidParticipant {
/// The total amount of participants.
n: u16,
/// The specified participant.
participant: Participant,
},
/// An incorrect amount of participants was specified.
#[error("incorrect amount of participants. {t} <= amount <= {n}, yet amount is {amount}")]
IncorrectAmountOfParticipants {
/// The threshold required.
t: u16,
/// The total amount of participants.
n: u16,
/// The amount of participants specified.
amount: usize,
},
/// Participant provided an invalid proof.
#[error("invalid proof {0}")]
InvalidProof(Participant),
}
fn transcript<G: GroupEncoding>(key: &G, i: Participant) -> RecommendedTranscript {
let mut transcript = RecommendedTranscript::new(b"DKG Generator Promotion v0.2");
transcript.append_message(b"group_key", key.to_bytes());
transcript.append_message(b"participant", i.to_bytes());
transcript
}
/// Proof of valid promotion to another generator.
#[derive(Clone, Copy)]
pub struct GeneratorProof<C: Ciphersuite> {
share: C::G,
proof: DLEqProof<C::G>,
}
impl<C: Ciphersuite> GeneratorProof<C> {
pub fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all(self.share.to_bytes().as_ref())?;
self.proof.write(writer)
}
pub fn read<R: Read>(reader: &mut R) -> io::Result<GeneratorProof<C>> {
Ok(GeneratorProof {
share: <C as Ciphersuite>::read_G(reader)?,
proof: DLEqProof::read(reader)?,
})
}
pub fn serialize(&self) -> Vec<u8> {
let mut buf = vec![];
self.write(&mut buf).unwrap();
buf
}
}
/// Promote a set of keys from one generator to another, where the elliptic curve is the same.
///
/// Since the Ciphersuite trait additionally specifies a generator, this provides an O(n) way to
/// update the generator used with keys. This outperforms the key generation protocol which is
/// exponential.
pub struct GeneratorPromotion<C1: Ciphersuite, C2: Ciphersuite> {
base: ThresholdKeys<C1>,
proof: GeneratorProof<C1>,
_c2: PhantomData<C2>,
}
impl<C1: Ciphersuite, C2: Ciphersuite<F = C1::F, G = C1::G>> GeneratorPromotion<C1, C2> {
/// Begin promoting keys from one generator to another.
///
/// Returns a proof this share was properly promoted.
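///
/// A hedged sketch (assuming `keys: ThresholdKeys<C1>` already exists):
/// ```ignore
/// let (promotion, proof) = GeneratorPromotion::<C1, C2>::promote(&mut OsRng, keys);
/// // `proof` is then broadcast to all other participants
/// ```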
pub fn promote<R: RngCore + CryptoRng>(
rng: &mut R,
base: ThresholdKeys<C1>,
) -> (GeneratorPromotion<C1, C2>, GeneratorProof<C1>) {
// Do a DLEqProof for the new generator
let proof = GeneratorProof {
share: C2::generator() * base.original_secret_share().deref(),
proof: DLEqProof::prove(
rng,
&mut transcript(&base.original_group_key(), base.params().i()),
&[C1::generator(), C2::generator()],
base.original_secret_share(),
),
};
(GeneratorPromotion { base, proof, _c2: PhantomData::<C2> }, proof)
}
/// Complete promotion by taking in the proofs from all other participants.
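///
/// A hedged sketch (assuming `proofs` maps each other participant to their
/// received `GeneratorProof<C1>`):
/// ```ignore
/// let promoted: ThresholdKeys<C2> = promotion.complete(&proofs)?;
/// ```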
pub fn complete(
self,
proofs: &HashMap<Participant, GeneratorProof<C1>>,
) -> Result<ThresholdKeys<C2>, PromotionError> {
let params = self.base.params();
if proofs.len() != (usize::from(params.n()) - 1) {
Err(PromotionError::IncorrectAmountOfParticipants {
t: params.n(),
n: params.n(),
amount: proofs.len() + 1,
})?;
}
for i in proofs.keys().copied() {
if u16::from(i) > params.n() {
Err(PromotionError::InvalidParticipant { n: params.n(), participant: i })?;
}
}
let mut verification_shares = HashMap::new();
verification_shares.insert(params.i(), self.proof.share);
for i in 1 ..= params.n() {
let i = Participant::new(i).unwrap();
if i == params.i() {
continue;
}
let proof = proofs.get(&i).unwrap();
proof
.proof
.verify(
&mut transcript(&self.base.original_group_key(), i),
&[C1::generator(), C2::generator()],
&[self.base.original_verification_share(i), proof.share],
)
.map_err(|_| PromotionError::InvalidProof(i))?;
verification_shares.insert(i, proof.share);
}
Ok(
ThresholdKeys::new(
params,
self.base.interpolation().clone(),
self.base.original_secret_share().clone(),
verification_shares,
)
.unwrap(),
)
}
}

View File

@@ -1,113 +0,0 @@
use core::marker::PhantomData;
use std::collections::HashMap;
use zeroize::{Zeroize, Zeroizing};
use rand_core::OsRng;
use dalek_ff_group::Ristretto;
use ciphersuite::{
group::{ff::Field, Group},
Ciphersuite,
};
use dkg::*;
use dkg_recovery::recover_key;
use crate::{GeneratorPromotion, GeneratorProof};
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
struct AltGenerator<C: Ciphersuite> {
_curve: PhantomData<C>,
}
impl<C: Ciphersuite> Ciphersuite for AltGenerator<C> {
type F = C::F;
type G = C::G;
type H = C::H;
const ID: &'static [u8] = b"Alternate Ciphersuite";
fn generator() -> Self::G {
C::G::generator() * <C as Ciphersuite>::hash_to_F(b"DKG Promotion Test", b"generator")
}
fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
<C as Ciphersuite>::hash_to_F(dst, data)
}
}
/// Clone a map without a specific value.
pub fn clone_without<K: Clone + core::cmp::Eq + core::hash::Hash, V: Clone>(
map: &HashMap<K, V>,
without: &K,
) -> HashMap<K, V> {
let mut res = map.clone();
res.remove(without).unwrap();
res
}
// Test promotion of threshold keys to another generator
#[test]
fn test_generator_promotion() {
// Generate a set of `ThresholdKeys`
const PARTICIPANTS: u16 = 5;
let keys: [ThresholdKeys<_>; PARTICIPANTS as usize] = {
let shares: [<Ristretto as Ciphersuite>::F; PARTICIPANTS as usize] =
core::array::from_fn(|_| <Ristretto as Ciphersuite>::F::random(&mut OsRng));
let verification_shares = (0 .. PARTICIPANTS)
.map(|i| {
(
Participant::new(i + 1).unwrap(),
<Ristretto as Ciphersuite>::generator() * shares[usize::from(i)],
)
})
.collect::<HashMap<_, _>>();
core::array::from_fn(|i| {
ThresholdKeys::new(
ThresholdParams::new(
PARTICIPANTS,
PARTICIPANTS,
Participant::new(u16::try_from(i + 1).unwrap()).unwrap(),
)
.unwrap(),
Interpolation::Constant(vec![<Ristretto as Ciphersuite>::F::ONE; PARTICIPANTS as usize]),
Zeroizing::new(shares[i]),
verification_shares.clone(),
)
.unwrap()
})
};
// Perform the promotion
let mut promotions = HashMap::new();
let mut proofs = HashMap::new();
for keys in &keys {
let i = keys.params().i();
let (promotion, proof) =
GeneratorPromotion::<_, AltGenerator<Ristretto>>::promote(&mut OsRng, keys.clone());
promotions.insert(i, promotion);
proofs.insert(
i,
GeneratorProof::<Ristretto>::read::<&[u8]>(&mut proof.serialize().as_ref()).unwrap(),
);
}
// Complete the promotion, and verify it worked
let new_group_key = AltGenerator::<Ristretto>::generator() * *recover_key(&keys).unwrap();
for (i, promoting) in promotions.drain() {
let promoted = promoting.complete(&clone_without(&proofs, &i)).unwrap();
assert_eq!(keys[usize::from(u16::from(i) - 1)].params(), promoted.params());
assert_eq!(
keys[usize::from(u16::from(i) - 1)].original_secret_share(),
promoted.original_secret_share()
);
assert_eq!(new_group_key, promoted.group_key());
for l in 0 .. PARTICIPANTS {
let verification_share =
promoted.original_verification_share(Participant::new(l + 1).unwrap());
assert_eq!(
AltGenerator::<Ristretto>::generator() * **keys[usize::from(l)].original_secret_share(),
verification_share
);
}
}
}

View File

@@ -1,61 +0,0 @@
[package]
name = "dleq"
version = "0.4.1"
description = "Implementation of single and cross-curve Discrete Log Equality proofs"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dleq"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.81"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
rustversion = "1"
thiserror = { version = "2", default-features = false, optional = true }
rand_core = { version = "0.6", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
digest = { version = "0.10", default-features = false }
transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false }
ff = { version = "0.13", default-features = false }
group = { version = "0.13", default-features = false }
multiexp = { path = "../multiexp", version = "0.4", default-features = false, features = ["batch"], optional = true }
[dev-dependencies]
hex-literal = "0.4"
rand_core = { version = "0.6", features = ["getrandom"] }
blake2 = "0.10"
k256 = { version = "^0.13.1", default-features = false, features = ["std", "arithmetic", "bits"] }
dalek-ff-group = { path = "../dalek-ff-group" }
transcript = { package = "flexible-transcript", path = "../transcript", features = ["recommended"] }
[features]
std = ["thiserror?/std", "rand_core/std", "zeroize/std", "digest/std", "transcript/std", "ff/std", "multiexp?/std"]
serialize = ["std"]
# Needed for cross-group DLEqs
secure_capacity_difference = []
experimental = ["std", "thiserror", "multiexp"]
default = [
"std",
# Only applies to experimental, yet is default to ensure security
# experimental doesn't mandate it itself in case two curves with extreme
# capacity differences are desired to be used together, in which case the user
# must specify experimental without default features
"secure_capacity_difference"
]

View File

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2020-2023 Luke Parker, Lee Bousfield
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,71 +0,0 @@
# Discrete Log Equality
Implementation of discrete log equality proofs for curves implementing
`ff`/`group`.
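A hedged sketch of the single-curve proof, using `k256` (one of this crate's
dev-dependencies); the second generator here is arbitrary and purely for
illustration:

```rust
use rand_core::OsRng;
use zeroize::Zeroizing;
use group::{ff::Field, Group};
use k256::{Scalar, ProjectivePoint};
use transcript::{Transcript, RecommendedTranscript};
use dleq::DLEqProof;

let generators = [ProjectivePoint::GENERATOR, ProjectivePoint::GENERATOR.double()];
let scalar = Zeroizing::new(Scalar::random(&mut OsRng));
// The points the proof claims share a discrete logarithm over the generators
let points = generators.map(|generator| generator * *scalar);

let proof = DLEqProof::prove(
  &mut OsRng,
  &mut RecommendedTranscript::new(b"DLEq Example"),
  &generators,
  &scalar,
);
// Verification requires a transcript identical to the one used when proving
proof
  .verify(&mut RecommendedTranscript::new(b"DLEq Example"), &generators, &points)
  .unwrap();
```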
There is also a highly experimental cross-group DLEq proof, under
the `experimental` feature, which has no formal proofs of security yet is
provided here regardless.
This library, except for the `experimental` feature, was
[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),
culminating in commit
[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).
Any subsequent changes have not undergone auditing.
### Cross-Group DLEq
The present cross-group DLEq is based off
[MRL-0010](https://web.getmonero.org/resources/research-lab/pubs/MRL-0010.pdf),
which isn't computationally correct: while it proves both keys have the same
discrete logarithm for their `G'`/`H'` components, it doesn't prove a lack of a
`G`/`H` component. Accordingly, it was augmented with a pair of Schnorr Proofs
of Knowledge, proving a known `G'`/`H'` component, guaranteeing a lack of a
`G`/`H` component (assuming an unknown relation between `G`/`H` and `G'`/`H'`).
The challenges for the ring signatures were also merged, removing one element
from each bit's proof with only a slight reduction to challenge security (as
instead of being uniform over each scalar field, they're uniform over the
mutual bit capacity of each scalar field). This reduction is identical to the
one applied to the proved-for scalar, and accordingly should not reduce overall
security. It does create a lack of domain separation, yet that shouldn't be an
issue.
The following variants are available, with a usage sketch after the list:
- `ClassicLinear`. This is only for reference purposes, being the above
described proof, with no further optimizations.
- `ConciseLinear`. This proves for 2 bits at a time, not increasing the
signature size for both bits yet decreasing the amount of
commitments/challenges in total.
- `EfficientLinear`. This provides ring signatures in the form
`((R_G, R_H), s)`, instead of `(e, s)`, and accordingly enables a batch
verification of their final step. It is the most performant, and also the
largest, option.
- `CompromiseLinear`. This provides signatures in the form `((R_G, R_H), s)` AND
proves for 2-bits at a time. While this increases the amount of steps in
verifying the ring signatures, which aren't batch verified, and decreases the
amount of items batched (an operation which grows in efficiency with
quantity), it strikes a balance between speed and size.
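
A hedged sketch of driving one variant, mirroring this crate's tests
(`generators` is assumed: a pair of `cross_group::Generators`, one per curve,
whose alternate generators have an unknown discrete-log relation):

```rust
use rand_core::{RngCore, OsRng};
use blake2::{Digest, Blake2b512};
use transcript::{Transcript, RecommendedTranscript};
use dleq::cross_group::EfficientLinearDLEq;

// The digest's output is mapped to a scalar mutually valid over both curves
let mut seed = [0; 32];
OsRng.fill_bytes(&mut seed);
let (proof, _private_keys) = EfficientLinearDLEq::prove(
  &mut OsRng,
  &mut RecommendedTranscript::new(b"Cross-Group DLEq Example"),
  generators,
  Blake2b512::new().chain_update(seed),
);
// Verification returns the pair of public keys proven for
let (_key_0, _key_1) = proof
  .verify(&mut OsRng, &mut RecommendedTranscript::new(b"Cross-Group DLEq Example"), generators)
  .unwrap();
```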
The following numbers are from benchmarks performed with k256/curve25519_dalek
on an Intel i7-118567:
| Algorithm | Size | Verification Time |
|--------------------|-------------------------|-------------------|
| `ClassicLinear` | 56829 bytes (+27%) | 157ms (0%) |
| `ConciseLinear` | 44607 bytes (Reference) | 156ms (Reference) |
| `EfficientLinear` | 65145 bytes (+46%) | 122ms (-22%) |
| `CompromiseLinear` | 48765 bytes (+9%) | 137ms (-12%) |
`CompromiseLinear` is the best choice by only being marginally sub-optimal
regarding size, yet still achieving most of the desired performance
improvements. That said, neither the original postulation (which had flaws) nor
any construction here has been proven or audited. Accordingly, they are solely
experimental, and none are recommended.
All proofs are suffixed "Linear" in the hope that a logarithmic proof becomes
available, which would likely immediately become the most efficient option.

View File

@@ -1,248 +0,0 @@
use rand_core::{RngCore, CryptoRng};
use zeroize::Zeroize;
use transcript::Transcript;
use group::{
ff::{Field, PrimeFieldBits},
prime::PrimeGroup,
};
use multiexp::BatchVerifier;
use crate::cross_group::{
Generators, DLEqError,
scalar::{scalar_convert, mutual_scalar_from_bytes},
};
#[cfg(feature = "serialize")]
use std::io::{Read, Write};
#[cfg(feature = "serialize")]
use ff::PrimeField;
#[cfg(feature = "serialize")]
use crate::{read_scalar, cross_group::read_point};
#[allow(non_camel_case_types)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) enum Re<G0: PrimeGroup, G1: PrimeGroup> {
R(G0, G1),
// Merged challenges have a slight security reduction, yet one already applied to the scalar
// being proven for, and this saves ~8kb. Alternatively, challenges could be redefined as a seed,
// present here, which is then hashed for each of the two challenges, remaining unbiased/unique
// while maintaining the bandwidth savings, yet also while adding 252 hashes for
// Secp256k1/Ed25519
e(G0::Scalar),
}
impl<G0: PrimeGroup, G1: PrimeGroup> Re<G0, G1> {
#[allow(non_snake_case)]
pub(crate) fn R_default() -> Re<G0, G1> {
Re::R(G0::identity(), G1::identity())
}
pub(crate) fn e_default() -> Re<G0, G1> {
Re::e(G0::Scalar::ZERO)
}
}
#[allow(non_snake_case)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) struct Aos<G0: PrimeGroup + Zeroize, G1: PrimeGroup + Zeroize, const RING_LEN: usize> {
Re_0: Re<G0, G1>,
s: [(G0::Scalar, G1::Scalar); RING_LEN],
}
impl<
G0: PrimeGroup<Scalar: PrimeFieldBits + Zeroize> + Zeroize,
G1: PrimeGroup<Scalar: PrimeFieldBits + Zeroize> + Zeroize,
const RING_LEN: usize,
> Aos<G0, G1, RING_LEN>
{
#[allow(non_snake_case)]
fn nonces<T: Transcript>(mut transcript: T, nonces: (G0, G1)) -> (G0::Scalar, G1::Scalar) {
transcript.domain_separate(b"aos_membership_proof");
transcript.append_message(b"ring_len", u8::try_from(RING_LEN).unwrap().to_le_bytes());
transcript.append_message(b"nonce_0", nonces.0.to_bytes());
transcript.append_message(b"nonce_1", nonces.1.to_bytes());
mutual_scalar_from_bytes(transcript.challenge(b"challenge").as_ref())
}
#[allow(non_snake_case)]
fn R(
generators: (Generators<G0>, Generators<G1>),
s: (G0::Scalar, G1::Scalar),
A: (G0, G1),
e: (G0::Scalar, G1::Scalar),
) -> (G0, G1) {
(((generators.0.alt * s.0) - (A.0 * e.0)), ((generators.1.alt * s.1) - (A.1 * e.1)))
}
#[allow(non_snake_case, clippy::type_complexity)]
fn R_batch(
generators: (Generators<G0>, Generators<G1>),
s: (G0::Scalar, G1::Scalar),
A: (G0, G1),
e: (G0::Scalar, G1::Scalar),
) -> (Vec<(G0::Scalar, G0)>, Vec<(G1::Scalar, G1)>) {
(vec![(-s.0, generators.0.alt), (e.0, A.0)], vec![(-s.1, generators.1.alt), (e.1, A.1)])
}
#[allow(non_snake_case)]
fn R_nonces<T: Transcript>(
transcript: T,
generators: (Generators<G0>, Generators<G1>),
s: (G0::Scalar, G1::Scalar),
A: (G0, G1),
e: (G0::Scalar, G1::Scalar),
) -> (G0::Scalar, G1::Scalar) {
Self::nonces(transcript, Self::R(generators, s, A, e))
}
#[allow(non_snake_case)]
pub(crate) fn prove<R: RngCore + CryptoRng, T: Clone + Transcript>(
rng: &mut R,
transcript: &T,
generators: (Generators<G0>, Generators<G1>),
ring: &[(G0, G1)],
mut actual: usize,
blinding_key: &mut (G0::Scalar, G1::Scalar),
mut Re_0: Re<G0, G1>,
) -> Self {
// While it is possible to use larger values, it's not efficient to do so
// 2 + 2 == 2^2, yet 2 + 2 + 2 < 2^3
debug_assert!((RING_LEN == 2) || (RING_LEN == 4));
debug_assert_eq!(RING_LEN, ring.len());
let mut s = [(G0::Scalar::ZERO, G1::Scalar::ZERO); RING_LEN];
let mut r = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng));
#[allow(non_snake_case)]
let original_R = (generators.0.alt * r.0, generators.1.alt * r.1);
#[allow(non_snake_case)]
let mut R = original_R;
for i in ((actual + 1) ..= (actual + RING_LEN)).map(|i| i % RING_LEN) {
let e = Self::nonces(transcript.clone(), R);
if i == 0 {
match Re_0 {
Re::R(ref mut R0_0, ref mut R1_0) => {
*R0_0 = R.0;
*R1_0 = R.1
}
Re::e(ref mut e_0) => *e_0 = e.0,
}
}
// Solve for the real index
if i == actual {
s[i] = (r.0 + (e.0 * blinding_key.0), r.1 + (e.1 * blinding_key.1));
debug_assert_eq!(Self::R(generators, s[i], ring[actual], e), original_R);
actual.zeroize();
blinding_key.0.zeroize();
blinding_key.1.zeroize();
r.0.zeroize();
r.1.zeroize();
break;
}
// Generate a decoy response
s[i] = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng));
R = Self::R(generators, s[i], ring[i], e);
}
Aos { Re_0, s }
}
// Assumes the ring has already been transcripted in some form. Critically insecure if it hasn't
pub(crate) fn verify<R: RngCore + CryptoRng, T: Clone + Transcript>(
&self,
rng: &mut R,
transcript: &T,
generators: (Generators<G0>, Generators<G1>),
batch: &mut (BatchVerifier<(), G0>, BatchVerifier<(), G1>),
ring: &[(G0, G1)],
) -> Result<(), DLEqError> {
debug_assert!((RING_LEN == 2) || (RING_LEN == 4));
debug_assert_eq!(RING_LEN, ring.len());
#[allow(non_snake_case)]
match self.Re_0 {
Re::R(R0_0, R1_0) => {
let mut e = Self::nonces(transcript.clone(), (R0_0, R1_0));
#[allow(clippy::needless_range_loop)]
for i in 0 .. (RING_LEN - 1) {
e = Self::R_nonces(transcript.clone(), generators, self.s[i], ring[i], e);
}
let mut statements =
Self::R_batch(generators, *self.s.last().unwrap(), *ring.last().unwrap(), e);
statements.0.push((G0::Scalar::ONE, R0_0));
statements.1.push((G1::Scalar::ONE, R1_0));
batch.0.queue(&mut *rng, (), statements.0);
batch.1.queue(&mut *rng, (), statements.1);
}
Re::e(e_0) => {
let e_0 = (e_0, scalar_convert(e_0).ok_or(DLEqError::InvalidChallenge)?);
let mut e = None;
#[allow(clippy::needless_range_loop)]
for i in 0 .. RING_LEN {
e = Some(Self::R_nonces(
transcript.clone(),
generators,
self.s[i],
ring[i],
e.unwrap_or(e_0),
));
}
// Will panic if the above loop is never run somehow
// If e wasn't an Option, and instead initially set to e_0, it'd always pass
if e_0 != e.unwrap() {
Err(DLEqError::InvalidProof)?;
}
}
}
Ok(())
}
#[cfg(feature = "serialize")]
pub(crate) fn write<W: Write>(&self, w: &mut W) -> std::io::Result<()> {
#[allow(non_snake_case)]
match self.Re_0 {
Re::R(R0, R1) => {
w.write_all(R0.to_bytes().as_ref())?;
w.write_all(R1.to_bytes().as_ref())?;
}
Re::e(e) => w.write_all(e.to_repr().as_ref())?,
}
for i in 0 .. RING_LEN {
w.write_all(self.s[i].0.to_repr().as_ref())?;
w.write_all(self.s[i].1.to_repr().as_ref())?;
}
Ok(())
}
#[allow(non_snake_case)]
#[cfg(feature = "serialize")]
pub(crate) fn read<R: Read>(r: &mut R, mut Re_0: Re<G0, G1>) -> std::io::Result<Self> {
match Re_0 {
Re::R(ref mut R0, ref mut R1) => {
*R0 = read_point(r)?;
*R1 = read_point(r)?
}
Re::e(ref mut e) => *e = read_scalar(r)?,
}
let mut s = [(G0::Scalar::ZERO, G1::Scalar::ZERO); RING_LEN];
for s in &mut s {
*s = (read_scalar(r)?, read_scalar(r)?);
}
Ok(Aos { Re_0, s })
}
}

View File

@@ -1,175 +0,0 @@
use rand_core::{RngCore, CryptoRng};
use zeroize::Zeroize;
use transcript::Transcript;
use group::{ff::PrimeFieldBits, prime::PrimeGroup};
use multiexp::BatchVerifier;
use crate::cross_group::{
Generators, DLEqError,
aos::{Re, Aos},
};
#[cfg(feature = "serialize")]
use std::io::{Read, Write};
#[cfg(feature = "serialize")]
use crate::cross_group::read_point;
#[allow(clippy::enum_variant_names)]
pub(crate) enum BitSignature {
ClassicLinear,
ConciseLinear,
EfficientLinear,
CompromiseLinear,
}
impl BitSignature {
pub(crate) const fn to_u8(&self) -> u8 {
match self {
BitSignature::ClassicLinear => 0,
BitSignature::ConciseLinear => 1,
BitSignature::EfficientLinear => 2,
BitSignature::CompromiseLinear => 3,
}
}
pub(crate) const fn from(algorithm: u8) -> BitSignature {
match algorithm {
0 => BitSignature::ClassicLinear,
1 => BitSignature::ConciseLinear,
2 => BitSignature::EfficientLinear,
3 => BitSignature::CompromiseLinear,
_ => panic!("Unknown algorithm"),
}
}
pub(crate) const fn bits(&self) -> u8 {
match self {
BitSignature::ClassicLinear | BitSignature::EfficientLinear => 1,
BitSignature::ConciseLinear | BitSignature::CompromiseLinear => 2,
}
}
pub(crate) const fn ring_len(&self) -> usize {
2_usize.pow(self.bits() as u32)
}
fn aos_form<G0: PrimeGroup, G1: PrimeGroup>(&self) -> Re<G0, G1> {
match self {
BitSignature::ClassicLinear | BitSignature::ConciseLinear => Re::e_default(),
BitSignature::EfficientLinear | BitSignature::CompromiseLinear => Re::R_default(),
}
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) struct Bits<
G0: PrimeGroup + Zeroize,
G1: PrimeGroup + Zeroize,
const SIGNATURE: u8,
const RING_LEN: usize,
> {
pub(crate) commitments: (G0, G1),
signature: Aos<G0, G1, RING_LEN>,
}
impl<
G0: PrimeGroup<Scalar: PrimeFieldBits + Zeroize> + Zeroize,
G1: PrimeGroup<Scalar: PrimeFieldBits + Zeroize> + Zeroize,
const SIGNATURE: u8,
const RING_LEN: usize,
> Bits<G0, G1, SIGNATURE, RING_LEN>
{
fn transcript<T: Transcript>(transcript: &mut T, i: usize, commitments: (G0, G1)) {
transcript.domain_separate(b"bits");
transcript.append_message(b"group", u16::try_from(i).unwrap().to_le_bytes());
transcript.append_message(b"commitment_0", commitments.0.to_bytes());
transcript.append_message(b"commitment_1", commitments.1.to_bytes());
}
fn ring(pow_2: (G0, G1), commitments: (G0, G1)) -> Vec<(G0, G1)> {
let mut res = vec![commitments; RING_LEN];
for i in 1 .. RING_LEN {
res[i] = (res[i - 1].0 - pow_2.0, res[i - 1].1 - pow_2.1);
}
res
}
fn shift(pow_2: &mut (G0, G1)) {
for _ in 0 .. BitSignature::from(SIGNATURE).bits() {
pow_2.0 = pow_2.0.double();
pow_2.1 = pow_2.1.double();
}
}
pub(crate) fn prove<R: RngCore + CryptoRng, T: Clone + Transcript>(
rng: &mut R,
transcript: &mut T,
generators: (Generators<G0>, Generators<G1>),
i: usize,
pow_2: &mut (G0, G1),
mut bits: u8,
blinding_key: &mut (G0::Scalar, G1::Scalar),
) -> Self {
let mut commitments =
((generators.0.alt * blinding_key.0), (generators.1.alt * blinding_key.1));
commitments.0 += pow_2.0 * G0::Scalar::from(bits.into());
commitments.1 += pow_2.1 * G1::Scalar::from(bits.into());
Self::transcript(transcript, i, commitments);
let signature = Aos::prove(
rng,
transcript,
generators,
&Self::ring(*pow_2, commitments),
usize::from(bits),
blinding_key,
BitSignature::from(SIGNATURE).aos_form(),
);
bits.zeroize();
Self::shift(pow_2);
Bits { commitments, signature }
}
pub(crate) fn verify<R: RngCore + CryptoRng, T: Clone + Transcript>(
&self,
rng: &mut R,
transcript: &mut T,
generators: (Generators<G0>, Generators<G1>),
batch: &mut (BatchVerifier<(), G0>, BatchVerifier<(), G1>),
i: usize,
pow_2: &mut (G0, G1),
) -> Result<(), DLEqError> {
Self::transcript(transcript, i, self.commitments);
self.signature.verify(
rng,
transcript,
generators,
batch,
&Self::ring(*pow_2, self.commitments),
)?;
Self::shift(pow_2);
Ok(())
}
#[cfg(feature = "serialize")]
pub(crate) fn write<W: Write>(&self, w: &mut W) -> std::io::Result<()> {
w.write_all(self.commitments.0.to_bytes().as_ref())?;
w.write_all(self.commitments.1.to_bytes().as_ref())?;
self.signature.write(w)
}
#[cfg(feature = "serialize")]
pub(crate) fn read<R: Read>(r: &mut R) -> std::io::Result<Self> {
Ok(Bits {
commitments: (read_point(r)?, read_point(r)?),
signature: Aos::read(r, BitSignature::from(SIGNATURE).aos_form())?,
})
}
}

View File

@@ -1,459 +0,0 @@
use core::ops::{Deref, DerefMut};
#[cfg(feature = "serialize")]
use std::io::{self, Read, Write};
use thiserror::Error;
use rand_core::{RngCore, CryptoRng};
use zeroize::{Zeroize, Zeroizing};
use digest::{Digest, HashMarker};
use transcript::Transcript;
use group::{
ff::{Field, PrimeField, PrimeFieldBits},
prime::PrimeGroup,
};
use multiexp::BatchVerifier;
/// Scalar utilities.
pub mod scalar;
use scalar::{scalar_convert, mutual_scalar_from_bytes};
pub(crate) mod schnorr;
use self::schnorr::SchnorrPoK;
pub(crate) mod aos;
mod bits;
use bits::{BitSignature, Bits};
// Use black_box when possible
#[rustversion::since(1.66)]
use core::hint::black_box;
#[rustversion::before(1.66)]
fn black_box<T>(val: T) -> T {
val
}
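// Convert a bool to a u8, routing through black_box in an attempt to prevent the compiler from
// introducing branches (and accordingly, timing side-channels)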
fn u8_from_bool(bit_ref: &mut bool) -> u8 {
let bit_ref = black_box(bit_ref);
let mut bit = black_box(*bit_ref);
#[allow(clippy::cast_lossless)]
let res = black_box(bit as u8);
bit.zeroize();
debug_assert!((res | 1) == 1);
bit_ref.zeroize();
res
}
#[cfg(feature = "serialize")]
pub(crate) fn read_point<R: Read, G: PrimeGroup>(r: &mut R) -> io::Result<G> {
let mut repr = G::Repr::default();
r.read_exact(repr.as_mut())?;
let point = G::from_bytes(&repr);
let Some(point) = Option::<G>::from(point) else { Err(io::Error::other("invalid point"))? };
if point.to_bytes().as_ref() != repr.as_ref() {
Err(io::Error::other("non-canonical point"))?;
}
Ok(point)
}
/// A pair of generators, one committing to values (primary), one blinding (alt), for an elliptic
/// curve.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct Generators<G: PrimeGroup> {
/// The generator used to commit to values.
///
/// This should likely be the curve's traditional 'basepoint'.
pub primary: G,
/// The generator used to blind values. This must be distinct from the primary generator.
pub alt: G,
}
impl<G: PrimeGroup> Generators<G> {
/// Create a new set of generators.
pub fn new(primary: G, alt: G) -> Option<Generators<G>> {
if primary == alt {
None?;
}
Some(Generators { primary, alt })
}
fn transcript<T: Transcript>(&self, transcript: &mut T) {
transcript.domain_separate(b"generators");
transcript.append_message(b"primary", self.primary.to_bytes());
transcript.append_message(b"alternate", self.alt.to_bytes());
}
}
/// Error for cross-group DLEq proofs.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]
pub enum DLEqError {
/// Invalid proof length.
#[error("invalid proof length")]
InvalidProofLength,
/// Invalid challenge.
#[error("invalid challenge")]
InvalidChallenge,
/// Invalid proof.
#[error("invalid proof")]
InvalidProof,
}
// This should never be directly instantiated and uses a u8 to represent internal values
// Any external usage is likely invalid
#[doc(hidden)]
// Debug would be such a dump of data that it likely isn't helpful, but at least it's available
// to anyone who wants it
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct __DLEqProof<
G0: PrimeGroup<Scalar: PrimeFieldBits> + Zeroize,
G1: PrimeGroup<Scalar: PrimeFieldBits> + Zeroize,
const SIGNATURE: u8,
const RING_LEN: usize,
const REMAINDER_RING_LEN: usize,
> {
bits: Vec<Bits<G0, G1, SIGNATURE, RING_LEN>>,
remainder: Option<Bits<G0, G1, SIGNATURE, REMAINDER_RING_LEN>>,
poks: (SchnorrPoK<G0>, SchnorrPoK<G1>),
}
macro_rules! dleq {
($doc_str: expr, $name: ident, $signature: expr, $remainder: literal,) => {
#[doc = $doc_str]
pub type $name<G0, G1> = __DLEqProof<
G0,
G1,
{ $signature.to_u8() },
{ $signature.ring_len() },
// There may not be a remainder, yet if there is one, it'll be just one bit
// A ring for one bit has a RING_LEN of 2
{
if $remainder {
2
} else {
0
}
},
>;
};
}
// Proves for 1-bit at a time with the signature form (e, s), as originally described in MRL-0010.
// Uses a merged challenge, unlike MRL-0010, for the ring signature, saving an element from each
// bit and removing a hash while slightly reducing challenge security. This security reduction is
// already applied to the scalar being proven for, a result of the requirement it's mutually valid
// over both scalar fields, hence its application here as well. This is mainly here as a point of
// reference for the following DLEq proofs, all of which use merged challenges, and isn't performant
// in comparison to the others
dleq!(
"The DLEq proof described in MRL-0010.",
ClassicLinearDLEq,
BitSignature::ClassicLinear,
false,
);
// Proves for 2-bits at a time to save 3/7 elements of every other bit
// <9% smaller than CompromiseLinear, yet ~12% slower
dleq!(
"A DLEq proof modified from MRL-0010, proving for two bits at a time to save on space.",
ConciseLinearDLEq,
BitSignature::ConciseLinear,
true,
);
// Uses AOS signatures of the form R, s, to enable the final step of the ring signature to be
// batch verified, at the cost of adding an additional element per bit
dleq!(
"
A DLEq proof modified from MRL-0010, using R, s forms instead of c, s forms to enable batch
verification at the cost of space usage.
",
EfficientLinearDLEq,
BitSignature::EfficientLinear,
false,
);
// Proves for 2-bits at a time while using the R, s form. This saves 3/7 elements of every other
// bit, while adding 1 element to every bit, and is more efficient than ConciseLinear yet less
// efficient than EfficientLinear due to having more ring signature steps which aren't batched
// >25% smaller than EfficientLinear and just 11% slower, making it the recommended option
dleq!(
"
A DLEq proof modified from MRL-0010, using R, s forms instead of c, s forms, while proving for
two bits at a time, to enable batch verification and take advantage of space savings.
This isn't quite as efficient as EfficientLinearDLEq, and isn't as compact as
ConciseLinearDLEq, yet strikes a strong balance of performance and conciseness.
",
CompromiseLinearDLEq,
BitSignature::CompromiseLinear,
true,
);
impl<
G0: PrimeGroup<Scalar: PrimeFieldBits + Zeroize> + Zeroize,
G1: PrimeGroup<Scalar: PrimeFieldBits + Zeroize> + Zeroize,
const SIGNATURE: u8,
const RING_LEN: usize,
const REMAINDER_RING_LEN: usize,
> __DLEqProof<G0, G1, SIGNATURE, RING_LEN, REMAINDER_RING_LEN>
{
pub(crate) fn transcript<T: Transcript>(
transcript: &mut T,
generators: (Generators<G0>, Generators<G1>),
keys: (G0, G1),
) {
transcript.domain_separate(b"cross_group_dleq");
generators.0.transcript(transcript);
generators.1.transcript(transcript);
transcript.domain_separate(b"points");
transcript.append_message(b"point_0", keys.0.to_bytes());
transcript.append_message(b"point_1", keys.1.to_bytes());
}
pub(crate) fn blinding_key<R: RngCore + CryptoRng, F: PrimeField>(
rng: &mut R,
total: &mut F,
last: bool,
) -> F {
let blinding_key = if last { -*total } else { F::random(&mut *rng) };
*total += blinding_key;
blinding_key
}
fn reconstruct_keys(&self) -> (G0, G1) {
let mut res = (
self.bits.iter().map(|bit| bit.commitments.0).sum::<G0>(),
self.bits.iter().map(|bit| bit.commitments.1).sum::<G1>(),
);
if let Some(bit) = &self.remainder {
res.0 += bit.commitments.0;
res.1 += bit.commitments.1;
}
res
}
#[allow(clippy::type_complexity)]
fn prove_internal<R: RngCore + CryptoRng, T: Clone + Transcript>(
rng: &mut R,
transcript: &mut T,
generators: (Generators<G0>, Generators<G1>),
f: (Zeroizing<G0::Scalar>, Zeroizing<G1::Scalar>),
) -> (Self, (Zeroizing<G0::Scalar>, Zeroizing<G1::Scalar>)) {
Self::transcript(
transcript,
generators,
((generators.0.primary * f.0.deref()), (generators.1.primary * f.1.deref())),
);
let poks = (
SchnorrPoK::<G0>::prove(rng, transcript, generators.0.primary, &f.0),
SchnorrPoK::<G1>::prove(rng, transcript, generators.1.primary, &f.1),
);
let mut blinding_key_total = (G0::Scalar::ZERO, G1::Scalar::ZERO);
let mut blinding_key = |rng: &mut R, last| {
let blinding_key = (
Self::blinding_key(&mut *rng, &mut blinding_key_total.0, last),
Self::blinding_key(&mut *rng, &mut blinding_key_total.1, last),
);
if last {
debug_assert_eq!(blinding_key_total.0, G0::Scalar::ZERO);
debug_assert_eq!(blinding_key_total.1, G1::Scalar::ZERO);
}
blinding_key
};
let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap();
let bits_per_group = usize::from(BitSignature::from(SIGNATURE).bits());
let mut pow_2 = (generators.0.primary, generators.1.primary);
let mut raw_bits = f.0.to_le_bits();
let mut bits = Vec::with_capacity(capacity);
let mut these_bits: u8 = 0;
// Needed to zero out the bits
#[allow(unused_assignments)]
for (i, mut bit) in raw_bits.iter_mut().enumerate() {
if i == capacity {
break;
}
// Accumulate this bit
let mut bit = u8_from_bool(bit.deref_mut());
these_bits |= bit << (i % bits_per_group);
bit.zeroize();
if (i % bits_per_group) == (bits_per_group - 1) {
let last = i == (capacity - 1);
let mut blinding_key = blinding_key(&mut *rng, last);
bits.push(Bits::prove(
&mut *rng,
transcript,
generators,
i / bits_per_group,
&mut pow_2,
these_bits,
&mut blinding_key,
));
these_bits.zeroize();
}
}
debug_assert_eq!(bits.len(), capacity / bits_per_group);
let mut remainder = None;
if capacity != ((capacity / bits_per_group) * bits_per_group) {
let mut blinding_key = blinding_key(&mut *rng, true);
remainder = Some(Bits::prove(
&mut *rng,
transcript,
generators,
capacity / bits_per_group,
&mut pow_2,
these_bits,
&mut blinding_key,
));
}
these_bits.zeroize();
let proof = __DLEqProof { bits, remainder, poks };
debug_assert_eq!(
proof.reconstruct_keys(),
(generators.0.primary * f.0.deref(), generators.1.primary * f.1.deref())
);
(proof, f)
}
/// Prove the Cross-Group Discrete Log Equality for the points derived from the scalar created as
/// the output of the passed in Digest.
///
/// Given the non-standard requirements to achieve uniformity, needing to be < 2^x instead of
/// less than a prime modulus, this is the simplest way to safely and securely generate a Scalar,
/// without risk of failure or bias.
///
/// It also ensures a lack of determinable relation between keys, guaranteeing security in the
/// currently expected use case for this, atomic swaps, where each swap leaks the key. Knowing
/// the relationship between keys would allow breaking all swaps after just one.
#[allow(clippy::type_complexity)]
pub fn prove<R: RngCore + CryptoRng, T: Clone + Transcript, D: Digest + HashMarker>(
rng: &mut R,
transcript: &mut T,
generators: (Generators<G0>, Generators<G1>),
digest: D,
) -> (Self, (Zeroizing<G0::Scalar>, Zeroizing<G1::Scalar>)) {
// This pattern theoretically prevents the compiler from moving it, so our protection against
// a copy remaining un-zeroized is actually what's causing a copy. There's still a feeling of
// safety granted by it, even if there's a loss in performance.
let (mut f0, mut f1) =
mutual_scalar_from_bytes::<G0::Scalar, G1::Scalar>(digest.finalize().as_ref());
let f = (Zeroizing::new(f0), Zeroizing::new(f1));
f0.zeroize();
f1.zeroize();
Self::prove_internal(rng, transcript, generators, f)
}
/// Prove the Cross-Group Discrete Log Equality for the points derived from the scalar passed in,
/// failing if it's not mutually valid.
///
/// This allows for rejection sampling externally derived scalars until they're safely usable,
/// as needed.
#[allow(clippy::type_complexity)]
pub fn prove_without_bias<R: RngCore + CryptoRng, T: Clone + Transcript>(
rng: &mut R,
transcript: &mut T,
generators: (Generators<G0>, Generators<G1>),
f0: Zeroizing<G0::Scalar>,
) -> Option<(Self, (Zeroizing<G0::Scalar>, Zeroizing<G1::Scalar>))> {
scalar_convert(*f0.deref()) // scalar_convert will zeroize it, though this is unfortunate
.map(|f1| Self::prove_internal(rng, transcript, generators, (f0, Zeroizing::new(f1))))
}
/// Verify a Cross-Group Discrete Log Equality proof, returning the points proven for.
pub fn verify<R: RngCore + CryptoRng, T: Clone + Transcript>(
&self,
rng: &mut R,
transcript: &mut T,
generators: (Generators<G0>, Generators<G1>),
) -> Result<(G0, G1), DLEqError> {
let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap();
let bits_per_group = usize::from(BitSignature::from(SIGNATURE).bits());
let has_remainder = (capacity % bits_per_group) != 0;
// These shouldn't be possible, as locally created and deserialized proofs should be properly
// formed in these regards, yet it doesn't hurt to check and would be problematic if true
if (self.bits.len() != (capacity / bits_per_group)) ||
((self.remainder.is_none() && has_remainder) ||
(self.remainder.is_some() && !has_remainder))
{
return Err(DLEqError::InvalidProofLength);
}
let keys = self.reconstruct_keys();
Self::transcript(transcript, generators, keys);
let batch_capacity = match BitSignature::from(SIGNATURE) {
BitSignature::ClassicLinear | BitSignature::ConciseLinear => 3,
BitSignature::EfficientLinear | BitSignature::CompromiseLinear => (self.bits.len() + 1) * 3,
};
let mut batch = (BatchVerifier::new(batch_capacity), BatchVerifier::new(batch_capacity));
self.poks.0.verify(&mut *rng, transcript, generators.0.primary, keys.0, &mut batch.0);
self.poks.1.verify(&mut *rng, transcript, generators.1.primary, keys.1, &mut batch.1);
let mut pow_2 = (generators.0.primary, generators.1.primary);
for (i, bits) in self.bits.iter().enumerate() {
bits.verify(&mut *rng, transcript, generators, &mut batch, i, &mut pow_2)?;
}
if let Some(bit) = &self.remainder {
bit.verify(&mut *rng, transcript, generators, &mut batch, self.bits.len(), &mut pow_2)?;
}
if (!batch.0.verify_vartime()) || (!batch.1.verify_vartime()) {
Err(DLEqError::InvalidProof)?;
}
Ok(keys)
}
/// Write a Cross-Group Discrete Log Equality proof to a type satisfying std::io::Write.
#[cfg(feature = "serialize")]
pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
for bit in &self.bits {
bit.write(w)?;
}
if let Some(bit) = &self.remainder {
bit.write(w)?;
}
self.poks.0.write(w)?;
self.poks.1.write(w)
}
/// Read a Cross-Group Discrete Log Equality proof from a type satisfying std::io::Read.
#[cfg(feature = "serialize")]
pub fn read<R: Read>(r: &mut R) -> io::Result<Self> {
let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap();
let bits_per_group = usize::from(BitSignature::from(SIGNATURE).bits());
let mut bits = Vec::with_capacity(capacity / bits_per_group);
for _ in 0 .. (capacity / bits_per_group) {
bits.push(Bits::read(r)?);
}
let mut remainder = None;
if (capacity % bits_per_group) != 0 {
remainder = Some(Bits::read(r)?);
}
Ok(__DLEqProof { bits, remainder, poks: (SchnorrPoK::read(r)?, SchnorrPoK::read(r)?) })
}
}

View File

@@ -1,75 +0,0 @@
use core::ops::DerefMut;
use ff::PrimeFieldBits;
use zeroize::Zeroize;
use crate::cross_group::u8_from_bool;
/// Convert a uniform scalar into one usable on both fields, clearing the top bits as needed.
pub fn scalar_normalize<F0: PrimeFieldBits + Zeroize, F1: PrimeFieldBits>(
mut scalar: F0,
) -> (F0, F1) {
let mutual_capacity = F0::CAPACITY.min(F1::CAPACITY);
// A mutual key is only as secure as its weakest group
// Accordingly, this bans a capacity difference of more than 4 bits to prevent a curve generally
// offering n-bits of security from being forced into a situation with much fewer bits
#[cfg(feature = "secure_capacity_difference")]
assert!((F0::CAPACITY.max(F1::CAPACITY) - mutual_capacity) <= 4);
let mut res1 = F0::ZERO;
let mut res2 = F1::ZERO;
// Uses the bits API to ensure a consistent endianness
let mut bits = scalar.to_le_bits();
scalar.zeroize();
// Convert it to big endian
bits.reverse();
let mut skip = bits.len() - usize::try_from(mutual_capacity).unwrap();
// Needed to zero out the bits
#[allow(unused_assignments)]
for mut bit in &mut bits {
if skip > 0 {
bit.deref_mut().zeroize();
skip -= 1;
continue;
}
res1 = res1.double();
res2 = res2.double();
let mut bit = u8_from_bool(bit.deref_mut());
res1 += F0::from(bit.into());
res2 += F1::from(bit.into());
bit.zeroize();
}
(res1, res2)
}
/// Helper to convert a scalar between fields. Returns None if the scalar isn't mutually valid.
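///
/// A hedged sketch, mirroring this crate's tests:
/// ```ignore
/// // Some(_) when the scalar fits within both fields' mutual capacity
/// assert!(scalar_convert::<k256::Scalar, dalek_ff_group::Scalar>(k256::Scalar::ONE).is_some());
/// ```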
pub fn scalar_convert<F0: PrimeFieldBits + Zeroize, F1: PrimeFieldBits>(
mut scalar: F0,
) -> Option<F1> {
let (mut valid, converted) = scalar_normalize(scalar);
let res = Some(converted).filter(|_| scalar == valid);
scalar.zeroize();
valid.zeroize();
res
}
/// Create a mutually valid scalar from bytes via bit truncation to not introduce bias.
pub fn mutual_scalar_from_bytes<F0: PrimeFieldBits + Zeroize, F1: PrimeFieldBits>(
bytes: &[u8],
) -> (F0, F1) {
let capacity = usize::try_from(F0::CAPACITY.min(F1::CAPACITY)).unwrap();
debug_assert!((bytes.len() * 8) >= capacity);
let mut accum = F0::ZERO;
for b in 0 .. capacity {
accum = accum.double();
accum += F0::from(((bytes[b / 8] >> (b % 8)) & 1).into());
}
(accum, scalar_convert(accum).unwrap())
}

View File

@@ -1,88 +0,0 @@
use core::ops::Deref;
use rand_core::{RngCore, CryptoRng};
use zeroize::{Zeroize, Zeroizing};
use transcript::Transcript;
use group::{
ff::{Field, PrimeFieldBits},
prime::PrimeGroup,
};
use multiexp::BatchVerifier;
use crate::challenge;
#[cfg(feature = "serialize")]
use std::io::{Read, Write};
#[cfg(feature = "serialize")]
use ff::PrimeField;
#[cfg(feature = "serialize")]
use crate::{read_scalar, cross_group::read_point};
#[allow(non_snake_case)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) struct SchnorrPoK<G: PrimeGroup + Zeroize> {
R: G,
s: G::Scalar,
}
impl<G: PrimeGroup<Scalar: PrimeFieldBits + Zeroize> + Zeroize> SchnorrPoK<G> {
// Not HRAm due to the lack of m
#[allow(non_snake_case)]
fn hra<T: Transcript>(transcript: &mut T, generator: G, R: G, A: G) -> G::Scalar {
transcript.domain_separate(b"schnorr_proof_of_knowledge");
transcript.append_message(b"generator", generator.to_bytes());
transcript.append_message(b"nonce", R.to_bytes());
transcript.append_message(b"public_key", A.to_bytes());
challenge(transcript)
}
pub(crate) fn prove<R: RngCore + CryptoRng, T: Transcript>(
rng: &mut R,
transcript: &mut T,
generator: G,
private_key: &Zeroizing<G::Scalar>,
) -> SchnorrPoK<G> {
let nonce = Zeroizing::new(G::Scalar::random(rng));
#[allow(non_snake_case)]
let R = generator * nonce.deref();
SchnorrPoK {
R,
s: (SchnorrPoK::hra(transcript, generator, R, generator * private_key.deref()) *
private_key.deref()) +
nonce.deref(),
}
}
pub(crate) fn verify<R: RngCore + CryptoRng, T: Transcript>(
&self,
rng: &mut R,
transcript: &mut T,
generator: G,
public_key: G,
batch: &mut BatchVerifier<(), G>,
) {
batch.queue(
rng,
(),
[
(-self.s, generator),
(G::Scalar::ONE, self.R),
(Self::hra(transcript, generator, self.R, public_key), public_key),
],
);
}
#[cfg(feature = "serialize")]
pub fn write<W: Write>(&self, w: &mut W) -> std::io::Result<()> {
w.write_all(self.R.to_bytes().as_ref())?;
w.write_all(self.s.to_repr().as_ref())
}
#[cfg(feature = "serialize")]
pub fn read<R: Read>(r: &mut R) -> std::io::Result<SchnorrPoK<G>> {
Ok(SchnorrPoK { R: read_point(r)?, s: read_scalar(r)? })
}
}

View File

@@ -1,324 +0,0 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
#![doc = include_str!("../README.md")]
use core::ops::Deref;
use rand_core::{RngCore, CryptoRng};
use zeroize::{Zeroize, Zeroizing};
use transcript::Transcript;
use ff::{Field, PrimeField};
use group::prime::PrimeGroup;
#[cfg(feature = "serialize")]
use std::io::{self, Error, Read, Write};
/// A cross-group DLEq proof capable of proving that two public keys, across two different curves,
/// share a private key.
#[cfg(feature = "experimental")]
pub mod cross_group;
#[cfg(test)]
mod tests;
// Produce a non-biased challenge from the transcript in the specified field
pub(crate) fn challenge<T: Transcript, F: PrimeField>(transcript: &mut T) -> F {
// From here, there are three ways to get a scalar under the ff/group API
// 1: Scalar::random(ChaCha20Rng::from_seed(self.transcript.rng_seed(b"challenge")))
// 2: Grabbing a UInt library to perform reduction by the modulus, then determining endianness
// and loading it in
// 3: Iterating over each byte and manually doubling/adding. This is simplest
let mut challenge = F::ZERO;
// Get a wide amount of bytes to safely reduce without bias
// In most cases, <=1.5x bytes is enough. 2x is still standard and there are some theoretical
// groups which may technically require more than 1.5x bytes for this to work as intended
let target_bytes = usize::try_from(F::NUM_BITS).unwrap().div_ceil(8) * 2;
let mut challenge_bytes = transcript.challenge(b"challenge");
let challenge_bytes_len = challenge_bytes.as_ref().len();
// If the challenge is 32 bytes, and we need 64, we need two challenges
let needed_challenges = target_bytes.div_ceil(challenge_bytes_len);
// The following algorithm should be equivalent to a wide reduction of the challenges,
// interpreted as a concatenated, big-endian byte string
let mut handled_bytes = 0;
'outer: for _ in 0 ..= needed_challenges {
// Cursor of which byte of the challenge to use next
let mut b = 0;
while b < challenge_bytes_len {
// Get the next amount of bytes to attempt
// Only grabs the needed amount of bytes, up to 8 at a time (u64), so long as they're
// available in the challenge
let chunk_bytes = (target_bytes - handled_bytes).min(8).min(challenge_bytes_len - b);
let mut chunk = 0;
for _ in 0 .. chunk_bytes {
chunk <<= 8;
chunk |= u64::from(challenge_bytes.as_ref()[b]);
b += 1;
}
// Add this chunk
challenge += F::from(chunk);
handled_bytes += chunk_bytes;
// If we've reached the target amount of bytes, break
if handled_bytes == target_bytes {
break 'outer;
}
// Shift over by however many bits will be in the next chunk
let next_chunk_bytes = (target_bytes - handled_bytes).min(8).min(challenge_bytes_len);
for _ in 0 .. (next_chunk_bytes * 8) {
challenge = challenge.double();
}
}
// Secure thanks to the Transcript trait having a bound of updating on challenge
challenge_bytes = transcript.challenge(b"challenge_extension");
}
challenge
}
// Helper function to read a scalar
#[cfg(feature = "serialize")]
fn read_scalar<R: Read, F: PrimeField>(r: &mut R) -> io::Result<F> {
let mut repr = F::Repr::default();
r.read_exact(repr.as_mut())?;
let scalar = F::from_repr(repr);
if scalar.is_none().into() {
Err(Error::other("invalid scalar"))?;
}
Ok(scalar.unwrap())
}
/// Error for DLEq proofs.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum DLEqError {
/// The proof was invalid.
InvalidProof,
}
/// A proof that points have the same discrete logarithm across generators.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct DLEqProof<G: PrimeGroup<Scalar: Zeroize>> {
c: G::Scalar,
s: G::Scalar,
}
#[allow(non_snake_case)]
impl<G: PrimeGroup<Scalar: Zeroize>> DLEqProof<G> {
fn transcript<T: Transcript>(transcript: &mut T, generator: G, nonce: G, point: G) {
transcript.append_message(b"generator", generator.to_bytes());
transcript.append_message(b"nonce", nonce.to_bytes());
transcript.append_message(b"point", point.to_bytes());
}
/// Prove that the points created by `scalar * G`, for each specified generator, share a discrete
/// logarithm.
pub fn prove<R: RngCore + CryptoRng, T: Transcript>(
rng: &mut R,
transcript: &mut T,
generators: &[G],
scalar: &Zeroizing<G::Scalar>,
) -> DLEqProof<G> {
let r = Zeroizing::new(G::Scalar::random(rng));
transcript.domain_separate(b"dleq");
for generator in generators {
// R, A
Self::transcript(transcript, *generator, *generator * r.deref(), *generator * scalar.deref());
}
let c = challenge(transcript);
// r + ca
let s = (c * scalar.deref()) + r.deref();
DLEqProof { c, s }
}
// Transcript a specific generator/nonce/point (G/R/A), as used when verifying a proof.
// This takes in the generator/point, and then the challenge and solution to calculate the nonce.
fn verify_statement<T: Transcript>(
transcript: &mut T,
generator: G,
point: G,
c: G::Scalar,
s: G::Scalar,
) {
// s = r + ca
// sG - cA = R
// R, A
Self::transcript(transcript, generator, (generator * s) - (point * c), point);
}
/// Verify the specified points share a discrete logarithm across the specified generators.
pub fn verify<T: Transcript>(
&self,
transcript: &mut T,
generators: &[G],
points: &[G],
) -> Result<(), DLEqError> {
if generators.len() != points.len() {
Err(DLEqError::InvalidProof)?;
}
transcript.domain_separate(b"dleq");
for (generator, point) in generators.iter().zip(points) {
Self::verify_statement(transcript, *generator, *point, self.c, self.s);
}
if self.c != challenge(transcript) {
Err(DLEqError::InvalidProof)?;
}
Ok(())
}
/// Write a DLEq proof to something implementing Write.
#[cfg(feature = "serialize")]
pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
w.write_all(self.c.to_repr().as_ref())?;
w.write_all(self.s.to_repr().as_ref())
}
/// Read a DLEq proof from something implementing Read.
#[cfg(feature = "serialize")]
pub fn read<R: Read>(r: &mut R) -> io::Result<DLEqProof<G>> {
Ok(DLEqProof { c: read_scalar(r)?, s: read_scalar(r)? })
}
/// Serialize a DLEq proof to a `Vec<u8>`.
#[cfg(feature = "serialize")]
pub fn serialize(&self) -> Vec<u8> {
let mut res = vec![];
self.write(&mut res).unwrap();
res
}
}
/// A proof that multiple series of points each have a single discrete logarithm across generators.
///
/// This is effectively n distinct DLEq proofs, one for each discrete logarithm and its points
/// across some generators, yet with a smaller overall proof size.
#[cfg(feature = "std")]
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct MultiDLEqProof<G: PrimeGroup<Scalar: Zeroize>> {
c: G::Scalar,
s: Vec<G::Scalar>,
}
#[cfg(feature = "std")]
#[allow(non_snake_case)]
impl<G: PrimeGroup<Scalar: Zeroize>> MultiDLEqProof<G> {
/// Prove for each scalar that the series of points created by multiplying it against its
/// matching generators share a discrete logarithm.
/// This function panics if `generators.len() != scalars.len()`.
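///
/// A hedged sketch (`g0`, `g1`, `h0` are assumed, in-scope group elements, and the scalars are
/// `Zeroizing`):
/// ```ignore
/// let proof = MultiDLEqProof::prove(
///   &mut OsRng,
///   &mut transcript,
///   &[vec![g0, g1], vec![h0]],
///   &[scalar_a, scalar_b],
/// );
/// ```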
pub fn prove<R: RngCore + CryptoRng, T: Transcript>(
rng: &mut R,
transcript: &mut T,
generators: &[Vec<G>],
scalars: &[Zeroizing<G::Scalar>],
) -> MultiDLEqProof<G> {
assert_eq!(
generators.len(),
scalars.len(),
"amount of series of generators doesn't match the amount of scalars"
);
transcript.domain_separate(b"multi_dleq");
let mut nonces = vec![];
for (i, (scalar, generators)) in scalars.iter().zip(generators).enumerate() {
// Delineate between discrete logarithms
transcript.append_message(b"discrete_logarithm", i.to_le_bytes());
let nonce = Zeroizing::new(G::Scalar::random(&mut *rng));
for generator in generators {
DLEqProof::transcript(
transcript,
*generator,
*generator * nonce.deref(),
*generator * scalar.deref(),
);
}
nonces.push(nonce);
}
let c = challenge(transcript);
let mut s = vec![];
for (scalar, nonce) in scalars.iter().zip(nonces) {
s.push((c * scalar.deref()) + nonce.deref());
}
MultiDLEqProof { c, s }
}
/// Verify each series of points share a discrete logarithm against their matching series of
/// generators.
pub fn verify<T: Transcript>(
&self,
transcript: &mut T,
generators: &[Vec<G>],
points: &[Vec<G>],
) -> Result<(), DLEqError> {
if points.len() != generators.len() {
Err(DLEqError::InvalidProof)?;
}
if self.s.len() != generators.len() {
Err(DLEqError::InvalidProof)?;
}
transcript.domain_separate(b"multi_dleq");
for (i, (generators, points)) in generators.iter().zip(points).enumerate() {
if points.len() != generators.len() {
Err(DLEqError::InvalidProof)?;
}
transcript.append_message(b"discrete_logarithm", i.to_le_bytes());
for (generator, point) in generators.iter().zip(points) {
DLEqProof::verify_statement(transcript, *generator, *point, self.c, self.s[i]);
}
}
if self.c != challenge(transcript) {
Err(DLEqError::InvalidProof)?;
}
Ok(())
}
/// Write a multi-DLEq proof to something implementing Write.
#[cfg(feature = "serialize")]
pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
w.write_all(self.c.to_repr().as_ref())?;
for s in &self.s {
w.write_all(s.to_repr().as_ref())?;
}
Ok(())
}
/// Read a multi-DLEq proof from something implementing Read.
#[cfg(feature = "serialize")]
pub fn read<R: Read>(r: &mut R, discrete_logs: usize) -> io::Result<MultiDLEqProof<G>> {
let c = read_scalar(r)?;
let mut s = vec![];
for _ in 0 .. discrete_logs {
s.push(read_scalar(r)?);
}
Ok(MultiDLEqProof { c, s })
}
/// Serialize a multi-DLEq proof to a `Vec<u8>`.
#[cfg(feature = "serialize")]
pub fn serialize(&self) -> Vec<u8> {
let mut res = vec![];
self.write(&mut res).unwrap();
res
}
}

View File

@@ -1,67 +0,0 @@
use rand_core::OsRng;
use group::{ff::Field, Group};
use multiexp::BatchVerifier;
use crate::{
cross_group::aos::{Re, Aos},
tests::cross_group::{G0, G1, transcript, generators},
};
#[allow(non_snake_case)]
#[cfg(feature = "serialize")]
fn test_aos_serialization<const RING_LEN: usize>(proof: &Aos<G0, G1, RING_LEN>, Re_0: Re<G0, G1>) {
let mut buf = vec![];
proof.write(&mut buf).unwrap();
let deserialized = Aos::read::<&[u8]>(&mut buf.as_ref(), Re_0).unwrap();
assert_eq!(proof, &deserialized);
}
fn test_aos<const RING_LEN: usize>(default: &Re<G0, G1>) {
let generators = generators();
let mut ring_keys = [(<G0 as Group>::Scalar::ZERO, <G1 as Group>::Scalar::ZERO); RING_LEN];
// Side-effect of G0 being a type-alias with identity() deprecated
#[allow(deprecated)]
let mut ring = [(G0::identity(), G1::identity()); RING_LEN];
for i in 0 .. RING_LEN {
ring_keys[i] =
(<G0 as Group>::Scalar::random(&mut OsRng), <G1 as Group>::Scalar::random(&mut OsRng));
ring[i] = (generators.0.alt * ring_keys[i].0, generators.1.alt * ring_keys[i].1);
}
for (actual, key) in ring_keys.iter_mut().enumerate() {
let proof = Aos::<_, _, RING_LEN>::prove(
&mut OsRng,
&transcript(),
generators,
&ring,
actual,
key,
default.clone(),
);
let mut batch = (BatchVerifier::new(0), BatchVerifier::new(0));
proof.verify(&mut OsRng, &transcript(), generators, &mut batch, &ring).unwrap();
// For e, these should have nothing. For R, these should have 6 elements each which sum to 0
assert!(batch.0.verify_vartime());
assert!(batch.1.verify_vartime());
#[cfg(feature = "serialize")]
test_aos_serialization(&proof, default.clone());
}
}
#[test]
fn test_aos_e() {
test_aos::<2>(&Re::e_default());
test_aos::<4>(&Re::e_default());
}
#[allow(non_snake_case)]
#[test]
fn test_aos_R() {
// Batch verification appreciates the longer vectors, which means not batching bits
test_aos::<2>(&Re::R_default());
}

View File

@@ -1,200 +0,0 @@
use core::ops::Deref;
use hex_literal::hex;
use zeroize::Zeroizing;
use rand_core::{RngCore, OsRng};
use ff::{Field, PrimeField};
use group::{Group, GroupEncoding};
use blake2::{Digest, Blake2b512};
use k256::{Scalar, ProjectivePoint};
use dalek_ff_group::{self as dfg, EdwardsPoint};
use transcript::{Transcript, RecommendedTranscript};
use crate::{
cross_group::{
scalar::mutual_scalar_from_bytes, Generators, ClassicLinearDLEq, EfficientLinearDLEq,
ConciseLinearDLEq, CompromiseLinearDLEq,
},
};
mod scalar;
mod aos;
type G0 = ProjectivePoint;
type G1 = EdwardsPoint;
pub(crate) fn transcript() -> RecommendedTranscript {
RecommendedTranscript::new(b"Cross-Group DLEq Proof Test")
}
pub(crate) fn generators() -> (Generators<G0>, Generators<G1>) {
(
Generators::new(
ProjectivePoint::GENERATOR,
ProjectivePoint::from_bytes(
&(hex!("0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0").into()),
)
.unwrap(),
)
.unwrap(),
Generators::new(
EdwardsPoint::generator(),
EdwardsPoint::from_bytes(&hex!(
"8b655970153799af2aeadc9ff1add0ea6c7251d54154cfa92c173a0dd39c1f94"
))
.unwrap(),
)
.unwrap(),
)
}
macro_rules! verify_and_deserialize {
($type: ty, $proof: ident, $generators: ident, $keys: ident) => {
let public_keys = $proof.verify(&mut OsRng, &mut transcript(), $generators).unwrap();
assert_eq!($generators.0.primary * $keys.0.deref(), public_keys.0);
assert_eq!($generators.1.primary * $keys.1.deref(), public_keys.1);
#[cfg(feature = "serialize")]
{
let mut buf = vec![];
$proof.write(&mut buf).unwrap();
let deserialized = <$type>::read::<&[u8]>(&mut buf.as_ref()).unwrap();
assert_eq!($proof, deserialized);
}
};
}
macro_rules! test_dleq {
($str: literal, $benchmark: ident, $name: ident, $type: ident) => {
#[ignore]
#[test]
fn $benchmark() {
println!("Benchmarking with Secp256k1/Ed25519");
let generators = generators();
let mut seed = [0; 32];
OsRng.fill_bytes(&mut seed);
let key = Blake2b512::new().chain_update(seed);
let runs = 200;
let mut proofs = Vec::with_capacity(usize::try_from(runs).unwrap());
let time = std::time::Instant::now();
for _ in 0 .. runs {
proofs.push($type::prove(&mut OsRng, &mut transcript(), generators, key.clone()).0);
}
println!("{} had a average prove time of {}ms", $str, time.elapsed().as_millis() / runs);
let time = std::time::Instant::now();
for proof in &proofs {
proof.verify(&mut OsRng, &mut transcript(), generators).unwrap();
}
println!("{} had a average verify time of {}ms", $str, time.elapsed().as_millis() / runs);
#[cfg(feature = "serialize")]
{
let mut buf = vec![];
proofs[0].write(&mut buf).unwrap();
println!("{} had a proof size of {} bytes", $str, buf.len());
}
}
#[test]
fn $name() {
let generators = generators();
for i in 0 .. 2 {
let (proof, keys) = if i == 0 {
let mut seed = [0; 32];
OsRng.fill_bytes(&mut seed);
$type::prove(
&mut OsRng,
&mut transcript(),
generators,
Blake2b512::new().chain_update(seed),
)
} else {
let mut key;
let mut res;
while {
key = Zeroizing::new(Scalar::random(&mut OsRng));
res = $type::prove_without_bias(&mut OsRng, &mut transcript(), generators, key.clone());
res.is_none()
} {}
let res = res.unwrap();
assert_eq!(key, res.1 .0);
res
};
verify_and_deserialize!($type::<G0, G1>, proof, generators, keys);
}
}
};
}
test_dleq!("ClassicLinear", benchmark_classic_linear, test_classic_linear, ClassicLinearDLEq);
test_dleq!("ConciseLinear", benchmark_concise_linear, test_concise_linear, ConciseLinearDLEq);
test_dleq!(
"EfficientLinear",
benchmark_efficient_linear,
test_efficient_linear,
EfficientLinearDLEq
);
test_dleq!(
"CompromiseLinear",
benchmark_compromise_linear,
test_compromise_linear,
CompromiseLinearDLEq
);
#[test]
fn test_rejection_sampling() {
let mut pow_2 = Scalar::ONE;
for _ in 0 .. dfg::Scalar::CAPACITY {
pow_2 = pow_2.double();
}
assert!(
// Either would work
EfficientLinearDLEq::prove_without_bias(
&mut OsRng,
&mut transcript(),
generators(),
Zeroizing::new(pow_2)
)
.is_none()
);
}
#[test]
fn test_remainder() {
// Uses Secp256k1 for both to achieve an odd capacity of 255
assert_eq!(Scalar::CAPACITY, 255);
let generators = (generators().0, generators().0);
// This will ignore any unused bits, ensuring every remaining one is set
let keys = mutual_scalar_from_bytes::<Scalar, Scalar>(&[0xFF; 32]);
let keys = (Zeroizing::new(keys.0), Zeroizing::new(keys.1));
assert_eq!(Scalar::ONE + keys.0.deref(), Scalar::from(2u64).pow_vartime([255]));
assert_eq!(keys.0, keys.1);
let (proof, res) = ConciseLinearDLEq::prove_without_bias(
&mut OsRng,
&mut transcript(),
generators,
keys.0.clone(),
)
.unwrap();
assert_eq!(keys, res);
verify_and_deserialize!(
ConciseLinearDLEq::<ProjectivePoint, ProjectivePoint>,
proof,
generators,
keys
);
}
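// Editor's sketch of mutual_scalar_from_bytes' presumed contract, not the
// crate's implementation: accumulate min(F0::CAPACITY, F1::CAPACITY) bits of
// the input into both fields, which is consistent with 32 bytes of 0xFF mapping
// to 2^255 - 1 in each secp256k1 scalar above. Assumes ff::PrimeField is in
// scope, as the tests above already require.
#[allow(dead_code)]
fn mutual_scalar_from_bytes_sketch<F0: PrimeField, F1: PrimeField>(bytes: &[u8]) -> (F0, F1) {
  let capacity = usize::try_from(F0::CAPACITY.min(F1::CAPACITY)).unwrap();
  assert!((bytes.len() * 8) >= capacity);
  let (mut f0, mut f1) = (F0::ZERO, F1::ZERO);
  // Process the most significant bit first: shift up, then add the next bit
  for b in (0 .. capacity).rev() {
    f0 = f0.double();
    f1 = f1.double();
    let bit = u64::from((bytes[b / 8] >> (b % 8)) & 1);
    f0 += F0::from(bit);
    f1 += F1::from(bit);
  }
  (f0, f1)
}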

@@ -1,47 +0,0 @@
use rand_core::OsRng;
use ff::{Field, PrimeField};
use k256::Scalar as K256Scalar;
use dalek_ff_group::Scalar as DalekScalar;
use crate::cross_group::scalar::{scalar_normalize, scalar_convert};
#[test]
fn test_scalar() {
assert_eq!(
scalar_normalize::<_, DalekScalar>(K256Scalar::ZERO),
(K256Scalar::ZERO, DalekScalar::ZERO)
);
assert_eq!(
scalar_normalize::<_, DalekScalar>(K256Scalar::ONE),
(K256Scalar::ONE, DalekScalar::ONE)
);
let mut initial;
while {
initial = K256Scalar::random(&mut OsRng);
let (k, ed) = scalar_normalize::<_, DalekScalar>(initial);
    // The initial scalar should equal the new scalar once truncated to Ed25519's capacity
let mut initial_bytes = initial.to_repr().to_vec();
// Drop the first 4 bits to hit 252
initial_bytes[0] &= 0b00001111;
let k_bytes = k.to_repr().to_vec();
assert_eq!(initial_bytes, k_bytes);
let mut ed_bytes = ed.to_repr().as_ref().to_vec();
// Reverse to big endian
ed_bytes.reverse();
assert_eq!(k_bytes, ed_bytes);
// Verify conversion works as expected
assert_eq!(scalar_convert::<_, DalekScalar>(k), Some(ed));
// Run this test again if this secp256k1 scalar didn't have any bits cleared
initial == k
} {}
// Verify conversion returns None when the scalar isn't mutually valid
assert!(scalar_convert::<_, DalekScalar>(initial).is_none());
}
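// Editor's sketch, a corollary of the behavior exercised above rather than an
// original test: since scalar_normalize truncates to the mutual capacity, its
// output should always be accepted by scalar_convert.
#[test]
fn test_normalize_then_convert() {
  let (k, ed) = scalar_normalize::<_, DalekScalar>(K256Scalar::random(&mut OsRng));
  // The normalized secp256k1 scalar is mutually valid by construction
  assert_eq!(scalar_convert::<_, DalekScalar>(k), Some(ed));
}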

@@ -1,43 +0,0 @@
use core::ops::Deref;
use rand_core::OsRng;
use zeroize::{Zeroize, Zeroizing};
use group::{
ff::{Field, PrimeFieldBits},
prime::PrimeGroup,
};
use multiexp::BatchVerifier;
use transcript::{Transcript, RecommendedTranscript};
use crate::cross_group::schnorr::SchnorrPoK;
fn test_schnorr<G: PrimeGroup<Scalar: PrimeFieldBits + Zeroize> + Zeroize>() {
let transcript = RecommendedTranscript::new(b"Schnorr Test");
let mut batch = BatchVerifier::new(10);
for _ in 0 .. 10 {
let private = Zeroizing::new(G::Scalar::random(&mut OsRng));
SchnorrPoK::prove(&mut OsRng, &mut transcript.clone(), G::generator(), &private).verify(
&mut OsRng,
&mut transcript.clone(),
G::generator(),
G::generator() * private.deref(),
&mut batch,
);
}
assert!(batch.verify_vartime());
}
#[test]
fn test_secp256k1() {
test_schnorr::<k256::ProjectivePoint>();
}
#[test]
fn test_ed25519() {
test_schnorr::<dalek_ff_group::EdwardsPoint>();
}
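// Editor's sketch, not an original test: the negative case. Verifying the PoK
// against a shifted public key should presumably leave the batch invalid.
fn test_schnorr_rejects_wrong_key<G: PrimeGroup<Scalar: PrimeFieldBits + Zeroize> + Zeroize>() {
  let transcript = RecommendedTranscript::new(b"Schnorr Wrong Key Test");
  let mut batch = BatchVerifier::new(1);
  let private = Zeroizing::new(G::Scalar::random(&mut OsRng));
  SchnorrPoK::prove(&mut OsRng, &mut transcript.clone(), G::generator(), &private).verify(
    &mut OsRng,
    &mut transcript.clone(),
    G::generator(),
    // Not the public key the proof was generated for
    (G::generator() * private.deref()) + G::generator(),
    &mut batch,
  );
  assert!(!batch.verify_vartime());
}
#[test]
fn test_secp256k1_wrong_key() {
  test_schnorr_rejects_wrong_key::<k256::ProjectivePoint>();
}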

@@ -1,152 +0,0 @@
use core::ops::Deref;
use hex_literal::hex;
use rand_core::OsRng;
use zeroize::Zeroizing;
use ff::Field;
use group::GroupEncoding;
use k256::{Scalar, ProjectivePoint};
use transcript::{Transcript, RecommendedTranscript};
use crate::{DLEqProof, MultiDLEqProof};
#[cfg(feature = "experimental")]
mod cross_group;
fn generators() -> [k256::ProjectivePoint; 5] {
[
ProjectivePoint::GENERATOR,
ProjectivePoint::from_bytes(
&(hex!("0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0").into()),
)
.unwrap(),
    // Each of the following is the previous encoding with its final byte incremented until it
    // decodes to a valid point
ProjectivePoint::from_bytes(
&(hex!("0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac4").into()),
)
.unwrap(),
ProjectivePoint::from_bytes(
&(hex!("0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803aca").into()),
)
.unwrap(),
ProjectivePoint::from_bytes(
&(hex!("0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803acb").into()),
)
.unwrap(),
]
}
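// Editor's sketch of how the constants above look to have been derived, not
// code from the original crate: bump the final byte of a compressed encoding
// until it decodes to another valid point.
#[allow(dead_code)]
fn next_valid_point_sketch(mut encoding: [u8; 33]) -> ProjectivePoint {
  loop {
    encoding[32] = encoding[32].wrapping_add(1);
    let point = ProjectivePoint::from_bytes(&encoding.into());
    // from_bytes yields a CtOption; roughly half of all x coordinates decode
    if bool::from(point.is_some()) {
      return point.unwrap();
    }
  }
}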
#[test]
fn test_dleq() {
let generators = generators();
let transcript = || RecommendedTranscript::new(b"DLEq Proof Test");
  // Test with 0 through all 5 generators
  for i in 0 ..= 5 {
let key = Zeroizing::new(Scalar::random(&mut OsRng));
let proof = DLEqProof::prove(&mut OsRng, &mut transcript(), &generators[.. i], &key);
let mut keys = [ProjectivePoint::GENERATOR; 5];
for k in 0 .. 5 {
keys[k] = generators[k] * key.deref();
}
proof.verify(&mut transcript(), &generators[.. i], &keys[.. i]).unwrap();
// Different challenge
assert!(proof
.verify(
&mut RecommendedTranscript::new(b"different challenge"),
&generators[.. i],
&keys[.. i]
)
.is_err());
    // Each of the following tests effectively amounts to just another distinct challenge,
    // making them redundant. They're still nice to have though.
    // These tests could be edited to always use at least two generators, which would stop
    // testing proofs with zero or one generator(s). While such proofs are pointless, and
    // potentially indicate a failure in the caller, they could also arise from a dynamic
    // system dealing with variable amounts of generators. Not panicking in those use cases,
    // even if they're inefficient, provides seamless behavior.
if i >= 2 {
// Different generators
assert!(proof
.verify(
&mut transcript(),
generators[.. i].iter().copied().rev().collect::<Vec<_>>().as_ref(),
&keys[.. i]
)
.is_err());
// Different keys
assert!(proof
.verify(
&mut transcript(),
&generators[.. i],
keys[.. i].iter().copied().rev().collect::<Vec<_>>().as_ref()
)
.is_err());
}
#[cfg(feature = "serialize")]
{
let mut buf = vec![];
proof.write(&mut buf).unwrap();
let deserialized = DLEqProof::<ProjectivePoint>::read::<&[u8]>(&mut buf.as_ref()).unwrap();
assert_eq!(proof, deserialized);
}
}
}
#[test]
fn test_multi_dleq() {
let generators = generators();
let transcript = || RecommendedTranscript::new(b"MultiDLEq Proof Test");
// Test up to 3 keys
for k in 0 ..= 3 {
let mut keys = vec![];
let mut these_generators = vec![];
let mut pub_keys = vec![];
for i in 0 .. k {
let key = Zeroizing::new(Scalar::random(&mut OsRng));
// For each key, test a variable set of generators
// 0: 0
// 1: 1, 2
// 2: 2, 3, 4
let key_generators = generators[i ..= (i + i)].to_vec();
let mut these_pub_keys = vec![];
for generator in &key_generators {
these_pub_keys.push(generator * key.deref());
}
keys.push(key);
these_generators.push(key_generators);
pub_keys.push(these_pub_keys);
}
let proof = MultiDLEqProof::prove(&mut OsRng, &mut transcript(), &these_generators, &keys);
proof.verify(&mut transcript(), &these_generators, &pub_keys).unwrap();
// Different challenge
assert!(proof
.verify(&mut RecommendedTranscript::new(b"different challenge"), &these_generators, &pub_keys)
.is_err());
    // Test that verifying with a different number of keys fails
if k > 0 {
assert!(proof.verify(&mut transcript(), &these_generators, &pub_keys[.. k - 1]).is_err());
}
#[cfg(feature = "serialize")]
{
let mut buf = vec![];
proof.write(&mut buf).unwrap();
let deserialized =
MultiDLEqProof::<ProjectivePoint>::read::<&[u8]>(&mut buf.as_ref(), k).unwrap();
assert_eq!(proof, deserialized);
}
}
}