#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]

use core::ops::Deref;

use rand_core::{RngCore, CryptoRng};

use zeroize::{Zeroize, Zeroizing};

use transcript::Transcript;

use ff::{Field, PrimeField};
use group::prime::PrimeGroup;

#[cfg(feature = "serialize")]
use std::io::{self, ErrorKind, Error, Read, Write};

#[cfg(feature = "experimental")]
pub mod cross_group;

#[cfg(test)]
mod tests;
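
// Produce a scalar challenge from the transcript, without bias, via an equivalent of wide
// reduction. Used as the Fiat-Shamir challenge by both DLEqProof and MultiDLEqProof below.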
pub(crate) fn challenge<T: Transcript, F: PrimeField>(transcript: &mut T) -> F {
  // From here, there are three ways to get a scalar under the ff/group API:
  // 1: Scalar::random(ChaCha20Rng::from_seed(self.transcript.rng_seed(b"challenge")))
  // 2: Grabbing a UInt library to perform reduction by the modulus, then determining
  //    endianness and loading it in
  // 3: Iterating over each byte and manually doubling/adding. This is simplest

  let mut challenge = F::zero();

  // Get a wide amount of bytes to safely reduce without bias
  // In most cases, <= 1.5x bytes is enough. 2x is still the standard, and there are some
  // theoretical groups which may technically require more than 1.5x bytes for this to work as
  // intended
  let target_bytes = ((usize::try_from(F::NUM_BITS).unwrap() + 7) / 8) * 2;
  let mut challenge_bytes = transcript.challenge(b"challenge");
  let challenge_bytes_len = challenge_bytes.as_ref().len();
  // If the challenge is 32 bytes, and we need 64, we need two challenges
  let needed_challenges = (target_bytes + (challenge_bytes_len - 1)) / challenge_bytes_len;
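  // As a worked example (an illustrative assumption, not a requirement): for a 255-bit field,
  // target_bytes = ((255 + 7) / 8) * 2 = 64, so a transcript producing 32-byte challenges
  // yields needed_challenges = (64 + 31) / 32 = 2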

  // The following algorithm should be equivalent to a wide reduction of the challenges,
  // interpreted as a concatenated, big-endian byte string
  let mut handled_bytes = 0;
  'outer: for _ in 0 ..= needed_challenges {
    // Cursor of which byte of the challenge to use next
    let mut b = 0;
    while b < challenge_bytes_len {
      // Get the next amount of bytes to attempt
      // Only grabs the needed amount of bytes, up to 8 at a time (u64), so long as they're
      // available in the challenge
      let chunk_bytes = (target_bytes - handled_bytes).min(8).min(challenge_bytes_len - b);

      let mut chunk = 0;
      for _ in 0 .. chunk_bytes {
        chunk <<= 8;
        chunk |= u64::from(challenge_bytes.as_ref()[b]);
        b += 1;
      }
      // Add this chunk
      challenge += F::from(chunk);

      handled_bytes += chunk_bytes;
      // If we've reached the target amount of bytes, break
      if handled_bytes == target_bytes {
        break 'outer;
      }

      // Shift over by however many bits will be in the next chunk
      let next_chunk_bytes = (target_bytes - handled_bytes).min(8).min(challenge_bytes_len);
      for _ in 0 .. (next_chunk_bytes * 8) {
        challenge = challenge.double();
      }
    }

    // Secure thanks to the Transcript trait's bound that obtaining a challenge updates the
    // transcript
    challenge_bytes = transcript.challenge(b"challenge_extension");
  }

  challenge
}

#[cfg(feature = "serialize")]
fn read_scalar<R: Read, F: PrimeField>(r: &mut R) -> io::Result<F> {
  let mut repr = F::Repr::default();
  r.read_exact(repr.as_mut())?;
  let scalar = F::from_repr(repr);
  if scalar.is_none().into() {
    Err(Error::new(ErrorKind::Other, "invalid scalar"))?;
  }
  Ok(scalar.unwrap())
}
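
/// Error possible when verifying a DLEq proof.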
#[derive(Debug)]
pub enum DLEqError {
  InvalidProof,
}
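
/// A proof that a single scalar is the discrete logarithm of multiple points with respect to
/// their respective generators.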
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct DLEqProof<G: PrimeGroup> {
  c: G::Scalar,
  s: G::Scalar,
}

#[allow(non_snake_case)]
impl<G: PrimeGroup> DLEqProof<G> {
  fn transcript<T: Transcript>(transcript: &mut T, generator: G, nonce: G, point: G) {
    transcript.append_message(b"generator", generator.to_bytes());
    transcript.append_message(b"nonce", nonce.to_bytes());
    transcript.append_message(b"point", point.to_bytes());
  }
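
  /// Prove that the discrete logarithm of each generator's corresponding point is this single
  /// scalar.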
  pub fn prove<R: RngCore + CryptoRng, T: Transcript>(
    rng: &mut R,
    transcript: &mut T,
    generators: &[G],
    scalar: &Zeroizing<G::Scalar>,
  ) -> DLEqProof<G>
  where
    G::Scalar: Zeroize,
  {
    let r = Zeroizing::new(G::Scalar::random(rng));

    transcript.domain_separate(b"dleq");
    for generator in generators {
      // R, A
      Self::transcript(transcript, *generator, *generator * r.deref(), *generator * scalar.deref());
    }

    let c = challenge(transcript);
    // r + ca
    let s = (c * scalar.deref()) + r.deref();

    DLEqProof { c, s }
  }
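
  /// Verify that a single discrete logarithm relates each point to its corresponding generator.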
  pub fn verify<T: Transcript>(
    &self,
    transcript: &mut T,
    generators: &[G],
    points: &[G],
  ) -> Result<(), DLEqError> {
    if generators.len() != points.len() {
      Err(DLEqError::InvalidProof)?;
    }

    transcript.domain_separate(b"dleq");
    for (generator, point) in generators.iter().zip(points) {
      // s = r + ca
      // sG - cA = R
      // R, A
      Self::transcript(transcript, *generator, (*generator * self.s) - (*point * self.c), *point);
    }

    if self.c != challenge(transcript) {
      Err(DLEqError::InvalidProof)?;
    }

    Ok(())
  }
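
  /// Write a DLEq proof to something implementing Write.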
  #[cfg(feature = "serialize")]
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    w.write_all(self.c.to_repr().as_ref())?;
    w.write_all(self.s.to_repr().as_ref())
  }
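
  /// Read a DLEq proof from something implementing Read.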
  #[cfg(feature = "serialize")]
  pub fn read<R: Read>(r: &mut R) -> io::Result<DLEqProof<G>> {
    Ok(DLEqProof { c: read_scalar(r)?, s: read_scalar(r)? })
  }
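
  /// Serialize a DLEq proof to a Vec<u8>.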
  #[cfg(feature = "serialize")]
  pub fn serialize(&self) -> Vec<u8> {
    let mut res = vec![];
    self.write(&mut res).unwrap();
    res
  }
}
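
/// A proof for multiple independent discrete logarithms, each proven consistent across its own
/// set of generators.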
#[cfg(feature = "std")]
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct MultiDLEqProof<G: PrimeGroup> {
  c: G::Scalar,
  s: Vec<G::Scalar>,
}

#[cfg(feature = "std")]
#[allow(non_snake_case)]
impl<G: PrimeGroup> MultiDLEqProof<G> {
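  /// Prove each scalar is the discrete logarithm of its own set of generators' corresponding
  /// points. scalars[i] is proven across generators[i].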
  pub fn prove<R: RngCore + CryptoRng, T: Transcript>(
    rng: &mut R,
    transcript: &mut T,
    generators: &[Vec<G>],
    scalars: &[Zeroizing<G::Scalar>],
  ) -> MultiDLEqProof<G>
  where
    G::Scalar: Zeroize,
  {
    transcript.domain_separate(b"multi-dleq");

    let mut nonces = vec![];
    for (i, (scalar, generators)) in scalars.iter().zip(generators).enumerate() {
      // Delineate between discrete logarithms
      transcript.append_message(b"discrete_logarithm", i.to_le_bytes());

      let nonce = Zeroizing::new(G::Scalar::random(&mut *rng));
      for generator in generators {
        DLEqProof::transcript(
          transcript,
          *generator,
          *generator * nonce.deref(),
          *generator * scalar.deref(),
        );
      }
      nonces.push(nonce);
    }

    let c = challenge(transcript);

    let mut s = vec![];
    for (scalar, nonce) in scalars.iter().zip(nonces) {
      s.push((c * scalar.deref()) + nonce.deref());
    }

    MultiDLEqProof { c, s }
  }
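
  /// Verify each set of points shares its claimed discrete logarithm across its own set of
  /// generators.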
  pub fn verify<T: Transcript>(
    &self,
    transcript: &mut T,
    generators: &[Vec<G>],
    points: &[Vec<G>],
  ) -> Result<(), DLEqError> {
    if points.len() != generators.len() {
      Err(DLEqError::InvalidProof)?;
    }
    if self.s.len() != generators.len() {
      Err(DLEqError::InvalidProof)?;
    }

    transcript.domain_separate(b"multi-dleq");
    for (i, (generators, points)) in generators.iter().zip(points).enumerate() {
      if points.len() != generators.len() {
        Err(DLEqError::InvalidProof)?;
      }

      transcript.append_message(b"discrete_logarithm", i.to_le_bytes());
      for (generator, point) in generators.iter().zip(points) {
        DLEqProof::transcript(
          transcript,
          *generator,
          (*generator * self.s[i]) - (*point * self.c),
          *point,
        );
      }
    }

    if self.c != challenge(transcript) {
      Err(DLEqError::InvalidProof)?;
    }

    Ok(())
  }
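
  /// Write a multi-DLEq proof to something implementing Write.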
  #[cfg(feature = "serialize")]
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    w.write_all(self.c.to_repr().as_ref())?;
    for s in &self.s {
      w.write_all(s.to_repr().as_ref())?;
    }
    Ok(())
  }
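
  /// Read a multi-DLEq proof from something implementing Read. The amount of discrete
  /// logarithms proven for must be specified, as it isn't encoded in the serialization.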
  #[cfg(feature = "serialize")]
  pub fn read<R: Read>(r: &mut R, discrete_logs: usize) -> io::Result<MultiDLEqProof<G>> {
    let c = read_scalar(r)?;
    let mut s = vec![];
    for _ in 0 .. discrete_logs {
      s.push(read_scalar(r)?);
    }
    Ok(MultiDLEqProof { c, s })
  }
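
  /// Serialize a multi-DLEq proof to a Vec<u8>.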
  #[cfg(feature = "serialize")]
  pub fn serialize(&self) -> Vec<u8> {
    let mut res = vec![];
    self.write(&mut res).unwrap();
    res
  }
}
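
// A minimal prove/verify roundtrip for DLEqProof, provided as a usage sketch. It assumes
// dev-dependencies matching this crate's test suite (k256 for a PrimeGroup with zeroizable
// scalars, the transcript crate's RecommendedTranscript); the transcript label and the second
// generator are arbitrary values chosen for illustration only.
#[cfg(test)]
mod usage_sketch {
  use core::ops::Deref;

  use rand_core::OsRng;
  use zeroize::Zeroizing;
  use transcript::{Transcript, RecommendedTranscript};
  use ff::Field;
  use k256::{Scalar, ProjectivePoint};

  use crate::DLEqProof;

  #[test]
  fn dleq_roundtrip() {
    // Any two generators exercise the API. Real deployments require generators without a known
    // discrete log relationship, unlike these
    let generators =
      [ProjectivePoint::GENERATOR, ProjectivePoint::GENERATOR * Scalar::from(5u64)];

    // The prover and verifier must construct identical transcripts
    let transcript = RecommendedTranscript::new(b"DLEq Usage Sketch");

    // The witness, and the points it's claimed to be the discrete logarithm of
    let scalar = Zeroizing::new(Scalar::random(&mut OsRng));
    let points = [generators[0] * scalar.deref(), generators[1] * scalar.deref()];

    let proof = DLEqProof::prove(&mut OsRng, &mut transcript.clone(), &generators, &scalar);
    proof.verify(&mut transcript.clone(), &generators, &points).unwrap();
  }
}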