* Partial move to ff 0.13

It turns out the newly released k256 0.12 isn't on ff 0.13, preventing further
work at this time.

* Update all crates to work on ff 0.13

The provided curves still need to be expanded to fit the new API.
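As a reference for the new API (a minimal sketch, not code from this commit): ff 0.13 replaces the zero()/one() constructors with the associated constants ZERO/ONE, which is the bulk of the mechanical migration in the diffs below. The function sum_with_identity and the use of k256's scalar field are purely illustrative.

use ff::Field;

// ff 0.12's zero()/one() constructors became the associated constants
// Field::ZERO and Field::ONE in ff 0.13.
fn sum_with_identity<F: Field>(values: &[F]) -> F {
  // ff 0.12: let mut acc = F::zero();
  let mut acc = F::ZERO;
  for value in values {
    acc += value;
  }
  acc
}

fn main() {
  // Assumes k256 0.13, which tracks ff 0.13.
  use k256::Scalar;
  assert_eq!(sum_with_identity(&[Scalar::ONE, Scalar::ONE.double()]), Scalar::from(3u64));
}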

* Finish adding dalek-ff-group ff 0.13 constants

* Correct FieldElement::product definition

Also stops exporting macros.
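As a rough sketch of the shape such a fix takes (an assumption, not the actual dalek-ff-group code): a Product implementation for a field element should fold with multiplication starting from the multiplicative identity. Wrapper and the choice of k256's scalar as the inner field are hypothetical stand-ins.

use core::iter::Product;
use ff::Field;

// Hypothetical newtype standing in for a FieldElement.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Wrapper(k256::Scalar);

impl Product for Wrapper {
  fn product<I: Iterator<Item = Wrapper>>(iter: I) -> Wrapper {
    // Fold from ONE (not ZERO), so the product of nothing is the identity.
    iter.fold(Wrapper(k256::Scalar::ONE), |acc, item| Wrapper(acc.0 * item.0))
  }
}

fn main() {
  let elems = [Wrapper(k256::Scalar::from(2u64)), Wrapper(k256::Scalar::from(3u64))];
  assert_eq!(elems.into_iter().product::<Wrapper>(), Wrapper(k256::Scalar::from(6u64)));
}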

* Test most new parts of ff 0.13

* Additionally test ff-group-tests with BLS12-381 and the pasta curves

Previously, we only tested curves from RustCrypto. Now we also test a curve offered by zk-crypto,
the group behind ff/group, and the pasta curves, which are from Zcash (though
Zcash developers are also behind zk-crypto).
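A minimal sketch of the kind of generic property check run against these additional fields (not the actual ff-group-tests suite); it assumes bls12_381 and pasta_curves releases that track ff 0.13, and rand_core with OsRng available.

use ff::Field;
use rand_core::OsRng;

// Generic sanity checks any ff 0.13 Field implementation should pass.
fn check_field<F: Field>() {
  let a = F::random(&mut OsRng);
  assert_eq!(a + F::ZERO, a);
  assert_eq!(a * F::ONE, a);
  assert_eq!(a - a, F::ZERO);
  assert_eq!(a.double(), a + a);
}

fn main() {
  check_field::<bls12_381::Scalar>();
  check_field::<pasta_curves::Fp>();
  check_field::<pasta_curves::Fq>();
}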

* Finish Ed448

Fully specifies all constants, passes all tests in ff-group-tests, and finishes the move to ff 0.13.
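To illustrate what specifying all constants entails (a sketch, not the Ed448 code itself): ff 0.13's PrimeField requires constants such as TWO_INV, ROOT_OF_UNITY_INV, and DELTA, which must satisfy the relations checked below. k256's scalar field stands in for Ed448's field here.

use ff::PrimeField;

// Consistency relations the ff 0.13 PrimeField constants must satisfy.
fn check_constants<F: PrimeField>() {
  // TWO_INV is the inverse of two.
  assert_eq!(F::TWO_INV * F::from(2), F::ONE);
  // ROOT_OF_UNITY_INV is the inverse of ROOT_OF_UNITY.
  assert_eq!(F::ROOT_OF_UNITY * F::ROOT_OF_UNITY_INV, F::ONE);
  // ROOT_OF_UNITY is a 2^S-th root of unity.
  let mut root = F::ROOT_OF_UNITY;
  for _ in 0 .. F::S {
    root = root.square();
  }
  assert_eq!(root, F::ONE);
  // DELTA is the multiplicative generator raised to 2^S.
  let mut delta = F::MULTIPLICATIVE_GENERATOR;
  for _ in 0 .. F::S {
    delta = delta.square();
  }
  assert_eq!(delta, F::DELTA);
}

fn main() {
  check_constants::<k256::Scalar>();
}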

* Add RustCrypto/elliptic-curves to allowed git repos

Needed due to k256/p256 incorrectly defining product.
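For context, and as an assumption about the issue's shape based solely on the line above: the conventional expectation for core::iter::Product on a field element is that an empty iterator yields the multiplicative identity. Against a k256 with a correct definition, a check of that property would look like:

use ff::Field;
use k256::Scalar;

fn main() {
  // The empty product is expected to be ONE, not ZERO.
  let empty: [Scalar; 0] = [];
  assert_eq!(empty.into_iter().product::<Scalar>(), Scalar::ONE);

  let elems = [Scalar::from(2u64), Scalar::from(3u64)];
  assert_eq!(elems.into_iter().product::<Scalar>(), Scalar::from(6u64));
}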

* Finish writing ff 0.13 tests

* Add additional comments to dalek

* Further comments

* Update ethereum-serai to ff 0.13

Luke Parker authored 2023-03-28 04:38:01 -04:00, committed by GitHub
parent a9f6300e86, commit 79aff5d4c8
59 changed files with 865 additions and 429 deletions

@@ -21,8 +21,8 @@ digest = "0.10"
transcript = { package = "flexible-transcript", path = "../transcript", version = "0.3" }
ff = "0.12"
group = "0.12"
ff = "0.13"
group = "0.13"
multiexp = { path = "../multiexp", version = "0.3", features = ["batch"], optional = true }
@@ -31,7 +31,7 @@ hex-literal = "0.3"
blake2 = "0.10"
k256 = { version = "0.12", features = ["arithmetic", "bits"] }
k256 = { version = "0.13", features = ["arithmetic", "bits"] }
dalek-ff-group = { path = "../dalek-ff-group" }
transcript = { package = "flexible-transcript", path = "../transcript", features = ["recommended"] }

@@ -42,7 +42,7 @@ impl<G0: PrimeGroup, G1: PrimeGroup> Re<G0, G1> {
}
pub(crate) fn e_default() -> Re<G0, G1> {
-Re::e(G0::Scalar::zero())
+Re::e(G0::Scalar::ZERO)
}
}
@@ -114,7 +114,7 @@ where
debug_assert!((RING_LEN == 2) || (RING_LEN == 4));
debug_assert_eq!(RING_LEN, ring.len());
-let mut s = [(G0::Scalar::zero(), G1::Scalar::zero()); RING_LEN];
+let mut s = [(G0::Scalar::ZERO, G1::Scalar::ZERO); RING_LEN];
let mut r = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng));
#[allow(non_snake_case)]
@@ -178,8 +178,8 @@ where
let mut statements =
Self::R_batch(generators, *self.s.last().unwrap(), *ring.last().unwrap(), e);
-statements.0.push((G0::Scalar::one(), R0_0));
-statements.1.push((G1::Scalar::one(), R1_0));
+statements.0.push((G0::Scalar::ONE, R0_0));
+statements.1.push((G1::Scalar::ONE, R1_0));
batch.0.queue(&mut *rng, (), statements.0);
batch.1.queue(&mut *rng, (), statements.1);
}
@@ -239,7 +239,7 @@ where
Re::e(ref mut e) => *e = read_scalar(r)?,
}
-let mut s = [(G0::Scalar::zero(), G1::Scalar::zero()); RING_LEN];
+let mut s = [(G0::Scalar::ZERO, G1::Scalar::ZERO); RING_LEN];
for s in s.iter_mut() {
*s = (read_scalar(r)?, read_scalar(r)?);
}

@@ -269,15 +269,15 @@ where
SchnorrPoK::<G1>::prove(rng, transcript, generators.1.primary, &f.1),
);
-let mut blinding_key_total = (G0::Scalar::zero(), G1::Scalar::zero());
+let mut blinding_key_total = (G0::Scalar::ZERO, G1::Scalar::ZERO);
let mut blinding_key = |rng: &mut R, last| {
let blinding_key = (
Self::blinding_key(&mut *rng, &mut blinding_key_total.0, last),
Self::blinding_key(&mut *rng, &mut blinding_key_total.1, last),
);
if last {
-debug_assert_eq!(blinding_key_total.0, G0::Scalar::zero());
-debug_assert_eq!(blinding_key_total.1, G1::Scalar::zero());
+debug_assert_eq!(blinding_key_total.0, G0::Scalar::ZERO);
+debug_assert_eq!(blinding_key_total.1, G1::Scalar::ZERO);
}
blinding_key
};

@@ -18,8 +18,8 @@ pub fn scalar_normalize<F0: PrimeFieldBits + Zeroize, F1: PrimeFieldBits>(
#[cfg(feature = "secure_capacity_difference")]
assert!((F0::CAPACITY.max(F1::CAPACITY) - mutual_capacity) <= 4);
-let mut res1 = F0::zero();
-let mut res2 = F1::zero();
+let mut res1 = F0::ZERO;
+let mut res2 = F1::ZERO;
// Uses the bits API to ensure a consistent endianess
let mut bits = scalar.to_le_bits();
scalar.zeroize();
@@ -66,7 +66,7 @@ pub fn mutual_scalar_from_bytes<F0: PrimeFieldBits + Zeroize, F1: PrimeFieldBits
let capacity = usize::try_from(F0::CAPACITY.min(F1::CAPACITY)).unwrap();
debug_assert!((bytes.len() * 8) >= capacity);
-let mut accum = F0::zero();
+let mut accum = F0::ZERO;
for b in 0 .. capacity {
accum = accum.double();
accum += F0::from(((bytes[b / 8] >> (b % 8)) & 1).into());

@@ -72,7 +72,7 @@ where
(),
[
(-self.s, generator),
-(G::Scalar::one(), self.R),
+(G::Scalar::ONE, self.R),
(Self::hra(transcript, generator, self.R, public_key), public_key),
],
);

@@ -32,7 +32,7 @@ pub(crate) fn challenge<T: Transcript, F: PrimeField>(transcript: &mut T) -> F {
// and loading it in
// 3: Iterating over each byte and manually doubling/adding. This is simplest
-let mut challenge = F::zero();
+let mut challenge = F::ZERO;
// Get a wide amount of bytes to safely reduce without bias
// In most cases, <=1.5x bytes is enough. 2x is still standard and there's some theoretical
@@ -105,13 +105,19 @@ pub enum DLEqError {
/// A proof that points have the same discrete logarithm across generators.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct DLEqProof<G: PrimeGroup> {
pub struct DLEqProof<G: PrimeGroup>
where
G::Scalar: Zeroize,
{
c: G::Scalar,
s: G::Scalar,
}
#[allow(non_snake_case)]
impl<G: PrimeGroup> DLEqProof<G> {
impl<G: PrimeGroup> DLEqProof<G>
where
G::Scalar: Zeroize,
{
fn transcript<T: Transcript>(transcript: &mut T, generator: G, nonce: G, point: G) {
transcript.append_message(b"generator", generator.to_bytes());
transcript.append_message(b"nonce", nonce.to_bytes());
@@ -125,10 +131,7 @@ impl<G: PrimeGroup> DLEqProof<G> {
transcript: &mut T,
generators: &[G],
scalar: &Zeroizing<G::Scalar>,
) -> DLEqProof<G>
where
G::Scalar: Zeroize,
{
) -> DLEqProof<G> {
let r = Zeroizing::new(G::Scalar::random(rng));
transcript.domain_separate(b"dleq");
@@ -210,14 +213,20 @@ impl<G: PrimeGroup> DLEqProof<G> {
/// across some generators, yet with a smaller overall proof size.
#[cfg(feature = "std")]
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct MultiDLEqProof<G: PrimeGroup> {
pub struct MultiDLEqProof<G: PrimeGroup>
where
G::Scalar: Zeroize,
{
c: G::Scalar,
s: Vec<G::Scalar>,
}
#[cfg(feature = "std")]
#[allow(non_snake_case)]
impl<G: PrimeGroup> MultiDLEqProof<G> {
impl<G: PrimeGroup> MultiDLEqProof<G>
where
G::Scalar: Zeroize,
{
/// Prove for each scalar that the series of points created by multiplying it against its
/// matching generators share a discrete logarithm.
/// This function panics if `generators.len() != scalars.len()`.
@@ -226,10 +235,7 @@ impl<G: PrimeGroup> MultiDLEqProof<G> {
transcript: &mut T,
generators: &[Vec<G>],
scalars: &[Zeroizing<G::Scalar>],
) -> MultiDLEqProof<G>
where
G::Scalar: Zeroize,
{
) -> MultiDLEqProof<G> {
assert_eq!(
generators.len(),
scalars.len(),

@@ -21,7 +21,7 @@ fn test_aos_serialization<const RING_LEN: usize>(proof: Aos<G0, G1, RING_LEN>, R
fn test_aos<const RING_LEN: usize>(default: Re<G0, G1>) {
let generators = generators();
let mut ring_keys = [(<G0 as Group>::Scalar::zero(), <G1 as Group>::Scalar::zero()); RING_LEN];
let mut ring_keys = [(<G0 as Group>::Scalar::ZERO, <G1 as Group>::Scalar::ZERO); RING_LEN];
// Side-effect of G0 being a type-alias with identity() deprecated
#[allow(deprecated)]
let mut ring = [(G0::identity(), G1::identity()); RING_LEN];

@@ -154,7 +154,7 @@ test_dleq!(
#[test]
fn test_rejection_sampling() {
-let mut pow_2 = Scalar::one();
+let mut pow_2 = Scalar::ONE;
for _ in 0 .. dfg::Scalar::CAPACITY {
pow_2 = pow_2.double();
}
@@ -179,7 +179,7 @@ fn test_remainder() {
// This will ignore any unused bits, ensuring every remaining one is set
let keys = mutual_scalar_from_bytes::<Scalar, Scalar>(&[0xFF; 32]);
let keys = (Zeroizing::new(keys.0), Zeroizing::new(keys.1));
-assert_eq!(Scalar::one() + keys.0.deref(), Scalar::from(2u64).pow_vartime([255]));
+assert_eq!(Scalar::ONE + keys.0.deref(), Scalar::from(2u64).pow_vartime([255]));
assert_eq!(keys.0, keys.1);
let (proof, res) = ConciseLinearDLEq::prove_without_bias(

@@ -10,13 +10,13 @@ use crate::cross_group::scalar::{scalar_normalize, scalar_convert};
#[test]
fn test_scalar() {
assert_eq!(
scalar_normalize::<_, DalekScalar>(K256Scalar::zero()),
(K256Scalar::zero(), DalekScalar::zero())
scalar_normalize::<_, DalekScalar>(K256Scalar::ZERO),
(K256Scalar::ZERO, DalekScalar::ZERO)
);
assert_eq!(
scalar_normalize::<_, DalekScalar>(K256Scalar::one()),
(K256Scalar::one(), DalekScalar::one())
scalar_normalize::<_, DalekScalar>(K256Scalar::ONE),
(K256Scalar::ONE, DalekScalar::ONE)
);
let mut initial;