Merge branch 'develop' into firo

This commit is contained in:
Luke Parker
2022-07-12 01:29:37 -04:00
86 changed files with 5678 additions and 2076 deletions

View File

@@ -1,9 +1,11 @@
[package]
name = "dalek-ff-group"
version = "0.1.0"
version = "0.1.1"
description = "ff/group bindings around curve25519-dalek"
license = "MIT"
repository = "https://github.com/serai-dex/serai"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["curve25519", "ed25519", "ristretto", "dalek", "group"]
edition = "2021"
[dependencies]
@@ -12,7 +14,8 @@ digest = "0.10"
subtle = "2.4"
ff = "0.11"
group = "0.11"
ff = "0.12"
group = "0.12"
crypto-bigint = "0.4"
curve25519-dalek = "3.2"

View File

@@ -1,3 +1,6 @@
# Dalek FF/Group
ff/group bindings around curve25519-dalek with a random function based around a more modern rand_core.
ff/group bindings around curve25519-dalek with a from_hash/random function based
around modern dependencies.
Some functions currently remain unimplemented.

View File

@@ -0,0 +1,142 @@
use core::ops::{Add, AddAssign, Sub, SubAssign, Neg, Mul, MulAssign};
use rand_core::RngCore;
use subtle::{Choice, CtOption, ConstantTimeEq, ConditionallySelectable};
use crypto_bigint::{Encoding, U256, U512};
use ff::{Field, PrimeField, FieldBits, PrimeFieldBits};
use crate::{choice, constant_time, math_op, math, from_wrapper, from_uint};
// The Curve25519 base-field modulus: 2^255 - 19.
const FIELD_MODULUS: U256 = U256::from_be_hex(
  "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed"
);

// An element of the field mod 2^255 - 19, stored as a reduced U256.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Default)]
pub struct FieldElement(U256);

// sqrt(-1) mod (2^255 - 19); see test_mul, which asserts SQRT_M1.square() == -1.
// Needed for square roots as the modulus is congruent to 5 mod 8.
pub const SQRT_M1: FieldElement = FieldElement(
  U256::from_be_hex("2b8324804fc1df0b2b4d00993dfbd7a72f431806ad2fe478c4ee1b274a0ea0b0")
);
// Constant-time equality/selection for FieldElement, forwarded to U256.
constant_time!(FieldElement, U256);
// Add/Sub/Mul (and the *Assign/by-reference forms), all reduced mod the field modulus.
math!(
  FieldElement,
  FieldElement,
  |x, y| U256::add_mod(&x, &y, &FIELD_MODULUS),
  |x, y| U256::sub_mod(&x, &y, &FIELD_MODULUS),
  |x, y| {
    #[allow(non_snake_case)]
    let WIDE_MODULUS: U512 = U512::from((U256::ZERO, FIELD_MODULUS));
    debug_assert_eq!(FIELD_MODULUS.to_le_bytes()[..], WIDE_MODULUS.to_le_bytes()[.. 32]);

    // Multiply at double width, reduce mod the (widened) modulus, then truncate
    // back to 256 bits — safe as the reduced value is below the 256-bit modulus
    let wide = U256::mul_wide(&x, &y);
    U256::from_le_slice(
      &U512::from((wide.1, wide.0)).reduce(&WIDE_MODULUS).unwrap().to_le_bytes()[.. 32]
    )
  }
);
// From<u8/u16/u32/u64> conversions for FieldElement.
from_uint!(FieldElement, U256);
impl Neg for FieldElement {
  type Output = Self;
  // Additive inverse mod the field modulus.
  fn neg(self) -> Self::Output { Self(self.0.neg_mod(&FIELD_MODULUS)) }
}
impl Field for FieldElement {
  /// Sample a uniform field element by wide (512-bit) reduction of 64 random bytes.
  fn random(mut rng: impl RngCore) -> Self {
    let mut bytes = [0; 64];
    rng.fill_bytes(&mut bytes);

    #[allow(non_snake_case)]
    let WIDE_MODULUS: U512 = U512::from((U256::ZERO, FIELD_MODULUS));
    debug_assert_eq!(FIELD_MODULUS.to_le_bytes()[..], WIDE_MODULUS.to_le_bytes()[.. 32]);

    FieldElement(
      U256::from_le_slice(
        &U512::from_be_bytes(bytes).reduce(&WIDE_MODULUS).unwrap().to_le_bytes()[.. 32]
      )
    )
  }

  fn zero() -> Self { Self(U256::ZERO) }
  fn one() -> Self { Self(U256::ONE) }
  fn square(&self) -> Self { *self * self }
  fn double(&self) -> Self { *self + self }

  /// Inversion via Fermat's little theorem: self^(p - 2), with None for zero.
  /// -FieldElement(2) is p - 2 reduced into the field.
  fn invert(&self) -> CtOption<Self> {
    CtOption::new(self.pow(-FieldElement(U256::from(2u64))), !self.is_zero())
  }

  /// Square root for a modulus congruent to 5 mod 8: the candidate is
  /// self^((p + 3) / 8), multiplied by sqrt(-1) when its square is -self.
  fn sqrt(&self) -> CtOption<Self> {
    let c1 = SQRT_M1;
    let c2 = FIELD_MODULUS.saturating_add(&U256::from(3u8)).checked_div(&U256::from(8u8)).unwrap();

    let tv1 = self.pow(FieldElement(c2));
    let tv2 = tv1 * c1;
    let res = Self::conditional_select(&tv2, &tv1, tv1.square().ct_eq(self));
    // Report failure when self has no square root, instead of unconditionally
    // claiming success (the prior debug_assert would panic on non-residues in
    // debug builds and silently return garbage in release builds)
    CtOption::new(res, res.square().ct_eq(self))
  }

  fn is_zero(&self) -> Choice { self.0.ct_eq(&U256::ZERO) }
  fn cube(&self) -> Self { *self * self * self }
  fn pow_vartime<S: AsRef<[u64]>>(&self, _exp: S) -> Self { unimplemented!() }
}
impl PrimeField for FieldElement {
type Repr = [u8; 32];
const NUM_BITS: u32 = 255;
const CAPACITY: u32 = 254;
fn from_repr(bytes: [u8; 32]) -> CtOption<Self> {
let res = Self(U256::from_le_bytes(bytes));
CtOption::new(res, res.0.add_mod(&U256::ZERO, &FIELD_MODULUS).ct_eq(&res.0))
}
fn to_repr(&self) -> [u8; 32] { self.0.to_le_bytes() }
const S: u32 = 2;
fn is_odd(&self) -> Choice { unimplemented!() }
fn multiplicative_generator() -> Self { 2u64.into() }
fn root_of_unity() -> Self {
FieldElement(
U256::from_be_hex("2b8324804fc1df0b2b4d00993dfbd7a72f431806ad2fe478c4ee1b274a0ea0b0")
)
}
}
impl PrimeFieldBits for FieldElement {
  type ReprBits = [u8; 32];

  // Little-endian bit decomposition of this element.
  fn to_le_bits(&self) -> FieldBits<Self::ReprBits> {
    self.to_repr().into()
  }

  // Little-endian bits of the field's characteristic (the modulus).
  fn char_le_bits() -> FieldBits<Self::ReprBits> {
    FIELD_MODULUS.to_le_bytes().into()
  }
}
impl FieldElement {
  /// Interpret 32 little-endian bytes as a field element and return its square.
  pub fn from_square(value: [u8; 32]) -> FieldElement {
    let value = U256::from_le_bytes(value);
    FieldElement(value) * FieldElement(value)
  }

  /// Exponentiation by square-and-multiply over the little-endian bits of other.
  /// Uses conditional_select per bit (multiplying by one for clear bits) to avoid
  /// a secret-dependent branch.
  pub fn pow(&self, other: FieldElement) -> FieldElement {
    let mut res = FieldElement(U256::ONE);
    let mut m = *self;
    for bit in other.to_le_bits() {
      res *= FieldElement::conditional_select(&FieldElement(U256::ONE), &m, choice(bit));
      m *= m;
    }
    res
  }
}
#[test]
fn test_mul() {
  // The modulus (and any multiple of it) is congruent to zero
  assert_eq!(FieldElement(FIELD_MODULUS) * FieldElement::one(), FieldElement::zero());
  assert_eq!(FieldElement(FIELD_MODULUS) * FieldElement::one().double(), FieldElement::zero());
  // SQRT_M1 squares to -1
  assert_eq!(SQRT_M1.square(), -FieldElement::one());
}

View File

@@ -1,114 +1,189 @@
#![no_std]
use core::{
ops::{Deref, Add, AddAssign, Sub, SubAssign, Neg, Mul, MulAssign},
borrow::Borrow,
iter::{Iterator, Sum}
};
use subtle::{ConstantTimeEq, ConditionallySelectable};
use rand_core::RngCore;
use digest::{consts::U64, Digest};
use subtle::{Choice, CtOption, ConstantTimeEq, ConditionallySelectable};
use subtle::{Choice, CtOption};
pub use curve25519_dalek as dalek;
use dalek::{
constants,
traits::{Identity, IsIdentity},
traits::Identity,
scalar::Scalar as DScalar,
edwards::{
EdwardsPoint as DPoint,
EdwardsBasepointTable as DTable,
CompressedEdwardsY as DCompressed
EdwardsPoint as DEdwardsPoint,
EdwardsBasepointTable as DEdwardsBasepointTable,
CompressedEdwardsY as DCompressedEdwards
},
ristretto::{
RistrettoPoint as DRistrettoPoint,
RistrettoBasepointTable as DRistrettoBasepointTable,
CompressedRistretto as DCompressedRistretto
}
};
use ff::{Field, PrimeField};
use group::Group;
use ff::{Field, PrimeField, FieldBits, PrimeFieldBits};
use group::{Group, GroupEncoding, prime::PrimeGroup};
pub mod field;
// Convert a boolean to a Choice in a *presumably* constant time manner
fn choice(value: bool) -> Choice {
  // Rust guarantees true/false cast to 1/0
  let bit = value as u8;
  debug_assert_eq!(bit | 1, 1);
  Choice::from(bit)
}
// Implement Deref and Borrow (for both the wrapper and references to it),
// forwarding to the wrapped dalek type.
macro_rules! deref_borrow {
  ($Source: ident, $Target: ident) => {
    impl Deref for $Source {
      type Target = $Target;
      fn deref(&self) -> &Self::Target {
        &self.0
      }
    }

    impl Borrow<$Target> for $Source {
      fn borrow(&self) -> &$Target {
        &self.0
      }
    }

    impl Borrow<$Target> for &$Source {
      fn borrow(&self) -> &$Target {
        &self.0
      }
    }
  }
}
// Implement subtle's ConstantTimeEq and ConditionallySelectable for a newtype
// wrapper by forwarding to the inner type.
#[doc(hidden)]
#[macro_export]
macro_rules! constant_time {
  ($Value: ident, $Inner: ident) => {
    impl ConstantTimeEq for $Value {
      fn ct_eq(&self, other: &Self) -> Choice { self.0.ct_eq(&other.0) }
    }

    impl ConditionallySelectable for $Value {
      fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
        $Value($Inner::conditional_select(&a.0, &b.0, choice))
      }
    }
  }
}
// Implement one binary operator (and its assignment form), against both owned
// and borrowed right-hand sides, using the provided closure for the math.
#[doc(hidden)]
#[macro_export]
macro_rules! math_op {
  (
    $Value: ident,
    $Other: ident,
    $Op: ident,
    $op_fn: ident,
    $Assign: ident,
    $assign_fn: ident,
    $function: expr
  ) => {
    impl $Op<$Other> for $Value {
      type Output = $Value;
      fn $op_fn(self, other: $Other) -> Self::Output {
        Self($function(self.0, other.0))
      }
    }

    impl $Assign<$Other> for $Value {
      fn $assign_fn(&mut self, other: $Other) {
        self.0 = $function(self.0, other.0);
      }
    }

    impl<'a> $Op<&'a $Other> for $Value {
      type Output = $Value;
      fn $op_fn(self, other: &'a $Other) -> Self::Output {
        Self($function(self.0, other.0))
      }
    }

    impl<'a> $Assign<&'a $Other> for $Value {
      fn $assign_fn(&mut self, other: &'a $Other) {
        self.0 = $function(self.0, other.0);
      }
    }
  }
}
// Implement Add/Sub against Self, and Mul against a (potentially distinct)
// factor type.
#[doc(hidden)]
#[macro_export]
macro_rules! math {
  ($Value: ident, $Factor: ident, $add: expr, $sub: expr, $mul: expr) => {
    math_op!($Value, $Value, Add, add, AddAssign, add_assign, $add);
    math_op!($Value, $Value, Sub, sub, SubAssign, sub_assign, $sub);
    math_op!($Value, $Factor, Mul, mul, MulAssign, mul_assign, $mul);
  }
}
// math!, plus a Neg implementation forwarding to the inner type's negation.
macro_rules! math_neg {
  ($Value: ident, $Factor: ident, $add: expr, $sub: expr, $mul: expr) => {
    math!($Value, $Factor, $add, $sub, $mul);

    impl Neg for $Value {
      type Output = Self;
      fn neg(self) -> Self::Output { Self(-self.0) }
    }
  }
}
// From<uint> for the wrapper, via the inner type's own From implementation.
#[doc(hidden)]
#[macro_export]
macro_rules! from_wrapper {
  ($wrapper: ident, $inner: ident, $uint: ident) => {
    impl From<$uint> for $wrapper {
      fn from(a: $uint) -> $wrapper { Self($inner::from(a)) }
    }
  }
}
// From implementations for u8 through u64.
#[doc(hidden)]
#[macro_export]
macro_rules! from_uint {
  ($wrapper: ident, $inner: ident) => {
    from_wrapper!($wrapper, $inner, u8);
    from_wrapper!($wrapper, $inner, u16);
    from_wrapper!($wrapper, $inner, u32);
    from_wrapper!($wrapper, $inner, u64);
  }
}
/// Wrapper around the dalek Scalar type, implementing ff's field traits.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Default)]
pub struct Scalar(pub DScalar);
// Deref/Borrow, constant-time traits, math operators (with Neg), and
// From<uint> conversions, all forwarded to the inner DScalar.
deref_borrow!(Scalar, DScalar);
constant_time!(Scalar, DScalar);
math_neg!(Scalar, Scalar, DScalar::add, DScalar::sub, DScalar::mul);
from_uint!(Scalar, DScalar);
impl Deref for Scalar {
type Target = DScalar;
fn deref(&self) -> &Self::Target {
&self.0
impl Scalar {
/// Perform wide reduction on a 64-byte array to create a Scalar without bias
pub fn from_bytes_mod_order_wide(bytes: &[u8; 64]) -> Scalar {
Self(DScalar::from_bytes_mod_order_wide(bytes))
}
}
impl Borrow<DScalar> for Scalar {
fn borrow(&self) -> &DScalar {
&self.0
/// Derive a Scalar without bias from a digest via wide reduction
pub fn from_hash<D: Digest<OutputSize = U64>>(hash: D) -> Scalar {
let mut output = [0u8; 64];
output.copy_from_slice(&hash.finalize());
Scalar(DScalar::from_bytes_mod_order_wide(&output))
}
}
impl Borrow<DScalar> for &Scalar {
fn borrow(&self) -> &DScalar {
&self.0
}
}
impl Add<Scalar> for Scalar {
type Output = Self;
fn add(self, other: Scalar) -> Scalar { Self(self.0 + other.0) }
}
impl AddAssign for Scalar {
fn add_assign(&mut self, other: Scalar) { self.0 += other.0 }
}
impl<'a> Add<&'a Scalar> for Scalar {
type Output = Self;
fn add(self, other: &'a Scalar) -> Scalar { Self(self.0 + other.0) }
}
impl<'a> AddAssign<&'a Scalar> for Scalar {
fn add_assign(&mut self, other: &'a Scalar) { self.0 += other.0 }
}
impl Sub<Scalar> for Scalar {
type Output = Self;
fn sub(self, other: Scalar) -> Scalar { Self(self.0 - other.0) }
}
impl SubAssign for Scalar {
fn sub_assign(&mut self, other: Scalar) { self.0 -= other.0 }
}
impl<'a> Sub<&'a Scalar> for Scalar {
type Output = Self;
fn sub(self, other: &'a Scalar) -> Scalar { Self(self.0 - other.0) }
}
impl<'a> SubAssign<&'a Scalar> for Scalar {
fn sub_assign(&mut self, other: &'a Scalar) { self.0 -= other.0 }
}
impl Neg for Scalar {
type Output = Self;
fn neg(self) -> Scalar { Self(-self.0) }
}
impl Mul<Scalar> for Scalar {
type Output = Self;
fn mul(self, other: Scalar) -> Scalar { Self(self.0 * other.0) }
}
impl MulAssign for Scalar {
fn mul_assign(&mut self, other: Scalar) { self.0 *= other.0 }
}
impl<'a> Mul<&'a Scalar> for Scalar {
type Output = Self;
fn mul(self, other: &'a Scalar) -> Scalar { Self(self.0 * other.0) }
}
impl<'a> MulAssign<&'a Scalar> for Scalar {
fn mul_assign(&mut self, other: &'a Scalar) { self.0 *= other.0 }
}
impl ConstantTimeEq for Scalar {
fn ct_eq(&self, _: &Self) -> Choice { unimplemented!() }
}
impl ConditionallySelectable for Scalar {
fn conditional_select(_: &Self, _: &Self, _: Choice) -> Self { unimplemented!() }
}
impl Field for Scalar {
fn random(mut rng: impl RngCore) -> Self {
let mut r = [0; 64];
@@ -121,194 +196,145 @@ impl Field for Scalar {
fn square(&self) -> Self { *self * self }
fn double(&self) -> Self { *self + self }
fn invert(&self) -> CtOption<Self> {
CtOption::new(Self(self.0.invert()), Choice::from(1 as u8))
CtOption::new(Self(self.0.invert()), !self.is_zero())
}
fn sqrt(&self) -> CtOption<Self> { unimplemented!() }
fn is_zero(&self) -> Choice { Choice::from(if self.0 == DScalar::zero() { 1 } else { 0 }) }
fn is_zero(&self) -> Choice { self.0.ct_eq(&DScalar::zero()) }
fn cube(&self) -> Self { *self * self * self }
fn pow_vartime<S: AsRef<[u64]>>(&self, _exp: S) -> Self { unimplemented!() }
}
impl From<u64> for Scalar {
fn from(a: u64) -> Scalar { Self(DScalar::from(a)) }
}
impl PrimeField for Scalar {
type Repr = [u8; 32];
const NUM_BITS: u32 = 253;
const CAPACITY: u32 = 252;
fn from_repr(bytes: [u8; 32]) -> CtOption<Self> {
let scalar = DScalar::from_canonical_bytes(bytes).map(|x| Scalar(x));
CtOption::new(
scalar.unwrap_or(Scalar::zero()),
Choice::from(if scalar.is_some() { 1 } else { 0 })
)
let scalar = DScalar::from_canonical_bytes(bytes);
// TODO: This unwrap_or isn't constant time, yet do we have an alternative?
CtOption::new(Scalar(scalar.unwrap_or(DScalar::zero())), choice(scalar.is_some()))
}
fn to_repr(&self) -> [u8; 32] { self.0.to_bytes() }
const S: u32 = 0;
const S: u32 = 2;
fn is_odd(&self) -> Choice { unimplemented!() }
fn multiplicative_generator() -> Self { unimplemented!() }
fn multiplicative_generator() -> Self { 2u64.into() }
fn root_of_unity() -> Self { unimplemented!() }
}
impl Scalar {
pub fn from_hash<D: Digest<OutputSize = U64>>(hash: D) -> Scalar {
let mut output = [0u8; 64];
output.copy_from_slice(&hash.finalize());
Scalar(DScalar::from_bytes_mod_order_wide(&output))
impl PrimeFieldBits for Scalar {
type ReprBits = [u8; 32];
fn to_le_bits(&self) -> FieldBits<Self::ReprBits> {
self.to_repr().into()
}
fn char_le_bits() -> FieldBits<Self::ReprBits> {
let mut bytes = (Scalar::zero() - Scalar::one()).to_repr();
bytes[0] += 1;
debug_assert_eq!(DScalar::from_bytes_mod_order(bytes), DScalar::zero());
bytes.into()
}
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct EdwardsPoint(pub DPoint);
pub const ED25519_BASEPOINT_POINT: EdwardsPoint = EdwardsPoint(constants::ED25519_BASEPOINT_POINT);
macro_rules! dalek_group {
(
$Point: ident,
$DPoint: ident,
$torsion_free: expr,
impl Deref for EdwardsPoint {
type Target = DPoint;
$Table: ident,
$DTable: ident,
fn deref(&self) -> &Self::Target {
&self.0
$DCompressed: ident,
$BASEPOINT_POINT: ident,
$BASEPOINT_TABLE: ident
) => {
/// Wrapper around the dalek Point type. For Ed25519, this is restricted to the prime subgroup
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct $Point(pub $DPoint);
deref_borrow!($Point, $DPoint);
constant_time!($Point, $DPoint);
math_neg!($Point, Scalar, $DPoint::add, $DPoint::sub, $DPoint::mul);
pub const $BASEPOINT_POINT: $Point = $Point(constants::$BASEPOINT_POINT);
impl Sum<$Point> for $Point {
fn sum<I: Iterator<Item = $Point>>(iter: I) -> $Point { Self($DPoint::sum(iter)) }
}
impl<'a> Sum<&'a $Point> for $Point {
fn sum<I: Iterator<Item = &'a $Point>>(iter: I) -> $Point { Self($DPoint::sum(iter)) }
}
impl Group for $Point {
type Scalar = Scalar;
// Ideally, this would be cryptographically secure, yet that's not a bound on the trait
// k256 also does this
fn random(rng: impl RngCore) -> Self { &$BASEPOINT_TABLE * Scalar::random(rng) }
fn identity() -> Self { Self($DPoint::identity()) }
fn generator() -> Self { $BASEPOINT_POINT }
fn is_identity(&self) -> Choice { self.0.ct_eq(&$DPoint::identity()) }
fn double(&self) -> Self { *self + self }
}
impl GroupEncoding for $Point {
type Repr = [u8; 32];
fn from_bytes(bytes: &Self::Repr) -> CtOption<Self> {
let decompressed = $DCompressed(*bytes).decompress();
// TODO: Same note on unwrap_or as above
let point = decompressed.unwrap_or($DPoint::identity());
CtOption::new($Point(point), choice(decompressed.is_some()) & choice($torsion_free(point)))
}
fn from_bytes_unchecked(bytes: &Self::Repr) -> CtOption<Self> {
$Point::from_bytes(bytes)
}
fn to_bytes(&self) -> Self::Repr {
self.0.compress().to_bytes()
}
}
impl PrimeGroup for $Point {}
/// Wrapper around the dalek Table type, offering efficient multiplication against the
/// basepoint
pub struct $Table(pub $DTable);
deref_borrow!($Table, $DTable);
pub const $BASEPOINT_TABLE: $Table = $Table(constants::$BASEPOINT_TABLE);
impl Mul<Scalar> for &$Table {
type Output = $Point;
fn mul(self, b: Scalar) -> $Point { $Point(&b.0 * &self.0) }
}
};
}
impl Borrow<DPoint> for EdwardsPoint {
fn borrow(&self) -> &DPoint {
&self.0
}
}
dalek_group!(
EdwardsPoint,
DEdwardsPoint,
|point: DEdwardsPoint| point.is_torsion_free(),
impl Borrow<DPoint> for &EdwardsPoint {
fn borrow(&self) -> &DPoint {
&self.0
}
}
EdwardsBasepointTable,
DEdwardsBasepointTable,
impl Add<EdwardsPoint> for EdwardsPoint {
type Output = Self;
fn add(self, b: EdwardsPoint) -> EdwardsPoint { Self(self.0 + b.0) }
}
impl AddAssign<EdwardsPoint> for EdwardsPoint {
fn add_assign(&mut self, other: EdwardsPoint) { self.0 += other.0 }
}
impl Sum<EdwardsPoint> for EdwardsPoint {
fn sum<I: Iterator<Item = EdwardsPoint>>(iter: I) -> EdwardsPoint { Self(DPoint::sum(iter)) }
}
DCompressedEdwards,
impl<'a> Add<&'a EdwardsPoint> for EdwardsPoint {
type Output = Self;
fn add(self, b: &'a EdwardsPoint) -> EdwardsPoint { Self(self.0 + b.0) }
}
impl<'a> AddAssign<&'a EdwardsPoint> for EdwardsPoint {
fn add_assign(&mut self, other: &'a EdwardsPoint) { self.0 += other.0 }
}
impl<'a> Sum<&'a EdwardsPoint> for EdwardsPoint {
fn sum<I: Iterator<Item = &'a EdwardsPoint>>(iter: I) -> EdwardsPoint { Self(DPoint::sum(iter)) }
}
impl Sub<EdwardsPoint> for EdwardsPoint {
type Output = Self;
fn sub(self, b: EdwardsPoint) -> EdwardsPoint { Self(self.0 - b.0) }
}
impl SubAssign<EdwardsPoint> for EdwardsPoint {
fn sub_assign(&mut self, other: EdwardsPoint) { self.0 -= other.0 }
}
impl<'a> Sub<&'a EdwardsPoint> for EdwardsPoint {
type Output = Self;
fn sub(self, b: &'a EdwardsPoint) -> EdwardsPoint { Self(self.0 - b.0) }
}
impl<'a> SubAssign<&'a EdwardsPoint> for EdwardsPoint {
fn sub_assign(&mut self, other: &'a EdwardsPoint) { self.0 -= other.0 }
}
impl Neg for EdwardsPoint {
type Output = Self;
fn neg(self) -> EdwardsPoint { Self(-self.0) }
}
impl Mul<Scalar> for EdwardsPoint {
type Output = Self;
fn mul(self, b: Scalar) -> EdwardsPoint { Self(b.0 * self.0) }
}
impl MulAssign<Scalar> for EdwardsPoint {
fn mul_assign(&mut self, other: Scalar) { self.0 *= other.0 }
}
impl<'a> Mul<&'a Scalar> for EdwardsPoint {
type Output = Self;
fn mul(self, b: &'a Scalar) -> EdwardsPoint { Self(b.0 * self.0) }
}
impl<'a> MulAssign<&'a Scalar> for EdwardsPoint {
fn mul_assign(&mut self, other: &'a Scalar) { self.0 *= other.0 }
}
impl Group for EdwardsPoint {
type Scalar = Scalar;
fn random(mut _rng: impl RngCore) -> Self { unimplemented!() }
fn identity() -> Self { Self(DPoint::identity()) }
fn generator() -> Self { ED25519_BASEPOINT_POINT }
fn is_identity(&self) -> Choice { (self.0.is_identity() as u8).into() }
fn double(&self) -> Self { *self + self }
}
impl Scalar {
pub fn from_canonical_bytes(bytes: [u8; 32]) -> Option<Scalar> {
DScalar::from_canonical_bytes(bytes).map(|x| Self(x))
}
pub fn from_bytes_mod_order(bytes: [u8; 32]) -> Scalar {
Self(DScalar::from_bytes_mod_order(bytes))
}
pub fn from_bytes_mod_order_wide(bytes: &[u8; 64]) -> Scalar {
Self(DScalar::from_bytes_mod_order_wide(bytes))
}
}
pub struct CompressedEdwardsY(pub DCompressed);
impl CompressedEdwardsY {
pub fn new(y: [u8; 32]) -> CompressedEdwardsY {
Self(DCompressed(y))
}
pub fn decompress(&self) -> Option<EdwardsPoint> {
self.0.decompress().map(|x| EdwardsPoint(x))
}
pub fn to_bytes(&self) -> [u8; 32] {
self.0.to_bytes()
}
}
impl EdwardsPoint {
pub fn is_torsion_free(&self) -> bool {
self.0.is_torsion_free()
}
pub fn compress(&self) -> CompressedEdwardsY {
CompressedEdwardsY(self.0.compress())
}
}
pub struct EdwardsBasepointTable(pub DTable);
pub const ED25519_BASEPOINT_TABLE: EdwardsBasepointTable = EdwardsBasepointTable(
constants::ED25519_BASEPOINT_TABLE
ED25519_BASEPOINT_POINT,
ED25519_BASEPOINT_TABLE
);
impl Deref for EdwardsBasepointTable {
type Target = DTable;
dalek_group!(
RistrettoPoint,
DRistrettoPoint,
|_| true,
fn deref(&self) -> &Self::Target {
&self.0
}
}
RistrettoBasepointTable,
DRistrettoBasepointTable,
impl Borrow<DTable> for &EdwardsBasepointTable {
fn borrow(&self) -> &DTable {
&self.0
}
}
DCompressedRistretto,
impl Mul<Scalar> for &EdwardsBasepointTable {
type Output = EdwardsPoint;
fn mul(self, b: Scalar) -> EdwardsPoint { EdwardsPoint(&b.0 * &self.0) }
}
RISTRETTO_BASEPOINT_POINT,
RISTRETTO_BASEPOINT_TABLE
);

38
crypto/dleq/Cargo.toml Normal file
View File

@@ -0,0 +1,38 @@
[package]
name = "dleq"
version = "0.1.0"
description = "Implementation of single and cross-curve Discrete Log Equality proofs"
license = "MIT"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
[dependencies]
thiserror = "1"
rand_core = "0.6"
digest = "0.10"
transcript = { package = "flexible-transcript", path = "../transcript", version = "0.1" }
ff = "0.12"
group = "0.12"
multiexp = { path = "../multiexp", features = ["batch"], optional = true }
[dev-dependencies]
hex-literal = "0.3"
blake2 = "0.10"
k256 = { version = "0.11", features = ["arithmetic", "bits"] }
dalek-ff-group = { path = "../dalek-ff-group" }
transcript = { package = "flexible-transcript", path = "../transcript", features = ["recommended"] }
[features]
serialize = []
experimental = ["multiexp"]
secure_capacity_difference = []
# Only applies to cross_group, yet is default to ensure security
default = ["secure_capacity_difference"]

21
crypto/dleq/LICENSE Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2020-2022 Luke Parker, Lee Bousfield
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

63
crypto/dleq/README.md Normal file
View File

@@ -0,0 +1,63 @@
# Discrete Log Equality
Implementation of discrete log equality proofs for curves implementing
`ff`/`group`. There is also a highly experimental cross-group DLEq proof, under
the `experimental` feature, which lacks formal proofs yet is made available here
regardless. This library has NOT undergone auditing.
### Cross-Group DLEq
The present cross-group DLEq is based off
[MRL-0010](https://web.getmonero.org/resources/research-lab/pubs/MRL-0010.pdf),
which isn't computationally correct as while it proves both keys have the same
discrete logarithm for their `G'`/`H'` component, it doesn't prove a lack of a
`G`/`H` component. Accordingly, it was augmented with a pair of Schnorr Proof of
Knowledges, proving a known `G'`/`H'` component, guaranteeing a lack of a
`G`/`H` component (assuming an unknown relation between `G`/`H` and `G'`/`H'`).
The challenges for the ring signatures were also merged, removing one element
from each bit's proof with only a slight reduction to challenge security (as
instead of being uniform over each scalar field, they're uniform over the
mutual bit capacity of each scalar field). This reduction is identical to the
one applied to the proved-for scalar, and accordingly should not reduce overall
security. It does create a lack of domain separation, yet that shouldn't be an
issue.
The following variants are available:
- `ClassicLinear`. This is only for reference purposes, being the above
described proof, with no further optimizations.
- `ConciseLinear`. This proves for 2 bits at a time, not increasing the
signature size for both bits yet decreasing the amount of
commitments/challenges in total.
- `EfficientLinear`. This provides ring signatures in the form
`((R_G, R_H), s)`, instead of `(e, s)`, and accordingly enables a batch
verification of their final step. It is the most performant, and also the
largest, option.
- `CompromiseLinear`. This provides signatures in the form `((R_G, R_H), s)` AND
proves for 2-bits at a time. While this increases the amount of steps in
verifying the ring signatures, which aren't batch verified, and decreases the
amount of items batched (an operation which grows in efficiency with
quantity), it strikes a balance between speed and size.
The following numbers are from benchmarks performed with k256/curve25519_dalek
on an Intel i7-118567:
| Algorithm | Size | Verification Time |
|--------------------|-------------------------|-------------------|
| `ClassicLinear` | 56829 bytes (+27%) | 157ms (0%) |
| `ConciseLinear` | 44607 bytes (Reference) | 156ms (Reference) |
| `EfficientLinear` | 65145 bytes (+46%) | 122ms (-22%) |
| `CompromiseLinear` | 48765 bytes (+9%) | 137ms (-12%) |
`CompromiseLinear` is the best choice by only being marginally sub-optimal
regarding size, yet still achieving most of the desired performance
improvements. That said, neither the original postulation (which had flaws) nor
any construction here has been proven nor audited. Accordingly, they are solely
experimental, and none are recommended.
All proofs are suffixed "Linear" in the hope a logarithmic proof makes itself
available, which would likely immediately become the most efficient option.

View File

@@ -0,0 +1,230 @@
use rand_core::{RngCore, CryptoRng};
use transcript::Transcript;
use group::{ff::{Field, PrimeFieldBits}, prime::PrimeGroup};
use multiexp::BatchVerifier;
use crate::{
Generators,
cross_group::{DLEqError, scalar::{scalar_convert, mutual_scalar_from_bytes}}
};
#[cfg(feature = "serialize")]
use std::io::{Read, Write};
#[cfg(feature = "serialize")]
use ff::PrimeField;
#[cfg(feature = "serialize")]
use crate::{read_scalar, cross_group::read_point};
// The initial element the ring's challenge chain is anchored to: either the
// pair of nonce points themselves (R) or the initial merged challenge (e).
#[allow(non_camel_case_types)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) enum Re<G0: PrimeGroup, G1: PrimeGroup> {
  R(G0, G1),
  // Merged challenges have a slight security reduction, yet one already applied to the scalar
  // being proven for, and this saves ~8kb. Alternatively, challenges could be redefined as a seed,
  // present here, which is then hashed for each of the two challenges, remaining unbiased/unique
  // while maintaining the bandwidth savings, yet also while adding 252 hashes for
  // Secp256k1/Ed25519
  e(G0::Scalar)
}
impl<G0: PrimeGroup, G1: PrimeGroup> Re<G0, G1> {
  // Zeroed default of the nonce form, selecting which shape a proof/deserialization uses.
  #[allow(non_snake_case)]
  pub(crate) fn R_default() -> Re<G0, G1> {
    Re::R(G0::identity(), G1::identity())
  }

  // Zeroed default of the challenge form.
  pub(crate) fn e_default() -> Re<G0, G1> {
    Re::e(G0::Scalar::zero())
  }
}
// An AOS ring membership proof over a pair of groups: the initial commitment
// (Re_0, in either nonce or challenge form) and a response pair per ring member.
#[allow(non_snake_case)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) struct Aos<G0: PrimeGroup, G1: PrimeGroup, const RING_LEN: usize> {
  Re_0: Re<G0, G1>,
  s: [(G0::Scalar, G1::Scalar); RING_LEN]
}
impl<
  G0: PrimeGroup,
  G1: PrimeGroup,
  const RING_LEN: usize
> Aos<G0, G1, RING_LEN> where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits {
  // Transcript a pair of nonce points and derive the mutual challenge scalars
  // from the transcript's output.
  #[allow(non_snake_case)]
  fn nonces<T: Transcript>(mut transcript: T, nonces: (G0, G1)) -> (G0::Scalar, G1::Scalar) {
    transcript.domain_separate(b"aos_membership_proof");
    transcript.append_message(b"ring_len", &u8::try_from(RING_LEN).unwrap().to_le_bytes());
    transcript.append_message(b"nonce_0", nonces.0.to_bytes().as_ref());
    transcript.append_message(b"nonce_1", nonces.1.to_bytes().as_ref());
    mutual_scalar_from_bytes(transcript.challenge(b"challenge").as_ref())
  }

  // Reconstruct the nonce commitments from a response/challenge pair:
  // R = (alt * s) - (A * e) on each group.
  #[allow(non_snake_case)]
  fn R(
    generators: (Generators<G0>, Generators<G1>),
    s: (G0::Scalar, G1::Scalar),
    A: (G0, G1),
    e: (G0::Scalar, G1::Scalar)
  ) -> (G0, G1) {
    (((generators.0.alt * s.0) - (A.0 * e.0)), ((generators.1.alt * s.1) - (A.1 * e.1)))
  }

  // The same reconstruction, expressed as scalar/point statements for a
  // multiexp batch verifier.
  #[allow(non_snake_case)]
  fn R_batch(
    generators: (Generators<G0>, Generators<G1>),
    s: (G0::Scalar, G1::Scalar),
    A: (G0, G1),
    e: (G0::Scalar, G1::Scalar)
  ) -> (Vec<(G0::Scalar, G0)>, Vec<(G1::Scalar, G1)>) {
    (vec![(-s.0, generators.0.alt), (e.0, A.0)], vec![(-s.1, generators.1.alt), (e.1, A.1)])
  }

  // Reconstruct the nonces and immediately derive the next ring challenge from them.
  #[allow(non_snake_case)]
  fn R_nonces<T: Transcript>(
    transcript: T,
    generators: (Generators<G0>, Generators<G1>),
    s: (G0::Scalar, G1::Scalar),
    A: (G0, G1),
    e: (G0::Scalar, G1::Scalar)
  ) -> (G0::Scalar, G1::Scalar) {
    Self::nonces(transcript, Self::R(generators, s, A, e))
  }

  // Prove for ring[actual] using its blinding keys, generating decoy responses
  // for every other member. Re_0 selects (and receives) the proof's form.
  #[allow(non_snake_case)]
  pub(crate) fn prove<R: RngCore + CryptoRng, T: Clone + Transcript>(
    rng: &mut R,
    transcript: T,
    generators: (Generators<G0>, Generators<G1>),
    ring: &[(G0, G1)],
    actual: usize,
    blinding_key: (G0::Scalar, G1::Scalar),
    mut Re_0: Re<G0, G1>
  ) -> Self {
    // While it is possible to use larger values, it's not efficient to do so
    // 2 + 2 == 2^2, yet 2 + 2 + 2 < 2^3
    debug_assert!((RING_LEN == 2) || (RING_LEN == 4));
    debug_assert_eq!(RING_LEN, ring.len());

    let mut s = [(G0::Scalar::zero(), G1::Scalar::zero()); RING_LEN];

    // Fresh nonces, committed to via the alt generators
    let r = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng));
    #[allow(non_snake_case)]
    let original_R = (generators.0.alt * r.0, generators.1.alt * r.1);
    #[allow(non_snake_case)]
    let mut R = original_R;

    // Walk the ring starting just after the actual signer, wrapping around to it
    for i in ((actual + 1) .. (actual + RING_LEN + 1)).map(|i| i % RING_LEN) {
      let e = Self::nonces(transcript.clone(), R);
      // Record the proof's anchor when passing index 0
      if i == 0 {
        match Re_0 {
          Re::R(ref mut R0_0, ref mut R1_0) => { *R0_0 = R.0; *R1_0 = R.1 },
          Re::e(ref mut e_0) => *e_0 = e.0
        }
      }

      // Solve for the real index
      if i == actual {
        s[i] = (r.0 + (e.0 * blinding_key.0), r.1 + (e.1 * blinding_key.1));
        debug_assert_eq!(Self::R(generators, s[i], ring[actual], e), original_R);
        break;
      // Generate a decoy response
      } else {
        s[i] = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng));
      }

      R = Self::R(generators, s[i], ring[i], e);
    }

    Aos { Re_0, s }
  }

  // Assumes the ring has already been transcripted in some form. Critically insecure if it hasn't
  pub(crate) fn verify<R: RngCore + CryptoRng, T: Clone + Transcript>(
    &self,
    rng: &mut R,
    transcript: T,
    generators: (Generators<G0>, Generators<G1>),
    batch: &mut (BatchVerifier<(), G0>, BatchVerifier<(), G1>),
    ring: &[(G0, G1)]
  ) -> Result<(), DLEqError> {
    debug_assert!((RING_LEN == 2) || (RING_LEN == 4));
    debug_assert_eq!(RING_LEN, ring.len());

    #[allow(non_snake_case)]
    match self.Re_0 {
      // Nonce form: chain challenges around the ring, then queue the final
      // member's reconstruction (which must equal the anchored nonces) into the
      // batch verifier
      Re::R(R0_0, R1_0) => {
        let mut e = Self::nonces(transcript.clone(), (R0_0, R1_0));
        for i in 0 .. (RING_LEN - 1) {
          e = Self::R_nonces(transcript.clone(), generators, self.s[i], ring[i], e);
        }

        let mut statements = Self::R_batch(
          generators,
          *self.s.last().unwrap(),
          *ring.last().unwrap(),
          e
        );
        statements.0.push((G0::Scalar::one(), R0_0));
        statements.1.push((G1::Scalar::one(), R1_0));
        batch.0.queue(&mut *rng, (), statements.0);
        batch.1.queue(&mut *rng, (), statements.1);
      },

      // Challenge form: walk the entire ring and check the chain closes back on
      // the initial challenge
      Re::e(e_0) => {
        let e_0 = (e_0, scalar_convert(e_0).ok_or(DLEqError::InvalidChallenge)?);
        let mut e = None;
        for i in 0 .. RING_LEN {
          e = Some(
            Self::R_nonces(transcript.clone(), generators, self.s[i], ring[i], e.unwrap_or(e_0))
          );
        }

        // Will panic if the above loop is never run somehow
        // If e wasn't an Option, and instead initially set to e_0, it'd always pass
        if e_0 != e.unwrap() {
          Err(DLEqError::InvalidProof)?;
        }
      }
    }

    Ok(())
  }

  // Write the anchor (in whichever form it takes) followed by every response pair.
  #[cfg(feature = "serialize")]
  pub(crate) fn serialize<W: Write>(&self, w: &mut W) -> std::io::Result<()> {
    #[allow(non_snake_case)]
    match self.Re_0 {
      Re::R(R0, R1) => {
        w.write_all(R0.to_bytes().as_ref())?;
        w.write_all(R1.to_bytes().as_ref())?;
      },
      Re::e(e) => w.write_all(e.to_repr().as_ref())?
    }

    for i in 0 .. RING_LEN {
      w.write_all(self.s[i].0.to_repr().as_ref())?;
      w.write_all(self.s[i].1.to_repr().as_ref())?;
    }
    Ok(())
  }

  // Read a proof whose form is specified by the passed-in Re_0 (R_default/e_default).
  #[allow(non_snake_case)]
  #[cfg(feature = "serialize")]
  pub(crate) fn deserialize<R: Read>(r: &mut R, mut Re_0: Re<G0, G1>) -> std::io::Result<Self> {
    match Re_0 {
      Re::R(ref mut R0, ref mut R1) => { *R0 = read_point(r)?; *R1 = read_point(r)? },
      Re::e(ref mut e) => *e = read_scalar(r)?
    }

    let mut s = [(G0::Scalar::zero(), G1::Scalar::zero()); RING_LEN];
    for i in 0 .. RING_LEN {
      s[i] = (read_scalar(r)?, read_scalar(r)?);
    }
    Ok(Aos { Re_0, s })
  }
}

View File

@@ -0,0 +1,175 @@
use rand_core::{RngCore, CryptoRng};
use transcript::Transcript;
use group::{ff::PrimeFieldBits, prime::PrimeGroup};
use multiexp::BatchVerifier;
use crate::{Generators, cross_group::{DLEqError, aos::{Re, Aos}}};
#[cfg(feature = "serialize")]
use std::io::{Read, Write};
#[cfg(feature = "serialize")]
use crate::cross_group::read_point;
// The supported per-bit proving algorithms, trading proof size against verification
// speed. Linear refers to scaling linearly in the number of bits proven.
pub(crate) enum BitSignature {
  // 1 bit per ring signature, (e, s) AOS form
  ClassicLinear,
  // 2 bits per ring signature, (e, s) AOS form
  ConciseLinear,
  // 1 bit per ring signature, (R, s) AOS form, enabling batch verification
  EfficientLinear,
  // 2 bits per ring signature, (R, s) AOS form
  CompromiseLinear
}
impl BitSignature {
  // Stable numeric tag for this algorithm, used as the SIGNATURE const generic
  pub(crate) const fn to_u8(&self) -> u8 {
    match self {
      Self::ClassicLinear => 0,
      Self::ConciseLinear => 1,
      Self::EfficientLinear => 2,
      Self::CompromiseLinear => 3
    }
  }

  // Inverse of to_u8. Panics on an unrecognized tag
  pub(crate) const fn from(algorithm: u8) -> BitSignature {
    match algorithm {
      0 => Self::ClassicLinear,
      1 => Self::ConciseLinear,
      2 => Self::EfficientLinear,
      3 => Self::CompromiseLinear,
      _ => panic!("Unknown algorithm")
    }
  }

  // How many bits are proven per ring signature
  pub(crate) const fn bits(&self) -> usize {
    match self {
      Self::ClassicLinear | Self::EfficientLinear => 1,
      Self::ConciseLinear | Self::CompromiseLinear => 2
    }
  }

  // Ring size needed to prove this many bits: 2^bits
  pub(crate) const fn ring_len(&self) -> usize {
    1 << self.bits()
  }

  // Which AOS head form (e or R) this algorithm uses
  fn aos_form<G0: PrimeGroup, G1: PrimeGroup>(&self) -> Re<G0, G1> {
    match self {
      Self::ClassicLinear | Self::ConciseLinear => Re::e_default(),
      Self::EfficientLinear | Self::CompromiseLinear => Re::R_default()
    }
  }
}
// A group of bits (1 or 2, per the SIGNATURE algorithm) committed to over both
// groups, with an AOS ring signature proving both commitments open to the same value
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) struct Bits<
  G0: PrimeGroup,
  G1: PrimeGroup,
  const SIGNATURE: u8,
  const RING_LEN: usize
> {
  // Commitments to these bits over each group
  pub(crate) commitments: (G0, G1),
  // Ring signature over the RING_LEN candidate bit values
  signature: Aos<G0, G1, RING_LEN>
}
impl<
  G0: PrimeGroup,
  G1: PrimeGroup,
  const SIGNATURE: u8,
  const RING_LEN: usize
> Bits<G0, G1, SIGNATURE, RING_LEN> where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits {
  // Bind this bit group's index and commitments into the transcript
  fn transcript<T: Transcript>(transcript: &mut T, i: usize, commitments: (G0, G1)) {
    transcript.domain_separate(b"bits");
    transcript.append_message(b"group", &u16::try_from(i).unwrap().to_le_bytes());
    transcript.append_message(b"commitment_0", commitments.0.to_bytes().as_ref());
    transcript.append_message(b"commitment_1", commitments.1.to_bytes().as_ref());
  }

  // Build the ring of candidates: entry k is the commitments minus k * pow_2, so the
  // entry matching the actual bit value is a commitment to zero (just the blinding)
  fn ring(pow_2: (G0, G1), commitments: (G0, G1)) -> Vec<(G0, G1)> {
    let mut res = vec![commitments; RING_LEN];
    for i in 1 .. RING_LEN {
      res[i] = (res[i - 1].0 - pow_2.0, res[i - 1].1 - pow_2.1);
    }
    res
  }

  // Advance pow_2 past this bit group, doubling once per bit covered
  fn shift(pow_2: &mut (G0, G1)) {
    for _ in 0 .. BitSignature::from(SIGNATURE).bits() {
      pow_2.0 = pow_2.0.double();
      pow_2.1 = pow_2.1.double();
    }
  }

  // Commit to `bits` over both groups, proving the commitments match via an AOS ring
  // signature. Advances pow_2 for the next group of bits
  pub(crate) fn prove<R: RngCore + CryptoRng, T: Clone + Transcript>(
    rng: &mut R,
    transcript: &mut T,
    generators: (Generators<G0>, Generators<G1>),
    i: usize,
    pow_2: &mut (G0, G1),
    bits: u8,
    blinding_key: (G0::Scalar, G1::Scalar)
  ) -> Self {
    // Start from the blinding factors alone, then add the bit value's contribution
    let mut commitments = (
      (generators.0.alt * blinding_key.0),
      (generators.1.alt * blinding_key.1)
    );
    commitments.0 += pow_2.0 * G0::Scalar::from(bits.into());
    commitments.1 += pow_2.1 * G1::Scalar::from(bits.into());
    Self::transcript(transcript, i, commitments);
    let signature = Aos::prove(
      rng,
      transcript.clone(),
      generators,
      &Self::ring(*pow_2, commitments),
      usize::from(bits),
      blinding_key,
      BitSignature::from(SIGNATURE).aos_form()
    );
    Self::shift(pow_2);
    Bits { commitments, signature }
  }

  // Verify this bit group's ring signature, queueing batchable statements into batch.
  // Advances pow_2 in lockstep with prove
  pub(crate) fn verify<R: RngCore + CryptoRng, T: Clone + Transcript>(
    &self,
    rng: &mut R,
    transcript: &mut T,
    generators: (Generators<G0>, Generators<G1>),
    batch: &mut (BatchVerifier<(), G0>, BatchVerifier<(), G1>),
    i: usize,
    pow_2: &mut (G0, G1)
  ) -> Result<(), DLEqError> {
    Self::transcript(transcript, i, self.commitments);
    self.signature.verify(
      rng,
      transcript.clone(),
      generators,
      batch,
      &Self::ring(*pow_2, self.commitments)
    )?;
    Self::shift(pow_2);
    Ok(())
  }

  // Write both commitments, then the signature
  #[cfg(feature = "serialize")]
  pub(crate) fn serialize<W: Write>(&self, w: &mut W) -> std::io::Result<()> {
    w.write_all(self.commitments.0.to_bytes().as_ref())?;
    w.write_all(self.commitments.1.to_bytes().as_ref())?;
    self.signature.serialize(w)
  }

  // Read the commitments and signature, with SIGNATURE determining the AOS head form
  #[cfg(feature = "serialize")]
  pub(crate) fn deserialize<R: Read>(r: &mut R) -> std::io::Result<Self> {
    Ok(
      Bits {
        commitments: (read_point(r)?, read_point(r)?),
        signature: Aos::deserialize(r, BitSignature::from(SIGNATURE).aos_form())?
      }
    )
  }
}

View File

@@ -0,0 +1,366 @@
use thiserror::Error;
use rand_core::{RngCore, CryptoRng};
use digest::Digest;
use transcript::Transcript;
use group::{ff::{Field, PrimeField, PrimeFieldBits}, prime::PrimeGroup};
use multiexp::BatchVerifier;
use crate::Generators;
pub mod scalar;
use scalar::{scalar_convert, mutual_scalar_from_bytes};
pub(crate) mod schnorr;
use schnorr::SchnorrPoK;
pub(crate) mod aos;
mod bits;
use bits::{BitSignature, Bits};
#[cfg(feature = "serialize")]
use std::io::{Read, Write};
#[cfg(feature = "serialize")]
// Read a group element in its canonical encoding, rejecting invalid encodings
#[cfg(feature = "serialize")]
pub(crate) fn read_point<R: Read, G: PrimeGroup>(r: &mut R) -> std::io::Result<G> {
  let mut repr = G::Repr::default();
  r.read_exact(repr.as_mut())?;
  let candidate = G::from_bytes(&repr);
  if bool::from(candidate.is_none()) {
    return Err(std::io::Error::new(std::io::ErrorKind::Other, "invalid point"));
  }
  Ok(candidate.unwrap())
}
// Errors a cross-group DLEq proof can fail with
#[derive(Error, PartialEq, Eq, Debug)]
pub enum DLEqError {
  // A Schnorr proof of knowledge for one of the keys failed
  #[error("invalid proof of knowledge")]
  InvalidProofOfKnowledge,
  // The proof's bit group/remainder counts don't match the curve pair's capacity
  #[error("invalid proof length")]
  InvalidProofLength,
  // A challenge wasn't mutually valid across both scalar fields
  #[error("invalid challenge")]
  InvalidChallenge,
  // The proof itself failed to verify
  #[error("invalid proof")]
  InvalidProof
}
// This should never be directly instantiated and uses a u8 to represent internal values
// Any external usage is likely invalid
#[doc(hidden)]
// Debug would be such a dump of data this likely isn't helpful, but at least it's available to
// anyone who wants it
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct __DLEqProof<
  G0: PrimeGroup,
  G1: PrimeGroup,
  const SIGNATURE: u8,
  const RING_LEN: usize,
  const REMAINDER_RING_LEN: usize
> where G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits {
  // One entry per full group of bits within the mutual capacity
  bits: Vec<Bits<G0, G1, SIGNATURE, RING_LEN>>,
  // Present iff the mutual capacity isn't a multiple of the bits-per-group
  remainder: Option<Bits<G0, G1, SIGNATURE, REMAINDER_RING_LEN>>,
  // Schnorr proofs of knowledge of the discrete logs of the proven-for keys
  poks: (SchnorrPoK<G0>, SchnorrPoK<G1>)
}
// Declare a public DLEq proof type for a given BitSignature, deriving its const
// generic parameters (algorithm tag, ring length, remainder ring length) at
// compile time
macro_rules! dleq {
  ($name: ident, $signature: expr, $remainder: literal) => {
    pub type $name<G0, G1> = __DLEqProof<
      G0,
      G1,
      { $signature.to_u8() },
      { $signature.ring_len() },
      // There may not be a remainder, yet if there is one, it'll be just one bit
      // A ring for one bit has a RING_LEN of 2
      { if $remainder { 2 } else { 0 } }
    >;
  }
}
// The four supported proof variants, trading proof size for verification speed

// Proves for 1-bit at a time with the signature form (e, s), as originally described in MRL-0010.
// Uses a merged challenge, unlike MRL-0010, for the ring signature, saving an element from each
// bit and removing a hash while slightly reducing challenge security. This security reduction is
// already applied to the scalar being proven for, a result of the requirement it's mutually valid
// over both scalar fields, hence its application here as well. This is mainly here as a point of
// reference for the following DLEq proofs, all which use merged challenges, and isn't performant
// in comparison to the others
dleq!(ClassicLinearDLEq, BitSignature::ClassicLinear, false);

// Proves for 2-bits at a time to save 3/7 elements of every other bit
// <9% smaller than CompromiseLinear, yet ~12% slower
dleq!(ConciseLinearDLEq, BitSignature::ConciseLinear, true);

// Uses AOS signatures of the form R, s, to enable the final step of the ring signature to be
// batch verified, at the cost of adding an additional element per bit
dleq!(EfficientLinearDLEq, BitSignature::EfficientLinear, false);

// Proves for 2-bits at a time while using the R, s form. This saves 3/7 elements of every other
// bit, while adding 1 element to every bit, and is more efficient than ConciseLinear yet less
// efficient than EfficientLinear due to having more ring signature steps which aren't batched
// >25% smaller than EfficientLinear and just 11% slower, making it the recommended option
dleq!(CompromiseLinearDLEq, BitSignature::CompromiseLinear, true);
impl<
  G0: PrimeGroup,
  G1: PrimeGroup,
  const SIGNATURE: u8,
  const RING_LEN: usize,
  const REMAINDER_RING_LEN: usize
> __DLEqProof<G0, G1, SIGNATURE, RING_LEN, REMAINDER_RING_LEN> where
  G0::Scalar: PrimeFieldBits, G1::Scalar: PrimeFieldBits {
  // Bind the generators and the keys being proven for into the transcript
  pub(crate) fn transcript<T: Transcript>(
    transcript: &mut T,
    generators: (Generators<G0>, Generators<G1>),
    keys: (G0, G1)
  ) {
    transcript.domain_separate(b"cross_group_dleq");
    generators.0.transcript(transcript);
    generators.1.transcript(transcript);
    transcript.domain_separate(b"points");
    transcript.append_message(b"point_0", keys.0.to_bytes().as_ref());
    transcript.append_message(b"point_1", keys.1.to_bytes().as_ref());
  }

  // Sample a blinding key, tracking the running total. The final key is chosen as the
  // negation of the total so all blinding keys sum to zero
  pub(crate) fn blinding_key<R: RngCore + CryptoRng, F: PrimeField>(
    rng: &mut R,
    total: &mut F,
    last: bool
  ) -> F {
    let blinding_key = if last {
      -*total
    } else {
      F::random(&mut *rng)
    };
    *total += blinding_key;
    blinding_key
  }

  // Sum the per-bit-group commitments back into the keys being proven for
  // (the blinding keys cancel as they sum to zero)
  fn reconstruct_keys(&self) -> (G0, G1) {
    let mut res = (
      self.bits.iter().map(|bit| bit.commitments.0).sum::<G0>(),
      self.bits.iter().map(|bit| bit.commitments.1).sum::<G1>()
    );
    if let Some(bit) = &self.remainder {
      res.0 += bit.commitments.0;
      res.1 += bit.commitments.1;
    }
    res
  }

  // Prove knowledge of f over both groups, bit group by bit group
  fn prove_internal<R: RngCore + CryptoRng, T: Clone + Transcript>(
    rng: &mut R,
    transcript: &mut T,
    generators: (Generators<G0>, Generators<G1>),
    f: (G0::Scalar, G1::Scalar)
  ) -> (Self, (G0::Scalar, G1::Scalar)) {
    Self::transcript(
      transcript,
      generators,
      ((generators.0.primary * f.0), (generators.1.primary * f.1))
    );

    // Prove knowledge of the discrete logs of the keys themselves
    let poks = (
      SchnorrPoK::<G0>::prove(rng, transcript, generators.0.primary, f.0),
      SchnorrPoK::<G1>::prove(rng, transcript, generators.1.primary, f.1)
    );

    let mut blinding_key_total = (G0::Scalar::zero(), G1::Scalar::zero());
    let mut blinding_key = |rng: &mut R, last| {
      let blinding_key = (
        Self::blinding_key(&mut *rng, &mut blinding_key_total.0, last),
        Self::blinding_key(&mut *rng, &mut blinding_key_total.1, last)
      );
      if last {
        // The last blinding key must zero the totals for reconstruct_keys to hold
        debug_assert_eq!(blinding_key_total.0, G0::Scalar::zero());
        debug_assert_eq!(blinding_key_total.1, G1::Scalar::zero());
      }
      blinding_key
    };

    let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap();
    let bits_per_group = BitSignature::from(SIGNATURE).bits();

    let mut pow_2 = (generators.0.primary, generators.1.primary);

    let raw_bits = f.0.to_le_bits();
    let mut bits = Vec::with_capacity(capacity);
    let mut these_bits: u8 = 0;
    for (i, bit) in raw_bits.iter().enumerate() {
      // Only the mutually valid capacity is proven for
      if i == capacity {
        break;
      }

      let bit = *bit as u8;
      // Sanity check the bit view yields just 0 or 1
      debug_assert_eq!(bit | 1, 1);

      // Accumulate this bit
      these_bits |= bit << (i % bits_per_group);
      if (i % bits_per_group) == (bits_per_group - 1) {
        let last = i == (capacity - 1);
        let blinding_key = blinding_key(&mut *rng, last);
        bits.push(
          Bits::prove(
            &mut *rng,
            transcript,
            generators,
            i / bits_per_group,
            &mut pow_2,
            these_bits,
            blinding_key
          )
        );
        these_bits = 0;
      }
    }
    debug_assert_eq!(bits.len(), capacity / bits_per_group);

    // Any bits which didn't fill a full group are proven in a single-bit remainder
    let mut remainder = None;
    if capacity != ((capacity / bits_per_group) * bits_per_group) {
      let blinding_key = blinding_key(&mut *rng, true);
      remainder = Some(
        Bits::prove(
          &mut *rng,
          transcript,
          generators,
          capacity / bits_per_group,
          &mut pow_2,
          these_bits,
          blinding_key
        )
      );
    }

    let proof = __DLEqProof { bits, remainder, poks };
    debug_assert_eq!(
      proof.reconstruct_keys(),
      (generators.0.primary * f.0, generators.1.primary * f.1)
    );
    (proof, f)
  }

  /// Prove the cross-Group Discrete Log Equality for the points derived from the scalar created as
  /// the output of the passed in Digest. Given the non-standard requirements to achieve
  /// uniformity, needing to be < 2^x instead of less than a prime moduli, this is the simplest way
  /// to safely and securely generate a Scalar, without risk of failure, nor bias
  /// It also ensures a lack of determinable relation between keys, guaranteeing security in the
  /// currently expected use case for this, atomic swaps, where each swap leaks the key. Knowing
  /// the relationship between keys would allow breaking all swaps after just one
  pub fn prove<R: RngCore + CryptoRng, T: Clone + Transcript, D: Digest>(
    rng: &mut R,
    transcript: &mut T,
    generators: (Generators<G0>, Generators<G1>),
    digest: D
  ) -> (Self, (G0::Scalar, G1::Scalar)) {
    Self::prove_internal(
      rng,
      transcript,
      generators,
      mutual_scalar_from_bytes(digest.finalize().as_ref())
    )
  }

  /// Prove the cross-Group Discrete Log Equality for the points derived from the scalar passed in,
  /// failing if it's not mutually valid. This allows for rejection sampling externally derived
  /// scalars until they're safely usable, as needed
  pub fn prove_without_bias<R: RngCore + CryptoRng, T: Clone + Transcript>(
    rng: &mut R,
    transcript: &mut T,
    generators: (Generators<G0>, Generators<G1>),
    f0: G0::Scalar
  ) -> Option<(Self, (G0::Scalar, G1::Scalar))> {
    scalar_convert(f0).map(|f1| Self::prove_internal(rng, transcript, generators, (f0, f1)))
  }

  /// Verify a cross-Group Discrete Log Equality statement, returning the points proven for
  pub fn verify<R: RngCore + CryptoRng, T: Clone + Transcript>(
    &self,
    rng: &mut R,
    transcript: &mut T,
    generators: (Generators<G0>, Generators<G1>)
  ) -> Result<(G0, G1), DLEqError> {
    let capacity = usize::try_from(
      G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)
    ).unwrap();
    let bits_per_group = BitSignature::from(SIGNATURE).bits();
    let has_remainder = (capacity % bits_per_group) != 0;

    // These shouldn't be possible, as locally created and deserialized proofs should be properly
    // formed in these regards, yet it doesn't hurt to check and would be problematic if true
    if (self.bits.len() != (capacity / bits_per_group)) || (
      (self.remainder.is_none() && has_remainder) || (self.remainder.is_some() && !has_remainder)
    ) {
      return Err(DLEqError::InvalidProofLength);
    }

    let keys = self.reconstruct_keys();
    Self::transcript(transcript, generators, keys);

    // Pre-size the batch verifiers per the expected number of queued statements
    let batch_capacity = match BitSignature::from(SIGNATURE) {
      BitSignature::ClassicLinear => 3,
      BitSignature::ConciseLinear => 3,
      BitSignature::EfficientLinear => (self.bits.len() + 1) * 3,
      BitSignature::CompromiseLinear => (self.bits.len() + 1) * 3
    };
    let mut batch = (BatchVerifier::new(batch_capacity), BatchVerifier::new(batch_capacity));

    self.poks.0.verify(&mut *rng, transcript, generators.0.primary, keys.0, &mut batch.0);
    self.poks.1.verify(&mut *rng, transcript, generators.1.primary, keys.1, &mut batch.1);

    // Walk the bit groups, mirroring prove_internal's pow_2 progression
    let mut pow_2 = (generators.0.primary, generators.1.primary);
    for (i, bits) in self.bits.iter().enumerate() {
      bits.verify(&mut *rng, transcript, generators, &mut batch, i, &mut pow_2)?;
    }
    if let Some(bit) = &self.remainder {
      bit.verify(&mut *rng, transcript, generators, &mut batch, self.bits.len(), &mut pow_2)?;
    }

    // All deferred statements must hold for the proof to be valid
    if (!batch.0.verify_vartime()) || (!batch.1.verify_vartime()) {
      Err(DLEqError::InvalidProof)?;
    }

    Ok(keys)
  }

  // Write every bit group, the remainder if present, then both proofs of knowledge
  #[cfg(feature = "serialize")]
  pub fn serialize<W: Write>(&self, w: &mut W) -> std::io::Result<()> {
    for bit in &self.bits {
      bit.serialize(w)?;
    }
    if let Some(bit) = &self.remainder {
      bit.serialize(w)?;
    }
    self.poks.0.serialize(w)?;
    self.poks.1.serialize(w)
  }

  // Read a proof, deriving the expected bit group count from the curve pair's capacity
  #[cfg(feature = "serialize")]
  pub fn deserialize<R: Read>(r: &mut R) -> std::io::Result<Self> {
    let capacity = usize::try_from(
      G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)
    ).unwrap();
    let bits_per_group = BitSignature::from(SIGNATURE).bits();

    let mut bits = Vec::with_capacity(capacity / bits_per_group);
    for _ in 0 .. (capacity / bits_per_group) {
      bits.push(Bits::deserialize(r)?);
    }

    let mut remainder = None;
    if (capacity % bits_per_group) != 0 {
      remainder = Some(Bits::deserialize(r)?);
    }

    Ok(
      __DLEqProof {
        bits,
        remainder,
        poks: (SchnorrPoK::deserialize(r)?, SchnorrPoK::deserialize(r)?)
      }
    )
  }
}

View File

@@ -0,0 +1,49 @@
use ff::PrimeFieldBits;
/// Convert a uniform scalar into one usable on both fields, clearing the top bits as needed
pub fn scalar_normalize<F0: PrimeFieldBits, F1: PrimeFieldBits>(scalar: F0) -> (F0, F1) {
let mutual_capacity = F0::CAPACITY.min(F1::CAPACITY);
// The security of a mutual key is the security of the lower field. Accordingly, this bans a
// difference of more than 4 bits
#[cfg(feature = "secure_capacity_difference")]
assert!((F0::CAPACITY.max(F1::CAPACITY) - mutual_capacity) < 4);
let mut res1 = F0::zero();
let mut res2 = F1::zero();
// Uses the bit view API to ensure a consistent endianess
let mut bits = scalar.to_le_bits();
// Convert it to big endian
bits.reverse();
for bit in bits.iter().skip(bits.len() - usize::try_from(mutual_capacity).unwrap()) {
res1 = res1.double();
res2 = res2.double();
let bit = *bit as u8;
debug_assert_eq!(bit | 1, 1);
res1 += F0::from(bit.into());
res2 += F1::from(bit.into());
}
(res1, res2)
}
/// Helper to convert a scalar between fields. Returns None if the scalar isn't mutually valid
pub fn scalar_convert<F0: PrimeFieldBits, F1: PrimeFieldBits>(scalar: F0) -> Option<F1> {
  let (normalized, converted) = scalar_normalize(scalar);
  // The scalar is mutually valid only if normalization was a no-op
  if scalar == normalized { Some(converted) } else { None }
}
/// Create a mutually valid scalar from bytes via bit truncation to not introduce bias
pub fn mutual_scalar_from_bytes<F0: PrimeFieldBits, F1: PrimeFieldBits>(bytes: &[u8]) -> (F0, F1) {
  let capacity = usize::try_from(F0::CAPACITY.min(F1::CAPACITY)).unwrap();
  // There must be at least enough input bits to fill the mutual capacity
  debug_assert!((bytes.len() * 8) >= capacity);
  let mut accum = F0::zero();
  // Consume exactly `capacity` bits, taking each byte's bits from least significant up
  for b in 0 .. capacity {
    accum = accum.double();
    accum += F0::from(((bytes[b / 8] >> (b % 8)) & 1).into());
  }
  // Guaranteed to succeed as accum was built to fit within the mutual capacity
  (accum, scalar_convert(accum).unwrap())
}

View File

@@ -0,0 +1,79 @@
use rand_core::{RngCore, CryptoRng};
use transcript::Transcript;
use group::{ff::{Field, PrimeFieldBits}, prime::PrimeGroup};
use multiexp::BatchVerifier;
use crate::challenge;
#[cfg(feature = "serialize")]
use std::io::{Read, Write};
#[cfg(feature = "serialize")]
use ff::PrimeField;
#[cfg(feature = "serialize")]
use crate::{read_scalar, cross_group::read_point};
// Schnorr proof of knowledge of the discrete logarithm of a public key
#[allow(non_snake_case)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) struct SchnorrPoK<G: PrimeGroup> {
  // Nonce commitment
  R: G,
  // Response scalar
  s: G::Scalar
}
impl<G: PrimeGroup> SchnorrPoK<G> where G::Scalar: PrimeFieldBits {
  // Not hram due to the lack of m
  // Derive the challenge by binding the generator, nonce, and public key
  #[allow(non_snake_case)]
  fn hra<T: Transcript>(transcript: &mut T, generator: G, R: G, A: G) -> G::Scalar {
    transcript.domain_separate(b"schnorr_proof_of_knowledge");
    transcript.append_message(b"generator", generator.to_bytes().as_ref());
    transcript.append_message(b"nonce", R.to_bytes().as_ref());
    transcript.append_message(b"public_key", A.to_bytes().as_ref());
    challenge(transcript)
  }

  // Prove knowledge of private_key's discrete logarithm with respect to generator
  pub(crate) fn prove<R: RngCore + CryptoRng, T: Transcript>(
    rng: &mut R,
    transcript: &mut T,
    generator: G,
    private_key: G::Scalar
  ) -> SchnorrPoK<G> {
    let nonce = G::Scalar::random(rng);
    #[allow(non_snake_case)]
    let R = generator * nonce;
    SchnorrPoK {
      R,
      // s = nonce + (challenge * private key)
      s: nonce + (private_key * SchnorrPoK::hra(transcript, generator, R, generator * private_key))
    }
  }

  // Queue the verification statement (s*G - R - c*A == identity) into the batch
  // verifier; the caller must verify the batch for the proof to be checked
  pub(crate) fn verify<R: RngCore + CryptoRng, T: Transcript>(
    &self,
    rng: &mut R,
    transcript: &mut T,
    generator: G,
    public_key: G,
    batch: &mut BatchVerifier<(), G>
  ) {
    batch.queue(
      rng,
      (),
      [
        (-self.s, generator),
        (G::Scalar::one(), self.R),
        (Self::hra(transcript, generator, self.R, public_key), public_key)
      ]
    );
  }

  // Write the nonce commitment, then the response scalar
  #[cfg(feature = "serialize")]
  pub fn serialize<W: Write>(&self, w: &mut W) -> std::io::Result<()> {
    w.write_all(self.R.to_bytes().as_ref())?;
    w.write_all(self.s.to_repr().as_ref())
  }

  // Read a proof of knowledge, rejecting invalid encodings
  #[cfg(feature = "serialize")]
  pub fn deserialize<R: Read>(r: &mut R) -> std::io::Result<SchnorrPoK<G>> {
    Ok(SchnorrPoK { R: read_point(r)?, s: read_scalar(r)? })
  }
}

151
crypto/dleq/src/lib.rs Normal file
View File

@@ -0,0 +1,151 @@
use thiserror::Error;
use rand_core::{RngCore, CryptoRng};
use transcript::Transcript;
use ff::{Field, PrimeField};
use group::prime::PrimeGroup;
#[cfg(feature = "serialize")]
use std::io::{self, ErrorKind, Error, Read, Write};
#[cfg(feature = "experimental")]
pub mod cross_group;
#[cfg(test)]
mod tests;
// A pair of generators a DLEq proof is made with respect to
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct Generators<G: PrimeGroup> {
  // The generator the discrete log is primarily defined against
  primary: G,
  // The alternate generator the same discrete log is proven for
  alt: G
}
impl<G: PrimeGroup> Generators<G> {
  // Construct a new pair of generators
  pub fn new(primary: G, alt: G) -> Generators<G> {
    Generators { primary, alt }
  }

  // Bind both generators into the transcript
  fn transcript<T: Transcript>(&self, transcript: &mut T) {
    transcript.domain_separate(b"generators");
    transcript.append_message(b"primary", self.primary.to_bytes().as_ref());
    transcript.append_message(b"alternate", self.alt.to_bytes().as_ref());
  }
}
// Derive a challenge scalar from the transcript, using twice NUM_BITS worth of bytes
// so the reduction into the field is without meaningful bias
pub(crate) fn challenge<T: Transcript, F: PrimeField>(transcript: &mut T) -> F {
  // From here, there are three ways to get a scalar under the ff/group API
  // 1: Scalar::random(ChaCha12Rng::from_seed(self.transcript.rng_seed(b"challenge")))
  // 2: Grabbing a UInt library to perform reduction by the modulus, then determining endianess
  //    and loading it in
  // 3: Iterating over each byte and manually doubling/adding. This is simplest

  // Get a wide amount of bytes to safely reduce without bias
  let target = ((usize::try_from(F::NUM_BITS).unwrap() + 7) / 8) * 2;
  let mut challenge_bytes = transcript.challenge(b"challenge").as_ref().to_vec();
  while challenge_bytes.len() < target {
    // Secure given transcripts updating on challenge
    challenge_bytes.extend(transcript.challenge(b"challenge_extension").as_ref());
  }
  challenge_bytes.truncate(target);

  // Load the bytes big-endian via repeated doubling (option 3 above)
  let mut challenge = F::zero();
  for b in challenge_bytes {
    for _ in 0 .. 8 {
      challenge = challenge.double();
    }
    challenge += F::from(u64::from(b));
  }
  challenge
}
// Read a canonically-encoded field element, rejecting non-canonical encodings
#[cfg(feature = "serialize")]
fn read_scalar<R: Read, F: PrimeField>(r: &mut R) -> io::Result<F> {
  let mut repr = F::Repr::default();
  r.read_exact(repr.as_mut())?;
  let candidate = F::from_repr(repr);
  if bool::from(candidate.is_none()) {
    return Err(Error::new(ErrorKind::Other, "invalid scalar"));
  }
  Ok(candidate.unwrap())
}
// Error for the standard (single-group) DLEq proof
#[derive(Error, Debug)]
pub enum DLEqError {
  #[error("invalid proof")]
  InvalidProof
}
// Proof that two points, over the primary and alternate generators, share a discrete
// logarithm, in (challenge, response) form
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct DLEqProof<G: PrimeGroup> {
  // Challenge scalar
  c: G::Scalar,
  // Response scalar
  s: G::Scalar
}
#[allow(non_snake_case)]
impl<G: PrimeGroup> DLEqProof<G> {
  // Derive the challenge by binding the generators, both nonces, and both points
  fn challenge<T: Transcript>(
    transcript: &mut T,
    generators: Generators<G>,
    nonces: (G, G),
    points: (G, G)
  ) -> G::Scalar {
    generators.transcript(transcript);
    transcript.domain_separate(b"dleq");
    transcript.append_message(b"nonce_primary", nonces.0.to_bytes().as_ref());
    transcript.append_message(b"nonce_alternate", nonces.1.to_bytes().as_ref());
    transcript.append_message(b"point_primary", points.0.to_bytes().as_ref());
    transcript.append_message(b"point_alternate", points.1.to_bytes().as_ref());
    challenge(transcript)
  }

  // Prove the two points scalar * primary and scalar * alt share the discrete log
  // `scalar` across the pair of generators
  pub fn prove<R: RngCore + CryptoRng, T: Transcript>(
    rng: &mut R,
    transcript: &mut T,
    generators: Generators<G>,
    scalar: G::Scalar
  ) -> DLEqProof<G> {
    // Single nonce reused across both generators, as the same discrete log is proven
    let r = G::Scalar::random(rng);
    let c = Self::challenge(
      transcript,
      generators,
      (generators.primary * r, generators.alt * r),
      (generators.primary * scalar, generators.alt * scalar)
    );
    // s = r + (c * scalar)
    let s = r + (c * scalar);
    DLEqProof { c, s }
  }

  // Verify the proof against the claimed points, re-deriving the nonces as
  // s*G - c*P and checking the challenge matches
  pub fn verify<T: Transcript>(
    &self,
    transcript: &mut T,
    generators: Generators<G>,
    points: (G, G)
  ) -> Result<(), DLEqError> {
    if self.c != Self::challenge(
      transcript,
      generators,
      (
        (generators.primary * self.s) - (points.0 * self.c),
        (generators.alt * self.s) - (points.1 * self.c)
      ),
      points
    ) {
      Err(DLEqError::InvalidProof)?;
    }
    Ok(())
  }

  // Write the challenge, then the response
  #[cfg(feature = "serialize")]
  pub fn serialize<W: Write>(&self, w: &mut W) -> io::Result<()> {
    w.write_all(self.c.to_repr().as_ref())?;
    w.write_all(self.s.to_repr().as_ref())
  }

  // Read a proof, rejecting invalid scalar encodings
  #[cfg(feature = "serialize")]
  pub fn deserialize<R: Read>(r: &mut R) -> io::Result<DLEqProof<G>> {
    Ok(DLEqProof { c: read_scalar(r)?, s: read_scalar(r)? })
  }
}

View File

@@ -0,0 +1,69 @@
use rand_core::OsRng;
use group::{ff::Field, Group};
use multiexp::BatchVerifier;
use crate::{
cross_group::aos::{Re, Aos},
tests::cross_group::{G0, G1, transcript, generators}
};
// Round-trip an AOS proof through serialization and ensure it's unchanged
#[allow(non_snake_case)]
#[cfg(feature = "serialize")]
fn test_aos_serialization<const RING_LEN: usize>(proof: Aos<G0, G1, RING_LEN>, Re_0: Re<G0, G1>) {
  let mut buf = vec![];
  proof.serialize(&mut buf).unwrap();
  let deserialized = Aos::deserialize(&mut std::io::Cursor::new(buf), Re_0).unwrap();
  assert_eq!(proof, deserialized);
}
// Prove and verify an AOS ring signature for every possible signer position
fn test_aos<const RING_LEN: usize>(default: Re<G0, G1>) {
  let generators = generators();

  let mut ring_keys = [(<G0 as Group>::Scalar::zero(), <G1 as Group>::Scalar::zero()); RING_LEN];
  // Side-effect of G0 being a type-alias with identity() deprecated
  #[allow(deprecated)]
  let mut ring = [(G0::identity(), G1::identity()); RING_LEN];
  for i in 0 .. RING_LEN {
    ring_keys[i] = (
      <G0 as Group>::Scalar::random(&mut OsRng),
      <G1 as Group>::Scalar::random(&mut OsRng)
    );
    ring[i] = (generators.0.alt * ring_keys[i].0, generators.1.alt * ring_keys[i].1);
  }

  // Exercise every signer index within the ring
  for actual in 0 .. RING_LEN {
    let proof = Aos::<_, _, RING_LEN>::prove(
      &mut OsRng,
      transcript(),
      generators,
      &ring,
      actual,
      ring_keys[actual],
      default.clone()
    );

    let mut batch = (BatchVerifier::new(0), BatchVerifier::new(0));
    proof.verify(&mut OsRng, transcript(), generators, &mut batch, &ring).unwrap();
    // For e, these should have nothing. For R, these should have 6 elements each which sum to 0
    assert!(batch.0.verify_vartime());
    assert!(batch.1.verify_vartime());

    #[cfg(feature = "serialize")]
    test_aos_serialization(proof, default.clone());
  }
}
// Test the (e, s) AOS form at both supported ring lengths
#[test]
fn test_aos_e() {
  test_aos::<2>(Re::e_default());
  test_aos::<4>(Re::e_default());
}
// Test the (R, s) AOS form, which is only used with 1-bit (ring of 2) signatures
#[allow(non_snake_case)]
#[test]
fn test_aos_R() {
  // Batch verification appreciates the longer vectors, which means not batching bits
  test_aos::<2>(Re::R_default());
}

View File

@@ -0,0 +1,192 @@
use hex_literal::hex;
use rand_core::{RngCore, OsRng};
use ff::{Field, PrimeField};
use group::{Group, GroupEncoding};
use blake2::{Digest, Blake2b512};
use k256::{Scalar, ProjectivePoint};
use dalek_ff_group::{self as dfg, EdwardsPoint, CompressedEdwardsY};
use transcript::RecommendedTranscript;
use crate::{
Generators,
cross_group::{
scalar::mutual_scalar_from_bytes,
ClassicLinearDLEq, EfficientLinearDLEq, ConciseLinearDLEq, CompromiseLinearDLEq
}
};
mod scalar;
mod schnorr;
mod aos;
type G0 = ProjectivePoint;
type G1 = EdwardsPoint;
// Fresh transcript with a fixed domain separator for these tests
pub(crate) fn transcript() -> RecommendedTranscript {
  RecommendedTranscript::new(b"Cross-Group DLEq Proof Test")
}
// Generator pairs for Secp256k1 and Ed25519, with fixed alternate generators decoded
// from hardcoded encodings
pub(crate) fn generators() -> (Generators<G0>, Generators<G1>) {
  (
    Generators::new(
      ProjectivePoint::GENERATOR,
      ProjectivePoint::from_bytes(
        &(hex!("0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0").into())
      ).unwrap()
    ),
    Generators::new(
      EdwardsPoint::generator(),
      CompressedEdwardsY::new(
        hex!("8b655970153799af2aeadc9ff1add0ea6c7251d54154cfa92c173a0dd39c1f94")
      ).decompress().unwrap()
    )
  )
}
// Verify a proof returns the expected public keys, then (when the serialize feature
// is enabled) round-trip it through serialization
macro_rules! verify_and_deserialize {
  ($type: ty, $proof: ident, $generators: ident, $keys: ident) => {
    let public_keys = $proof.verify(&mut OsRng, &mut transcript(), $generators).unwrap();
    assert_eq!($generators.0.primary * $keys.0, public_keys.0);
    assert_eq!($generators.1.primary * $keys.1, public_keys.1);

    #[cfg(feature = "serialize")]
    {
      let mut buf = vec![];
      $proof.serialize(&mut buf).unwrap();
      let deserialized = <$type>::deserialize(&mut std::io::Cursor::new(&buf)).unwrap();
      assert_eq!($proof, deserialized);
    }
  }
}
// Generate a benchmark ($benchmark, #[ignore]d by default) and a correctness test
// ($name) for the given DLEq proof type
macro_rules! test_dleq {
  ($str: literal, $benchmark: ident, $name: ident, $type: ident) => {
    #[ignore]
    #[test]
    fn $benchmark() {
      println!("Benchmarking with Secp256k1/Ed25519");
      let generators = generators();

      let mut seed = [0; 32];
      OsRng.fill_bytes(&mut seed);
      let key = Blake2b512::new().chain_update(seed);

      let runs = 200;
      let mut proofs = Vec::with_capacity(usize::try_from(runs).unwrap());
      let time = std::time::Instant::now();
      for _ in 0 .. runs {
        proofs.push($type::prove(&mut OsRng, &mut transcript(), generators, key.clone()).0);
      }
      println!("{} had a average prove time of {}ms", $str, time.elapsed().as_millis() / runs);

      let time = std::time::Instant::now();
      for proof in &proofs {
        proof.verify(&mut OsRng, &mut transcript(), generators).unwrap();
      }
      println!("{} had a average verify time of {}ms", $str, time.elapsed().as_millis() / runs);

      #[cfg(feature = "serialize")]
      {
        let mut buf = vec![];
        proofs[0].serialize(&mut buf).unwrap();
        println!("{} had a proof size of {} bytes", $str, buf.len());
      }
    }

    #[test]
    fn $name() {
      let generators = generators();
      // Exercise both key derivation paths: i == 0 proves for a hash-derived scalar
      // via prove, i == 1 rejection-samples a random scalar via prove_without_bias.
      // The previous bound of `0 .. 1` never ran the second iteration, leaving the
      // prove_without_bias path (and its assertions) dead code.
      for i in 0 .. 2 {
        let (proof, keys) = if i == 0 {
          let mut seed = [0; 32];
          OsRng.fill_bytes(&mut seed);
          $type::prove(
            &mut OsRng,
            &mut transcript(),
            generators,
            Blake2b512::new().chain_update(seed)
          )
        } else {
          let mut key;
          let mut res;
          // Rejection-sample until the random scalar is mutually valid
          while {
            key = Scalar::random(&mut OsRng);
            res = $type::prove_without_bias(&mut OsRng, &mut transcript(), generators, key);
            res.is_none()
          } {}
          let res = res.unwrap();
          assert_eq!(key, res.1.0);
          res
        };

        verify_and_deserialize!($type::<G0, G1>, proof, generators, keys);
      }
    }
  }
}
// Instantiate the benchmark/test pair for each of the four DLEq variants
test_dleq!("ClassicLinear", benchmark_classic_linear, test_classic_linear, ClassicLinearDLEq);
test_dleq!("ConciseLinear", benchmark_concise_linear, test_concise_linear, ConciseLinearDLEq);
test_dleq!(
  "EfficientLinear",
  benchmark_efficient_linear,
  test_efficient_linear,
  EfficientLinearDLEq
);
test_dleq!(
  "CompromiseLinear",
  benchmark_compromise_linear,
  test_compromise_linear,
  CompromiseLinearDLEq
);
// A scalar at 2^CAPACITY (of the smaller field) exceeds the mutual capacity and must
// be rejected by prove_without_bias
#[test]
fn test_rejection_sampling() {
  let mut pow_2 = Scalar::one();
  for _ in 0 .. dfg::Scalar::CAPACITY {
    pow_2 = pow_2.double();
  }

  assert!(
    // Either would work
    EfficientLinearDLEq::prove_without_bias(
      &mut OsRng,
      &mut transcript(),
      generators(),
      pow_2
    ).is_none()
  );
}
// Exercise the remainder-bit path by proving over a curve pair whose mutual capacity
// isn't a multiple of the bits-per-group
#[test]
fn test_remainder() {
  // Uses Secp256k1 for both to achieve an odd capacity of 255
  assert_eq!(Scalar::CAPACITY, 255);
  let generators = (generators().0, generators().0);
  // This will ignore any unused bits, ensuring every remaining one is set
  let keys = mutual_scalar_from_bytes(&[0xFF; 32]);
  // keys.0 should be 2^255 - 1, i.e. all 255 usable bits set
  assert_eq!(keys.0 + Scalar::one(), Scalar::from(2u64).pow_vartime(&[255]));
  assert_eq!(keys.0, keys.1);

  let (proof, res) = ConciseLinearDLEq::prove_without_bias(
    &mut OsRng,
    &mut transcript(),
    generators,
    keys.0
  ).unwrap();
  assert_eq!(keys, res);

  verify_and_deserialize!(
    ConciseLinearDLEq::<ProjectivePoint, ProjectivePoint>,
    proof,
    generators,
    keys
  );
}

View File

@@ -0,0 +1,47 @@
use rand_core::OsRng;
use ff::{Field, PrimeField};
use k256::Scalar as K256Scalar;
use dalek_ff_group::Scalar as DalekScalar;
use crate::cross_group::scalar::{scalar_normalize, scalar_convert};
// Check scalar_normalize/scalar_convert between secp256k1 and Ed25519 scalars
#[test]
fn test_scalar() {
  // Zero and one are trivially mutually valid
  assert_eq!(
    scalar_normalize::<_, DalekScalar>(K256Scalar::zero()),
    (K256Scalar::zero(), DalekScalar::zero())
  );

  assert_eq!(
    scalar_normalize::<_, DalekScalar>(K256Scalar::one()),
    (K256Scalar::one(), DalekScalar::one())
  );

  let mut initial;
  // Loop until a random scalar actually has high bits cleared by normalization
  while {
    initial = K256Scalar::random(&mut OsRng);
    let (k, ed) = scalar_normalize::<_, DalekScalar>(initial);

    // The initial scalar should equal the new scalar with Ed25519's capacity
    let mut initial_bytes = (&initial.to_repr()).to_vec();
    // Drop the first 4 bits to hit 252
    initial_bytes[0] = initial_bytes[0] & 0b00001111;
    let k_bytes = (&k.to_repr()).to_vec();
    assert_eq!(initial_bytes, k_bytes);

    let mut ed_bytes = ed.to_repr().as_ref().to_vec();
    // Reverse to big endian
    ed_bytes.reverse();
    assert_eq!(k_bytes, ed_bytes);

    // Verify conversion works as expected
    assert_eq!(scalar_convert::<_, DalekScalar>(k), Some(ed));

    // Run this test again if this secp256k1 scalar didn't have any bits cleared
    initial == k
  } {}
  // Verify conversion returns None when the scalar isn't mutually valid
  assert!(scalar_convert::<_, DalekScalar>(initial).is_none());
}

View File

@@ -0,0 +1,38 @@
use rand_core::OsRng;
use group::{ff::{Field, PrimeFieldBits}, prime::PrimeGroup};
use multiexp::BatchVerifier;
use transcript::RecommendedTranscript;
use crate::cross_group::schnorr::SchnorrPoK;
// Prove then verify a Schnorr PoK for a random key, checking the batch passes
fn test_schnorr<G: PrimeGroup>() where G::Scalar: PrimeFieldBits {
  let private = G::Scalar::random(&mut OsRng);

  let transcript = RecommendedTranscript::new(b"Schnorr Test");
  let mut batch = BatchVerifier::new(3);
  // Prove and verify must use identical transcripts, hence the clones
  SchnorrPoK::prove(
    &mut OsRng,
    &mut transcript.clone(),
    G::generator(),
    private
  ).verify(
    &mut OsRng,
    &mut transcript.clone(),
    G::generator(),
    G::generator() * private,
    &mut batch
  );
  // verify only queues statements; the batch check is what actually validates them
  assert!(batch.verify_vartime());
}
// Concrete instantiations of the Schnorr PoK test for each supported curve
#[test]
fn test_secp256k1() {
  test_schnorr::<k256::ProjectivePoint>();
}

#[test]
fn test_ed25519() {
  test_schnorr::<dalek_ff_group::EdwardsPoint>();
}

View File

@@ -0,0 +1,43 @@
#[cfg(feature = "experimental")]
mod cross_group;
use hex_literal::hex;
use rand_core::OsRng;
use ff::Field;
use group::GroupEncoding;
use k256::{Scalar, ProjectivePoint};
use transcript::RecommendedTranscript;
use crate::{Generators, DLEqProof};
#[test]
fn test_dleq() {
  let transcript = || RecommendedTranscript::new(b"DLEq Proof Test");

  let generators = Generators::new(
    ProjectivePoint::GENERATOR,
    // Alternate generator as a hardcoded compressed point; unwrap is fine as the
    // constant is known-valid
    ProjectivePoint::from_bytes(
      &(hex!("0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0").into())
    ).unwrap()
  );

  // Prove both generators' outputs share the same discrete log `key`
  let key = Scalar::random(&mut OsRng);
  let proof = DLEqProof::prove(&mut OsRng, &mut transcript(), generators, key);

  let keys = (generators.primary * key, generators.alt * key);
  proof.verify(&mut transcript(), generators, keys).unwrap();

  // Round-trip the proof through (de)serialization and re-verify
  #[cfg(feature = "serialize")]
  {
    let mut buf = vec![];
    proof.serialize(&mut buf).unwrap();
    let deserialized = DLEqProof::<ProjectivePoint>::deserialize(
      &mut std::io::Cursor::new(&buf)
    ).unwrap();
    assert_eq!(proof, deserialized);
    deserialized.verify(&mut transcript(), generators, keys).unwrap();
  }
}

View File

@@ -1,24 +1,46 @@
[package]
name = "frost"
name = "modular-frost"
version = "0.1.0"
description = "Implementation of FROST over ff/group"
description = "Modular implementation of FROST over ff/group"
license = "MIT"
repository = "https://github.com/serai-dex/serai"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["frost", "multisig", "threshold"]
edition = "2021"
[dependencies]
thiserror = "1"
rand_core = "0.6"
hex = "0.4"
ff = "0.11"
group = "0.11"
sha2 = { version = "0.10", optional = true }
transcript = { path = "../transcript" }
ff = "0.12"
group = "0.12"
multiexp = { path = "../multiexp", features = ["batch"] }
elliptic-curve = { version = "0.12", features = ["hash2curve"], optional = true }
p256 = { version = "0.11", features = ["arithmetic", "bits", "hash2curve"], optional = true }
k256 = { version = "0.11", features = ["arithmetic", "bits", "hash2curve"], optional = true }
dalek-ff-group = { path = "../dalek-ff-group", version = "0.1", optional = true }
transcript = { package = "flexible-transcript", path = "../transcript", version = "0.1" }
multiexp = { path = "../multiexp", version = "0.1", features = ["batch"] }
dleq = { package = "dleq", path = "../dleq", version = "0.1", features = ["serialize"] }
[dev-dependencies]
rand = "0.8"
sha2 = "0.10"
k256 = { version = "0.10", features = ["arithmetic"] }
dalek-ff-group = { path = "../dalek-ff-group" }
[features]
curves = ["sha2"] # All officially denoted curves use the SHA2 family of hashes
kp256 = ["elliptic-curve", "curves"]
p256 = ["kp256", "dep:p256"]
secp256k1 = ["kp256", "k256"]
dalek = ["curves", "dalek-ff-group"]
ed25519 = ["dalek"]
ristretto = ["dalek"]

View File

@@ -1,3 +1,6 @@
# FROST
# Modular FROST
Implementation of FROST for any curve with a ff/group API.
A modular implementation of FROST for any curve with a ff/group API. Notably,
beyond curve modularity, custom algorithms may be specified, providing support
for privacy coins. The provided Schnorr algorithm also has a modular HRAM due
to the variety in existence, enabling integration with existing systems.

View File

@@ -4,7 +4,7 @@ use rand_core::{RngCore, CryptoRng};
use transcript::Transcript;
use crate::{Curve, FrostError, MultisigView, schnorr};
use crate::{Curve, FrostError, FrostView, schnorr};
pub use schnorr::SchnorrSignature;
/// Algorithm to use FROST with
@@ -13,22 +13,25 @@ pub trait Algorithm<C: Curve>: Clone {
/// The resulting type of the signatures this algorithm will produce
type Signature: Clone + PartialEq + Debug;
/// Obtain a mutable borrow of the underlying transcript
fn transcript(&mut self) -> &mut Self::Transcript;
/// Obtain the list of nonces to generate, as specified by the basepoints to create commitments
/// against per-nonce. These are not committed to by FROST on the underlying transcript
fn nonces(&self) -> Vec<Vec<C::G>>;
/// Generate an addendum to FROST"s preprocessing stage
fn preprocess_addendum<R: RngCore + CryptoRng>(
&mut self,
rng: &mut R,
params: &MultisigView<C>,
nonces: &[C::F; 2],
params: &FrostView<C>,
) -> Vec<u8>;
/// Proccess the addendum for the specified participant. Guaranteed to be ordered
fn process_addendum(
&mut self,
params: &MultisigView<C>,
params: &FrostView<C>,
l: u16,
commitments: &[C::G; 2],
serialized: &[u8],
) -> Result<(), FrostError>;
@@ -38,23 +41,24 @@ pub trait Algorithm<C: Curve>: Clone {
/// The nonce will already have been processed into the combined form d + (e * p)
fn sign_share(
&mut self,
params: &MultisigView<C>,
nonce_sum: C::G,
binding: C::F,
nonce: C::F,
params: &FrostView<C>,
nonce_sums: &[Vec<C::G>],
nonces: &[C::F],
msg: &[u8],
) -> C::F;
/// Verify a signature
fn verify(&self, group_key: C::G, nonce: C::G, sum: C::F) -> Option<Self::Signature>;
#[must_use]
fn verify(&self, group_key: C::G, nonces: &[Vec<C::G>], sum: C::F) -> Option<Self::Signature>;
/// Verify a specific share given as a response. Used to determine blame if signature
/// verification fails
#[must_use]
fn verify_share(
&self,
l: u16,
verification_share: C::G,
nonce: C::G,
nonces: &[Vec<C::G>],
share: C::F,
) -> bool;
}
@@ -63,6 +67,12 @@ pub trait Algorithm<C: Curve>: Clone {
#[derive(Clone, Debug)]
pub struct IetfTranscript(Vec<u8>);
impl Transcript for IetfTranscript {
type Challenge = Vec<u8>;
fn new(_: &'static [u8]) -> IetfTranscript {
unimplemented!("IetfTranscript should not be used with multiple nonce protocols");
}
fn domain_separate(&mut self, _: &[u8]) {}
fn append_message(&mut self, _: &'static [u8], message: &[u8]) {
@@ -112,20 +122,22 @@ impl<C: Curve, H: Hram<C>> Algorithm<C> for Schnorr<C, H> {
&mut self.transcript
}
fn nonces(&self) -> Vec<Vec<C::G>> {
vec![vec![C::GENERATOR]]
}
fn preprocess_addendum<R: RngCore + CryptoRng>(
&mut self,
_: &mut R,
_: &MultisigView<C>,
_: &[C::F; 2],
_: &FrostView<C>,
) -> Vec<u8> {
vec![]
}
fn process_addendum(
&mut self,
_: &MultisigView<C>,
_: &FrostView<C>,
_: u16,
_: &[C::G; 2],
_: &[u8],
) -> Result<(), FrostError> {
Ok(())
@@ -133,19 +145,19 @@ impl<C: Curve, H: Hram<C>> Algorithm<C> for Schnorr<C, H> {
fn sign_share(
&mut self,
params: &MultisigView<C>,
nonce_sum: C::G,
_: C::F,
nonce: C::F,
params: &FrostView<C>,
nonce_sums: &[Vec<C::G>],
nonces: &[C::F],
msg: &[u8],
) -> C::F {
let c = H::hram(&nonce_sum, &params.group_key(), msg);
let c = H::hram(&nonce_sums[0][0], &params.group_key(), msg);
self.c = Some(c);
schnorr::sign::<C>(params.secret_share(), nonce, c).s
schnorr::sign::<C>(params.secret_share(), nonces[0], c).s
}
fn verify(&self, group_key: C::G, nonce: C::G, sum: C::F) -> Option<Self::Signature> {
let sig = SchnorrSignature { R: nonce, s: sum };
#[must_use]
fn verify(&self, group_key: C::G, nonces: &[Vec<C::G>], sum: C::F) -> Option<Self::Signature> {
let sig = SchnorrSignature { R: nonces[0][0], s: sum };
if schnorr::verify::<C>(group_key, self.c.unwrap(), &sig) {
Some(sig)
} else {
@@ -153,17 +165,18 @@ impl<C: Curve, H: Hram<C>> Algorithm<C> for Schnorr<C, H> {
}
}
#[must_use]
fn verify_share(
&self,
_: u16,
verification_share: C::G,
nonce: C::G,
nonces: &[Vec<C::G>],
share: C::F,
) -> bool {
schnorr::verify::<C>(
verification_share,
self.c.unwrap(),
&SchnorrSignature { R: nonce, s: share}
&SchnorrSignature { R: nonces[0][0], s: share}
)
}
}

View File

@@ -0,0 +1,91 @@
use rand_core::{RngCore, CryptoRng};
use sha2::{Digest, Sha512};
use dalek_ff_group::Scalar;
use crate::{curve::Curve, algorithm::Hram};
// Generates a FROST Curve impl (and its IETF HRAM) for a curve25519-dalek group,
// parameterized by point type, basepoint constant, curve ID, ciphersuite context
// string, and the challenge/digest domain-separation tags
macro_rules! dalek_curve {
  (
    $Curve: ident,
    $Hram: ident,
    $Point: ident,
    $POINT: ident,
    $ID: literal,
    $CONTEXT: literal,
    $chal: literal,
    $digest: literal,
  ) => {
    use dalek_ff_group::{$Point, $POINT};

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    pub struct $Curve;
    impl Curve for $Curve {
      type F = Scalar;
      type G = $Point;

      const ID: &'static [u8] = $ID;
      const GENERATOR: Self::G = $POINT;

      // Nonce = H(random seed || secret), binding the nonce to both fresh
      // entropy and the secret share
      fn random_nonce<R: RngCore + CryptoRng>(secret: Self::F, rng: &mut R) -> Self::F {
        let mut seed = vec![0; 32];
        rng.fill_bytes(&mut seed);
        seed.extend(&secret.to_bytes());
        Self::hash_to_F(b"nonce", &seed)
      }

      // H3 from the IETF draft: domain-separated SHA-512 of the message
      fn hash_msg(msg: &[u8]) -> Vec<u8> {
        Sha512::new()
          .chain_update($CONTEXT)
          .chain_update($digest)
          .chain_update(msg)
          .finalize()
          .to_vec()
      }

      // H1 from the IETF draft
      fn hash_binding_factor(binding: &[u8]) -> Self::F {
        Self::hash_to_F(b"rho", binding)
      }

      // Wide reduction of SHA-512 output into the scalar field
      fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F {
        Scalar::from_hash(Sha512::new().chain_update($CONTEXT).chain_update(dst).chain_update(msg))
      }
    }

    #[derive(Copy, Clone)]
    pub struct $Hram;
    impl Hram<$Curve> for $Hram {
      #[allow(non_snake_case)]
      // Challenge over (R, A, m) using the curve's compressed point encodings
      fn hram(R: &$Point, A: &$Point, m: &[u8]) -> Scalar {
        $Curve::hash_to_F($chal, &[&R.compress().to_bytes(), &A.compress().to_bytes(), m].concat())
      }
    }
  }
}
#[cfg(any(test, feature = "ristretto"))]
dalek_curve!(
Ristretto,
IetfRistrettoHram,
RistrettoPoint,
RISTRETTO_BASEPOINT_POINT,
b"ristretto",
b"FROST-RISTRETTO255-SHA512-v5",
b"chal",
b"digest",
);
#[cfg(feature = "ed25519")]
dalek_curve!(
Ed25519,
IetfEd25519Hram,
EdwardsPoint,
ED25519_BASEPOINT_POINT,
b"edwards25519",
b"",
b"",
b"",
);

View File

@@ -0,0 +1,105 @@
use rand_core::{RngCore, CryptoRng};
use sha2::{digest::Update, Digest, Sha256};
use group::{ff::Field, GroupEncoding};
use elliptic_curve::{bigint::{Encoding, U384}, hash2curve::{Expander, ExpandMsg, ExpandMsgXmd}};
use crate::{curve::{Curve, F_from_slice}, algorithm::Hram};
// Generates a FROST Curve impl (and its HRAM) for a k256/p256 (RustCrypto) curve,
// parameterized by the crate ident, curve ID, and ciphersuite context string
macro_rules! kp_curve {
  (
    $lib: ident,
    $Curve: ident,
    $Hram: ident,
    $ID: literal,
    $CONTEXT: literal
  ) => {
    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    pub struct $Curve;
    impl Curve for $Curve {
      type F = $lib::Scalar;
      type G = $lib::ProjectivePoint;

      const ID: &'static [u8] = $ID;
      const GENERATOR: Self::G = $lib::ProjectivePoint::GENERATOR;

      // Nonce = H(random seed || secret), binding fresh entropy to the secret share
      fn random_nonce<R: RngCore + CryptoRng>(secret: Self::F, rng: &mut R) -> Self::F {
        let mut seed = vec![0; 32];
        rng.fill_bytes(&mut seed);
        seed.extend(secret.to_bytes());
        Self::hash_to_F(&[$CONTEXT as &[u8], b"nonce"].concat(), &seed)
      }

      // H3 from the IETF draft
      fn hash_msg(msg: &[u8]) -> Vec<u8> {
        (&Sha256::new()
          .chain($CONTEXT)
          .chain(b"digest")
          .chain(msg)
          .finalize()
        ).to_vec()
      }

      // H1 from the IETF draft
      fn hash_binding_factor(binding: &[u8]) -> Self::F {
        Self::hash_to_F(&[$CONTEXT as &[u8], b"rho"].concat(), binding)
      }

      fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F {
        let mut dst = dst;
        // Per hash-to-curve, DSTs over 255 bytes are replaced by their hash.
        // NOTE(review): the oversize hash is computed unconditionally, even when
        // dst fits, as the digest must outlive the reborrow below
        let oversize = Sha256::digest([b"H2C-OVERSIZE-DST-", dst].concat());
        if dst.len() > 255 {
          dst = &oversize;
        }

        // While one of these two libraries does support directly hashing to the Scalar field, the
        // other doesn't. While that's probably an oversight, this is a universally working method
        // Build the field modulus as a 384-bit integer: (p - 1) + 1, widened to U384
        let mut modulus = vec![0; 16];
        modulus.extend((Self::F::zero() - Self::F::one()).to_bytes());
        let modulus = U384::from_be_slice(&modulus).wrapping_add(&U384::ONE);
        // Expand the message to 48 bytes and reduce mod p, taking the low 32 bytes
        F_from_slice::<Self::F>(
          &U384::from_be_slice(&{
            let mut bytes = [0; 48];
            ExpandMsgXmd::<Sha256>::expand_message(
              &[msg],
              dst,
              48
            ).unwrap().fill_bytes(&mut bytes);
            bytes
          }).reduce(&modulus).unwrap().to_be_bytes()[16 ..]
        ).unwrap()
      }
    }

    #[derive(Clone)]
    pub struct $Hram;
    impl Hram<$Curve> for $Hram {
      #[allow(non_snake_case)]
      // Challenge over (R, A, m) using SEC1-compressed encodings
      fn hram(R: &$lib::ProjectivePoint, A: &$lib::ProjectivePoint, m: &[u8]) -> $lib::Scalar {
        $Curve::hash_to_F(
          &[$CONTEXT as &[u8], b"chal"].concat(),
          &[R.to_bytes().as_ref(), A.to_bytes().as_ref(), m].concat()
        )
      }
    }
  }
}
#[cfg(feature = "p256")]
kp_curve!(
p256,
P256,
IetfP256Hram,
b"P-256",
b"FROST-P256-SHA256-v5"
);
#[cfg(feature = "secp256k1")]
kp_curve!(
k256,
Secp256k1,
NonIetfSecp256k1Hram,
b"secp256k1",
b"FROST-secp256k1-SHA256-v5"
);

View File

@@ -0,0 +1,117 @@
use core::fmt::Debug;
use thiserror::Error;
use rand_core::{RngCore, CryptoRng};
use ff::{PrimeField, PrimeFieldBits};
use group::{Group, GroupOps, GroupEncoding, prime::PrimeGroup};
#[cfg(any(test, feature = "dalek"))]
mod dalek;
#[cfg(any(test, feature = "ristretto"))]
pub use dalek::{Ristretto, IetfRistrettoHram};
#[cfg(feature = "ed25519")]
pub use dalek::{Ed25519, IetfEd25519Hram};
#[cfg(feature = "kp256")]
mod kp256;
#[cfg(feature = "secp256k1")]
pub use kp256::{Secp256k1, NonIetfSecp256k1Hram};
#[cfg(feature = "p256")]
pub use kp256::{P256, IetfP256Hram};
/// Set of errors for curve-related operations, namely encoding and decoding
#[derive(Clone, Error, Debug)]
pub enum CurveError {
  // Fixed: the second placeholder previously repeated {0}, printing the expected
  // length twice and never the actual length received
  #[error("invalid length for data (expected {0}, got {1})")]
  InvalidLength(usize, usize),
  #[error("invalid scalar")]
  InvalidScalar,
  #[error("invalid point")]
  InvalidPoint,
}
/// Unified trait to manage a field/group
// This should be moved into its own crate if the need for generic cryptography over ff/group
// continues, which is the exact reason ff/group exists (to provide a generic interface)
// elliptic-curve exists, yet it doesn't really serve the same role, nor does it use &[u8]/Vec<u8>
// It uses GenericArray which will hopefully be deprecated as Rust evolves and doesn't offer enough
// advantages in the modern day to be worth the hassle -- Kayaba
pub trait Curve: Clone + Copy + PartialEq + Eq + Debug {
  /// Scalar field element type
  // This is available via G::Scalar yet `C::G::Scalar` is ambiguous, forcing horrific accesses
  type F: PrimeField + PrimeFieldBits;
  /// Group element type
  type G: Group<Scalar = Self::F> + GroupOps + PrimeGroup;

  /// ID for this curve
  const ID: &'static [u8];

  /// Generator for the group
  // While group does provide this in its API, privacy coins may want to use a custom basepoint
  const GENERATOR: Self::G;

  /// Securely generate a random nonce. H4 from the IETF draft
  fn random_nonce<R: RngCore + CryptoRng>(secret: Self::F, rng: &mut R) -> Self::F;

  /// Hash the message for the binding factor. H3 from the IETF draft
  // This doesn't actually need to be part of Curve as it does nothing with the curve
  // This also solely relates to FROST and with a proper Algorithm/HRAM, all projects using
  // aggregatable signatures over this curve will work without issue
  // It is kept here as Curve + H{1, 2, 3} is effectively a ciphersuite according to the IETF draft
  // and moving it to Schnorr would force all of them into being ciphersuite-specific
  // H2 is left to the Schnorr Algorithm as H2 is the H used in HRAM, which Schnorr further
  // modularizes
  fn hash_msg(msg: &[u8]) -> Vec<u8>;

  /// Hash the commitments and message to calculate the binding factor. H1 from the IETF draft
  fn hash_binding_factor(binding: &[u8]) -> Self::F;

  // The following methods would optimally be F:: and G:: yet developers can't control F/G
  // They can control a trait they pass into this library

  /// Field element from hash. Used during key gen and by other crates under Serai as a general
  /// utility
  // Not parameterized by Digest as it's fine for it to use its own hash function as relevant to
  // hash_msg and hash_binding_factor
  #[allow(non_snake_case)]
  fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F;
}
/// Byte length of this curve's serialized field element.
#[allow(non_snake_case)]
pub(crate) fn F_len<C: Curve>() -> usize {
  // The default (all-zero) repr has the fixed serialized size
  let repr = <C::F as PrimeField>::Repr::default();
  repr.as_ref().len()
}
/// Byte length of this curve's serialized group element.
#[allow(non_snake_case)]
pub(crate) fn G_len<C: Curve>() -> usize {
  // The default repr has the fixed serialized size
  let repr = <C::G as GroupEncoding>::Repr::default();
  repr.as_ref().len()
}
/// Field element from slice, requiring a canonical encoding
///
/// Returns `CurveError::InvalidLength` on a length mismatch and
/// `CurveError::InvalidScalar` on an invalid or non-canonical encoding.
#[allow(non_snake_case)]
pub(crate) fn F_from_slice<F: PrimeField>(slice: &[u8]) -> Result<F, CurveError> {
  let mut encoding = F::Repr::default();
  // copy_from_slice panics on a length mismatch; surface it as an error instead
  if encoding.as_ref().len() != slice.len() {
    Err(CurveError::InvalidLength(encoding.as_ref().len(), slice.len()))?;
  }
  encoding.as_mut().copy_from_slice(slice);

  let point = Option::<F>::from(F::from_repr(encoding)).ok_or(CurveError::InvalidScalar)?;
  // Ban non-canonical encodings by requiring the round trip be byte-identical
  if point.to_repr().as_ref() != slice {
    Err(CurveError::InvalidScalar)?;
  }
  Ok(point)
}
/// Group element from slice. Must require canonicity or risks differing binding factors
///
/// Returns `CurveError::InvalidLength` on a length mismatch and
/// `CurveError::InvalidPoint` for the identity, invalid, or non-canonical encodings.
#[allow(non_snake_case)]
pub(crate) fn G_from_slice<G: PrimeGroup>(slice: &[u8]) -> Result<G, CurveError> {
  let mut encoding = G::Repr::default();
  // copy_from_slice panics on a length mismatch; surface it as an error instead
  if encoding.as_ref().len() != slice.len() {
    Err(CurveError::InvalidLength(encoding.as_ref().len(), slice.len()))?;
  }
  encoding.as_mut().copy_from_slice(slice);

  let point = Option::<G>::from(G::from_bytes(&encoding)).ok_or(CurveError::InvalidPoint)?;
  // Ban the identity, per the FROST spec, and non-canonical points
  if (point.is_identity().into()) || (point.to_bytes().as_ref() != slice) {
    Err(CurveError::InvalidPoint)?;
  }
  Ok(point)
}

View File

@@ -1,14 +1,14 @@
use core::fmt;
use std::collections::HashMap;
use std::{marker::PhantomData, collections::HashMap};
use rand_core::{RngCore, CryptoRng};
use ff::{Field, PrimeField};
use group::{ff::{Field, PrimeField}, GroupEncoding};
use multiexp::{multiexp_vartime, BatchVerifier};
use crate::{
Curve, MultisigParams, MultisigKeys, FrostError,
curve::{Curve, F_len, G_len, F_from_slice, G_from_slice},
FrostError, FrostParams, FrostKeys,
schnorr::{self, SchnorrSignature},
validate_map
};
@@ -16,29 +16,34 @@ use crate::{
#[allow(non_snake_case)]
fn challenge<C: Curve>(context: &str, l: u16, R: &[u8], Am: &[u8]) -> C::F {
const DST: &'static [u8] = b"FROST Schnorr Proof of Knowledge";
// Uses hash_msg to get a fixed size value out of the context string
C::hash_to_F(&[DST, &C::hash_msg(context.as_bytes()), &l.to_be_bytes(), R, Am].concat())
let mut transcript = C::hash_msg(context.as_bytes());
transcript.extend(l.to_be_bytes());
transcript.extend(R);
transcript.extend(Am);
C::hash_to_F(DST, &transcript)
}
// Implements steps 1 through 3 of round 1 of FROST DKG. Returns the coefficients, commitments, and
// the serialized commitments to be broadcasted over an authenticated channel to all parties
fn generate_key_r1<R: RngCore + CryptoRng, C: Curve>(
rng: &mut R,
params: &MultisigParams,
params: &FrostParams,
context: &str,
) -> (Vec<C::F>, Vec<u8>) {
let t = usize::from(params.t);
let mut coefficients = Vec::with_capacity(t);
let mut commitments = Vec::with_capacity(t);
let mut serialized = Vec::with_capacity((C::G_len() * t) + C::G_len() + C::F_len());
let mut serialized = Vec::with_capacity((G_len::<C>() * t) + G_len::<C>() + F_len::<C>());
for i in 0 .. t {
// Step 1: Generate t random values to form a polynomial with
coefficients.push(C::F::random(&mut *rng));
// Step 3: Generate public commitments
commitments.push(C::generator_table() * coefficients[i]);
commitments.push(C::GENERATOR * coefficients[i]);
// Serialize them for publication
serialized.extend(&C::G_to_bytes(&commitments[i]));
serialized.extend(commitments[i].to_bytes().as_ref());
}
// Step 2: Provide a proof of knowledge
@@ -54,7 +59,7 @@ fn generate_key_r1<R: RngCore + CryptoRng, C: Curve>(
challenge::<C>(
context,
params.i(),
&C::G_to_bytes(&(C::generator_table() * r)),
(C::GENERATOR * r).to_bytes().as_ref(),
&serialized
)
).serialize()
@@ -67,7 +72,7 @@ fn generate_key_r1<R: RngCore + CryptoRng, C: Curve>(
// Verify the received data from the first round of key generation
fn verify_r1<R: RngCore + CryptoRng, C: Curve>(
rng: &mut R,
params: &MultisigParams,
params: &FrostParams,
context: &str,
our_commitments: Vec<u8>,
mut serialized: HashMap<u16, Vec<u8>>,
@@ -78,19 +83,19 @@ fn verify_r1<R: RngCore + CryptoRng, C: Curve>(
(params.i(), our_commitments)
)?;
let commitments_len = usize::from(params.t()) * C::G_len();
let commitments_len = usize::from(params.t()) * G_len::<C>();
let mut commitments = HashMap::new();
#[allow(non_snake_case)]
let R_bytes = |l| &serialized[&l][commitments_len .. commitments_len + C::G_len()];
let R_bytes = |l| &serialized[&l][commitments_len .. commitments_len + G_len::<C>()];
#[allow(non_snake_case)]
let R = |l| C::G_from_slice(R_bytes(l)).map_err(|_| FrostError::InvalidProofOfKnowledge(l));
let R = |l| G_from_slice::<C::G>(R_bytes(l)).map_err(|_| FrostError::InvalidProofOfKnowledge(l));
#[allow(non_snake_case)]
let Am = |l| &serialized[&l][0 .. commitments_len];
let s = |l| C::F_from_slice(
&serialized[&l][commitments_len + C::G_len() ..]
let s = |l| F_from_slice::<C::F>(
&serialized[&l][commitments_len + G_len::<C>() ..]
).map_err(|_| FrostError::InvalidProofOfKnowledge(l));
let mut signatures = Vec::with_capacity(usize::from(params.n() - 1));
@@ -98,8 +103,8 @@ fn verify_r1<R: RngCore + CryptoRng, C: Curve>(
let mut these_commitments = vec![];
for c in 0 .. usize::from(params.t()) {
these_commitments.push(
C::G_from_slice(
&serialized[&l][(c * C::G_len()) .. ((c + 1) * C::G_len())]
G_from_slice::<C::G>(
&serialized[&l][(c * G_len::<C>()) .. ((c + 1) * G_len::<C>())]
).map_err(|_| FrostError::InvalidCommitment(l.try_into().unwrap()))?
);
}
@@ -144,7 +149,7 @@ fn polynomial<F: PrimeField>(
// counterparty to receive
fn generate_key_r2<R: RngCore + CryptoRng, C: Curve>(
rng: &mut R,
params: &MultisigParams,
params: &FrostParams,
context: &str,
coefficients: Vec<C::F>,
our_commitments: Vec<u8>,
@@ -161,7 +166,7 @@ fn generate_key_r2<R: RngCore + CryptoRng, C: Curve>(
continue;
}
res.insert(l, C::F_to_bytes(&polynomial(&coefficients, l)));
res.insert(l, polynomial(&coefficients, l).to_repr().as_ref().to_vec());
}
// Calculate our own share
@@ -185,22 +190,22 @@ fn generate_key_r2<R: RngCore + CryptoRng, C: Curve>(
/// broadcasted initially
fn complete_r2<R: RngCore + CryptoRng, C: Curve>(
rng: &mut R,
params: MultisigParams,
params: FrostParams,
mut secret_share: C::F,
commitments: HashMap<u16, Vec<C::G>>,
// Vec to preserve ownership
mut serialized: HashMap<u16, Vec<u8>>,
) -> Result<MultisigKeys<C>, FrostError> {
) -> Result<FrostKeys<C>, FrostError> {
validate_map(
&mut serialized,
&(1 ..= params.n()).into_iter().collect::<Vec<_>>(),
(params.i(), C::F_to_bytes(&secret_share))
(params.i(), secret_share.to_repr().as_ref().to_vec())
)?;
// Step 2. Verify each share
let mut shares = HashMap::new();
for (l, share) in serialized {
shares.insert(l, C::F_from_slice(&share).map_err(|_| FrostError::InvalidShare(l))?);
shares.insert(l, F_from_slice::<C::F>(&share).map_err(|_| FrostError::InvalidShare(l))?);
}
// Calculate the exponent for a given participant and apply it to a series of commitments
@@ -219,7 +224,7 @@ fn complete_r2<R: RngCore + CryptoRng, C: Curve>(
res
};
let mut batch = BatchVerifier::new(shares.len(), C::little_endian());
let mut batch = BatchVerifier::new(shares.len());
for (l, share) in &shares {
if *l == params.i() {
continue;
@@ -232,7 +237,7 @@ fn complete_r2<R: RngCore + CryptoRng, C: Curve>(
// ensure that malleability isn't present is to use this n * t algorithm, which runs
// per sender and not as an aggregate of all senders, which also enables blame
let mut values = exponential(params.i, &commitments[l]);
values.push((-*share, C::generator()));
values.push((-*share, C::GENERATOR));
batch.queue(rng, *l, values);
}
batch.verify_with_vartime_blame().map_err(|l| FrostError::InvalidCommitment(l))?;
@@ -249,14 +254,15 @@ fn complete_r2<R: RngCore + CryptoRng, C: Curve>(
// Calculate each user's verification share
let mut verification_shares = HashMap::new();
for i in 1 ..= params.n() {
verification_shares.insert(i, multiexp_vartime(exponential(i, &stripes), C::little_endian()));
verification_shares.insert(i, multiexp_vartime(&exponential(i, &stripes)));
}
debug_assert_eq!(C::generator_table() * secret_share, verification_shares[&params.i()]);
// Removing this check would enable optimizing the above from t + (n * t) to t + ((n - 1) * t)
debug_assert_eq!(C::GENERATOR * secret_share, verification_shares[&params.i()]);
// TODO: Clear serialized and shares
Ok(
MultisigKeys {
FrostKeys {
params,
secret_share,
group_key: stripes[0],
@@ -266,100 +272,76 @@ fn complete_r2<R: RngCore + CryptoRng, C: Curve>(
)
}
/// State of a Key Generation machine
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum State {
Fresh,
GeneratedCoefficients,
GeneratedSecretShares,
Complete,
}
impl fmt::Display for State {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self)
}
}
/// State machine which manages key generation
#[allow(non_snake_case)]
pub struct StateMachine<C: Curve> {
params: MultisigParams,
pub struct KeyGenMachine<C: Curve> {
params: FrostParams,
context: String,
state: State,
coefficients: Option<Vec<C::F>>,
our_commitments: Option<Vec<u8>>,
secret: Option<C::F>,
commitments: Option<HashMap<u16, Vec<C::G>>>
_curve: PhantomData<C>,
}
impl<C: Curve> StateMachine<C> {
pub struct SecretShareMachine<C: Curve> {
params: FrostParams,
context: String,
coefficients: Vec<C::F>,
our_commitments: Vec<u8>,
}
pub struct KeyMachine<C: Curve> {
params: FrostParams,
secret: C::F,
commitments: HashMap<u16, Vec<C::G>>,
}
impl<C: Curve> KeyGenMachine<C> {
/// Creates a new machine to generate a key for the specified curve in the specified multisig
// The context string must be unique among multisigs
pub fn new(params: MultisigParams, context: String) -> StateMachine<C> {
StateMachine {
params,
context,
state: State::Fresh,
coefficients: None,
our_commitments: None,
secret: None,
commitments: None
}
pub fn new(params: FrostParams, context: String) -> KeyGenMachine<C> {
KeyGenMachine { params, context, _curve: PhantomData }
}
/// Start generating a key according to the FROST DKG spec
/// Returns a serialized list of commitments to be sent to all parties over an authenticated
/// channel. If any party submits multiple sets of commitments, they MUST be treated as malicious
pub fn generate_coefficients<R: RngCore + CryptoRng>(
&mut self,
self,
rng: &mut R
) -> Result<Vec<u8>, FrostError> {
if self.state != State::Fresh {
Err(FrostError::InvalidKeyGenTransition(State::Fresh, self.state))?;
}
let (coefficients, serialized) = generate_key_r1::<R, C>(
rng,
&self.params,
&self.context,
);
self.coefficients = Some(coefficients);
self.our_commitments = Some(serialized.clone());
self.state = State::GeneratedCoefficients;
Ok(serialized)
) -> (SecretShareMachine<C>, Vec<u8>) {
let (coefficients, serialized) = generate_key_r1::<R, C>(rng, &self.params, &self.context);
(
SecretShareMachine {
params: self.params,
context: self.context,
coefficients,
our_commitments: serialized.clone()
},
serialized,
)
}
}
impl<C: Curve> SecretShareMachine<C> {
/// Continue generating a key
/// Takes in everyone else's commitments, which are expected to be in a Vec where participant
/// index = Vec index. An empty vector is expected at index 0 to allow for this. An empty vector
/// is also expected at index i which is locally handled. Returns a byte vector representing a
/// secret share for each other participant which should be encrypted before sending
pub fn generate_secret_shares<R: RngCore + CryptoRng>(
&mut self,
self,
rng: &mut R,
commitments: HashMap<u16, Vec<u8>>,
) -> Result<HashMap<u16, Vec<u8>>, FrostError> {
if self.state != State::GeneratedCoefficients {
Err(FrostError::InvalidKeyGenTransition(State::GeneratedCoefficients, self.state))?;
}
) -> Result<(KeyMachine<C>, HashMap<u16, Vec<u8>>), FrostError> {
let (secret, commitments, shares) = generate_key_r2::<R, C>(
rng,
&self.params,
&self.context,
self.coefficients.take().unwrap(),
self.our_commitments.take().unwrap(),
self.coefficients,
self.our_commitments,
commitments,
)?;
self.secret = Some(secret);
self.commitments = Some(commitments);
self.state = State::GeneratedSecretShares;
Ok(shares)
Ok((KeyMachine { params: self.params, secret, commitments }, shares))
}
}
impl<C: Curve> KeyMachine<C> {
/// Complete key generation
/// Takes in everyone elses' shares submitted to us as a Vec, expecting participant index =
/// Vec index with an empty vector at index 0 and index i. Returns a byte vector representing the
@@ -367,31 +349,10 @@ impl<C: Curve> StateMachine<C> {
/// must report completion without issue before this key can be considered usable, yet you should
/// wait for all participants to report as such
pub fn complete<R: RngCore + CryptoRng>(
&mut self,
self,
rng: &mut R,
shares: HashMap<u16, Vec<u8>>,
) -> Result<MultisigKeys<C>, FrostError> {
if self.state != State::GeneratedSecretShares {
Err(FrostError::InvalidKeyGenTransition(State::GeneratedSecretShares, self.state))?;
}
let keys = complete_r2(
rng,
self.params,
self.secret.take().unwrap(),
self.commitments.take().unwrap(),
shares,
)?;
self.state = State::Complete;
Ok(keys)
}
pub fn params(&self) -> MultisigParams {
self.params.clone()
}
pub fn state(&self) -> State {
self.state
) -> Result<FrostKeys<C>, FrostError> {
complete_r2(rng, self.params, self.secret, self.commitments, shares)
}
}

View File

@@ -1,124 +1,24 @@
use core::{ops::Mul, fmt::Debug};
use core::fmt::Debug;
use std::collections::HashMap;
use thiserror::Error;
use ff::{Field, PrimeField};
use group::{Group, GroupOps};
use group::{ff::{Field, PrimeField}, GroupEncoding};
mod schnorr;
pub mod curve;
use curve::{Curve, F_len, G_len, F_from_slice, G_from_slice};
pub mod key_gen;
pub mod algorithm;
pub mod sign;
pub mod tests;
/// Set of errors for curve-related operations, namely encoding and decoding
#[derive(Clone, Error, Debug)]
pub enum CurveError {
#[error("invalid length for data (expected {0}, got {0})")]
InvalidLength(usize, usize),
#[error("invalid scalar")]
InvalidScalar,
#[error("invalid point")]
InvalidPoint,
}
/// Unified trait to manage a field/group
// This should be moved into its own crate if the need for generic cryptography over ff/group
// continues, which is the exact reason ff/group exists (to provide a generic interface)
// elliptic-curve exists, yet it doesn't really serve the same role, nor does it use &[u8]/Vec<u8>
// It uses GenericArray which will hopefully be deprecated as Rust evolves and doesn't offer enough
// advantages in the modern day to be worth the hassle -- Kayaba
pub trait Curve: Clone + Copy + PartialEq + Eq + Debug {
/// Field element type
// This is available via G::Scalar yet `C::G::Scalar` is ambiguous, forcing horrific accesses
type F: PrimeField;
/// Group element type
type G: Group<Scalar = Self::F> + GroupOps;
/// Precomputed table type
type T: Mul<Self::F, Output = Self::G>;
/// ID for this curve
fn id() -> String;
/// Byte length of the curve ID
// While curve.id().len() is trivial, this bounds it to u8 and lets us ignore the possibility it
// contains Unicode, therefore having a String length which is different from its byte length
fn id_len() -> u8;
/// Generator for the group
// While group does provide this in its API, Jubjub users will want to use a custom basepoint
fn generator() -> Self::G;
/// Table for the generator for the group
/// If there isn't a precomputed table available, the generator itself should be used
fn generator_table() -> Self::T;
/// If little endian is used for the scalar field's Repr
fn little_endian() -> bool;
/// Hash the message for the binding factor. H3 from the IETF draft
// This doesn't actually need to be part of Curve as it does nothing with the curve
// This also solely relates to FROST and with a proper Algorithm/HRAM, all projects using
// aggregatable signatures over this curve will work without issue
// It is kept here as Curve + H{1, 2, 3} is effectively a ciphersuite according to the IETF draft
// and moving it to Schnorr would force all of them into being ciphersuite-specific
// H2 is left to the Schnorr Algorithm as H2 is the H used in HRAM, which Schnorr further
// modularizes
fn hash_msg(msg: &[u8]) -> Vec<u8>;
/// Hash the commitments and message to calculate the binding factor. H1 from the IETF draft
fn hash_binding_factor(binding: &[u8]) -> Self::F;
// The following methods would optimally be F:: and G:: yet developers can't control F/G
// They can control a trait they pass into this library
/// Field element from hash. Used during key gen and by other crates under Serai as a general
/// utility
// Not parameterized by Digest as it's fine for it to use its own hash function as relevant to
// hash_msg and hash_binding_factor
#[allow(non_snake_case)]
fn hash_to_F(data: &[u8]) -> Self::F;
/// Constant size of a serialized field element
// The alternative way to grab this would be either serializing a junk element and getting its
// length or doing a naive division of its BITS property by 8 and assuming a lack of padding
#[allow(non_snake_case)]
fn F_len() -> usize;
/// Constant size of a serialized group element
// We could grab the serialization as described above yet a naive developer may use a
// non-constant size encoding, proving yet another reason to force this to be a provided constant
// A naive developer could still provide a constant for a variable length encoding, yet at least
// that is on them
#[allow(non_snake_case)]
fn G_len() -> usize;
/// Field element from slice. Preferred to be canonical yet does not have to be
// Required due to the lack of standardized encoding functions provided by ff/group
// While they do technically exist, their usage of Self::Repr breaks all potential library usage
// without helper functions like this
#[allow(non_snake_case)]
fn F_from_slice(slice: &[u8]) -> Result<Self::F, CurveError>;
/// Group element from slice. Must require canonicity or risks differing binding factors
#[allow(non_snake_case)]
fn G_from_slice(slice: &[u8]) -> Result<Self::G, CurveError>;
/// Obtain a vector of the byte encoding of F
#[allow(non_snake_case)]
fn F_to_bytes(f: &Self::F) -> Vec<u8>;
/// Obtain a vector of the byte encoding of G
#[allow(non_snake_case)]
fn G_to_bytes(g: &Self::G) -> Vec<u8>;
}
/// Parameters for a multisig
// These fields can not be made public as they should be static
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct MultisigParams {
pub struct FrostParams {
/// Participants needed to sign on behalf of the group
t: u16,
/// Amount of participants
@@ -127,12 +27,12 @@ pub struct MultisigParams {
i: u16,
}
impl MultisigParams {
impl FrostParams {
pub fn new(
t: u16,
n: u16,
i: u16
) -> Result<MultisigParams, FrostError> {
) -> Result<FrostParams, FrostError> {
if (t == 0) || (n == 0) {
Err(FrostError::ZeroParameter(t, n))?;
}
@@ -146,7 +46,7 @@ impl MultisigParams {
Err(FrostError::InvalidParticipantIndex(n, i))?;
}
Ok(MultisigParams{ t, n, i })
Ok(FrostParams{ t, n, i })
}
pub fn t(&self) -> u16 { self.t }
@@ -179,11 +79,6 @@ pub enum FrostError {
InvalidProofOfKnowledge(u16),
#[error("invalid share (participant {0})")]
InvalidShare(u16),
#[error("invalid key generation state machine transition (expected {0}, was {1})")]
InvalidKeyGenTransition(key_gen::State, key_gen::State),
#[error("invalid sign state machine transition (expected {0}, was {1})")]
InvalidSignTransition(sign::State, sign::State),
#[error("internal error ({0})")]
InternalError(String),
@@ -191,14 +86,14 @@ pub enum FrostError {
// View of keys passable to algorithm implementations
#[derive(Clone)]
pub struct MultisigView<C: Curve> {
pub struct FrostView<C: Curve> {
group_key: C::G,
included: Vec<u16>,
secret_share: C::F,
verification_shares: HashMap<u16, C::G>,
}
impl<C: Curve> MultisigView<C> {
impl<C: Curve> FrostView<C> {
pub fn group_key(&self) -> C::G {
self.group_key
}
@@ -239,9 +134,9 @@ pub fn lagrange<F: PrimeField>(
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct MultisigKeys<C: Curve> {
/// Multisig Parameters
params: MultisigParams,
pub struct FrostKeys<C: Curve> {
/// FROST Parameters
params: FrostParams,
/// Secret share key
secret_share: C::F,
@@ -254,21 +149,26 @@ pub struct MultisigKeys<C: Curve> {
offset: Option<C::F>,
}
impl<C: Curve> MultisigKeys<C> {
pub fn offset(&self, offset: C::F) -> MultisigKeys<C> {
impl<C: Curve> FrostKeys<C> {
/// Offset the keys by a given scalar to allow for account and privacy schemes
/// This offset is ephemeral and will not be included when these keys are serialized
/// Keys offset multiple times will form a new offset of their sum
/// Not IETF compliant
pub fn offset(&self, offset: C::F) -> FrostKeys<C> {
let mut res = self.clone();
// Carry any existing offset
// Enables schemes like Monero's subaddresses which have a per-subaddress offset and then a
// one-time-key offset
res.offset = Some(offset + res.offset.unwrap_or(C::F::zero()));
res.group_key += C::GENERATOR * offset;
res
}
pub fn params(&self) -> MultisigParams {
pub fn params(&self) -> FrostParams {
self.params
}
pub fn secret_share(&self) -> C::F {
fn secret_share(&self) -> C::F {
self.secret_share
}
@@ -276,11 +176,11 @@ impl<C: Curve> MultisigKeys<C> {
self.group_key
}
pub fn verification_shares(&self) -> HashMap<u16, C::G> {
fn verification_shares(&self) -> HashMap<u16, C::G> {
self.verification_shares.clone()
}
pub fn view(&self, included: &[u16]) -> Result<MultisigView<C>, FrostError> {
pub fn view(&self, included: &[u16]) -> Result<FrostView<C>, FrostError> {
if (included.len() < self.params.t.into()) || (usize::from(self.params.n) < included.len()) {
Err(FrostError::InvalidSigningSet("invalid amount of participants included".to_string()))?;
}
@@ -289,13 +189,13 @@ impl<C: Curve> MultisigKeys<C> {
let offset = self.offset.unwrap_or(C::F::zero());
let offset_share = offset * C::F::from(included.len().try_into().unwrap()).invert().unwrap();
Ok(MultisigView {
group_key: self.group_key + (C::generator_table() * offset),
Ok(FrostView {
group_key: self.group_key,
secret_share: secret_share + offset_share,
verification_shares: self.verification_shares.iter().map(
|(l, share)| (
*l,
(*share * lagrange::<C::F>(*l, &included)) + (C::generator_table() * offset_share)
(*share * lagrange::<C::F>(*l, &included)) + (C::GENERATOR * offset_share)
)
).collect(),
included: included.to_vec(),
@@ -303,84 +203,76 @@ impl<C: Curve> MultisigKeys<C> {
}
pub fn serialized_len(n: u16) -> usize {
1 + usize::from(C::id_len()) + (3 * 2) + C::F_len() + C::G_len() + (usize::from(n) * C::G_len())
8 + C::ID.len() + (3 * 2) + F_len::<C>() + G_len::<C>() + (usize::from(n) * G_len::<C>())
}
pub fn serialize(&self) -> Vec<u8> {
let mut serialized = Vec::with_capacity(
1 + usize::from(C::id_len()) + MultisigKeys::<C>::serialized_len(self.params.n)
);
serialized.push(C::id_len());
serialized.extend(C::id().as_bytes());
serialized.extend(&self.params.n.to_le_bytes());
serialized.extend(&self.params.t.to_le_bytes());
serialized.extend(&self.params.i.to_le_bytes());
serialized.extend(&C::F_to_bytes(&self.secret_share));
serialized.extend(&C::G_to_bytes(&self.group_key));
let mut serialized = Vec::with_capacity(FrostKeys::<C>::serialized_len(self.params.n));
serialized.extend(u64::try_from(C::ID.len()).unwrap().to_be_bytes());
serialized.extend(C::ID);
serialized.extend(&self.params.t.to_be_bytes());
serialized.extend(&self.params.n.to_be_bytes());
serialized.extend(&self.params.i.to_be_bytes());
serialized.extend(self.secret_share.to_repr().as_ref());
serialized.extend(self.group_key.to_bytes().as_ref());
for l in 1 ..= self.params.n.into() {
serialized.extend(&C::G_to_bytes(&self.verification_shares[&l]));
serialized.extend(self.verification_shares[&l].to_bytes().as_ref());
}
serialized
}
pub fn deserialize(serialized: &[u8]) -> Result<MultisigKeys<C>, FrostError> {
if serialized.len() < 1 {
Err(FrostError::InternalError("MultisigKeys serialization is empty".to_string()))?;
pub fn deserialize(serialized: &[u8]) -> Result<FrostKeys<C>, FrostError> {
let mut start = u64::try_from(C::ID.len()).unwrap().to_be_bytes().to_vec();
start.extend(C::ID);
let mut cursor = start.len();
if serialized.len() < (cursor + 4) {
Err(
FrostError::InternalError(
"FrostKeys serialization is missing its curve/participant quantities".to_string()
)
)?;
}
let id_len: usize = serialized[0].into();
let mut cursor = 1;
if serialized.len() < (cursor + id_len) {
Err(FrostError::InternalError("ID wasn't included".to_string()))?;
}
let id = &serialized[cursor .. (cursor + id_len)];
if C::id().as_bytes() != id {
if &start != &serialized[.. cursor] {
Err(
FrostError::InternalError(
"curve is distinct between serialization and deserialization".to_string()
)
)?;
}
cursor += id_len;
if serialized.len() < (cursor + 8) {
Err(FrostError::InternalError("participant quantity wasn't included".to_string()))?;
}
let n = u16::from_le_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap());
let t = u16::from_be_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap());
cursor += 2;
if serialized.len() != MultisigKeys::<C>::serialized_len(n) {
let n = u16::from_be_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap());
cursor += 2;
if serialized.len() != FrostKeys::<C>::serialized_len(n) {
Err(FrostError::InternalError("incorrect serialization length".to_string()))?;
}
let t = u16::from_le_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap());
cursor += 2;
let i = u16::from_le_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap());
let i = u16::from_be_bytes(serialized[cursor .. (cursor + 2)].try_into().unwrap());
cursor += 2;
let secret_share = C::F_from_slice(&serialized[cursor .. (cursor + C::F_len())])
let secret_share = F_from_slice::<C::F>(&serialized[cursor .. (cursor + F_len::<C>())])
.map_err(|_| FrostError::InternalError("invalid secret share".to_string()))?;
cursor += C::F_len();
let group_key = C::G_from_slice(&serialized[cursor .. (cursor + C::G_len())])
cursor += F_len::<C>();
let group_key = G_from_slice::<C::G>(&serialized[cursor .. (cursor + G_len::<C>())])
.map_err(|_| FrostError::InternalError("invalid group key".to_string()))?;
cursor += C::G_len();
cursor += G_len::<C>();
let mut verification_shares = HashMap::new();
for l in 1 ..= n {
verification_shares.insert(
l,
C::G_from_slice(&serialized[cursor .. (cursor + C::G_len())])
G_from_slice::<C::G>(&serialized[cursor .. (cursor + G_len::<C>())])
.map_err(|_| FrostError::InternalError("invalid verification share".to_string()))?
);
cursor += C::G_len();
cursor += G_len::<C>();
}
Ok(
MultisigKeys {
params: MultisigParams::new(t, n, i)
FrostKeys {
params: FrostParams::new(t, n, i)
.map_err(|_| FrostError::InternalError("invalid parameters".to_string()))?,
secret_share,
group_key,

View File

@@ -1,10 +1,10 @@
use rand_core::{RngCore, CryptoRng};
use ff::Field;
use group::{ff::{Field, PrimeField}, GroupEncoding};
use multiexp::BatchVerifier;
use crate::Curve;
use crate::{Curve, F_len, G_len};
#[allow(non_snake_case)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
@@ -15,9 +15,9 @@ pub struct SchnorrSignature<C: Curve> {
impl<C: Curve> SchnorrSignature<C> {
pub fn serialize(&self) -> Vec<u8> {
let mut res = Vec::with_capacity(C::G_len() + C::F_len());
res.extend(C::G_to_bytes(&self.R));
res.extend(C::F_to_bytes(&self.s));
let mut res = Vec::with_capacity(G_len::<C>() + F_len::<C>());
res.extend(self.R.to_bytes().as_ref());
res.extend(self.s.to_repr().as_ref());
res
}
}
@@ -28,25 +28,26 @@ pub(crate) fn sign<C: Curve>(
challenge: C::F
) -> SchnorrSignature<C> {
SchnorrSignature {
R: C::generator_table() * nonce,
R: C::GENERATOR * nonce,
s: nonce + (private_key * challenge)
}
}
#[must_use]
pub(crate) fn verify<C: Curve>(
public_key: C::G,
challenge: C::F,
signature: &SchnorrSignature<C>
) -> bool {
(C::generator_table() * signature.s) == (signature.R + (public_key * challenge))
(C::GENERATOR * signature.s) == (signature.R + (public_key * challenge))
}
pub(crate) fn batch_verify<C: Curve, R: RngCore + CryptoRng>(
rng: &mut R,
triplets: &[(u16, C::G, C::F, SchnorrSignature<C>)]
) -> Result<(), u16> {
let mut values = [(C::F::one(), C::generator()); 3];
let mut batch = BatchVerifier::new(triplets.len(), C::little_endian());
let mut values = [(C::F::one(), C::GENERATOR); 3];
let mut batch = BatchVerifier::new(triplets.len());
for triple in triplets {
// s = r + ca
// sG == R + cA

View File

@@ -1,33 +1,35 @@
use core::fmt;
use std::{rc::Rc, collections::HashMap};
use std::{sync::Arc, collections::HashMap};
use rand_core::{RngCore, CryptoRng};
use ff::Field;
use group::{ff::{Field, PrimeField}, Group, GroupEncoding};
use transcript::Transcript;
use dleq::{Generators, DLEqProof};
use crate::{
Curve,
curve::{Curve, F_len, G_len, F_from_slice, G_from_slice},
FrostError,
MultisigParams, MultisigKeys, MultisigView,
FrostParams, FrostKeys, FrostView,
algorithm::Algorithm,
validate_map
};
/// Pairing of an Algorithm with a MultisigKeys instance and this specific signing set
/// Pairing of an Algorithm with a FrostKeys instance and this specific signing set
#[derive(Clone)]
pub struct Params<C: Curve, A: Algorithm<C>> {
algorithm: A,
keys: Rc<MultisigKeys<C>>,
view: MultisigView<C>,
keys: Arc<FrostKeys<C>>,
view: FrostView<C>,
}
// Currently public to enable more complex operations as desired, yet solely used in testing
impl<C: Curve, A: Algorithm<C>> Params<C, A> {
pub fn new(
algorithm: A,
keys: Rc<MultisigKeys<C>>,
keys: Arc<FrostKeys<C>>,
included: &[u16],
) -> Result<Params<C, A>, FrostError> {
let mut included = included.to_vec();
@@ -60,18 +62,22 @@ impl<C: Curve, A: Algorithm<C>> Params<C, A> {
Ok(Params { algorithm, view: keys.view(&included).unwrap(), keys })
}
pub fn multisig_params(&self) -> MultisigParams {
pub fn multisig_params(&self) -> FrostParams {
self.keys.params
}
pub fn view(&self) -> MultisigView<C> {
pub fn view(&self) -> FrostView<C> {
self.view.clone()
}
}
struct PreprocessPackage<C: Curve> {
nonces: [C::F; 2],
serialized: Vec<u8>,
fn nonce_transcript<T: Transcript>() -> T {
T::new(b"FROST_nonce_dleq")
}
pub(crate) struct PreprocessPackage<C: Curve> {
pub(crate) nonces: Vec<[C::F; 2]>,
pub(crate) serialized: Vec<u8>,
}
// This library unifies the preprocessing step with signing due to security concerns and to provide
@@ -80,27 +86,53 @@ fn preprocess<R: RngCore + CryptoRng, C: Curve, A: Algorithm<C>>(
rng: &mut R,
params: &mut Params<C, A>,
) -> PreprocessPackage<C> {
let nonces = [C::F::random(&mut *rng), C::F::random(&mut *rng)];
let commitments = [C::generator_table() * nonces[0], C::generator_table() * nonces[1]];
let mut serialized = C::G_to_bytes(&commitments[0]);
serialized.extend(&C::G_to_bytes(&commitments[1]));
let mut serialized = Vec::with_capacity(2 * G_len::<C>());
let nonces = params.algorithm.nonces().iter().cloned().map(
|mut generators| {
let nonces = [
C::random_nonce(params.view().secret_share(), &mut *rng),
C::random_nonce(params.view().secret_share(), &mut *rng)
];
serialized.extend(
&params.algorithm.preprocess_addendum(
rng,
&params.view,
&nonces
)
);
let commit = |generator: C::G| {
let commitments = [generator * nonces[0], generator * nonces[1]];
[commitments[0].to_bytes().as_ref(), commitments[1].to_bytes().as_ref()].concat().to_vec()
};
let first = generators.remove(0);
serialized.extend(commit(first));
// Iterate over the rest
for generator in generators.iter() {
serialized.extend(commit(*generator));
// Provide a DLEq to verify these commitments are for the same nonce
// TODO: Provide a single DLEq. See https://github.com/serai-dex/serai/issues/34
for nonce in nonces {
DLEqProof::prove(
&mut *rng,
// Uses an independent transcript as each signer must do this now, yet we validate them
// sequentially by the global order. Avoids needing to clone the transcript around
&mut nonce_transcript::<A::Transcript>(),
Generators::new(first, *generator),
nonce
).serialize(&mut serialized).unwrap();
}
}
nonces
}
).collect::<Vec<_>>();
serialized.extend(&params.algorithm.preprocess_addendum(rng, &params.view));
PreprocessPackage { nonces, serialized }
}
#[allow(non_snake_case)]
struct Package<C: Curve> {
B: HashMap<u16, [C::G; 2]>,
B: HashMap<u16, Vec<Vec<[C::G; 2]>>>,
binding: C::F,
R: C::G,
Rs: Vec<Vec<C::G>>,
share: Vec<u8>
}
@@ -126,7 +158,7 @@ fn sign_with_share<C: Curve, A: Algorithm<C>>(
transcript.domain_separate(b"FROST");
// Include the offset, if one exists
if let Some(offset) = params.keys.offset {
transcript.append_message(b"offset", &C::F_to_bytes(&offset));
transcript.append_message(b"offset", offset.to_repr().as_ref());
}
}
@@ -134,61 +166,98 @@ fn sign_with_share<C: Curve, A: Algorithm<C>>(
let mut B = HashMap::<u16, _>::with_capacity(params.view.included.len());
// Get the binding factor
let nonces = params.algorithm.nonces();
let mut addendums = HashMap::new();
let binding = {
let transcript = params.algorithm.transcript();
// Parse the commitments
for l in &params.view.included {
transcript.append_message(b"participant", &l.to_be_bytes());
let serialized = commitments.remove(l).unwrap();
let commitments = commitments.remove(l).unwrap();
let mut read_commitment = |c, label| {
let commitment = &commitments[c .. (c + C::G_len())];
let commitment = &serialized[c .. (c + G_len::<C>())];
transcript.append_message(label, commitment);
C::G_from_slice(commitment).map_err(|_| FrostError::InvalidCommitment(*l))
G_from_slice::<C::G>(commitment).map_err(|_| FrostError::InvalidCommitment(*l))
};
// While this doesn't note which nonce/basepoint this is for, those are expected to be
// static. Beyond that, they're committed to in the DLEq proof transcripts, ensuring
// consistency. While this is suboptimal, it maintains IETF compliance, and Algorithm is
// documented accordingly
#[allow(non_snake_case)]
let mut read_D_E = || Ok(
[read_commitment(0, b"commitment_D")?, read_commitment(C::G_len(), b"commitment_E")?]
);
let mut read_D_E = |c| Ok([
read_commitment(c, b"commitment_D")?,
read_commitment(c + G_len::<C>(), b"commitment_E")?
]);
B.insert(*l, read_D_E()?);
addendums.insert(*l, commitments[(C::G_len() * 2) ..].to_vec());
let mut c = 0;
let mut commitments = Vec::with_capacity(nonces.len());
for (n, nonce_generators) in nonces.clone().iter_mut().enumerate() {
commitments.push(Vec::with_capacity(nonce_generators.len()));
let first = nonce_generators.remove(0);
commitments[n].push(read_D_E(c)?);
c += 2 * G_len::<C>();
let mut c = 2 * G_len::<C>();
for generator in nonce_generators {
commitments[n].push(read_D_E(c)?);
c += 2 * G_len::<C>();
for de in 0 .. 2 {
DLEqProof::deserialize(
&mut std::io::Cursor::new(&serialized[c .. (c + (2 * F_len::<C>()))])
).map_err(|_| FrostError::InvalidCommitment(*l))?.verify(
&mut nonce_transcript::<A::Transcript>(),
Generators::new(first, *generator),
(commitments[n][0][de], commitments[n][commitments[n].len() - 1][de])
).map_err(|_| FrostError::InvalidCommitment(*l))?;
c += 2 * F_len::<C>();
}
}
addendums.insert(*l, serialized[c ..].to_vec());
}
B.insert(*l, commitments);
}
// Append the message to the transcript
transcript.append_message(b"message", &C::hash_msg(&msg));
// Calculate the binding factor
C::hash_binding_factor(&transcript.challenge(b"binding"))
C::hash_binding_factor(transcript.challenge(b"binding").as_ref())
};
// Process the addendums
for l in &params.view.included {
params.algorithm.process_addendum(&params.view, *l, &B[l], &addendums[l])?;
params.algorithm.process_addendum(&params.view, *l, &addendums[l])?;
}
#[allow(non_snake_case)]
let R = {
B.values().map(|B| B[0]).sum::<C::G>() + (B.values().map(|B| B[1]).sum::<C::G>() * binding)
};
let share = C::F_to_bytes(
&params.algorithm.sign_share(
&params.view,
R,
binding,
our_preprocess.nonces[0] + (our_preprocess.nonces[1] * binding),
msg
)
);
let mut Rs = Vec::with_capacity(nonces.len());
for n in 0 .. nonces.len() {
Rs.push(vec![C::G::identity(); nonces[n].len()]);
#[allow(non_snake_case)]
for g in 0 .. nonces[n].len() {
Rs[n][g] = {
B.values().map(|B| B[n][g][0]).sum::<C::G>() +
(B.values().map(|B| B[n][g][1]).sum::<C::G>() * binding)
};
}
}
Ok((Package { B, binding, R, share: share.clone() }, share))
let share = params.algorithm.sign_share(
&params.view,
&Rs,
&our_preprocess.nonces.iter().map(
|nonces| nonces[0] + (nonces[1] * binding)
).collect::<Vec<_>>(),
msg
).to_repr().as_ref().to_vec();
Ok((Package { B, binding, Rs, share: share.clone() }, share))
}
// This doesn't check the signing set is as expected and unexpected changes can cause false blames
// if legitimate participants are still using the original, expected, signing set. This library
// could be made more robust in that regard
fn complete<C: Curve, A: Algorithm<C>>(
sign_params: &Params<C, A>,
sign: Package<C>,
@@ -200,7 +269,7 @@ fn complete<C: Curve, A: Algorithm<C>>(
let mut responses = HashMap::new();
let mut sum = C::F::zero();
for l in &sign_params.view.included {
let part = C::F_from_slice(&shares[l]).map_err(|_| FrostError::InvalidShare(*l))?;
let part = F_from_slice::<C::F>(&shares[l]).map_err(|_| FrostError::InvalidShare(*l))?;
sum += part;
responses.insert(*l, part);
}
@@ -208,7 +277,7 @@ fn complete<C: Curve, A: Algorithm<C>>(
// Perform signature validation instead of individual share validation
// For the success route, which should be much more frequent, this should be faster
// It also acts as an integrity check of this library's signing function
let res = sign_params.algorithm.verify(sign_params.view.group_key, sign.R, sum);
let res = sign_params.algorithm.verify(sign_params.view.group_key, &sign.Rs, sum);
if let Some(res) = res {
return Ok(res);
}
@@ -219,7 +288,11 @@ fn complete<C: Curve, A: Algorithm<C>>(
if !sign_params.algorithm.verify_share(
*l,
sign_params.view.verification_share(*l),
sign.B[l][0] + (sign.B[l][1] * sign.binding),
&sign.B[l].iter().map(
|nonces| nonces.iter().map(
|commitments| commitments[0] + (commitments[1] * sign.binding)
).collect()
).collect::<Vec<_>>(),
responses[l]
) {
Err(FrostError::InvalidShare(*l))?;
@@ -234,31 +307,21 @@ fn complete<C: Curve, A: Algorithm<C>>(
)
}
/// State of a Sign machine
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum State {
Fresh,
Preprocessed,
Signed,
Complete,
}
impl fmt::Display for State {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self)
}
}
pub trait StateMachine {
pub trait PreprocessMachine {
type Signature: Clone + PartialEq + fmt::Debug;
type SignMachine: SignMachine<Self::Signature>;
/// Perform the preprocessing round required in order to sign
/// Returns a byte vector which must be transmitted to all parties selected for this signing
/// process, over an authenticated channel
fn preprocess<R: RngCore + CryptoRng>(
&mut self,
self,
rng: &mut R
) -> Result<Vec<u8>, FrostError>;
) -> (Self::SignMachine, Vec<u8>);
}
pub trait SignMachine<S> {
type SignatureMachine: SignatureMachine<S>;
/// Sign a message
/// Takes in the participant's commitments, which are expected to be in a Vec where participant
@@ -266,107 +329,88 @@ pub trait StateMachine {
/// index i which is locally handled. Returns a byte vector representing a share of the signature
/// for every other participant to receive, over an authenticated channel
fn sign(
&mut self,
self,
commitments: HashMap<u16, Vec<u8>>,
msg: &[u8],
) -> Result<Vec<u8>, FrostError>;
) -> Result<(Self::SignatureMachine, Vec<u8>), FrostError>;
}
pub trait SignatureMachine<S> {
/// Complete signing
/// Takes in everyone elses' shares submitted to us as a Vec, expecting participant index =
/// Vec index with None at index 0 and index i. Returns a byte vector representing the serialized
/// signature
fn complete(&mut self, shares: HashMap<u16, Vec<u8>>) -> Result<Self::Signature, FrostError>;
fn multisig_params(&self) -> MultisigParams;
fn state(&self) -> State;
fn complete(self, shares: HashMap<u16, Vec<u8>>) -> Result<S, FrostError>;
}
/// State machine which manages signing for an arbitrary signature algorithm
#[allow(non_snake_case)]
pub struct AlgorithmMachine<C: Curve, A: Algorithm<C>> {
params: Params<C, A>
}
pub struct AlgorithmSignMachine<C: Curve, A: Algorithm<C>> {
params: Params<C, A>,
state: State,
preprocess: Option<PreprocessPackage<C>>,
sign: Option<Package<C>>,
preprocess: PreprocessPackage<C>,
}
pub struct AlgorithmSignatureMachine<C: Curve, A: Algorithm<C>> {
params: Params<C, A>,
sign: Package<C>,
}
impl<C: Curve, A: Algorithm<C>> AlgorithmMachine<C, A> {
/// Creates a new machine to generate a key for the specified curve in the specified multisig
pub fn new(
algorithm: A,
keys: Rc<MultisigKeys<C>>,
keys: Arc<FrostKeys<C>>,
included: &[u16],
) -> Result<AlgorithmMachine<C, A>, FrostError> {
Ok(
AlgorithmMachine {
params: Params::new(algorithm, keys, included)?,
state: State::Fresh,
preprocess: None,
sign: None,
}
)
Ok(AlgorithmMachine { params: Params::new(algorithm, keys, included)? })
}
pub(crate) fn unsafe_override_preprocess(
self,
preprocess: PreprocessPackage<C>
) -> (AlgorithmSignMachine<C, A>, Vec<u8>) {
let serialized = preprocess.serialized.clone();
(AlgorithmSignMachine { params: self.params, preprocess }, serialized)
}
}
impl<C: Curve, A: Algorithm<C>> StateMachine for AlgorithmMachine<C, A> {
impl<C: Curve, A: Algorithm<C>> PreprocessMachine for AlgorithmMachine<C, A> {
type Signature = A::Signature;
type SignMachine = AlgorithmSignMachine<C, A>;
fn preprocess<R: RngCore + CryptoRng>(
&mut self,
self,
rng: &mut R
) -> Result<Vec<u8>, FrostError> {
if self.state != State::Fresh {
Err(FrostError::InvalidSignTransition(State::Fresh, self.state))?;
}
let preprocess = preprocess::<R, C, A>(rng, &mut self.params);
) -> (Self::SignMachine, Vec<u8>) {
let mut params = self.params;
let preprocess = preprocess::<R, C, A>(rng, &mut params);
let serialized = preprocess.serialized.clone();
self.preprocess = Some(preprocess);
self.state = State::Preprocessed;
Ok(serialized)
}
fn sign(
&mut self,
commitments: HashMap<u16, Vec<u8>>,
msg: &[u8],
) -> Result<Vec<u8>, FrostError> {
if self.state != State::Preprocessed {
Err(FrostError::InvalidSignTransition(State::Preprocessed, self.state))?;
}
let (sign, serialized) = sign_with_share(
&mut self.params,
self.preprocess.take().unwrap(),
commitments,
msg,
)?;
self.sign = Some(sign);
self.state = State::Signed;
Ok(serialized)
}
fn complete(&mut self, shares: HashMap<u16, Vec<u8>>) -> Result<A::Signature, FrostError> {
if self.state != State::Signed {
Err(FrostError::InvalidSignTransition(State::Signed, self.state))?;
}
let signature = complete(
&self.params,
self.sign.take().unwrap(),
shares,
)?;
self.state = State::Complete;
Ok(signature)
}
fn multisig_params(&self) -> MultisigParams {
self.params.multisig_params().clone()
}
fn state(&self) -> State {
self.state
(AlgorithmSignMachine { params, preprocess }, serialized)
}
}
impl<C: Curve, A: Algorithm<C>> SignMachine<A::Signature> for AlgorithmSignMachine<C, A> {
type SignatureMachine = AlgorithmSignatureMachine<C, A>;
fn sign(
self,
commitments: HashMap<u16, Vec<u8>>,
msg: &[u8]
) -> Result<(Self::SignatureMachine, Vec<u8>), FrostError> {
let mut params = self.params;
let (sign, serialized) = sign_with_share(&mut params, self.preprocess, commitments, msg)?;
Ok((AlgorithmSignatureMachine { params, sign }, serialized))
}
}
impl<
C: Curve,
A: Algorithm<C>
> SignatureMachine<A::Signature> for AlgorithmSignatureMachine<C, A> {
fn complete(self, shares: HashMap<u16, Vec<u8>>) -> Result<A::Signature, FrostError> {
complete(&self.params, self.sign, shares)
}
}

View File

@@ -1,9 +1,8 @@
use rand_core::{RngCore, CryptoRng};
use crate::{
Curve, MultisigKeys,
tests::{schnorr::{sign, verify, batch_verify}, key_gen}
};
use group::{ff::Field, Group};
use crate::{Curve, FrostKeys, tests::key_gen};
// Test generation of FROST keys
fn key_generation<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
@@ -14,21 +13,30 @@ fn key_generation<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
// Test serialization of generated keys
fn keys_serialization<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
for (_, keys) in key_gen::<_, C>(rng) {
assert_eq!(&MultisigKeys::<C>::deserialize(&keys.serialize()).unwrap(), &*keys);
assert_eq!(&FrostKeys::<C>::deserialize(&keys.serialize()).unwrap(), &*keys);
}
}
pub fn test_curve<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
// TODO: Test the Curve functions themselves
// Test Schnorr signatures work as expected
// This is a bit unnecessary, as they should for any valid curve, yet this provides tests with
// meaning, which the above tests won't have
sign::<_, C>(rng);
verify::<_, C>(rng);
batch_verify::<_, C>(rng);
// Test successful multiexp, with enough pairs to trigger its variety of algorithms
// Multiexp has its own tests, yet only against k256 and Ed25519 (which should be sufficient
// as-is to prove multiexp), and this doesn't hurt
{
let mut pairs = Vec::with_capacity(1000);
let mut sum = C::G::identity();
for _ in 0 .. 10 {
for _ in 0 .. 100 {
pairs.push((C::F::random(&mut *rng), C::GENERATOR * C::F::random(&mut *rng)));
sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0;
}
assert_eq!(multiexp::multiexp(&pairs), sum);
assert_eq!(multiexp::multiexp_vartime(&pairs), sum);
}
}
// Test FROST key generation and serialization of MultisigKeys works as expected
// Test FROST key generation and serialization of FrostKeys works as expected
key_generation::<_, C>(rng);
keys_serialization::<_, C>(rng);
}

View File

@@ -0,0 +1,77 @@
use rand::rngs::OsRng;
use crate::{curve, tests::vectors::{Vectors, test_with_vectors}};
#[cfg(any(test, feature = "ristretto"))]
#[test]
fn ristretto_vectors() {
test_with_vectors::<_, curve::Ristretto, curve::IetfRistrettoHram>(
&mut OsRng,
Vectors {
threshold: 2,
shares: &[
"5c3430d391552f6e60ecdc093ff9f6f4488756aa6cebdbad75a768010b8f830e",
"b06fc5eac20b4f6e1b271d9df2343d843e1e1fb03c4cbb673f2872d459ce6f01",
"f17e505f0e2581c6acfe54d3846a622834b5e7b50cad9a2109a97ba7a80d5c04"
],
group_secret: "1b25a55e463cfd15cf14a5d3acc3d15053f08da49c8afcf3ab265f2ebc4f970b",
group_key: "e2a62f39eede11269e3bd5a7d97554f5ca384f9f6d3dd9c3c0d05083c7254f57",
msg: "74657374",
included: &[1, 3],
nonces: &[
[
"b358743151e33d84bf00c12f71808f4103957c3e2cabab7b895c436b5e70f90c",
"7bd112153b9ae1ab9b31f5e78f61f5c4ca9ee67b7ea6d1181799c409d14c350c"
],
[
"22acad88478e0d0373a991092a322ebd1b9a2dad90451a976d0db3215426af0e",
"9155e3d7bcf7cd468b980c7e20b2c77cbdfbe33a1dcae031fd8bc6b1403f4b04"
]
],
sig_shares: &[
"ff801b4e0839faa67f16dee4127b9f7fbcf5fd007900257b0e2bbc02cbe5e709",
"afdf5481023c855bf3411a5c8a5fafa92357296a078c3b80dc168f294cb4f504"
],
sig: "deae61af10e8ee48ba492573592fba547f5debeff6bd6e2024e8673584746f5e".to_owned() +
"ae6070cf0a757f027358f8409dda4e29e04c276b808c60fbea414b2c179add0e"
}
);
}
#[cfg(feature = "ed25519")]
#[test]
fn ed25519_vectors() {
test_with_vectors::<_, curve::Ed25519, curve::IetfEd25519Hram>(
&mut OsRng,
Vectors {
threshold: 2,
shares: &[
"929dcc590407aae7d388761cddb0c0db6f5627aea8e217f4a033f2ec83d93509",
"a91e66e012e4364ac9aaa405fcafd370402d9859f7b6685c07eed76bf409e80d",
"d3cb090a075eb154e82fdb4b3cb507f110040905468bb9c46da8bdea643a9a02"
],
group_secret: "7b1c33d3f5291d85de664833beb1ad469f7fb6025a0ec78b3a790c6e13a98304",
group_key: "15d21ccd7ee42959562fc8aa63224c8851fb3ec85a3faf66040d380fb9738673",
msg: "74657374",
included: &[1, 3],
nonces: &[
[
"8c76af04340e83bb5fc427c117d38347fc8ef86d5397feea9aa6412d96c05b0a",
"14a37ddbeae8d9e9687369e5eb3c6d54f03dc19d76bb54fb5425131bc37a600b"
],
[
"5ca39ebab6874f5e7b5089f3521819a2aa1e2cf738bae6974ee80555de2ef70e",
"0afe3650c4815ff37becd3c6948066e906e929ea9b8f546c74e10002dbcc150c"
]
],
sig_shares: &[
"4369474a398aa10357b60d683da91ea6a767dcf53fd541a8ed6b4d780827ea0a",
"32fcc690d926075e45d2dfb746bab71447943cddbefe80d122c39174aa2e1004"
],
sig: "2b8d9c6995333c5990e3a3dd6568785539d3322f7f0376452487ea35cfda587b".to_owned() +
"75650edb12b1a8619c88ed1f8463d6baeefb18d3fed3c279102fdfecb255fa0e"
}
);
}

View File

@@ -0,0 +1,55 @@
use rand::rngs::OsRng;
#[cfg(feature = "secp256k1")]
use crate::tests::{curve::test_curve, schnorr::test_schnorr};
#[cfg(feature = "secp256k1")]
use crate::curve::Secp256k1;
#[cfg(feature = "p256")]
use crate::tests::vectors::{Vectors, test_with_vectors};
#[cfg(feature = "p256")]
use crate::curve::{P256, IetfP256Hram};
#[cfg(feature = "secp256k1")]
#[test]
fn secp256k1_non_ietf() {
test_curve::<_, Secp256k1>(&mut OsRng);
test_schnorr::<_, Secp256k1>(&mut OsRng);
}
#[cfg(feature = "p256")]
#[test]
fn p256_vectors() {
test_with_vectors::<_, P256, IetfP256Hram>(
&mut OsRng,
Vectors {
threshold: 2,
shares: &[
"0c9c1a0fe806c184add50bbdcac913dda73e482daf95dcb9f35dbb0d8a9f7731",
"8d8e787bef0ff6c2f494ca45f4dad198c6bee01212d6c84067159c52e1863ad5",
"0e80d6e8f6192c003b5488ce1eec8f5429587d48cf001541e713b2d53c09d928"
],
group_secret: "8ba9bba2e0fd8c4767154d35a0b7562244a4aaf6f36c8fb8735fa48b301bd8de",
group_key: "023a309ad94e9fe8a7ba45dfc58f38bf091959d3c99cfbd02b4dc00585ec45ab70",
msg: "74657374",
included: &[1, 3],
nonces: &[
[
"081617b24375e069b39f649d4c4ce2fba6e38b73e7c16759de0b6079a22c4c7e",
"4de5fb77d99f03a2491a83a6a4cb91ca3c82a3f34ce94cec939174f47c9f95dd"
],
[
"d186ea92593f83ea83181b184d41aa93493301ac2bc5b4b1767e94d2db943e38",
"486e2ee25a3fbc8e6399d748b077a2755fde99fa85cc24fa647ea4ebf5811a15"
]
],
sig_shares: &[
"9e4d8865faf8c7b3193a3b35eda3d9e12118447114b1e7d5b4809ea28067f8a9",
"b7d094eab6305ae74daeed1acd31abba9ab81f638d38b72c132cb25a5dfae1fc"
],
sig: "0342c14c77f9d4ef9b8bd64fb0d7bbfdb9f8216a44e5f7bbe6ac0f3ed5e1a57367".to_owned() +
"561e1d51b129229966e92850bad5859bfee96926fad3007cd3f38639e1ffb554"
}
);
}

View File

@@ -1,2 +1,4 @@
mod secp256k1;
mod schnorr;
#[cfg(any(test, feature = "dalek"))]
mod dalek;
#[cfg(feature = "kp256")]
mod kp256;

View File

@@ -1,42 +0,0 @@
use std::rc::Rc;
use rand::rngs::OsRng;
use crate::{
Curve, schnorr, algorithm::{Hram, Schnorr},
tests::{key_gen, algorithm_machines, sign as sign_test, literal::secp256k1::{Secp256k1, TestHram}}
};
const MESSAGE: &[u8] = b"Hello World";
#[test]
fn sign() {
sign_test(
&mut OsRng,
algorithm_machines(
&mut OsRng,
Schnorr::<Secp256k1, TestHram>::new(),
&key_gen::<_, Secp256k1>(&mut OsRng)
),
MESSAGE
);
}
#[test]
fn sign_with_offset() {
let mut keys = key_gen::<_, Secp256k1>(&mut OsRng);
let group_key = keys[&1].group_key();
let offset = Secp256k1::hash_to_F(b"offset");
for i in 1 ..= u16::try_from(keys.len()).unwrap() {
keys.insert(i, Rc::new(keys[&i].offset(offset)));
}
let offset_key = group_key + (Secp256k1::generator_table() * offset);
let sig = sign_test(
&mut OsRng,
algorithm_machines(&mut OsRng, Schnorr::<Secp256k1, TestHram>::new(), &keys),
MESSAGE
);
assert!(schnorr::verify(offset_key, TestHram::hram(&sig.R, &offset_key, MESSAGE), &sig));
}

View File

@@ -1,120 +0,0 @@
use core::convert::TryInto;
use rand::rngs::OsRng;
use ff::PrimeField;
use group::GroupEncoding;
use sha2::{Digest, Sha256, Sha512};
use k256::{
elliptic_curve::{generic_array::GenericArray, bigint::{ArrayEncoding, U512}, ops::Reduce},
Scalar,
ProjectivePoint
};
use crate::{CurveError, Curve, algorithm::Hram, tests::curve::test_curve};
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Secp256k1;
impl Curve for Secp256k1 {
type F = Scalar;
type G = ProjectivePoint;
type T = ProjectivePoint;
fn id() -> String {
"secp256k1".to_string()
}
fn id_len() -> u8 {
u8::try_from(Self::id().len()).unwrap()
}
fn generator() -> Self::G {
Self::G::GENERATOR
}
fn generator_table() -> Self::T {
Self::G::GENERATOR
}
fn little_endian() -> bool {
false
}
// The IETF draft doesn't specify a secp256k1 ciphersuite
// This test just uses the simplest ciphersuite which would still be viable to deploy
// The comparable P-256 curve uses hash_to_field from the Hash To Curve IETF draft with a context
// string and further DST for H1 ("rho") and H3 ("digest"). It's not currently worth it to add
// that weight, yet if secp256k1 is ever officially acknowledged (not just a testing curve), it
// must be properly implemented.
fn hash_msg(msg: &[u8]) -> Vec<u8> {
(&Sha256::digest(msg)).to_vec()
}
fn hash_binding_factor(binding: &[u8]) -> Self::F {
Self::hash_to_F(&[b"rho", binding].concat())
}
// Use wide reduction for security
fn hash_to_F(data: &[u8]) -> Self::F {
Scalar::from_uint_reduced(U512::from_be_byte_array(Sha512::digest(data)))
}
fn F_len() -> usize {
32
}
fn G_len() -> usize {
33
}
fn F_from_slice(slice: &[u8]) -> Result<Self::F, CurveError> {
let bytes: [u8; 32] = slice.try_into()
.map_err(|_| CurveError::InvalidLength(32, slice.len()))?;
let scalar = Scalar::from_repr(bytes.into());
if scalar.is_none().unwrap_u8() == 1 {
Err(CurveError::InvalidScalar)?;
}
Ok(scalar.unwrap())
}
fn G_from_slice(slice: &[u8]) -> Result<Self::G, CurveError> {
let point = ProjectivePoint::from_bytes(GenericArray::from_slice(slice));
if point.is_none().unwrap_u8() == 1 {
Err(CurveError::InvalidScalar)?;
}
Ok(point.unwrap())
}
fn F_to_bytes(f: &Self::F) -> Vec<u8> {
(&f.to_bytes()).to_vec()
}
fn G_to_bytes(g: &Self::G) -> Vec<u8> {
(&g.to_bytes()).to_vec()
}
}
#[allow(non_snake_case)]
#[derive(Clone)]
pub struct TestHram {}
impl Hram<Secp256k1> for TestHram {
#[allow(non_snake_case)]
fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar {
Scalar::from_uint_reduced(
U512::from_be_byte_array(
Sha512::new()
.chain_update(Secp256k1::G_to_bytes(R))
.chain_update(Secp256k1::G_to_bytes(A))
.chain_update(m)
.finalize()
)
)
}
}
#[test]
fn secp256k1_curve() {
test_curve::<_, Secp256k1>(&mut OsRng);
}

View File

@@ -1,23 +1,22 @@
use std::{rc::Rc, collections::HashMap};
use std::{sync::Arc, collections::HashMap};
use rand_core::{RngCore, CryptoRng};
use ff::Field;
use group::ff::Field;
use crate::{
Curve,
MultisigParams, MultisigKeys,
FrostParams, FrostKeys,
lagrange,
key_gen,
key_gen::KeyGenMachine,
algorithm::Algorithm,
sign::{StateMachine, AlgorithmMachine}
sign::{PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine}
};
// Internal tests
mod schnorr;
// Test suites for public usage
pub mod curve;
pub mod schnorr;
pub mod vectors;
// Literal test definitions to run during `cargo test`
#[cfg(test)]
@@ -37,50 +36,37 @@ pub fn clone_without<K: Clone + std::cmp::Eq + std::hash::Hash, V: Clone>(
pub fn key_gen<R: RngCore + CryptoRng, C: Curve>(
rng: &mut R
) -> HashMap<u16, Rc<MultisigKeys<C>>> {
let mut params = HashMap::new();
) -> HashMap<u16, Arc<FrostKeys<C>>> {
let mut machines = HashMap::new();
let mut commitments = HashMap::new();
for i in 1 ..= PARTICIPANTS {
params.insert(
i,
MultisigParams::new(
THRESHOLD,
PARTICIPANTS,
i
).unwrap()
);
machines.insert(
i,
key_gen::StateMachine::<C>::new(
params[&i],
"FROST test key_gen".to_string()
)
);
commitments.insert(
i,
machines.get_mut(&i).unwrap().generate_coefficients(rng).unwrap()
let machine = KeyGenMachine::<C>::new(
FrostParams::new(THRESHOLD, PARTICIPANTS, i).unwrap(),
"FROST Test key_gen".to_string()
);
let (machine, these_commitments) = machine.generate_coefficients(rng);
machines.insert(i, machine);
commitments.insert(i, these_commitments);
}
let mut secret_shares = HashMap::new();
for (l, machine) in machines.iter_mut() {
secret_shares.insert(
*l,
let mut machines = machines.drain().map(|(l, machine)| {
let (machine, shares) = machine.generate_secret_shares(
rng,
// clone_without isn't necessary, as this machine's own data will be inserted without
// conflict, yet using it ensures the machine's own data is actually inserted as expected
machine.generate_secret_shares(rng, clone_without(&commitments, l)).unwrap()
);
}
clone_without(&commitments, &l)
).unwrap();
secret_shares.insert(l, shares);
(l, machine)
}).collect::<HashMap<_, _>>();
let mut verification_shares = None;
let mut group_key = None;
let mut keys = HashMap::new();
for (i, machine) in machines.iter_mut() {
machines.drain().map(|(i, machine)| {
let mut our_secret_shares = HashMap::new();
for (l, shares) in &secret_shares {
if i == l {
if i == *l {
continue;
}
our_secret_shares.insert(*l, shares[&i].clone());
@@ -99,13 +85,11 @@ pub fn key_gen<R: RngCore + CryptoRng, C: Curve>(
}
assert_eq!(group_key.unwrap(), these_keys.group_key());
keys.insert(*i, Rc::new(these_keys));
}
keys
(i, Arc::new(these_keys))
}).collect::<HashMap<_, _>>()
}
pub fn recover<C: Curve>(keys: &HashMap<u16, MultisigKeys<C>>) -> C::F {
pub fn recover<C: Curve>(keys: &HashMap<u16, FrostKeys<C>>) -> C::F {
let first = keys.values().next().expect("no keys provided");
assert!(keys.len() >= first.params().t().into(), "not enough keys provided");
let included = keys.keys().cloned().collect::<Vec<_>>();
@@ -114,14 +98,14 @@ pub fn recover<C: Curve>(keys: &HashMap<u16, MultisigKeys<C>>) -> C::F {
C::F::zero(),
|accum, (i, keys)| accum + (keys.secret_share() * lagrange::<C::F>(*i, &included))
);
assert_eq!(C::generator_table() * group_private, first.group_key(), "failed to recover keys");
assert_eq!(C::GENERATOR * group_private, first.group_key(), "failed to recover keys");
group_private
}
pub fn algorithm_machines<R: RngCore, C: Curve, A: Algorithm<C>>(
rng: &mut R,
algorithm: A,
keys: &HashMap<u16, Rc<MultisigKeys<C>>>,
keys: &HashMap<u16, Arc<FrostKeys<C>>>,
) -> HashMap<u16, AlgorithmMachine<C, A>> {
let mut included = vec![];
while included.len() < usize::from(keys[&1].params().t()) {
@@ -148,27 +132,28 @@ pub fn algorithm_machines<R: RngCore, C: Curve, A: Algorithm<C>>(
).collect()
}
pub fn sign<R: RngCore + CryptoRng, M: StateMachine>(
pub fn sign<R: RngCore + CryptoRng, M: PreprocessMachine>(
rng: &mut R,
mut machines: HashMap<u16, M>,
msg: &[u8]
) -> M::Signature {
let mut commitments = HashMap::new();
for (i, machine) in machines.iter_mut() {
commitments.insert(*i, machine.preprocess(rng).unwrap());
}
let mut machines = machines.drain().map(|(i, machine)| {
let (machine, preprocess) = machine.preprocess(rng);
commitments.insert(i, preprocess);
(i, machine)
}).collect::<HashMap<_, _>>();
let mut shares = HashMap::new();
for (i, machine) in machines.iter_mut() {
shares.insert(
*i,
machine.sign(clone_without(&commitments, i), msg).unwrap()
);
}
let mut machines = machines.drain().map(|(i, machine)| {
let (machine, share) = machine.sign(clone_without(&commitments, &i), msg).unwrap();
shares.insert(i, share);
(i, machine)
}).collect::<HashMap<_, _>>();
let mut signature = None;
for (i, machine) in machines.iter_mut() {
let sig = machine.complete(clone_without(&shares, i)).unwrap();
for (i, machine) in machines.drain() {
let sig = machine.complete(clone_without(&shares, &i)).unwrap();
if signature.is_none() {
signature = Some(sig.clone());
}

View File

@@ -1,16 +1,21 @@
use std::{marker::PhantomData, sync::Arc, collections::HashMap};
use rand_core::{RngCore, CryptoRng};
use ff::Field;
use group::{ff::Field, GroupEncoding};
use crate::{Curve, schnorr, algorithm::SchnorrSignature};
use crate::{
Curve, FrostKeys, schnorr::{self, SchnorrSignature}, algorithm::{Hram, Schnorr},
tests::{key_gen, algorithm_machines, sign as sign_test}
};
pub(crate) fn sign<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
pub(crate) fn core_sign<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
let private_key = C::F::random(&mut *rng);
let nonce = C::F::random(&mut *rng);
let challenge = C::F::random(rng); // Doesn't bother to craft an HRAM
assert!(
schnorr::verify::<C>(
C::generator_table() * private_key,
C::GENERATOR * private_key,
challenge,
&schnorr::sign(private_key, nonce, challenge)
)
@@ -20,17 +25,17 @@ pub(crate) fn sign<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
// The above sign function verifies signing works
// This verifies invalid signatures don't pass, using zero signatures, which should effectively be
// random
pub(crate) fn verify<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
pub(crate) fn core_verify<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
assert!(
!schnorr::verify::<C>(
C::generator_table() * C::F::random(&mut *rng),
C::GENERATOR * C::F::random(&mut *rng),
C::F::random(rng),
&SchnorrSignature { R: C::generator_table() * C::F::zero(), s: C::F::zero() }
&SchnorrSignature { R: C::GENERATOR * C::F::zero(), s: C::F::zero() }
)
);
}
pub(crate) fn batch_verify<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
pub(crate) fn core_batch_verify<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
// Create 5 signatures
let mut keys = vec![];
let mut challenges = vec![];
@@ -43,7 +48,7 @@ pub(crate) fn batch_verify<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
// Batch verify
let triplets = (0 .. 5).map(
|i| (u16::try_from(i + 1).unwrap(), C::generator_table() * keys[i], challenges[i], sigs[i])
|i| (u16::try_from(i + 1).unwrap(), C::GENERATOR * keys[i], challenges[i], sigs[i])
).collect::<Vec<_>>();
schnorr::batch_verify(rng, &triplets).unwrap();
@@ -71,3 +76,56 @@ pub(crate) fn batch_verify<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
}
}
}
fn sign_core<R: RngCore + CryptoRng, C: Curve>(
rng: &mut R,
group_key: C::G,
keys: &HashMap<u16, Arc<FrostKeys<C>>>
) {
const MESSAGE: &'static [u8] = b"Hello, World!";
let machines = algorithm_machines(rng, Schnorr::<C, TestHram<C>>::new(), keys);
let sig = sign_test(&mut *rng, machines, MESSAGE);
assert!(schnorr::verify(group_key, TestHram::<C>::hram(&sig.R, &group_key, MESSAGE), &sig));
}
#[derive(Clone)]
pub struct TestHram<C: Curve> {
_curve: PhantomData<C>
}
impl<C: Curve> Hram<C> for TestHram<C> {
#[allow(non_snake_case)]
fn hram(R: &C::G, A: &C::G, m: &[u8]) -> C::F {
C::hash_to_F(b"challenge", &[R.to_bytes().as_ref(), A.to_bytes().as_ref(), m].concat())
}
}
fn sign<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
let keys = key_gen::<_, C>(&mut *rng);
sign_core(rng, keys[&1].group_key(), &keys);
}
fn sign_with_offset<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
let mut keys = key_gen::<_, C>(&mut *rng);
let group_key = keys[&1].group_key();
let offset = C::hash_to_F(b"FROST Test sign_with_offset", b"offset");
for i in 1 ..= u16::try_from(keys.len()).unwrap() {
keys.insert(i, Arc::new(keys[&i].offset(offset)));
}
let offset_key = group_key + (C::GENERATOR * offset);
sign_core(rng, offset_key, &keys);
}
pub fn test_schnorr<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
// Test Schnorr signatures work as expected
// This is a bit unnecessary, as they should for any valid curve, yet this establishes sanity
core_sign::<_, C>(rng);
core_verify::<_, C>(rng);
core_batch_verify::<_, C>(rng);
// Test Schnorr signatures under FROST
sign::<_, C>(rng);
sign_with_offset::<_, C>(rng);
}

View File

@@ -0,0 +1,136 @@
use std::{sync::Arc, collections::HashMap};
use rand_core::{RngCore, CryptoRng};
use group::{ff::PrimeField, GroupEncoding};
use crate::{
curve::{Curve, F_from_slice, G_from_slice}, FrostKeys,
algorithm::{Schnorr, Hram},
sign::{PreprocessPackage, SignMachine, SignatureMachine, AlgorithmMachine},
tests::{curve::test_curve, schnorr::test_schnorr, recover}
};
pub struct Vectors {
pub threshold: u16,
pub shares: &'static [&'static str],
pub group_secret: &'static str,
pub group_key: &'static str,
pub msg: &'static str,
pub included: &'static [u16],
pub nonces: &'static [[&'static str; 2]],
pub sig_shares: &'static [&'static str],
pub sig: String
}
// Load these vectors into FrostKeys using a custom serialization it'll deserialize
fn vectors_to_multisig_keys<C: Curve>(vectors: &Vectors) -> HashMap<u16, FrostKeys<C>> {
let shares = vectors.shares.iter().map(
|secret| F_from_slice::<C::F>(&hex::decode(secret).unwrap()).unwrap()
).collect::<Vec<_>>();
let verification_shares = shares.iter().map(
|secret| C::GENERATOR * secret
).collect::<Vec<_>>();
let mut keys = HashMap::new();
for i in 1 ..= u16::try_from(shares.len()).unwrap() {
let mut serialized = vec![];
serialized.extend(u64::try_from(C::ID.len()).unwrap().to_be_bytes());
serialized.extend(C::ID);
serialized.extend(vectors.threshold.to_be_bytes());
serialized.extend(u16::try_from(shares.len()).unwrap().to_be_bytes());
serialized.extend(i.to_be_bytes());
serialized.extend(shares[usize::from(i) - 1].to_repr().as_ref());
serialized.extend(&hex::decode(vectors.group_key).unwrap());
for share in &verification_shares {
serialized.extend(share.to_bytes().as_ref());
}
let these_keys = FrostKeys::<C>::deserialize(&serialized).unwrap();
assert_eq!(these_keys.params().t(), vectors.threshold);
assert_eq!(usize::from(these_keys.params().n()), shares.len());
assert_eq!(these_keys.params().i(), i);
assert_eq!(these_keys.secret_share(), shares[usize::from(i - 1)]);
assert_eq!(&hex::encode(these_keys.group_key().to_bytes().as_ref()), vectors.group_key);
keys.insert(i, these_keys);
}
keys
}
pub fn test_with_vectors<
R: RngCore + CryptoRng,
C: Curve,
H: Hram<C>
>(rng: &mut R, vectors: Vectors) {
// Do basic tests before trying the vectors
test_curve::<_, C>(&mut *rng);
test_schnorr::<_, C>(rng);
// Test against the vectors
let keys = vectors_to_multisig_keys::<C>(&vectors);
let group_key = G_from_slice::<C::G>(&hex::decode(vectors.group_key).unwrap()).unwrap();
assert_eq!(
C::GENERATOR * F_from_slice::<C::F>(&hex::decode(vectors.group_secret).unwrap()).unwrap(),
group_key
);
assert_eq!(
recover(&keys),
F_from_slice::<C::F>(&hex::decode(vectors.group_secret).unwrap()).unwrap()
);
let mut machines = vec![];
for i in vectors.included {
machines.push((
*i,
AlgorithmMachine::new(
Schnorr::<C, H>::new(),
Arc::new(keys[i].clone()),
vectors.included.clone()
).unwrap()
));
}
let mut commitments = HashMap::new();
let mut c = 0;
let mut machines = machines.drain(..).map(|(i, machine)| {
let nonces = [
F_from_slice::<C::F>(&hex::decode(vectors.nonces[c][0]).unwrap()).unwrap(),
F_from_slice::<C::F>(&hex::decode(vectors.nonces[c][1]).unwrap()).unwrap()
];
c += 1;
let mut serialized = (C::GENERATOR * nonces[0]).to_bytes().as_ref().to_vec();
serialized.extend((C::GENERATOR * nonces[1]).to_bytes().as_ref());
let (machine, serialized) = machine.unsafe_override_preprocess(
PreprocessPackage { nonces: vec![nonces], serialized: serialized.clone() }
);
commitments.insert(i, serialized);
(i, machine)
}).collect::<Vec<_>>();
let mut shares = HashMap::new();
c = 0;
let mut machines = machines.drain(..).map(|(i, machine)| {
let (machine, share) = machine.sign(
commitments.clone(),
&hex::decode(vectors.msg).unwrap()
).unwrap();
assert_eq!(share, hex::decode(vectors.sig_shares[c]).unwrap());
c += 1;
shares.insert(i, share);
(i, machine)
}).collect::<HashMap<_, _>>();
for (_, machine) in machines.drain() {
let sig = machine.complete(shares.clone()).unwrap();
let mut serialized = sig.R.to_bytes().as_ref().to_vec();
serialized.extend(sig.s.to_repr().as_ref());
assert_eq!(hex::encode(serialized), vectors.sig);
}
}

View File

@@ -3,13 +3,22 @@ name = "multiexp"
version = "0.1.0"
description = "Multiexponentation algorithms for ff/group"
license = "MIT"
repository = "https://github.com/serai-dex/serai"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["multiexp", "ff", "group"]
edition = "2021"
[dependencies]
group = "0.11"
ff = "0.12"
group = "0.12"
rand_core = { version = "0.6", optional = true }
[dev-dependencies]
rand_core = "0.6"
k256 = { version = "0.11", features = ["bits"] }
dalek-ff-group = { path = "../dalek-ff-group" }
[features]
batch = ["rand_core"]

View File

@@ -0,0 +1,6 @@
# Multiexp
A multiexp implementation for ff/group implementing Straus and Pippenger. A
batch verification API is also available via the "batch" feature, which enables
secure multiexponentation batch verification given a series of values which
should sum to 0, identifying which doesn't via binary search if they don't.

View File

@@ -0,0 +1,84 @@
use rand_core::{RngCore, CryptoRng};
use ff::{Field, PrimeFieldBits};
use group::Group;
use crate::{multiexp, multiexp_vartime};
#[cfg(feature = "batch")]
pub struct BatchVerifier<Id: Copy, G: Group>(Vec<(Id, Vec<(G::Scalar, G)>)>);
#[cfg(feature = "batch")]
impl<Id: Copy, G: Group> BatchVerifier<Id, G> where <G as Group>::Scalar: PrimeFieldBits {
pub fn new(capacity: usize) -> BatchVerifier<Id, G> {
BatchVerifier(Vec::with_capacity(capacity))
}
pub fn queue<
R: RngCore + CryptoRng,
I: IntoIterator<Item = (G::Scalar, G)>
>(&mut self, rng: &mut R, id: Id, pairs: I) {
// Define a unique scalar factor for this set of variables so individual items can't overlap
let u = if self.0.len() == 0 {
G::Scalar::one()
} else {
let mut weight;
// Ensure it's non-zero, as a zero scalar would cause this item to pass no matter what
while {
weight = G::Scalar::random(&mut *rng);
weight.is_zero().into()
} {}
weight
};
self.0.push((id, pairs.into_iter().map(|(scalar, point)| (scalar * u, point)).collect()));
}
#[must_use]
pub fn verify(&self) -> bool {
multiexp(
&self.0.iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::<Vec<_>>()
).is_identity().into()
}
#[must_use]
pub fn verify_vartime(&self) -> bool {
multiexp_vartime(
&self.0.iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::<Vec<_>>()
).is_identity().into()
}
// A constant time variant may be beneficial for robust protocols
pub fn blame_vartime(&self) -> Option<Id> {
let mut slice = self.0.as_slice();
while slice.len() > 1 {
let split = slice.len() / 2;
if multiexp_vartime(
&slice[.. split].iter().flat_map(|pairs| pairs.1.iter()).cloned().collect::<Vec<_>>()
).is_identity().into() {
slice = &slice[split ..];
} else {
slice = &slice[.. split];
}
}
slice.get(0).filter(
|(_, value)| !bool::from(multiexp_vartime(value).is_identity())
).map(|(id, _)| *id)
}
pub fn verify_with_vartime_blame(&self) -> Result<(), Id> {
if self.verify() {
Ok(())
} else {
Err(self.blame_vartime().unwrap())
}
}
pub fn verify_vartime_with_vartime_blame(&self) -> Result<(), Id> {
if self.verify_vartime() {
Ok(())
} else {
Err(self.blame_vartime().unwrap())
}
}
}

View File

@@ -1,156 +1,176 @@
use group::{ff::PrimeField, Group};
use ff::PrimeFieldBits;
use group::Group;
mod straus;
use straus::*;
mod pippenger;
use pippenger::*;
#[cfg(feature = "batch")]
use group::ff::Field;
mod batch;
#[cfg(feature = "batch")]
use rand_core::{RngCore, CryptoRng};
pub use batch::BatchVerifier;
fn prep<
G: Group,
I: IntoIterator<Item = (G::Scalar, G)>
>(pairs: I, little: bool) -> (Vec<Vec<u8>>, Vec<[G; 16]>) {
let mut nibbles = vec![];
let mut tables = vec![];
for pair in pairs.into_iter() {
let p = nibbles.len();
nibbles.push(vec![]);
{
let mut repr = pair.0.to_repr();
let bytes = repr.as_mut();
if !little {
bytes.reverse();
}
#[cfg(test)]
mod tests;
nibbles[p].resize(bytes.len() * 2, 0);
for i in 0 .. bytes.len() {
nibbles[p][i * 2] = bytes[i] & 0b1111;
nibbles[p][(i * 2) + 1] = (bytes[i] >> 4) & 0b1111;
}
pub(crate) fn prep_bits<G: Group>(
pairs: &[(G::Scalar, G)],
window: u8
) -> Vec<Vec<u8>> where G::Scalar: PrimeFieldBits {
let w_usize = usize::from(window);
let mut groupings = vec![];
for pair in pairs {
let p = groupings.len();
let bits = pair.0.to_le_bits();
groupings.push(vec![0; (bits.len() + (w_usize - 1)) / w_usize]);
for (i, bit) in bits.into_iter().enumerate() {
let bit = bit as u8;
debug_assert_eq!(bit | 1, 1);
groupings[p][i / w_usize] |= bit << (i % w_usize);
}
}
tables.push([G::identity(); 16]);
groupings
}
pub(crate) fn prep_tables<G: Group>(
pairs: &[(G::Scalar, G)],
window: u8
) -> Vec<Vec<G>> {
let mut tables = Vec::with_capacity(pairs.len());
for pair in pairs {
let p = tables.len();
tables.push(vec![G::identity(); 2_usize.pow(window.into())]);
let mut accum = G::identity();
for i in 1 .. 16 {
for i in 1 .. tables[p].len() {
accum += pair.1;
tables[p][i] = accum;
}
}
(nibbles, tables)
tables
}
// An implementation of Straus, with a extremely minimal API that lets us add other algorithms in
// the future. Takes in an iterator of scalars and points with a boolean for if the scalars are
// little endian encoded in their Reprs or not
pub fn multiexp<
G: Group,
I: IntoIterator<Item = (G::Scalar, G)>
>(pairs: I, little: bool) -> G {
let (nibbles, tables) = prep(pairs, little);
let mut res = G::identity();
for b in (0 .. nibbles[0].len()).rev() {
for _ in 0 .. 4 {
res = res.double();
}
for s in 0 .. tables.len() {
res += tables[s][usize::from(nibbles[s][b])];
}
}
res
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Algorithm {
Null,
Single,
Straus(u8),
Pippenger(u8)
}
pub fn multiexp_vartime<
G: Group,
I: IntoIterator<Item = (G::Scalar, G)>
>(pairs: I, little: bool) -> G {
let (nibbles, tables) = prep(pairs, little);
/*
Release (with runs 20, so all of these are off by 20x):
let mut res = G::identity();
for b in (0 .. nibbles[0].len()).rev() {
for _ in 0 .. 4 {
res = res.double();
}
k256
Straus 3 is more efficient at 5 with 678µs per
Straus 4 is more efficient at 10 with 530µs per
Straus 5 is more efficient at 35 with 467µs per
for s in 0 .. tables.len() {
if nibbles[s][b] != 0 {
res += tables[s][usize::from(nibbles[s][b])];
}
}
}
res
}
Pippenger 5 is more efficient at 125 with 431µs per
Pippenger 6 is more efficient at 275 with 349µs per
Pippenger 7 is more efficient at 375 with 360µs per
#[cfg(feature = "batch")]
pub struct BatchVerifier<Id: Copy, G: Group>(Vec<(Id, Vec<(G::Scalar, G)>)>, bool);
dalek
Straus 3 is more efficient at 5 with 519µs per
Straus 4 is more efficient at 10 with 376µs per
Straus 5 is more efficient at 170 with 330µs per
#[cfg(feature = "batch")]
impl<Id: Copy, G: Group> BatchVerifier<Id, G> {
pub fn new(capacity: usize, endian: bool) -> BatchVerifier<Id, G> {
BatchVerifier(Vec::with_capacity(capacity), endian)
Pippenger 5 is more efficient at 125 with 305µs per
Pippenger 6 is more efficient at 275 with 250µs per
Pippenger 7 is more efficient at 450 with 205µs per
Pippenger 8 is more efficient at 800 with 213µs per
Debug (with runs 5, so...):
k256
Straus 3 is more efficient at 5 with 2532µs per
Straus 4 is more efficient at 10 with 1930µs per
Straus 5 is more efficient at 80 with 1632µs per
Pippenger 5 is more efficient at 150 with 1441µs per
Pippenger 6 is more efficient at 300 with 1235µs per
Pippenger 7 is more efficient at 475 with 1182µs per
Pippenger 8 is more efficient at 625 with 1170µs per
dalek:
Straus 3 is more efficient at 5 with 971µs per
Straus 4 is more efficient at 10 with 782µs per
Straus 5 is more efficient at 75 with 778µs per
Straus 6 is more efficient at 165 with 867µs per
Pippenger 5 is more efficient at 125 with 677µs per
Pippenger 6 is more efficient at 250 with 655µs per
Pippenger 7 is more efficient at 475 with 500µs per
Pippenger 8 is more efficient at 875 with 499µs per
*/
fn algorithm(len: usize) -> Algorithm {
#[cfg(not(debug_assertions))]
if len == 0 {
Algorithm::Null
} else if len == 1 {
Algorithm::Single
} else if len < 10 {
// Straus 2 never showed a performance benefit, even with just 2 elements
Algorithm::Straus(3)
} else if len < 20 {
Algorithm::Straus(4)
} else if len < 50 {
Algorithm::Straus(5)
} else if len < 100 {
Algorithm::Pippenger(4)
} else if len < 125 {
Algorithm::Pippenger(5)
} else if len < 275 {
Algorithm::Pippenger(6)
} else if len < 400 {
Algorithm::Pippenger(7)
} else {
Algorithm::Pippenger(8)
}
pub fn queue<
R: RngCore + CryptoRng,
I: IntoIterator<Item = (G::Scalar, G)>
>(&mut self, rng: &mut R, id: Id, pairs: I) {
// Define a unique scalar factor for this set of variables so individual items can't overlap
let u = if self.0.len() == 0 {
G::Scalar::one()
} else {
G::Scalar::random(rng)
};
self.0.push((id, pairs.into_iter().map(|(scalar, point)| (scalar * u, point)).collect()));
}
pub fn verify(&self) -> bool {
multiexp(
self.0.iter().flat_map(|pairs| pairs.1.iter()).cloned(),
self.1
).is_identity().into()
}
pub fn verify_vartime(&self) -> bool {
multiexp_vartime(
self.0.iter().flat_map(|pairs| pairs.1.iter()).cloned(),
self.1
).is_identity().into()
}
// A constant time variant may be beneficial for robust protocols
pub fn blame_vartime(&self) -> Option<Id> {
let mut slice = self.0.as_slice();
while slice.len() > 1 {
let split = slice.len() / 2;
if multiexp_vartime(
slice[.. split].iter().flat_map(|pairs| pairs.1.iter()).cloned(),
self.1
).is_identity().into() {
slice = &slice[split ..];
} else {
slice = &slice[.. split];
}
}
slice.get(0).filter(
|(_, value)| !bool::from(multiexp_vartime(value.clone(), self.1).is_identity())
).map(|(id, _)| *id)
}
pub fn verify_with_vartime_blame(&self) -> Result<(), Id> {
if self.verify() {
Ok(())
} else {
Err(self.blame_vartime().unwrap())
}
}
pub fn verify_vartime_with_vartime_blame(&self) -> Result<(), Id> {
if self.verify_vartime() {
Ok(())
} else {
Err(self.blame_vartime().unwrap())
}
#[cfg(debug_assertions)]
if len == 0 {
Algorithm::Null
} else if len == 1 {
Algorithm::Single
} else if len < 10 {
Algorithm::Straus(3)
} else if len < 80 {
Algorithm::Straus(4)
} else if len < 100 {
Algorithm::Straus(5)
} else if len < 125 {
Algorithm::Pippenger(4)
} else if len < 275 {
Algorithm::Pippenger(5)
} else if len < 475 {
Algorithm::Pippenger(6)
} else if len < 750 {
Algorithm::Pippenger(7)
} else {
Algorithm::Pippenger(8)
}
}
// Performs a multiexp, automatically selecting the optimal algorithm based on amount of pairs
pub fn multiexp<G: Group>(pairs: &[(G::Scalar, G)]) -> G where G::Scalar: PrimeFieldBits {
match algorithm(pairs.len()) {
Algorithm::Null => Group::identity(),
Algorithm::Single => pairs[0].1 * pairs[0].0,
Algorithm::Straus(window) => straus(pairs, window),
Algorithm::Pippenger(window) => pippenger(pairs, window)
}
}
pub fn multiexp_vartime<G: Group>(pairs: &[(G::Scalar, G)]) -> G where G::Scalar: PrimeFieldBits {
match algorithm(pairs.len()) {
Algorithm::Null => Group::identity(),
Algorithm::Single => pairs[0].1 * pairs[0].0,
Algorithm::Straus(window) => straus_vartime(pairs, window),
Algorithm::Pippenger(window) => pippenger_vartime(pairs, window)
}
}

View File

@@ -0,0 +1,63 @@
use ff::PrimeFieldBits;
use group::Group;
use crate::prep_bits;
pub(crate) fn pippenger<G: Group>(
pairs: &[(G::Scalar, G)],
window: u8
) -> G where G::Scalar: PrimeFieldBits {
let bits = prep_bits(pairs, window);
let mut res = G::identity();
for n in (0 .. bits[0].len()).rev() {
for _ in 0 .. window {
res = res.double();
}
let mut buckets = vec![G::identity(); 2_usize.pow(window.into())];
for p in 0 .. bits.len() {
buckets[usize::from(bits[p][n])] += pairs[p].1;
}
let mut intermediate_sum = G::identity();
for b in (1 .. buckets.len()).rev() {
intermediate_sum += buckets[b];
res += intermediate_sum;
}
}
res
}
pub(crate) fn pippenger_vartime<G: Group>(
pairs: &[(G::Scalar, G)],
window: u8
) -> G where G::Scalar: PrimeFieldBits {
let bits = prep_bits(pairs, window);
let mut res = G::identity();
for n in (0 .. bits[0].len()).rev() {
if n != (bits[0].len() - 1) {
for _ in 0 .. window {
res = res.double();
}
}
let mut buckets = vec![G::identity(); 2_usize.pow(window.into())];
for p in 0 .. bits.len() {
let nibble = usize::from(bits[p][n]);
if nibble != 0 {
buckets[nibble] += pairs[p].1;
}
}
let mut intermediate_sum = G::identity();
for b in (1 .. buckets.len()).rev() {
intermediate_sum += buckets[b];
res += intermediate_sum;
}
}
res
}

View File

@@ -0,0 +1,49 @@
use ff::PrimeFieldBits;
use group::Group;
use crate::{prep_bits, prep_tables};
pub(crate) fn straus<G: Group>(
pairs: &[(G::Scalar, G)],
window: u8
) -> G where G::Scalar: PrimeFieldBits {
let groupings = prep_bits(pairs, window);
let tables = prep_tables(pairs, window);
let mut res = G::identity();
for b in (0 .. groupings[0].len()).rev() {
for _ in 0 .. window {
res = res.double();
}
for s in 0 .. tables.len() {
res += tables[s][usize::from(groupings[s][b])];
}
}
res
}
pub(crate) fn straus_vartime<G: Group>(
pairs: &[(G::Scalar, G)],
window: u8
) -> G where G::Scalar: PrimeFieldBits {
let groupings = prep_bits(pairs, window);
let tables = prep_tables(pairs, window);
let mut res = G::identity();
for b in (0 .. groupings[0].len()).rev() {
if b != (groupings[0].len() - 1) {
for _ in 0 .. window {
res = res.double();
}
}
for s in 0 .. tables.len() {
if groupings[s][b] != 0 {
res += tables[s][usize::from(groupings[s][b])];
}
}
}
res
}

View File

@@ -0,0 +1,112 @@
use std::time::Instant;
use rand_core::OsRng;
use ff::{Field, PrimeFieldBits};
use group::Group;
use k256::ProjectivePoint;
use dalek_ff_group::EdwardsPoint;
use crate::{straus, pippenger, multiexp, multiexp_vartime};
#[allow(dead_code)]
fn benchmark_internal<G: Group>(straus_bool: bool) where G::Scalar: PrimeFieldBits {
let runs: usize = 20;
let mut start = 0;
let mut increment: usize = 5;
let mut total: usize = 250;
let mut current = 2;
if !straus_bool {
start = 100;
increment = 25;
total = 1000;
current = 4;
};
let mut pairs = Vec::with_capacity(total);
let mut sum = G::identity();
for _ in 0 .. start {
pairs.push((G::Scalar::random(&mut OsRng), G::generator() * G::Scalar::random(&mut OsRng)));
sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0;
}
for _ in 0 .. (total / increment) {
for _ in 0 .. increment {
pairs.push((G::Scalar::random(&mut OsRng), G::generator() * G::Scalar::random(&mut OsRng)));
sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0;
}
let now = Instant::now();
for _ in 0 .. runs {
if straus_bool {
assert_eq!(straus(&pairs, current), sum);
} else {
assert_eq!(pippenger(&pairs, current), sum);
}
}
let current_per = now.elapsed().as_micros() / u128::try_from(pairs.len()).unwrap();
let now = Instant::now();
for _ in 0 .. runs {
if straus_bool {
assert_eq!(straus(&pairs, current + 1), sum);
} else {
assert_eq!(pippenger(&pairs, current + 1), sum);
}
}
let next_per = now.elapsed().as_micros() / u128::try_from(pairs.len()).unwrap();
if next_per < current_per {
current += 1;
println!(
"{} {} is more efficient at {} with {}µs per",
if straus_bool { "Straus" } else { "Pippenger" }, current, pairs.len(), next_per
);
if current >= 8 {
return;
}
}
}
}
fn test_multiexp<G: Group>() where G::Scalar: PrimeFieldBits {
let mut pairs = Vec::with_capacity(1000);
let mut sum = G::identity();
for _ in 0 .. 10 {
for _ in 0 .. 100 {
pairs.push((G::Scalar::random(&mut OsRng), G::generator() * G::Scalar::random(&mut OsRng)));
sum += pairs[pairs.len() - 1].1 * pairs[pairs.len() - 1].0;
}
assert_eq!(multiexp(&pairs), sum);
assert_eq!(multiexp_vartime(&pairs), sum);
}
}
#[test]
fn test_secp256k1() {
  // Exercise the generic multiexp tests over secp256k1 (k256's ProjectivePoint)
  test_multiexp::<ProjectivePoint>();
}
#[test]
fn test_ed25519() {
  // Exercise the generic multiexp tests over Ed25519 (dalek-ff-group's EdwardsPoint)
  test_multiexp::<EdwardsPoint>();
}
// Benchmark Straus and Pippenger over both curves to find their efficiency
// crossover points. Ignored by default as it's a benchmark, not a test
#[ignore]
#[test]
fn benchmark() {
  // Activate the processor's boost clock
  for _ in 0 .. 30 {
    test_multiexp::<ProjectivePoint>();
  }

  // true selects Straus, false selects Pippenger
  for straus in [true, false] {
    benchmark_internal::<ProjectivePoint>(straus);
  }
  for straus in [true, false] {
    benchmark_internal::<EdwardsPoint>(straus);
  }
}

View File

@@ -1,15 +1,19 @@
[package]
name = "transcript"
version = "0.1.0"
description = "A simple transcript definition"
name = "flexible-transcript"
version = "0.1.2"
description = "A simple transcript trait definition, along with viable options"
license = "MIT"
repository = "https://github.com/serai-dex/serai"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["transcript"]
edition = "2021"
[dependencies]
digest = "0.10"
blake2 = { version = "0.10", optional = true }
merlin = { version = "3", optional = true }
[features]
recommended = ["blake2"]
merlin = ["dep:merlin"]

View File

@@ -0,0 +1,27 @@
# Flexible Transcript
Flexible Transcript is a crate offering:
- `Transcript`, a trait offering functions transcripts should implement.
- `DigestTranscript`, a competent transcript format instantiated against a
provided hash function.
- `MerlinTranscript`, a wrapper of `merlin` into the trait (available via the
`merlin` feature).
- `RecommendedTranscript`, a transcript recommended for usage in applications.
Currently, this is `DigestTranscript<Blake2b512>` (available via the
`recommended` feature).
The trait was created while working on an IETF draft which defined an incredibly
simple transcript format. Extensions of the protocol would quickly require a
more competent format, yet implementing the one specified was mandatory to meet
the specification. Accordingly, the library implementing the draft defined an
`IetfTranscript`, dropping labels and not allowing successive challenges, yet
thanks to the trait, allowed protocols building on top to provide their own
transcript format as needed.
`DigestTranscript` takes in any hash function implementing `Digest`, offering a
secure transcript format around it. All items are prefixed by a flag, denoting
their type, and their length.
`MerlinTranscript` was used to justify the API, and if any issues existed with
`DigestTranscript`, enable a fallback. It was also meant as a way to be
compatible with existing Rust projects using `merlin`.

View File

@@ -1,57 +1,101 @@
use core::{marker::PhantomData, fmt::Debug};
#![no_std]
#[cfg(features = "merlin")]
#[cfg(feature = "merlin")]
mod merlin;
#[cfg(features = "merlin")]
pub use merlin::MerlinTranscript;
#[cfg(feature = "merlin")]
pub use crate::merlin::MerlinTranscript;
use digest::Digest;
use digest::{typenum::type_operators::IsGreaterOrEqual, consts::U256, Digest, Output};
pub trait Transcript {
fn domain_separate(&mut self, label: &[u8]);
type Challenge: Clone + Send + Sync + AsRef<[u8]>;
/// Create a new transcript with the specified name
fn new(name: &'static [u8]) -> Self;
/// Apply a domain separator to the transcript
fn domain_separate(&mut self, label: &'static [u8]);
/// Append a message to the transcript
fn append_message(&mut self, label: &'static [u8], message: &[u8]);
fn challenge(&mut self, label: &'static [u8]) -> Vec<u8>;
/// Produce a challenge. This MUST update the transcript as it does so, preventing the same
/// challenge from being generated multiple times
fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge;
/// Produce a RNG seed. Helper function for parties needing to generate random data from an
/// agreed upon state. Internally calls the challenge function for the needed bytes, converting
/// them to the seed format rand_core expects
fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32];
}
enum DigestTranscriptMember {
Name,
Domain,
Label,
Value,
Challenge
}
impl DigestTranscriptMember {
fn as_u8(&self) -> u8 {
match self {
DigestTranscriptMember::Name => 0,
DigestTranscriptMember::Domain => 1,
DigestTranscriptMember::Label => 2,
DigestTranscriptMember::Value => 3,
DigestTranscriptMember::Challenge => 4
}
}
}
/// A trait defining Digests with at least a 256-byte output size, assuming at least a 128-bit
/// level of security accordingly
pub trait SecureDigest: Clone + Digest {}
impl<D: Clone + Digest> SecureDigest for D where D::OutputSize: IsGreaterOrEqual<U256> {}
/// A simple transcript format constructed around the specified hash algorithm
#[derive(Clone, Debug)]
pub struct DigestTranscript<D: Digest>(Vec<u8>, PhantomData<D>);
pub struct DigestTranscript<D: SecureDigest>(D);
impl<D: Digest> PartialEq for DigestTranscript<D> {
fn eq(&self, other: &DigestTranscript<D>) -> bool {
self.0 == other.0
impl<D: SecureDigest> DigestTranscript<D> {
fn append(&mut self, kind: DigestTranscriptMember, value: &[u8]) {
self.0.update(&[kind.as_u8()]);
// Assumes messages don't exceed 16 exabytes
self.0.update(u64::try_from(value.len()).unwrap().to_le_bytes());
self.0.update(value);
}
}
impl<D: Digest> DigestTranscript<D> {
pub fn new(label: Vec<u8>) -> Self {
DigestTranscript(label, PhantomData)
}
}
impl<D: SecureDigest> Transcript for DigestTranscript<D> {
type Challenge = Output<D>;
fn new(name: &'static [u8]) -> Self {
let mut res = DigestTranscript(D::new());
res.append(DigestTranscriptMember::Name, name);
res
}
impl<D: Digest> Transcript for DigestTranscript<D> {
// It may be beneficial for each domain to be a nested transcript which is itself length prefixed
// This would go further than Merlin though and require an accurate end_domain function which has
// frustrations not worth bothering with when this shouldn't actually be meaningful
fn domain_separate(&mut self, label: &[u8]) {
self.append_message(b"domain", label);
self.append(DigestTranscriptMember::Domain, label);
}
fn append_message(&mut self, label: &'static [u8], message: &[u8]) {
self.0.extend(label);
// Assumes messages don't exceed 16 exabytes
self.0.extend(u64::try_from(message.len()).unwrap().to_le_bytes());
self.0.extend(message);
self.append(DigestTranscriptMember::Label, label);
self.append(DigestTranscriptMember::Value, message);
}
fn challenge(&mut self, label: &'static [u8]) -> Vec<u8> {
self.0.extend(label);
D::new().chain_update(&self.0).finalize().to_vec()
fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge {
self.append(DigestTranscriptMember::Challenge, label);
self.0.clone().finalize()
}
fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32] {
let mut seed = [0; 32];
seed.copy_from_slice(&self.challenge(label)[0 .. 32]);
seed.copy_from_slice(&self.challenge(label)[.. 32]);
seed
}
}
#[cfg(feature = "recommended")]
pub type RecommendedTranscript = DigestTranscript<blake2::Blake2b512>;

View File

@@ -1,16 +1,27 @@
use core::{marker::PhantomData, fmt::{Debug, Formatter}};
use core::fmt::{Debug, Formatter};
use digest::Digest;
use crate::Transcript;
#[derive(Clone, PartialEq)]
#[derive(Clone)]
pub struct MerlinTranscript(pub merlin::Transcript);
// Merlin doesn't implement Debug so provide a stub which won't panic
impl Debug for MerlinTranscript {
fn fmt(&self, _: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { Ok(()) }
fn fmt(&self, _: &mut Formatter<'_>) -> Result<(), core::fmt::Error> { Ok(()) }
}
impl Transcript for MerlinTranscript {
fn domain_separate(&mut self, label: &[u8]) {
// Uses a challenge length of 64 bytes to support wide reduction on generated scalars
// From a security level standpoint, this should just be 32 bytes
// From a Merlin standpoint, this should be variable per call
// From a practical standpoint, this is a demo file not planned to be used and anything using
// this wrapper should be secure with this setting
type Challenge = [u8; 64];
fn new(name: &'static [u8]) -> Self {
MerlinTranscript(merlin::Transcript::new(name))
}
fn domain_separate(&mut self, label: &'static [u8]) {
self.append_message(b"dom-sep", label);
}
@@ -18,21 +29,15 @@ impl Transcript for MerlinTranscript {
self.0.append_message(label, message);
}
fn challenge(&mut self, label: &'static [u8]) -> Vec<u8> {
let mut challenge = vec![];
// Uses a challenge length of 64 bytes to support wide reduction on generated scalars
// From a security level standpoint, this should just be 32 bytes
// From a Merlin standpoint, this should be variable per call
// From a practical standpoint, this is a demo file not planned to be used and anything using
// this wrapper is fine without any settings it uses
challenge.resize(64, 0);
fn challenge(&mut self, label: &'static [u8]) -> Self::Challenge {
let mut challenge = [0; 64];
self.0.challenge_bytes(label, &mut challenge);
challenge
}
fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32] {
let mut seed = [0; 32];
transcript.challenge_bytes(label, &mut seed);
seed.copy_from_slice(&self.challenge(label)[.. 32]);
seed
}
}