Update FROST signing to match the IETF draft

Modernizes dependencies
This commit is contained in:
Luke Parker
2022-04-23 03:49:30 -04:00
parent 76a6ff46be
commit e22dcb1441
18 changed files with 226 additions and 724 deletions

View File

@@ -1,13 +1,13 @@
use core::{marker::PhantomData, fmt::Debug};
use rand_core::{RngCore, CryptoRng};
use digest::Digest;
use group::Group;
use crate::{Curve, FrostError, sign};
pub trait Algorithm<C: Curve>: Clone + Debug {
/// Algorithm to use FROST with
pub trait Algorithm<C: Curve>: Clone {
/// The resulting type of the signatures this algorithm will produce
type Signature: Clone + Debug;
@@ -59,40 +59,24 @@ pub trait Algorithm<C: Curve>: Clone + Debug {
) -> bool;
}
pub trait Hram: PartialEq + Eq + Copy + Clone + Debug {
pub trait Hram<C: Curve>: Clone {
/// HRAM function to generate a challenge
/// H2 from the IETF draft despite having a different argument set (not pre-formatted)
#[allow(non_snake_case)]
fn hram<C: Curve>(R: &C::G, A: &C::G, m: &[u8]) -> C::F;
fn hram(R: &C::G, A: &C::G, m: &[u8]) -> C::F;
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Blake2bHram {}
impl Hram for Blake2bHram {
#[allow(non_snake_case)]
fn hram<C: Curve>(R: &C::G, A: &C::G, m: &[u8]) -> C::F {
C::F_from_bytes_wide(
blake2::Blake2b::new()
.chain(C::G_to_bytes(R))
.chain(C::G_to_bytes(A))
.chain(m)
.finalize()
.as_slice()
.try_into()
.expect("couldn't convert a 64-byte hash to a 64-byte array")
)
}
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Schnorr<C: Curve, H: Hram> {
#[derive(Clone)]
pub struct Schnorr<C: Curve, H: Hram<C>> {
c: Option<C::F>,
hram: PhantomData<H>,
_hram: PhantomData<H>,
}
impl<C: Curve, H: Hram> Schnorr<C, H> {
impl<C: Curve, H: Hram<C>> Schnorr<C, H> {
pub fn new() -> Schnorr<C, H> {
Schnorr {
c: None,
hram: PhantomData
_hram: PhantomData
}
}
}
@@ -104,7 +88,8 @@ pub struct SchnorrSignature<C: Curve> {
pub s: C::F,
}
impl<C: Curve, H: Hram> Algorithm<C> for Schnorr<C, H> {
/// Implementation of Schnorr signatures for use with FROST
impl<C: Curve, H: Hram<C>> Algorithm<C> for Schnorr<C, H> {
type Signature = SchnorrSignature<C>;
fn context(&self) -> Vec<u8> {
@@ -141,7 +126,7 @@ impl<C: Curve, H: Hram> Algorithm<C> for Schnorr<C, H> {
nonce: C::F,
msg: &[u8],
) -> C::F {
let c = H::hram::<C>(&nonce_sum, &params.group_key(), msg);
let c = H::hram(&nonce_sum, &params.group_key(), msg);
self.c = Some(c);
nonce + (params.secret_share() * c)

View File

@@ -1,13 +1,24 @@
use core::{convert::{TryFrom, TryInto}, cmp::min, fmt};
use core::{convert::TryFrom, cmp::min, fmt};
use rand_core::{RngCore, CryptoRng};
use blake2::{Digest, Blake2b};
use ff::{Field, PrimeField};
use group::Group;
use crate::{Curve, MultisigParams, MultisigKeys, FrostError};
/// Compute the challenge scalar for the round-1 DKG proof of knowledge.
///
/// Hashes, in order: the participant index `l` (as 8 little-endian bytes),
/// the session `context` string, the nonce commitment `R`, and `Am` (the
/// serialized commitments, whose first entry A is the key being proven).
/// The concatenation is reduced to a field element via `C::hash_to_F`.
/// Called by both the prover (`generate_key_r1`) and the verifier
/// (`verify_r1`), so any change here must be made for both sides at once.
#[allow(non_snake_case)]
fn challenge<C: Curve>(l: usize, context: &str, R: &[u8], Am: &[u8]) -> C::F {
// 8 bytes for the u64-encoded index plus the three variable-length inputs
let mut c = Vec::with_capacity(8 + context.len() + R.len() + Am.len());
// unwrap: l is a participant index, bounded by n, which the callers already
// check fits in a u64 (see the "Bounded by n" note at the verify_r1 call site)
c.extend(&u64::try_from(l).unwrap().to_le_bytes());
c.extend(context.as_bytes());
c.extend(R); // R
c.extend(Am); // A of the first commitment, which is what we're proving we have the private key
// for
// m of the rest of the commitments, authenticating them
C::hash_to_F(&c)
}
// Implements steps 1 through 3 of round 1 of FROST DKG. Returns the coefficients, commitments, and
// the serialized commitments to be broadcasted over an authenticated channel to all parties
// TODO: This potentially could return a much more robust serialized message, including a signature
@@ -44,19 +55,7 @@ fn generate_key_r1<R: RngCore + CryptoRng, C: Curve>(
let k = C::F::random(rng);
#[allow(non_snake_case)]
let R = C::generator_table() * k;
let c = C::F_from_bytes_wide(
Blake2b::new()
.chain(&u64::try_from(params.i).unwrap().to_le_bytes())
.chain(context.as_bytes())
.chain(&C::G_to_bytes(&R)) // R
.chain(&serialized) // A of the first commitment, which is what we're proving we have
// the private key for
// m of the rest of the commitments, authenticating them
.finalize()
.as_slice()
.try_into()
.expect("couldn't convert a 64-byte hash to a 64-byte array")
);
let c = challenge::<C>(params.i, context, &C::G_to_bytes(&R), &serialized);
let s = k + (coefficients[0] * c);
serialized.extend(&C::G_to_bytes(&R));
@@ -155,17 +154,11 @@ fn verify_r1<R: RngCore + CryptoRng, C: Curve>(
);
points.push(C::generator());
let c = C::F_from_bytes_wide(
Blake2b::new()
// Bounded by n which is already checked to be within the u64 range
.chain(&u64::try_from(l).unwrap().to_le_bytes())
.chain(context.as_bytes())
.chain(&serialized[l][commitments_len .. commitments_len + C::G_len()])
.chain(&serialized[l][0 .. commitments_len])
.finalize()
.as_slice()
.try_into()
.expect("couldn't convert a 64-byte hash to a 64-byte array")
let c = challenge::<C>(
l,
context,
&serialized[l][commitments_len .. commitments_len + C::G_len()],
&serialized[l][0 .. commitments_len]
);
if first {
@@ -195,17 +188,11 @@ fn verify_r1<R: RngCore + CryptoRng, C: Curve>(
&serialized[l][commitments_len + C::G_len() .. serialized[l].len()]
).map_err(|_| FrostError::InvalidProofOfKnowledge(l))?;
let c = C::F_from_bytes_wide(
Blake2b::new()
// Bounded by n which is already checked to be within the u64 range
.chain(&u64::try_from(l).unwrap().to_le_bytes())
.chain(context.as_bytes())
.chain(&serialized[l][commitments_len .. commitments_len + C::G_len()])
.chain(&serialized[l][0 .. commitments_len])
.finalize()
.as_slice()
.try_into()
.expect("couldn't convert a 64-byte hash to a 64-byte array")
let c = challenge::<C>(
l,
context,
&serialized[l][commitments_len .. commitments_len + C::G_len()],
&serialized[l][0 .. commitments_len]
);
if R != ((C::generator_table() * s) + (commitments[l][0] * (C::F::zero() - &c))) {
@@ -389,6 +376,7 @@ impl fmt::Display for State {
}
/// State machine which manages key generation
#[allow(non_snake_case)]
pub struct StateMachine<C: Curve> {
params: MultisigParams,
context: String,
@@ -396,7 +384,7 @@ pub struct StateMachine<C: Curve> {
coefficients: Option<Vec<C::F>>,
our_commitments: Option<Vec<C::G>>,
secret: Option<C::F>,
commitments: Option<Vec<Vec<C::G>>>,
commitments: Option<Vec<Vec<C::G>>>
}
impl<C: Curve> StateMachine<C> {
@@ -410,7 +398,7 @@ impl<C: Curve> StateMachine<C> {
coefficients: None,
our_commitments: None,
secret: None,
commitments: None,
commitments: None
}
}

View File

@@ -14,11 +14,10 @@ pub mod sign;
pub enum CurveError {
#[error("invalid length for data (expected {0}, got {0})")]
InvalidLength(usize, usize),
// Push towards hex encoding in error messages
#[error("invalid scalar ({0})")]
InvalidScalar(String),
#[error("invalid point ({0})")]
InvalidPoint(String),
#[error("invalid scalar")]
InvalidScalar,
#[error("invalid point")]
InvalidPoint,
}
/// Unified trait to manage a field/group
@@ -58,6 +57,16 @@ pub trait Curve: Clone + Copy + PartialEq + Eq + Debug {
// This could also be written as -> Option<C::G> with None for not implemented
fn multiexp_vartime(scalars: &[Self::F], points: &[Self::G]) -> Self::G;
/// Hash the message as needed to calculate the binding factor
/// H3 from the IETF draft
fn hash_msg(msg: &[u8]) -> Vec<u8>;
/// Field element from hash, used in key generation and to calculate the binding factor
/// H1 from the IETF draft
/// Key generation uses it as if it's H2 to generate a challenge for a Proof of Knowledge
#[allow(non_snake_case)]
fn hash_to_F(data: &[u8]) -> Self::F;
// The following methods would optimally be F:: and G:: yet developers can't control F/G
// They can control a trait they pass into this library
@@ -82,10 +91,6 @@ pub trait Curve: Clone + Copy + PartialEq + Eq + Debug {
#[allow(non_snake_case)]
fn F_from_le_slice(slice: &[u8]) -> Result<Self::F, CurveError>;
/// Field element from slice. Must support reducing the input into a valid field element
#[allow(non_snake_case)]
fn F_from_le_slice_unreduced(slice: &[u8]) -> Self::F;
/// Group element from slice. Should be canonical
#[allow(non_snake_case)]
fn G_from_slice(slice: &[u8]) -> Result<Self::G, CurveError>;
@@ -97,10 +102,6 @@ pub trait Curve: Clone + Copy + PartialEq + Eq + Debug {
/// Obtain a vector of the byte encoding of G
#[allow(non_snake_case)]
fn G_to_bytes(g: &Self::G) -> Vec<u8>;
/// Takes 64-bytes and returns a scalar reduced mod n
#[allow(non_snake_case)]
fn F_from_bytes_wide(bytes: [u8; 64]) -> Self::F;
}
/// Parameters for a multisig

View File

@@ -2,19 +2,12 @@ use core::{convert::{TryFrom, TryInto}, cmp::min, fmt};
use std::rc::Rc;
use rand_core::{RngCore, CryptoRng};
use blake2::{Digest, Blake2b};
use ff::{Field, PrimeField};
use group::Group;
use crate::{Curve, MultisigParams, MultisigKeys, FrostError, algorithm::Algorithm};
// Matches ZCash's FROST Jubjub implementation
const BINDING_DST: &'static [u8; 9] = b"FROST_rho";
// Doesn't match ZCash except for their desire for messages to be hashed in advance before used
// here and domain separated
const BINDING_MESSAGE_DST: &'static [u8; 17] = b"FROST_rho_message";
/// Calculate the lagrange coefficient
pub fn lagrange<F: PrimeField>(
i: usize,
@@ -198,7 +191,18 @@ fn sign_with_share<C: Curve, A: Algorithm<C>>(
#[allow(non_snake_case)]
let mut B = Vec::with_capacity(multisig_params.n + 1);
B.push(None);
let mut b: Vec<u8> = vec![];
// Commitments + a presumed 32-byte hash of the message
let mut b: Vec<u8> = Vec::with_capacity((multisig_params.n * 2 * C::G_len()) + 32);
// If the offset functionality provided by this library is in use, include it in the binding
// factor
if params.keys.offset.is_some() {
b.extend(&C::F_to_le_bytes(&params.keys.offset.unwrap()));
}
// Also include any context the algorithm may want to specify
b.extend(&params.algorithm.context());
for l in 1 ..= multisig_params.n {
if l == multisig_params.i {
if commitments[l].is_some() {
@@ -206,8 +210,7 @@ fn sign_with_share<C: Curve, A: Algorithm<C>>(
}
B.push(Some(our_preprocess.commitments));
// Slightly more robust
b.extend(&u64::try_from(l).unwrap().to_le_bytes());
b.extend(&u16::try_from(l).unwrap().to_le_bytes());
b.extend(&our_preprocess.serialized[0 .. commit_len]);
continue;
}
@@ -237,46 +240,26 @@ fn sign_with_share<C: Curve, A: Algorithm<C>>(
let E = C::G_from_slice(&commitments[C::G_len() .. commitments_len])
.map_err(|_| FrostError::InvalidCommitment(l))?;
B.push(Some([D, E]));
b.extend(&u64::try_from(l).unwrap().to_le_bytes());
b.extend(&u16::try_from(l).unwrap().to_le_bytes());
b.extend(&commitments[0 .. commit_len]);
}
let offset = if params.keys.offset.is_some() {
C::F_to_le_bytes(&params.keys.offset.unwrap())
} else {
vec![]
};
let context = params.algorithm.context();
let mut p = Vec::with_capacity(multisig_params.t);
let mut pi = C::F::zero();
for l in &params.view.included {
p.push(
C::F_from_bytes_wide(
Blake2b::new()
.chain(BINDING_DST)
.chain(u64::try_from(*l).unwrap().to_le_bytes())
.chain(Blake2b::new().chain(BINDING_MESSAGE_DST).chain(msg).finalize())
.chain(&offset)
.chain(&context)
.chain(&b)
.finalize()
.as_slice()
.try_into()
.expect("couldn't convert a 64-byte hash to a 64-byte array")
)
);
b.extend(&C::hash_msg(&msg));
let b = C::hash_to_F(&b);
let view = &params.view;
let view = &params.view;
for l in &params.view.included {
params.algorithm.process_addendum(
view,
*l,
B[*l].as_ref().unwrap(),
&p[p.len() - 1],
&b,
if *l == multisig_params.i {
pi = p[p.len() - 1];
&our_preprocess.serialized[commitments_len .. our_preprocess.serialized.len()]
} else {
&commitments[*l].as_ref().unwrap()[commitments_len .. commitments[*l].as_ref().unwrap().len()]
&commitments[*l].as_ref().unwrap()[
commitments_len .. commitments[*l].as_ref().unwrap().len()
]
}
)?;
}
@@ -288,7 +271,7 @@ fn sign_with_share<C: Curve, A: Algorithm<C>>(
for i in 0 .. params.view.included.len() {
let commitments = B[params.view.included[i]].unwrap();
#[allow(non_snake_case)]
let this_R = commitments[0] + (commitments[1] * p[i]);
let this_R = commitments[0] + (commitments[1] * b);
Ris.push(this_R);
R += this_R;
}
@@ -297,7 +280,7 @@ fn sign_with_share<C: Curve, A: Algorithm<C>>(
let share = params.algorithm.sign_share(
view,
R,
our_preprocess.nonces[0] + (our_preprocess.nonces[1] * pi),
our_preprocess.nonces[0] + (our_preprocess.nonces[1] * b),
msg
);
Ok((Package { Ris, R, share }, C::F_to_le_bytes(&share)))