git checkout -f next ./crypto
Proceeds to remove the eVRF DKG afterwards, keeping only what's relevant to this branch.

@@ -6,7 +6,7 @@ license = "MIT"
 repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dleq"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 edition = "2021"
-rust-version = "1.79"
+rust-version = "1.81"
 
 [package.metadata.docs.rs]
 all-features = true

@@ -18,7 +18,7 @@ workspace = true
 [dependencies]
 rustversion = "1"
 
-thiserror = { version = "1", optional = true }
+thiserror = { version = "2", default-features = false, optional = true }
 rand_core = { version = "0.6", default-features = false }
 
 zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
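
The substantive change in this hunk is the thiserror major-version bump with default features disabled. thiserror 2 supports no_std when built this way, with the derived impl targeting core::error::Error; that trait was stabilized in Rust 1.81, which would also explain the rust-version bump in the first hunk (an inference from the diff, not something it states). A minimal library-style sketch, assuming thiserror 2.x, with all item names illustrative:

#![no_std]

use thiserror::Error;

// With `default-features = false`, the derive still compiles and implements
// `core::error::Error` rather than `std::error::Error`.
#[derive(Debug, Error)]
enum SketchError {
  #[error("example failure")]
  Example,
}

// The derived impl satisfies `core::error::Error`, so the type is usable as a
// trait object without std.
fn _takes_core_error(_: &dyn core::error::Error) {}

fn _demo() {
  _takes_core_error(&SketchError::Example);
}
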
@@ -44,7 +44,7 @@ dalek-ff-group = { path = "../dalek-ff-group" }
 transcript = { package = "flexible-transcript", path = "../transcript", features = ["recommended"] }
 
 [features]
-std = ["rand_core/std", "zeroize/std", "digest/std", "transcript/std", "ff/std", "multiexp?/std"]
+std = ["thiserror?/std", "rand_core/std", "zeroize/std", "digest/std", "transcript/std", "ff/std", "multiexp?/std"]
 serialize = ["std"]
 
 # Needed for cross-group DLEqs
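
For reference, `thiserror?/std` is Cargo's weak dependency feature syntax: enabling this crate's `std` feature turns on thiserror's `std` feature only if the optional `thiserror` dependency is already enabled by some other feature, without activating the dependency itself. It's the same pattern the line already uses for `multiexp?/std`.
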
@@ -92,7 +92,7 @@ impl<G: PrimeGroup> Generators<G> {
 }
 
 /// Error for cross-group DLEq proofs.
-#[derive(Error, PartialEq, Eq, Debug)]
+#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]
 pub enum DLEqError {
   /// Invalid proof length.
   #[error("invalid proof length")]
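
Beyond reordering, the new derive adds Clone and Copy, so the error can be held and returned by value. A hypothetical round-trip of the derived traits, showing only the one variant visible in the hunk:

use thiserror::Error;

#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]
pub enum DLEqError {
  /// Invalid proof length.
  #[error("invalid proof length")]
  InvalidProofLength,
}

fn main() {
  let e = DLEqError::InvalidProofLength;
  // Copy: the value is duplicated freely, no clone() calls needed.
  let held = e;
  // PartialEq/Eq: callers can compare or match on the error directly.
  assert_eq!(held, DLEqError::InvalidProofLength);
  // Error: the Display impl comes from the #[error(...)] attribute.
  assert_eq!(e.to_string(), "invalid proof length");
}
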
@@ -37,11 +37,11 @@ pub(crate) fn challenge<T: Transcript, F: PrimeField>(transcript: &mut T) -> F {
   // Get a wide amount of bytes to safely reduce without bias
   // In most cases, <=1.5x bytes is enough. 2x is still standard and there's some theoretical
   // groups which may technically require more than 1.5x bytes for this to work as intended
-  let target_bytes = ((usize::try_from(F::NUM_BITS).unwrap() + 7) / 8) * 2;
+  let target_bytes = usize::try_from(F::NUM_BITS).unwrap().div_ceil(8) * 2;
   let mut challenge_bytes = transcript.challenge(b"challenge");
   let challenge_bytes_len = challenge_bytes.as_ref().len();
   // If the challenge is 32 bytes, and we need 64, we need two challenges
-  let needed_challenges = (target_bytes + (challenge_bytes_len - 1)) / challenge_bytes_len;
+  let needed_challenges = target_bytes.div_ceil(challenge_bytes_len);
 
   // The following algorithm should be equivalent to a wide reduction of the challenges,
   // interpreted as concatenated, big-endian byte string
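
Both replacements swap a hand-rolled ceiling division for usize::div_ceil (stable since Rust 1.73) without changing behavior. A quick standalone check of the equivalence, over illustrative ranges rather than any particular group's parameters:

fn main() {
  // Old: ((bits + 7) / 8) * 2  vs  new: bits.div_ceil(8) * 2
  for bits in 1usize ..= 521 {
    assert_eq!(((bits + 7) / 8) * 2, bits.div_ceil(8) * 2);
  }

  // Old: (target + (len - 1)) / len  vs  new: target.div_ceil(len)
  for target in 1usize ..= 128 {
    for len in 1usize ..= 64 {
      assert_eq!((target + (len - 1)) / len, target.div_ceil(len));
    }
  }

  println!("ceiling-division forms agree");
}

The 2x target the code computes is the wide-reduction trick the surrounding comments describe: reducing roughly twice as many uniform bytes as the modulus occupies leaves the resulting field element with only negligible bias, on the order of 2^-NUM_BITS.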