Merge branch 'develop' into next

This resolves the conflicts and gets the workspace `Cargo.toml`s to not be
invalid. It doesn't actually get clippy to pass again yet.

Does move `crypto/dkg/src/evrf` into a new `crypto/dkg/evrf` crate (which does
not yet compile).
This commit is contained in:
Luke Parker
2025-08-23 15:04:39 -04:00
319 changed files with 4016 additions and 26990 deletions

View File

@@ -12,7 +12,7 @@ runs:
steps:
- name: Bitcoin Daemon Cache
id: cache-bitcoind
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: bitcoin.tar.gz
key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

View File

@@ -7,13 +7,15 @@ runs:
- name: Remove unused packages
shell: bash
run: |
sudo apt remove -y "*msbuild*" "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
sudo apt remove -y "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
sudo apt remove -y "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*"
sudo apt remove -y "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*"
sudo apt remove -y --allow-remove-essential -f shim-signed *python3*
# This removal command requires the prior removals due to unmet dependencies otherwise
sudo apt remove -y "*qemu*" "*sql*" "*texinfo*" "*imagemagick*"
sudo apt autoremove -y
sudo apt clean
docker system prune -a --volumes
# Reinstall python3 as a general dependency of a functional operating system
sudo apt install python3
if: runner.os == 'Linux'
- name: Remove unused packages
@@ -41,9 +43,34 @@ runs:
- name: Install solc
shell: bash
run: |
cargo install svm-rs
cargo +1.89 install svm-rs --version =0.5.18
svm install 0.8.26
svm use 0.8.26
- name: Remove preinstalled Docker
shell: bash
run: |
docker system prune -a --volumes
sudo apt remove -y *docker*
# Install uidmap which will be required for the explicitly installed Docker
sudo apt install uidmap
if: runner.os == 'Linux'
- name: Update system dependencies
shell: bash
run: |
sudo apt update -y
sudo apt upgrade -y
sudo apt autoremove -y
sudo apt clean
if: runner.os == 'Linux'
- name: Install rootless Docker
uses: docker/setup-docker-action@b60f85385d03ac8acfca6d9996982511d8620a19
with:
rootless: true
set-host: true
if: runner.os == 'Linux'
# - name: Cache Rust
# uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43

View File

@@ -12,7 +12,7 @@ runs:
steps:
- name: Monero Wallet RPC Cache
id: cache-monero-wallet-rpc
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: monero-wallet-rpc
key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

View File

@@ -12,7 +12,7 @@ runs:
steps:
- name: Monero Daemon Cache
id: cache-monerod
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: /usr/bin/monerod
key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

View File

@@ -1 +1 @@
nightly-2025-02-01
nightly-2025-08-01

View File

@@ -32,6 +32,7 @@ jobs:
-p dalek-ff-group \
-p minimal-ed448 \
-p ciphersuite \
-p ciphersuite-kp256 \
-p multiexp \
-p schnorr-signatures \
-p dleq \
@@ -40,5 +41,11 @@ jobs:
-p ec-divisors \
-p generalized-bulletproofs-ec-gadgets \
-p dkg \
-p dkg-recovery \
-p dkg-dealer \
-p dkg-promote \
-p dkg-musig \
-p dkg-pedpop \
-p dkg-evrf \
-p modular-frost \
-p frost-schnorrkel

View File

@@ -12,13 +12,13 @@ jobs:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Advisory Cache
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: ~/.cargo/advisory-db
key: rust-advisory-db
- name: Install cargo deny
run: cargo install --locked cargo-deny
run: cargo +1.89 install cargo-deny --version =0.18.3
- name: Run cargo deny
run: cargo deny -L error --all-features check
run: cargo deny -L error --all-features check --hide-inclusion-graph

View File

@@ -26,7 +26,7 @@ jobs:
uses: ./.github/actions/build-dependencies
- name: Install nightly rust
run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32-unknown-unknown -c clippy
run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-src -c clippy
- name: Run Clippy
run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module
@@ -46,16 +46,16 @@ jobs:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Advisory Cache
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: ~/.cargo/advisory-db
key: rust-advisory-db
- name: Install cargo deny
run: cargo install --locked cargo-deny
run: cargo +1.89 install cargo-deny --version =0.18.3
- name: Run cargo deny
run: cargo deny -L error --all-features check
run: cargo deny -L error --all-features check --hide-inclusion-graph
fmt:
runs-on: ubuntu-latest
@@ -88,8 +88,8 @@ jobs:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Verify all dependencies are in use
run: |
cargo install cargo-machete
cargo machete
cargo +1.89 install cargo-machete --version =0.8.0
cargo +1.89 machete
slither:
runs-on: ubuntu-latest

View File

@@ -1,72 +0,0 @@
name: Monero Tests
on:
push:
branches:
- develop
paths:
- "networks/monero/**"
- "processor/**"
pull_request:
paths:
- "networks/monero/**"
- "processor/**"
workflow_dispatch:
jobs:
# Only run these once since they will be consistent regardless of any node
unit-tests:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Test Dependencies
uses: ./.github/actions/test-dependencies
- name: Run Unit Tests Without Features
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-io --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-generators --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-primitives --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-mlsag --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-clsag --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-borromean --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-bulletproofs --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-rpc --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-address --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --lib
# Doesn't run unit tests with features as the tests workflow will
integration-tests:
runs-on: ubuntu-latest
# Test against all supported protocol versions
strategy:
matrix:
version: [v0.17.3.2, v0.18.3.4]
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Test Dependencies
uses: ./.github/actions/test-dependencies
with:
monero-version: ${{ matrix.version }}
- name: Run Integration Tests Without Features
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --test '*'
- name: Run Integration Tests
# Don't run if the tests workflow also will
if: ${{ matrix.version != 'v0.18.3.4' }}
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --all-features --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --all-features --test '*'

View File

@@ -34,16 +34,3 @@ jobs:
-p ethereum-schnorr-contract \
-p alloy-simple-request-transport \
-p serai-ethereum-relayer \
-p monero-io \
-p monero-generators \
-p monero-primitives \
-p monero-mlsag \
-p monero-clsag \
-p monero-borromean \
-p monero-bulletproofs \
-p monero-serai \
-p monero-rpc \
-p monero-simple-request-rpc \
-p monero-address \
-p monero-wallet \
-p monero-serai-verify-chain

View File

@@ -69,7 +69,7 @@ jobs:
uses: ./.github/actions/build-dependencies
- name: Build Rust docs
run: |
rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32-unknown-unknown -c rust-docs
rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-docs
RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --all-features
mv target/doc docs/_site/rust

2398
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -29,6 +29,7 @@ members = [
"crypto/dalek-ff-group",
"crypto/ed448",
"crypto/ciphersuite",
"crypto/ciphersuite/kp256",
"crypto/multiexp",
"crypto/schnorr",
@@ -36,12 +37,14 @@ members = [
"crypto/evrf/secq256k1",
"crypto/evrf/embedwards25519",
"crypto/evrf/generalized-bulletproofs",
"crypto/evrf/circuit-abstraction",
"crypto/evrf/divisors",
"crypto/evrf/ec-gadgets",
"crypto/dkg",
"crypto/dkg/recovery",
"crypto/dkg/dealer",
"crypto/dkg/promote",
"crypto/dkg/musig",
"crypto/dkg/pedpop",
"crypto/dkg/evrf",
"crypto/frost",
"crypto/schnorrkel",
@@ -52,20 +55,6 @@ members = [
"networks/ethereum/alloy-simple-request-transport",
"networks/ethereum/relayer",
"networks/monero/io",
"networks/monero/generators",
"networks/monero/primitives",
"networks/monero/ringct/mlsag",
"networks/monero/ringct/clsag",
"networks/monero/ringct/borromean",
"networks/monero/ringct/bulletproofs",
"networks/monero",
"networks/monero/rpc",
"networks/monero/rpc/simple-request",
"networks/monero/wallet/address",
"networks/monero/wallet",
"networks/monero/verify-chain",
"message-queue",
"processor/messages",
@@ -167,21 +156,29 @@ secq256k1 = { opt-level = 3 }
embedwards25519 = { opt-level = 3 }
generalized-bulletproofs = { opt-level = 3 }
generalized-bulletproofs-circuit-abstraction = { opt-level = 3 }
ec-divisors = { opt-level = 3 }
generalized-bulletproofs-ec-gadgets = { opt-level = 3 }
dkg = { opt-level = 3 }
monero-generators = { opt-level = 3 }
monero-borromean = { opt-level = 3 }
monero-bulletproofs = { opt-level = 3 }
monero-mlsag = { opt-level = 3 }
monero-clsag = { opt-level = 3 }
monero-oxide = { opt-level = 3 }
[profile.release]
panic = "unwind"
overflow-checks = true
[patch.crates-io]
# Dependencies from monero-oxide which originate from within our own tree
std-shims = { path = "common/std-shims" }
simple-request = { path = "common/request" }
multiexp = { path = "crypto/multiexp" }
flexible-transcript = { path = "crypto/transcript" }
dalek-ff-group = { path = "crypto/dalek-ff-group" }
minimal-ed448 = { path = "crypto/ed448" }
modular-frost = { path = "crypto/frost" }
# https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }
@@ -209,6 +206,8 @@ directories-next = { path = "patches/directories-next" }
unwrap_or_default = "allow"
map_unwrap_or = "allow"
needless_continue = "allow"
manual_is_multiple_of = "allow"
incompatible_msrv = "allow" # Manually verified with a GitHub workflow
borrow_as_ptr = "deny"
cast_lossless = "deny"
cast_possible_truncation = "deny"

View File

@@ -59,7 +59,6 @@ issued at the discretion of the Immunefi program managers.
- [Website](https://serai.exchange/): https://serai.exchange/
- [Immunefi](https://immunefi.com/bounty/serai/): https://immunefi.com/bounty/serai/
- [Twitter](https://twitter.com/SeraiDEX): https://twitter.com/SeraiDEX
- [Mastodon](https://cryptodon.lol/@serai): https://cryptodon.lol/@serai
- [Discord](https://discord.gg/mpEUtJR3vz): https://discord.gg/mpEUtJR3vz
- [Matrix](https://matrix.to/#/#serai:matrix.org): https://matrix.to/#/#serai:matrix.org
- [Reddit](https://www.reddit.com/r/SeraiDEX/): https://www.reddit.com/r/SeraiDEX/

View File

@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/simple-requ
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["http", "https", "async", "request", "ssl"]
edition = "2021"
rust-version = "1.71"
rust-version = "1.70"
[package.metadata.docs.rs]
all-features = true

View File

@@ -1,13 +1,13 @@
[package]
name = "std-shims"
version = "0.1.1"
version = "0.1.4"
description = "A series of std shims to make alloc more feasible"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["nostd", "no_std", "alloc", "io"]
edition = "2021"
rust-version = "1.80"
rust-version = "1.64"
[package.metadata.docs.rs]
all-features = true
@@ -17,7 +17,8 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true
[dependencies]
spin = { version = "0.9", default-features = false, features = ["use_ticket_mutex", "lazy"] }
rustversion = { version = "1", default-features = false }
spin = { version = "0.10", default-features = false, features = ["use_ticket_mutex", "once", "lazy"] }
hashbrown = { version = "0.15", default-features = false, features = ["default-hasher", "inline-more"] }
[features]

View File

@@ -3,4 +3,9 @@
A crate which passes through to std when the default `std` feature is enabled,
yet provides a series of shims when it isn't.
`HashSet` and `HashMap` are provided via `hashbrown`.
No guarantee of one-to-one parity is provided. The shims provided aim to be sufficient for the
average case.
`HashSet` and `HashMap` are provided via `hashbrown`. Synchronization primitives are provided via
`spin` (avoiding a requirement on `critical-section`).
Types are not guaranteed to be one-to-one with their `std` counterparts.

View File

@@ -11,3 +11,64 @@ pub mod io;
pub use alloc::vec;
pub use alloc::str;
pub use alloc::string;
pub mod prelude {
#[rustversion::before(1.73)]
#[doc(hidden)]
pub trait StdShimsDivCeil {
fn div_ceil(self, rhs: Self) -> Self;
}
#[rustversion::before(1.73)]
mod impl_divceil {
use super::StdShimsDivCeil;
impl StdShimsDivCeil for u8 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for u16 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for u32 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for u64 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for u128 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for usize {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
}
#[cfg(feature = "std")]
#[rustversion::before(1.74)]
#[doc(hidden)]
pub trait StdShimsIoErrorOther {
fn other<E>(error: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>;
}
#[cfg(feature = "std")]
#[rustversion::before(1.74)]
impl StdShimsIoErrorOther for std::io::Error {
fn other<E>(error: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
std::io::Error::new(std::io::ErrorKind::Other, error)
}
}
}

View File

@@ -25,7 +25,11 @@ mod mutex_shim {
}
pub use mutex_shim::{ShimMutex as Mutex, MutexGuard};
#[cfg(feature = "std")]
pub use std::sync::LazyLock;
#[cfg(not(feature = "std"))]
pub use spin::Lazy as LazyLock;
#[rustversion::before(1.80)]
#[cfg(feature = "std")]
pub use spin::Lazy as LazyLock;
#[rustversion::since(1.80)]
#[cfg(feature = "std")]
pub use std::sync::LazyLock;

View File

@@ -25,8 +25,12 @@ rand_core = { version = "0.6", default-features = false, features = ["std"] }
blake2 = { version = "0.10", default-features = false, features = ["std"] }
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] }
dkg = { path = "../crypto/dkg", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] }
dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"] }
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std", "aggregate"] }
dkg-musig = { path = "../crypto/dkg/musig", default-features = false, features = ["std"] }
frost = { package = "modular-frost", path = "../crypto/frost" }
frost-schnorrkel = { path = "../crypto/schnorrkel" }
hex = { version = "0.4", default-features = false, features = ["std"] }

View File

@@ -4,9 +4,13 @@ use std::{sync::Arc, collections::HashMap, time::Instant};
use zeroize::{Zeroize, Zeroizing};
use rand_core::{RngCore, OsRng};
use dalek_ff_group::Ristretto;
use ciphersuite::{
group::{ff::PrimeField, GroupEncoding},
Ciphersuite, Ristretto,
group::{
ff::{Field, PrimeField},
GroupEncoding,
},
Ciphersuite,
};
use borsh::BorshDeserialize;

View File

@@ -27,8 +27,8 @@ rand_chacha = { version = "0.3", default-features = false, features = ["std"] }
blake2 = { version = "0.10", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["std", "recommended"] }
ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", version = "0.4", default-features = false, features = ["std", "ristretto"] }
schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", version = "0.5", default-features = false, features = ["std"] }
ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", version = "0.4", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", version = "0.5", default-features = false, features = ["std", "aggregate"] }
hex = { version = "0.4", default-features = false, features = ["std"] }
log = { version = "0.4", default-features = false, features = ["std"] }

View File

@@ -1,6 +1,7 @@
use std::collections::{VecDeque, HashSet};
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use dalek_ff_group::Ristretto;
use ciphersuite::{group::GroupEncoding, Ciphersuite};
use serai_db::{Get, DbTxn, Db};

View File

@@ -1,6 +1,7 @@
use std::collections::HashMap;
use ciphersuite::{Ciphersuite, Ristretto};
use dalek_ff_group::Ristretto;
use ciphersuite::Ciphersuite;
use serai_db::{DbTxn, Db};

View File

@@ -9,12 +9,13 @@ use rand_chacha::ChaCha12Rng;
use transcript::{Transcript, RecommendedTranscript};
use dalek_ff_group::Ristretto;
use ciphersuite::{
group::{
GroupEncoding,
ff::{Field, PrimeField},
},
Ciphersuite, Ristretto,
Ciphersuite,
};
use schnorr::{
SchnorrSignature,

View File

@@ -4,7 +4,8 @@ use scale::{Encode, Decode, IoReader};
use blake2::{Digest, Blake2s256};
use ciphersuite::{Ciphersuite, Ristretto};
use dalek_ff_group::Ristretto;
use ciphersuite::Ciphersuite;
use crate::{
transaction::{Transaction, TransactionKind, TransactionError},

View File

@@ -1,9 +1,11 @@
use std::{sync::Arc, io, collections::HashMap, fmt::Debug};
use blake2::{Digest, Blake2s256};
use dalek_ff_group::Ristretto;
use ciphersuite::{
group::{ff::Field, Group},
Ciphersuite, Ristretto,
Ciphersuite,
};
use schnorr::SchnorrSignature;

View File

@@ -10,7 +10,8 @@ use rand::rngs::OsRng;
use blake2::{Digest, Blake2s256};
use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
use dalek_ff_group::Ristretto;
use ciphersuite::{group::ff::Field, Ciphersuite};
use serai_db::{DbTxn, Db, MemDb};

View File

@@ -3,7 +3,8 @@ use std::{sync::Arc, collections::HashMap};
use zeroize::Zeroizing;
use rand::{RngCore, rngs::OsRng};
use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
use dalek_ff_group::Ristretto;
use ciphersuite::{group::ff::Field, Ciphersuite};
use tendermint::ext::Commit;

View File

@@ -6,9 +6,10 @@ use rand::{RngCore, CryptoRng, rngs::OsRng};
use blake2::{Digest, Blake2s256};
use dalek_ff_group::Ristretto;
use ciphersuite::{
group::{ff::Field, Group},
Ciphersuite, Ristretto,
Ciphersuite,
};
use schnorr::SchnorrSignature;

View File

@@ -2,7 +2,8 @@ use rand::rngs::OsRng;
use blake2::{Digest, Blake2s256};
use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
use dalek_ff_group::Ristretto;
use ciphersuite::{group::ff::Field, Ciphersuite};
use crate::{
ReadWrite,

View File

@@ -3,7 +3,8 @@ use std::sync::Arc;
use zeroize::Zeroizing;
use rand::{RngCore, rngs::OsRng};
use ciphersuite::{Ristretto, Ciphersuite, group::ff::Field};
use dalek_ff_group::Ristretto;
use ciphersuite::{Ciphersuite, group::ff::Field};
use scale::Encode;

View File

@@ -5,9 +5,10 @@ use zeroize::Zeroizing;
use rand_core::{RngCore, CryptoRng};
use blake2::{digest::typenum::U32, Digest, Blake2b};
use dalek_ff_group::Ristretto;
use ciphersuite::{
group::{ff::Field, Group, GroupEncoding},
Ciphersuite, Ristretto,
group::{Group, GroupEncoding},
Ciphersuite,
};
use schnorr::SchnorrSignature;

View File

@@ -1,13 +1,13 @@
[package]
name = "ciphersuite"
version = "0.4.1"
version = "0.4.2"
description = "Ciphersuites built around ff/group"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ciphersuite"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["ciphersuite", "ff", "group"]
edition = "2021"
rust-version = "1.80"
rust-version = "1.66"
[package.metadata.docs.rs]
all-features = true
@@ -24,22 +24,12 @@ rand_core = { version = "0.6", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["derive"] }
subtle = { version = "^2.4", default-features = false }
digest = { version = "0.10", default-features = false }
digest = { version = "0.10", default-features = false, features = ["core-api"] }
transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false }
sha2 = { version = "0.10", default-features = false, optional = true }
sha3 = { version = "0.10", default-features = false, optional = true }
ff = { version = "0.13", default-features = false, features = ["bits"] }
group = { version = "0.13", default-features = false }
dalek-ff-group = { path = "../dalek-ff-group", version = "0.4", default-features = false, optional = true }
elliptic-curve = { version = "0.13", default-features = false, features = ["hash2curve"], optional = true }
p256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"], optional = true }
k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"], optional = true }
minimal-ed448 = { path = "../ed448", version = "0.4", default-features = false, optional = true }
[dev-dependencies]
hex = { version = "0.4", default-features = false, features = ["std"] }
@@ -59,27 +49,8 @@ std = [
"digest/std",
"transcript/std",
"sha2?/std",
"sha3?/std",
"ff/std",
"dalek-ff-group?/std",
"elliptic-curve?/std",
"p256?/std",
"k256?/std",
"minimal-ed448?/std",
]
dalek = ["sha2", "dalek-ff-group"]
ed25519 = ["dalek"]
ristretto = ["dalek"]
kp256 = ["sha2", "elliptic-curve"]
p256 = ["kp256", "dep:p256"]
secp256k1 = ["kp256", "k256"]
ed448 = ["sha3", "minimal-ed448"]
default = ["std"]

View File

@@ -21,6 +21,8 @@ Their `hash_to_F` is the
[IETF's hash to curve](https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html),
yet applied to their scalar field.
Please see the [`ciphersuite-kp256`](https://docs.rs/ciphersuite-kp256) crate for more info.
### Ed25519/Ristretto
Ed25519/Ristretto are offered via
@@ -33,6 +35,8 @@ the draft
[RFC-RISTRETTO](https://www.ietf.org/archive/id/draft-irtf-cfrg-ristretto255-decaf448-05.html).
The domain-separation tag is naively prefixed to the message.
Please see the [`dalek-ff-group`](https://docs.rs/dalek-ff-group) crate for more info.
### Ed448
Ed448 is offered via [minimal-ed448](https://crates.io/crates/minimal-ed448), an
@@ -42,3 +46,5 @@ to its prime-order subgroup.
Its `hash_to_F` is the wide reduction of SHAKE256, with a 114-byte output, as
used in [RFC-8032](https://www.rfc-editor.org/rfc/rfc8032). The
domain-separation tag is naively prefixed to the message.
Please see the [`minimal-ed448`](https://docs.rs/minimal-ed448) crate for more info.

View File

@@ -0,0 +1,55 @@
[package]
name = "ciphersuite-kp256"
version = "0.4.0"
description = "Ciphersuites built around ff/group"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ciphersuite/kp256"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["ciphersuite", "ff", "group"]
edition = "2021"
rust-version = "1.66"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
rand_core = { version = "0.6", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["derive"] }
sha2 = { version = "0.10", default-features = false }
elliptic-curve = { version = "0.13", default-features = false, features = ["hash2curve"] }
p256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"] }
k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"] }
ciphersuite = { path = "../", version = "0.4", default-features = false }
[dev-dependencies]
hex = { version = "0.4", default-features = false, features = ["std"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }
ff-group-tests = { version = "0.13", path = "../../ff-group-tests" }
[features]
alloc = ["ciphersuite/alloc"]
std = [
"rand_core/std",
"zeroize/std",
"sha2/std",
"elliptic-curve/std",
"p256/std",
"k256/std",
"ciphersuite/std",
]
default = ["std"]

View File

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2023-2024 Luke Parker
Copyright (c) 2021-2023 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@@ -0,0 +1,3 @@
# Ciphersuite {k, p}256
secp256k1 and P-256 ciphersuites around k256 and p256.

View File

@@ -1,16 +1,17 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
use zeroize::Zeroize;
use sha2::Sha256;
use group::ff::PrimeField;
use elliptic_curve::{
generic_array::GenericArray,
bigint::{NonZero, CheckedAdd, Encoding, U384, U512},
bigint::{NonZero, CheckedAdd, Encoding, U384},
hash2curve::{Expander, ExpandMsg, ExpandMsgXmd},
};
use crate::Ciphersuite;
use ciphersuite::{group::ff::PrimeField, Ciphersuite};
macro_rules! kp_curve {
(
@@ -31,22 +32,6 @@ macro_rules! kp_curve {
$lib::ProjectivePoint::GENERATOR
}
fn reduce_512(scalar: [u8; 64]) -> Self::F {
let mut modulus = [0; 64];
modulus[32 ..].copy_from_slice(&(Self::F::ZERO - Self::F::ONE).to_bytes());
let modulus = U512::from_be_slice(&modulus).checked_add(&U512::ONE).unwrap();
let mut wide =
U512::from_be_bytes(scalar).rem(&NonZero::new(modulus).unwrap()).to_be_bytes();
let mut array = *GenericArray::from_slice(&wide[32 ..]);
let res = $lib::Scalar::from_repr(array).unwrap();
wide.zeroize();
array.zeroize();
res
}
fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F {
// While one of these two libraries does support directly hashing to the Scalar field, the
// other doesn't. While that's probably an oversight, this is a universally working method
@@ -123,12 +108,9 @@ fn test_oversize_dst<C: Ciphersuite>() {
/// Ciphersuite for Secp256k1.
///
/// hash_to_F is implemented via the IETF draft for hash to curve's hash_to_field (v16).
#[cfg(feature = "secp256k1")]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct Secp256k1;
#[cfg(feature = "secp256k1")]
kp_curve!("secp256k1", k256, Secp256k1, b"secp256k1");
#[cfg(feature = "secp256k1")]
#[test]
fn test_secp256k1() {
ff_group_tests::group::test_prime_group_bits::<_, k256::ProjectivePoint>(&mut rand_core::OsRng);
@@ -161,12 +143,9 @@ fn test_secp256k1() {
/// Ciphersuite for P-256.
///
/// hash_to_F is implemented via the IETF draft for hash to curve's hash_to_field (v16).
#[cfg(feature = "p256")]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct P256;
#[cfg(feature = "p256")]
kp_curve!("p256", p256, P256, b"P-256");
#[cfg(feature = "p256")]
#[test]
fn test_p256() {
ff_group_tests::group::test_prime_group_bits::<_, p256::ProjectivePoint>(&mut rand_core::OsRng);

View File

@@ -2,7 +2,7 @@
Ciphersuites for elliptic curves premised on ff/group.
This library, except for the not recommended Ed448 ciphersuite, was
This library was
[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),
culminating in commit
[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).

View File

@@ -4,6 +4,9 @@
use core::fmt::Debug;
#[cfg(any(feature = "alloc", feature = "std"))]
#[allow(unused_imports)]
use std_shims::prelude::*;
#[cfg(any(feature = "alloc", feature = "std"))]
use std_shims::io::{self, Read};
use rand_core::{RngCore, CryptoRng};
@@ -23,25 +26,6 @@ use group::{
#[cfg(any(feature = "alloc", feature = "std"))]
use group::GroupEncoding;
#[cfg(feature = "dalek")]
mod dalek;
#[cfg(feature = "ristretto")]
pub use dalek::Ristretto;
#[cfg(feature = "ed25519")]
pub use dalek::Ed25519;
#[cfg(feature = "kp256")]
mod kp256;
#[cfg(feature = "secp256k1")]
pub use kp256::Secp256k1;
#[cfg(feature = "p256")]
pub use kp256::P256;
#[cfg(feature = "ed448")]
mod ed448;
#[cfg(feature = "ed448")]
pub use ed448::*;
/// Unified trait defining a ciphersuite around an elliptic curve.
pub trait Ciphersuite:
'static + Send + Sync + Clone + Copy + PartialEq + Eq + Debug + Zeroize
@@ -62,12 +46,6 @@ pub trait Ciphersuite:
// While group does provide this in its API, privacy coins may want to use a custom basepoint
fn generator() -> Self::G;
/// Reduce 512 bits into a uniform scalar.
///
/// If 512 bits is insufficient to perform a reduction into a uniform scalar, the ciphersuite
/// will perform a hash to sample the necessary bits.
fn reduce_512(scalar: [u8; 64]) -> Self::F;
/// Hash the provided domain-separation tag and message to a scalar. Ciphersuites MAY naively
prefix the tag to the message, enabling transposition between the two. Accordingly, this
/// function should NOT be used in any scheme where one tag is a valid substring of another

View File

@@ -1,13 +1,13 @@
[package]
name = "dalek-ff-group"
version = "0.4.1"
version = "0.4.4"
description = "ff/group bindings around curve25519-dalek"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dalek-ff-group"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["curve25519", "ed25519", "ristretto", "dalek", "group"]
edition = "2021"
rust-version = "1.71"
rust-version = "1.65"
[package.metadata.docs.rs]
all-features = true
@@ -25,18 +25,22 @@ subtle = { version = "^2.4", default-features = false }
rand_core = { version = "0.6", default-features = false }
digest = { version = "0.10", default-features = false }
sha2 = { version = "0.10", default-features = false }
ff = { version = "0.13", default-features = false, features = ["bits"] }
group = { version = "0.13", default-features = false }
ciphersuite = { path = "../ciphersuite", default-features = false }
crypto-bigint = { version = "0.5", default-features = false, features = ["zeroize"] }
curve25519-dalek = { version = ">= 4.0, < 4.2", default-features = false, features = ["alloc", "zeroize", "digest", "group", "precomputed-tables"] }
[dev-dependencies]
hex = "0.4"
rand_core = { version = "0.6", default-features = false, features = ["std"] }
ff-group-tests = { path = "../ff-group-tests" }
[features]
std = ["zeroize/std", "subtle/std", "rand_core/std", "digest/std"]
alloc = ["zeroize/alloc", "ciphersuite/alloc"]
std = ["alloc", "zeroize/std", "subtle/std", "rand_core/std", "digest/std", "sha2/std", "ciphersuite/std"]
default = ["std"]

View File

@@ -3,9 +3,9 @@ use zeroize::Zeroize;
use sha2::{Digest, Sha512};
use group::Group;
use dalek_ff_group::Scalar;
use crate::Scalar;
use crate::Ciphersuite;
use ciphersuite::Ciphersuite;
macro_rules! dalek_curve {
(
@@ -15,7 +15,7 @@ macro_rules! dalek_curve {
$Point: ident,
$ID: literal
) => {
use dalek_ff_group::$Point;
use crate::$Point;
impl Ciphersuite for $Ciphersuite {
type F = Scalar;
@@ -28,12 +28,6 @@ macro_rules! dalek_curve {
$Point::generator()
}
fn reduce_512(mut scalar: [u8; 64]) -> Self::F {
let res = Scalar::from_bytes_mod_order_wide(&scalar);
scalar.zeroize();
res
}
fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
Scalar::from_hash(Sha512::new_with_prefix(&[dst, data].concat()))
}
@@ -46,12 +40,9 @@ macro_rules! dalek_curve {
/// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition
/// between the two. This means `dst: b"abc", data: b"def"`, will produce the same scalar as
/// `dst: "abcdef", data: b""`. Please use carefully, not letting dsts be substrings of each other.
#[cfg(any(test, feature = "ristretto"))]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct Ristretto;
#[cfg(any(test, feature = "ristretto"))]
dalek_curve!("ristretto", Ristretto, RistrettoPoint, b"ristretto");
#[cfg(any(test, feature = "ristretto"))]
#[test]
fn test_ristretto() {
ff_group_tests::group::test_prime_group_bits::<_, RistrettoPoint>(&mut rand_core::OsRng);
@@ -77,12 +68,9 @@ fn test_ristretto() {
/// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition
/// between the two. This means `dst: b"abc", data: b"def"`, will produce the same scalar as
/// `dst: "abcdef", data: b""`. Please use carefully, not letting dsts be substrings of each other.
#[cfg(feature = "ed25519")]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct Ed25519;
#[cfg(feature = "ed25519")]
dalek_curve!("ed25519", Ed25519, EdwardsPoint, b"edwards25519");
#[cfg(feature = "ed25519")]
#[test]
fn test_ed25519() {
ff_group_tests::group::test_prime_group_bits::<_, EdwardsPoint>(&mut rand_core::OsRng);

View File

@@ -17,7 +17,7 @@ use crypto_bigint::{
impl_modulus,
};
use group::ff::{Field, PrimeField, FieldBits, PrimeFieldBits};
use group::ff::{Field, PrimeField, FieldBits, PrimeFieldBits, FromUniformBytes};
use crate::{u8_from_bool, constant_time, math_op, math};
@@ -36,6 +36,7 @@ type ResidueType = Residue<FieldModulus, { FieldModulus::LIMBS }>;
/// A constant-time implementation of the Ed25519 field.
#[derive(Clone, Copy, PartialEq, Eq, Default, Debug, Zeroize)]
#[repr(transparent)]
pub struct FieldElement(ResidueType);
// Square root of -1.
@@ -216,10 +217,18 @@ impl PrimeFieldBits for FieldElement {
}
impl FieldElement {
/// Interpret the value as a little-endian integer, square it, and reduce it into a FieldElement.
pub fn from_square(value: [u8; 32]) -> FieldElement {
let value = U256::from_le_bytes(value);
FieldElement(reduce(U512::from(value.mul_wide(&value))))
/// Create a FieldElement from a `crypto_bigint::U256`.
///
/// This will reduce the `U256` by the modulus, into a member of the field.
pub const fn from_u256(u256: &U256) -> Self {
FieldElement(Residue::new(u256))
}
/// Create a `FieldElement` from the reduction of a 512-bit number.
///
/// The bytes are interpreted in little-endian format.
pub fn wide_reduce(value: [u8; 64]) -> Self {
FieldElement(reduce(U512::from_le_bytes(value)))
}
/// Perform an exponentiation.
@@ -297,6 +306,12 @@ impl FieldElement {
}
}
impl FromUniformBytes<64> for FieldElement {
fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {
Self::wide_reduce(*bytes)
}
}
impl Sum<FieldElement> for FieldElement {
fn sum<I: Iterator<Item = FieldElement>>(iter: I) -> FieldElement {
let mut res = FieldElement::ZERO;

View File

@@ -30,7 +30,7 @@ use dalek::{
pub use constants::{ED25519_BASEPOINT_TABLE, RISTRETTO_BASEPOINT_TABLE};
use group::{
ff::{Field, PrimeField, FieldBits, PrimeFieldBits},
ff::{Field, PrimeField, FieldBits, PrimeFieldBits, FromUniformBytes},
Group, GroupEncoding,
prime::PrimeGroup,
};
@@ -38,13 +38,24 @@ use group::{
mod field;
pub use field::FieldElement;
mod ciphersuite;
pub use crate::ciphersuite::{Ed25519, Ristretto};
// Use black_box when possible
#[rustversion::since(1.66)]
use core::hint::black_box;
#[rustversion::before(1.66)]
fn black_box<T>(val: T) -> T {
val
mod black_box {
pub(crate) fn black_box<T>(val: T) -> T {
#[allow(clippy::incompatible_msrv)]
core::hint::black_box(val)
}
}
#[rustversion::before(1.66)]
mod black_box {
pub(crate) fn black_box<T>(val: T) -> T {
val
}
}
use black_box::black_box;
fn u8_from_bool(bit_ref: &mut bool) -> u8 {
let bit_ref = black_box(bit_ref);
@@ -314,6 +325,12 @@ impl PrimeFieldBits for Scalar {
}
}
impl FromUniformBytes<64> for Scalar {
fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {
Self::from_bytes_mod_order_wide(bytes)
}
}
impl Sum<Scalar> for Scalar {
fn sum<I: Iterator<Item = Scalar>>(iter: I) -> Scalar {
Self(DScalar::sum(iter))
@@ -351,7 +368,12 @@ macro_rules! dalek_group {
$BASEPOINT_POINT: ident,
$BASEPOINT_TABLE: ident
) => {
/// Wrapper around the dalek Point type. For Ed25519, this is restricted to the prime subgroup.
/// Wrapper around the dalek Point type.
///
/// All operations will be restricted to a prime-order subgroup (equivalent to the group itself
/// in the case of Ristretto). The exposure of the internal element does allow bypassing this
/// however, which may lead to undefined/computationally-unsafe behavior, and is entirely at
/// the user's risk.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct $Point(pub $DPoint);
deref_borrow!($Point, $DPoint);

View File

@@ -1,13 +1,13 @@
[package]
name = "dkg"
version = "0.5.1"
version = "0.6.1"
description = "Distributed key generation over ff/group"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
rust-version = "1.81"
rust-version = "1.66"
[package.metadata.docs.rs]
all-features = true
@@ -17,82 +17,25 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true
[dependencies]
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive", "alloc"] }
thiserror = { version = "2", default-features = false }
rand_core = { version = "0.6", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false }
borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true }
transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false, features = ["recommended"] }
chacha20 = { version = "0.9", default-features = false, features = ["zeroize"] }
ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false }
multiexp = { path = "../multiexp", version = "0.4", default-features = false }
schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false }
dleq = { path = "../dleq", version = "^0.4.1", default-features = false }
# eVRF DKG dependencies
generic-array = { version = "1", default-features = false, features = ["alloc"], optional = true }
blake2 = { version = "0.10", default-features = false, features = ["std"], optional = true }
rand_chacha = { version = "0.3", default-features = false, features = ["std"], optional = true }
generalized-bulletproofs = { path = "../evrf/generalized-bulletproofs", default-features = false, optional = true }
ec-divisors = { path = "../evrf/divisors", default-features = false, optional = true }
generalized-bulletproofs-circuit-abstraction = { path = "../evrf/circuit-abstraction", optional = true }
generalized-bulletproofs-ec-gadgets = { path = "../evrf/ec-gadgets", optional = true }
secq256k1 = { path = "../evrf/secq256k1", optional = true }
embedwards25519 = { path = "../evrf/embedwards25519", optional = true }
[dev-dependencies]
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
rand = { version = "0.8", default-features = false, features = ["std"] }
ciphersuite = { path = "../ciphersuite", default-features = false, features = ["ristretto"] }
generalized-bulletproofs = { path = "../evrf/generalized-bulletproofs", features = ["tests"] }
ec-divisors = { path = "../evrf/divisors", features = ["pasta"] }
pasta_curves = { git = "https://github.com/kayabaNerve/pasta_curves", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616" }
ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false, features = ["alloc"] }
[features]
std = [
"thiserror/std",
"rand_core/std",
"std-shims/std",
"borsh?/std",
"transcript/std",
"chacha20/std",
"ciphersuite/std",
"multiexp/std",
"multiexp/batch",
"schnorr/std",
"dleq/std",
"dleq/serialize"
]
borsh = ["dep:borsh"]
evrf = [
"std",
"dep:generic-array",
"dep:blake2",
"dep:rand_chacha",
"dep:generalized-bulletproofs",
"dep:ec-divisors",
"dep:generalized-bulletproofs-circuit-abstraction",
"dep:generalized-bulletproofs-ec-gadgets",
]
evrf-secp256k1 = ["evrf", "ciphersuite/secp256k1", "secq256k1"]
evrf-ed25519 = ["evrf", "ciphersuite/ed25519", "embedwards25519"]
evrf-ristretto = ["evrf", "ciphersuite/ristretto", "embedwards25519"]
tests = ["rand_core/getrandom"]
default = ["std"]

View File

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2021-2023 Luke Parker
Copyright (c) 2021-2025 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@@ -1,16 +1,15 @@
# Distributed Key Generation
A collection of implementations of various distributed key generation protocols.
A crate implementing a type for keys, presumably the result of a distributed
key generation protocol, and utilities from there.
All included protocols resolve into the provided `Threshold` types, intended to
enable their modularity. Additional utilities around these types, such as
promotion from one generator to another, are also provided.
This crate used to host implementations of distributed key generation protocols
as well (hence the name). Those have been smashed into their own crates, such
as [`dkg-musig`](https://docs.rs/dkg-musig) and
[`dkg-pedpop`](https://docs.rs/dkg-pedpop).
Currently, the only included protocol is the two-round protocol from the
[FROST paper](https://eprint.iacr.org/2020/852).
This library was
[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),
culminating in commit
[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).
Any subsequent changes have not undergone auditing.
Before being smashed, this crate was [audited by Cypher Stack in March 2023](
https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf
), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](
https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06
). Any subsequent changes have not undergone auditing.

View File

@@ -0,0 +1,36 @@
[package]
name = "dkg-dealer"
version = "0.6.0"
description = "Produce dkg::ThresholdKeys with a dealer key generation"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/dealer"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
rust-version = "1.66"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
zeroize = { version = "^1.5", default-features = false }
rand_core = { version = "0.6", default-features = false }
std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false }
ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false }
dkg = { path = "../", version = "0.6", default-features = false }
[features]
std = [
"zeroize/std",
"rand_core/std",
"std-shims/std",
"ciphersuite/std",
"dkg/std",
]
default = ["std"]

View File

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2024 Luke Parker
Copyright (c) 2021-2025 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@@ -0,0 +1,13 @@
# Distributed Key Generation - Dealer
This crate implements a dealer key generation protocol for the
[`dkg`](https://docs.rs/dkg) crate's types. This provides a single point of
failure when the key is being generated and is NOT recommended for use outside
of tests.
This crate was originally part of (in some form) the `dkg` crate, which was
[audited by Cypher Stack in March 2023](
https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf
), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](
https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06
). Any subsequent changes have not undergone auditing.

View File

@@ -0,0 +1,68 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![no_std]
use core::ops::Deref;
use std_shims::{vec::Vec, collections::HashMap};
use zeroize::{Zeroize, Zeroizing};
use rand_core::{RngCore, CryptoRng};
use ciphersuite::{
group::ff::{Field, PrimeField},
Ciphersuite,
};
pub use dkg::*;
/// Create a key via a dealer key generation protocol.
pub fn key_gen<R: RngCore + CryptoRng, C: Ciphersuite>(
rng: &mut R,
threshold: u16,
participants: u16,
) -> Result<HashMap<Participant, ThresholdKeys<C>>, DkgError> {
let mut coefficients = Vec::with_capacity(usize::from(participants));
// `.max(1)` so we always generate the 0th coefficient which we'll share
for _ in 0 .. threshold.max(1) {
coefficients.push(Zeroizing::new(C::F::random(&mut *rng)));
}
fn polynomial<F: PrimeField + Zeroize>(
coefficients: &[Zeroizing<F>],
l: Participant,
) -> Zeroizing<F> {
let l = F::from(u64::from(u16::from(l)));
// This should never be reached since Participant is explicitly non-zero
assert!(l != F::ZERO, "zero participant passed to polynomial");
let mut share = Zeroizing::new(F::ZERO);
for (idx, coefficient) in coefficients.iter().rev().enumerate() {
*share += coefficient.deref();
if idx != (coefficients.len() - 1) {
*share *= l;
}
}
share
}
let group_key = C::generator() * coefficients[0].deref();
let mut secret_shares = HashMap::with_capacity(participants as usize);
let mut verification_shares = HashMap::with_capacity(participants as usize);
for i in 1 ..= participants {
let i = Participant::new(i).expect("non-zero u16 wasn't a valid Participant index");
let secret_share = polynomial(&coefficients, i);
secret_shares.insert(i, secret_share.clone());
verification_shares.insert(i, C::generator() * *secret_share);
}
let mut res = HashMap::with_capacity(participants as usize);
for (i, secret_share) in secret_shares {
let keys = ThresholdKeys::new(
ThresholdParams::new(threshold, participants, i)?,
Interpolation::Lagrange,
secret_share,
verification_shares.clone(),
)?;
debug_assert_eq!(keys.group_key(), group_key);
res.insert(i, keys);
}
Ok(res)
}

View File

@@ -0,0 +1,68 @@
[package]
name = "dkg-evrf"
version = "0.1.0"
description = "Distributed key generation over ff/group"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/evrf"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
rust-version = "1.81"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
thiserror = { version = "2", default-features = false }
rand_core = { version = "0.6", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
transcript = { package = "flexible-transcript", path = "../../transcript", version = "^0.3.2", default-features = false, features = ["recommended"] }
ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false }
multiexp = { path = "../../multiexp", version = "0.4", default-features = false }
generic-array = { version = "1", default-features = false, features = ["alloc"] }
blake2 = { version = "0.10", default-features = false, features = ["std"] }
rand_chacha = { version = "0.3", default-features = false, features = ["std"] }
generalized-bulletproofs = { git = "https://github.com/kayabaNerve/monero-oxide", rev = "b6dd1a9ff7ac6b96eb7cb488a4501fd1f6f2dd1e", default-features = false }
ec-divisors = { git = "https://github.com/kayabaNerve/monero-oxide", rev = "b6dd1a9ff7ac6b96eb7cb488a4501fd1f6f2dd1e", default-features = false }
generalized-bulletproofs-circuit-abstraction = { git = "https://github.com/kayabaNerve/monero-oxide", rev = "b6dd1a9ff7ac6b96eb7cb488a4501fd1f6f2dd1e" }
generalized-bulletproofs-ec-gadgets = { git = "https://github.com/kayabaNerve/monero-oxide", rev = "b6dd1a9ff7ac6b96eb7cb488a4501fd1f6f2dd1e" }
dkg = { path = ".." }
secq256k1 = { path = "../../evrf/secq256k1", optional = true }
embedwards25519 = { path = "../../evrf/embedwards25519", optional = true }
[dev-dependencies]
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
rand = { version = "0.8", default-features = false, features = ["std"] }
ciphersuite = { path = "../../ciphersuite", default-features = false, features = ["std"] }
dalek-ff-group = { path = "../../dalek-ff-group", default-features = false, features = ["std"] }
generalized-bulletproofs = { git = "https://github.com/kayabaNerve/monero-oxide", rev = "b6dd1a9ff7ac6b96eb7cb488a4501fd1f6f2dd1e", features = ["tests"] }
ec-divisors = { git = "https://github.com/kayabaNerve/monero-oxide", rev = "b6dd1a9ff7ac6b96eb7cb488a4501fd1f6f2dd1e" }
[features]
std = [
"thiserror/std",
"rand_core/std",
"transcript/std",
"ciphersuite/std",
"multiexp/std",
"multiexp/batch",
]
secp256k1 = ["secq256k1"]
ed25519 = ["embedwards25519"]
ristretto = ["embedwards25519"]
tests = ["rand_core/getrandom"]
default = ["std"]

View File

@@ -88,7 +88,7 @@ use multiexp::multiexp_vartime;
use generalized_bulletproofs::{Generators, arithmetic_circuit_proof::*};
use ec_divisors::DivisorCurve;
use crate::{Participant, ThresholdParams, Interpolation, ThresholdCore, ThresholdKeys};
use dkg::{Participant, ThresholdParams, Interpolation, ThresholdKeys};
pub(crate) mod proof;
use proof::*;

View File

@@ -35,9 +35,6 @@ impl Ciphersuite for Pallas {
// This is solely test code so it's fine
Self::F::from_uniform_bytes(&Self::H::digest([dst, msg].concat()).into())
}
fn reduce_512(scalar: [u8; 64]) -> Self::F {
Self::F::from_uniform_bytes(&scalar)
}
}
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
@@ -55,9 +52,6 @@ impl Ciphersuite for Vesta {
// This is solely test code so it's fine
Self::F::from_uniform_bytes(&Self::H::digest([dst, msg].concat()).into())
}
fn reduce_512(scalar: [u8; 64]) -> Self::F {
Self::F::from_uniform_bytes(&scalar)
}
}
pub struct VestaParams;

View File

@@ -0,0 +1,49 @@
[package]
name = "dkg-musig"
version = "0.6.0"
description = "The MuSig key aggregation protocol"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/musig"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
rust-version = "1.79"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
thiserror = { version = "2", default-features = false }
rand_core = { version = "0.6", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false }
multiexp = { path = "../../multiexp", version = "0.4", default-features = false }
ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false }
dkg = { path = "../", version = "0.6", default-features = false }
[dev-dependencies]
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
dalek-ff-group = { path = "../../dalek-ff-group" }
dkg-recovery = { path = "../recovery", default-features = false, features = ["std"] }
[features]
std = [
"thiserror/std",
"rand_core/std",
"std-shims/std",
"multiexp/std",
"ciphersuite/std",
"dkg/std",
]
default = ["std"]

View File

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2021-2024 Luke Parker
Copyright (c) 2021-2025 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@@ -0,0 +1,12 @@
# Distributed Key Generation - MuSig
This implements the MuSig key aggregation protocol for the
[`dkg`](https://docs.rs/dkg) crate's types.
This crate was originally part of (in some form) the `dkg` crate, which was
[audited by Cypher Stack in March 2023](
https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf
), culminating in commit
[669d2dbffc1dafb82a09d9419ea182667115df06](
https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06
). Any subsequent changes have not undergone auditing.

162
crypto/dkg/musig/src/lib.rs Normal file
View File

@@ -0,0 +1,162 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]
use core::ops::Deref;
use std_shims::{
vec,
vec::Vec,
collections::{HashSet, HashMap},
};
use zeroize::Zeroizing;
use ciphersuite::{group::GroupEncoding, Ciphersuite};
pub use dkg::*;
#[cfg(test)]
mod tests;
/// Errors encountered when working with threshold keys.
#[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)]
pub enum MusigError<C: Ciphersuite> {
/// No keys were provided.
#[error("no keys provided")]
NoKeysProvided,
/// Too many keys were provided.
#[error("too many keys (allowed {max}, provided {provided})")]
TooManyKeysProvided {
/// The maximum amount of keys allowed.
max: u16,
/// The amount of keys provided.
provided: usize,
},
/// A participant was duplicated.
#[error("a participant was duplicated")]
DuplicatedParticipant(C::G),
/// Participating, yet our public key wasn't found in the list of keys.
#[error("private key's public key wasn't present in the list of public keys")]
NotPresent,
/// An error propagated from the underlying `dkg` crate.
#[error("error from dkg ({0})")]
DkgError(DkgError),
}
fn check_keys<C: Ciphersuite>(keys: &[C::G]) -> Result<u16, MusigError<C>> {
if keys.is_empty() {
Err(MusigError::NoKeysProvided)?;
}
let keys_len = u16::try_from(keys.len())
.map_err(|_| MusigError::TooManyKeysProvided { max: u16::MAX, provided: keys.len() })?;
let mut set = HashSet::with_capacity(keys.len());
for key in keys {
let bytes = key.to_bytes().as_ref().to_vec();
if !set.insert(bytes) {
Err(MusigError::DuplicatedParticipant(*key))?;
}
}
Ok(keys_len)
}
fn binding_factor_transcript<C: Ciphersuite>(
context: [u8; 32],
keys_len: u16,
keys: &[C::G],
) -> Vec<u8> {
debug_assert_eq!(usize::from(keys_len), keys.len());
let mut transcript = vec![];
transcript.extend(&context);
transcript.extend(keys_len.to_le_bytes());
for key in keys {
transcript.extend(key.to_bytes().as_ref());
}
transcript
}
fn binding_factor<C: Ciphersuite>(mut transcript: Vec<u8>, i: u16) -> C::F {
transcript.extend(i.to_le_bytes());
C::hash_to_F(b"dkg-musig", &transcript)
}
#[allow(clippy::type_complexity)]
fn musig_key_multiexp<C: Ciphersuite>(
context: [u8; 32],
keys: &[C::G],
) -> Result<Vec<(C::F, C::G)>, MusigError<C>> {
let keys_len = check_keys::<C>(keys)?;
let transcript = binding_factor_transcript::<C>(context, keys_len, keys);
let mut multiexp = Vec::with_capacity(keys.len());
for i in 1 ..= keys_len {
multiexp.push((binding_factor::<C>(transcript.clone(), i), keys[usize::from(i - 1)]));
}
Ok(multiexp)
}
/// The group key resulting from using this library's MuSig key aggregation.
///
/// This function executes in variable time and MUST NOT be used with secret data.
pub fn musig_key_vartime<C: Ciphersuite>(
context: [u8; 32],
keys: &[C::G],
) -> Result<C::G, MusigError<C>> {
Ok(multiexp::multiexp_vartime(&musig_key_multiexp(context, keys)?))
}
/// The group key resulting from using this library's MuSig key aggregation.
pub fn musig_key<C: Ciphersuite>(context: [u8; 32], keys: &[C::G]) -> Result<C::G, MusigError<C>> {
Ok(multiexp::multiexp(&musig_key_multiexp(context, keys)?))
}
/// A n-of-n non-interactive DKG which does not guarantee the usability of the resulting key.
pub fn musig<C: Ciphersuite>(
context: [u8; 32],
private_key: Zeroizing<C::F>,
keys: &[C::G],
) -> Result<ThresholdKeys<C>, MusigError<C>> {
let our_pub_key = C::generator() * private_key.deref();
let Some(our_i) = keys.iter().position(|key| *key == our_pub_key) else {
Err(MusigError::DkgError(DkgError::NotParticipating))?
};
let keys_len: u16 = check_keys::<C>(keys)?;
let params = ThresholdParams::new(
keys_len,
keys_len,
// The `+ 1` won't fail as `keys.len() <= u16::MAX`, so any index is `< u16::MAX`
Participant::new(
u16::try_from(our_i).expect("keys.len() <= u16::MAX yet index of keys > u16::MAX?") + 1,
)
.expect("i + 1 != 0"),
)
.map_err(MusigError::DkgError)?;
let transcript = binding_factor_transcript::<C>(context, keys_len, keys);
let mut binding_factors = Vec::with_capacity(keys.len());
let mut multiexp = Vec::with_capacity(keys.len());
let mut verification_shares = HashMap::with_capacity(keys.len());
for (i, key) in (1 ..= keys_len).zip(keys.iter().copied()) {
let binding_factor = binding_factor::<C>(transcript.clone(), i);
binding_factors.push(binding_factor);
multiexp.push((binding_factor, key));
let i = Participant::new(i).expect("non-zero u16 wasn't a valid Participant index?");
verification_shares.insert(i, key);
}
let group_key = multiexp::multiexp(&multiexp);
debug_assert_eq!(our_pub_key, verification_shares[&params.i()]);
debug_assert_eq!(musig_key_vartime::<C>(context, keys).unwrap(), group_key);
ThresholdKeys::new(
params,
Interpolation::Constant(binding_factors),
private_key,
verification_shares,
)
.map_err(MusigError::DkgError)
}

View File

@@ -0,0 +1,71 @@
use std::collections::HashMap;
use zeroize::Zeroizing;
use rand_core::OsRng;
use dalek_ff_group::Ristretto;
use ciphersuite::{group::ff::Field, Ciphersuite};
use dkg_recovery::recover_key;
use crate::*;
/// Tests MuSig key generation.
#[test]
pub fn test_musig() {
const PARTICIPANTS: u16 = 5;
let mut keys = vec![];
let mut pub_keys = vec![];
for _ in 0 .. PARTICIPANTS {
let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
pub_keys.push(<Ristretto as Ciphersuite>::generator() * *key);
keys.push(key);
}
const CONTEXT: [u8; 32] = *b"MuSig Test ";
// Empty signing set
musig::<Ristretto>(CONTEXT, Zeroizing::new(<Ristretto as Ciphersuite>::F::ZERO), &[])
.unwrap_err();
// Signing set we're not part of
musig::<Ristretto>(
CONTEXT,
Zeroizing::new(<Ristretto as Ciphersuite>::F::ZERO),
&[<Ristretto as Ciphersuite>::generator()],
)
.unwrap_err();
// Test with n keys
{
let mut created_keys = HashMap::new();
let mut verification_shares = HashMap::new();
let group_key = musig_key::<Ristretto>(CONTEXT, &pub_keys).unwrap();
for (i, key) in keys.iter().enumerate() {
let these_keys = musig::<Ristretto>(CONTEXT, key.clone(), &pub_keys).unwrap();
assert_eq!(these_keys.params().t(), PARTICIPANTS);
assert_eq!(these_keys.params().n(), PARTICIPANTS);
assert_eq!(usize::from(u16::from(these_keys.params().i())), i + 1);
verification_shares.insert(
these_keys.params().i(),
<Ristretto as Ciphersuite>::generator() * **these_keys.original_secret_share(),
);
assert_eq!(these_keys.group_key(), group_key);
created_keys.insert(these_keys.params().i(), these_keys);
}
for keys in created_keys.values() {
for (l, verification_share) in &verification_shares {
assert_eq!(keys.original_verification_share(*l), *verification_share);
}
}
assert_eq!(
<Ristretto as Ciphersuite>::generator() *
*recover_key(&created_keys.values().cloned().collect::<Vec<_>>()).unwrap(),
group_key
);
}
}

View File

@@ -0,0 +1,37 @@
[package]
name = "dkg-pedpop"
version = "0.6.0"
description = "The PedPoP distributed key generation protocol"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/pedpop"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
rust-version = "1.80"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
thiserror = { version = "2", default-features = false, features = ["std"] }
zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../../transcript", version = "^0.3.3", default-features = false, features = ["std", "recommended"] }
chacha20 = { version = "0.9", default-features = false, features = ["std", "zeroize"] }
multiexp = { path = "../../multiexp", version = "0.4", default-features = false, features = ["std"] }
ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../../schnorr", version = "^0.5.1", default-features = false, features = ["std"] }
dleq = { path = "../../dleq", version = "^0.4.1", default-features = false, features = ["std", "serialize"] }
dkg = { path = "../", version = "0.6", default-features = false, features = ["std"] }
[dev-dependencies]
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
dalek-ff-group = { path = "../../dalek-ff-group", default-features = false }

View File

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2024 Luke Parker
Copyright (c) 2021-2025 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@@ -0,0 +1,12 @@
# Distributed Key Generation - PedPoP
This implements the PedPoP distributed key generation protocol for the
[`dkg`](https://docs.rs/dkg) crate's types.
This crate was originally part of the `dkg` crate, which was
[audited by Cypher Stack in March 2023](
https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf
), culminating in commit
[669d2dbffc1dafb82a09d9419ea182667115df06](
https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06
). Any subsequent changes have not undergone auditing.

View File

@@ -21,7 +21,7 @@ use multiexp::BatchVerifier;
use schnorr::SchnorrSignature;
use dleq::DLEqProof;
use crate::{Participant, ThresholdParams};
use dkg::{Participant, ThresholdParams};
mod sealed {
use super::*;
@@ -69,7 +69,7 @@ impl<C: Ciphersuite, M: Message> EncryptionKeyMessage<C, M> {
buf
}
#[cfg(any(test, feature = "tests"))]
#[cfg(test)]
pub(crate) fn enc_key(&self) -> C::G {
self.enc_key
}

View File

@@ -1,15 +1,20 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
// This crate requires `dleq` which doesn't support no-std via std-shims
// #![cfg_attr(not(feature = "std"), no_std)]
use core::{marker::PhantomData, ops::Deref, fmt};
use std::{
io::{self, Read, Write},
collections::HashMap,
};
use rand_core::{RngCore, CryptoRng};
use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
use rand_core::{RngCore, CryptoRng};
use transcript::{Transcript, RecommendedTranscript};
use multiexp::{multiexp_vartime, BatchVerifier};
use ciphersuite::{
group::{
ff::{Field, PrimeField},
@@ -17,29 +22,75 @@ use ciphersuite::{
},
Ciphersuite,
};
use multiexp::{multiexp_vartime, BatchVerifier};
use schnorr::SchnorrSignature;
use crate::{
Participant, DkgError, ThresholdParams, Interpolation, ThresholdCore, validate_map,
encryption::{
ReadWrite, EncryptionKeyMessage, EncryptedMessage, Encryption, Decryption, EncryptionKeyProof,
DecryptionError,
},
};
pub use dkg::*;
type FrostError<C> = DkgError<EncryptionKeyProof<C>>;
mod encryption;
pub use encryption::*;
#[cfg(test)]
mod tests;
/// Errors possible during key generation.
#[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)]
pub enum PedPoPError<C: Ciphersuite> {
/// An incorrect amount of participants was provided.
#[error("incorrect amount of participants (expected {expected}, found {found})")]
IncorrectAmountOfParticipants { expected: usize, found: usize },
/// An invalid proof of knowledge was provided.
#[error("invalid proof of knowledge (participant {0})")]
InvalidCommitments(Participant),
/// An invalid DKG share was provided.
#[error("invalid share (participant {participant}, blame {blame})")]
InvalidShare { participant: Participant, blame: Option<EncryptionKeyProof<C>> },
/// A participant was missing.
#[error("missing participant {0}")]
MissingParticipant(Participant),
/// An error propagated from the underlying `dkg` crate.
#[error("error from dkg ({0})")]
DkgError(DkgError),
}
// Validate a map of values to have the expected included participants
fn validate_map<T, C: Ciphersuite>(
map: &HashMap<Participant, T>,
included: &[Participant],
ours: Participant,
) -> Result<(), PedPoPError<C>> {
if (map.len() + 1) != included.len() {
Err(PedPoPError::IncorrectAmountOfParticipants {
expected: included.len(),
found: map.len() + 1,
})?;
}
for included in included {
if *included == ours {
if map.contains_key(included) {
Err(PedPoPError::DkgError(DkgError::DuplicatedParticipant(*included)))?;
}
continue;
}
if !map.contains_key(included) {
Err(PedPoPError::MissingParticipant(*included))?;
}
}
Ok(())
}
#[allow(non_snake_case)]
fn challenge<C: Ciphersuite>(context: [u8; 32], l: Participant, R: &[u8], Am: &[u8]) -> C::F {
let mut transcript = RecommendedTranscript::new(b"DKG FROST v0.2");
let mut transcript = RecommendedTranscript::new(b"DKG PedPoP v0.2");
transcript.domain_separate(b"schnorr_proof_of_knowledge");
transcript.append_message(b"context", context);
transcript.append_message(b"participant", l.to_bytes());
transcript.append_message(b"nonce", R);
transcript.append_message(b"commitments", Am);
C::hash_to_F(b"DKG-FROST-proof_of_knowledge-0", &transcript.challenge(b"schnorr"))
C::hash_to_F(b"DKG-PedPoP-proof_of_knowledge-0", &transcript.challenge(b"schnorr"))
}
/// The commitments message, intended to be broadcast to all other parties.
@@ -98,7 +149,7 @@ impl<C: Ciphersuite> KeyGenMachine<C> {
KeyGenMachine { params, context, _curve: PhantomData }
}
/// Start generating a key according to the FROST DKG spec.
/// Start generating a key according to the PedPoP DKG specification present in the FROST paper.
///
/// Returns a commitments message to be sent to all parties over an authenticated channel. If any
/// party submits multiple sets of commitments, they MUST be treated as malicious.
@@ -106,7 +157,7 @@ impl<C: Ciphersuite> KeyGenMachine<C> {
self,
rng: &mut R,
) -> (SecretShareMachine<C>, EncryptionKeyMessage<C, Commitments<C>>) {
let t = usize::from(self.params.t);
let t = usize::from(self.params.t());
let mut coefficients = Vec::with_capacity(t);
let mut commitments = Vec::with_capacity(t);
let mut cached_msg = vec![];
@@ -133,7 +184,7 @@ impl<C: Ciphersuite> KeyGenMachine<C> {
);
// Additionally create an encryption mechanism to protect the secret shares
let encryption = Encryption::new(self.context, self.params.i, rng);
let encryption = Encryption::new(self.context, self.params.i(), rng);
// Step 4: Broadcast
let msg =
@@ -250,21 +301,21 @@ impl<C: Ciphersuite> SecretShareMachine<C> {
&mut self,
rng: &mut R,
mut commitment_msgs: HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
) -> Result<HashMap<Participant, Vec<C::G>>, FrostError<C>> {
) -> Result<HashMap<Participant, Vec<C::G>>, PedPoPError<C>> {
validate_map(
&commitment_msgs,
&(1 ..= self.params.n()).map(Participant).collect::<Vec<_>>(),
&self.params.all_participant_indexes().collect::<Vec<_>>(),
self.params.i(),
)?;
let mut batch = BatchVerifier::<Participant, C::G>::new(commitment_msgs.len());
let mut commitments = HashMap::new();
for l in (1 ..= self.params.n()).map(Participant) {
for l in self.params.all_participant_indexes() {
let Some(msg) = commitment_msgs.remove(&l) else { continue };
let mut msg = self.encryption.register(l, msg);
if msg.commitments.len() != self.params.t().into() {
Err(FrostError::InvalidCommitments(l))?;
Err(PedPoPError::InvalidCommitments(l))?;
}
// Step 5: Validate each proof of knowledge
@@ -280,9 +331,9 @@ impl<C: Ciphersuite> SecretShareMachine<C> {
commitments.insert(l, msg.commitments.drain(..).collect::<Vec<_>>());
}
batch.verify_vartime_with_vartime_blame().map_err(FrostError::InvalidCommitments)?;
batch.verify_vartime_with_vartime_blame().map_err(PedPoPError::InvalidCommitments)?;
commitments.insert(self.params.i, self.our_commitments.drain(..).collect());
commitments.insert(self.params.i(), self.our_commitments.drain(..).collect());
Ok(commitments)
}
@@ -299,13 +350,13 @@ impl<C: Ciphersuite> SecretShareMachine<C> {
commitments: HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
) -> Result<
(KeyMachine<C>, HashMap<Participant, EncryptedMessage<C, SecretShare<C::F>>>),
FrostError<C>,
PedPoPError<C>,
> {
let commitments = self.verify_r1(&mut *rng, commitments)?;
// Step 1: Generate secret shares for all other parties
let mut res = HashMap::new();
for l in (1 ..= self.params.n()).map(Participant) {
for l in self.params.all_participant_indexes() {
// Don't insert our own shares to the byte buffer which is meant to be sent around
// An app developer could accidentally send it. Best to keep this black boxed
if l == self.params.i() {
@@ -413,10 +464,10 @@ impl<C: Ciphersuite> KeyMachine<C> {
mut self,
rng: &mut R,
mut shares: HashMap<Participant, EncryptedMessage<C, SecretShare<C::F>>>,
) -> Result<BlameMachine<C>, FrostError<C>> {
) -> Result<BlameMachine<C>, PedPoPError<C>> {
validate_map(
&shares,
&(1 ..= self.params.n()).map(Participant).collect::<Vec<_>>(),
&self.params.all_participant_indexes().collect::<Vec<_>>(),
self.params.i(),
)?;
@@ -427,7 +478,7 @@ impl<C: Ciphersuite> KeyMachine<C> {
self.encryption.decrypt(rng, &mut batch, BatchId::Decryption(l), l, share_bytes);
let share =
Zeroizing::new(Option::<C::F>::from(C::F::from_repr(share_bytes.0)).ok_or_else(|| {
FrostError::InvalidShare { participant: l, blame: Some(blame.clone()) }
PedPoPError::InvalidShare { participant: l, blame: Some(blame.clone()) }
})?);
share_bytes.zeroize();
*self.secret += share.deref();
@@ -444,7 +495,7 @@ impl<C: Ciphersuite> KeyMachine<C> {
BatchId::Decryption(l) => (l, None),
BatchId::Share(l) => (l, Some(blames.remove(&l).unwrap())),
};
FrostError::InvalidShare { participant: l, blame }
PedPoPError::InvalidShare { participant: l, blame }
})?;
// Stripe commitments per t and sum them in advance. Calculating verification shares relies on
@@ -458,7 +509,7 @@ impl<C: Ciphersuite> KeyMachine<C> {
// Calculate each user's verification share
let mut verification_shares = HashMap::new();
for i in (1 ..= self.params.n()).map(Participant) {
for i in self.params.all_participant_indexes() {
verification_shares.insert(
i,
if i == self.params.i() {
@@ -473,13 +524,10 @@ impl<C: Ciphersuite> KeyMachine<C> {
Ok(BlameMachine {
commitments,
encryption: encryption.into_decryption(),
result: Some(ThresholdCore {
params,
interpolation: Interpolation::Lagrange,
secret_share: secret,
group_key: stripes[0],
verification_shares,
}),
result: Some(
ThresholdKeys::new(params, Interpolation::Lagrange, secret, verification_shares)
.map_err(PedPoPError::DkgError)?,
),
})
}
}
@@ -488,7 +536,7 @@ impl<C: Ciphersuite> KeyMachine<C> {
pub struct BlameMachine<C: Ciphersuite> {
commitments: HashMap<Participant, Vec<C::G>>,
encryption: Decryption<C>,
result: Option<ThresholdCore<C>>,
result: Option<ThresholdKeys<C>>,
}
impl<C: Ciphersuite> fmt::Debug for BlameMachine<C> {
@@ -520,7 +568,7 @@ impl<C: Ciphersuite> BlameMachine<C> {
/// territory of consensus protocols. This library does not handle that nor does it provide any
/// tooling to do so. This function is solely intended to force users to acknowledge they're
/// completing the protocol, not processing any blame.
pub fn complete(self) -> ThresholdCore<C> {
pub fn complete(self) -> ThresholdKeys<C> {
self.result.unwrap()
}
@@ -602,12 +650,12 @@ impl<C: Ciphersuite> AdditionalBlameMachine<C> {
context: [u8; 32],
n: u16,
mut commitment_msgs: HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
) -> Result<Self, FrostError<C>> {
) -> Result<Self, PedPoPError<C>> {
let mut commitments = HashMap::new();
let mut encryption = Decryption::new(context);
for i in 1 ..= n {
let i = Participant::new(i).unwrap();
let Some(msg) = commitment_msgs.remove(&i) else { Err(DkgError::MissingParticipant(i))? };
let Some(msg) = commitment_msgs.remove(&i) else { Err(PedPoPError::MissingParticipant(i))? };
commitments.insert(i, encryption.register(i, msg).commitments);
}
Ok(AdditionalBlameMachine(BlameMachine { commitments, encryption, result: None }))

View File

@@ -0,0 +1,346 @@
use std::collections::HashMap;
use rand_core::{RngCore, CryptoRng, OsRng};
use dalek_ff_group::Ristretto;
use ciphersuite::Ciphersuite;
use crate::*;
const THRESHOLD: u16 = 3;
const PARTICIPANTS: u16 = 5;
/// Clone a map without a specific value.
fn clone_without<K: Clone + core::cmp::Eq + core::hash::Hash, V: Clone>(
map: &HashMap<K, V>,
without: &K,
) -> HashMap<K, V> {
let mut res = map.clone();
res.remove(without).unwrap();
res
}
type PedPoPEncryptedMessage<C> = EncryptedMessage<C, SecretShare<<C as Ciphersuite>::F>>;
type PedPoPSecretShares<C> = HashMap<Participant, PedPoPEncryptedMessage<C>>;
const CONTEXT: [u8; 32] = *b"DKG Test Key Generation ";
// Commit, then return commitment messages, enc keys, and shares
#[allow(clippy::type_complexity)]
fn commit_enc_keys_and_shares<R: RngCore + CryptoRng, C: Ciphersuite>(
rng: &mut R,
) -> (
HashMap<Participant, KeyMachine<C>>,
HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
HashMap<Participant, C::G>,
HashMap<Participant, PedPoPSecretShares<C>>,
) {
let mut machines = HashMap::new();
let mut commitments = HashMap::new();
let mut enc_keys = HashMap::new();
for i in (1 ..= PARTICIPANTS).map(|i| Participant::new(i).unwrap()) {
let params = ThresholdParams::new(THRESHOLD, PARTICIPANTS, i).unwrap();
let machine = KeyGenMachine::<C>::new(params, CONTEXT);
let (machine, these_commitments) = machine.generate_coefficients(rng);
machines.insert(i, machine);
commitments.insert(
i,
EncryptionKeyMessage::read::<&[u8]>(&mut these_commitments.serialize().as_ref(), params)
.unwrap(),
);
enc_keys.insert(i, commitments[&i].enc_key());
}
let mut secret_shares = HashMap::new();
let machines = machines
.drain()
.map(|(l, machine)| {
let (machine, mut shares) =
machine.generate_secret_shares(rng, clone_without(&commitments, &l)).unwrap();
let shares = shares
.drain()
.map(|(l, share)| {
(
l,
EncryptedMessage::read::<&[u8]>(
&mut share.serialize().as_ref(),
// Only t/n actually matters, so hardcode i to 1 here
ThresholdParams::new(THRESHOLD, PARTICIPANTS, Participant::new(1).unwrap()).unwrap(),
)
.unwrap(),
)
})
.collect::<HashMap<_, _>>();
secret_shares.insert(l, shares);
(l, machine)
})
.collect::<HashMap<_, _>>();
(machines, commitments, enc_keys, secret_shares)
}
fn generate_secret_shares<C: Ciphersuite>(
shares: &HashMap<Participant, PedPoPSecretShares<C>>,
recipient: Participant,
) -> PedPoPSecretShares<C> {
let mut our_secret_shares = HashMap::new();
for (i, shares) in shares {
if recipient == *i {
continue;
}
our_secret_shares.insert(*i, shares[&recipient].clone());
}
our_secret_shares
}
/// Fully perform the PedPoP key generation algorithm.
fn pedpop_gen<R: RngCore + CryptoRng, C: Ciphersuite>(
rng: &mut R,
) -> HashMap<Participant, ThresholdKeys<C>> {
let (mut machines, _, _, secret_shares) = commit_enc_keys_and_shares::<_, C>(rng);
let mut verification_shares = None;
let mut group_key = None;
machines
.drain()
.map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i);
let these_keys = machine.calculate_share(rng, our_secret_shares).unwrap().complete();
// Verify the verification_shares are agreed upon
if verification_shares.is_none() {
verification_shares = Some(
these_keys
.params()
.all_participant_indexes()
.map(|i| (i, these_keys.original_verification_share(i)))
.collect::<HashMap<_, _>>(),
);
}
assert_eq!(
verification_shares.as_ref().unwrap(),
&these_keys
.params()
.all_participant_indexes()
.map(|i| (i, these_keys.original_verification_share(i)))
.collect::<HashMap<_, _>>()
);
// Verify the group keys are agreed upon
if group_key.is_none() {
group_key = Some(these_keys.group_key());
}
assert_eq!(group_key.unwrap(), these_keys.group_key());
(i, these_keys)
})
.collect::<HashMap<_, _>>()
}
const ONE: Participant = Participant::new(1).unwrap();
const TWO: Participant = Participant::new(2).unwrap();
#[test]
fn test_pedpop() {
let _ = core::hint::black_box(pedpop_gen::<_, Ristretto>(&mut OsRng));
}
fn test_blame(
commitment_msgs: &HashMap<Participant, EncryptionKeyMessage<Ristretto, Commitments<Ristretto>>>,
machines: Vec<BlameMachine<Ristretto>>,
msg: &PedPoPEncryptedMessage<Ristretto>,
blame: &Option<EncryptionKeyProof<Ristretto>>,
) {
for machine in machines {
let (additional, blamed) = machine.blame(ONE, TWO, msg.clone(), blame.clone());
assert_eq!(blamed, ONE);
// Verify additional blame also works
assert_eq!(additional.blame(ONE, TWO, msg.clone(), blame.clone()), ONE);
// Verify machines constructed with AdditionalBlameMachine::new work
assert_eq!(
AdditionalBlameMachine::new(CONTEXT, PARTICIPANTS, commitment_msgs.clone()).unwrap().blame(
ONE,
TWO,
msg.clone(),
blame.clone()
),
ONE,
);
}
}
// TODO: Write a macro which expands to the following
#[test]
fn invalid_encryption_pop_blame() {
let (mut machines, commitment_msgs, _, mut secret_shares) =
commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
// Mutate the PoP of the encrypted message from 1 to 2
secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_pop();
let mut blame = None;
let machines = machines
.drain()
.filter_map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i);
let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
if i == TWO {
assert_eq!(
machine.err(),
Some(PedPoPError::InvalidShare { participant: ONE, blame: None })
);
// Explicitly declare we have a blame object, which happens to be None since invalid PoP
// is self-explainable
blame = Some(None);
None
} else {
Some(machine.unwrap())
}
})
.collect::<Vec<_>>();
test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap());
}
#[test]
fn invalid_ecdh_blame() {
let (mut machines, commitment_msgs, _, mut secret_shares) =
commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
// Mutate the share to trigger a blame event
// Mutates from 2 to 1, as 1 is expected to end up malicious for test_blame to pass
// While here, 2 is malicious, this is so 1 creates the blame proof
// We then malleate 1's blame proof, so 1 ends up malicious
// Doesn't simply invalidate the PoP as that won't have a blame statement
// By mutating the encrypted data, we do ensure a blame statement is created
secret_shares
.get_mut(&TWO)
.unwrap()
.get_mut(&ONE)
.unwrap()
.invalidate_msg(&mut OsRng, CONTEXT, TWO);
let mut blame = None;
let machines = machines
.drain()
.filter_map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i);
let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
if i == ONE {
blame = Some(match machine.err() {
Some(PedPoPError::InvalidShare { participant: TWO, blame: Some(blame) }) => Some(blame),
_ => panic!(),
});
None
} else {
Some(machine.unwrap())
}
})
.collect::<Vec<_>>();
blame.as_mut().unwrap().as_mut().unwrap().invalidate_key();
test_blame(&commitment_msgs, machines, &secret_shares[&TWO][&ONE].clone(), &blame.unwrap());
}
// This should be largely equivalent to the prior test
#[test]
fn invalid_dleq_blame() {
let (mut machines, commitment_msgs, _, mut secret_shares) =
commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
secret_shares
.get_mut(&TWO)
.unwrap()
.get_mut(&ONE)
.unwrap()
.invalidate_msg(&mut OsRng, CONTEXT, TWO);
let mut blame = None;
let machines = machines
.drain()
.filter_map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i);
let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
if i == ONE {
blame = Some(match machine.err() {
Some(PedPoPError::InvalidShare { participant: TWO, blame: Some(blame) }) => Some(blame),
_ => panic!(),
});
None
} else {
Some(machine.unwrap())
}
})
.collect::<Vec<_>>();
blame.as_mut().unwrap().as_mut().unwrap().invalidate_dleq();
test_blame(&commitment_msgs, machines, &secret_shares[&TWO][&ONE].clone(), &blame.unwrap());
}
#[test]
fn invalid_share_serialization_blame() {
let (mut machines, commitment_msgs, enc_keys, mut secret_shares) =
commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_share_serialization(
&mut OsRng,
CONTEXT,
ONE,
enc_keys[&TWO],
);
let mut blame = None;
let machines = machines
.drain()
.filter_map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i);
let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
if i == TWO {
blame = Some(match machine.err() {
Some(PedPoPError::InvalidShare { participant: ONE, blame: Some(blame) }) => Some(blame),
_ => panic!(),
});
None
} else {
Some(machine.unwrap())
}
})
.collect::<Vec<_>>();
test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap());
}
#[test]
fn invalid_share_value_blame() {
let (mut machines, commitment_msgs, enc_keys, mut secret_shares) =
commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_share_value(
&mut OsRng,
CONTEXT,
ONE,
enc_keys[&TWO],
);
let mut blame = None;
let machines = machines
.drain()
.filter_map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i);
let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
if i == TWO {
blame = Some(match machine.err() {
Some(PedPoPError::InvalidShare { participant: ONE, blame: Some(blame) }) => Some(blame),
_ => panic!(),
});
None
} else {
Some(machine.unwrap())
}
})
.collect::<Vec<_>>();
test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap());
}

View File

@@ -0,0 +1,34 @@
[package]
name = "dkg-promote"
version = "0.6.1"
description = "Promotions for keys from the dkg crate"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/promote"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
rust-version = "1.80"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
thiserror = { version = "2", default-features = false, features = ["std"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../../transcript", version = "^0.3.2", default-features = false, features = ["std", "recommended"] }
ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false, features = ["std"] }
dleq = { path = "../../dleq", version = "^0.4.1", default-features = false, features = ["std", "serialize"] }
dkg = { path = "../", version = "0.6.1", default-features = false, features = ["std"] }
[dev-dependencies]
zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] }
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
dalek-ff-group = { path = "../../dalek-ff-group" }
dkg-recovery = { path = "../recovery", default-features = false, features = ["std"] }

View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2021-2025 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,13 @@
# Distributed Key Generation - Promote
This crate implements 'promotions' for keys from the
[`dkg`](https://docs.rs/dkg) crate. A promotion takes a set of keys and maps it
to a different `Ciphersuite`.
This crate was originally part of the `dkg` crate, which was
[audited by Cypher Stack in March 2023](
https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf
), culminating in commit
[669d2dbffc1dafb82a09d9419ea182667115df06](
https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06
). Any subsequent changes have not undergone auditing.

View File

@@ -1,7 +1,11 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
// This crate requires `dleq` which doesn't support no-std via std-shims
// #![cfg_attr(not(feature = "std"), no_std)]
use core::{marker::PhantomData, ops::Deref};
use std::{
io::{self, Read, Write},
sync::Arc,
collections::HashMap,
};
@@ -12,11 +16,37 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite};
use transcript::{Transcript, RecommendedTranscript};
use dleq::DLEqProof;
use crate::{Participant, DkgError, ThresholdCore, ThresholdKeys, validate_map};
pub use dkg::*;
/// Promote a set of keys to another Ciphersuite definition.
pub trait CiphersuitePromote<C2: Ciphersuite> {
fn promote(self) -> ThresholdKeys<C2>;
#[cfg(test)]
mod tests;
/// Errors encountered when promoting keys.
#[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)]
pub enum PromotionError {
/// Invalid participant identifier.
#[error("invalid participant (1 <= participant <= {n}, yet participant is {participant})")]
InvalidParticipant {
/// The total amount of participants.
n: u16,
/// The specified participant.
participant: Participant,
},
/// An incorrect amount of participants was specified.
#[error("incorrect amount of participants. {t} <= amount <= {n}, yet amount is {amount}")]
IncorrectAmountOfParticipants {
/// The threshold required.
t: u16,
/// The total amount of participants.
n: u16,
/// The amount of participants specified.
amount: usize,
},
/// Participant provided an invalid proof.
#[error("invalid proof {0}")]
InvalidProof(Participant),
}
fn transcript<G: GroupEncoding>(key: &G, i: Participant) -> RecommendedTranscript {
@@ -65,20 +95,21 @@ pub struct GeneratorPromotion<C1: Ciphersuite, C2: Ciphersuite> {
}
impl<C1: Ciphersuite, C2: Ciphersuite<F = C1::F, G = C1::G>> GeneratorPromotion<C1, C2> {
/// Begin promoting keys from one generator to another. Returns a proof this share was properly
/// promoted.
/// Begin promoting keys from one generator to another.
///
/// Returns a proof this share was properly promoted.
pub fn promote<R: RngCore + CryptoRng>(
rng: &mut R,
base: ThresholdKeys<C1>,
) -> (GeneratorPromotion<C1, C2>, GeneratorProof<C1>) {
// Do a DLEqProof for the new generator
let proof = GeneratorProof {
share: C2::generator() * base.secret_share().deref(),
share: C2::generator() * base.original_secret_share().deref(),
proof: DLEqProof::prove(
rng,
&mut transcript(&base.core.group_key(), base.params().i),
&mut transcript(&base.original_group_key(), base.params().i()),
&[C1::generator(), C2::generator()],
base.secret_share(),
base.original_secret_share(),
),
};
@@ -89,35 +120,49 @@ impl<C1: Ciphersuite, C2: Ciphersuite<F = C1::F, G = C1::G>> GeneratorPromotion<
pub fn complete(
self,
proofs: &HashMap<Participant, GeneratorProof<C1>>,
) -> Result<ThresholdKeys<C2>, DkgError<()>> {
) -> Result<ThresholdKeys<C2>, PromotionError> {
let params = self.base.params();
validate_map(proofs, &(1 ..= params.n).map(Participant).collect::<Vec<_>>(), params.i)?;
let original_shares = self.base.verification_shares();
if proofs.len() != (usize::from(params.n()) - 1) {
Err(PromotionError::IncorrectAmountOfParticipants {
t: params.n(),
n: params.n(),
amount: proofs.len() + 1,
})?;
}
for i in proofs.keys().copied() {
if u16::from(i) > params.n() {
Err(PromotionError::InvalidParticipant { n: params.n(), participant: i })?;
}
}
let mut verification_shares = HashMap::new();
verification_shares.insert(params.i, self.proof.share);
for (i, proof) in proofs {
let i = *i;
verification_shares.insert(params.i(), self.proof.share);
for i in 1 ..= params.n() {
let i = Participant::new(i).unwrap();
if i == params.i() {
continue;
}
let proof = proofs.get(&i).unwrap();
proof
.proof
.verify(
&mut transcript(&self.base.core.group_key(), i),
&mut transcript(&self.base.original_group_key(), i),
&[C1::generator(), C2::generator()],
&[original_shares[&i], proof.share],
&[self.base.original_verification_share(i), proof.share],
)
.map_err(|_| DkgError::InvalidCommitments(i))?;
.map_err(|_| PromotionError::InvalidProof(i))?;
verification_shares.insert(i, proof.share);
}
Ok(ThresholdKeys {
core: Arc::new(ThresholdCore::new(
Ok(
ThresholdKeys::new(
params,
self.base.core.interpolation.clone(),
self.base.secret_share().clone(),
self.base.interpolation().clone(),
self.base.original_secret_share().clone(),
verification_shares,
)),
offset: None,
})
)
.unwrap(),
)
}
}

View File

@@ -0,0 +1,113 @@
use core::marker::PhantomData;
use std::collections::HashMap;
use zeroize::{Zeroize, Zeroizing};
use rand_core::OsRng;
use dalek_ff_group::Ristretto;
use ciphersuite::{
group::{ff::Field, Group},
Ciphersuite,
};
use dkg::*;
use dkg_recovery::recover_key;
use crate::{GeneratorPromotion, GeneratorProof};
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
struct AltGenerator<C: Ciphersuite> {
_curve: PhantomData<C>,
}
impl<C: Ciphersuite> Ciphersuite for AltGenerator<C> {
type F = C::F;
type G = C::G;
type H = C::H;
const ID: &'static [u8] = b"Alternate Ciphersuite";
fn generator() -> Self::G {
C::G::generator() * <C as Ciphersuite>::hash_to_F(b"DKG Promotion Test", b"generator")
}
fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
<C as Ciphersuite>::hash_to_F(dst, data)
}
}
/// Clone a map without a specific value.
pub fn clone_without<K: Clone + core::cmp::Eq + core::hash::Hash, V: Clone>(
map: &HashMap<K, V>,
without: &K,
) -> HashMap<K, V> {
let mut res = map.clone();
res.remove(without).unwrap();
res
}
// Test promotion of threshold keys to another generator
#[test]
fn test_generator_promotion() {
// Generate a set of `ThresholdKeys`
const PARTICIPANTS: u16 = 5;
let keys: [ThresholdKeys<_>; PARTICIPANTS as usize] = {
let shares: [<Ristretto as Ciphersuite>::F; PARTICIPANTS as usize] =
core::array::from_fn(|_| <Ristretto as Ciphersuite>::F::random(&mut OsRng));
let verification_shares = (0 .. PARTICIPANTS)
.map(|i| {
(
Participant::new(i + 1).unwrap(),
<Ristretto as Ciphersuite>::generator() * shares[usize::from(i)],
)
})
.collect::<HashMap<_, _>>();
core::array::from_fn(|i| {
ThresholdKeys::new(
ThresholdParams::new(
PARTICIPANTS,
PARTICIPANTS,
Participant::new(u16::try_from(i + 1).unwrap()).unwrap(),
)
.unwrap(),
Interpolation::Constant(vec![<Ristretto as Ciphersuite>::F::ONE; PARTICIPANTS as usize]),
Zeroizing::new(shares[i]),
verification_shares.clone(),
)
.unwrap()
})
};
// Perform the promotion
let mut promotions = HashMap::new();
let mut proofs = HashMap::new();
for keys in &keys {
let i = keys.params().i();
let (promotion, proof) =
GeneratorPromotion::<_, AltGenerator<Ristretto>>::promote(&mut OsRng, keys.clone());
promotions.insert(i, promotion);
proofs.insert(
i,
GeneratorProof::<Ristretto>::read::<&[u8]>(&mut proof.serialize().as_ref()).unwrap(),
);
}
// Complete the promotion, and verify it worked
let new_group_key = AltGenerator::<Ristretto>::generator() * *recover_key(&keys).unwrap();
for (i, promoting) in promotions.drain() {
let promoted = promoting.complete(&clone_without(&proofs, &i)).unwrap();
assert_eq!(keys[usize::from(u16::from(i) - 1)].params(), promoted.params());
assert_eq!(
keys[usize::from(u16::from(i) - 1)].original_secret_share(),
promoted.original_secret_share()
);
assert_eq!(new_group_key, promoted.group_key());
for l in 0 .. PARTICIPANTS {
let verification_share =
promoted.original_verification_share(Participant::new(l + 1).unwrap());
assert_eq!(
AltGenerator::<Ristretto>::generator() * **keys[usize::from(l)].original_secret_share(),
verification_share
);
}
}
}

View File

@@ -0,0 +1,34 @@
[package]
name = "dkg-recovery"
version = "0.6.0"
description = "Recover a secret-shared key from a collection of dkg::ThresholdKeys"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/recovery"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
rust-version = "1.66"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
zeroize = { version = "^1.5", default-features = false }
thiserror = { version = "2", default-features = false }
ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false }
dkg = { path = "../", version = "0.6", default-features = false }
[features]
std = [
"zeroize/std",
"thiserror/std",
"ciphersuite/std",
"dkg/std",
]
default = ["std"]

View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2021-2025 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,14 @@
# Distributed Key Generation - Recovery
A utility function to recover a key from its secret shares.
Keys likely SHOULD NOT ever be recovered, making this primarily intended for
testing purposes. Instead, the shares of the key should be used to produce
shares for the desired action, allowing use of the key without ever
reconstructing it.
Before being smashed, this crate was [audited by Cypher Stack in March 2023](
https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf
), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](
https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06
). Any subsequent changes have not undergone auditing.

View File

@@ -0,0 +1,85 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![no_std]
use core::ops::{Deref, DerefMut};
extern crate alloc;
use alloc::vec::Vec;
use zeroize::Zeroizing;
use ciphersuite::Ciphersuite;
pub use dkg::*;
/// Errors encountered when recovering a secret-shared key from a collection of
/// `dkg::ThresholdKeys`.
#[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)]
pub enum RecoveryError {
  /// No keys were provided.
  #[error("no keys provided")]
  NoKeysProvided,
  /// Not enough keys were provided.
  #[error("not enough keys provided (threshold required {required}, provided {provided})")]
  NotEnoughKeysProvided {
    /// The threshold of keys required for recovery.
    required: u16,
    /// The amount of keys actually provided.
    provided: usize
  },
  /// The keys had inconsistent parameters.
  #[error("keys had inconsistent parameters")]
  InconsistentParameters,
  /// The keys are from distinct secret-sharing sessions or otherwise corrupt.
  #[error("recovery failed")]
  Failure,
  /// An error propagated from the underlying `dkg` crate.
  #[error("error from dkg ({0})")]
  DkgError(DkgError),
}
/// Recover a shared secret from a collection of `dkg::ThresholdKeys`.
///
/// On success, the returned scalar reproduces the group key: `C::generator() * result` equals
/// `keys[0].group_key()` (with the keys' current scalar/offset applied, as those are part of the
/// consistency check below).
///
/// # Errors
/// - `RecoveryError::NoKeysProvided` if `keys` is empty.
/// - `RecoveryError::NotEnoughKeysProvided` if fewer keys than the threshold were provided.
/// - `RecoveryError::InconsistentParameters` if the keys disagree on `t`, `n`, the group key, or
///   their current scalar/offset.
/// - `RecoveryError::DkgError` if obtaining a view of any keys fails (propagated from
///   `ThresholdKeys::view`, e.g. for a duplicated participant index).
/// - `RecoveryError::Failure` if the summed shares don't reproduce the group key.
pub fn recover_key<C: Ciphersuite>(
  keys: &[ThresholdKeys<C>],
) -> Result<Zeroizing<C::F>, RecoveryError> {
  // The signing set is exactly the participants whose keys were provided
  let included = keys.iter().map(|keys| keys.params().i()).collect::<Vec<_>>();
  let keys_len = keys.len();
  let mut keys = keys.iter();
  let first_keys = keys.next().ok_or(RecoveryError::NoKeysProvided)?;
  // Require at least the threshold of keys
  {
    let t = first_keys.params().t();
    if keys_len < usize::from(t) {
      Err(RecoveryError::NotEnoughKeysProvided { required: t, provided: keys_len })?;
    }
  }
  // Require every set of keys to agree on the multisig's parameters and linear combination
  {
    let first_params = (
      first_keys.params().t(),
      first_keys.params().n(),
      first_keys.group_key(),
      first_keys.current_scalar(),
      first_keys.current_offset(),
    );
    // `keys` was already advanced past `first_keys`, so this only visits the remaining keys
    for keys in keys.clone() {
      let params = (
        keys.params().t(),
        keys.params().n(),
        keys.group_key(),
        keys.current_scalar(),
        keys.current_offset(),
      );
      if params != first_params {
        Err(RecoveryError::InconsistentParameters)?;
      }
    }
  }
  // Sum each view's secret share (each view is taken for the full `included` set, so the shares
  // are already interpolated for this signing set)
  let mut res: Zeroizing<_> =
    first_keys.view(included.clone()).map_err(RecoveryError::DkgError)?.secret_share().clone();
  for keys in keys {
    *res.deref_mut() +=
      keys.view(included.clone()).map_err(RecoveryError::DkgError)?.secret_share().deref();
  }
  // Sanity check the recovered scalar against the group key; a mismatch means the keys were from
  // distinct secret-sharing sessions or otherwise corrupt
  if (C::generator() * res.deref()) != first_keys.group_key() {
    Err(RecoveryError::Failure)?;
  }
  Ok(res)
}

View File

@@ -2,43 +2,31 @@
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]
use core::fmt::{self, Debug};
use core::{
ops::Deref,
fmt::{self, Debug},
};
#[allow(unused_imports)]
use std_shims::prelude::*;
use std_shims::{sync::Arc, vec, vec::Vec, collections::HashMap, io};
use thiserror::Error;
use zeroize::{Zeroize, Zeroizing};
use zeroize::Zeroize;
/// MuSig-style key aggregation.
pub mod musig;
/// Encryption types and utilities used to secure DKG messages.
#[cfg(feature = "std")]
pub mod encryption;
/// The PedPoP distributed key generation protocol described in the
/// [FROST paper](https://eprint.iacr.org/2020/852), augmented to be verifiable.
#[cfg(feature = "std")]
pub mod pedpop;
/// The one-round DKG described in the [eVRF paper](https://eprint.iacr.org/2024/397).
#[cfg(all(feature = "std", feature = "evrf"))]
pub mod evrf;
/// Promote keys between ciphersuites.
#[cfg(feature = "std")]
pub mod promote;
/// Tests for application-provided curves and algorithms.
#[cfg(any(test, feature = "tests"))]
pub mod tests;
use ciphersuite::{
group::{
ff::{Field, PrimeField},
GroupEncoding,
},
Ciphersuite,
};
/// The ID of a participant, defined as a non-zero u16.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Zeroize)]
#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize))]
pub struct Participant(pub(crate) u16);
pub struct Participant(u16);
impl Participant {
/// Create a new Participant identifier from a u16.
pub fn new(i: u16) -> Option<Participant> {
pub const fn new(i: u16) -> Option<Participant> {
if i == 0 {
None
} else {
@@ -48,7 +36,7 @@ impl Participant {
/// Convert a Participant identifier to bytes.
#[allow(clippy::wrong_self_convention)]
pub fn to_bytes(&self) -> [u8; 2] {
pub const fn to_bytes(&self) -> [u8; 2] {
self.0.to_le_bytes()
}
}
@@ -65,156 +53,177 @@ impl fmt::Display for Participant {
}
}
/// Various errors possible during key generation.
#[derive(Clone, PartialEq, Eq, Debug, Error)]
pub enum DkgError<B: Clone + PartialEq + Eq + Debug> {
/// Errors encountered when working with threshold keys.
#[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)]
pub enum DkgError {
/// A parameter was zero.
#[cfg_attr(feature = "std", error("a parameter was 0 (threshold {0}, participants {1})"))]
ZeroParameter(u16, u16),
#[error("a parameter was 0 (threshold {t}, participants {n})")]
ZeroParameter {
/// The specified threshold.
t: u16,
/// The specified total amount of participants.
n: u16,
},
/// The threshold exceeded the amount of participants.
#[cfg_attr(feature = "std", error("invalid threshold (max {1}, got {0})"))]
InvalidThreshold(u16, u16),
#[error("invalid threshold (max {n}, got {t})")]
InvalidThreshold {
/// The specified threshold.
t: u16,
/// The specified total amount of participants.
n: u16,
},
/// Invalid participant identifier.
#[cfg_attr(
feature = "std",
error("invalid participant (0 < participant <= {0}, yet participant is {1})")
)]
InvalidParticipant(u16, Participant),
#[error("invalid participant (1 <= participant <= {n}, yet participant is {participant})")]
InvalidParticipant {
/// The total amount of participants.
n: u16,
/// The specified participant.
participant: Participant,
},
/// An incorrect amount of participants was specified.
#[error("incorrect amount of verification shares (n = {n} yet {shares} provided)")]
IncorrectAmountOfVerificationShares {
/// The amount of participants.
n: u16,
/// The amount of shares provided.
shares: usize,
},
/// An inapplicable method of interpolation was specified.
#[error("inapplicable method of interpolation ({0})")]
InapplicableInterpolation(&'static str),
/// An incorrect amount of participants was specified.
#[error("incorrect amount of participants. {t} <= amount <= {n}, yet amount is {amount}")]
IncorrectAmountOfParticipants {
/// The threshold required.
t: u16,
/// The total amount of participants.
n: u16,
/// The amount of participants specified.
amount: usize,
},
/// Invalid signing set.
#[cfg_attr(feature = "std", error("invalid signing set"))]
InvalidSigningSet,
/// Invalid amount of participants.
#[cfg_attr(feature = "std", error("invalid participant quantity (expected {0}, got {1})"))]
InvalidParticipantQuantity(usize, usize),
/// A participant was duplicated.
#[cfg_attr(feature = "std", error("duplicated participant ({0})"))]
#[error("a participant ({0}) was duplicated")]
DuplicatedParticipant(Participant),
/// A participant was missing.
#[cfg_attr(feature = "std", error("missing participant {0}"))]
MissingParticipant(Participant),
/// An invalid proof of knowledge was provided.
#[cfg_attr(feature = "std", error("invalid proof of knowledge (participant {0})"))]
InvalidCommitments(Participant),
/// An invalid DKG share was provided.
#[cfg_attr(feature = "std", error("invalid share (participant {participant}, blame {blame})"))]
InvalidShare { participant: Participant, blame: Option<B> },
/// Not participating in declared signing set.
#[error("not participating in declared signing set")]
NotParticipating,
}
#[cfg(feature = "std")]
mod lib {
pub use super::*;
use core::ops::Deref;
use std::{io, sync::Arc, collections::HashMap};
use zeroize::Zeroizing;
use ciphersuite::{
group::{
ff::{Field, PrimeField},
GroupEncoding,
},
Ciphersuite,
};
#[cfg(feature = "borsh")]
impl borsh::BorshDeserialize for Participant {
// Manually implements BorshDeserialize so we can enforce it's a valid index
#[cfg(feature = "borsh")]
impl borsh::BorshDeserialize for Participant {
fn deserialize_reader<R: io::Read>(reader: &mut R) -> io::Result<Self> {
Participant::new(u16::deserialize_reader(reader)?)
.ok_or_else(|| io::Error::other("invalid participant"))
}
}
}
// Validate a map of values to have the expected included participants
pub(crate) fn validate_map<T, B: Clone + PartialEq + Eq + Debug>(
map: &HashMap<Participant, T>,
included: &[Participant],
ours: Participant,
) -> Result<(), DkgError<B>> {
if (map.len() + 1) != included.len() {
Err(DkgError::InvalidParticipantQuantity(included.len(), map.len() + 1))?;
}
for included in included {
if *included == ours {
if map.contains_key(included) {
Err(DkgError::DuplicatedParticipant(*included))?;
}
continue;
}
if !map.contains_key(included) {
Err(DkgError::MissingParticipant(*included))?;
}
}
Ok(())
}
/// Parameters for a multisig.
// These fields should not be made public as they should be static
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize))]
pub struct ThresholdParams {
/// Parameters for a multisig.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize))]
pub struct ThresholdParams {
/// Participants needed to sign on behalf of the group.
pub(crate) t: u16,
t: u16,
/// Amount of participants.
pub(crate) n: u16,
n: u16,
/// Index of the participant being acted for.
pub(crate) i: Participant,
i: Participant,
}
/// An iterator over all participant indexes.
struct AllParticipantIndexes {
i: u16,
n: u16,
}
impl Iterator for AllParticipantIndexes {
type Item = Participant;
fn next(&mut self) -> Option<Participant> {
if self.i > self.n {
None?;
}
let res = Participant::new(self.i).unwrap();
// If i == n == u16::MAX, we cause `i > n` by setting `n` to `0` so the iterator becomes empty
if self.i == u16::MAX {
self.n = 0;
} else {
self.i += 1;
}
impl ThresholdParams {
Some(res)
}
}
impl ThresholdParams {
/// Create a new set of parameters.
pub fn new(t: u16, n: u16, i: Participant) -> Result<ThresholdParams, DkgError<()>> {
pub const fn new(t: u16, n: u16, i: Participant) -> Result<ThresholdParams, DkgError> {
if (t == 0) || (n == 0) {
Err(DkgError::ZeroParameter(t, n))?;
return Err(DkgError::ZeroParameter { t, n });
}
if t > n {
Err(DkgError::InvalidThreshold(t, n))?;
return Err(DkgError::InvalidThreshold { t, n });
}
if u16::from(i) > n {
Err(DkgError::InvalidParticipant(n, i))?;
if i.0 > n {
return Err(DkgError::InvalidParticipant { n, participant: i });
}
Ok(ThresholdParams { t, n, i })
}
/// Return the threshold for a multisig with these parameters.
pub fn t(&self) -> u16 {
/// The threshold for a multisig with these parameters.
pub const fn t(&self) -> u16 {
self.t
}
/// Return the amount of participants for a multisig with these parameters.
pub fn n(&self) -> u16 {
/// The amount of participants for a multisig with these parameters.
pub const fn n(&self) -> u16 {
self.n
}
/// Return the participant index of the share with these parameters.
pub fn i(&self) -> Participant {
/// The participant index of the share with these parameters.
pub const fn i(&self) -> Participant {
self.i
}
}
#[cfg(feature = "borsh")]
impl borsh::BorshDeserialize for ThresholdParams {
/// An iterator over all participant indexes.
pub fn all_participant_indexes(&self) -> impl Iterator<Item = Participant> {
AllParticipantIndexes { i: 1, n: self.n }
}
}
#[cfg(feature = "borsh")]
impl borsh::BorshDeserialize for ThresholdParams {
fn deserialize_reader<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let t = u16::deserialize_reader(reader)?;
let n = u16::deserialize_reader(reader)?;
let i = Participant::deserialize_reader(reader)?;
ThresholdParams::new(t, n, i).map_err(|e| io::Error::other(format!("{e:?}")))
}
}
}
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub(crate) enum Interpolation<F: Zeroize + PrimeField> {
/// A method of interpolation.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub enum Interpolation<F: Zeroize + PrimeField> {
/// A list of constant coefficients, one for each of the secret key shares.
/*
There's no benefit to using a full linear combination here, as the additive term would have
an entirely known evaluation with a fixed, public coefficient of `1`. Accordingly, the entire
key can simply be offset with the additive term to achieve the same effect.
*/
Constant(Vec<F>),
/// Lagrange interpolation.
Lagrange,
}
}
impl<F: Zeroize + PrimeField> Interpolation<F> {
pub(crate) fn interpolation_factor(&self, i: Participant, included: &[Participant]) -> F {
impl<F: Zeroize + PrimeField> Interpolation<F> {
/// The interpolation factor for this participant, within this signing set.
fn interpolation_factor(&self, i: Participant, included: &[Participant]) -> F {
match self {
Interpolation::Constant(c) => c[usize::from(u16::from(i) - 1)],
Interpolation::Lagrange => {
@@ -238,89 +247,301 @@ mod lib {
}
}
}
}
}
/// Keys and verification shares generated by a DKG.
/// Called core as they're expected to be wrapped into an Arc before usage in various operations.
#[derive(Clone, PartialEq, Eq)]
pub struct ThresholdCore<C: Ciphersuite> {
/// Threshold Parameters.
pub(crate) params: ThresholdParams,
/// The interpolation method used.
pub(crate) interpolation: Interpolation<C::F>,
/// A key share for a thresholdized secret key.
///
/// This is the 'core' structure containing all relevant data, expected to be wrapped into an
/// heap-allocated pointer to minimize copies on the stack (`ThresholdKeys`, the publicly exposed
/// type).
#[derive(Clone, PartialEq, Eq)]
struct ThresholdCore<C: Ciphersuite> {
params: ThresholdParams,
group_key: C::G,
verification_shares: HashMap<Participant, C::G>,
interpolation: Interpolation<C::F>,
secret_share: Zeroizing<C::F>,
}
/// Secret share key.
pub(crate) secret_share: Zeroizing<C::F>,
/// Group key.
pub(crate) group_key: C::G,
/// Verification shares.
pub(crate) verification_shares: HashMap<Participant, C::G>,
}
impl<C: Ciphersuite> fmt::Debug for ThresholdCore<C> {
impl<C: Ciphersuite> fmt::Debug for ThresholdCore<C> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt
.debug_struct("ThresholdCore")
.field("params", &self.params)
.field("interpolation", &self.interpolation)
.field("group_key", &self.group_key)
.field("verification_shares", &self.verification_shares)
.field("interpolation", &self.interpolation)
.finish_non_exhaustive()
}
}
}
impl<C: Ciphersuite> Zeroize for ThresholdCore<C> {
impl<C: Ciphersuite> Zeroize for ThresholdCore<C> {
fn zeroize(&mut self) {
self.params.zeroize();
self.interpolation.zeroize();
self.secret_share.zeroize();
self.group_key.zeroize();
for share in self.verification_shares.values_mut() {
share.zeroize();
}
self.interpolation.zeroize();
self.secret_share.zeroize();
}
}
}
impl<C: Ciphersuite> ThresholdCore<C> {
pub(crate) fn new(
/// Threshold keys usable for signing.
#[derive(Clone, Debug, Zeroize)]
pub struct ThresholdKeys<C: Ciphersuite> {
// Core keys.
#[zeroize(skip)]
core: Arc<Zeroizing<ThresholdCore<C>>>,
// Scalar applied to these keys.
scalar: C::F,
// Offset applied to these keys.
offset: C::F,
}
/// View of keys, interpolated and with the expected linear combination taken for usage.
#[derive(Clone)]
pub struct ThresholdView<C: Ciphersuite> {
interpolation: Interpolation<C::F>,
scalar: C::F,
offset: C::F,
group_key: C::G,
included: Vec<Participant>,
secret_share: Zeroizing<C::F>,
original_verification_shares: HashMap<Participant, C::G>,
verification_shares: HashMap<Participant, C::G>,
}
impl<C: Ciphersuite> fmt::Debug for ThresholdView<C> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt
.debug_struct("ThresholdView")
.field("interpolation", &self.interpolation)
.field("scalar", &self.scalar)
.field("offset", &self.offset)
.field("group_key", &self.group_key)
.field("included", &self.included)
.field("original_verification_shares", &self.original_verification_shares)
.field("verification_shares", &self.verification_shares)
.finish_non_exhaustive()
}
}
impl<C: Ciphersuite> Zeroize for ThresholdView<C> {
fn zeroize(&mut self) {
self.scalar.zeroize();
self.offset.zeroize();
self.group_key.zeroize();
self.included.zeroize();
self.secret_share.zeroize();
for share in self.original_verification_shares.values_mut() {
share.zeroize();
}
for share in self.verification_shares.values_mut() {
share.zeroize();
}
}
}
impl<C: Ciphersuite> ThresholdKeys<C> {
/// Create a new set of ThresholdKeys.
pub fn new(
params: ThresholdParams,
interpolation: Interpolation<C::F>,
secret_share: Zeroizing<C::F>,
verification_shares: HashMap<Participant, C::G>,
) -> ThresholdCore<C> {
) -> Result<ThresholdKeys<C>, DkgError> {
if verification_shares.len() != usize::from(params.n()) {
Err(DkgError::IncorrectAmountOfVerificationShares {
n: params.n(),
shares: verification_shares.len(),
})?;
}
for participant in verification_shares.keys().copied() {
if u16::from(participant) > params.n() {
Err(DkgError::InvalidParticipant { n: params.n(), participant })?;
}
}
match &interpolation {
Interpolation::Constant(_) => {
if params.t() != params.n() {
Err(DkgError::InapplicableInterpolation("constant interpolation for keys where t != n"))?;
}
}
Interpolation::Lagrange => {}
}
let t = (1 ..= params.t()).map(Participant).collect::<Vec<_>>();
let group_key =
t.iter().map(|i| verification_shares[i] * interpolation.interpolation_factor(*i, &t)).sum();
ThresholdCore { params, interpolation, secret_share, group_key, verification_shares }
Ok(ThresholdKeys {
core: Arc::new(Zeroizing::new(ThresholdCore {
params,
interpolation,
secret_share,
group_key,
verification_shares,
})),
scalar: C::F::ONE,
offset: C::F::ZERO,
})
}
/// Parameters for these keys.
/// Scale the keys by a given scalar to allow for various account and privacy schemes.
///
/// This scalar is ephemeral and will not be included when these keys are serialized. The
/// scalar is applied on top of any already-existing scalar/offset.
///
/// Returns `None` if the scalar is equal to `0`.
#[must_use]
pub fn scale(mut self, scalar: C::F) -> Option<ThresholdKeys<C>> {
if bool::from(scalar.is_zero()) {
None?;
}
self.scalar *= scalar;
self.offset *= scalar;
Some(self)
}
/// Offset the keys by a given scalar to allow for various account and privacy schemes.
///
/// This offset is ephemeral and will not be included when these keys are serialized. The
/// offset is applied on top of any already-existing scalar/offset.
#[must_use]
pub fn offset(mut self, offset: C::F) -> ThresholdKeys<C> {
self.offset += offset;
self
}
/// Return the current scalar in-use for these keys.
pub fn current_scalar(&self) -> C::F {
self.scalar
}
/// Return the current offset in-use for these keys.
pub fn current_offset(&self) -> C::F {
self.offset
}
/// Return the parameters for these keys.
pub fn params(&self) -> ThresholdParams {
self.params
self.core.params
}
/// Secret share for these keys.
pub fn secret_share(&self) -> &Zeroizing<C::F> {
&self.secret_share
/// Return the original group key, without any tweaks applied.
pub fn original_group_key(&self) -> C::G {
self.core.group_key
}
/// Group key for these keys.
/// Return the interpolation method for these keys.
pub fn interpolation(&self) -> &Interpolation<C::F> {
&self.core.interpolation
}
/// Return the group key, with the expected linear combination taken.
pub fn group_key(&self) -> C::G {
self.group_key
(self.core.group_key * self.scalar) + (C::generator() * self.offset)
}
pub(crate) fn verification_shares(&self) -> HashMap<Participant, C::G> {
self.verification_shares.clone()
/// Return the underlying secret share for these keys, without any tweaks applied.
pub fn original_secret_share(&self) -> &Zeroizing<C::F> {
&self.core.secret_share
}
/// Write these keys to a type satisfying std::io::Write.
/// Return the original (untweaked) verification share for the specified participant.
///
/// This will panic if the participant index is invalid for these keys.
pub fn original_verification_share(&self, l: Participant) -> C::G {
self.core.verification_shares[&l]
}
/// Obtain a view of these keys, interpolated for the specified signing set, with the specified
/// linear combination taken.
pub fn view(&self, mut included: Vec<Participant>) -> Result<ThresholdView<C>, DkgError> {
if (included.len() < self.params().t.into()) ||
(usize::from(self.params().n()) < included.len())
{
Err(DkgError::IncorrectAmountOfParticipants {
t: self.params().t,
n: self.params().n,
amount: included.len(),
})?;
}
included.sort();
{
let mut found = included[0] == self.params().i();
for i in 1 .. included.len() {
if included[i - 1] == included[i] {
Err(DkgError::DuplicatedParticipant(included[i]))?;
}
found |= included[i] == self.params().i();
}
if !found {
Err(DkgError::NotParticipating)?;
}
}
{
let last = *included.last().unwrap();
if u16::from(last) > self.params().n() {
Err(DkgError::InvalidParticipant { n: self.params().n(), participant: last })?;
}
}
// The interpolation occurs multiplicatively, letting us scale by the scalar now
let secret_share_scaled = Zeroizing::new(self.scalar * self.original_secret_share().deref());
let mut secret_share = Zeroizing::new(
self.core.interpolation.interpolation_factor(self.params().i(), &included) *
secret_share_scaled.deref(),
);
let mut verification_shares = HashMap::with_capacity(included.len());
for i in &included {
let verification_share = self.core.verification_shares[i];
let verification_share = verification_share *
self.scalar *
self.core.interpolation.interpolation_factor(*i, &included);
verification_shares.insert(*i, verification_share);
}
/*
The offset is included by adding it to the participant with the lowest ID.
This is done after interpolating to ensure, regardless of the method of interpolation, that
the method of interpolation does not scale the offset. For Lagrange interpolation, we could
add the offset to every key share before interpolating, yet for Constant interpolation, we
_have_ to add it as we do here (which also works even when we intend to perform Lagrange
interpolation).
*/
if included[0] == self.params().i() {
*secret_share += self.offset;
}
*verification_shares.get_mut(&included[0]).unwrap() += C::generator() * self.offset;
Ok(ThresholdView {
interpolation: self.core.interpolation.clone(),
scalar: self.scalar,
offset: self.offset,
group_key: self.group_key(),
secret_share,
original_verification_shares: self.core.verification_shares.clone(),
verification_shares,
included,
})
}
/// Write these keys to a type satisfying `std::io::Write`.
///
/// This will not include the ephemeral scalar/offset.
pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all(&u32::try_from(C::ID.len()).unwrap().to_le_bytes())?;
writer.write_all(C::ID)?;
writer.write_all(&self.params.t.to_le_bytes())?;
writer.write_all(&self.params.n.to_le_bytes())?;
writer.write_all(&self.params.i.to_bytes())?;
match &self.interpolation {
writer.write_all(&self.core.params.t.to_le_bytes())?;
writer.write_all(&self.core.params.n.to_le_bytes())?;
writer.write_all(&self.core.params.i.to_bytes())?;
match &self.core.interpolation {
Interpolation::Constant(c) => {
writer.write_all(&[0])?;
for c in c {
@@ -329,27 +550,30 @@ mod lib {
}
Interpolation::Lagrange => writer.write_all(&[1])?,
};
let mut share_bytes = self.secret_share.to_repr();
let mut share_bytes = self.core.secret_share.to_repr();
writer.write_all(share_bytes.as_ref())?;
share_bytes.as_mut().zeroize();
for l in 1 ..= self.params.n {
writer
.write_all(self.verification_shares[&Participant::new(l).unwrap()].to_bytes().as_ref())?;
for l in 1 ..= self.core.params.n {
writer.write_all(
self.core.verification_shares[&Participant::new(l).unwrap()].to_bytes().as_ref(),
)?;
}
Ok(())
}
/// Serialize these keys to a `Vec<u8>`.
///
/// This will not include the ephemeral scalar/offset.
pub fn serialize(&self) -> Zeroizing<Vec<u8>> {
let mut serialized = Zeroizing::new(vec![]);
self.write::<Vec<u8>>(serialized.as_mut()).unwrap();
serialized
}
/// Read keys from a type satisfying std::io::Read.
pub fn read<R: io::Read>(reader: &mut R) -> io::Result<ThresholdCore<C>> {
/// Read keys from a type satisfying `std::io::Read`.
pub fn read<R: io::Read>(reader: &mut R) -> io::Result<ThresholdKeys<C>> {
{
let different = || io::Error::other("deserializing ThresholdCore for another curve");
let different = || io::Error::other("deserializing ThresholdKeys for another curve");
let mut id_len = [0; 4];
reader.read_exact(&mut id_len)?;
@@ -398,166 +622,23 @@ mod lib {
verification_shares.insert(l, <C as Ciphersuite>::read_G(reader)?);
}
Ok(ThresholdCore::new(
ThresholdParams::new(t, n, i).map_err(|_| io::Error::other("invalid parameters"))?,
ThresholdKeys::new(
ThresholdParams::new(t, n, i).map_err(io::Error::other)?,
interpolation,
secret_share,
verification_shares,
))
)
.map_err(io::Error::other)
}
}
impl<C: Ciphersuite> ThresholdView<C> {
/// Return the scalar applied to this view.
pub fn scalar(&self) -> C::F {
self.scalar
}
/// Threshold keys usable for signing.
#[derive(Clone, Debug, Zeroize)]
pub struct ThresholdKeys<C: Ciphersuite> {
// Core keys.
// If this is the last reference, the underlying keys will be dropped. When that happens, the
// private key present within it will be zeroed out (as it's within Zeroizing).
#[zeroize(skip)]
pub(crate) core: Arc<ThresholdCore<C>>,
// Offset applied to these keys.
pub(crate) offset: Option<C::F>,
}
/// View of keys, interpolated and offset for usage.
#[derive(Clone)]
pub struct ThresholdView<C: Ciphersuite> {
interpolation: Interpolation<C::F>,
offset: C::F,
group_key: C::G,
included: Vec<Participant>,
secret_share: Zeroizing<C::F>,
original_verification_shares: HashMap<Participant, C::G>,
verification_shares: HashMap<Participant, C::G>,
}
impl<C: Ciphersuite> fmt::Debug for ThresholdView<C> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt
.debug_struct("ThresholdView")
.field("interpolation", &self.interpolation)
.field("offset", &self.offset)
.field("group_key", &self.group_key)
.field("included", &self.included)
.field("original_verification_shares", &self.original_verification_shares)
.field("verification_shares", &self.verification_shares)
.finish_non_exhaustive()
}
}
impl<C: Ciphersuite> Zeroize for ThresholdView<C> {
fn zeroize(&mut self) {
self.offset.zeroize();
self.group_key.zeroize();
self.included.zeroize();
self.secret_share.zeroize();
for share in self.original_verification_shares.values_mut() {
share.zeroize();
}
for share in self.verification_shares.values_mut() {
share.zeroize();
}
}
}
impl<C: Ciphersuite> ThresholdKeys<C> {
/// Create a new set of ThresholdKeys from a ThresholdCore.
pub fn new(core: ThresholdCore<C>) -> ThresholdKeys<C> {
ThresholdKeys { core: Arc::new(core), offset: None }
}
/// Offset the keys by a given scalar to allow for various account and privacy schemes.
///
/// This offset is ephemeral and will not be included when these keys are serialized. It also
/// accumulates, so calling offset multiple times will produce a offset of the offsets' sum.
#[must_use]
pub fn offset(&self, offset: C::F) -> ThresholdKeys<C> {
let mut res = self.clone();
// Carry any existing offset
// Enables schemes like Monero's subaddresses which have a per-subaddress offset and then a
// one-time-key offset
res.offset = Some(offset + res.offset.unwrap_or(C::F::ZERO));
res
}
/// Return the current offset in-use for these keys.
pub fn current_offset(&self) -> Option<C::F> {
self.offset
}
/// Return the parameters for these keys.
pub fn params(&self) -> ThresholdParams {
self.core.params
}
/// Return the secret share for these keys.
pub fn secret_share(&self) -> &Zeroizing<C::F> {
&self.core.secret_share
}
/// Return the group key, with any offset applied.
pub fn group_key(&self) -> C::G {
self.core.group_key + (C::generator() * self.offset.unwrap_or(C::F::ZERO))
}
/// Return all participants' verification shares without any offsetting.
pub(crate) fn verification_shares(&self) -> HashMap<Participant, C::G> {
self.core.verification_shares()
}
/// Serialize these keys to a `Vec<u8>`.
pub fn serialize(&self) -> Zeroizing<Vec<u8>> {
self.core.serialize()
}
/// Obtain a view of these keys, with any offset applied, interpolated for the specified signing
/// set.
pub fn view(&self, mut included: Vec<Participant>) -> Result<ThresholdView<C>, DkgError<()>> {
if (included.len() < self.params().t.into()) ||
(usize::from(self.params().n()) < included.len())
{
Err(DkgError::InvalidSigningSet)?;
}
included.sort();
let mut secret_share = Zeroizing::new(
self.core.interpolation.interpolation_factor(self.params().i(), &included) *
self.secret_share().deref(),
);
let mut verification_shares = self.verification_shares();
for (i, share) in &mut verification_shares {
*share *= self.core.interpolation.interpolation_factor(*i, &included);
}
// The offset is included by adding it to the participant with the lowest ID
let offset = self.offset.unwrap_or(C::F::ZERO);
if included[0] == self.params().i() {
*secret_share += offset;
}
*verification_shares.get_mut(&included[0]).unwrap() += C::generator() * offset;
Ok(ThresholdView {
interpolation: self.core.interpolation.clone(),
offset,
group_key: self.group_key(),
secret_share,
original_verification_shares: self.verification_shares(),
verification_shares,
included,
})
}
}
impl<C: Ciphersuite> From<ThresholdCore<C>> for ThresholdKeys<C> {
fn from(keys: ThresholdCore<C>) -> ThresholdKeys<C> {
ThresholdKeys::new(keys)
}
}
impl<C: Ciphersuite> ThresholdView<C> {
/// Return the offset for this view.
/// Return the offset applied to this view.
pub fn offset(&self) -> C::F {
self.offset
}
@@ -580,21 +661,23 @@ mod lib {
Some(self.interpolation.interpolation_factor(participant, &self.included))
}
/// Return the interpolated, offset secret share.
/// Return the interpolated secret share, with the expected linear combination taken.
pub fn secret_share(&self) -> &Zeroizing<C::F> {
&self.secret_share
}
/// Return the original verification share for the specified participant.
/// Return the original (untweaked) verification share for the specified participant.
///
/// This will panic if the participant index is invalid for these keys.
pub fn original_verification_share(&self, l: Participant) -> C::G {
self.original_verification_shares[&l]
}
/// Return the interpolated, offset verification share for the specified participant.
/// Return the interpolated verification share, with the expected linear combination taken,
/// for the specified participant.
///
/// This will panic if the participant was not included in the signing set.
pub fn verification_share(&self, l: Participant) -> C::G {
self.verification_shares[&l]
}
}
}
#[cfg(feature = "std")]
pub use lib::*;

View File

@@ -1,128 +0,0 @@
#[cfg(feature = "std")]
use core::ops::Deref;
use std_shims::{vec, vec::Vec, collections::HashSet};
#[cfg(feature = "std")]
use std_shims::collections::HashMap;
#[cfg(feature = "std")]
use zeroize::Zeroizing;
use ciphersuite::{
group::{Group, GroupEncoding},
Ciphersuite,
};
use crate::DkgError;
#[cfg(feature = "std")]
use crate::{Participant, ThresholdParams, Interpolation, ThresholdCore};
// Sanity check a signing set, returning the amount of participants within it.
//
// Errors if the set is empty, exceeds u16::MAX members, or contains a duplicated public key.
fn check_keys<C: Ciphersuite>(keys: &[C::G]) -> Result<u16, DkgError<()>> {
  // An empty signing set is invalid
  if keys.is_empty() {
    Err(DkgError::InvalidSigningSet)?;
  }
  // Too many signers
  let keys_len = u16::try_from(keys.len()).map_err(|_| DkgError::InvalidSigningSet)?;
  // Duplicated public keys, detected by inserting each key's encoding into a set
  let mut distinct = HashSet::new();
  for key in keys {
    if !distinct.insert(key.to_bytes().as_ref().to_vec()) {
      Err(DkgError::InvalidSigningSet)?;
    }
  }
  Ok(keys_len)
}
// Create the shared transcript all binding factors are derived from.
//
// The transcript is the length-prefixed context, the amount of keys, and each key's encoding.
//
// This function panics if called with keys whose length exceed 2**16.
// This is fine since it's internal and all calls occur after calling check_keys, which does check
// the keys' length.
fn binding_factor_transcript<C: Ciphersuite>(
  context: &[u8],
  keys: &[C::G],
) -> Result<Vec<u8>, DkgError<()>> {
  // The context is prefixed with its length as a single byte, hence the 255-byte bound
  let context_len = u8::try_from(context.len()).map_err(|_| DkgError::InvalidSigningSet)?;
  let mut transcript = vec![context_len];
  transcript.extend(context);
  // Infallible per the precondition that the keys' length is within u16
  transcript.extend(u16::try_from(keys.len()).unwrap().to_le_bytes());
  for key in keys {
    transcript.extend(key.to_bytes().as_ref());
  }
  Ok(transcript)
}
// Derive the binding factor for the key at (1-indexed) position i by appending the index to the
// shared transcript and hashing it to a scalar.
fn binding_factor<C: Ciphersuite>(mut transcript: Vec<u8>, i: u16) -> C::F {
  transcript.extend(i.to_le_bytes());
  C::hash_to_F(b"musig", &transcript)
}
/// The group key resulting from using this library's MuSig key gen.
///
/// This function will return an error if the context is longer than 255 bytes.
///
/// Creating an aggregate key with a list containing duplicated public keys will return an error.
pub fn musig_key<C: Ciphersuite>(context: &[u8], keys: &[C::G]) -> Result<C::G, DkgError<()>> {
  let keys_len = check_keys::<C>(keys)?;
  let transcript = binding_factor_transcript::<C>(context, keys)?;
  // The aggregate key is the sum of each key, scaled by its binding factor
  let mut res = C::G::identity();
  for i in 1 ..= keys_len {
    // TODO: Calculate this with a multiexp
    res += keys[usize::from(i - 1)] * binding_factor::<C>(transcript.clone(), i);
  }
  Ok(res)
}
/// An n-of-n non-interactive DKG which does not guarantee the usability of the resulting key.
///
/// Creating an aggregate key with a list containing duplicated public keys returns an error.
#[cfg(feature = "std")]
pub fn musig<C: Ciphersuite>(
  context: &[u8],
  private_key: &Zeroizing<C::F>,
  keys: &[C::G],
) -> Result<ThresholdCore<C>, DkgError<()>> {
  let keys_len = check_keys::<C>(keys)?;
  // Find our position within the signing set, by our public key
  let our_pub_key = C::generator() * private_key.deref();
  let Some(pos) = keys.iter().position(|key| *key == our_pub_key) else {
    // Not present in signing set
    Err(DkgError::InvalidSigningSet)?
  };
  // MuSig is an n-of-n scheme, hence t = n
  let params = ThresholdParams::new(
    keys_len,
    keys_len,
    // These errors shouldn't be possible, as pos is bounded to len - 1
    // Since len is prior guaranteed to be within u16::MAX, pos + 1 must also be
    Participant::new((pos + 1).try_into().map_err(|_| DkgError::InvalidSigningSet)?)
      .ok_or(DkgError::InvalidSigningSet)?,
  )?;
  // Calculate the binding factor per-key
  let transcript = binding_factor_transcript::<C>(context, keys)?;
  let mut binding = Vec::with_capacity(keys.len());
  for i in 1 ..= keys_len {
    binding.push(binding_factor::<C>(transcript.clone(), i));
  }
  // Our secret share is our private key
  let secret_share = private_key.clone();
  // Calculate verification shares
  let mut verification_shares = HashMap::new();
  let mut group_key = C::G::identity();
  for l in 1 ..= keys_len {
    let key = keys[usize::from(l) - 1];
    group_key += key * binding[usize::from(l - 1)];
    // These errors also shouldn't be possible, for the same reasons as documented above
    verification_shares.insert(Participant::new(l).ok_or(DkgError::InvalidSigningSet)?, key);
  }
  debug_assert_eq!(C::generator() * secret_share.deref(), verification_shares[&params.i()]);
  debug_assert_eq!(musig_key::<C>(context, keys).unwrap(), group_key);
  // The binding factors serve as constant interpolation weights for this keyset
  Ok(ThresholdCore::new(
    params,
    Interpolation::Constant(binding),
    secret_share,
    verification_shares,
  ))
}

View File

@@ -1,105 +0,0 @@
use core::ops::Deref;
use std::collections::HashMap;
use zeroize::Zeroizing;
use rand_core::{RngCore, CryptoRng};
use ciphersuite::{group::ff::Field, Ciphersuite};
use crate::{Participant, ThresholdCore, ThresholdKeys, musig::musig as musig_fn};
mod musig;
pub use musig::test_musig;
/// FROST key generation testing utility.
pub mod pedpop;
use pedpop::pedpop_gen;
// Promotion test.
mod promote;
use promote::test_generator_promotion;
#[cfg(all(test, feature = "evrf"))]
mod evrf;
/// Constant amount of participants to use when testing.
pub const PARTICIPANTS: u16 = 5;
/// Constant threshold of participants to use when testing.
pub const THRESHOLD: u16 = ((PARTICIPANTS * 2) / 3) + 1;
/// Clone a map without a specific value.
///
/// This will panic if the specified key is not present within the map, as that implies the
/// caller expected to remove an entry which doesn't exist.
pub fn clone_without<K: Clone + core::cmp::Eq + core::hash::Hash, V: Clone>(
  map: &HashMap<K, V>,
  without: &K,
) -> HashMap<K, V> {
  let mut res = map.clone();
  // expect, not unwrap, so an absent key yields an explanatory panic message
  res.remove(without).expect("cloned a map without a key it didn't contain");
  res
}
/// Recover the secret from a collection of keys.
///
/// This will panic if no keys, an insufficient amount of keys, or the wrong keys are provided.
pub fn recover_key<C: Ciphersuite>(keys: &HashMap<Participant, ThresholdKeys<C>>) -> C::F {
  let first = keys.values().next().expect("no keys provided");
  assert!(keys.len() >= first.params().t().into(), "not enough keys provided");
  let included = keys.keys().copied().collect::<Vec<_>>();
  // Sum every secret share, scaled by its interpolation factor for this exact signing set
  let group_private = keys.iter().fold(C::F::ZERO, |accum, (i, keys)| {
    accum +
      (first.core.interpolation.interpolation_factor(*i, &included) * keys.secret_share().deref())
  });
  // Sanity check the recovered scalar against the group key
  assert_eq!(C::generator() * group_private, first.group_key(), "failed to recover keys");
  group_private
}
/// Generate threshold keys for tests.
pub fn key_gen<R: RngCore + CryptoRng, C: Ciphersuite>(
  rng: &mut R,
) -> HashMap<Participant, ThresholdKeys<C>> {
  let res = pedpop_gen(rng)
    .drain()
    .map(|(i, core)| {
      // Also check the keys round-trip through serialization
      assert_eq!(
        &ThresholdCore::<C>::read::<&[u8]>(&mut core.serialize().as_ref()).unwrap(),
        &core
      );
      (i, ThresholdKeys::new(core))
    })
    .collect();
  // Sanity check the generated keys recover to their group key
  assert_eq!(C::generator() * recover_key(&res), res[&Participant(1)].group_key());
  res
}
/// Generate MuSig keys for tests.
pub fn musig_key_gen<R: RngCore + CryptoRng, C: Ciphersuite>(
  rng: &mut R,
) -> HashMap<Participant, ThresholdKeys<C>> {
  // Generate a random private key (and accordingly, public key) per participant
  let mut keys = vec![];
  let mut pub_keys = vec![];
  for _ in 0 .. PARTICIPANTS {
    let key = Zeroizing::new(C::F::random(&mut *rng));
    pub_keys.push(C::generator() * *key);
    keys.push(key);
  }
  // Have every participant perform the MuSig key generation with their own private key
  let mut res = HashMap::new();
  for key in keys {
    let these_keys = musig_fn::<C>(b"Test MuSig Key Gen", &key, &pub_keys).unwrap();
    res.insert(these_keys.params().i(), ThresholdKeys::new(these_keys));
  }
  // Sanity check the generated keys recover to their group key
  assert_eq!(C::generator() * recover_key(&res), res[&Participant(1)].group_key());
  res
}
/// Run the test suite on a ciphersuite.
pub fn test_ciphersuite<R: RngCore + CryptoRng, C: Ciphersuite>(rng: &mut R) {
  // key_gen internally asserts serialization, agreement, and recovery of the keys
  key_gen::<_, C>(rng);
  test_generator_promotion::<_, C>(rng);
}
#[test]
fn test_with_ristretto() {
  // Run the test suite against Ristretto as a concrete ciphersuite
  test_ciphersuite::<_, ciphersuite::Ristretto>(&mut rand_core::OsRng);
}

View File

@@ -1,61 +0,0 @@
use std::collections::HashMap;
use zeroize::Zeroizing;
use rand_core::{RngCore, CryptoRng};
use ciphersuite::{group::ff::Field, Ciphersuite};
use crate::{
ThresholdKeys,
musig::{musig_key, musig},
tests::{PARTICIPANTS, recover_key},
};
/// Tests MuSig key generation.
pub fn test_musig<R: RngCore + CryptoRng, C: Ciphersuite>(rng: &mut R) {
  // Generate a random keypair per participant
  let mut keys = vec![];
  let mut pub_keys = vec![];
  for _ in 0 .. PARTICIPANTS {
    let key = Zeroizing::new(C::F::random(&mut *rng));
    pub_keys.push(C::generator() * *key);
    keys.push(key);
  }
  const CONTEXT: &[u8] = b"MuSig Test";
  // Empty signing set
  musig::<C>(CONTEXT, &Zeroizing::new(C::F::ZERO), &[]).unwrap_err();
  // Signing set we're not part of
  musig::<C>(CONTEXT, &Zeroizing::new(C::F::ZERO), &[C::generator()]).unwrap_err();
  // Test with n keys
  {
    let mut created_keys = HashMap::new();
    let mut verification_shares = HashMap::new();
    let group_key = musig_key::<C>(CONTEXT, &pub_keys).unwrap();
    for (i, key) in keys.iter().enumerate() {
      let these_keys = musig::<C>(CONTEXT, key, &pub_keys).unwrap();
      // MuSig is n-of-n, and assigns indexes by position in the key list
      assert_eq!(these_keys.params().t(), PARTICIPANTS);
      assert_eq!(these_keys.params().n(), PARTICIPANTS);
      assert_eq!(usize::from(these_keys.params().i().0), i + 1);
      verification_shares
        .insert(these_keys.params().i(), C::generator() * **these_keys.secret_share());
      // Everyone must agree on the group key
      assert_eq!(these_keys.group_key(), group_key);
      created_keys.insert(these_keys.params().i(), ThresholdKeys::new(these_keys));
    }
    // Everyone must agree on the verification shares
    for keys in created_keys.values() {
      assert_eq!(keys.verification_shares(), verification_shares);
    }
    assert_eq!(C::generator() * recover_key(&created_keys), group_key);
  }
}
#[test]
fn musig_literal() {
  // Run the MuSig test against Ristretto as a concrete ciphersuite
  test_musig::<_, ciphersuite::Ristretto>(&mut rand_core::OsRng)
}

View File

@@ -1,331 +0,0 @@
use std::collections::HashMap;
use rand_core::{RngCore, CryptoRng};
use ciphersuite::Ciphersuite;
use crate::{
Participant, ThresholdParams, ThresholdCore,
pedpop::{Commitments, KeyGenMachine, SecretShare, KeyMachine},
encryption::{EncryptionKeyMessage, EncryptedMessage},
tests::{THRESHOLD, PARTICIPANTS, clone_without},
};
type PedPoPEncryptedMessage<C> = EncryptedMessage<C, SecretShare<<C as Ciphersuite>::F>>;
type PedPoPSecretShares<C> = HashMap<Participant, PedPoPEncryptedMessage<C>>;
const CONTEXT: [u8; 32] = *b"DKG Test Key Generation ";
// Commit, then return commitment messages, enc keys, and shares
#[allow(clippy::type_complexity)]
fn commit_enc_keys_and_shares<R: RngCore + CryptoRng, C: Ciphersuite>(
  rng: &mut R,
) -> (
  HashMap<Participant, KeyMachine<C>>,
  HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
  HashMap<Participant, C::G>,
  HashMap<Participant, PedPoPSecretShares<C>>,
) {
  let mut machines = HashMap::new();
  let mut commitments = HashMap::new();
  let mut enc_keys = HashMap::new();
  // Round one: every participant generates coefficients and broadcasts their commitments
  for i in (1 ..= PARTICIPANTS).map(Participant) {
    let params = ThresholdParams::new(THRESHOLD, PARTICIPANTS, i).unwrap();
    let machine = KeyGenMachine::<C>::new(params, CONTEXT);
    let (machine, these_commitments) = machine.generate_coefficients(rng);
    machines.insert(i, machine);
    // Round-trip the commitments through serialization, as a recipient would receive them
    commitments.insert(
      i,
      EncryptionKeyMessage::read::<&[u8]>(&mut these_commitments.serialize().as_ref(), params)
        .unwrap(),
    );
    enc_keys.insert(i, commitments[&i].enc_key());
  }
  // Round two: every participant generates their encrypted secret shares
  let mut secret_shares = HashMap::new();
  let machines = machines
    .drain()
    .map(|(l, machine)| {
      let (machine, mut shares) =
        machine.generate_secret_shares(rng, clone_without(&commitments, &l)).unwrap();
      // Round-trip each share through serialization as well
      let shares = shares
        .drain()
        .map(|(l, share)| {
          (
            l,
            EncryptedMessage::read::<&[u8]>(
              &mut share.serialize().as_ref(),
              // Only t/n actually matters, so hardcode i to 1 here
              ThresholdParams { t: THRESHOLD, n: PARTICIPANTS, i: Participant(1) },
            )
            .unwrap(),
          )
        })
        .collect::<HashMap<_, _>>();
      secret_shares.insert(l, shares);
      (l, machine)
    })
    .collect::<HashMap<_, _>>();
  (machines, commitments, enc_keys, secret_shares)
}
// Select, out of everyone's sent shares, the shares addressed to the specified recipient.
fn generate_secret_shares<C: Ciphersuite>(
  shares: &HashMap<Participant, PedPoPSecretShares<C>>,
  recipient: Participant,
) -> PedPoPSecretShares<C> {
  shares
    .iter()
    // No participant sends a share to themselves
    .filter(|(sender, _)| **sender != recipient)
    .map(|(sender, sent_shares)| (*sender, sent_shares[&recipient].clone()))
    .collect()
}
/// Fully perform the PedPoP key generation algorithm.
pub fn pedpop_gen<R: RngCore + CryptoRng, C: Ciphersuite>(
  rng: &mut R,
) -> HashMap<Participant, ThresholdCore<C>> {
  let (mut machines, _, _, secret_shares) = commit_enc_keys_and_shares::<_, C>(rng);
  let mut verification_shares = None;
  let mut group_key = None;
  // Have every machine process the shares sent to it, completing the protocol
  machines
    .drain()
    .map(|(i, machine)| {
      let our_secret_shares = generate_secret_shares(&secret_shares, i);
      let these_keys = machine.calculate_share(rng, our_secret_shares).unwrap().complete();
      // Verify the verification_shares are agreed upon
      if verification_shares.is_none() {
        verification_shares = Some(these_keys.verification_shares());
      }
      assert_eq!(verification_shares.as_ref().unwrap(), &these_keys.verification_shares());
      // Verify the group keys are agreed upon
      if group_key.is_none() {
        group_key = Some(these_keys.group_key());
      }
      assert_eq!(group_key.unwrap(), these_keys.group_key());
      (i, these_keys)
    })
    .collect::<HashMap<_, _>>()
}
#[cfg(test)]
mod literal {
use rand_core::OsRng;
use ciphersuite::Ristretto;
use crate::{
DkgError,
encryption::EncryptionKeyProof,
pedpop::{BlameMachine, AdditionalBlameMachine},
};
use super::*;
const ONE: Participant = Participant(1);
const TWO: Participant = Participant(2);
// Run the blame protocol on every machine, asserting participant ONE is consistently blamed.
fn test_blame(
  commitment_msgs: &HashMap<Participant, EncryptionKeyMessage<Ristretto, Commitments<Ristretto>>>,
  machines: Vec<BlameMachine<Ristretto>>,
  msg: &PedPoPEncryptedMessage<Ristretto>,
  blame: &Option<EncryptionKeyProof<Ristretto>>,
) {
  for machine in machines {
    let (additional, blamed) = machine.blame(ONE, TWO, msg.clone(), blame.clone());
    assert_eq!(blamed, ONE);
    // Verify additional blame also works
    assert_eq!(additional.blame(ONE, TWO, msg.clone(), blame.clone()), ONE);
    // Verify machines constructed with AdditionalBlameMachine::new work
    assert_eq!(
      AdditionalBlameMachine::new(CONTEXT, PARTICIPANTS, commitment_msgs.clone()).unwrap().blame(
        ONE,
        TWO,
        msg.clone(),
        blame.clone()
      ),
      ONE,
    );
  }
}
// TODO: Write a macro which expands to the following
// Checks an invalid proof-of-possession in an encrypted share is blamed on its sender.
#[test]
fn invalid_encryption_pop_blame() {
  let (mut machines, commitment_msgs, _, mut secret_shares) =
    commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
  // Mutate the PoP of the encrypted message from 1 to 2
  secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_pop();
  let mut blame = None;
  let machines = machines
    .drain()
    .filter_map(|(i, machine)| {
      let our_secret_shares = generate_secret_shares(&secret_shares, i);
      let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
      if i == TWO {
        assert_eq!(machine.err(), Some(DkgError::InvalidShare { participant: ONE, blame: None }));
        // Explicitly declare we have a blame object, which happens to be None since invalid PoP
        // is self-explainable
        blame = Some(None);
        None
      } else {
        Some(machine.unwrap())
      }
    })
    .collect::<Vec<_>>();
  test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap());
}
// Checks a malleated blame proof (invalidated ECDH key) turns the blame onto its creator.
#[test]
fn invalid_ecdh_blame() {
  let (mut machines, commitment_msgs, _, mut secret_shares) =
    commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
  // Mutate the share to trigger a blame event
  // Mutates from 2 to 1, as 1 is expected to end up malicious for test_blame to pass
  // While here, 2 is malicious, this is so 1 creates the blame proof
  // We then malleate 1's blame proof, so 1 ends up malicious
  // Doesn't simply invalidate the PoP as that won't have a blame statement
  // By mutating the encrypted data, we do ensure a blame statement is created
  secret_shares
    .get_mut(&TWO)
    .unwrap()
    .get_mut(&ONE)
    .unwrap()
    .invalidate_msg(&mut OsRng, CONTEXT, TWO);
  let mut blame = None;
  let machines = machines
    .drain()
    .filter_map(|(i, machine)| {
      let our_secret_shares = generate_secret_shares(&secret_shares, i);
      let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
      if i == ONE {
        // Capture the blame proof ONE creates against TWO
        blame = Some(match machine.err() {
          Some(DkgError::InvalidShare { participant: TWO, blame: Some(blame) }) => Some(blame),
          _ => panic!(),
        });
        None
      } else {
        Some(machine.unwrap())
      }
    })
    .collect::<Vec<_>>();
  // Malleate the proof's key so the proof itself (and accordingly ONE) is faulted
  blame.as_mut().unwrap().as_mut().unwrap().invalidate_key();
  test_blame(&commitment_msgs, machines, &secret_shares[&TWO][&ONE].clone(), &blame.unwrap());
}
// This should be largely equivalent to the prior test
// (here, the blame proof's DLEq is invalidated instead of its key)
#[test]
fn invalid_dleq_blame() {
  let (mut machines, commitment_msgs, _, mut secret_shares) =
    commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
  // Mutate the share TWO sent ONE so ONE creates a blame proof we can malleate
  secret_shares
    .get_mut(&TWO)
    .unwrap()
    .get_mut(&ONE)
    .unwrap()
    .invalidate_msg(&mut OsRng, CONTEXT, TWO);
  let mut blame = None;
  let machines = machines
    .drain()
    .filter_map(|(i, machine)| {
      let our_secret_shares = generate_secret_shares(&secret_shares, i);
      let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
      if i == ONE {
        blame = Some(match machine.err() {
          Some(DkgError::InvalidShare { participant: TWO, blame: Some(blame) }) => Some(blame),
          _ => panic!(),
        });
        None
      } else {
        Some(machine.unwrap())
      }
    })
    .collect::<Vec<_>>();
  // Malleate the proof's DLEq so the proof itself (and accordingly ONE) is faulted
  blame.as_mut().unwrap().as_mut().unwrap().invalidate_dleq();
  test_blame(&commitment_msgs, machines, &secret_shares[&TWO][&ONE].clone(), &blame.unwrap());
}
// Checks a share which deserializes invalidly is blamed on its sender.
#[test]
fn invalid_share_serialization_blame() {
  let (mut machines, commitment_msgs, enc_keys, mut secret_shares) =
    commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
  // Replace the share ONE sent TWO with an invalidly-serialized one
  secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_share_serialization(
    &mut OsRng,
    CONTEXT,
    ONE,
    enc_keys[&TWO],
  );
  let mut blame = None;
  let machines = machines
    .drain()
    .filter_map(|(i, machine)| {
      let our_secret_shares = generate_secret_shares(&secret_shares, i);
      let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
      if i == TWO {
        // TWO detects the invalid share and produces a blame proof against ONE
        blame = Some(match machine.err() {
          Some(DkgError::InvalidShare { participant: ONE, blame: Some(blame) }) => Some(blame),
          _ => panic!(),
        });
        None
      } else {
        Some(machine.unwrap())
      }
    })
    .collect::<Vec<_>>();
  test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap());
}
// Checks a share with an incorrect value is blamed on its sender.
#[test]
fn invalid_share_value_blame() {
  let (mut machines, commitment_msgs, enc_keys, mut secret_shares) =
    commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
  // Replace the share ONE sent TWO with one of an incorrect value
  secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_share_value(
    &mut OsRng,
    CONTEXT,
    ONE,
    enc_keys[&TWO],
  );
  let mut blame = None;
  let machines = machines
    .drain()
    .filter_map(|(i, machine)| {
      let our_secret_shares = generate_secret_shares(&secret_shares, i);
      let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
      if i == TWO {
        // TWO detects the invalid share and produces a blame proof against ONE
        blame = Some(match machine.err() {
          Some(DkgError::InvalidShare { participant: ONE, blame: Some(blame) }) => Some(blame),
          _ => panic!(),
        });
        None
      } else {
        Some(machine.unwrap())
      }
    })
    .collect::<Vec<_>>();
  test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap());
}
}

View File

@@ -1,66 +0,0 @@
use core::{marker::PhantomData, ops::Deref};
use std::collections::HashMap;
use rand_core::{RngCore, CryptoRng};
use zeroize::Zeroize;
use ciphersuite::{group::Group, Ciphersuite};
use crate::{
promote::{GeneratorPromotion, GeneratorProof},
tests::{clone_without, key_gen, recover_key},
};
// A ciphersuite sharing C's field, group, and hash, yet with an independently-derived generator.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
struct AltGenerator<C: Ciphersuite> {
  _curve: PhantomData<C>,
}
impl<C: Ciphersuite> Ciphersuite for AltGenerator<C> {
  type F = C::F;
  type G = C::G;
  type H = C::H;
  const ID: &'static [u8] = b"Alternate Ciphersuite";
  // A distinct generator, obtained by scaling C's generator by a hashed-to scalar
  fn generator() -> Self::G {
    C::G::generator() * <C as Ciphersuite>::hash_to_F(b"DKG Promotion Test", b"generator")
  }
  // Scalar reduction and hashing are forwarded to the underlying ciphersuite
  fn reduce_512(scalar: [u8; 64]) -> Self::F {
    <C as Ciphersuite>::reduce_512(scalar)
  }
  fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
    <C as Ciphersuite>::hash_to_F(dst, data)
  }
}
// Test promotion of threshold keys to another generator
pub(crate) fn test_generator_promotion<R: RngCore + CryptoRng, C: Ciphersuite>(rng: &mut R) {
  let keys = key_gen::<_, C>(&mut *rng);
  // Every participant promotes their keys, producing a proof for the rest
  let mut promotions = HashMap::new();
  let mut proofs = HashMap::new();
  for (i, keys) in &keys {
    let (promotion, proof) =
      GeneratorPromotion::<_, AltGenerator<C>>::promote(&mut *rng, keys.clone());
    promotions.insert(*i, promotion);
    // Also check the proof round-trips through serialization
    proofs.insert(*i, GeneratorProof::<C>::read::<&[u8]>(&mut proof.serialize().as_ref()).unwrap());
  }
  let new_group_key = AltGenerator::<C>::generator() * recover_key(&keys);
  for (i, promoting) in promotions.drain() {
    let promoted = promoting.complete(&clone_without(&proofs, &i)).unwrap();
    // Params and secret shares are preserved; the group/verification shares move generators
    assert_eq!(keys[&i].params(), promoted.params());
    assert_eq!(keys[&i].secret_share(), promoted.secret_share());
    assert_eq!(new_group_key, promoted.group_key());
    for (l, verification_share) in promoted.verification_shares() {
      assert_eq!(
        AltGenerator::<C>::generator() * keys[&l].secret_share().deref(),
        verification_share
      );
    }
  }
}

View File

@@ -1,13 +1,13 @@
[package]
name = "minimal-ed448"
version = "0.4.0"
version = "0.4.2"
description = "Unaudited, inefficient implementation of Ed448 in Rust"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ed448"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["ed448", "ff", "group"]
edition = "2021"
rust-version = "1.71"
rust-version = "1.65"
[package.metadata.docs.rs]
all-features = true
@@ -24,8 +24,11 @@ rand_core = { version = "0.6", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
subtle = { version = "^2.4", default-features = false }
sha3 = { version = "0.10", default-features = false }
ff = { version = "0.13", default-features = false, features = ["bits"] }
group = { version = "0.13", default-features = false }
ciphersuite = { path = "../ciphersuite", default-features = false }
generic-array = { version = "1", default-features = false }
crypto-bigint = { version = "0.5", default-features = false, features = ["zeroize"] }
@@ -38,5 +41,6 @@ rand_core = { version = "0.6", default-features = false, features = ["std"] }
ff-group-tests = { path = "../ff-group-tests" }
[features]
std = ["rand_core/std", "zeroize/std", "subtle/std", "ff/std"]
alloc = ["zeroize/alloc", "ciphersuite/alloc"]
std = ["alloc", "rand_core/std", "zeroize/std", "subtle/std", "sha3/std", "ff/std", "ciphersuite/std"]
default = ["std"]

View File

@@ -2,11 +2,19 @@ use zeroize::Zeroize;
// Use black_box when possible
#[rustversion::since(1.66)]
use core::hint::black_box;
#[rustversion::before(1.66)]
fn black_box<T>(val: T) -> T {
val
mod black_box {
pub(crate) fn black_box<T>(val: T) -> T {
#[allow(clippy::incompatible_msrv)]
core::hint::black_box(val)
}
}
#[rustversion::before(1.66)]
mod black_box {
pub(crate) fn black_box<T>(val: T) -> T {
val
}
}
use black_box::black_box;
pub(crate) fn u8_from_bool(bit_ref: &mut bool) -> u8 {
let bit_ref = black_box(bit_ref);

View File

@@ -1,15 +1,17 @@
use zeroize::Zeroize;
use digest::{
use sha3::{
digest::{
typenum::U114, core_api::BlockSizeUser, Update, Output, OutputSizeUser, FixedOutput,
ExtendableOutput, XofReader, HashMarker, Digest,
},
Shake256,
};
use sha3::Shake256;
use group::Group;
use minimal_ed448::{Scalar, Point};
use crate::{Scalar, Point};
use crate::Ciphersuite;
use ciphersuite::Ciphersuite;
/// Shake256, fixed to a 114-byte output, as used by Ed448.
#[derive(Clone, Default)]
@@ -66,12 +68,6 @@ impl Ciphersuite for Ed448 {
Point::generator()
}
fn reduce_512(mut scalar: [u8; 64]) -> Self::F {
let res = Self::hash_to_F(b"Ciphersuite-reduce_512", &scalar);
scalar.zeroize();
res
}
fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
Scalar::wide_reduce(Self::H::digest([dst, data].concat()).as_ref().try_into().unwrap())
}

View File

@@ -14,3 +14,6 @@ pub use field::FieldElement;
mod point;
pub use point::Point;
mod ciphersuite;
pub use crate::ciphersuite::Ed448;

View File

@@ -50,13 +50,25 @@ fn recover_x(y: FieldElement) -> CtOption<FieldElement> {
}
/// Ed448 point.
#[derive(Clone, Copy, Debug, Zeroize)]
#[derive(Clone, Copy, Debug)]
pub struct Point {
x: FieldElement,
y: FieldElement,
z: FieldElement,
}
impl Zeroize for Point {
fn zeroize(&mut self) {
self.x.zeroize();
self.y.zeroize();
self.z.zeroize();
let identity = Self::identity();
self.x = identity.x;
self.y = identity.y;
self.z = identity.z;
}
}
const G: Point = Point { x: G_X, y: G_Y, z: FieldElement::ONE };
impl ConstantTimeEq for Point {

View File

@@ -1,27 +0,0 @@
[package]
name = "generalized-bulletproofs-circuit-abstraction"
version = "0.1.0"
description = "An abstraction for arithmetic circuits over Generalized Bulletproofs"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/fcmps/circuit-abstraction"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["bulletproofs", "circuit"]
edition = "2021"
rust-version = "1.69"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[dependencies]
std-shims = { path = "../../../common/std-shims", version = "^0.1.1", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false }
generalized-bulletproofs = { path = "../generalized-bulletproofs", default-features = false }
[features]
std = ["std-shims/std", "zeroize/std", "ciphersuite/std", "generalized-bulletproofs/std"]
default = ["std"]

View File

@@ -1,3 +0,0 @@
# Generalized Bulletproofs Circuit Abstraction
A circuit abstraction around `generalized-bulletproofs`.

View File

@@ -1,39 +0,0 @@
use ciphersuite::{group::ff::Field, Ciphersuite};
use crate::*;
impl<C: Ciphersuite> Circuit<C> {
  /// Constrain two linear combinations to be equal.
  pub fn equality(&mut self, a: LinComb<C::F>, b: &LinComb<C::F>) {
    // a == b iff a - b == 0
    self.constrain_equal_to_zero(a - b);
  }
  /// Calculate (and constrain) the inverse of a value.
  ///
  /// A linear combination may optionally be passed as a constraint for the value being inverted.
  /// A reference to the inverted value and its inverse is returned.
  ///
  /// May panic if any linear combinations reference non-existent terms, the witness isn't provided
  /// when proving/is provided when verifying, or if the witness is 0 (and accordingly doesn't have
  /// an inverse).
  pub fn inverse(
    &mut self,
    lincomb: Option<LinComb<C::F>>,
    witness: Option<C::F>,
  ) -> (Variable, Variable) {
    let (l, r, o) = self.mul(lincomb, None, witness.map(|f| (f, f.invert().unwrap())));
    // The output of a value multiplied by its inverse is 1
    // Constrain `1 o - 1 = 0`
    self.constrain_equal_to_zero(LinComb::from(o).constant(-C::F::ONE));
    (l, r)
  }
  /// Constrain two linear combinations as inequal.
  ///
  /// May panic if any linear combinations reference non-existent terms.
  pub fn inequality(&mut self, a: LinComb<C::F>, b: &LinComb<C::F>, witness: Option<(C::F, C::F)>) {
    let l_constraint = a - b;
    // The existence of a multiplicative inverse means a-b != 0, which means a != b
    self.inverse(Some(l_constraint), witness.map(|(a, b)| a - b));
  }
}

View File

@@ -1,197 +0,0 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]
#![deny(missing_docs)]
#![allow(non_snake_case)]
use std_shims::{vec, vec::Vec};
use zeroize::{Zeroize, ZeroizeOnDrop};
use ciphersuite::{group::ff::Field, Ciphersuite};
use generalized_bulletproofs::{
ScalarVector, PedersenCommitment, PedersenVectorCommitment, ProofGenerators,
transcript::{Transcript as ProverTranscript, VerifierTranscript, Commitments},
arithmetic_circuit_proof::{AcError, ArithmeticCircuitStatement, ArithmeticCircuitWitness},
};
pub use generalized_bulletproofs::arithmetic_circuit_proof::{Variable, LinComb};
mod gadgets;
/// A trait for the transcript, whether proving or verifying, as necessary for sampling
/// challenges.
pub trait Transcript {
  /// Sample a challenge from the transcript.
  ///
  /// It is the caller's responsibility to have properly transcripted all variables prior to
  /// sampling this challenge.
  fn challenge<C: Ciphersuite>(&mut self) -> C::F;
  /// Sample a challenge as a byte array.
  ///
  /// It is the caller's responsibility to have properly transcripted all variables prior to
  /// sampling this challenge.
  fn challenge_bytes(&mut self) -> [u8; 64];
}
impl Transcript for ProverTranscript {
  // Forward to the prover transcript's inherent methods of the same names
  fn challenge<C: Ciphersuite>(&mut self) -> C::F {
    self.challenge::<C>()
  }
  fn challenge_bytes(&mut self) -> [u8; 64] {
    self.challenge_bytes()
  }
}
impl Transcript for VerifierTranscript<'_> {
  // Forward to the verifier transcript's inherent methods of the same names
  fn challenge<C: Ciphersuite>(&mut self) -> C::F {
    self.challenge::<C>()
  }
  fn challenge_bytes(&mut self) -> [u8; 64] {
    self.challenge_bytes()
  }
}
/// The witness for the satisfaction of this circuit.
// aL/aR are the left/right terms of the multiplication gates, with C/V the Pedersen (vector)
// commitments, all of which are passed into ArithmeticCircuitWitness when forming the statement.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
struct ProverData<C: Ciphersuite> {
  aL: Vec<C::F>,
  aR: Vec<C::F>,
  C: Vec<PedersenVectorCommitment<C>>,
  V: Vec<PedersenCommitment<C>>,
}
/// A struct representing a circuit.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Circuit<C: Ciphersuite> {
  // The amount of multiplication gates allocated so far
  muls: usize,
  // A series of linear combinations which must evaluate to 0.
  constraints: Vec<LinComb<C::F>>,
  // Set when proving, None when verifying
  prover: Option<ProverData<C>>,
}
impl<C: Ciphersuite> Circuit<C> {
  /// Returns the amount of multiplications used by this circuit.
  pub fn muls(&self) -> usize {
    self.muls
  }
  /// Create an instance to prove satisfaction of a circuit with.
  #[allow(clippy::type_complexity)]
  pub fn prove(
    vector_commitments: Vec<PedersenVectorCommitment<C>>,
    commitments: Vec<PedersenCommitment<C>>,
  ) -> Self {
    Self {
      muls: 0,
      constraints: vec![],
      prover: Some(ProverData { aL: vec![], aR: vec![], C: vector_commitments, V: commitments }),
    }
  }
  /// Create an instance to verify a proof with.
  pub fn verify() -> Self {
    Self { muls: 0, constraints: vec![], prover: None }
  }
  /// Evaluate a linear combination.
  ///
  /// Yields WL aL + WR aR + WO aO + WCG CG + WV V + c.
  ///
  /// May panic if the linear combination references non-existent terms.
  ///
  /// Returns None if not a prover.
  pub fn eval(&self, lincomb: &LinComb<C::F>) -> Option<C::F> {
    self.prover.as_ref().map(|prover| {
      let mut res = lincomb.c();
      for (index, weight) in lincomb.WL() {
        res += prover.aL[*index] * weight;
      }
      for (index, weight) in lincomb.WR() {
        res += prover.aR[*index] * weight;
      }
      for (index, weight) in lincomb.WO() {
        // aO terms aren't separately stored; each is the product of its aL, aR terms
        res += prover.aL[*index] * prover.aR[*index] * weight;
      }
      for (WCG, C) in lincomb.WCG().iter().zip(&prover.C) {
        for (j, weight) in WCG {
          res += C.g_values[*j] * weight;
        }
      }
      for (index, weight) in lincomb.WV() {
        res += prover.V[*index].value * weight;
      }
      res
    })
  }
  /// Multiply two values, optionally constrained, returning the constrainable left/right/out
  /// terms.
  ///
  /// May panic if any linear combinations reference non-existent terms or if the witness isn't
  /// provided when proving/is provided when verifying.
  pub fn mul(
    &mut self,
    a: Option<LinComb<C::F>>,
    b: Option<LinComb<C::F>>,
    witness: Option<(C::F, C::F)>,
  ) -> (Variable, Variable, Variable) {
    // Allocate the next multiplication gate's left/right/output variables
    let l = Variable::aL(self.muls);
    let r = Variable::aR(self.muls);
    let o = Variable::aO(self.muls);
    self.muls += 1;
    debug_assert_eq!(self.prover.is_some(), witness.is_some());
    if let Some(witness) = witness {
      let prover = self.prover.as_mut().unwrap();
      prover.aL.push(witness.0);
      prover.aR.push(witness.1);
    }
    // If linear combinations were provided, constrain them as equal to the left/right terms
    if let Some(a) = a {
      self.constrain_equal_to_zero(a.term(-C::F::ONE, l));
    }
    if let Some(b) = b {
      self.constrain_equal_to_zero(b.term(-C::F::ONE, r));
    }
    (l, r, o)
  }
  /// Constrain a linear combination to be equal to 0.
  ///
  /// May panic if the linear combination references non-existent terms.
  pub fn constrain_equal_to_zero(&mut self, lincomb: LinComb<C::F>) {
    self.constraints.push(lincomb);
  }
  /// Obtain the statement for this circuit.
  ///
  /// If configured as the prover, the witness to use is also returned.
  #[allow(clippy::type_complexity)]
  pub fn statement(
    self,
    generators: ProofGenerators<'_, C>,
    commitments: Commitments<C>,
  ) -> Result<(ArithmeticCircuitStatement<'_, C>, Option<ArithmeticCircuitWitness<C>>), AcError> {
    let statement = ArithmeticCircuitStatement::new(generators, self.constraints, commitments)?;
    let witness = self
      .prover
      .map(|mut prover| {
        // We can't deconstruct the witness as it implements Drop (per ZeroizeOnDrop)
        // Accordingly, we take the values within it and move forward with those
        let mut aL = vec![];
        core::mem::swap(&mut prover.aL, &mut aL);
        let mut aR = vec![];
        core::mem::swap(&mut prover.aR, &mut aR);
        let mut C = vec![];
        core::mem::swap(&mut prover.C, &mut C);
        let mut V = vec![];
        core::mem::swap(&mut prover.V, &mut V);
        ArithmeticCircuitWitness::new(ScalarVector::from(aL), ScalarVector::from(aR), C, V)
      })
      .transpose()?;
    Ok((statement, witness))
  }
}

View File

@@ -1,41 +0,0 @@
[package]
name = "ec-divisors"
version = "0.1.0"
description = "A library for calculating elliptic curve divisors"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/divisors"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["ciphersuite", "ff", "group"]
edition = "2021"
rust-version = "1.69"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[dependencies]
std-shims = { path = "../../../common/std-shims", version = "^0.1.1", default-features = false }
rand_core = { version = "0.6", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
subtle = { version = "2", default-features = false }
ff = { version = "0.13", default-features = false, features = ["bits"] }
group = { version = "0.13", default-features = false }
hex = { version = "0.4", default-features = false, optional = true }
dalek-ff-group = { path = "../../dalek-ff-group", default-features = false, optional = true }
pasta_curves = { version = "0.5", git = "https://github.com/kayabaNerve/pasta_curves.git", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616", default-features = false, features = ["bits", "alloc"], optional = true }
[dev-dependencies]
rand_core = { version = "0.6", features = ["getrandom"] }
hex = "0.4"
dalek-ff-group = { path = "../../dalek-ff-group", features = ["std"] }
pasta_curves = { version = "0.5", git = "https://github.com/kayabaNerve/pasta_curves.git", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616", default-features = false, features = ["bits", "alloc"] }
[features]
std = ["std-shims/std", "zeroize/std", "subtle/std", "ff/std", "dalek-ff-group?/std"]
ed25519 = ["hex/alloc", "dalek-ff-group"]
pasta = ["pasta_curves"]
default = ["std"]

View File

@@ -1,4 +0,0 @@
# Elliptic Curve Divisors
An implementation of a representation for and construction of elliptic curve
divisors, intended for Eagen's [EC IP work](https://eprint.iacr.org/2022/596).

View File

@@ -1,581 +0,0 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]
#![deny(missing_docs)]
#![allow(non_snake_case)]
use std_shims::{vec, vec::Vec};
use subtle::{Choice, ConstantTimeEq, ConstantTimeGreater, ConditionallySelectable};
use zeroize::{Zeroize, ZeroizeOnDrop};
use group::{
ff::{Field, PrimeField, PrimeFieldBits},
Group,
};
mod poly;
pub use poly::Poly;
#[cfg(test)]
mod tests;
/// A curve usable with this library.
///
/// The curve is treated in short-Weierstrass form `y^2 = x^3 + A x + B`; curves with other native
/// models (e.g. Edwards) implement this via a birational map to such a form.
pub trait DivisorCurve: Group + ConstantTimeEq + ConditionallySelectable + Zeroize {
  /// An element of the field this curve is defined over.
  type FieldElement: Zeroize + PrimeField + ConditionallySelectable;

  /// The A in the curve equation y^2 = x^3 + A x + B.
  fn a() -> Self::FieldElement;
  /// The B in the curve equation y^2 = x^3 + A x + B.
  fn b() -> Self::FieldElement;

  /// y^2 - x^3 - A x - B
  ///
  /// Section 2 of the security proofs define this modulus.
  ///
  /// This MUST NOT be overridden.
  // TODO: Move to an extension trait
  fn divisor_modulus() -> Poly<Self::FieldElement> {
    Poly {
      // 0 y**1, 1 y**2
      y_coefficients: vec![Self::FieldElement::ZERO, Self::FieldElement::ONE],
      yx_coefficients: vec![],
      x_coefficients: vec![
        // - A x
        -Self::a(),
        // 0 x^2
        Self::FieldElement::ZERO,
        // - x^3
        -Self::FieldElement::ONE,
      ],
      // - B
      zero_coefficient: -Self::b(),
    }
  }

  /// Convert a point to its x and y coordinates.
  ///
  /// Returns None if passed the point at infinity.
  ///
  /// This function may run in time variable to whether the point is the identity.
  fn to_xy(point: Self) -> Option<(Self::FieldElement, Self::FieldElement)>;
}
/// Calculate the slope and intercept between two points.
///
/// Returns `(slope, intercept)` such that both points satisfy `y = slope * x + intercept`.
///
/// This function panics when `a @ infinity`, `b @ infinity`, `a == b`, or when `a == -b`.
pub(crate) fn slope_intercept<C: DivisorCurve>(a: C, b: C) -> (C::FieldElement, C::FieldElement) {
  let (ax, ay) = C::to_xy(a).unwrap();
  // Sanity-check the point satisfies the curve equation (divisor_modulus evaluates to zero
  // exactly on-curve)
  debug_assert_eq!(C::divisor_modulus().eval(ax, ay), C::FieldElement::ZERO);

  let (bx, by) = C::to_xy(b).unwrap();
  debug_assert_eq!(C::divisor_modulus().eval(bx, by), C::FieldElement::ZERO);

  // slope = (by - ay) / (bx - ax); the inversion fails (and we panic) if the points share an x
  // coordinate
  let slope = (by - ay) *
    Option::<C::FieldElement>::from((bx - ax).invert())
      .expect("trying to get slope/intercept of points sharing an x coordinate");

  let intercept = by - (slope * bx);
  // Both points must lie on the resulting line
  debug_assert!(bool::from((ay - (slope * ax) - intercept).is_zero()));
  debug_assert!(bool::from((by - (slope * bx) - intercept).is_zero()));
  (slope, intercept)
}
// The line interpolating two points.
//
// Returns a degree-one Poly `c_y * y + c_x * x + c_0`. All special cases (points at infinity,
// equal points, additive inverses) are handled by computing every candidate result and
// conditionally selecting among them, keeping this constant time with regards to which case
// applies.
fn line<C: DivisorCurve>(a: C, b: C) -> Poly<C::FieldElement> {
  // The three coefficients of a candidate line, bundled so whole candidates can be
  // conditionally selected
  #[derive(Clone, Copy)]
  struct LinesRes<F: ConditionallySelectable> {
    y_coefficient: F,
    x_coefficient: F,
    zero_coefficient: F,
  }
  impl<F: ConditionallySelectable> ConditionallySelectable for LinesRes<F> {
    fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
      Self {
        y_coefficient: <_>::conditional_select(&a.y_coefficient, &b.y_coefficient, choice),
        x_coefficient: <_>::conditional_select(&a.x_coefficient, &b.x_coefficient, choice),
        zero_coefficient: <_>::conditional_select(&a.zero_coefficient, &b.zero_coefficient, choice),
      }
    }
  }

  let a_is_identity = a.is_identity();
  let b_is_identity = b.is_identity();

  // If they're both the point at infinity, we simply set the line to one
  let both_are_identity = a_is_identity & b_is_identity;
  let if_both_are_identity = LinesRes {
    y_coefficient: C::FieldElement::ZERO,
    x_coefficient: C::FieldElement::ZERO,
    zero_coefficient: C::FieldElement::ONE,
  };

  // If either point is the point at infinity, or these are additive inverses, the line is
  // `1 * x - x`. The first `x` is a term in the polynomial, the `x` is the `x` coordinate of these
  // points (of which there is one, as the second point is either at infinity or has a matching `x`
  // coordinate).
  let one_is_identity = a_is_identity | b_is_identity;
  let additive_inverses = a.ct_eq(&-b);
  let one_is_identity_or_additive_inverses = one_is_identity | additive_inverses;
  let if_one_is_identity_or_additive_inverses = {
    // If both are identity, set `a` to the generator so we can safely evaluate the following
    // (which we won't select at the end of this function)
    let a = <_>::conditional_select(&a, &C::generator(), both_are_identity);
    // If `a` is identity, this selects `b`. If `a` isn't identity, this selects `a`
    let non_identity = <_>::conditional_select(&a, &b, a.is_identity());
    let (x, _) = C::to_xy(non_identity).unwrap();
    LinesRes {
      y_coefficient: C::FieldElement::ZERO,
      x_coefficient: C::FieldElement::ONE,
      zero_coefficient: -x,
    }
  };

  // The following calculation assumes neither point is the point at infinity
  // If either are, we use a prior result
  // To ensure we can calculate a result here, set any points at infinity to the generator
  let a = <_>::conditional_select(&a, &C::generator(), a_is_identity);
  let b = <_>::conditional_select(&b, &C::generator(), b_is_identity);
  // It also assumes a, b aren't additive inverses which is also covered by a prior result
  let b = <_>::conditional_select(&b, &a.double(), additive_inverses);

  // If the points are equal, we use the line interpolating the sum of these points with the point
  // at infinity
  let b = <_>::conditional_select(&b, &-a.double(), a.ct_eq(&b));

  let (slope, intercept) = slope_intercept::<C>(a, b);

  // Section 4 of the proofs explicitly state the line `L = y - lambda * x - mu`
  // y - (slope * x) - intercept
  let mut res = LinesRes {
    y_coefficient: C::FieldElement::ONE,
    x_coefficient: -slope,
    zero_coefficient: -intercept,
  };

  // Select the special-case candidates, later selections taking precedence
  res = <_>::conditional_select(
    &res,
    &if_one_is_identity_or_additive_inverses,
    one_is_identity_or_additive_inverses,
  );
  res = <_>::conditional_select(&res, &if_both_are_identity, both_are_identity);

  Poly {
    y_coefficients: vec![res.y_coefficient],
    yx_coefficients: vec![],
    x_coefficients: vec![res.x_coefficient],
    zero_coefficient: res.zero_coefficient,
  }
}
/// Create a divisor interpolating the following points.
///
/// Returns None if:
///   - No points were passed in
///   - The points don't sum to the point at infinity
///   - A passed in point was the point at infinity
///
/// If the arguments were valid, this function executes in an amount of time constant to the amount
/// of points.
#[allow(clippy::new_ret_no_self)]
pub fn new_divisor<C: DivisorCurve>(points: &[C]) -> Option<Poly<C::FieldElement>> {
  // No points were passed in, this is the point at infinity, or the single point isn't infinity
  // and accordingly doesn't sum to infinity. All three cause us to return None
  // Checks a bit other than the first bit is set, meaning this is >= 2
  let mut invalid_args = (points.len() & (!1)).ct_eq(&0);
  // The points don't sum to the point at infinity
  invalid_args |= !points.iter().sum::<C>().is_identity();
  // A point was the point at identity
  for point in points {
    invalid_args |= point.is_identity();
  }
  if bool::from(invalid_args) {
    None?;
  }

  let points_len = points.len();

  // Create the initial set of divisors: one degree-one line per pair of points
  let mut divs = vec![];
  let mut iter = points.iter().copied();
  while let Some(a) = iter.next() {
    let b = iter.next();

    // Draw the line between those points
    // These unwraps are branching on the length of the iterator, not violating the constant-time
    // priorities desired
    // An unpaired `a` is interpolated with `-a` so the pair still sums cleanly
    divs.push((2, a + b.unwrap_or(C::identity()), line::<C>(a, b.unwrap_or(-a))));
  }

  let modulus = C::divisor_modulus();

  // Our Poly algorithm is leaky and will create an excessive amount of y x**j and x**j
  // coefficients which are zero, yet as our implementation is constant time, still come with
  // an immense performance cost. This code truncates the coefficients we know are zero.
  let trim = |divisor: &mut Poly<_>, points_len: usize| {
    // We should only be trimming divisors reduced by the modulus
    debug_assert!(divisor.yx_coefficients.len() <= 1);
    if divisor.yx_coefficients.len() == 1 {
      let truncate_to = ((points_len + 1) / 2).saturating_sub(2);
      #[cfg(debug_assertions)]
      for p in truncate_to .. divisor.yx_coefficients[0].len() {
        debug_assert_eq!(divisor.yx_coefficients[0][p], <C::FieldElement as Field>::ZERO);
      }
      divisor.yx_coefficients[0].truncate(truncate_to);
    }
    {
      let truncate_to = points_len / 2;
      #[cfg(debug_assertions)]
      for p in truncate_to .. divisor.x_coefficients.len() {
        debug_assert_eq!(divisor.x_coefficients[p], <C::FieldElement as Field>::ZERO);
      }
      divisor.x_coefficients.truncate(truncate_to);
    }
  };

  // Pair them off until only one remains
  while divs.len() > 1 {
    let mut next_divs = vec![];
    // If there's an odd amount of divisors, carry the odd one out to the next iteration
    if (divs.len() % 2) == 1 {
      next_divs.push(divs.pop().unwrap());
    }

    while let Some((a_points, a, a_div)) = divs.pop() {
      let (b_points, b, b_div) = divs.pop().unwrap();
      let points = a_points + b_points;

      // Merge the two divisors
      let numerator = a_div.mul_mod(&b_div, &modulus).mul_mod(&line::<C>(a, b), &modulus);
      let denominator = line::<C>(a, -a).mul_mod(&line::<C>(b, -b), &modulus);
      let (mut q, r) = numerator.div_rem(&denominator);
      // The division must be exact for valid arguments
      debug_assert_eq!(r, Poly::zero());

      trim(&mut q, 1 + points);

      next_divs.push((points, a + b, q));
    }

    divs = next_divs;
  }

  // Return the unified divisor
  let mut divisor = divs.remove(0).2;
  trim(&mut divisor, points_len);
  Some(divisor)
}
/// The decomposition of a scalar.
///
/// The decomposition ($d$) of a scalar ($s$) has the following two properties:
///
/// - $\sum^{\mathsf{NUM_BITS} - 1}_{i=0} d_i * 2^i = s$
/// - $\sum^{\mathsf{NUM_BITS} - 1}_{i=0} d_i = \mathsf{NUM_BITS}$
#[derive(Clone, Zeroize, ZeroizeOnDrop)]
pub struct ScalarDecomposition<F: Zeroize + PrimeFieldBits> {
  // The scalar which was decomposed
  scalar: F,
  // The coefficients d_i, little-endian, one per bit of F
  decomposition: Vec<u64>,
}
impl<F: Zeroize + PrimeFieldBits> ScalarDecomposition<F> {
  /// Decompose a non-zero scalar.
  ///
  /// Returns `None` if the scalar is zero.
  ///
  /// This function is constant time if the scalar is non-zero.
  pub fn new(scalar: F) -> Option<Self> {
    if bool::from(scalar.is_zero()) {
      None?;
    }

    /*
      We need the sum of the coefficients to equal F::NUM_BITS. The scalar's bits will be less than
      F::NUM_BITS. Accordingly, we need to increment the sum of the coefficients without
      incrementing the scalar represented. We do this by finding the highest non-0 coefficient,
      decrementing it, and increasing the immediately less significant coefficient by 2. This
      increases the sum of the coefficients by 1 (-1+2=1).
    */
    let num_bits = u64::from(F::NUM_BITS);

    // Obtain the bits of the scalar
    let num_bits_usize = usize::try_from(num_bits).unwrap();
    let mut decomposition = vec![0; num_bits_usize];
    for (i, bit) in scalar.to_le_bits().into_iter().take(num_bits_usize).enumerate() {
      let bit = u64::from(u8::from(bit));
      decomposition[i] = bit;
    }

    // The following algorithm only works if the value of the scalar exceeds num_bits
    // If it isn't, we increase it by the modulus such that it does exceed num_bits
    {
      let mut less_than_num_bits = Choice::from(0);
      for i in 0 .. num_bits {
        less_than_num_bits |= scalar.ct_eq(&F::from(i));
      }
      let mut decomposition_of_modulus = vec![0; num_bits_usize];
      // Decompose negative one
      for (i, bit) in (-F::ONE).to_le_bits().into_iter().take(num_bits_usize).enumerate() {
        let bit = u64::from(u8::from(bit));
        decomposition_of_modulus[i] = bit;
      }
      // Increment it by one
      decomposition_of_modulus[0] += 1;

      // Add the decomposition onto the decomposition of the modulus
      // (conditionally, so this stays constant time)
      for i in 0 .. num_bits_usize {
        let new_decomposition = <_>::conditional_select(
          &decomposition[i],
          &(decomposition[i] + decomposition_of_modulus[i]),
          less_than_num_bits,
        );
        decomposition[i] = new_decomposition;
      }
    }

    // Calculate the sum of the coefficients
    let mut sum_of_coefficients: u64 = 0;
    for decomposition in &decomposition {
      sum_of_coefficients += *decomposition;
    }

    /*
      Now, because we added a log2(k)-bit number to a k-bit number, we may have our sum of
      coefficients be *too high*. We attempt to reduce the sum of the coefficients accordingly.

      This algorithm is guaranteed to complete as expected. Take the sequence `222`. `222` becomes
      `032` becomes `013`. Even if the next coefficient in the sequence is `2`, the third
      coefficient will be reduced once and the next coefficient (`2`, increased to `3`) will only
      be eligible for reduction once. This demonstrates, even for a worst case of log2(k) `2`s
      followed by `1`s (as possible if the modulus is a Mersenne prime), the log2(k) `2`s can be
      reduced as necessary so long as there is a single coefficient after (requiring the entire
      sequence be at least of length log2(k) + 1). For a 2-bit number, log2(k) + 1 == 2, so this
      holds for any odd prime field.

      To fully type out the demonstration for the Mersenne prime 3, with scalar to encode 1 (the
      highest value less than the number of bits):

      10 - Little-endian bits of 1
      21 - Little-endian bits of 1, plus the modulus
      02 - After one reduction, where the sum of the coefficients does in fact equal 2 (the target)
    */
    {
      let mut log2_num_bits = 0;
      while (1 << log2_num_bits) < num_bits {
        log2_num_bits += 1;
      }

      for _ in 0 .. log2_num_bits {
        // If the sum of coefficients is the amount of bits, we're done
        let mut done = sum_of_coefficients.ct_eq(&num_bits);

        for i in 0 .. (num_bits_usize - 1) {
          let should_act = (!done) & decomposition[i].ct_gt(&1);
          // Subtract 2 from this coefficient
          let amount_to_sub = <_>::conditional_select(&0, &2, should_act);
          decomposition[i] -= amount_to_sub;
          // Add 1 to the next coefficient
          let amount_to_add = <_>::conditional_select(&0, &1, should_act);
          decomposition[i + 1] += amount_to_add;

          // Also update the sum of coefficients
          sum_of_coefficients -= <_>::conditional_select(&0, &1, should_act);

          // If we updated the coefficients this loop iter, we're done for this loop iter
          done |= should_act;
        }
      }
    }

    // The sum may also be too low; raise it by repeatedly splitting the highest non-zero
    // coefficient (decrement it, add 2 to the next-lower coefficient)
    for _ in 0 .. num_bits {
      // If the sum of coefficients is the amount of bits, we're done
      let mut done = sum_of_coefficients.ct_eq(&num_bits);

      // Find the highest coefficient currently non-zero
      for i in (1 .. decomposition.len()).rev() {
        // If this is non-zero, we should decrement this coefficient if we haven't already
        // decremented a coefficient this round
        let is_non_zero = !(0.ct_eq(&decomposition[i]));
        let should_act = (!done) & is_non_zero;

        // Update this coefficient and the prior coefficient
        let amount_to_sub = <_>::conditional_select(&0, &1, should_act);
        decomposition[i] -= amount_to_sub;

        let amount_to_add = <_>::conditional_select(&0, &2, should_act);
        // i must be at least 1, so i - 1 will be at least 0 (meaning it's safe to index with)
        decomposition[i - 1] += amount_to_add;

        // Also update the sum of coefficients
        sum_of_coefficients += <_>::conditional_select(&0, &1, should_act);

        // If we updated the coefficients this loop iter, we're done for this loop iter
        done |= should_act;
      }
    }
    debug_assert!(bool::from(decomposition.iter().sum::<u64>().ct_eq(&num_bits)));

    Some(ScalarDecomposition { scalar, decomposition })
  }

  /// The scalar.
  pub fn scalar(&self) -> &F {
    &self.scalar
  }

  /// The decomposition of the scalar.
  pub fn decomposition(&self) -> &[u64] {
    &self.decomposition
  }

  /// A divisor to prove a scalar multiplication.
  ///
  /// The divisor will interpolate $-(s \cdot G)$ with $d_i$ instances of $2^i \cdot G$.
  ///
  /// This function executes in constant time with regards to the scalar.
  ///
  /// This function MAY panic if the generator is the point at infinity.
  pub fn scalar_mul_divisor<C: Zeroize + DivisorCurve<Scalar = F>>(
    &self,
    mut generator: C,
  ) -> Poly<C::FieldElement> {
    // 1 is used for the resulting point, NUM_BITS is used for the decomposition, and then we store
    // one additional index in a usize for the points we shouldn't write at all (hence the +2)
    let _ = usize::try_from(<C::Scalar as PrimeField>::NUM_BITS + 2)
      .expect("NUM_BITS + 2 didn't fit in usize");

    let mut divisor_points =
      vec![C::identity(); (<C::Scalar as PrimeField>::NUM_BITS + 1) as usize];

    // Write the inverse of the resulting point
    divisor_points[0] = -generator * self.scalar;

    // Write the decomposition
    let mut write_above: u64 = 0;
    for coefficient in &self.decomposition {
      // Write the generator to every slot except the slots we have already written to.
      // The conditional assignment keeps this constant time in the coefficient values
      for i in 1 ..= (<C::Scalar as PrimeField>::NUM_BITS as u64) {
        divisor_points[i as usize].conditional_assign(&generator, i.ct_gt(&write_above));
      }

      // Increase the next write start by the coefficient.
      write_above += coefficient;
      generator = generator.double();
    }

    // Create a divisor out of the points
    let res = new_divisor(&divisor_points).unwrap();
    divisor_points.zeroize();
    res
  }
}
#[cfg(any(test, feature = "pasta"))]
mod pasta {
  use group::{ff::Field, Curve};
  use pasta_curves::{
    arithmetic::{Coordinates, CurveAffine},
    Ep, Fp, Eq, Fq,
  };
  use crate::DivisorCurve;

  // Pallas: y^2 = x^3 + 5, defined over the base field Fp
  impl DivisorCurve for Ep {
    type FieldElement = Fp;

    fn a() -> Self::FieldElement {
      Self::FieldElement::ZERO
    }
    fn b() -> Self::FieldElement {
      Self::FieldElement::from(5u64)
    }

    // `coordinates()` is None for the identity, which maps to None as the trait requires
    fn to_xy(point: Self) -> Option<(Self::FieldElement, Self::FieldElement)> {
      Option::<Coordinates<_>>::from(point.to_affine().coordinates())
        .map(|coords| (*coords.x(), *coords.y()))
    }
  }

  // Vesta: y^2 = x^3 + 5, defined over the base field Fq
  impl DivisorCurve for Eq {
    type FieldElement = Fq;

    fn a() -> Self::FieldElement {
      Self::FieldElement::ZERO
    }
    fn b() -> Self::FieldElement {
      Self::FieldElement::from(5u64)
    }

    // `coordinates()` is None for the identity, which maps to None as the trait requires
    fn to_xy(point: Self) -> Option<(Self::FieldElement, Self::FieldElement)> {
      Option::<Coordinates<_>>::from(point.to_affine().coordinates())
        .map(|coords| (*coords.x(), *coords.y()))
    }
  }
}
#[cfg(any(test, feature = "ed25519"))]
mod ed25519 {
  use subtle::{Choice, ConditionallySelectable};
  use group::{
    ff::{Field, PrimeField},
    Group, GroupEncoding,
  };
  use dalek_ff_group::{FieldElement, EdwardsPoint};

  // Treats Ed25519 points as points on Wei25519, the short-Weierstrass form of Curve25519,
  // via the maps in draft-ietf-lwig-curve-representations-02
  impl crate::DivisorCurve for EdwardsPoint {
    type FieldElement = FieldElement;

    // Wei25519 a/b
    // https://www.ietf.org/archive/id/draft-ietf-lwig-curve-representations-02.pdf E.3
    fn a() -> Self::FieldElement {
      // Constant from the draft is big-endian hex; the field decodes little-endian bytes
      let mut be_bytes =
        hex::decode("2aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa984914a144").unwrap();
      be_bytes.reverse();
      let le_bytes = be_bytes;
      Self::FieldElement::from_repr(le_bytes.try_into().unwrap()).unwrap()
    }
    fn b() -> Self::FieldElement {
      let mut be_bytes =
        hex::decode("7b425ed097b425ed097b425ed097b425ed097b425ed097b4260b5e9c7710c864").unwrap();
      be_bytes.reverse();
      let le_bytes = be_bytes;
      Self::FieldElement::from_repr(le_bytes.try_into().unwrap()).unwrap()
    }

    // https://www.ietf.org/archive/id/draft-ietf-lwig-curve-representations-02.pdf E.2
    fn to_xy(point: Self) -> Option<(Self::FieldElement, Self::FieldElement)> {
      if bool::from(point.is_identity()) {
        None?;
      }

      // Extract the y coordinate from the compressed point
      // (compressed encoding: 255 bits of y, top bit is the sign of x)
      let mut edwards_y = point.to_bytes();
      let x_is_odd = edwards_y[31] >> 7;
      edwards_y[31] &= (1 << 7) - 1;
      let edwards_y = Self::FieldElement::from_repr(edwards_y).unwrap();

      // Recover the x coordinate from the twisted-Edwards curve equation
      let edwards_y_sq = edwards_y * edwards_y;
      // d = -121665/121666, the Ed25519 curve constant
      let D = -Self::FieldElement::from(121665u64) *
        Self::FieldElement::from(121666u64).invert().unwrap();
      let mut edwards_x = ((edwards_y_sq - Self::FieldElement::ONE) *
        ((D * edwards_y_sq) + Self::FieldElement::ONE).invert().unwrap())
      .sqrt()
      .unwrap();
      // Negate the x coordinate if the sign doesn't match
      edwards_x = <_>::conditional_select(
        &edwards_x,
        &-edwards_x,
        edwards_x.is_odd() ^ Choice::from(x_is_odd),
      );

      // Calculate the x and y coordinates for Wei25519
      let edwards_y_plus_one = Self::FieldElement::ONE + edwards_y;
      let one_minus_edwards_y = Self::FieldElement::ONE - edwards_y;
      // 486662 is the Montgomery A constant of Curve25519
      let wei_x = (edwards_y_plus_one * one_minus_edwards_y.invert().unwrap()) +
        (Self::FieldElement::from(486662u64) * Self::FieldElement::from(3u64).invert().unwrap());
      let c =
        (-(Self::FieldElement::from(486662u64) + Self::FieldElement::from(2u64))).sqrt().unwrap();
      let wei_y = c * edwards_y_plus_one * (one_minus_edwards_y * edwards_x).invert().unwrap();
      Some((wei_x, wei_y))
    }
  }
}

View File

@@ -1,744 +0,0 @@
use core::ops::{Add, Neg, Sub, Mul, Rem};
use std_shims::{vec, vec::Vec};
use subtle::{Choice, ConstantTimeEq, ConstantTimeGreater, ConditionallySelectable};
use zeroize::{Zeroize, ZeroizeOnDrop};
use group::ff::PrimeField;
// The index of a coefficient within a Poly: the powers of y and x it multiplies.
#[derive(Clone, Copy, PartialEq, Debug)]
struct CoefficientIndex {
  y_pow: u64,
  x_pow: u64,
}
impl ConditionallySelectable for CoefficientIndex {
  fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
    Self {
      y_pow: <_>::conditional_select(&a.y_pow, &b.y_pow, choice),
      x_pow: <_>::conditional_select(&a.x_pow, &b.x_pow, choice),
    }
  }
}
impl ConstantTimeEq for CoefficientIndex {
  fn ct_eq(&self, other: &Self) -> Choice {
    self.y_pow.ct_eq(&other.y_pow) & self.x_pow.ct_eq(&other.x_pow)
  }
}
impl ConstantTimeGreater for CoefficientIndex {
  // Lexicographic order: compare by y power first, breaking ties on the x power
  fn ct_gt(&self, other: &Self) -> Choice {
    self.y_pow.ct_gt(&other.y_pow) |
      (self.y_pow.ct_eq(&other.y_pow) & self.x_pow.ct_gt(&other.x_pow))
  }
}
/// A structure representing a Polynomial with x^i, y^i, and y^i * x^j terms.
///
/// All coefficient vectors are implicitly one-indexed by power (index `i` holds the coefficient
/// for power `i + 1`); the power-zero coefficient is stored separately.
#[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)]
pub struct Poly<F: From<u64> + Zeroize + PrimeField> {
  /// c\[i] * y^(i + 1)
  pub y_coefficients: Vec<F>,
  /// c\[i]\[j] * y^(i + 1) x^(j + 1)
  pub yx_coefficients: Vec<Vec<F>>,
  /// c\[i] * x^(i + 1)
  pub x_coefficients: Vec<F>,
  /// Coefficient for x^0, y^0, and x^0 y^0 (the coefficient for 1)
  pub zero_coefficient: F,
}
impl<F: From<u64> + Zeroize + PrimeField> PartialEq for Poly<F> {
  // This is not constant time and is not meant to be
  //
  // Two Polys are equal if they represent the same polynomial, even if their coefficient
  // vectors differ in length due to trailing zero coefficients.
  fn eq(&self, b: &Poly<F>) -> bool {
    {
      // Compare the shared y-coefficient prefix, then require all excess coefficients be zero
      let mutual_y_coefficients = self.y_coefficients.len().min(b.y_coefficients.len());
      if self.y_coefficients[.. mutual_y_coefficients] != b.y_coefficients[.. mutual_y_coefficients]
      {
        return false;
      }
      for coeff in &self.y_coefficients[mutual_y_coefficients ..] {
        if *coeff != F::ZERO {
          return false;
        }
      }
      for coeff in &b.y_coefficients[mutual_y_coefficients ..] {
        if *coeff != F::ZERO {
          return false;
        }
      }
    }

    {
      // Every yx coefficient present in one must be zero or match in the other
      for (i, yx_coeffs) in self.yx_coefficients.iter().enumerate() {
        for (j, coeff) in yx_coeffs.iter().enumerate() {
          if coeff != b.yx_coefficients.get(i).unwrap_or(&vec![]).get(j).unwrap_or(&F::ZERO) {
            return false;
          }
        }
      }
      // Run from the other perspective in case other is longer than self
      for (i, yx_coeffs) in b.yx_coefficients.iter().enumerate() {
        for (j, coeff) in yx_coeffs.iter().enumerate() {
          if coeff != self.yx_coefficients.get(i).unwrap_or(&vec![]).get(j).unwrap_or(&F::ZERO) {
            return false;
          }
        }
      }
    }

    {
      // Same shared-prefix-plus-zero-tail comparison for the x coefficients
      let mutual_x_coefficients = self.x_coefficients.len().min(b.x_coefficients.len());
      if self.x_coefficients[.. mutual_x_coefficients] != b.x_coefficients[.. mutual_x_coefficients]
      {
        return false;
      }
      for coeff in &self.x_coefficients[mutual_x_coefficients ..] {
        if *coeff != F::ZERO {
          return false;
        }
      }
      for coeff in &b.x_coefficients[mutual_x_coefficients ..] {
        if *coeff != F::ZERO {
          return false;
        }
      }
    }

    self.zero_coefficient == b.zero_coefficient
  }
}
impl<F: From<u64> + Zeroize + PrimeField> Poly<F> {
  /// The zero polynomial: no terms in y, yx, or x, and a zero constant term.
  pub(crate) fn zero() -> Self {
    Poly {
      y_coefficients: Vec::new(),
      yx_coefficients: Vec::new(),
      x_coefficients: Vec::new(),
      zero_coefficient: F::ZERO,
    }
  }
}
impl<F: From<u64> + Zeroize + PrimeField> Add<&Self> for Poly<F> {
  type Output = Self;

  // Coefficient-wise addition, padding `self` with zeroes so every coefficient of `other`
  // has a slot to add into.
  fn add(mut self, other: &Self) -> Self {
    // Expand to be the needed size
    while self.y_coefficients.len() < other.y_coefficients.len() {
      self.y_coefficients.push(F::ZERO);
    }
    while self.yx_coefficients.len() < other.yx_coefficients.len() {
      self.yx_coefficients.push(vec![]);
    }
    for i in 0 .. other.yx_coefficients.len() {
      while self.yx_coefficients[i].len() < other.yx_coefficients[i].len() {
        self.yx_coefficients[i].push(F::ZERO);
      }
    }
    while self.x_coefficients.len() < other.x_coefficients.len() {
      self.x_coefficients.push(F::ZERO);
    }

    // Perform the addition
    for (i, coeff) in other.y_coefficients.iter().enumerate() {
      self.y_coefficients[i] += coeff;
    }
    for (i, coeffs) in other.yx_coefficients.iter().enumerate() {
      for (j, coeff) in coeffs.iter().enumerate() {
        self.yx_coefficients[i][j] += coeff;
      }
    }
    for (i, coeff) in other.x_coefficients.iter().enumerate() {
      self.x_coefficients[i] += coeff;
    }
    self.zero_coefficient += other.zero_coefficient;

    self
  }
}
impl<F: From<u64> + Zeroize + PrimeField> Neg for Poly<F> {
  type Output = Self;

  // Negate every coefficient in place, including the constant term.
  fn neg(mut self) -> Self {
    for coeff in &mut self.y_coefficients {
      *coeff = -*coeff;
    }
    for row in &mut self.yx_coefficients {
      for coeff in row.iter_mut() {
        *coeff = -*coeff;
      }
    }
    for coeff in &mut self.x_coefficients {
      *coeff = -*coeff;
    }
    self.zero_coefficient = -self.zero_coefficient;
    self
  }
}
impl<F: From<u64> + Zeroize + PrimeField> Sub for Poly<F> {
  type Output = Self;

  // Subtraction, expressed as addition of the negation.
  fn sub(self, other: Self) -> Self {
    let negated_other = -other;
    self + &negated_other
  }
}
impl<F: From<u64> + Zeroize + PrimeField> Mul<F> for Poly<F> {
  type Output = Self;

  // Scale every coefficient of the polynomial, including the constant term, by `scalar`.
  fn mul(mut self, scalar: F) -> Self {
    for coeff in &mut self.y_coefficients {
      *coeff *= scalar;
    }
    for row in &mut self.yx_coefficients {
      for coeff in row.iter_mut() {
        *coeff *= scalar;
      }
    }
    for coeff in &mut self.x_coefficients {
      *coeff *= scalar;
    }
    self.zero_coefficient *= scalar;
    self
  }
}
impl<F: From<u64> + Zeroize + PrimeField> Poly<F> {
  // Multiply the polynomial by x^power_of_x.
  //
  // Every x power increases, so the constant term becomes an x coefficient and every y
  // coefficient becomes a yx coefficient.
  #[must_use]
  fn shift_by_x(mut self, power_of_x: usize) -> Self {
    if power_of_x == 0 {
      return self;
    }

    // Shift up every x coefficient
    for _ in 0 .. power_of_x {
      self.x_coefficients.insert(0, F::ZERO);
      for yx_coeffs in &mut self.yx_coefficients {
        yx_coeffs.insert(0, F::ZERO);
      }
    }

    // Move the zero coefficient
    self.x_coefficients[power_of_x - 1] = self.zero_coefficient;
    self.zero_coefficient = F::ZERO;

    // Move the y coefficients
    // Start by creating yx coefficients with the necessary powers of x
    let mut yx_coefficients_to_push = vec![];
    while yx_coefficients_to_push.len() < power_of_x {
      yx_coefficients_to_push.push(F::ZERO);
    }
    // Now, ensure the yx coefficients has the slots for the y coefficients we're moving
    while self.yx_coefficients.len() < self.y_coefficients.len() {
      self.yx_coefficients.push(yx_coefficients_to_push.clone());
    }
    // Perform the move
    for (i, y_coeff) in self.y_coefficients.drain(..).enumerate() {
      self.yx_coefficients[i][power_of_x - 1] = y_coeff;
    }

    self
  }

  // Multiply the polynomial by y^power_of_y.
  //
  // Every y power increases, so the constant term becomes a y coefficient and every x
  // coefficient becomes a yx coefficient.
  #[must_use]
  fn shift_by_y(mut self, power_of_y: usize) -> Self {
    if power_of_y == 0 {
      return self;
    }

    // Shift up every y coefficient
    for _ in 0 .. power_of_y {
      self.y_coefficients.insert(0, F::ZERO);
      self.yx_coefficients.insert(0, vec![]);
    }

    // Move the zero coefficient
    self.y_coefficients[power_of_y - 1] = self.zero_coefficient;
    self.zero_coefficient = F::ZERO;

    // Move the x coefficients
    core::mem::swap(&mut self.yx_coefficients[power_of_y - 1], &mut self.x_coefficients);
    self.x_coefficients = vec![];

    self
  }
}
impl<F: From<u64> + Zeroize + PrimeField> Mul<&Poly<F>> for Poly<F> {
  type Output = Self;

  // Schoolbook multiplication: for each coefficient of `other`, scale `self` by it, shift the
  // product into place via shift_by_x/shift_by_y, and accumulate.
  fn mul(self, other: &Self) -> Self {
    // Start with the contribution of other's constant term
    let mut res = self.clone() * other.zero_coefficient;

    for (i, y_coeff) in other.y_coefficients.iter().enumerate() {
      let scaled = self.clone() * *y_coeff;
      res = res + &scaled.shift_by_y(i + 1);
    }
    for (y_i, yx_coeffs) in other.yx_coefficients.iter().enumerate() {
      for (x_i, yx_coeff) in yx_coeffs.iter().enumerate() {
        let scaled = self.clone() * *yx_coeff;
        res = res + &scaled.shift_by_y(y_i + 1).shift_by_x(x_i + 1);
      }
    }
    for (i, x_coeff) in other.x_coefficients.iter().enumerate() {
      let scaled = self.clone() * *x_coeff;
      res = res + &scaled.shift_by_x(i + 1);
    }

    res
  }
}
impl<F: From<u64> + Zeroize + PrimeField> Poly<F> {
  // The leading y coefficient and associated x coefficient.
  //
  // Ordered by y power first, then x power. This is derived solely from the lengths of the
  // coefficient vectors (the polynomial's structure), not from the coefficient values.
  fn leading_coefficient(&self) -> (usize, usize) {
    if self.y_coefficients.len() > self.yx_coefficients.len() {
      (self.y_coefficients.len(), 0)
    } else if !self.yx_coefficients.is_empty() {
      (self.yx_coefficients.len(), self.yx_coefficients.last().unwrap().len())
    } else {
      (0, self.x_coefficients.len())
    }
  }
  /// Returns the highest non-zero coefficient greater than the specified coefficient.
  ///
  /// If no non-zero coefficient is greater than the specified coefficient, this will return
  /// (0, 0).
  ///
  /// Every candidate coefficient is visited and conditionally selected, keeping this constant
  /// time with regards to the coefficient values.
  fn greater_than_or_equal_coefficient(
    &self,
    greater_than_or_equal: &CoefficientIndex,
  ) -> CoefficientIndex {
    let mut leading_coefficient = CoefficientIndex { y_pow: 0, x_pow: 0 };

    // Scan the pure-y coefficients
    for (y_pow_sub_one, coeff) in self.y_coefficients.iter().enumerate() {
      let y_pow = u64::try_from(y_pow_sub_one + 1).unwrap();
      let coeff_is_non_zero = !coeff.is_zero();
      let potential = CoefficientIndex { y_pow, x_pow: 0 };
      // Take this index if it's non-zero, exceeds the current best, and meets the bound
      leading_coefficient = <_>::conditional_select(
        &leading_coefficient,
        &potential,
        coeff_is_non_zero &
          potential.ct_gt(&leading_coefficient) &
          (potential.ct_gt(greater_than_or_equal) | potential.ct_eq(greater_than_or_equal)),
      );
    }

    // Scan the mixed yx coefficients
    for (y_pow_sub_one, yx_coefficients) in self.yx_coefficients.iter().enumerate() {
      let y_pow = u64::try_from(y_pow_sub_one + 1).unwrap();
      for (x_pow_sub_one, coeff) in yx_coefficients.iter().enumerate() {
        let x_pow = u64::try_from(x_pow_sub_one + 1).unwrap();
        let coeff_is_non_zero = !coeff.is_zero();
        let potential = CoefficientIndex { y_pow, x_pow };
        leading_coefficient = <_>::conditional_select(
          &leading_coefficient,
          &potential,
          coeff_is_non_zero &
            potential.ct_gt(&leading_coefficient) &
            (potential.ct_gt(greater_than_or_equal) | potential.ct_eq(greater_than_or_equal)),
        );
      }
    }

    // Scan the pure-x coefficients
    for (x_pow_sub_one, coeff) in self.x_coefficients.iter().enumerate() {
      let x_pow = u64::try_from(x_pow_sub_one + 1).unwrap();
      let coeff_is_non_zero = !coeff.is_zero();
      let potential = CoefficientIndex { y_pow: 0, x_pow };
      leading_coefficient = <_>::conditional_select(
        &leading_coefficient,
        &potential,
        coeff_is_non_zero &
          potential.ct_gt(&leading_coefficient) &
          (potential.ct_gt(greater_than_or_equal) | potential.ct_eq(greater_than_or_equal)),
      );
    }

    leading_coefficient
  }
  /// Perform multiplication mod `modulus`.
  ///
  /// Multiplies the polynomials, then discards the remainder of division by `modulus`.
  #[must_use]
  pub(crate) fn mul_mod(self, other: &Self, modulus: &Self) -> Self {
    (self * other) % modulus
  }
/// Perform division, returning the result and remainder.
///
/// This function is constant time to the structure of the numerator and denominator. The actual
/// value of the coefficients will not introduce timing differences.
///
/// Panics upon division by a polynomial where all coefficients are zero.
#[must_use]
pub(crate) fn div_rem(self, denominator: &Self) -> (Self, Self) {
// Constant-time long division of `self` by `denominator`, returning
// `(quotient, remainder)`. The iteration count and the access patterns below
// are determined solely by the *structure* (coefficient counts) of the
// polynomials, never by the values of their coefficients.

// These functions have undefined behavior if this isn't a valid index for this poly

// Read the coefficient at `index` by scanning every coefficient present and
// conditionally selecting the matching one, keeping the access pattern
// independent of the (potentially secret) index
fn ct_get<F: From<u64> + Zeroize + PrimeField>(poly: &Poly<F>, index: CoefficientIndex) -> F {
let mut res = poly.zero_coefficient;
for (y_pow_sub_one, coeff) in poly.y_coefficients.iter().enumerate() {
res = <_>::conditional_select(
&res,
coeff,
index
.ct_eq(&CoefficientIndex { y_pow: (y_pow_sub_one + 1).try_into().unwrap(), x_pow: 0 }),
);
}
for (y_pow_sub_one, coeffs) in poly.yx_coefficients.iter().enumerate() {
for (x_pow_sub_one, coeff) in coeffs.iter().enumerate() {
res = <_>::conditional_select(
&res,
coeff,
index.ct_eq(&CoefficientIndex {
y_pow: (y_pow_sub_one + 1).try_into().unwrap(),
x_pow: (x_pow_sub_one + 1).try_into().unwrap(),
}),
);
}
}
for (x_pow_sub_one, coeff) in poly.x_coefficients.iter().enumerate() {
res = <_>::conditional_select(
&res,
coeff,
index
.ct_eq(&CoefficientIndex { y_pow: 0, x_pow: (x_pow_sub_one + 1).try_into().unwrap() }),
);
}
res
}

// Write `value` to the coefficient at `index`, again visiting every
// coefficient so the write pattern is independent of `index`
fn ct_set<F: From<u64> + Zeroize + PrimeField>(
poly: &mut Poly<F>,
index: CoefficientIndex,
value: F,
) {
for (y_pow_sub_one, coeff) in poly.y_coefficients.iter_mut().enumerate() {
*coeff = <_>::conditional_select(
coeff,
&value,
index
.ct_eq(&CoefficientIndex { y_pow: (y_pow_sub_one + 1).try_into().unwrap(), x_pow: 0 }),
);
}
for (y_pow_sub_one, coeffs) in poly.yx_coefficients.iter_mut().enumerate() {
for (x_pow_sub_one, coeff) in coeffs.iter_mut().enumerate() {
*coeff = <_>::conditional_select(
coeff,
&value,
index.ct_eq(&CoefficientIndex {
y_pow: (y_pow_sub_one + 1).try_into().unwrap(),
x_pow: (x_pow_sub_one + 1).try_into().unwrap(),
}),
);
}
}
for (x_pow_sub_one, coeff) in poly.x_coefficients.iter_mut().enumerate() {
*coeff = <_>::conditional_select(
coeff,
&value,
index
.ct_eq(&CoefficientIndex { y_pow: 0, x_pow: (x_pow_sub_one + 1).try_into().unwrap() }),
);
}
poly.zero_coefficient = <_>::conditional_select(
&poly.zero_coefficient,
&value,
index.ct_eq(&CoefficientIndex { y_pow: 0, x_pow: 0 }),
);
}

// Select one of two polynomials, constant-time with regards to their
// coefficients' values (both are first padded to share the same structure)
fn conditional_select_poly<F: From<u64> + Zeroize + PrimeField>(
mut a: Poly<F>,
mut b: Poly<F>,
choice: Choice,
) -> Poly<F> {
// Grow `a`'s coefficient vectors (with zeroes) to at least `b`'s lengths
let pad_to = |a: &mut Poly<F>, b: &Poly<F>| {
while a.x_coefficients.len() < b.x_coefficients.len() {
a.x_coefficients.push(F::ZERO);
}
while a.yx_coefficients.len() < b.yx_coefficients.len() {
a.yx_coefficients.push(vec![]);
}
for (a, b) in a.yx_coefficients.iter_mut().zip(&b.yx_coefficients) {
while a.len() < b.len() {
a.push(F::ZERO);
}
}
while a.y_coefficients.len() < b.y_coefficients.len() {
a.y_coefficients.push(F::ZERO);
}
};
// Pad these to be the same size/layout as each other
pad_to(&mut a, &b);
pad_to(&mut b, &a);

// With the structures equal, select each coefficient pairwise
let mut res = Poly::zero();
for (a, b) in a.y_coefficients.iter().zip(&b.y_coefficients) {
res.y_coefficients.push(<_>::conditional_select(a, b, choice));
}
for (a, b) in a.yx_coefficients.iter().zip(&b.yx_coefficients) {
let mut yx_coefficients = Vec::with_capacity(a.len());
for (a, b) in a.iter().zip(b) {
yx_coefficients.push(<_>::conditional_select(a, b, choice))
}
res.yx_coefficients.push(yx_coefficients);
}
for (a, b) in a.x_coefficients.iter().zip(&b.x_coefficients) {
res.x_coefficients.push(<_>::conditional_select(a, b, choice));
}
res.zero_coefficient =
<_>::conditional_select(&a.zero_coefficient, &b.zero_coefficient, choice);
res
}

// The following long division algorithm only works if the denominator actually has a variable
// If the denominator isn't variable to anything, short-circuit to scalar 'division'
// This is safe as `leading_coefficient` is based on the structure, not the values, of the poly
let denominator_leading_coefficient = denominator.leading_coefficient();
if denominator_leading_coefficient == (0, 0) {
return (self * denominator.zero_coefficient.invert().unwrap(), Poly::zero());
}

// The structure of the quotient, which is the numerator with all coefficients set to 0
let mut quotient_structure = Poly {
y_coefficients: vec![F::ZERO; self.y_coefficients.len()],
yx_coefficients: self.yx_coefficients.clone(),
x_coefficients: vec![F::ZERO; self.x_coefficients.len()],
zero_coefficient: F::ZERO,
};
// The yx coefficients were cloned only for their shape; zero their values out
for coeff in quotient_structure
.yx_coefficients
.iter_mut()
.flat_map(|yx_coefficients| yx_coefficients.iter_mut())
{
*coeff = F::ZERO;
}

// Calculate the amount of iterations we need to perform
// This is a structural bound: one iteration per (non-zero-degree) coefficient of the numerator
let iterations = self.y_coefficients.len() +
self.yx_coefficients.iter().map(|yx_coefficients| yx_coefficients.len()).sum::<usize>() +
self.x_coefficients.len();

// Find the highest non-zero coefficient in the denominator
// This is the coefficient which we actually perform division with
let denominator_dividing_coefficient =
denominator.greater_than_or_equal_coefficient(&CoefficientIndex { y_pow: 0, x_pow: 0 });
let denominator_dividing_coefficient_inv =
ct_get(denominator, denominator_dividing_coefficient).invert().unwrap();

let mut quotient = quotient_structure.clone();
let mut remainder = self.clone();
for _ in 0 .. iterations {
// Find the numerator coefficient we're clearing
// This will be (0, 0) if we aren't clearing a coefficient
let numerator_coefficient =
remainder.greater_than_or_equal_coefficient(&denominator_dividing_coefficient);
// We only apply the effects of this iteration if the numerator's coefficient is actually >=
let meaningful_iteration = numerator_coefficient.ct_gt(&denominator_dividing_coefficient) |
numerator_coefficient.ct_eq(&denominator_dividing_coefficient);

// 1) Find the scalar `q` such that the leading coefficient of `q * denominator` is equal to
// the leading coefficient of self.
let numerator_coefficient_value = ct_get(&remainder, numerator_coefficient);
let q = numerator_coefficient_value * denominator_dividing_coefficient_inv;

// 2) Calculate the full term of the quotient by scaling with the necessary powers of y/x
let proper_powers_of_yx = CoefficientIndex {
y_pow: numerator_coefficient.y_pow.wrapping_sub(denominator_dividing_coefficient.y_pow),
x_pow: numerator_coefficient.x_pow.wrapping_sub(denominator_dividing_coefficient.x_pow),
};
// NOTE(review): `fallabck` is a typo of `fallback`; preserved here to keep
// this a comments-only change
let fallabck_powers_of_yx = CoefficientIndex { y_pow: 0, x_pow: 0 };
let mut quotient_term = quotient_structure.clone();
ct_set(
&mut quotient_term,
// If the numerator coefficient isn't >=, proper_powers_of_yx will have garbage in them
<_>::conditional_select(&fallabck_powers_of_yx, &proper_powers_of_yx, meaningful_iteration),
q,
);

// Add this term to the quotient, keeping the prior quotient if this iteration is a no-op
let quotient_if_meaningful = quotient.clone() + &quotient_term;
quotient = conditional_select_poly(quotient, quotient_if_meaningful, meaningful_iteration);

// 3) Remove what we've divided out from self
let remainder_if_meaningful = remainder.clone() - (quotient_term * denominator);
remainder = conditional_select_poly(remainder, remainder_if_meaningful, meaningful_iteration);
}

quotient = conditional_select_poly(
quotient,
// If the dividing coefficient was for y**0 x**0, we return the poly scaled by its inverse
self * denominator_dividing_coefficient_inv,
denominator_dividing_coefficient.ct_eq(&CoefficientIndex { y_pow: 0, x_pow: 0 }),
);
remainder = conditional_select_poly(
remainder,
// If the dividing coefficient was for y**0 x**0, we're able to perfectly divide and there's
// no remainder
Poly::zero(),
denominator_dividing_coefficient.ct_eq(&CoefficientIndex { y_pow: 0, x_pow: 0 }),
);

// Clear any junk terms out of the remainder which are less than the denominator
let denominator_leading_coefficient = CoefficientIndex {
y_pow: denominator_leading_coefficient.0.try_into().unwrap(),
x_pow: denominator_leading_coefficient.1.try_into().unwrap(),
};
if denominator_leading_coefficient != (CoefficientIndex { y_pow: 0, x_pow: 0 }) {
// Pop trailing y coefficients at/above the leading coefficient; they're
// expected to be zero (debug-asserted)
while {
let index =
CoefficientIndex { y_pow: remainder.y_coefficients.len().try_into().unwrap(), x_pow: 0 };
bool::from(
index.ct_gt(&denominator_leading_coefficient) |
index.ct_eq(&denominator_leading_coefficient),
)
} {
let popped = remainder.y_coefficients.pop();
debug_assert_eq!(popped, Some(F::ZERO));
}
// Pop trailing yx coefficients at/above the leading coefficient
while {
let index = CoefficientIndex {
y_pow: remainder.yx_coefficients.len().try_into().unwrap(),
x_pow: remainder
.yx_coefficients
.last()
.map(|yx_coefficients| yx_coefficients.len())
.unwrap_or(0)
.try_into()
.unwrap(),
};
bool::from(
index.ct_gt(&denominator_leading_coefficient) |
index.ct_eq(&denominator_leading_coefficient),
)
} {
let popped = remainder.yx_coefficients.last_mut().unwrap().pop();
// This may have been `vec![]`
if let Some(popped) = popped {
debug_assert_eq!(popped, F::ZERO);
}
if remainder.yx_coefficients.last().unwrap().is_empty() {
let popped = remainder.yx_coefficients.pop();
debug_assert_eq!(popped, Some(vec![]));
}
}
// Pop trailing x coefficients at/above the leading coefficient
while {
let index =
CoefficientIndex { y_pow: 0, x_pow: remainder.x_coefficients.len().try_into().unwrap() };
bool::from(
index.ct_gt(&denominator_leading_coefficient) |
index.ct_eq(&denominator_leading_coefficient),
)
} {
let popped = remainder.x_coefficients.pop();
debug_assert_eq!(popped, Some(F::ZERO));
}
}

(quotient, remainder)
}
}
// Reduction of a polynomial by a modulus: long division, keeping only the remainder
impl<F: From<u64> + Zeroize + PrimeField> Rem<&Self> for Poly<F> {
type Output = Self;
fn rem(self, modulus: &Self) -> Self {
// div_rem returns (quotient, remainder); discard the quotient
self.div_rem(modulus).1
}
}
impl<F: From<u64> + Zeroize + PrimeField> Poly<F> {
/// Evaluate this polynomial with the specified x/y values.
///
/// Panics on polynomials with terms whose powers exceed 2^64.
#[must_use]
pub fn eval(&self, x: F, y: F) -> F {
let mut res = self.zero_coefficient;
// y_coefficients[i] is the coefficient of y^(i + 1)
for (pow, coeff) in
self.y_coefficients.iter().enumerate().map(|(i, v)| (u64::try_from(i + 1).unwrap(), v))
{
res += y.pow([pow]) * coeff;
}
// yx_coefficients[i][j] is the coefficient of y^(i + 1) x^(j + 1)
for (y_pow, coeffs) in
self.yx_coefficients.iter().enumerate().map(|(i, v)| (u64::try_from(i + 1).unwrap(), v))
{
let y_pow = y.pow([y_pow]);
for (x_pow, coeff) in
coeffs.iter().enumerate().map(|(i, v)| (u64::try_from(i + 1).unwrap(), v))
{
res += y_pow * x.pow([x_pow]) * coeff;
}
}
// x_coefficients[i] is the coefficient of x^(i + 1)
for (pow, coeff) in
self.x_coefficients.iter().enumerate().map(|(i, v)| (u64::try_from(i + 1).unwrap(), v))
{
res += x.pow([pow]) * coeff;
}
res
}

/// Differentiate a polynomial, reduced by a modulus with a leading y term y^2 x^0, by x and y.
///
/// Returns `(d/dx, d/dy)`.
///
/// This function has undefined behavior if unreduced.
#[must_use]
pub fn differentiate(&self) -> (Poly<F>, Poly<F>) {
// Differentiation by x practically involves:
// - Dropping everything without an x component
// - Shifting everything down a power of x
// - Multiplying the new coefficient by the power it prior was used with
let diff_x = {
let mut diff_x = Poly {
y_coefficients: vec![],
yx_coefficients: vec![],
x_coefficients: vec![],
zero_coefficient: F::ZERO,
};
// d/dx of the pure-x terms
if !self.x_coefficients.is_empty() {
let mut x_coeffs = self.x_coefficients.clone();
// The x^1 coefficient becomes the new constant term
diff_x.zero_coefficient = x_coeffs.remove(0);
diff_x.x_coefficients = x_coeffs;
// Scale each shifted coefficient by its original power (2, 3, ...)
let mut prior_x_power = F::from(2);
for x_coeff in &mut diff_x.x_coefficients {
*x_coeff *= prior_x_power;
prior_x_power += F::ONE;
}
}
// d/dx of the yx terms (only the y^1 row is considered, per the reduction invariant)
if !self.yx_coefficients.is_empty() {
let mut yx_coeffs = self.yx_coefficients[0].clone();
if !yx_coeffs.is_empty() {
// The y^1 x^1 coefficient becomes the new y^1 coefficient
diff_x.y_coefficients = vec![yx_coeffs.remove(0)];
diff_x.yx_coefficients = vec![yx_coeffs];
let mut prior_x_power = F::from(2);
for yx_coeff in &mut diff_x.yx_coefficients[0] {
*yx_coeff *= prior_x_power;
prior_x_power += F::ONE;
}
}
}
diff_x
};
// Differentiation by y is trivial
// It's the y coefficient as the zero coefficient, and the yx coefficients as the x
// coefficients
// This is thanks to any y term over y^2 being reduced out
let diff_y = Poly {
y_coefficients: vec![],
yx_coefficients: vec![],
x_coefficients: self.yx_coefficients.first().cloned().unwrap_or(vec![]),
zero_coefficient: self.y_coefficients.first().cloned().unwrap_or(F::ZERO),
};
(diff_x, diff_y)
}

/// Normalize the x coefficient to 1.
///
/// Panics if there is no x coefficient to normalize or if it cannot be normalized to 1.
#[must_use]
pub fn normalize_x_coefficient(self) -> Self {
// Scale the entire polynomial by the inverse of the x^1 coefficient
let scalar = self.x_coefficients[0].invert().unwrap();
self * scalar
}
}

View File

@@ -1,237 +0,0 @@
use rand_core::OsRng;
use group::{ff::Field, Group};
use dalek_ff_group::EdwardsPoint;
use pasta_curves::{Ep, Eq};
use crate::{DivisorCurve, Poly, new_divisor};
mod poly;
// Equation 4 in the security proofs
// Verify the divisor for `points` satisfies the defining relation over a random
// challenge line
fn check_divisor<C: DivisorCurve>(points: Vec<C>) {
// Create the divisor
let divisor = new_divisor::<C>(&points).unwrap();
// Evaluate the divisor at a point's affine (x, y) coordinates
let eval = |c| {
let (x, y) = C::to_xy(c).unwrap();
divisor.eval(x, y)
};

// Decide challenges
// c0, c1 are random and c2 completes the line, as c0 + c1 + c2 sums to the identity
let c0 = C::random(&mut OsRng);
let c1 = C::random(&mut OsRng);
let c2 = -(c0 + c1);
let (slope, intercept) = crate::slope_intercept::<C>(c0, c1);

// Accumulate, for every interpolated point, the challenge line's equation
// evaluated at that point
let mut rhs = <C as DivisorCurve>::FieldElement::ONE;
for point in points {
let (x, y) = C::to_xy(point).unwrap();
rhs *= intercept - (y - (slope * x));
}
assert_eq!(eval(c0) * eval(c1) * eval(c2), rhs);
}
// Exhaustively test divisors interpolating 2 ..= 256 points (the closing point
// makes each set sum to the point at infinity)
fn test_divisor<C: DivisorCurve>() {
for i in 1 ..= 255 {
println!("Test iteration {i}");

// Select points
let mut points = vec![];
for _ in 0 .. i {
points.push(C::random(&mut OsRng));
}
points.push(-points.iter().sum::<C>());
println!("Points {}", points.len());

// Perform the original check
check_divisor(points.clone());

// Create the divisor
let divisor = new_divisor::<C>(&points).unwrap();

// For a divisor interpolating 256 points, as one does when interpreting a 255-bit discrete log
// with the result of its scalar multiplication against a fixed generator, the lengths of the
// yx/x coefficients shouldn't supersede the following bounds
assert!((divisor.yx_coefficients.first().unwrap_or(&vec![]).len()) <= 126);
assert!((divisor.x_coefficients.len() - 1) <= 127);
assert!(
(1 + divisor.yx_coefficients.first().unwrap_or(&vec![]).len() +
(divisor.x_coefficients.len() - 1) +
1) <=
255
);

// Decide challenges
let c0 = C::random(&mut OsRng);
let c1 = C::random(&mut OsRng);
let c2 = -(c0 + c1);
let (slope, intercept) = crate::slope_intercept::<C>(c0, c1);

// Perform the Logarithmic derivative check
{
let dx_over_dz = {
// 3x^2 + a (per the constructed coefficients)
let dx = Poly {
y_coefficients: vec![],
yx_coefficients: vec![],
x_coefficients: vec![C::FieldElement::ZERO, C::FieldElement::from(3)],
zero_coefficient: C::a(),
};
// 2y (per the constructed coefficients)
let dy = Poly {
y_coefficients: vec![C::FieldElement::from(2)],
yx_coefficients: vec![],
x_coefficients: vec![],
zero_coefficient: C::FieldElement::ZERO,
};
let dz = (dy.clone() * -slope) + &dx;
// We want dx/dz, and dz/dx is equal to dy/dx - slope
// Sagemath claims this, dy / dz, is the proper inverse
(dy, dz)
};

{
// Evaluate dx/dz (as the ratio of the two polynomials) at a challenge point
let sanity_eval = |c| {
let (x, y) = C::to_xy(c).unwrap();
dx_over_dz.0.eval(x, y) * dx_over_dz.1.eval(x, y).invert().unwrap()
};
let sanity = sanity_eval(c0) + sanity_eval(c1) + sanity_eval(c2);
// This verifies the dx/dz polynomial is correct
assert_eq!(sanity, C::FieldElement::ZERO);
}

// Logarithmic derivative check
let test = |divisor: Poly<_>| {
let (dx, dy) = divisor.differentiate();

// The logarithmic derivative of the divisor, evaluated at a challenge point
let lhs = |c| {
let (x, y) = C::to_xy(c).unwrap();

let n_0 = (C::FieldElement::from(3) * (x * x)) + C::a();
let d_0 = (C::FieldElement::from(2) * y).invert().unwrap();
let p_0_n_0 = n_0 * d_0;

let n_1 = dy.eval(x, y);
let first = p_0_n_0 * n_1;

let second = dx.eval(x, y);

let d_1 = divisor.eval(x, y);

let fraction_1_n = first + second;
let fraction_1_d = d_1;

let fraction_2_n = dx_over_dz.0.eval(x, y);
let fraction_2_d = dx_over_dz.1.eval(x, y);

fraction_1_n * fraction_2_n * (fraction_1_d * fraction_2_d).invert().unwrap()
};
let lhs = lhs(c0) + lhs(c1) + lhs(c2);

// Sum, over every interpolated point, the inverse of the challenge line's
// equation evaluated at that point
let mut rhs = C::FieldElement::ZERO;
for point in &points {
let (x, y) = <C as DivisorCurve>::to_xy(*point).unwrap();
rhs += (intercept - (y - (slope * x))).invert().unwrap();
}

assert_eq!(lhs, rhs);
};
// Test the divisor and the divisor with a normalized x coefficient
test(divisor.clone());
test(divisor.normalize_x_coefficient());
}
}
}
// A divisor must support interpolating the same point multiple times
fn test_same_point<C: DivisorCurve>() {
  let repeated = C::random(&mut OsRng);
  let mut points = vec![repeated, repeated];
  // Close the set so it sums to the point at infinity
  points.push(-points.iter().sum::<C>());
  check_divisor(points);
}
// Check divisors whose point sets contain subsets summing to the point at infinity
fn test_subset_sum_to_infinity<C: DivisorCurve>() {
// Internally, a binary tree algorithm is used
// This executes the first pass to end up with [0, 0] for further reductions
{
// Two pairs (P, -P), (Q, -Q), each summing to the point at infinity
let mut points = vec![C::random(&mut OsRng)];
points.push(-points[0]);

let next = C::random(&mut OsRng);
points.push(next);
points.push(-next);
check_divisor(points);
}

// This executes the first pass to end up with [0, X, -X, 0]
{
// (P, -P) reduces to the identity first
let mut points = vec![C::random(&mut OsRng)];
points.push(-points[0]);

// (x_1, x_2) and (-x_1, -x_2) reduce to X and -X respectively
let x_1 = C::random(&mut OsRng);
let x_2 = C::random(&mut OsRng);
points.push(x_1);
points.push(x_2);
points.push(-x_1);
points.push(-x_2);

// (Q, -Q) reduces to the identity last
let next = C::random(&mut OsRng);
points.push(next);
points.push(-next);

check_divisor(points);
}
}
#[test]
fn test_divisor_pallas() {
  // Run the full battery of divisor tests over the Pallas curve
  let cases: [fn(); 3] =
    [test_same_point::<Ep>, test_subset_sum_to_infinity::<Ep>, test_divisor::<Ep>];
  for case in cases {
    case();
  }
}
#[test]
fn test_divisor_vesta() {
  // Run the full battery of divisor tests over the Vesta curve
  let cases: [fn(); 3] =
    [test_same_point::<Eq>, test_subset_sum_to_infinity::<Eq>, test_divisor::<Eq>];
  for case in cases {
    case();
  }
}
#[test]
fn test_divisor_ed25519() {
// Since we're implementing Wei25519 ourselves, check the isomorphism works as expected
{
// Add two affine points via the mmadd-1998-cmo formulas (incomplete: the
// final inversion fails when x1 == x2) and compare against Edwards addition
// mapped into Wei25519 coordinates
let incomplete_add = |p1, p2| {
let (x1, y1) = EdwardsPoint::to_xy(p1).unwrap();
let (x2, y2) = EdwardsPoint::to_xy(p2).unwrap();

// mmadd-1998-cmo
let u = y2 - y1;
let uu = u * u;
let v = x2 - x1;
let vv = v * v;
let vvv = v * vv;
let R = vv * x1;
let A = uu - vvv - R.double();
let x3 = v * A;
let y3 = (u * (R - A)) - (vvv * y1);
let z3 = vvv;

// Normalize from XYZ to XY
let x3 = x3 * z3.invert().unwrap();
let y3 = y3 * z3.invert().unwrap();

// Edwards addition -> Wei25519 coordinates should be equivalent to Wei25519 addition
assert_eq!(EdwardsPoint::to_xy(p1 + p2).unwrap(), (x3, y3));
};

for _ in 0 .. 256 {
incomplete_add(EdwardsPoint::random(&mut OsRng), EdwardsPoint::random(&mut OsRng));
}
}

test_same_point::<EdwardsPoint>();
test_subset_sum_to_infinity::<EdwardsPoint>();
test_divisor::<EdwardsPoint>();
}

View File

@@ -1,148 +0,0 @@
use rand_core::OsRng;
use group::ff::Field;
use pasta_curves::Ep;
use crate::{DivisorCurve, Poly};
// The field the test curve (Pallas) is defined over, used for divisor coefficients
type F = <Ep as DivisorCurve>::FieldElement;
#[test]
fn test_poly() {
let zero = F::ZERO;
let one = F::ONE;

// y^2 divided by y: quotient y, remainder 0
{
let mut poly = Poly::zero();
// y_coefficients[i] is the coefficient of y^(i + 1), so this is y^2
poly.y_coefficients = vec![zero, one];

let mut modulus = Poly::zero();
modulus.y_coefficients = vec![one];
assert_eq!(
poly.clone().div_rem(&modulus).0,
Poly {
y_coefficients: vec![one],
yx_coefficients: vec![],
x_coefficients: vec![],
zero_coefficient: zero
}
);
assert_eq!(
poly % &modulus,
Poly {
y_coefficients: vec![],
yx_coefficients: vec![],
x_coefficients: vec![],
zero_coefficient: zero
}
);
}

// (y^2)^2 = y^4
{
let mut poly = Poly::zero();
poly.y_coefficients = vec![zero, one];

let mut squared = Poly::zero();
squared.y_coefficients = vec![zero, zero, zero, one];
assert_eq!(poly.clone() * &poly, squared);
}

// Multiplication across mixed-term polynomials
{
let mut a = Poly::zero();
a.zero_coefficient = F::from(2u64);

let mut b = Poly::zero();
b.zero_coefficient = F::from(3u64);

// 2 * 3 = 6
let mut res = Poly::zero();
res.zero_coefficient = F::from(6u64);
assert_eq!(a.clone() * &b, res);

// 2 * (4y + 3) = 8y + 6, and multiplication commutes
b.y_coefficients = vec![F::from(4u64)];
res.y_coefficients = vec![F::from(8u64)];
assert_eq!(a.clone() * &b, res);
assert_eq!(b.clone() * &a, res);

// (5x + 2) * (4y + 3) = 20xy + 8y + 15x + 6
a.x_coefficients = vec![F::from(5u64)];
res.x_coefficients = vec![F::from(15u64)];
res.yx_coefficients = vec![vec![F::from(20u64)]];
assert_eq!(a.clone() * &b, res);
assert_eq!(b * &a, res);

// res is now 20xy + 8*y + 15*x + 6
// res ** 2 =
// 400*x^2*y^2 + 320*x*y^2 + 64*y^2 + 600*x^2*y + 480*x*y + 96*y + 225*x^2 + 180*x + 36
let mut squared = Poly::zero();
squared.y_coefficients = vec![F::from(96u64), F::from(64u64)];
squared.yx_coefficients =
vec![vec![F::from(480u64), F::from(600u64)], vec![F::from(320u64), F::from(400u64)]];
squared.x_coefficients = vec![F::from(180u64), F::from(225u64)];
squared.zero_coefficient = F::from(36u64);
assert_eq!(res.clone() * &res, squared);
}
}
#[test]
// NOTE(review): "differentation" is a typo of "differentiation"; the name is
// kept as-is here to avoid changing the test's public name
fn test_differentation() {
let random = || F::random(&mut OsRng);

// A poly with single-entry y/yx components and three x terms
let input = Poly {
y_coefficients: vec![random()],
yx_coefficients: vec![vec![random()]],
x_coefficients: vec![random(), random(), random()],
zero_coefficient: random(),
};
let (diff_x, diff_y) = input.differentiate();
// d/dx: the xy coefficient becomes the y coefficient, each x^n coefficient is
// scaled by n and shifted down a power, and the x^1 coefficient becomes the
// constant term
assert_eq!(
diff_x,
Poly {
y_coefficients: vec![input.yx_coefficients[0][0]],
yx_coefficients: vec![],
x_coefficients: vec![
F::from(2) * input.x_coefficients[1],
F::from(3) * input.x_coefficients[2]
],
zero_coefficient: input.x_coefficients[0],
}
);
// d/dy: the y coefficient becomes the constant term and the xy coefficient
// becomes the x coefficient
assert_eq!(
diff_y,
Poly {
y_coefficients: vec![],
yx_coefficients: vec![],
x_coefficients: vec![input.yx_coefficients[0][0]],
zero_coefficient: input.y_coefficients[0],
}
);

// The same checks with a longer yx row and four x terms
let input = Poly {
y_coefficients: vec![random()],
yx_coefficients: vec![vec![random(), random()]],
x_coefficients: vec![random(), random(), random(), random()],
zero_coefficient: random(),
};
let (diff_x, diff_y) = input.differentiate();
assert_eq!(
diff_x,
Poly {
y_coefficients: vec![input.yx_coefficients[0][0]],
yx_coefficients: vec![vec![F::from(2) * input.yx_coefficients[0][1]]],
x_coefficients: vec![
F::from(2) * input.x_coefficients[1],
F::from(3) * input.x_coefficients[2],
F::from(4) * input.x_coefficients[3],
],
zero_coefficient: input.x_coefficients[0],
}
);
assert_eq!(
diff_y,
Poly {
y_coefficients: vec![],
yx_coefficients: vec![],
x_coefficients: vec![input.yx_coefficients[0][0], input.yx_coefficients[0][1]],
zero_coefficient: input.y_coefficients[0],
}
);
}

View File

@@ -1,27 +0,0 @@
[package]
name = "generalized-bulletproofs-ec-gadgets"
version = "0.1.0"
description = "Gadgets for working with an embedded Elliptic Curve in a Generalized Bulletproofs circuit"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/fcmps/ec-gadgets"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["bulletproofs", "circuit", "divisors"]
edition = "2021"
rust-version = "1.69"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[dependencies]
std-shims = { path = "../../../common/std-shims", version = "^0.1.1", default-features = false }
generic-array = { version = "1", default-features = false, features = ["alloc"] }
ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false }
generalized-bulletproofs-circuit-abstraction = { path = "../circuit-abstraction", default-features = false }
[features]
std = ["std-shims/std", "ciphersuite/std", "generalized-bulletproofs-circuit-abstraction/std"]
default = ["std"]

View File

@@ -1,3 +0,0 @@
# Generalized Bulletproofs Circuit Abstraction
A circuit abstraction around `generalized-bulletproofs`.

Some files were not shown because too many files have changed in this diff Show More