1 Commit

Author: Luke Parker
SHA1: acb4c08d3f
Date: 2024-07-17 19:35:01 -04:00

Rename the coins folder to networks

Ethereum isn't a coin. It's a network.

Resolves #357.

515 changed files with 67055 additions and 12163 deletions

View File

@@ -12,7 +12,7 @@ runs:
steps:
- name: Bitcoin Daemon Cache
id: cache-bitcoind
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
with:
path: bitcoin.tar.gz
key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

View File

@@ -7,20 +7,13 @@ runs:
- name: Remove unused packages
shell: bash
run: |
# Ensure the repositories are synced
sudo apt update -y
# Actually perform the removals
sudo apt remove -y "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
sudo apt remove -y "*msbuild*" "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
sudo apt remove -y "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*"
sudo apt remove -y "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*"
sudo apt remove -y --allow-remove-essential -f shim-signed *python3*
# This removal command requires the prior removals due to unmet dependencies otherwise
sudo apt remove -y "*qemu*" "*sql*" "*texinfo*" "*imagemagick*"
# Reinstall python3 as a general dependency of a functional operating system
sudo apt install -y python3 --fix-missing
sudo apt autoremove -y
sudo apt clean
docker system prune -a --volumes
if: runner.os == 'Linux'
- name: Remove unused packages
@@ -38,48 +31,19 @@ runs:
shell: bash
run: |
if [ "$RUNNER_OS" == "Linux" ]; then
sudo apt install -y ca-certificates protobuf-compiler libclang-dev
sudo apt install -y ca-certificates protobuf-compiler
elif [ "$RUNNER_OS" == "Windows" ]; then
choco install protoc
elif [ "$RUNNER_OS" == "macOS" ]; then
brew install protobuf llvm
HOMEBREW_ROOT_PATH=/opt/homebrew # Apple Silicon
if [ $(uname -m) = "x86_64" ]; then HOMEBREW_ROOT_PATH=/usr/local; fi # Intel
ls $HOMEBREW_ROOT_PATH/opt/llvm/lib | grep "libclang.dylib" # Make sure this installed `libclang`
echo "DYLD_LIBRARY_PATH=$HOMEBREW_ROOT_PATH/opt/llvm/lib:$DYLD_LIBRARY_PATH" >> "$GITHUB_ENV"
brew install protobuf
fi
- name: Install solc
shell: bash
run: |
cargo +1.89 install svm-rs --version =0.5.18
svm install 0.8.26
svm use 0.8.26
- name: Remove preinstalled Docker
shell: bash
run: |
docker system prune -a --volumes
sudo apt remove -y *docker*
# Install uidmap which will be required for the explicitly installed Docker
sudo apt install uidmap
if: runner.os == 'Linux'
- name: Update system dependencies
shell: bash
run: |
sudo apt update -y
sudo apt upgrade -y
sudo apt autoremove -y
sudo apt clean
if: runner.os == 'Linux'
- name: Install rootless Docker
uses: docker/setup-docker-action@b60f85385d03ac8acfca6d9996982511d8620a19
with:
rootless: true
set-host: true
if: runner.os == 'Linux'
cargo install svm-rs
svm install 0.8.25
svm use 0.8.25
# - name: Cache Rust
# uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43

View File

@@ -5,14 +5,14 @@ inputs:
version:
description: "Version to download and run"
required: false
default: v0.18.3.4
default: v0.18.3.1
runs:
using: "composite"
steps:
- name: Monero Wallet RPC Cache
id: cache-monero-wallet-rpc
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
with:
path: monero-wallet-rpc
key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

View File

@@ -5,14 +5,14 @@ inputs:
version:
description: "Version to download and run"
required: false
default: v0.18.3.4
default: v0.18.3.1
runs:
using: "composite"
steps:
- name: Monero Daemon Cache
id: cache-monerod
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
with:
path: /usr/bin/monerod
key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

View File

@@ -5,7 +5,7 @@ inputs:
monero-version:
description: "Monero version to download and run as a regtest node"
required: false
default: v0.18.3.4
default: v0.18.3.1
bitcoin-version:
description: "Bitcoin version to download and run as a regtest node"

View File

@@ -1 +1 @@
nightly-2025-11-01
nightly-2024-07-01

View File

@@ -27,7 +27,6 @@ jobs:
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
-p std-shims \
-p zalloc \
-p patchable-async-sleep \
-p serai-db \
-p serai-env \
-p simple-request

View File

@@ -32,15 +32,9 @@ jobs:
-p dalek-ff-group \
-p minimal-ed448 \
-p ciphersuite \
-p ciphersuite-kp256 \
-p multiexp \
-p schnorr-signatures \
-p dleq \
-p dkg \
-p dkg-recovery \
-p dkg-dealer \
-p dkg-promote \
-p dkg-musig \
-p dkg-pedpop \
-p modular-frost \
-p frost-schnorrkel

View File

@@ -12,13 +12,13 @@ jobs:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Advisory Cache
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
with:
path: ~/.cargo/advisory-db
key: rust-advisory-db
- name: Install cargo deny
run: cargo +1.89 install cargo-deny --version =0.18.3
run: cargo install --locked cargo-deny
- name: Run cargo deny
run: cargo deny -L error --all-features check --hide-inclusion-graph
run: cargo deny -L error --all-features check

View File

@@ -11,7 +11,7 @@ jobs:
clippy:
strategy:
matrix:
os: [ubuntu-latest, macos-15-intel, macos-latest, windows-latest]
os: [ubuntu-latest, macos-13, macos-14, windows-latest]
runs-on: ${{ matrix.os }}
steps:
@@ -26,7 +26,7 @@ jobs:
uses: ./.github/actions/build-dependencies
- name: Install nightly rust
run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-src -c clippy
run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32-unknown-unknown -c clippy
- name: Run Clippy
run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module
@@ -46,16 +46,16 @@ jobs:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Advisory Cache
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
with:
path: ~/.cargo/advisory-db
key: rust-advisory-db
- name: Install cargo deny
run: cargo +1.89 install cargo-deny --version =0.18.4
run: cargo install --locked cargo-deny
- name: Run cargo deny
run: cargo deny -L error --all-features check --hide-inclusion-graph
run: cargo deny -L error --all-features check
fmt:
runs-on: ubuntu-latest
@@ -79,5 +79,5 @@ jobs:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Verify all dependencies are in use
run: |
cargo +1.89 install cargo-machete --version =0.8.0
cargo +1.89 machete
cargo install cargo-machete
cargo machete

.github/workflows/monero-tests.yaml (new file, 77 lines, vendored)
View File

@@ -0,0 +1,77 @@
name: Monero Tests
on:
push:
branches:
- develop
paths:
- "networks/monero/**"
- "processor/**"
pull_request:
paths:
- "networks/monero/**"
- "processor/**"
workflow_dispatch:
jobs:
# Only run these once since they will be consistent regardless of any node
unit-tests:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Test Dependencies
uses: ./.github/actions/test-dependencies
- name: Run Unit Tests Without Features
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-io --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-generators --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-primitives --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-mlsag --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-clsag --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-borromean --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-bulletproofs --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-rpc --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-address --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-seed --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package polyseed --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --lib
# Doesn't run unit tests with features as the tests workflow will
integration-tests:
runs-on: ubuntu-latest
# Test against all supported protocol versions
strategy:
matrix:
version: [v0.17.3.2, v0.18.2.0]
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Test Dependencies
uses: ./.github/actions/test-dependencies
with:
monero-version: ${{ matrix.version }}
- name: Run Integration Tests Without Features
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --test '*'
- name: Run Integration Tests
# Don't run if the tests workflow also will
if: ${{ matrix.version != 'v0.18.2.0' }}
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --all-features --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --all-features --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --all-features --test '*'

View File

@@ -18,7 +18,7 @@ on:
workflow_dispatch:
jobs:
test-networks:
test-coins:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
@@ -33,3 +33,19 @@ jobs:
-p alloy-simple-request-transport \
-p ethereum-serai \
-p serai-ethereum-relayer \
-p monero-io \
-p monero-generators \
-p monero-primitives \
-p monero-mlsag \
-p monero-clsag \
-p monero-borromean \
-p monero-bulletproofs \
-p monero-serai \
-p monero-rpc \
-p monero-simple-request-rpc \
-p monero-address \
-p monero-wallet \
-p monero-seed \
-p polyseed \
-p monero-wallet-util \
-p monero-serai-verify-chain

View File

@@ -1,7 +1,6 @@
# MIT License
#
# Copyright (c) 2022 just-the-docs
# Copyright (c) 2022-2024 Luke Parker
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -21,21 +20,31 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
name: Deploy Rust docs and Jekyll site to Pages
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
# Sample workflow for building and deploying a Jekyll site to GitHub Pages
name: Deploy Jekyll site to Pages
on:
push:
branches:
- "develop"
paths:
- "docs/**"
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
contents: read
pages: write
id-token: write
# Only allow one concurrent deployment
# Allow one concurrent deployment
concurrency:
group: "pages"
cancel-in-progress: true
@@ -44,37 +53,27 @@ jobs:
# Build job
build:
runs-on: ubuntu-latest
defaults:
run:
working-directory: docs
steps:
- name: Checkout
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
uses: actions/checkout@v3
- name: Setup Ruby
uses: ruby/setup-ruby@44511735964dcb71245e7e55f72539531f7bc0eb
uses: ruby/setup-ruby@v1
with:
bundler-cache: true
cache-version: 0
working-directory: "${{ github.workspace }}/docs"
- name: Setup Pages
id: pages
uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b
uses: actions/configure-pages@v3
- name: Build with Jekyll
run: cd ${{ github.workspace }}/docs && bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
env:
JEKYLL_ENV: production
- name: Get nightly version to use
id: nightly
shell: bash
run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT
- name: Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Build Rust docs
run: |
rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-docs
RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --no-deps --all-features
mv target/doc docs/_site/rust
- name: Upload artifact
uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b
uses: actions/upload-pages-artifact@v1
with:
path: "docs/_site/"
@@ -88,4 +87,4 @@ jobs:
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e
uses: actions/deploy-pages@v2

View File

@@ -63,11 +63,6 @@ jobs:
-p serai-dex-pallet \
-p serai-validator-sets-primitives \
-p serai-validator-sets-pallet \
-p serai-genesis-liquidity-primitives \
-p serai-genesis-liquidity-pallet \
-p serai-emissions-primitives \
-p serai-emissions-pallet \
-p serai-economic-security-pallet \
-p serai-in-instructions-primitives \
-p serai-in-instructions-pallet \
-p serai-signals-primitives \

.gitignore (7 lines changed, vendored)
View File

@@ -1,14 +1,7 @@
target
# Don't commit any `Cargo.lock` which aren't the workspace's
Cargo.lock
!./Cargo.lock
# Don't commit any `Dockerfile`, as they're auto-generated, except the only one which isn't
Dockerfile
Dockerfile.fast-epoch
!orchestration/runtime/Dockerfile
.test-logs
.vscode

Cargo.lock (7022 lines changed, generated)

File diff suppressed because it is too large.

View File

@@ -1,8 +1,16 @@
[workspace]
resolver = "2"
members = [
# Version patches
"patches/parking_lot_core",
"patches/parking_lot",
"patches/zstd",
"patches/rocksdb",
"patches/proc-macro-crate",
# std patches
"patches/matches",
"patches/is-terminal",
# Rewrites/redirects
"patches/option-ext",
@@ -10,7 +18,6 @@ members = [
"common/std-shims",
"common/zalloc",
"common/patchable-async-sleep",
"common/db",
"common/env",
"common/request",
@@ -21,18 +28,12 @@ members = [
"crypto/dalek-ff-group",
"crypto/ed448",
"crypto/ciphersuite",
"crypto/ciphersuite/kp256",
"crypto/multiexp",
"crypto/schnorr",
"crypto/dleq",
"crypto/dkg",
"crypto/dkg/recovery",
"crypto/dkg/dealer",
"crypto/dkg/promote",
"crypto/dkg/musig",
"crypto/dkg/pedpop",
"crypto/frost",
"crypto/schnorrkel",
@@ -42,6 +43,23 @@ members = [
"networks/ethereum",
"networks/ethereum/relayer",
"networks/monero/io",
"networks/monero/generators",
"networks/monero/primitives",
"networks/monero/ringct/mlsag",
"networks/monero/ringct/clsag",
"networks/monero/ringct/borromean",
"networks/monero/ringct/bulletproofs",
"networks/monero",
"networks/monero/rpc",
"networks/monero/rpc/simple-request",
"networks/monero/wallet/address",
"networks/monero/wallet",
"networks/monero/wallet/seed",
"networks/monero/wallet/polyseed",
"networks/monero/wallet/util",
"networks/monero/verify-chain",
"message-queue",
"processor/messages",
@@ -61,14 +79,6 @@ members = [
"substrate/validator-sets/primitives",
"substrate/validator-sets/pallet",
"substrate/genesis-liquidity/primitives",
"substrate/genesis-liquidity/pallet",
"substrate/emissions/primitives",
"substrate/emissions/pallet",
"substrate/economic-security/pallet",
"substrate/in-instructions/primitives",
"substrate/in-instructions/pallet",
@@ -111,26 +121,31 @@ minimal-ed448 = { opt-level = 3 }
multiexp = { opt-level = 3 }
monero-oxide = { opt-level = 3 }
monero-serai = { opt-level = 3 }
[profile.release]
panic = "unwind"
overflow-checks = true
[patch.crates-io]
# Dependencies from monero-oxide which originate from within our own tree
std-shims = { path = "common/std-shims" }
simple-request = { path = "common/request" }
dalek-ff-group = { path = "crypto/dalek-ff-group" }
flexible-transcript = { path = "crypto/transcript" }
modular-frost = { path = "crypto/frost" }
# https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }
# These have `std` alternatives
# Needed due to dockertest's usage of `Rc`s when we need `Arc`s
dockertest = { git = "https://github.com/orcalabs/dockertest-rs", rev = "4dd6ae24738aa6dc5c89444cc822ea4745517493" }
parking_lot_core = { path = "patches/parking_lot_core" }
parking_lot = { path = "patches/parking_lot" }
# wasmtime pulls in an old version for this
zstd = { path = "patches/zstd" }
# Needed for WAL compression
rocksdb = { path = "patches/rocksdb" }
# proc-macro-crate 2 binds to an old version of toml for msrv so we patch to 3
proc-macro-crate = { path = "patches/proc-macro-crate" }
# is-terminal now has an std-based solution with an equivalent API
is-terminal = { path = "patches/is-terminal" }
# So does matches
matches = { path = "patches/matches" }
home = { path = "patches/home" }
# directories-next was created because directories was unmaintained
# directories-next is now unmaintained while directories is maintained
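
Several of these patches are thin redirects rather than forks. As a hypothetical sketch (not this repository's actual code), the `matches` redirect could be as small as re-exporting the macro core now provides:

    // src/lib.rs of a hypothetical redirect crate for `matches`.
    // `matches!` has been in core since Rust 1.42, so the old crate's macro
    // can be replaced with a plain re-export.
    #![no_std]

    pub use core::matches;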
@@ -141,10 +156,7 @@ option-ext = { path = "patches/option-ext" }
directories-next = { path = "patches/directories-next" }
[workspace.lints.clippy]
uninlined_format_args = "allow" # TODO
unwrap_or_default = "allow"
manual_is_multiple_of = "allow"
incompatible_msrv = "allow" # Manually verified with a GitHub workflow
borrow_as_ptr = "deny"
cast_lossless = "deny"
cast_possible_truncation = "deny"
@@ -169,14 +181,14 @@ large_stack_arrays = "deny"
linkedlist = "deny"
macro_use_imports = "deny"
manual_instant_elapsed = "deny"
# TODO manual_let_else = "deny"
manual_let_else = "deny"
manual_ok_or = "deny"
manual_string_new = "deny"
map_unwrap_or = "deny"
match_bool = "deny"
match_same_arms = "deny"
missing_fields_in_debug = "deny"
# TODO needless_continue = "deny"
needless_continue = "deny"
needless_pass_by_value = "deny"
ptr_cast_constness = "deny"
range_minus_one = "deny"
@@ -184,7 +196,8 @@ range_plus_one = "deny"
redundant_closure_for_method_calls = "deny"
redundant_else = "deny"
string_add_assign = "deny"
unchecked_time_subtraction = "deny"
unchecked_duration_subtraction = "deny"
uninlined_format_args = "deny"
unnecessary_box_returns = "deny"
unnecessary_join = "deny"
unnecessary_wraps = "deny"
@@ -192,21 +205,3 @@ unnested_or_patterns = "deny"
unused_async = "deny"
unused_self = "deny"
zero_sized_map_values = "deny"
# TODO: These were incurred when updating Rust as necessary for compilation, yet aren't being fixed
# at this time due to the impacts it'd have throughout the repository (when this isn't actively the
# primary branch, `next` is)
needless_continue = "allow"
needless_lifetimes = "allow"
useless_conversion = "allow"
empty_line_after_doc_comments = "allow"
manual_div_ceil = "allow"
manual_let_else = "allow"
unnecessary_map_or = "allow"
result_large_err = "allow"
unneeded_struct_pattern = "allow"
[workspace.lints.rust]
unused = "allow" # TODO: https://github.com/rust-lang/rust/issues/147648
mismatched_lifetime_syntaxes = "allow"
unused_attributes = "allow"
unused_parens = "allow"

View File

@@ -5,4 +5,4 @@ a full copy of the AGPL-3.0 License is included in the root of this repository
as a reference text. This copy should be provided with any distribution of a
crate licensed under the AGPL-3.0, as per its terms.
The GitHub actions/workflows (`.github`) are licensed under the MIT license.
The GitHub actions (`.github/actions`) are licensed under the MIT license.

View File

@@ -59,6 +59,7 @@ issued at the discretion of the Immunefi program managers.
- [Website](https://serai.exchange/): https://serai.exchange/
- [Immunefi](https://immunefi.com/bounty/serai/): https://immunefi.com/bounty/serai/
- [Twitter](https://twitter.com/SeraiDEX): https://twitter.com/SeraiDEX
- [Mastodon](https://cryptodon.lol/@serai): https://cryptodon.lol/@serai
- [Discord](https://discord.gg/mpEUtJR3vz): https://discord.gg/mpEUtJR3vz
- [Matrix](https://matrix.to/#/#serai:matrix.org): https://matrix.to/#/#serai:matrix.org
- [Reddit](https://www.reddit.com/r/SeraiDEX/): https://www.reddit.com/r/SeraiDEX/

View File

@@ -18,7 +18,7 @@ workspace = true
[dependencies]
parity-db = { version = "0.4", default-features = false, optional = true }
rocksdb = { version = "0.24", default-features = false, features = ["zstd"], optional = true }
rocksdb = { version = "0.21", default-features = false, features = ["zstd"], optional = true }
[features]
parity-db = ["dep:parity-db"]

View File

@@ -1,5 +1,5 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
// Obtain a variable from the Serai environment/secret store.
pub fn var(variable: &str) -> Option<String> {

View File

@@ -1,19 +0,0 @@
[package]
name = "patchable-async-sleep"
version = "0.1.0"
description = "An async sleep function, patchable to the preferred runtime"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/patchable-async-sleep"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["async", "sleep", "tokio", "smol", "async-std"]
edition = "2021"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
tokio = { version = "1", default-features = false, features = [ "time"] }

View File

@@ -1,7 +0,0 @@
# Patchable Async Sleep
An async sleep function, patchable to the preferred runtime.
This crate is `tokio`-backed. Applications which don't want to use `tokio`
should patch this crate to one which works with their preferred runtime. The
point of it is to have a minimal API surface to trivially facilitate such work.

View File

@@ -1,10 +0,0 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
use core::time::Duration;
/// Sleep for the specified duration.
pub fn sleep(duration: Duration) -> impl core::future::Future<Output = ()> {
tokio::time::sleep(duration)
}
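
For reference, a hedged sketch of what a runtime swap looks like: the same one-function API as the tokio-backed body above, reimplemented over `smol` (assuming `smol::Timer::after` as the backend; an illustration, not code from this commit):

    use core::time::Duration;

    /// Sleep for the specified duration.
    pub fn sleep(duration: Duration) -> impl core::future::Future<Output = ()> {
        async move {
            // `Timer::after` yields an `Instant` once the duration elapses;
            // it's discarded to keep the `()`-returning signature intact.
            smol::Timer::after(duration).await;
        }
    }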

View File

@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/simple-requ
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["http", "https", "async", "request", "ssl"]
edition = "2021"
rust-version = "1.70"
rust-version = "1.64"
[package.metadata.docs.rs]
all-features = true

View File

@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
use std::sync::Arc;

View File

@@ -1,13 +1,13 @@
[package]
name = "std-shims"
version = "0.1.4"
version = "0.1.1"
description = "A series of std shims to make alloc more feasible"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["nostd", "no_std", "alloc", "io"]
edition = "2021"
rust-version = "1.64"
rust-version = "1.70"
[package.metadata.docs.rs]
all-features = true
@@ -17,8 +17,7 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true
[dependencies]
rustversion = { version = "1", default-features = false }
spin = { version = "0.10", default-features = false, features = ["use_ticket_mutex", "once", "lazy"] }
spin = { version = "0.9", default-features = false, features = ["use_ticket_mutex", "once"] }
hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] }
[features]

View File

@@ -3,9 +3,4 @@
A crate which passes through to std when the default `std` feature is enabled,
yet provides a series of shims when it isn't.
No guarantee of one-to-one parity is provided. The shims provided aim to be sufficient for the
average case.
`HashSet` and `HashMap` are provided via `hashbrown`. Synchronization primitives are provided via
`spin` (avoiding a requirement on `critical-section`).
types are not guaranteed to be
`HashSet` and `HashMap` are provided via `hashbrown`.
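
A hedged usage sketch of the shims described above (assuming a `collections` module re-exporting `hashbrown`'s types, by analogy with the `io` and `sync` modules in the diffs below):

    #![no_std]
    extern crate alloc;

    use std_shims::collections::HashMap;

    // Count byte frequencies without std; `HashMap::new` works because the
    // `ahash` feature supplies a default hasher.
    fn tally(items: &[u8]) -> HashMap<u8, usize> {
        let mut counts = HashMap::new();
        for item in items {
            *counts.entry(*item).or_insert(0) += 1;
        }
        counts
    }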

View File

@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]
@@ -11,64 +11,3 @@ pub mod io;
pub use alloc::vec;
pub use alloc::str;
pub use alloc::string;
pub mod prelude {
#[rustversion::before(1.73)]
#[doc(hidden)]
pub trait StdShimsDivCeil {
fn div_ceil(self, rhs: Self) -> Self;
}
#[rustversion::before(1.73)]
mod impl_divceil {
use super::StdShimsDivCeil;
impl StdShimsDivCeil for u8 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for u16 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for u32 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for u64 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for u128 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for usize {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
}
#[cfg(feature = "std")]
#[rustversion::before(1.74)]
#[doc(hidden)]
pub trait StdShimsIoErrorOther {
fn other<E>(error: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>;
}
#[cfg(feature = "std")]
#[rustversion::before(1.74)]
impl StdShimsIoErrorOther for std::io::Error {
fn other<E>(error: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
std::io::Error::new(std::io::ErrorKind::Other, error)
}
}
}
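
What the prelude shown above buys, as a hedged usage sketch: before Rust 1.73 the call resolves to the `StdShimsDivCeil` trait, while newer toolchains resolve to the inherent method, so callers are unaffected either way:

    use std_shims::prelude::*;

    fn blocks_needed(bytes: u64, block_size: u64) -> u64 {
        // Pre-1.73: StdShimsDivCeil::div_ceil; 1.73+: u64::div_ceil.
        bytes.div_ceil(block_size)
    }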

View File

@@ -25,11 +25,28 @@ mod mutex_shim {
}
pub use mutex_shim::{ShimMutex as Mutex, MutexGuard};
#[cfg(feature = "std")]
pub use std::sync::OnceLock;
#[cfg(not(feature = "std"))]
pub use spin::Lazy as LazyLock;
#[rustversion::before(1.80)]
#[cfg(feature = "std")]
pub use spin::Lazy as LazyLock;
#[rustversion::since(1.80)]
#[cfg(feature = "std")]
pub use std::sync::LazyLock;
mod oncelock_shim {
use spin::Once;
pub struct OnceLock<T>(Once<T>);
impl<T> OnceLock<T> {
pub const fn new() -> OnceLock<T> {
OnceLock(Once::new())
}
pub fn get(&self) -> Option<&T> {
self.0.poll()
}
pub fn get_mut(&mut self) -> Option<&mut T> {
self.0.get_mut()
}
pub fn get_or_init<F: FnOnce() -> T>(&self, f: F) -> &T {
self.0.call_once(f)
}
}
}
#[cfg(not(feature = "std"))]
pub use oncelock_shim::*;
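
Usage of the spin-backed shim is identical to `std::sync::OnceLock`; a hedged sketch, assuming the crate's `sync` module path:

    use std_shims::sync::OnceLock;

    static CONFIG: OnceLock<u64> = OnceLock::new();

    fn config() -> u64 {
        // `get_or_init` delegates to `spin::Once::call_once`, so the closure
        // runs at most once even under concurrent callers.
        *CONFIG.get_or_init(|| 42)
    }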

View File

@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/zalloc"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
rust-version = "1.77"
rust-version = "1.77.0"
[package.metadata.docs.rs]
all-features = true

View File

@@ -1,5 +1,5 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))]
//! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation.
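
The doc comment names the technique; a hedged sketch of the idea (not this crate's exact code, which also covers the nightly allocator API behind the `allocator` feature):

    use core::alloc::{GlobalAlloc, Layout};

    // Wrap any allocator and zero memory as it's freed.
    pub struct ZeroizingAlloc<A: GlobalAlloc>(pub A);

    unsafe impl<A: GlobalAlloc> GlobalAlloc for ZeroizingAlloc<A> {
        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
            unsafe { self.0.alloc(layout) }
        }
        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
            // Zero the allocation before returning it, so secrets don't
            // linger in freed memory.
            unsafe {
                core::ptr::write_bytes(ptr, 0, layout.size());
                self.0.dealloc(ptr, layout);
            }
        }
    }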

View File

@@ -25,10 +25,8 @@ rand_core = { version = "0.6", default-features = false, features = ["std"] }
blake2 = { version = "0.10", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] }
dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"] }
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std", "aggregate"] }
dkg-musig = { path = "../crypto/dkg/musig", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std"] }
frost = { package = "modular-frost", path = "../crypto/frost" }
frost-schnorrkel = { path = "../crypto/schnorrkel" }
@@ -42,7 +40,7 @@ processor-messages = { package = "serai-processor-messages", path = "../processo
message-queue = { package = "serai-message-queue", path = "../message-queue" }
tributary = { package = "tributary-chain", path = "./tributary" }
sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] }
sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }
hex = { version = "0.4", default-features = false, features = ["std"] }
@@ -57,8 +55,8 @@ libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp
[dev-dependencies]
tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }
sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] }
sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] }
sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
[features]
longer-reattempts = []

View File

@@ -12,9 +12,9 @@ use tokio::{
use borsh::BorshSerialize;
use sp_application_crypto::RuntimePublic;
use serai_client::{
primitives::{ExternalNetworkId, EXTERNAL_NETWORKS},
validator_sets::primitives::{ExternalValidatorSet, Session},
Serai, SeraiError, TemporalSerai,
primitives::{NETWORKS, NetworkId, Signature},
validator_sets::primitives::{Session, ValidatorSet},
SeraiError, TemporalSerai, Serai,
};
use serai_db::{Get, DbTxn, Db, create_db};
@@ -28,17 +28,17 @@ use crate::{
create_db! {
CosignDb {
ReceivedCosign: (set: ExternalValidatorSet, block: [u8; 32]) -> CosignedBlock,
LatestCosign: (network: ExternalNetworkId) -> CosignedBlock,
DistinctChain: (set: ExternalValidatorSet) -> (),
ReceivedCosign: (set: ValidatorSet, block: [u8; 32]) -> CosignedBlock,
LatestCosign: (network: NetworkId) -> CosignedBlock,
DistinctChain: (set: ValidatorSet) -> (),
}
}
pub struct CosignEvaluator<D: Db> {
db: Mutex<D>,
serai: Arc<Serai>,
stakes: RwLock<Option<HashMap<ExternalNetworkId, u64>>>,
latest_cosigns: RwLock<HashMap<ExternalNetworkId, CosignedBlock>>,
stakes: RwLock<Option<HashMap<NetworkId, u64>>>,
latest_cosigns: RwLock<HashMap<NetworkId, CosignedBlock>>,
}
impl<D: Db> CosignEvaluator<D> {
@@ -79,7 +79,7 @@ impl<D: Db> CosignEvaluator<D> {
let serai = self.serai.as_of_latest_finalized_block().await?;
let mut stakes = HashMap::new();
for network in EXTERNAL_NETWORKS {
for network in NETWORKS {
// Use whether this network has published a Batch as a short-circuit for whether they've ever set a key
let set_key = serai.in_instructions().last_batch_for_network(network).await?.is_some();
if set_key {
@@ -87,7 +87,7 @@ impl<D: Db> CosignEvaluator<D> {
network,
serai
.validator_sets()
.total_allocated_stake(network.into())
.total_allocated_stake(network)
.await?
.expect("network which published a batch didn't have a stake set")
.0,
@@ -126,9 +126,9 @@ impl<D: Db> CosignEvaluator<D> {
async fn set_with_keys_fn(
serai: &TemporalSerai<'_>,
network: ExternalNetworkId,
) -> Result<Option<ExternalValidatorSet>, SeraiError> {
let Some(latest_session) = serai.validator_sets().session(network.into()).await? else {
network: NetworkId,
) -> Result<Option<ValidatorSet>, SeraiError> {
let Some(latest_session) = serai.validator_sets().session(network).await? else {
log::warn!("received cosign from {:?}, which doesn't yet have a session", network);
return Ok(None);
};
@@ -136,13 +136,13 @@ impl<D: Db> CosignEvaluator<D> {
Ok(Some(
if serai
.validator_sets()
.keys(ExternalValidatorSet { network, session: prior_session })
.keys(ValidatorSet { network, session: prior_session })
.await?
.is_some()
{
ExternalValidatorSet { network, session: prior_session }
ValidatorSet { network, session: prior_session }
} else {
ExternalValidatorSet { network, session: latest_session }
ValidatorSet { network, session: latest_session }
},
))
}
@@ -164,7 +164,7 @@ impl<D: Db> CosignEvaluator<D> {
if !keys
.0
.verify(&cosign_block_msg(cosign.block_number, cosign.block), &cosign.signature.into())
.verify(&cosign_block_msg(cosign.block_number, cosign.block), &Signature(cosign.signature))
{
log::warn!("received cosigned block with an invalid signature");
return Ok(());
@@ -204,12 +204,16 @@ impl<D: Db> CosignEvaluator<D> {
let mut total_stake = 0;
let mut total_on_distinct_chain = 0;
for network in EXTERNAL_NETWORKS {
for network in NETWORKS {
if network == NetworkId::Serai {
continue;
}
// Get the current set for this network
let set_with_keys = {
let mut res;
while {
res = set_with_keys_fn(&serai, network).await;
res = set_with_keys_fn(&serai, cosign.network).await;
res.is_err()
} {
log::error!(
@@ -227,8 +231,7 @@ impl<D: Db> CosignEvaluator<D> {
let stake = {
let mut res;
while {
res =
serai.validator_sets().total_allocated_stake(set_with_keys.network.into()).await;
res = serai.validator_sets().total_allocated_stake(set_with_keys.network).await;
res.is_err()
} {
log::error!(
@@ -268,7 +271,7 @@ impl<D: Db> CosignEvaluator<D> {
#[allow(clippy::new_ret_no_self)]
pub fn new<P: P2p>(db: D, p2p: P, serai: Arc<Serai>) -> mpsc::UnboundedSender<CosignedBlock> {
let mut latest_cosigns = HashMap::new();
for network in EXTERNAL_NETWORKS {
for network in NETWORKS {
if let Some(cosign) = LatestCosign::get(&db, network) {
latest_cosigns.insert(network, cosign);
}

View File

@@ -6,9 +6,9 @@ use blake2::{
use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};
use serai_client::{
primitives::NetworkId,
validator_sets::primitives::{Session, ValidatorSet},
in_instructions::primitives::{Batch, SignedBatch},
primitives::ExternalNetworkId,
validator_sets::primitives::{ExternalValidatorSet, Session},
};
pub use serai_db::*;
@@ -18,21 +18,21 @@ use crate::tributary::{TributarySpec, Transaction, scanner::RecognizedIdType};
create_db!(
MainDb {
HandledMessageDb: (network: ExternalNetworkId) -> u64,
HandledMessageDb: (network: NetworkId) -> u64,
ActiveTributaryDb: () -> Vec<u8>,
RetiredTributaryDb: (set: ExternalValidatorSet) -> (),
RetiredTributaryDb: (set: ValidatorSet) -> (),
FirstPreprocessDb: (
network: ExternalNetworkId,
network: NetworkId,
id_type: RecognizedIdType,
id: &[u8]
) -> Vec<Vec<u8>>,
LastReceivedBatchDb: (network: ExternalNetworkId) -> u32,
ExpectedBatchDb: (network: ExternalNetworkId, id: u32) -> [u8; 32],
BatchDb: (network: ExternalNetworkId, id: u32) -> SignedBatch,
LastVerifiedBatchDb: (network: ExternalNetworkId) -> u32,
HandoverBatchDb: (set: ExternalValidatorSet) -> u32,
LookupHandoverBatchDb: (network: ExternalNetworkId, batch: u32) -> Session,
QueuedBatchesDb: (set: ExternalValidatorSet) -> Vec<u8>
LastReceivedBatchDb: (network: NetworkId) -> u32,
ExpectedBatchDb: (network: NetworkId, id: u32) -> [u8; 32],
BatchDb: (network: NetworkId, id: u32) -> SignedBatch,
LastVerifiedBatchDb: (network: NetworkId) -> u32,
HandoverBatchDb: (set: ValidatorSet) -> u32,
LookupHandoverBatchDb: (network: NetworkId, batch: u32) -> Session,
QueuedBatchesDb: (set: ValidatorSet) -> Vec<u8>
}
);
@@ -61,7 +61,7 @@ impl ActiveTributaryDb {
ActiveTributaryDb::set(txn, &existing_bytes);
}
pub fn retire_tributary(txn: &mut impl DbTxn, set: ExternalValidatorSet) {
pub fn retire_tributary(txn: &mut impl DbTxn, set: ValidatorSet) {
let mut active = Self::active_tributaries(txn).1;
for i in 0 .. active.len() {
if active[i].set() == set {
@@ -82,7 +82,7 @@ impl ActiveTributaryDb {
impl FirstPreprocessDb {
pub fn save_first_preprocess(
txn: &mut impl DbTxn,
network: ExternalNetworkId,
network: NetworkId,
id_type: RecognizedIdType,
id: &[u8],
preprocess: &Vec<Vec<u8>>,
@@ -108,19 +108,19 @@ impl ExpectedBatchDb {
}
impl HandoverBatchDb {
pub fn set_handover_batch(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: u32) {
pub fn set_handover_batch(txn: &mut impl DbTxn, set: ValidatorSet, batch: u32) {
Self::set(txn, set, &batch);
LookupHandoverBatchDb::set(txn, set.network, batch, &set.session);
}
}
impl QueuedBatchesDb {
pub fn queue(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: &Transaction) {
pub fn queue(txn: &mut impl DbTxn, set: ValidatorSet, batch: &Transaction) {
let mut batches = Self::get(txn, set).unwrap_or_default();
batch.write(&mut batches).unwrap();
Self::set(txn, set, &batches);
}
pub fn take(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec<Transaction> {
pub fn take(txn: &mut impl DbTxn, set: ValidatorSet) -> Vec<Transaction> {
let batches_vec = Self::get(txn, set).unwrap_or_default();
txn.del(Self::key(set));

View File

@@ -1,5 +1,3 @@
#![expect(clippy::cast_possible_truncation)]
use core::ops::Deref;
use std::{
sync::{OnceLock, Arc},
@@ -10,13 +8,12 @@ use std::{
use zeroize::{Zeroize, Zeroizing};
use rand_core::OsRng;
use dalek_ff_group::Ristretto;
use ciphersuite::{
group::{
ff::{Field, PrimeField},
GroupEncoding,
},
Ciphersuite,
Ciphersuite, Ristretto,
};
use schnorr::SchnorrSignature;
use frost::Participant;
@@ -26,8 +23,8 @@ use serai_db::{DbTxn, Db};
use scale::Encode;
use borsh::BorshSerialize;
use serai_client::{
primitives::ExternalNetworkId,
validator_sets::primitives::{ExternalValidatorSet, KeyPair, Session},
primitives::NetworkId,
validator_sets::primitives::{Session, ValidatorSet, KeyPair},
Public, Serai, SeraiInInstructions,
};
@@ -82,7 +79,7 @@ pub struct ActiveTributary<D: Db, P: P2p> {
#[derive(Clone)]
pub enum TributaryEvent<D: Db, P: P2p> {
NewTributary(ActiveTributary<D, P>),
TributaryRetired(ExternalValidatorSet),
TributaryRetired(ValidatorSet),
}
// Creates a new tributary and sends it to all listeners.
@@ -148,7 +145,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
p2p: &P,
cosign_channel: &mpsc::UnboundedSender<CosignedBlock>,
tributaries: &HashMap<Session, ActiveTributary<D, P>>,
network: ExternalNetworkId,
network: NetworkId,
msg: &processors::Message,
) -> bool {
#[allow(clippy::nonminimal_bool)]
@@ -196,8 +193,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
.iter()
.map(|plan| plan.session)
.filter(|session| {
RetiredTributaryDb::get(&txn, ExternalValidatorSet { network, session: *session })
.is_none()
RetiredTributaryDb::get(&txn, ValidatorSet { network, session: *session }).is_none()
})
.collect::<HashSet<_>>();
@@ -269,17 +265,14 @@ async fn handle_processor_message<D: Db, P: P2p>(
}
// This causes an action on Substrate yet not on any Tributary
coordinator::ProcessorMessage::SignedSlashReport { session, signature } => {
let set = ExternalValidatorSet { network, session: *session };
let set = ValidatorSet { network, session: *session };
let signature: &[u8] = signature.as_ref();
let signature = <[u8; 64]>::try_from(signature).unwrap();
let signature: serai_client::Signature = signature.into();
let signature = serai_client::Signature(signature.try_into().unwrap());
let slashes = crate::tributary::SlashReport::get(&txn, set)
.expect("signed slash report despite not having slash report locally");
let slashes_pubs = slashes
.iter()
.map(|(address, points)| (Public::from(*address), *points))
.collect::<Vec<_>>();
let slashes_pubs =
slashes.iter().map(|(address, points)| (Public(*address), *points)).collect::<Vec<_>>();
let tx = serai_client::SeraiValidatorSets::report_slashes(
network,
@@ -289,7 +282,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
.collect::<Vec<_>>()
.try_into()
.unwrap(),
signature,
signature.clone(),
);
loop {
@@ -400,7 +393,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
if let Some(relevant_tributary_value) = relevant_tributary {
if RetiredTributaryDb::get(
&txn,
ExternalValidatorSet { network: msg.network, session: relevant_tributary_value },
ValidatorSet { network: msg.network, session: relevant_tributary_value },
)
.is_some()
{
@@ -506,7 +499,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
&mut txn,
key,
spec,
&KeyPair(Public::from(substrate_key), network_key.try_into().unwrap()),
&KeyPair(Public(substrate_key), network_key.try_into().unwrap()),
id.attempt,
);
@@ -789,7 +782,7 @@ async fn handle_processor_messages<D: Db, Pro: Processors, P: P2p>(
processors: Pro,
p2p: P,
cosign_channel: mpsc::UnboundedSender<CosignedBlock>,
network: ExternalNetworkId,
network: NetworkId,
mut tributary_event: mpsc::UnboundedReceiver<TributaryEvent<D, P>>,
) {
let mut tributaries = HashMap::new();
@@ -838,7 +831,7 @@ async fn handle_processor_messages<D: Db, Pro: Processors, P: P2p>(
#[allow(clippy::too_many_arguments)]
async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
mut db: D,
network: ExternalNetworkId,
network: NetworkId,
mut tributary_event: mpsc::UnboundedReceiver<TributaryEvent<D, P>>,
) {
let mut tributaries = HashMap::new();
@@ -912,7 +905,7 @@ async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
for batch in start_id ..= last_id {
let is_pre_handover = LookupHandoverBatchDb::get(&txn, network, batch + 1);
if let Some(session) = is_pre_handover {
let set = ExternalValidatorSet { network, session };
let set = ValidatorSet { network, session };
let mut queued = QueuedBatchesDb::take(&mut txn, set);
// is_handover_batch is only set for handover `Batch`s we're participating in, making
// this safe
@@ -930,8 +923,7 @@ async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
let is_handover = LookupHandoverBatchDb::get(&txn, network, batch);
if let Some(session) = is_handover {
for queued in QueuedBatchesDb::take(&mut txn, ExternalValidatorSet { network, session })
{
for queued in QueuedBatchesDb::take(&mut txn, ValidatorSet { network, session }) {
to_publish.push((session, queued));
}
}
@@ -978,7 +970,10 @@ pub async fn handle_processors<D: Db, Pro: Processors, P: P2p>(
mut tributary_event: broadcast::Receiver<TributaryEvent<D, P>>,
) {
let mut channels = HashMap::new();
for network in serai_client::primitives::EXTERNAL_NETWORKS {
for network in serai_client::primitives::NETWORKS {
if network == NetworkId::Serai {
continue;
}
let (processor_send, processor_recv) = mpsc::unbounded_channel();
tokio::spawn(handle_processor_messages(
db.clone(),
@@ -1200,7 +1195,7 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
}
});
move |set: ExternalValidatorSet, genesis, id_type, id: Vec<u8>| {
move |set: ValidatorSet, genesis, id_type, id: Vec<u8>| {
log::debug!("recognized ID {:?} {}", id_type, hex::encode(&id));
let mut raw_db = raw_db.clone();
let key = key.clone();

View File

@@ -11,9 +11,7 @@ use rand_core::{RngCore, OsRng};
use scale::{Decode, Encode};
use borsh::{BorshSerialize, BorshDeserialize};
use serai_client::{
primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet, Serai,
};
use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet, Serai};
use serai_db::Db;
@@ -71,7 +69,7 @@ const BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]
pub struct CosignedBlock {
pub network: ExternalNetworkId,
pub network: NetworkId,
pub block_number: u64,
pub block: [u8; 32],
pub signature: [u8; 64],
@@ -210,8 +208,8 @@ pub struct HeartbeatBatch {
pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p {
type Id: Send + Sync + Clone + Copy + fmt::Debug;
async fn subscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]);
async fn unsubscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]);
async fn subscribe(&self, set: ValidatorSet, genesis: [u8; 32]);
async fn unsubscribe(&self, set: ValidatorSet, genesis: [u8; 32]);
async fn send_raw(&self, to: Self::Id, msg: Vec<u8>);
async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>);
@@ -311,7 +309,7 @@ struct Behavior {
#[allow(clippy::type_complexity)]
#[derive(Clone)]
pub struct LibP2p {
subscribe: Arc<Mutex<mpsc::UnboundedSender<(bool, ExternalValidatorSet, [u8; 32])>>>,
subscribe: Arc<Mutex<mpsc::UnboundedSender<(bool, ValidatorSet, [u8; 32])>>>,
send: Arc<Mutex<mpsc::UnboundedSender<(PeerId, Vec<u8>)>>>,
broadcast: Arc<Mutex<mpsc::UnboundedSender<(P2pMessageKind, Vec<u8>)>>>,
receive: Arc<Mutex<mpsc::UnboundedReceiver<Message<Self>>>>,
@@ -399,7 +397,7 @@ impl LibP2p {
let (receive_send, receive_recv) = mpsc::unbounded_channel();
let (subscribe_send, mut subscribe_recv) = mpsc::unbounded_channel();
fn topic_for_set(set: ExternalValidatorSet) -> IdentTopic {
fn topic_for_set(set: ValidatorSet) -> IdentTopic {
IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode())))
}
@@ -409,8 +407,7 @@ impl LibP2p {
// The addrs we're currently dialing, and the networks associated with them
let dialing_peers = Arc::new(RwLock::new(HashMap::new()));
// The peers we're currently connected to, and the networks associated with them
let connected_peers =
Arc::new(RwLock::new(HashMap::<Multiaddr, HashSet<ExternalNetworkId>>::new()));
let connected_peers = Arc::new(RwLock::new(HashMap::<Multiaddr, HashSet<NetworkId>>::new()));
// Find and connect to peers
let (connect_to_network_send, mut connect_to_network_recv) =
@@ -423,7 +420,7 @@ impl LibP2p {
let connect_to_network_send = connect_to_network_send.clone();
async move {
loop {
let connect = |network: ExternalNetworkId, addr: Multiaddr| {
let connect = |network: NetworkId, addr: Multiaddr| {
let dialing_peers = dialing_peers.clone();
let connected_peers = connected_peers.clone();
let to_dial_send = to_dial_send.clone();
@@ -510,7 +507,7 @@ impl LibP2p {
connect_to_network_networks.insert(network);
}
for network in connect_to_network_networks {
if let Ok(mut nodes) = serai.p2p_validators(network.into()).await {
if let Ok(mut nodes) = serai.p2p_validators(network).await {
// If there's an insufficient amount of nodes known, connect to all yet add it
// back and break
if nodes.len() < TARGET_PEERS {
@@ -560,7 +557,7 @@ impl LibP2p {
// Subscribe to any new topics
set = subscribe_recv.recv() => {
let (subscribe, set, genesis): (_, ExternalValidatorSet, [u8; 32]) =
let (subscribe, set, genesis): (_, ValidatorSet, [u8; 32]) =
set.expect("subscribe_recv closed. are we shutting down?");
let topic = topic_for_set(set);
if subscribe {
@@ -779,7 +776,7 @@ impl LibP2p {
impl P2p for LibP2p {
type Id = PeerId;
async fn subscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]) {
async fn subscribe(&self, set: ValidatorSet, genesis: [u8; 32]) {
self
.subscribe
.lock()
@@ -788,7 +785,7 @@ impl P2p for LibP2p {
.expect("subscribe_send closed. are we shutting down?");
}
async fn unsubscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]) {
async fn unsubscribe(&self, set: ValidatorSet, genesis: [u8; 32]) {
self
.subscribe
.lock()

View File

@@ -1,6 +1,6 @@
use std::sync::Arc;
use serai_client::primitives::ExternalNetworkId;
use serai_client::primitives::NetworkId;
use processor_messages::{ProcessorMessage, CoordinatorMessage};
use message_queue::{Service, Metadata, client::MessageQueue};
@@ -8,27 +8,27 @@ use message_queue::{Service, Metadata, client::MessageQueue};
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Message {
pub id: u64,
pub network: ExternalNetworkId,
pub network: NetworkId,
pub msg: ProcessorMessage,
}
#[async_trait::async_trait]
pub trait Processors: 'static + Send + Sync + Clone {
async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>);
async fn recv(&self, network: ExternalNetworkId) -> Message;
async fn send(&self, network: NetworkId, msg: impl Send + Into<CoordinatorMessage>);
async fn recv(&self, network: NetworkId) -> Message;
async fn ack(&self, msg: Message);
}
#[async_trait::async_trait]
impl Processors for Arc<MessageQueue> {
async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>) {
async fn send(&self, network: NetworkId, msg: impl Send + Into<CoordinatorMessage>) {
let msg: CoordinatorMessage = msg.into();
let metadata =
Metadata { from: self.service, to: Service::Processor(network), intent: msg.intent() };
let msg = borsh::to_vec(&msg).unwrap();
self.queue(metadata, msg).await;
}
async fn recv(&self, network: ExternalNetworkId) -> Message {
async fn recv(&self, network: NetworkId) -> Message {
let msg = self.next(Service::Processor(network)).await;
assert_eq!(msg.from, Service::Processor(network));

View File

@@ -14,15 +14,14 @@
use zeroize::Zeroizing;
use dalek_ff_group::Ristretto;
use ciphersuite::Ciphersuite;
use ciphersuite::{Ciphersuite, Ristretto};
use borsh::{BorshSerialize, BorshDeserialize};
use serai_client::{
primitives::ExternalNetworkId,
validator_sets::primitives::{ExternalValidatorSet, Session},
Serai, SeraiError,
SeraiError, Serai,
primitives::NetworkId,
validator_sets::primitives::{Session, ValidatorSet},
};
use serai_db::*;
@@ -71,18 +70,13 @@ impl LatestCosignedBlock {
db_channel! {
SubstrateDbChannels {
CosignTransactions: (network: ExternalNetworkId) -> (Session, u64, [u8; 32]),
CosignTransactions: (network: NetworkId) -> (Session, u64, [u8; 32]),
}
}
impl CosignTransactions {
// Append a cosign transaction.
pub fn append_cosign(
txn: &mut impl DbTxn,
set: ExternalValidatorSet,
number: u64,
hash: [u8; 32],
) {
pub fn append_cosign(txn: &mut impl DbTxn, set: ValidatorSet, number: u64, hash: [u8; 32]) {
CosignTransactions::send(txn, set.network, &(set.session, number, hash))
}
}
@@ -262,22 +256,22 @@ async fn advance_cosign_protocol_inner(
// Using the keys of the prior block ensures this deadlock isn't reached
let serai = serai.as_of(actual_block.header.parent_hash.into());
for network in serai_client::primitives::EXTERNAL_NETWORKS {
for network in serai_client::primitives::NETWORKS {
// Get the latest session to have set keys
let set_with_keys = {
let Some(latest_session) = serai.validator_sets().session(network.into()).await? else {
let Some(latest_session) = serai.validator_sets().session(network).await? else {
continue;
};
let prior_session = Session(latest_session.0.saturating_sub(1));
if serai
.validator_sets()
.keys(ExternalValidatorSet { network, session: prior_session })
.keys(ValidatorSet { network, session: prior_session })
.await?
.is_some()
{
ExternalValidatorSet { network, session: prior_session }
ValidatorSet { network, session: prior_session }
} else {
let set = ExternalValidatorSet { network, session: latest_session };
let set = ValidatorSet { network, session: latest_session };
if serai.validator_sets().keys(set).await?.is_none() {
continue;
}
@@ -286,7 +280,7 @@ async fn advance_cosign_protocol_inner(
};
log::debug!("{:?} will be cosigning {block}", set_with_keys.network);
cosigning.push((set_with_keys, in_set(key, &serai, set_with_keys.into()).await?.unwrap()));
cosigning.push((set_with_keys, in_set(key, &serai, set_with_keys).await?.unwrap()));
}
break;

View File

@@ -1,4 +1,4 @@
use serai_client::primitives::ExternalNetworkId;
use serai_client::primitives::NetworkId;
pub use serai_db::*;
@@ -9,7 +9,7 @@ mod inner_db {
SubstrateDb {
NextBlock: () -> u64,
HandledEvent: (block: [u8; 32]) -> u32,
BatchInstructionsHashDb: (network: ExternalNetworkId, id: u32) -> [u8; 32]
BatchInstructionsHashDb: (network: NetworkId, id: u32) -> [u8; 32]
}
);
}

View File

@@ -6,18 +6,14 @@ use std::{
use zeroize::Zeroizing;
use dalek_ff_group::Ristretto;
use ciphersuite::{group::GroupEncoding, Ciphersuite};
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use serai_client::{
coins::CoinsEvent,
SeraiError, Block, Serai, TemporalSerai,
primitives::{BlockHash, NetworkId},
validator_sets::{primitives::ValidatorSet, ValidatorSetsEvent},
in_instructions::InInstructionsEvent,
primitives::{BlockHash, ExternalNetworkId},
validator_sets::{
primitives::{ExternalValidatorSet, ValidatorSet},
ValidatorSetsEvent,
},
Block, Serai, SeraiError, TemporalSerai,
coins::CoinsEvent,
};
use serai_db::DbTxn;
@@ -56,9 +52,9 @@ async fn handle_new_set<D: Db>(
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
serai: &Serai,
block: &Block,
set: ExternalValidatorSet,
set: ValidatorSet,
) -> Result<(), SeraiError> {
if in_set(key, &serai.as_of(block.hash()), set.into())
if in_set(key, &serai.as_of(block.hash()), set)
.await?
.expect("NewSet for set which doesn't exist")
{
@@ -68,7 +64,7 @@ async fn handle_new_set<D: Db>(
let serai = serai.as_of(block.hash());
let serai = serai.validator_sets();
let set_participants =
serai.participants(set.network.into()).await?.expect("NewSet for set which doesn't exist");
serai.participants(set.network).await?.expect("NewSet for set which doesn't exist");
set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::<Vec<_>>()
};
@@ -135,7 +131,7 @@ async fn handle_batch_and_burns<Pro: Processors>(
};
let mut batch_block = HashMap::new();
let mut batches = HashMap::<ExternalNetworkId, Vec<u32>>::new();
let mut batches = HashMap::<NetworkId, Vec<u32>>::new();
let mut burns = HashMap::new();
let serai = serai.as_of(block.hash());
@@ -209,8 +205,8 @@ async fn handle_block<D: Db, Pro: Processors>(
db: &mut D,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
perform_slash_report: &mpsc::UnboundedSender<ExternalValidatorSet>,
tributary_retired: &mpsc::UnboundedSender<ExternalValidatorSet>,
perform_slash_report: &mpsc::UnboundedSender<ValidatorSet>,
tributary_retired: &mpsc::UnboundedSender<ValidatorSet>,
processors: &Pro,
serai: &Serai,
block: Block,
@@ -230,8 +226,12 @@ async fn handle_block<D: Db, Pro: Processors>(
panic!("NewSet event wasn't NewSet: {new_set:?}");
};
// If this is Serai, do nothing
// We only coordinate/process external networks
let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
if set.network == NetworkId::Serai {
continue;
}
if HandledEvent::is_unhandled(db, hash, event_id) {
log::info!("found fresh new set event {:?}", new_set);
let mut txn = db.txn();
@@ -286,7 +286,10 @@ async fn handle_block<D: Db, Pro: Processors>(
panic!("AcceptedHandover event wasn't AcceptedHandover: {accepted_handover:?}");
};
let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
if set.network == NetworkId::Serai {
continue;
}
if HandledEvent::is_unhandled(db, hash, event_id) {
log::info!("found fresh accepted handover event {:?}", accepted_handover);
// TODO: This isn't atomic with the event handling
@@ -304,7 +307,10 @@ async fn handle_block<D: Db, Pro: Processors>(
panic!("SetRetired event wasn't SetRetired: {retired_set:?}");
};
let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
if set.network == NetworkId::Serai {
continue;
}
if HandledEvent::is_unhandled(db, hash, event_id) {
log::info!("found fresh set retired event {:?}", retired_set);
let mut txn = db.txn();
@@ -334,8 +340,8 @@ async fn handle_new_blocks<D: Db, Pro: Processors>(
db: &mut D,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
perform_slash_report: &mpsc::UnboundedSender<ExternalValidatorSet>,
tributary_retired: &mpsc::UnboundedSender<ExternalValidatorSet>,
perform_slash_report: &mpsc::UnboundedSender<ValidatorSet>,
tributary_retired: &mpsc::UnboundedSender<ValidatorSet>,
processors: &Pro,
serai: &Serai,
next_block: &mut u64,
@@ -389,8 +395,8 @@ pub async fn scan_task<D: Db, Pro: Processors>(
processors: Pro,
serai: Arc<Serai>,
new_tributary_spec: mpsc::UnboundedSender<TributarySpec>,
perform_slash_report: mpsc::UnboundedSender<ExternalValidatorSet>,
tributary_retired: mpsc::UnboundedSender<ExternalValidatorSet>,
perform_slash_report: mpsc::UnboundedSender<ValidatorSet>,
tributary_retired: mpsc::UnboundedSender<ValidatorSet>,
) {
log::info!("scanning substrate");
let mut next_substrate_block = NextBlock::get(&db).unwrap_or_default();
@@ -488,12 +494,9 @@ pub async fn scan_task<D: Db, Pro: Processors>(
/// retry.
pub(crate) async fn expected_next_batch(
serai: &Serai,
network: ExternalNetworkId,
network: NetworkId,
) -> Result<u32, SeraiError> {
async fn expected_next_batch_inner(
serai: &Serai,
network: ExternalNetworkId,
) -> Result<u32, SeraiError> {
async fn expected_next_batch_inner(serai: &Serai, network: NetworkId) -> Result<u32, SeraiError> {
let serai = serai.as_of_latest_finalized_block().await?;
let last = serai.in_instructions().last_batch_for_network(network).await?;
Ok(if let Some(last) = last { last + 1 } else { 0 })
@@ -516,7 +519,7 @@ pub(crate) async fn expected_next_batch(
/// This is deemed fine.
pub(crate) async fn verify_published_batches<D: Db>(
txn: &mut D::Transaction<'_>,
network: ExternalNetworkId,
network: NetworkId,
optimistic_up_to: u32,
) -> Option<u32> {
// TODO: Localize from MainDb to SubstrateDb

View File
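
Every event handler in the scanner above applies the same guard: the Serai network itself is never coordinated, so only external networks fall through to tributary handling. A minimal sketch of that filter, using a simplified stand-in for the serai_client `NetworkId` type:

```rust
// A minimal sketch of the Serai-network filter above, assuming a simplified
// NetworkId enum; the real serai_client type carries more variants/data.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum NetworkId {
    Serai,
    Bitcoin,
    Ethereum,
    Monero,
}

fn handle_new_set(network: NetworkId) {
    // Serai itself isn't coordinated; only external networks spawn a tributary
    if network == NetworkId::Serai {
        return;
    }
    println!("spawning tributary for {network:?}");
}

fn main() {
    for network in [NetworkId::Serai, NetworkId::Bitcoin, NetworkId::Monero] {
        handle_new_set(network);
    }
}
```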

@@ -4,7 +4,7 @@ use std::{
collections::{VecDeque, HashSet, HashMap},
};
use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet};
use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};
use processor_messages::CoordinatorMessage;
@@ -20,7 +20,7 @@ use crate::{
pub mod tributary;
#[derive(Clone)]
pub struct MemProcessors(pub Arc<RwLock<HashMap<ExternalNetworkId, VecDeque<CoordinatorMessage>>>>);
pub struct MemProcessors(pub Arc<RwLock<HashMap<NetworkId, VecDeque<CoordinatorMessage>>>>);
impl MemProcessors {
#[allow(clippy::new_without_default)]
pub fn new() -> MemProcessors {
@@ -30,12 +30,12 @@ impl MemProcessors {
#[async_trait::async_trait]
impl Processors for MemProcessors {
async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>) {
async fn send(&self, network: NetworkId, msg: impl Send + Into<CoordinatorMessage>) {
let mut processors = self.0.write().await;
let processor = processors.entry(network).or_insert_with(VecDeque::new);
processor.push_back(msg.into());
}
async fn recv(&self, _: ExternalNetworkId) -> Message {
async fn recv(&self, _: NetworkId) -> Message {
todo!()
}
async fn ack(&self, _: Message) {
@@ -65,8 +65,8 @@ impl LocalP2p {
impl P2p for LocalP2p {
type Id = usize;
async fn subscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {}
async fn unsubscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {}
async fn subscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
async fn unsubscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
async fn send_raw(&self, to: Self::Id, msg: Vec<u8>) {
let mut msg_ref = msg.as_slice();

View File
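
MemProcessors above is just a per-network FIFO of coordinator messages behind an async lock. A rough sketch of the pattern, assuming `tokio` and a stand-in `String` message type in place of CoordinatorMessage:

```rust
// A rough sketch of the MemProcessors pattern, assuming `tokio` and a
// stand-in String message type instead of CoordinatorMessage.
use std::{
    collections::{HashMap, VecDeque},
    sync::Arc,
};
use tokio::sync::RwLock;

#[derive(Clone)]
struct MemProcessors(Arc<RwLock<HashMap<u8, VecDeque<String>>>>);

impl MemProcessors {
    fn new() -> Self {
        Self(Arc::new(RwLock::new(HashMap::new())))
    }
    async fn send(&self, network: u8, msg: impl Into<String>) {
        let mut processors = self.0.write().await;
        // One FIFO queue per network, created on first use
        processors.entry(network).or_insert_with(VecDeque::new).push_back(msg.into());
    }
}

#[tokio::main]
async fn main() {
    let processors = MemProcessors::new();
    processors.send(0, "batch").await;
    assert_eq!(processors.0.read().await[&0].len(), 1);
}
```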

@@ -7,17 +7,16 @@ use zeroize::Zeroizing;
use rand_core::{RngCore, CryptoRng, OsRng};
use futures_util::{task::Poll, poll};
use dalek_ff_group::Ristretto;
use ciphersuite::{
group::{ff::Field, GroupEncoding},
Ciphersuite,
Ciphersuite, Ristretto,
};
use sp_application_crypto::sr25519;
use borsh::BorshDeserialize;
use serai_client::{
primitives::ExternalNetworkId,
validator_sets::primitives::{ExternalValidatorSet, Session},
primitives::NetworkId,
validator_sets::primitives::{Session, ValidatorSet},
};
use tokio::time::sleep;
@@ -51,13 +50,11 @@ pub fn new_spec<R: RngCore + CryptoRng>(
let start_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();
let set = ExternalValidatorSet { session: Session(0), network: ExternalNetworkId::Bitcoin };
let set = ValidatorSet { session: Session(0), network: NetworkId::Bitcoin };
let set_participants = keys
.iter()
.map(|key| {
(sr25519::Public::from((<Ristretto as Ciphersuite>::generator() * **key).to_bytes()), 1)
})
.map(|key| (sr25519::Public((<Ristretto as Ciphersuite>::generator() * **key).to_bytes()), 1))
.collect::<Vec<_>>();
let res = TributarySpec::new(serai_block, start_time, set, set_participants);

View File

@@ -4,14 +4,13 @@ use std::collections::HashMap;
use zeroize::Zeroizing;
use rand_core::{RngCore, OsRng};
use dalek_ff_group::Ristretto;
use ciphersuite::{group::GroupEncoding, Ciphersuite};
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use frost::Participant;
use sp_runtime::traits::Verify;
use serai_client::{
primitives::{SeraiAddress, Signature},
validator_sets::primitives::{ExternalValidatorSet, KeyPair},
validator_sets::primitives::{ValidatorSet, KeyPair},
};
use tokio::time::sleep;
@@ -316,8 +315,7 @@ async fn dkg_test() {
OsRng.fill_bytes(&mut substrate_key);
let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()];
OsRng.fill_bytes(&mut network_key);
let key_pair =
KeyPair(serai_client::Public::from(substrate_key), network_key.try_into().unwrap());
let key_pair = KeyPair(serai_client::Public(substrate_key), network_key.try_into().unwrap());
let mut txs = vec![];
for (i, key) in keys.iter().enumerate() {
@@ -352,7 +350,7 @@ async fn dkg_test() {
async fn publish_set_keys(
&self,
_db: &(impl Sync + Get),
set: ExternalValidatorSet,
set: ValidatorSet,
removed: Vec<SeraiAddress>,
key_pair: KeyPair,
signature: Signature,
@@ -362,9 +360,9 @@ async fn dkg_test() {
assert_eq!(self.key_pair, key_pair);
assert!(signature.verify(
&*serai_client::validator_sets::primitives::set_keys_message(&set, &[], &key_pair),
&serai_client::Public::from(
dkg_musig::musig_key_vartime::<Ristretto>(
serai_client::validator_sets::primitives::musig_context(set.into()),
&serai_client::Public(
frost::dkg::musig::musig_key::<Ristretto>(
&serai_client::validator_sets::primitives::musig_context(set),
&self.spec.validators().into_iter().map(|(validator, _)| validator).collect::<Vec<_>>()
)
.unwrap()

View File

@@ -2,13 +2,12 @@ use core::fmt::Debug;
use rand_core::{RngCore, OsRng};
use dalek_ff_group::Ristretto;
use ciphersuite::{group::Group, Ciphersuite};
use ciphersuite::{group::Group, Ciphersuite, Ristretto};
use scale::{Encode, Decode};
use serai_client::{
primitives::{SeraiAddress, Signature},
validator_sets::primitives::{ExternalValidatorSet, KeyPair, MAX_KEY_SHARES_PER_SET},
validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet, KeyPair},
};
use processor_messages::coordinator::SubstrateSignableId;
@@ -32,7 +31,7 @@ impl PublishSeraiTransaction for () {
async fn publish_set_keys(
&self,
_db: &(impl Sync + serai_db::Get),
_set: ExternalValidatorSet,
_set: ValidatorSet,
_removed: Vec<SeraiAddress>,
_key_pair: KeyPair,
_signature: Signature,

View File

@@ -3,8 +3,7 @@ use std::{sync::Arc, collections::HashSet};
use rand_core::OsRng;
use dalek_ff_group::Ristretto;
use ciphersuite::{group::GroupEncoding, Ciphersuite};
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use tokio::{
sync::{mpsc, broadcast},

View File

@@ -3,11 +3,10 @@ use std::collections::HashMap;
use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};
use dalek_ff_group::Ristretto;
use ciphersuite::{group::GroupEncoding, Ciphersuite};
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use frost::Participant;
use serai_client::validator_sets::primitives::{KeyPair, ExternalValidatorSet};
use serai_client::validator_sets::primitives::{KeyPair, ValidatorSet};
use processor_messages::coordinator::SubstrateSignableId;
@@ -47,7 +46,7 @@ pub enum Accumulation {
create_db!(
Tributary {
SeraiBlockNumber: (hash: [u8; 32]) -> u64,
SeraiDkgCompleted: (spec: ExternalValidatorSet) -> [u8; 32],
SeraiDkgCompleted: (spec: ValidatorSet) -> [u8; 32],
TributaryBlockNumber: (block: [u8; 32]) -> u32,
LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32],
@@ -81,7 +80,7 @@ create_db!(
SlashReports: (genesis: [u8; 32], signer: [u8; 32]) -> Vec<u32>,
SlashReported: (genesis: [u8; 32]) -> u16,
SlashReportCutOff: (genesis: [u8; 32]) -> u64,
SlashReport: (set: ExternalValidatorSet) -> Vec<([u8; 32], u32)>,
SlashReport: (set: ValidatorSet) -> Vec<([u8; 32], u32)>,
}
);

View File
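
For readers unfamiliar with serai_db, the `create_db!` tables above boil down to domain-separated byte keys over a key-value store. A rough sketch of what one table expands to, with hypothetical helper names (the real macro and traits differ in detail):

```rust
// A rough sketch of what a create_db!-style table expands to, assuming a
// simple in-memory byte-oriented KV store; names here are hypothetical.
use std::collections::HashMap;

struct MemDb(HashMap<Vec<u8>, Vec<u8>>);

impl MemDb {
    fn key(domain: &str, item: &str, args: &[u8]) -> Vec<u8> {
        // Domain-separate every table, then append the encoded key arguments
        let mut key = format!("{domain}:{item}:").into_bytes();
        key.extend(args);
        key
    }
    fn set_serai_block_number(&mut self, hash: [u8; 32], number: u64) {
        self.0.insert(
            Self::key("Tributary", "SeraiBlockNumber", &hash),
            number.to_le_bytes().to_vec(),
        );
    }
    fn serai_block_number(&self, hash: [u8; 32]) -> Option<u64> {
        self.0
            .get(&Self::key("Tributary", "SeraiBlockNumber", &hash))
            .map(|bytes| u64::from_le_bytes(bytes.as_slice().try_into().unwrap()))
    }
}

fn main() {
    let mut db = MemDb(HashMap::new());
    db.set_serai_block_number([0; 32], 5);
    assert_eq!(db.serai_block_number([0; 32]), Some(5));
}
```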

@@ -4,12 +4,11 @@ use std::collections::HashMap;
use zeroize::Zeroizing;
use rand_core::OsRng;
use dalek_ff_group::Ristretto;
use ciphersuite::{group::GroupEncoding, Ciphersuite};
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use frost::dkg::Participant;
use scale::{Encode, Decode};
use serai_client::validator_sets::primitives::KeyPair;
use serai_client::{Signature, validator_sets::primitives::KeyPair};
use tributary::{Signed, TransactionKind, TransactionTrait};
@@ -554,7 +553,7 @@ impl<
self.spec.set(),
removed.into_iter().map(|key| key.to_bytes().into()).collect(),
key_pair,
sig.into(),
Signature(sig),
)
.await;
}

View File

@@ -1,7 +1,6 @@
use dalek_ff_group::Ristretto;
use ciphersuite::{group::GroupEncoding, Ciphersuite};
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use serai_client::validator_sets::primitives::ExternalValidatorSet;
use serai_client::validator_sets::primitives::ValidatorSet;
use tributary::{
ReadWrite,
@@ -41,7 +40,7 @@ pub fn removed_as_of_dkg_attempt(
pub fn removed_as_of_set_keys(
getter: &impl Get,
set: ExternalValidatorSet,
set: ValidatorSet,
genesis: [u8; 32],
) -> Option<Vec<<Ristretto as Ciphersuite>::G>> {
// SeraiDkgCompleted has the key placed on-chain.

View File

@@ -3,15 +3,14 @@ use std::{sync::Arc, collections::HashSet};
use zeroize::Zeroizing;
use dalek_ff_group::Ristretto;
use ciphersuite::{group::GroupEncoding, Ciphersuite};
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use tokio::sync::broadcast;
use scale::{Encode, Decode};
use serai_client::{
primitives::{SeraiAddress, Signature},
validator_sets::primitives::{ExternalValidatorSet, KeyPair},
validator_sets::primitives::{KeyPair, ValidatorSet},
Serai,
};
@@ -39,7 +38,7 @@ pub enum RecognizedIdType {
pub trait RIDTrait {
async fn recognized_id(
&self,
set: ExternalValidatorSet,
set: ValidatorSet,
genesis: [u8; 32],
kind: RecognizedIdType,
id: Vec<u8>,
@@ -48,12 +47,12 @@ pub trait RIDTrait {
#[async_trait::async_trait]
impl<
FRid: Send + Future<Output = ()>,
F: Sync + Fn(ExternalValidatorSet, [u8; 32], RecognizedIdType, Vec<u8>) -> FRid,
F: Sync + Fn(ValidatorSet, [u8; 32], RecognizedIdType, Vec<u8>) -> FRid,
> RIDTrait for F
{
async fn recognized_id(
&self,
set: ExternalValidatorSet,
set: ValidatorSet,
genesis: [u8; 32],
kind: RecognizedIdType,
id: Vec<u8>,
@@ -67,7 +66,7 @@ pub trait PublishSeraiTransaction {
async fn publish_set_keys(
&self,
db: &(impl Sync + Get),
set: ExternalValidatorSet,
set: ValidatorSet,
removed: Vec<SeraiAddress>,
key_pair: KeyPair,
signature: Signature,
@@ -87,7 +86,7 @@ mod impl_pst_for_serai {
async fn publish(
serai: &Serai,
db: &impl Get,
set: ExternalValidatorSet,
set: ValidatorSet,
tx: serai_client::Transaction,
meta: $Meta,
) -> bool {
@@ -129,7 +128,7 @@ mod impl_pst_for_serai {
async fn publish_set_keys(
&self,
db: &(impl Sync + Get),
set: ExternalValidatorSet,
set: ValidatorSet,
removed: Vec<SeraiAddress>,
key_pair: KeyPair,
signature: Signature,
@@ -141,7 +140,7 @@ mod impl_pst_for_serai {
key_pair,
signature,
);
async fn check(serai: SeraiValidatorSets<'_>, set: ExternalValidatorSet, (): ()) -> bool {
async fn check(serai: SeraiValidatorSets<'_>, set: ValidatorSet, (): ()) -> bool {
if matches!(serai.keys(set).await, Ok(Some(_))) {
log::info!("another coordinator set key pair for {:?}", set);
return true;

View File
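
The `check` closure above exists because several coordinators race to publish the same `set_keys` transaction; publication succeeds vacuously if any of them landed it. A hedged sketch of that check-then-publish loop, with `already_set` and `try_publish` as hypothetical stand-ins for the real Serai RPC calls:

```rust
// A hedged sketch of the check-then-publish flow above; `already_set` and
// `try_publish` are hypothetical stand-ins for the real Serai RPC calls.
fn publish_with_check(
    already_set: impl Fn() -> bool,
    try_publish: impl Fn() -> Result<(), String>,
) -> bool {
    loop {
        // Another coordinator may have already published this set's keys,
        // in which case there's nothing left to do
        if already_set() {
            return true;
        }
        match try_publish() {
            Ok(()) => return true,
            // On a transient error, re-check the chain state and retry
            Err(e) => println!("publish failed, retrying: {e}"),
        }
    }
}

fn main() {
    // Simulate the keys having been set by another coordinator
    assert!(publish_with_check(|| true, || Err("unreachable".into())));
}
```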

@@ -63,13 +63,16 @@ use rand_core::OsRng;
use blake2::{Digest, Blake2s256};
use dalek_ff_group::Ristretto;
use ciphersuite::{
group::{ff::PrimeField, GroupEncoding},
Ciphersuite,
Ciphersuite, Ristretto,
};
use frost::{
FrostError,
dkg::{Participant, musig::musig},
ThresholdKeys,
sign::*,
};
use dkg_musig::musig;
use frost::{FrostError, dkg::Participant, ThresholdKeys, sign::*};
use frost_schnorrkel::Schnorrkel;
use scale::Encode;
@@ -116,7 +119,7 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
let algorithm = Schnorrkel::new(b"substrate");
let keys: ThresholdKeys<Ristretto> =
musig(musig_context(self.spec.set().into()), self.key.clone(), participants)
musig(&musig_context(self.spec.set()), self.key, participants)
.expect("signing for a set we aren't in/validator present multiple times")
.into();
@@ -295,7 +298,7 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, preprocesses).1;
let msg = set_keys_message(
&self.spec.set(),
&self.removed.iter().map(|key| Public::from(key.to_bytes())).collect::<Vec<_>>(),
&self.removed.iter().map(|key| Public(key.to_bytes())).collect::<Vec<_>>(),
key_pair,
);
self.signing_protocol().share_internal(&participants, preprocesses, &msg)

View File

@@ -3,14 +3,13 @@ use std::{io, collections::HashMap};
use transcript::{Transcript, RecommendedTranscript};
use dalek_ff_group::Ristretto;
use ciphersuite::{group::GroupEncoding, Ciphersuite};
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use frost::Participant;
use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};
use serai_client::{primitives::PublicKey, validator_sets::primitives::ExternalValidatorSet};
use serai_client::{primitives::PublicKey, validator_sets::primitives::ValidatorSet};
fn borsh_serialize_validators<W: io::Write>(
validators: &Vec<(<Ristretto as Ciphersuite>::G, u16)>,
@@ -44,7 +43,7 @@ fn borsh_deserialize_validators<R: io::Read>(
pub struct TributarySpec {
serai_block: [u8; 32],
start_time: u64,
set: ExternalValidatorSet,
set: ValidatorSet,
#[borsh(
serialize_with = "borsh_serialize_validators",
deserialize_with = "borsh_deserialize_validators"
@@ -56,7 +55,7 @@ impl TributarySpec {
pub fn new(
serai_block: [u8; 32],
start_time: u64,
set: ExternalValidatorSet,
set: ValidatorSet,
set_participants: Vec<(PublicKey, u16)>,
) -> TributarySpec {
let mut validators = vec![];
@@ -69,7 +68,7 @@ impl TributarySpec {
Self { serai_block, start_time, set, validators }
}
pub fn set(&self) -> ExternalValidatorSet {
pub fn set(&self) -> ValidatorSet {
self.set
}

View File
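
The `serialize_with`/`deserialize_with` hooks above exist because the Ristretto group element isn't natively borsh-serializable. A sketch of the same pattern, assuming borsh 1.x and using a `[u8; 32]` stand-in for the point type:

```rust
// A sketch of the custom-serializer pattern above, assuming borsh 1.x and a
// [u8; 32] stand-in for the Ristretto point type.
use std::io;

use borsh::{BorshDeserialize, BorshSerialize};

fn serialize_validators<W: io::Write>(
    validators: &Vec<([u8; 32], u16)>,
    writer: &mut W,
) -> io::Result<()> {
    // Length-prefix, then each (key bytes, weight) pair
    u32::try_from(validators.len()).unwrap().serialize(writer)?;
    for (key, weight) in validators {
        writer.write_all(key)?;
        weight.serialize(writer)?;
    }
    Ok(())
}

fn deserialize_validators<R: io::Read>(reader: &mut R) -> io::Result<Vec<([u8; 32], u16)>> {
    let len = u32::deserialize_reader(reader)?;
    let mut validators = Vec::with_capacity(len as usize);
    for _ in 0 .. len {
        let mut key = [0; 32];
        reader.read_exact(&mut key)?;
        validators.push((key, u16::deserialize_reader(reader)?));
    }
    Ok(validators)
}

#[derive(BorshSerialize, BorshDeserialize)]
struct Spec {
    #[borsh(
        serialize_with = "serialize_validators",
        deserialize_with = "deserialize_validators"
    )]
    validators: Vec<([u8; 32], u16)>,
}

fn main() {
    let spec = Spec { validators: vec![([1; 32], 2)] };
    let bytes = borsh::to_vec(&spec).unwrap();
    assert_eq!(Spec::try_from_slice(&bytes).unwrap().validators, spec.validators);
}
```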

@@ -7,10 +7,9 @@ use rand_core::{RngCore, CryptoRng};
use blake2::{Digest, Blake2s256};
use transcript::{Transcript, RecommendedTranscript};
use dalek_ff_group::Ristretto;
use ciphersuite::{
group::{ff::Field, GroupEncoding},
Ciphersuite,
Ciphersuite, Ristretto,
};
use schnorr::SchnorrSignature;
use frost::Participant;

View File

@@ -27,8 +27,7 @@ rand_chacha = { version = "0.3", default-features = false, features = ["std"] }
blake2 = { version = "0.10", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] }
dalek-ff-group = { path = "../../crypto/dalek-ff-group" }
ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", default-features = false, features = ["std"] }
ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] }
schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", default-features = false, features = ["std"] }
hex = { version = "0.4", default-features = false, features = ["std"] }

View File

@@ -1,7 +1,6 @@
use std::collections::{VecDeque, HashSet};
use dalek_ff_group::Ristretto;
use ciphersuite::{group::GroupEncoding, Ciphersuite};
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use serai_db::{Get, DbTxn, Db};

View File

@@ -5,8 +5,7 @@ use async_trait::async_trait;
use zeroize::Zeroizing;
use dalek_ff_group::Ristretto;
use ciphersuite::Ciphersuite;
use ciphersuite::{Ciphersuite, Ristretto};
use scale::Decode;
use futures_channel::mpsc::UnboundedReceiver;

View File

@@ -1,7 +1,6 @@
use std::collections::HashMap;
use dalek_ff_group::Ristretto;
use ciphersuite::Ciphersuite;
use ciphersuite::{Ciphersuite, Ristretto};
use serai_db::{DbTxn, Db};

View File

@@ -11,13 +11,12 @@ use rand_chacha::ChaCha12Rng;
use transcript::{Transcript, RecommendedTranscript};
use dalek_ff_group::Ristretto;
use ciphersuite::{
group::{
GroupEncoding,
ff::{Field, PrimeField},
},
Ciphersuite,
Ciphersuite, Ristretto,
};
use schnorr::{
SchnorrSignature,

View File

@@ -4,8 +4,7 @@ use scale::{Encode, Decode, IoReader};
use blake2::{Digest, Blake2s256};
use dalek_ff_group::Ristretto;
use ciphersuite::Ciphersuite;
use ciphersuite::{Ciphersuite, Ristretto};
use crate::{
transaction::{Transaction, TransactionKind, TransactionError},

View File

@@ -1,11 +1,9 @@
use std::{sync::Arc, io, collections::HashMap, fmt::Debug};
use blake2::{Digest, Blake2s256};
use dalek_ff_group::Ristretto;
use ciphersuite::{
group::{ff::Field, Group},
Ciphersuite,
Ciphersuite, Ristretto,
};
use schnorr::SchnorrSignature;

View File

@@ -10,8 +10,7 @@ use rand::rngs::OsRng;
use blake2::{Digest, Blake2s256};
use dalek_ff_group::Ristretto;
use ciphersuite::{group::ff::Field, Ciphersuite};
use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
use serai_db::{DbTxn, Db, MemDb};

View File

@@ -3,8 +3,7 @@ use std::{sync::Arc, collections::HashMap};
use zeroize::Zeroizing;
use rand::{RngCore, rngs::OsRng};
use dalek_ff_group::Ristretto;
use ciphersuite::{group::ff::Field, Ciphersuite};
use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
use tendermint::ext::Commit;

View File

@@ -6,10 +6,9 @@ use rand::{RngCore, CryptoRng, rngs::OsRng};
use blake2::{Digest, Blake2s256};
use dalek_ff_group::Ristretto;
use ciphersuite::{
group::{ff::Field, Group},
Ciphersuite,
Ciphersuite, Ristretto,
};
use schnorr::SchnorrSignature;

View File

@@ -2,8 +2,7 @@ use rand::rngs::OsRng;
use blake2::{Digest, Blake2s256};
use dalek_ff_group::Ristretto;
use ciphersuite::{group::ff::Field, Ciphersuite};
use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
use crate::{
ReadWrite,

View File

@@ -3,8 +3,7 @@ use std::sync::Arc;
use zeroize::Zeroizing;
use rand::{RngCore, rngs::OsRng};
use dalek_ff_group::Ristretto;
use ciphersuite::{Ciphersuite, group::ff::Field};
use ciphersuite::{Ristretto, Ciphersuite, group::ff::Field};
use scale::Encode;

View File

@@ -6,10 +6,9 @@ use thiserror::Error;
use blake2::{Digest, Blake2b512};
use dalek_ff_group::Ristretto;
use ciphersuite::{
group::{Group, GroupEncoding},
Ciphersuite,
Ciphersuite, Ristretto,
};
use schnorr::SchnorrSignature;

View File

@@ -25,7 +25,7 @@ parity-scale-codec = { version = "3", default-features = false, features = ["std
futures-util = { version = "0.3", default-features = false, features = ["std", "async-await-macro", "sink", "channel"] }
futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }
patchable-async-sleep = { version = "0.1", path = "../../../common/patchable-async-sleep", default-features = false }
tokio = { version = "1", default-features = false, features = ["time"] }
serai-db = { path = "../../../common/db", version = "0.1", default-features = false }

View File

@@ -1,5 +1,3 @@
#![expect(clippy::cast_possible_truncation)]
use core::fmt::Debug;
use std::{
@@ -15,7 +13,7 @@ use futures_util::{
FutureExt, StreamExt, SinkExt,
future::{self, Fuse},
};
use patchable_async_sleep::sleep;
use tokio::time::sleep;
use serai_db::{Get, DbTxn, Db};
@@ -710,17 +708,9 @@ impl<N: Network + 'static> TendermintMachine<N> {
if let Data::Proposal(_, block) = &msg.data {
match self.network.validate(block).await {
Ok(()) => {}
Err(BlockError::Temporal) => {
if self.block.round().step == Step::Propose {
self.broadcast(Data::Prevote(None));
}
Err(TendermintError::Temporal)?;
}
Err(BlockError::Temporal) => return Err(TendermintError::Temporal),
Err(BlockError::Fatal) => {
log::warn!(target: "tendermint", "validator proposed a fatally invalid block");
if self.block.round().step == Step::Propose {
self.broadcast(Data::Prevote(None));
}
self
.slash(
msg.sender,
@@ -739,9 +729,6 @@ impl<N: Network + 'static> TendermintMachine<N> {
target: "tendermint",
"proposed proposed with a syntactically invalid valid round",
);
if self.block.round().step == Step::Propose {
self.broadcast(Data::Prevote(None));
}
self
.slash(msg.sender, SlashEvent::WithEvidence(Evidence::InvalidValidRound(msg.encode())))
.await;

View File
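
The hunks above show the nil-prevote behavior around invalid proposals: when validation fails while the machine is still in the Propose step, it broadcasts `Prevote(None)` rather than staying silent. A minimal stand-in sketch of that decision (simplified types, not the actual machine):

```rust
// A minimal sketch of the nil-prevote decision above; Step and Data are
// simplified stand-ins for the tendermint machine's types.
#[derive(PartialEq)]
enum Step {
    Propose,
    Prevote,
    Precommit,
}

enum Data {
    Prevote(Option<[u8; 32]>),
    Precommit(Option<[u8; 32]>),
}

fn on_invalid_proposal(current_step: &Step, broadcast: impl Fn(Data)) {
    // Only prevote nil if the machine is still in the Propose step
    if *current_step == Step::Propose {
        broadcast(Data::Prevote(None));
    }
}

fn main() {
    on_invalid_proposal(&Step::Propose, |data| match data {
        Data::Prevote(block) => assert!(block.is_none()),
        Data::Precommit(_) => unreachable!(),
    });
}
```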

@@ -5,7 +5,7 @@ use std::{
};
use futures_util::{FutureExt, future};
use patchable_async_sleep::sleep;
use tokio::time::sleep;
use crate::{
time::CanonicalInstant,

View File

@@ -1,13 +1,13 @@
[package]
name = "ciphersuite"
version = "0.4.2"
version = "0.4.1"
description = "Ciphersuites built around ff/group"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ciphersuite"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["ciphersuite", "ff", "group"]
edition = "2021"
rust-version = "1.66"
rust-version = "1.74"
[package.metadata.docs.rs]
all-features = true
@@ -24,12 +24,22 @@ rand_core = { version = "0.6", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["derive"] }
subtle = { version = "^2.4", default-features = false }
digest = { version = "0.10", default-features = false, features = ["core-api"] }
digest = { version = "0.10", default-features = false }
transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false }
sha2 = { version = "0.10", default-features = false, optional = true }
sha3 = { version = "0.10", default-features = false, optional = true }
ff = { version = "0.13", default-features = false, features = ["bits"] }
group = { version = "0.13", default-features = false }
dalek-ff-group = { path = "../dalek-ff-group", version = "0.4", default-features = false, optional = true }
elliptic-curve = { version = "0.13", default-features = false, features = ["hash2curve"], optional = true }
p256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"], optional = true }
k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"], optional = true }
minimal-ed448 = { path = "../ed448", version = "0.4", default-features = false, optional = true }
[dev-dependencies]
hex = { version = "0.4", default-features = false, features = ["std"] }
@@ -38,7 +48,7 @@ rand_core = { version = "0.6", default-features = false, features = ["std"] }
ff-group-tests = { version = "0.13", path = "../ff-group-tests" }
[features]
alloc = ["std-shims", "ff/alloc"]
alloc = ["std-shims"]
std = [
"std-shims/std",
@@ -49,8 +59,27 @@ std = [
"digest/std",
"transcript/std",
"sha2?/std",
"sha3?/std",
"ff/std",
"dalek-ff-group?/std",
"elliptic-curve?/std",
"p256?/std",
"k256?/std",
"minimal-ed448?/std",
]
dalek = ["sha2", "dalek-ff-group"]
ed25519 = ["dalek"]
ristretto = ["dalek"]
kp256 = ["sha2", "elliptic-curve"]
p256 = ["kp256", "dep:p256"]
secp256k1 = ["kp256", "k256"]
ed448 = ["sha3", "minimal-ed448"]
default = ["std"]

View File

@@ -21,8 +21,6 @@ Their `hash_to_F` is the
[IETF's hash to curve](https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html),
yet applied to their scalar field.
Please see the [`ciphersuite-kp256`](https://docs.rs/ciphersuite-kp256) crate for more info.
### Ed25519/Ristretto
Ed25519/Ristretto are offered via
@@ -35,8 +33,6 @@ the draft
[RFC-RISTRETTO](https://www.ietf.org/archive/id/draft-irtf-cfrg-ristretto255-decaf448-05.html).
The domain-separation tag is naively prefixed to the message.
Please see the [`dalek-ff-group`](https://docs.rs/dalek-ff-group) crate for more info.
### Ed448
Ed448 is offered via [minimal-ed448](https://crates.io/crates/minimal-ed448), an
@@ -46,5 +42,3 @@ to its prime-order subgroup.
Its `hash_to_F` is the wide reduction of SHAKE256, with a 114-byte output, as
used in [RFC-8032](https://www.rfc-editor.org/rfc/rfc8032). The
domain-separation tag is naively prefixed to the message.
Please see the [`minimal-ed448`](https://docs.rs/minimal-ed448) crate for more info.

View File

@@ -1,55 +0,0 @@
[package]
name = "ciphersuite-kp256"
version = "0.4.0"
description = "Ciphersuites built around ff/group"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ciphersuite/kp256"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["ciphersuite", "ff", "group"]
edition = "2021"
rust-version = "1.66"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
rand_core = { version = "0.6", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["derive"] }
sha2 = { version = "0.10", default-features = false }
elliptic-curve = { version = "0.13", default-features = false, features = ["hash2curve"] }
p256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"] }
k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"] }
ciphersuite = { path = "../", version = "0.4", default-features = false }
[dev-dependencies]
hex = { version = "0.4", default-features = false, features = ["std"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }
ff-group-tests = { version = "0.13", path = "../../ff-group-tests" }
[features]
alloc = ["ciphersuite/alloc"]
std = [
"rand_core/std",
"zeroize/std",
"sha2/std",
"elliptic-curve/std",
"p256/std",
"k256/std",
"ciphersuite/std",
]
default = ["std"]

View File

@@ -1,3 +0,0 @@
# Ciphersuite {k, p}256
SECP256k1 and P-256 Ciphersuites around k256 and p256.

View File

@@ -3,9 +3,9 @@ use zeroize::Zeroize;
use sha2::{Digest, Sha512};
use group::Group;
use crate::Scalar;
use dalek_ff_group::Scalar;
use ciphersuite::Ciphersuite;
use crate::Ciphersuite;
macro_rules! dalek_curve {
(
@@ -15,7 +15,7 @@ macro_rules! dalek_curve {
$Point: ident,
$ID: literal
) => {
use crate::$Point;
use dalek_ff_group::$Point;
impl Ciphersuite for $Ciphersuite {
type F = Scalar;
@@ -40,9 +40,12 @@ macro_rules! dalek_curve {
/// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition
between the two. This means `dst: b"abc", data: b"def"` will produce the same scalar as
`dst: b"abcdef", data: b""`. Please use carefully, not letting DSTs be substrings of each other.
#[cfg(any(test, feature = "ristretto"))]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct Ristretto;
#[cfg(any(test, feature = "ristretto"))]
dalek_curve!("ristretto", Ristretto, RistrettoPoint, b"ristretto");
#[cfg(any(test, feature = "ristretto"))]
#[test]
fn test_ristretto() {
ff_group_tests::group::test_prime_group_bits::<_, RistrettoPoint>(&mut rand_core::OsRng);
@@ -68,9 +71,12 @@ fn test_ristretto() {
/// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition
between the two. This means `dst: b"abc", data: b"def"` will produce the same scalar as
`dst: b"abcdef", data: b""`. Please use carefully, not letting DSTs be substrings of each other.
#[cfg(feature = "ed25519")]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct Ed25519;
#[cfg(feature = "ed25519")]
dalek_curve!("ed25519", Ed25519, EdwardsPoint, b"edwards25519");
#[cfg(feature = "ed25519")]
#[test]
fn test_ed25519() {
ff_group_tests::group::test_prime_group_bits::<_, EdwardsPoint>(&mut rand_core::OsRng);

View File

@@ -1,17 +1,15 @@
use zeroize::Zeroize;
use sha3::{
digest::{
typenum::U114, core_api::BlockSizeUser, Update, Output, OutputSizeUser, FixedOutput,
ExtendableOutput, XofReader, HashMarker, Digest,
},
Shake256,
use digest::{
typenum::U114, core_api::BlockSizeUser, Update, Output, OutputSizeUser, FixedOutput,
ExtendableOutput, XofReader, HashMarker, Digest,
};
use sha3::Shake256;
use group::Group;
use crate::{Scalar, Point};
use minimal_ed448::{Scalar, Point};
use ciphersuite::Ciphersuite;
use crate::Ciphersuite;
/// Shake256, fixed to a 114-byte output, as used by Ed448.
#[derive(Clone, Default)]

View File
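
For context, the `Shake256_114` wrapper above pins SHAKE256 to a fixed 114-byte output, which is what Ed448's `hash_to_F` wide-reduces into a scalar. A hedged sketch of producing those bytes, assuming the `sha3` 0.10 crate:

```rust
// A hedged sketch of the fixed 114-byte SHAKE256 output used by Ed448's
// hash_to_F, assuming the `sha3` 0.10 crate.
use sha3::{
    digest::{ExtendableOutput, Update, XofReader},
    Shake256,
};

fn shake256_114(dst: &[u8], data: &[u8]) -> [u8; 114] {
    let mut hasher = Shake256::default();
    // The domain-separation tag is naively prefixed to the message
    hasher.update(dst);
    hasher.update(data);
    let mut output = [0; 114];
    hasher.finalize_xof().read(&mut output);
    // These bytes are then wide-reduced into a scalar (not shown here)
    output
}

fn main() {
    println!("{:x?}", &shake256_114(b"dst", b"message")[.. 8]);
}
```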

@@ -1,17 +1,16 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
use zeroize::Zeroize;
use sha2::Sha256;
use group::ff::PrimeField;
use elliptic_curve::{
generic_array::GenericArray,
bigint::{NonZero, CheckedAdd, Encoding, U384},
hash2curve::{Expander, ExpandMsg, ExpandMsgXmd},
};
use ciphersuite::{group::ff::PrimeField, Ciphersuite};
use crate::Ciphersuite;
macro_rules! kp_curve {
(
@@ -108,9 +107,12 @@ fn test_oversize_dst<C: Ciphersuite>() {
/// Ciphersuite for Secp256k1.
///
/// hash_to_F is implemented via the IETF draft for hash to curve's hash_to_field (v16).
#[cfg(feature = "secp256k1")]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct Secp256k1;
#[cfg(feature = "secp256k1")]
kp_curve!("secp256k1", k256, Secp256k1, b"secp256k1");
#[cfg(feature = "secp256k1")]
#[test]
fn test_secp256k1() {
ff_group_tests::group::test_prime_group_bits::<_, k256::ProjectivePoint>(&mut rand_core::OsRng);
@@ -143,9 +145,12 @@ fn test_secp256k1() {
/// Ciphersuite for P-256.
///
/// hash_to_F is implemented via the IETF draft for hash to curve's hash_to_field (v16).
#[cfg(feature = "p256")]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct P256;
#[cfg(feature = "p256")]
kp_curve!("p256", p256, P256, b"P-256");
#[cfg(feature = "p256")]
#[test]
fn test_p256() {
ff_group_tests::group::test_prime_group_bits::<_, p256::ProjectivePoint>(&mut rand_core::OsRng);

View File

@@ -2,7 +2,7 @@
Ciphersuites for elliptic curves premised on ff/group.
This library was
This library, except for the not recommended Ed448 ciphersuite, was
[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),
culminating in commit
[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).

View File

@@ -1,12 +1,9 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("lib.md")]
#![cfg_attr(not(feature = "std"), no_std)]
use core::fmt::Debug;
#[cfg(any(feature = "alloc", feature = "std"))]
#[allow(unused_imports)]
use std_shims::prelude::*;
#[cfg(any(feature = "alloc", feature = "std"))]
use std_shims::io::{self, Read};
use rand_core::{RngCore, CryptoRng};
@@ -26,6 +23,25 @@ use group::{
#[cfg(any(feature = "alloc", feature = "std"))]
use group::GroupEncoding;
#[cfg(feature = "dalek")]
mod dalek;
#[cfg(feature = "ristretto")]
pub use dalek::Ristretto;
#[cfg(feature = "ed25519")]
pub use dalek::Ed25519;
#[cfg(feature = "kp256")]
mod kp256;
#[cfg(feature = "secp256k1")]
pub use kp256::Secp256k1;
#[cfg(feature = "p256")]
pub use kp256::P256;
#[cfg(feature = "ed448")]
mod ed448;
#[cfg(feature = "ed448")]
pub use ed448::*;
/// Unified trait defining a ciphersuite around an elliptic curve.
pub trait Ciphersuite:
'static + Send + Sync + Clone + Copy + PartialEq + Eq + Debug + Zeroize
@@ -83,9 +99,6 @@ pub trait Ciphersuite:
}
/// Read a canonical point from something implementing std::io::Read.
///
/// The provided implementation is safe so long as `GroupEncoding::to_bytes` always returns a
/// canonical serialization.
#[cfg(any(feature = "alloc", feature = "std"))]
#[allow(non_snake_case)]
fn read_G<R: Read>(reader: &mut R) -> io::Result<Self::G> {

View File

@@ -1,13 +1,13 @@
[package]
name = "dalek-ff-group"
version = "0.4.4"
version = "0.4.1"
description = "ff/group bindings around curve25519-dalek"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dalek-ff-group"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["curve25519", "ed25519", "ristretto", "dalek", "group"]
edition = "2021"
rust-version = "1.65"
rust-version = "1.66"
[package.metadata.docs.rs]
all-features = true
@@ -25,22 +25,18 @@ subtle = { version = "^2.4", default-features = false }
rand_core = { version = "0.6", default-features = false }
digest = { version = "0.10", default-features = false }
sha2 = { version = "0.10", default-features = false }
ff = { version = "0.13", default-features = false, features = ["bits"] }
group = { version = "0.13", default-features = false }
ciphersuite = { path = "../ciphersuite", default-features = false }
crypto-bigint = { version = "0.5", default-features = false, features = ["zeroize"] }
curve25519-dalek = { version = ">= 4.0, < 4.2", default-features = false, features = ["alloc", "zeroize", "digest", "group", "precomputed-tables"] }
[dev-dependencies]
hex = "0.4"
rand_core = { version = "0.6", default-features = false, features = ["std"] }
ff-group-tests = { path = "../ff-group-tests" }
[features]
alloc = ["zeroize/alloc", "ciphersuite/alloc"]
std = ["alloc", "zeroize/std", "subtle/std", "rand_core/std", "digest/std", "sha2/std", "ciphersuite/std"]
std = ["zeroize/std", "subtle/std", "rand_core/std", "digest/std"]
default = ["std"]

View File

@@ -17,7 +17,7 @@ use crypto_bigint::{
impl_modulus,
};
use group::ff::{Field, PrimeField, FieldBits, PrimeFieldBits, FromUniformBytes};
use group::ff::{Field, PrimeField, FieldBits, PrimeFieldBits};
use crate::{u8_from_bool, constant_time, math_op, math};
@@ -35,8 +35,7 @@ impl_modulus!(
type ResidueType = Residue<FieldModulus, { FieldModulus::LIMBS }>;
/// A constant-time implementation of the Ed25519 field.
#[derive(Clone, Copy, PartialEq, Eq, Default, Debug, Zeroize)]
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)]
pub struct FieldElement(ResidueType);
// Square root of -1.
@@ -93,7 +92,7 @@ impl Neg for FieldElement {
}
}
impl Neg for &FieldElement {
impl<'a> Neg for &'a FieldElement {
type Output = FieldElement;
fn neg(self) -> Self::Output {
(*self).neg()
@@ -217,18 +216,10 @@ impl PrimeFieldBits for FieldElement {
}
impl FieldElement {
/// Create a FieldElement from a `crypto_bigint::U256`.
///
/// This will reduce the `U256` by the modulus into a member of the field.
pub const fn from_u256(u256: &U256) -> Self {
FieldElement(Residue::new(u256))
}
/// Create a `FieldElement` from the reduction of a 512-bit number.
///
/// The bytes are interpreted in little-endian format.
pub fn wide_reduce(value: [u8; 64]) -> Self {
FieldElement(reduce(U512::from_le_bytes(value)))
/// Interpret the value as a little-endian integer, square it, and reduce it into a FieldElement.
pub fn from_square(value: [u8; 32]) -> FieldElement {
let value = U256::from_le_bytes(value);
FieldElement(reduce(U512::from(value.mul_wide(&value))))
}
/// Perform an exponentiation.
@@ -253,16 +244,7 @@ impl FieldElement {
res *= res;
}
}
let mut scale_by = FieldElement::ONE;
#[allow(clippy::needless_range_loop)]
for i in 0 .. 16 {
#[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 16
{
scale_by = <_>::conditional_select(&scale_by, &table[i], bits.ct_eq(&(i as u8)));
}
}
res *= scale_by;
res *= table[usize::from(bits)];
bits = 0;
}
}
@@ -306,12 +288,6 @@ impl FieldElement {
}
}
impl FromUniformBytes<64> for FieldElement {
fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {
Self::wide_reduce(*bytes)
}
}
impl Sum<FieldElement> for FieldElement {
fn sum<I: Iterator<Item = FieldElement>>(iter: I) -> FieldElement {
let mut res = FieldElement::ZERO;

View File
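
The exponentiation hunks above show two variants of the window lookup: a direct `table[usize::from(bits)]` index and a constant-time scan with `conditional_select`, which avoids a secret-dependent memory access. A sketch of the constant-time variant on plain u64s, assuming subtle 2.x:

```rust
// A sketch of the constant-time window lookup in the diff above, applied to
// plain u64s: every entry is scanned and conditionally selected rather than
// indexed by the secret value. Assumes the `subtle` 2.x crate.
use subtle::{ConditionallySelectable, ConstantTimeEq};

fn ct_lookup(table: &[u64; 16], bits: u8) -> u64 {
    let mut result = 0u64;
    for i in 0 .. 16u8 {
        // Picks the table entry only when i == bits, without branching
        // on the secret value
        result = u64::conditional_select(&result, &table[usize::from(i)], i.ct_eq(&bits));
    }
    result
}

fn main() {
    let table: [u64; 16] = core::array::from_fn(|i| (i as u64) * 10);
    assert_eq!(ct_lookup(&table, 7), 70);
}
```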

@@ -1,5 +1,5 @@
#![allow(deprecated)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![no_std] // Prevents writing new code, in what should be a simple wrapper, which requires std
#![doc = include_str!("../README.md")]
#![allow(clippy::redundant_closure_call)]
@@ -30,7 +30,7 @@ use dalek::{
pub use constants::{ED25519_BASEPOINT_TABLE, RISTRETTO_BASEPOINT_TABLE};
use group::{
ff::{Field, PrimeField, FieldBits, PrimeFieldBits, FromUniformBytes},
ff::{Field, PrimeField, FieldBits, PrimeFieldBits},
Group, GroupEncoding,
prime::PrimeGroup,
};
@@ -38,24 +38,13 @@ use group::{
mod field;
pub use field::FieldElement;
mod ciphersuite;
pub use crate::ciphersuite::{Ed25519, Ristretto};
// Use black_box when possible
#[rustversion::since(1.66)]
mod black_box {
pub(crate) fn black_box<T>(val: T) -> T {
#[allow(clippy::incompatible_msrv)]
core::hint::black_box(val)
}
}
use core::hint::black_box;
#[rustversion::before(1.66)]
mod black_box {
pub(crate) fn black_box<T>(val: T) -> T {
val
}
fn black_box<T>(val: T) -> T {
val
}
use black_box::black_box;
fn u8_from_bool(bit_ref: &mut bool) -> u8 {
let bit_ref = black_box(bit_ref);
@@ -219,16 +208,7 @@ impl Scalar {
res *= res;
}
}
let mut scale_by = Scalar::ONE;
#[allow(clippy::needless_range_loop)]
for i in 0 .. 16 {
#[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 16
{
scale_by = <_>::conditional_select(&scale_by, &table[i], bits.ct_eq(&(i as u8)));
}
}
res *= scale_by;
res *= table[usize::from(bits)];
bits = 0;
}
}
@@ -325,12 +305,6 @@ impl PrimeFieldBits for Scalar {
}
}
impl FromUniformBytes<64> for Scalar {
fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {
Self::from_bytes_mod_order_wide(bytes)
}
}
impl Sum<Scalar> for Scalar {
fn sum<I: Iterator<Item = Scalar>>(iter: I) -> Scalar {
Self(DScalar::sum(iter))
@@ -368,12 +342,7 @@ macro_rules! dalek_group {
$BASEPOINT_POINT: ident,
$BASEPOINT_TABLE: ident
) => {
/// Wrapper around the dalek Point type.
///
/// All operations will be restricted to a prime-order subgroup (equivalent to the group itself
/// in the case of Ristretto). The exposure of the internal element does allow bypassing this
/// however, which may lead to undefined/computationally-unsafe behavior, and is entirely at
/// the user's risk.
/// Wrapper around the dalek Point type. For Ed25519, this is restricted to the prime subgroup.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct $Point(pub $DPoint);
deref_borrow!($Point, $DPoint);

View File
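
Both variants of the `black_box` shim above feed the same bit-handling helper. A minimal sketch of `u8_from_bool` around `core::hint::black_box` (the zeroization of the source bool is omitted here for brevity):

```rust
// A minimal sketch of the u8_from_bool helper around `black_box`: the hint
// discourages the compiler from specializing on the secret bool.
use core::hint::black_box;

fn u8_from_bool(bit_ref: &mut bool) -> u8 {
    let bit_ref = black_box(bit_ref);
    let bit = black_box(*bit_ref);
    black_box(u8::from(bit))
}

fn main() {
    let mut bit = true;
    assert_eq!(u8_from_bool(&mut bit), 1);
}
```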

@@ -1,13 +1,13 @@
[package]
name = "dkg"
version = "0.6.1"
version = "0.5.1"
description = "Distributed key generation over ff/group"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
rust-version = "1.66"
rust-version = "1.79"
[package.metadata.docs.rs]
all-features = true
@@ -17,25 +17,50 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true
[dependencies]
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive", "alloc"] }
thiserror = { version = "1", default-features = false, optional = true }
thiserror = { version = "2", default-features = false }
rand_core = { version = "0.6", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false }
borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true }
ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false, features = ["alloc"] }
transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false, features = ["recommended"] }
chacha20 = { version = "0.9", default-features = false, features = ["zeroize"] }
ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false }
multiexp = { path = "../multiexp", version = "0.4", default-features = false }
schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false }
dleq = { path = "../dleq", version = "^0.4.1", default-features = false }
[dev-dependencies]
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
ciphersuite = { path = "../ciphersuite", default-features = false, features = ["ristretto"] }
[features]
std = [
"thiserror/std",
"thiserror",
"rand_core/std",
"std-shims/std",
"borsh?/std",
"transcript/std",
"chacha20/std",
"ciphersuite/std",
"multiexp/std",
"multiexp/batch",
"schnorr/std",
"dleq/std",
"dleq/serialize"
]
borsh = ["dep:borsh"]
tests = ["rand_core/getrandom"]
default = ["std"]

View File

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2021-2025 Luke Parker
Copyright (c) 2021-2023 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@@ -1,15 +1,16 @@
# Distributed Key Generation
A crate implementing a type for keys, presumably the result of a distributed
key generation protocol, and utilities around them.
A collection of implementations of various distributed key generation protocols.
This crate used to host implementations of distributed key generation protocols
as well (hence the name). Those have been smashed into their own crates, such
as [`dkg-musig`](https://docs.rs/dkg-musig) and
[`dkg-pedpop`](https://docs.rs/dkg-pedpop).
All included protocols resolve into the provided `Threshold` types, intended to
enable their modularity. Additional utilities around these types, such as
promotion from one generator to another, are also provided.
Before being smashed, this crate was [audited by Cypher Stack in March 2023](
https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf
), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](
https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06
). Any subsequent changes have not undergone auditing.
Currently, the only included protocol is the two-round protocol from the
[FROST paper](https://eprint.iacr.org/2020/852).
This library was
[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),
culminating in commit
[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).
Any subsequent changes have not undergone auditing.

View File

@@ -1,36 +0,0 @@
[package]
name = "dkg-dealer"
version = "0.6.0"
description = "Produce dkg::ThresholdKeys with a dealer key generation"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/dealer"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
rust-version = "1.66"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
zeroize = { version = "^1.5", default-features = false }
rand_core = { version = "0.6", default-features = false }
std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false }
ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false }
dkg = { path = "../", version = "0.6", default-features = false }
[features]
std = [
"zeroize/std",
"rand_core/std",
"std-shims/std",
"ciphersuite/std",
"dkg/std",
]
default = ["std"]

View File

@@ -1,13 +0,0 @@
# Distributed Key Generation - Dealer
This crate implements a dealer key generation protocol for the
[`dkg`](https://docs.rs/dkg) crate's types. The dealer is a single point of
failure while the key is being generated, so this is NOT recommended for use
outside of tests.
This crate was originally part of (in some form) the `dkg` crate, which was
[audited by Cypher Stack in March 2023](
https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf
), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](
https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06
). Any subsequent changes have not undergone auditing.

View File

@@ -1,68 +0,0 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![no_std]
use core::ops::Deref;
use std_shims::{vec::Vec, collections::HashMap};
use zeroize::{Zeroize, Zeroizing};
use rand_core::{RngCore, CryptoRng};
use ciphersuite::{
group::ff::{Field, PrimeField},
Ciphersuite,
};
pub use dkg::*;
/// Create a key via a dealer key generation protocol.
pub fn key_gen<R: RngCore + CryptoRng, C: Ciphersuite>(
rng: &mut R,
threshold: u16,
participants: u16,
) -> Result<HashMap<Participant, ThresholdKeys<C>>, DkgError> {
let mut coefficients = Vec::with_capacity(usize::from(participants));
// `.max(1)` so we always generate the 0th coefficient which we'll share
for _ in 0 .. threshold.max(1) {
coefficients.push(Zeroizing::new(C::F::random(&mut *rng)));
}
fn polynomial<F: PrimeField + Zeroize>(
coefficients: &[Zeroizing<F>],
l: Participant,
) -> Zeroizing<F> {
let l = F::from(u64::from(u16::from(l)));
// This should never be reached since Participant is explicitly non-zero
assert!(l != F::ZERO, "zero participant passed to polynomial");
let mut share = Zeroizing::new(F::ZERO);
for (idx, coefficient) in coefficients.iter().rev().enumerate() {
*share += coefficient.deref();
if idx != (coefficients.len() - 1) {
*share *= l;
}
}
share
}
let group_key = C::generator() * coefficients[0].deref();
let mut secret_shares = HashMap::with_capacity(participants as usize);
let mut verification_shares = HashMap::with_capacity(participants as usize);
for i in 1 ..= participants {
let i = Participant::new(i).expect("non-zero u16 wasn't a valid Participant index");
let secret_share = polynomial(&coefficients, i);
secret_shares.insert(i, secret_share.clone());
verification_shares.insert(i, C::generator() * *secret_share);
}
let mut res = HashMap::with_capacity(participants as usize);
for (i, secret_share) in secret_shares {
let keys = ThresholdKeys::new(
ThresholdParams::new(threshold, participants, i)?,
Interpolation::Lagrange,
secret_share,
verification_shares.clone(),
)?;
debug_assert_eq!(keys.group_key(), group_key);
res.insert(i, keys);
}
Ok(res)
}

View File
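
The `polynomial` helper above is Horner's method, iterating coefficients from the highest degree down. A toy restatement over integers mod a small prime, for illustration only (the real code evaluates over the ciphersuite's field):

```rust
// A toy restatement of the dealer's share polynomial above: Horner's method
// evaluates sum(c_j * l^j) from the highest coefficient down, here over u64
// mod a small prime instead of a field element.
const P: u64 = 65_521;

fn polynomial(coefficients: &[u64], l: u64) -> u64 {
    assert!(l != 0, "zero participant passed to polynomial");
    let mut share = 0;
    for (idx, coefficient) in coefficients.iter().rev().enumerate() {
        share = (share + coefficient) % P;
        if idx != (coefficients.len() - 1) {
            share = (share * l) % P;
        }
    }
    share
}

fn main() {
    // f(x) = 7 + 3x + 2x^2, evaluated at x = 2: 7 + 6 + 8 = 21
    assert_eq!(polynomial(&[7, 3, 2], 2), 21);
}
```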

@@ -1,49 +0,0 @@
[package]
name = "dkg-musig"
version = "0.6.0"
description = "The MuSig key aggregation protocol"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/musig"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
rust-version = "1.79"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
thiserror = { version = "2", default-features = false }
rand_core = { version = "0.6", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false }
multiexp = { path = "../../multiexp", version = "0.4", default-features = false }
ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false }
dkg = { path = "../", version = "0.6", default-features = false }
[dev-dependencies]
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
dalek-ff-group = { path = "../../dalek-ff-group" }
dkg-recovery = { path = "../recovery", default-features = false, features = ["std"] }
[features]
std = [
"thiserror/std",
"rand_core/std",
"std-shims/std",
"multiexp/std",
"ciphersuite/std",
"dkg/std",
]
default = ["std"]

View File

@@ -1,12 +0,0 @@
# Distributed Key Generation - MuSig
This implements the MuSig key aggregation protocol for the
[`dkg`](https://docs.rs/dkg) crate's types.
This crate was originally part of (in some form) the `dkg` crate, which was
[audited by Cypher Stack in March 2023](
https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf
), culminating in commit
[669d2dbffc1dafb82a09d9419ea182667115df06](
https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06
). Any subsequent changes have not undergone auditing.

View File

@@ -1,162 +0,0 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]
use core::ops::Deref;
use std_shims::{
vec,
vec::Vec,
collections::{HashSet, HashMap},
};
use zeroize::Zeroizing;
use ciphersuite::{group::GroupEncoding, Ciphersuite};
pub use dkg::*;
#[cfg(test)]
mod tests;
/// Errors encountered when working with threshold keys.
#[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)]
pub enum MusigError<C: Ciphersuite> {
/// No keys were provided.
#[error("no keys provided")]
NoKeysProvided,
/// Too many keys were provided.
#[error("too many keys (allowed {max}, provided {provided})")]
TooManyKeysProvided {
/// The maximum amount of keys allowed.
max: u16,
/// The amount of keys provided.
provided: usize,
},
/// A participant was duplicated.
#[error("a participant was duplicated")]
DuplicatedParticipant(C::G),
/// Participating, yet our public key wasn't found in the list of keys.
#[error("private key's public key wasn't present in the list of public keys")]
NotPresent,
/// An error propagated from the underlying `dkg` crate.
#[error("error from dkg ({0})")]
DkgError(DkgError),
}
fn check_keys<C: Ciphersuite>(keys: &[C::G]) -> Result<u16, MusigError<C>> {
if keys.is_empty() {
Err(MusigError::NoKeysProvided)?;
}
let keys_len = u16::try_from(keys.len())
.map_err(|_| MusigError::TooManyKeysProvided { max: u16::MAX, provided: keys.len() })?;
let mut set = HashSet::with_capacity(keys.len());
for key in keys {
let bytes = key.to_bytes().as_ref().to_vec();
if !set.insert(bytes) {
Err(MusigError::DuplicatedParticipant(*key))?;
}
}
Ok(keys_len)
}
fn binding_factor_transcript<C: Ciphersuite>(
context: [u8; 32],
keys_len: u16,
keys: &[C::G],
) -> Vec<u8> {
debug_assert_eq!(usize::from(keys_len), keys.len());
let mut transcript = vec![];
transcript.extend(&context);
transcript.extend(keys_len.to_le_bytes());
for key in keys {
transcript.extend(key.to_bytes().as_ref());
}
transcript
}
fn binding_factor<C: Ciphersuite>(mut transcript: Vec<u8>, i: u16) -> C::F {
transcript.extend(i.to_le_bytes());
C::hash_to_F(b"dkg-musig", &transcript)
}
#[allow(clippy::type_complexity)]
fn musig_key_multiexp<C: Ciphersuite>(
context: [u8; 32],
keys: &[C::G],
) -> Result<Vec<(C::F, C::G)>, MusigError<C>> {
let keys_len = check_keys::<C>(keys)?;
let transcript = binding_factor_transcript::<C>(context, keys_len, keys);
let mut multiexp = Vec::with_capacity(keys.len());
for i in 1 ..= keys_len {
multiexp.push((binding_factor::<C>(transcript.clone(), i), keys[usize::from(i - 1)]));
}
Ok(multiexp)
}
/// The group key resulting from using this library's MuSig key aggregation.
///
/// This function executes in variable time and MUST NOT be used with secret data.
pub fn musig_key_vartime<C: Ciphersuite>(
context: [u8; 32],
keys: &[C::G],
) -> Result<C::G, MusigError<C>> {
Ok(multiexp::multiexp_vartime(&musig_key_multiexp(context, keys)?))
}
/// The group key resulting from using this library's MuSig key aggregation.
pub fn musig_key<C: Ciphersuite>(context: [u8; 32], keys: &[C::G]) -> Result<C::G, MusigError<C>> {
Ok(multiexp::multiexp(&musig_key_multiexp(context, keys)?))
}
/// A n-of-n non-interactive DKG which does not guarantee the usability of the resulting key.
pub fn musig<C: Ciphersuite>(
context: [u8; 32],
private_key: Zeroizing<C::F>,
keys: &[C::G],
) -> Result<ThresholdKeys<C>, MusigError<C>> {
let our_pub_key = C::generator() * private_key.deref();
let Some(our_i) = keys.iter().position(|key| *key == our_pub_key) else {
Err(MusigError::DkgError(DkgError::NotParticipating))?
};
let keys_len: u16 = check_keys::<C>(keys)?;
let params = ThresholdParams::new(
keys_len,
keys_len,
// The `+ 1` won't fail as `keys.len() <= u16::MAX`, so any index is `< u16::MAX`
Participant::new(
u16::try_from(our_i).expect("keys.len() <= u16::MAX yet index of keys > u16::MAX?") + 1,
)
.expect("i + 1 != 0"),
)
.map_err(MusigError::DkgError)?;
let transcript = binding_factor_transcript::<C>(context, keys_len, keys);
let mut binding_factors = Vec::with_capacity(keys.len());
let mut multiexp = Vec::with_capacity(keys.len());
let mut verification_shares = HashMap::with_capacity(keys.len());
for (i, key) in (1 ..= keys_len).zip(keys.iter().copied()) {
let binding_factor = binding_factor::<C>(transcript.clone(), i);
binding_factors.push(binding_factor);
multiexp.push((binding_factor, key));
let i = Participant::new(i).expect("non-zero u16 wasn't a valid Participant index?");
verification_shares.insert(i, key);
}
let group_key = multiexp::multiexp(&multiexp);
debug_assert_eq!(our_pub_key, verification_shares[&params.i()]);
debug_assert_eq!(musig_key_vartime::<C>(context, keys), Ok(group_key));
ThresholdKeys::new(
params,
Interpolation::Constant(binding_factors),
private_key,
verification_shares,
)
.map_err(MusigError::DkgError)
}

View File
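
To make the binding-factor structure above concrete, a toy restatement of `musig_key` over integers mod a small prime (the real code works in a prime-order group and derives factors with the ciphersuite's `hash_to_F`, not std's hasher):

```rust
// A toy illustration of MuSig-style key aggregation: each key gets a binding
// factor hashed from (context, all keys, index), and the group key is the
// binding-factor-weighted sum. u64 arithmetic stands in for group math.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

const P: u64 = 2_147_483_647; // toy modulus; real code uses the group order

fn binding_factor(context: &[u8], keys: &[u64], i: u16) -> u64 {
    let mut h = DefaultHasher::new();
    context.hash(&mut h);
    keys.hash(&mut h);
    i.hash(&mut h);
    h.finish() % P
}

fn musig_key(context: &[u8], keys: &[u64]) -> u64 {
    keys.iter().enumerate().fold(0, |acc, (idx, key)| {
        let i = u16::try_from(idx + 1).unwrap();
        (acc + (binding_factor(context, keys, i) * (key % P)) % P) % P
    })
}

fn main() {
    let keys = [11, 22, 33];
    println!("aggregated key: {}", musig_key(b"example context", &keys));
}
```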

@@ -1,71 +0,0 @@
use std::collections::HashMap;
use zeroize::Zeroizing;
use rand_core::OsRng;
use dalek_ff_group::Ristretto;
use ciphersuite::{group::ff::Field, Ciphersuite};
use dkg_recovery::recover_key;
use crate::*;
/// Tests MuSig key generation.
#[test]
pub fn test_musig() {
const PARTICIPANTS: u16 = 5;
let mut keys = vec![];
let mut pub_keys = vec![];
for _ in 0 .. PARTICIPANTS {
let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
pub_keys.push(<Ristretto as Ciphersuite>::generator() * *key);
keys.push(key);
}
const CONTEXT: [u8; 32] = *b"MuSig Test ";
// Empty signing set
musig::<Ristretto>(CONTEXT, Zeroizing::new(<Ristretto as Ciphersuite>::F::ZERO), &[])
.unwrap_err();
// Signing set we're not part of
musig::<Ristretto>(
CONTEXT,
Zeroizing::new(<Ristretto as Ciphersuite>::F::ZERO),
&[<Ristretto as Ciphersuite>::generator()],
)
.unwrap_err();
// Test with n keys
{
let mut created_keys = HashMap::new();
let mut verification_shares = HashMap::new();
let group_key = musig_key::<Ristretto>(CONTEXT, &pub_keys).unwrap();
for (i, key) in keys.iter().enumerate() {
let these_keys = musig::<Ristretto>(CONTEXT, key.clone(), &pub_keys).unwrap();
assert_eq!(these_keys.params().t(), PARTICIPANTS);
assert_eq!(these_keys.params().n(), PARTICIPANTS);
assert_eq!(usize::from(u16::from(these_keys.params().i())), i + 1);
verification_shares.insert(
these_keys.params().i(),
<Ristretto as Ciphersuite>::generator() * **these_keys.original_secret_share(),
);
assert_eq!(these_keys.group_key(), group_key);
created_keys.insert(these_keys.params().i(), these_keys);
}
for keys in created_keys.values() {
for (l, verification_share) in &verification_shares {
assert_eq!(keys.original_verification_share(*l), *verification_share);
}
}
assert_eq!(
<Ristretto as Ciphersuite>::generator() *
*recover_key(&created_keys.values().cloned().collect::<Vec<_>>()).unwrap(),
group_key
);
}
}

View File

@@ -1,37 +0,0 @@
[package]
name = "dkg-pedpop"
version = "0.6.0"
description = "The PedPoP distributed key generation protocol"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/pedpop"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
rust-version = "1.80"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
thiserror = { version = "2", default-features = false, features = ["std"] }
zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../../transcript", version = "^0.3.3", default-features = false, features = ["std", "recommended"] }
chacha20 = { version = "0.9", default-features = false, features = ["std", "zeroize"] }
multiexp = { path = "../../multiexp", version = "0.4", default-features = false, features = ["std"] }
ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../../schnorr", version = "^0.5.1", default-features = false, features = ["std"] }
dleq = { path = "../../dleq", version = "^0.4.1", default-features = false, features = ["std", "serialize"] }
dkg = { path = "../", version = "0.6", default-features = false, features = ["std"] }
[dev-dependencies]
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
dalek-ff-group = { path = "../../dalek-ff-group", default-features = false }

View File

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2021-2025 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,12 +0,0 @@
# Distributed Key Generation - PedPoP
This implements the PedPoP distributed key generation protocol for the
[`dkg`](https://docs.rs/dkg) crate's types.
This crate was originally part of the `dkg` crate, which was
[audited by Cypher Stack in March 2023](
https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf
), culminating in commit
[669d2dbffc1dafb82a09d9419ea182667115df06](
https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06
). Any subsequent changes have not undergone auditing.

View File

@@ -1,346 +0,0 @@
use std::collections::HashMap;

use rand_core::{RngCore, CryptoRng, OsRng};

use dalek_ff_group::Ristretto;
use ciphersuite::Ciphersuite;

use crate::*;

const THRESHOLD: u16 = 3;
const PARTICIPANTS: u16 = 5;

/// Clone a map, removing the entry for the specified key.
fn clone_without<K: Clone + core::cmp::Eq + core::hash::Hash, V: Clone>(
  map: &HashMap<K, V>,
  without: &K,
) -> HashMap<K, V> {
  let mut res = map.clone();
  res.remove(without).unwrap();
  res
}

type PedPoPEncryptedMessage<C> = EncryptedMessage<C, SecretShare<<C as Ciphersuite>::F>>;
type PedPoPSecretShares<C> = HashMap<Participant, PedPoPEncryptedMessage<C>>;
// Padded with trailing spaces so the context is exactly 32 bytes
const CONTEXT: [u8; 32] = *b"DKG Test Key Generation         ";
// Commit, then return commitment messages, enc keys, and shares
#[allow(clippy::type_complexity)]
fn commit_enc_keys_and_shares<R: RngCore + CryptoRng, C: Ciphersuite>(
  rng: &mut R,
) -> (
  HashMap<Participant, KeyMachine<C>>,
  HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
  HashMap<Participant, C::G>,
  HashMap<Participant, PedPoPSecretShares<C>>,
) {
  let mut machines = HashMap::new();
  let mut commitments = HashMap::new();
  let mut enc_keys = HashMap::new();
  for i in (1 ..= PARTICIPANTS).map(|i| Participant::new(i).unwrap()) {
    let params = ThresholdParams::new(THRESHOLD, PARTICIPANTS, i).unwrap();
    let machine = KeyGenMachine::<C>::new(params, CONTEXT);
    let (machine, these_commitments) = machine.generate_coefficients(rng);
    machines.insert(i, machine);

    commitments.insert(
      i,
      EncryptionKeyMessage::read::<&[u8]>(&mut these_commitments.serialize().as_ref(), params)
        .unwrap(),
    );
    enc_keys.insert(i, commitments[&i].enc_key());
  }

  let mut secret_shares = HashMap::new();
  let machines = machines
    .drain()
    .map(|(l, machine)| {
      let (machine, mut shares) =
        machine.generate_secret_shares(rng, clone_without(&commitments, &l)).unwrap();
      let shares = shares
        .drain()
        .map(|(l, share)| {
          (
            l,
            EncryptedMessage::read::<&[u8]>(
              &mut share.serialize().as_ref(),
              // Only t/n actually matters, so hardcode i to 1 here
              ThresholdParams::new(THRESHOLD, PARTICIPANTS, Participant::new(1).unwrap()).unwrap(),
            )
            .unwrap(),
          )
        })
        .collect::<HashMap<_, _>>();
      secret_shares.insert(l, shares);
      (l, machine)
    })
    .collect::<HashMap<_, _>>();

  (machines, commitments, enc_keys, secret_shares)
}
fn generate_secret_shares<C: Ciphersuite>(
  shares: &HashMap<Participant, PedPoPSecretShares<C>>,
  recipient: Participant,
) -> PedPoPSecretShares<C> {
  let mut our_secret_shares = HashMap::new();
  for (i, shares) in shares {
    if recipient == *i {
      continue;
    }
    our_secret_shares.insert(*i, shares[&recipient].clone());
  }
  our_secret_shares
}
/// Fully perform the PedPoP key generation algorithm.
fn pedpop_gen<R: RngCore + CryptoRng, C: Ciphersuite>(
  rng: &mut R,
) -> HashMap<Participant, ThresholdKeys<C>> {
  let (mut machines, _, _, secret_shares) = commit_enc_keys_and_shares::<_, C>(rng);

  let mut verification_shares = None;
  let mut group_key = None;
  machines
    .drain()
    .map(|(i, machine)| {
      let our_secret_shares = generate_secret_shares(&secret_shares, i);
      let these_keys = machine.calculate_share(rng, our_secret_shares).unwrap().complete();

      // Verify the verification_shares are agreed upon
      if verification_shares.is_none() {
        verification_shares = Some(
          these_keys
            .params()
            .all_participant_indexes()
            .map(|i| (i, these_keys.original_verification_share(i)))
            .collect::<HashMap<_, _>>(),
        );
      }
      assert_eq!(
        verification_shares.as_ref().unwrap(),
        &these_keys
          .params()
          .all_participant_indexes()
          .map(|i| (i, these_keys.original_verification_share(i)))
          .collect::<HashMap<_, _>>()
      );

      // Verify the group keys are agreed upon
      if group_key.is_none() {
        group_key = Some(these_keys.group_key());
      }
      assert_eq!(group_key.unwrap(), these_keys.group_key());

      (i, these_keys)
    })
    .collect::<HashMap<_, _>>()
}
const ONE: Participant = Participant::new(1).unwrap();
const TWO: Participant = Participant::new(2).unwrap();

#[test]
fn test_pedpop() {
  let _ = core::hint::black_box(pedpop_gen::<_, Ristretto>(&mut OsRng));
}
fn test_blame(
  commitment_msgs: &HashMap<Participant, EncryptionKeyMessage<Ristretto, Commitments<Ristretto>>>,
  machines: Vec<BlameMachine<Ristretto>>,
  msg: &PedPoPEncryptedMessage<Ristretto>,
  blame: &Option<EncryptionKeyProof<Ristretto>>,
) {
  for machine in machines {
    let (additional, blamed) = machine.blame(ONE, TWO, msg.clone(), blame.clone());
    assert_eq!(blamed, ONE);
    // Verify additional blame also works
    assert_eq!(additional.blame(ONE, TWO, msg.clone(), blame.clone()), ONE);

    // Verify machines constructed with AdditionalBlameMachine::new work
    assert_eq!(
      AdditionalBlameMachine::new(CONTEXT, PARTICIPANTS, commitment_msgs.clone()).unwrap().blame(
        ONE,
        TWO,
        msg.clone(),
        blame.clone()
      ),
      ONE,
    );
  }
}
// TODO: Write a macro which expands to the following
#[test]
fn invalid_encryption_pop_blame() {
  let (mut machines, commitment_msgs, _, mut secret_shares) =
    commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);

  // Mutate the PoP of the encrypted message from 1 to 2
  secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_pop();

  let mut blame = None;
  let machines = machines
    .drain()
    .filter_map(|(i, machine)| {
      let our_secret_shares = generate_secret_shares(&secret_shares, i);
      let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
      if i == TWO {
        assert_eq!(
          machine.err(),
          Some(PedPoPError::InvalidShare { participant: ONE, blame: None })
        );
        // Explicitly declare we have a blame object, which happens to be None, as an invalid PoP
        // is self-evident and needs no accompanying proof
        blame = Some(None);
        None
      } else {
        Some(machine.unwrap())
      }
    })
    .collect::<Vec<_>>();

  test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap());
}
#[test]
fn invalid_ecdh_blame() {
  let (mut machines, commitment_msgs, _, mut secret_shares) =
    commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);

  // Mutate the share from 2 to 1 to trigger a blame event
  // While 2 is the malicious party here, 1 is expected to end up blamed for test_blame to pass
  // This is because 1 creates the blame proof, which we then malleate so 1 ends up malicious
  // We don't simply invalidate the PoP, as an invalid PoP won't have a blame statement
  // Mutating the encrypted data ensures a blame statement is created
  secret_shares
    .get_mut(&TWO)
    .unwrap()
    .get_mut(&ONE)
    .unwrap()
    .invalidate_msg(&mut OsRng, CONTEXT, TWO);

  let mut blame = None;
  let machines = machines
    .drain()
    .filter_map(|(i, machine)| {
      let our_secret_shares = generate_secret_shares(&secret_shares, i);
      let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
      if i == ONE {
        blame = Some(match machine.err() {
          Some(PedPoPError::InvalidShare { participant: TWO, blame: Some(blame) }) => Some(blame),
          _ => panic!(),
        });
        None
      } else {
        Some(machine.unwrap())
      }
    })
    .collect::<Vec<_>>();

  blame.as_mut().unwrap().as_mut().unwrap().invalidate_key();
  test_blame(&commitment_msgs, machines, &secret_shares[&TWO][&ONE].clone(), &blame.unwrap());
}
// This should be largely equivalent to the prior test
#[test]
fn invalid_dleq_blame() {
  let (mut machines, commitment_msgs, _, mut secret_shares) =
    commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);

  secret_shares
    .get_mut(&TWO)
    .unwrap()
    .get_mut(&ONE)
    .unwrap()
    .invalidate_msg(&mut OsRng, CONTEXT, TWO);

  let mut blame = None;
  let machines = machines
    .drain()
    .filter_map(|(i, machine)| {
      let our_secret_shares = generate_secret_shares(&secret_shares, i);
      let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
      if i == ONE {
        blame = Some(match machine.err() {
          Some(PedPoPError::InvalidShare { participant: TWO, blame: Some(blame) }) => Some(blame),
          _ => panic!(),
        });
        None
      } else {
        Some(machine.unwrap())
      }
    })
    .collect::<Vec<_>>();

  blame.as_mut().unwrap().as_mut().unwrap().invalidate_dleq();
  test_blame(&commitment_msgs, machines, &secret_shares[&TWO][&ONE].clone(), &blame.unwrap());
}
#[test]
fn invalid_share_serialization_blame() {
  let (mut machines, commitment_msgs, enc_keys, mut secret_shares) =
    commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);

  secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_share_serialization(
    &mut OsRng,
    CONTEXT,
    ONE,
    enc_keys[&TWO],
  );

  let mut blame = None;
  let machines = machines
    .drain()
    .filter_map(|(i, machine)| {
      let our_secret_shares = generate_secret_shares(&secret_shares, i);
      let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
      if i == TWO {
        blame = Some(match machine.err() {
          Some(PedPoPError::InvalidShare { participant: ONE, blame: Some(blame) }) => Some(blame),
          _ => panic!(),
        });
        None
      } else {
        Some(machine.unwrap())
      }
    })
    .collect::<Vec<_>>();

  test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap());
}
#[test]
fn invalid_share_value_blame() {
  let (mut machines, commitment_msgs, enc_keys, mut secret_shares) =
    commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);

  secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_share_value(
    &mut OsRng,
    CONTEXT,
    ONE,
    enc_keys[&TWO],
  );

  let mut blame = None;
  let machines = machines
    .drain()
    .filter_map(|(i, machine)| {
      let our_secret_shares = generate_secret_shares(&secret_shares, i);
      let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
      if i == TWO {
        blame = Some(match machine.err() {
          Some(PedPoPError::InvalidShare { participant: ONE, blame: Some(blame) }) => Some(blame),
          _ => panic!(),
        });
        None
      } else {
        Some(machine.unwrap())
      }
    })
    .collect::<Vec<_>>();

  test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap());
}

View File

@@ -1,34 +0,0 @@
[package]
name = "dkg-promote"
version = "0.6.1"
description = "Promotions for keys from the dkg crate"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/promote"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
rust-version = "1.80"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
thiserror = { version = "2", default-features = false, features = ["std"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }

transcript = { package = "flexible-transcript", path = "../../transcript", version = "^0.3.2", default-features = false, features = ["std", "recommended"] }
ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false, features = ["std"] }
dleq = { path = "../../dleq", version = "^0.4.1", default-features = false, features = ["std", "serialize"] }

dkg = { path = "../", version = "0.6.1", default-features = false, features = ["std"] }

[dev-dependencies]
zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] }
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }

dalek-ff-group = { path = "../../dalek-ff-group" }
dkg-recovery = { path = "../recovery", default-features = false, features = ["std"] }

View File

@@ -1,21 +0,0 @@
MIT License

Copyright (c) 2021-2025 Luke Parker

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,13 +0,0 @@
# Distributed Key Generation - Promote

This crate implements 'promotions' for keys from the
[`dkg`](https://docs.rs/dkg) crate. A promotion takes a set of keys and maps it
to a different `Ciphersuite`.

This crate was originally part of the `dkg` crate, which was
[audited by Cypher Stack in March 2023](
https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf
), culminating in commit
[669d2dbffc1dafb82a09d9419ea182667115df06](
https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06
). Any subsequent changes have not undergone auditing.
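
A minimal sketch of the flow, mirroring the test removed later in this diff: each holder of a `ThresholdKeys` promotes their keys, broadcasts the resulting proof, and completes the promotion against everyone else's proofs. Proofs are passed directly instead of serialized, errors are unwrapped, and the trait bound on `C2` is a guess at what `GeneratorPromotion` requires (a ciphersuite sharing Ristretto's field and group); a concrete `C2` would be defined like the test's `AltGenerator`.

use std::collections::HashMap;

use rand_core::OsRng;
use dalek_ff_group::Ristretto;
use ciphersuite::Ciphersuite;
use dkg::{Participant, ThresholdKeys};
use dkg_promote::GeneratorPromotion;

fn promote_all<C2>(
  keys: &HashMap<Participant, ThresholdKeys<Ristretto>>,
) -> HashMap<Participant, ThresholdKeys<C2>>
where
  // Assumed bound: the target ciphersuite reuses the field and group, swapping the generator
  C2: Ciphersuite<F = <Ristretto as Ciphersuite>::F, G = <Ristretto as Ciphersuite>::G>,
{
  // Step 1: every participant creates a promotion, broadcasting the accompanying proof
  let mut promotions = HashMap::new();
  let mut proofs = HashMap::new();
  for (i, k) in keys {
    let (promotion, proof) = GeneratorPromotion::<_, C2>::promote(&mut OsRng, k.clone());
    promotions.insert(*i, promotion);
    proofs.insert(*i, proof);
  }

  // Step 2: every participant completes their promotion against everyone else's proofs
  promotions
    .into_iter()
    .map(|(i, promoting)| {
      let mut others = proofs.clone();
      others.remove(&i);
      (i, promoting.complete(&others).unwrap())
    })
    .collect()
}

The group key and secret shares are preserved; only the generator (and thus the public keys' representation) changes, as the test's assertions below check.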

View File

@@ -1,113 +0,0 @@
use core::marker::PhantomData;
use std::collections::HashMap;

use zeroize::{Zeroize, Zeroizing};
use rand_core::OsRng;

use dalek_ff_group::Ristretto;
use ciphersuite::{
  group::{ff::Field, Group},
  Ciphersuite,
};

use dkg::*;
use dkg_recovery::recover_key;

use crate::{GeneratorPromotion, GeneratorProof};
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
struct AltGenerator<C: Ciphersuite> {
  _curve: PhantomData<C>,
}

impl<C: Ciphersuite> Ciphersuite for AltGenerator<C> {
  type F = C::F;
  type G = C::G;
  type H = C::H;

  const ID: &'static [u8] = b"Alternate Ciphersuite";

  fn generator() -> Self::G {
    C::G::generator() * <C as Ciphersuite>::hash_to_F(b"DKG Promotion Test", b"generator")
  }

  fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
    <C as Ciphersuite>::hash_to_F(dst, data)
  }
}
/// Clone a map, removing the entry for the specified key.
pub fn clone_without<K: Clone + core::cmp::Eq + core::hash::Hash, V: Clone>(
  map: &HashMap<K, V>,
  without: &K,
) -> HashMap<K, V> {
  let mut res = map.clone();
  res.remove(without).unwrap();
  res
}
// Test promotion of threshold keys to another generator
#[test]
fn test_generator_promotion() {
  // Generate a set of `ThresholdKeys`
  const PARTICIPANTS: u16 = 5;
  let keys: [ThresholdKeys<_>; PARTICIPANTS as usize] = {
    let shares: [<Ristretto as Ciphersuite>::F; PARTICIPANTS as usize] =
      core::array::from_fn(|_| <Ristretto as Ciphersuite>::F::random(&mut OsRng));
    let verification_shares = (0 .. PARTICIPANTS)
      .map(|i| {
        (
          Participant::new(i + 1).unwrap(),
          <Ristretto as Ciphersuite>::generator() * shares[usize::from(i)],
        )
      })
      .collect::<HashMap<_, _>>();

    core::array::from_fn(|i| {
      ThresholdKeys::new(
        ThresholdParams::new(
          PARTICIPANTS,
          PARTICIPANTS,
          Participant::new(u16::try_from(i + 1).unwrap()).unwrap(),
        )
        .unwrap(),
        Interpolation::Constant(vec![<Ristretto as Ciphersuite>::F::ONE; PARTICIPANTS as usize]),
        Zeroizing::new(shares[i]),
        verification_shares.clone(),
      )
      .unwrap()
    })
  };

  // Perform the promotion
  let mut promotions = HashMap::new();
  let mut proofs = HashMap::new();
  for keys in &keys {
    let i = keys.params().i();
    let (promotion, proof) =
      GeneratorPromotion::<_, AltGenerator<Ristretto>>::promote(&mut OsRng, keys.clone());
    promotions.insert(i, promotion);
    proofs.insert(
      i,
      GeneratorProof::<Ristretto>::read::<&[u8]>(&mut proof.serialize().as_ref()).unwrap(),
    );
  }

  // Complete the promotion, and verify it worked
  let new_group_key = AltGenerator::<Ristretto>::generator() * *recover_key(&keys).unwrap();
  for (i, promoting) in promotions.drain() {
    let promoted = promoting.complete(&clone_without(&proofs, &i)).unwrap();
    assert_eq!(keys[usize::from(u16::from(i) - 1)].params(), promoted.params());
    assert_eq!(
      keys[usize::from(u16::from(i) - 1)].original_secret_share(),
      promoted.original_secret_share()
    );
    assert_eq!(new_group_key, promoted.group_key());
    for l in 0 .. PARTICIPANTS {
      let verification_share =
        promoted.original_verification_share(Participant::new(l + 1).unwrap());
      assert_eq!(
        AltGenerator::<Ristretto>::generator() * **keys[usize::from(l)].original_secret_share(),
        verification_share
      );
    }
  }
}

Some files were not shown because too many files have changed in this diff.