43 Commits

Author SHA1 Message Date
Luke Parker
8c50a31633 Improve documentation on functions 2024-08-06 00:27:54 -04:00
Luke Parker
d943e037e5 Remove bad panic in coordinator
It expected ConfirmationShare to be n-of-n, not t-of-n.
2024-08-06 00:27:19 -04:00
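A minimal sketch of the fix described above, using hypothetical names (`shares`, `t`, `n`); the actual coordinator code differs:

// Before, the coordinator panicked unless all n participants had provided a
// ConfirmationShare. The protocol only requires a threshold, so any count
// from t to n is valid.
fn check_confirmation_shares(shares: usize, t: usize, n: usize) {
  assert!((t ..= n).contains(&shares), "invalid amount of ConfirmationShares");
}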
Luke Parker
3042697243 Update orchestration 2024-08-06 00:27:07 -04:00
Luke Parker
8de696f169 Add an extra sleep to ensure expected ordering of Participations 2024-08-05 23:50:23 -04:00
Luke Parker
b8912e4b7b cargo machete 2024-08-05 23:43:38 -04:00
Luke Parker
89fc88b283 Get clippy to pass across the repo 2024-08-05 23:29:51 -04:00
Luke Parker
2ae2883106 Update spec to the new DKG 2024-08-05 06:58:44 -04:00
Luke Parker
e74c8f38d5 Get coordinator tests to pass 2024-08-05 06:50:26 -04:00
Luke Parker
9e8e134ef7 Replace Interpolation::None with Interpolation::Constant
Allows the MuSig DKG to keep the secret share as the original private key,
enabling FROST nonces to be derived consistently regardless of the MuSig context.
2024-08-05 06:32:37 -04:00
Luke Parker
f08faeadff Have the DKG explicitly declare how to interpolate its shares
Removes the hack for MuSig where we multiply keys by the inverse of their
Lagrange interpolation factor.
2024-08-05 06:06:56 -04:00
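The two commits above amount to making interpolation an explicit, declared property of the DKG. A hedged sketch of the idea, with illustrative names rather than the dkg crate's actual API:

// How shares are combined into the group secret.
enum Interpolation {
  // Shamir-style: weight each share by its Lagrange coefficient.
  Lagrange,
  // MuSig-style: combine shares with a constant weight of 1, so each secret
  // share stays the original private key and FROST nonces can be derived
  // consistently, with no need to multiply keys by the inverse of their
  // Lagrange coefficient.
  Constant,
}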
Luke Parker
1b7613329c Add sensible Debug to key_gen::[Processor, Coordinator]Message 2024-08-05 04:04:02 -04:00
Luke Parker
54eefbde0c Update the coordinator binary for the new DKG
This does not yet update any tests.
2024-08-04 04:48:12 -04:00
Luke Parker
58a435d4e9 Have set_keys take signature_participants, not removed_participants
Now no one is removed from the DKG; however, only `t` participants publish the key.

Uses a BitVec for an efficient encoding of the participants.
2024-08-04 01:19:08 -04:00
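A hedged sketch of that BitVec encoding, assuming the `bitvec` crate and hypothetical names (`signers`, `n`):

use bitvec::prelude::*;

// One bit per potential participant, set if they signed. This encodes
// signature_participants in n bits rather than as a list of indices.
fn signature_participants(signers: &[u16], n: usize) -> BitVec<u8, Lsb0> {
  let mut bits = bitvec![u8, Lsb0; 0; n];
  for signer in signers {
    bits.set(usize::from(*signer), true);
  }
  bits
}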
Luke Parker
5ed355902b Update processor key gen tests to the eVRF DKG 2024-08-03 03:17:55 -04:00
Luke Parker
fc51c9b71c Add embedded elliptic curve keys to Substrate 2024-08-03 02:24:08 -04:00
Luke Parker
9e716c07fc Correct the number of yx coefficients, get processor key gen test to pass 2024-08-02 05:03:14 -04:00
Luke Parker
b5bf70bdb1 Update serai-processor tests to the new key gen 2024-08-02 00:53:24 -04:00
Luke Parker
d3f0378f66 Deduplicate and better document in processor key_gen 2024-08-02 00:53:24 -04:00
Luke Parker
2f564c230e Finish routing the new key gen in the processor
Doesn't touch the tests, coordinator, or Substrate yet.
`cargo +nightly fmt && cargo +nightly-2024-07-01 clippy --all-features -p serai-processor`
does pass.
2024-08-02 00:53:24 -04:00
Luke Parker
12f74e1813 Rewrite processor key-gen around the eVRF DKG
Still a WIP.
2024-08-02 00:53:24 -04:00
Luke Parker
fb7e966b94 Only participate once per key, not once per key share 2024-08-02 00:53:24 -04:00
Luke Parker
65efbf46c7 Support participating multiple times in the eVRF DKG 2024-08-02 00:53:23 -04:00
Luke Parker
c5cc0dc883 Add Ristretto eVRF trait impls 2024-08-02 00:53:23 -04:00
Luke Parker
a6775d7dc5 Implement eVRF traits, all the way up to the DKG, for secp256k1/ed25519 2024-08-02 00:53:23 -04:00
Luke Parker
681010f422 Ban zero ECDH keys, document non-zero requirements 2024-08-02 00:53:23 -04:00
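A minimal sketch of the ban, under the assumption that a "zero" key means the group identity; names here are illustrative:

use group::Group;

// An identity ECDH public key would make every shared secret the identity,
// so it's rejected outright.
fn validate_ecdh_key<G: Group>(key: &G) -> Result<(), &'static str> {
  if bool::from(key.is_identity()) {
    return Err("zero ECDH key");
  }
  Ok(())
}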
Luke Parker
f93bd42b99 Resolve various TODOs
Supports recovering multiple key shares from the eVRF DKG.

Inlines two loops to save 2**16 iterations.

Adds support for creating a constant-time representation of scalars < NUM_BITS.
2024-08-02 00:53:23 -04:00
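A hedged sketch of what such a constant-time representation can look like (assuming a little-endian `bytes` encoding with at least NUM_BITS bits, and a fixed `NUM_BITS`; not the actual implementation):

use subtle::Choice;

// The loop bound is fixed at NUM_BITS, so the work performed is independent
// of the scalar's value; each bit is held as a subtle::Choice.
fn scalar_bits<const NUM_BITS: usize>(bytes: &[u8]) -> [Choice; NUM_BITS] {
  core::array::from_fn(|i| Choice::from((bytes[i / 8] >> (i % 8)) & 1))
}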
Luke Parker
c960d6baaf Start using a proper error for the eVRF DKG 2024-08-02 00:53:23 -04:00
Luke Parker
31ac0ac299 Improve eVRF DKG
Updates how we calculate verification shares, improves performance when
extracting multiple sets of keys, and adds more to the test for it.
2024-08-02 00:53:23 -04:00
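For context, a verification share is derivable purely from the public commitments to the polynomial coefficients. A hedged sketch with generic names, not the crate's API:

use group::Group;
use ff::Field;

// Participant i's verification share: V_i = sum_j C_j * i^j, where the C_j
// are the public commitments to the coefficients of the degree-(t-1)
// polynomial.
fn verification_share<G: Group>(coeff_commitments: &[G], i: u64) -> G {
  let i = G::Scalar::from(i);
  let mut power = G::Scalar::ONE;
  let mut share = G::identity();
  for commitment in coeff_commitments {
    share += *commitment * power;
    power *= i;
  }
  share
}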
Luke Parker
4bd0d71406 Add initial eVRF DKG test 2024-08-02 00:53:23 -04:00
Luke Parker
ef68885600 Finish routing the eVRF functionality
Still needs errors and serialization, along with a few other TODOs.
2024-08-02 00:53:23 -04:00
Luke Parker
00dc3087bd Update to the new eVRF proof 2024-08-02 00:53:23 -04:00
Luke Parker
eca82f3f7b Add paragraph claiming robustness 2024-08-02 00:53:23 -04:00
Luke Parker
05c26d7818 Add documentation to the eVRF-based DKG 2024-08-02 00:53:23 -04:00
Luke Parker
96175e115d Inline the eVRF into the DKG library
Due to how we're handling share encryption, we'd either need two circuits or to
dedicate this circuit to the DKG. The latter makes sense at this time.
2024-08-02 00:53:23 -04:00
Luke Parker
fa31f26397 Add embedwards25519 curve 2024-08-02 00:53:23 -04:00
Luke Parker
7710da4db3 Initial eVRF-based DKG 2024-08-02 00:53:23 -04:00
Luke Parker
b7103038cb Have the eVRF take a Zeroizing private key 2024-08-02 00:53:23 -04:00
Luke Parker
b6e688076f Make NUM_BITS an argument for the field macro 2024-08-02 00:53:23 -04:00
Luke Parker
b8472963c9 Make DKG Encryption a bit more flexible
No longer requires the use of an EncryptionKeyMessage, and allows pre-defined
keys for encryption.
2024-08-02 00:53:23 -04:00
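A rough sketch of the added flexibility, with illustrative types only (the dkg crate's actual signatures differ):

use ciphersuite::Ciphersuite;

// The encryption key may still arrive in-band, yet can now also be supplied
// directly by the caller.
enum EncryptionKeySource<C: Ciphersuite> {
  // A serialized EncryptionKeyMessage, as before.
  Message(Vec<u8>),
  // A pre-defined key, used as-is.
  PreDefined(C::G),
}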
Luke Parker
772d033bb2 Add implementation of secq256k1 2024-08-02 00:53:23 -04:00
Luke Parker
49a183194d Add the openings of the PCs to the eVRF as necessary 2024-08-02 00:53:23 -04:00
Luke Parker
db31809708 Initial eVRF implementation
Not quite done yet. It still needs to communicate the resulting points, and the
proofs to extract them from the Pedersen Commitments, in order to return those;
then it needs to be tested.
2024-08-02 00:53:23 -04:00
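Since the remaining work involves opening Pedersen Commitments, a brief hedged sketch of the structure in question (the generators `g`, `h` and these names are assumptions):

use group::Group;

// A Pedersen Commitment C = x*g + r*h; extracting the committed value means
// communicating (x, r), or a proof about them, alongside the commitment.
fn commit<G: Group>(g: G, h: G, x: G::Scalar, r: G::Scalar) -> G {
  (g * x) + (h * r)
}

fn verify_opening<G: Group>(g: G, h: G, c: G, x: G::Scalar, r: G::Scalar) -> bool {
  c == commit(g, h, x, r)
}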
Luke Parker
dcc26ecf33 Upstream GBP, divisor, circuit abstraction, and EC gadgets from FCMP++ 2024-08-02 00:53:22 -04:00
546 changed files with 77039 additions and 13114 deletions

View File

@@ -12,7 +12,7 @@ runs:
steps: steps:
- name: Bitcoin Daemon Cache - name: Bitcoin Daemon Cache
id: cache-bitcoind id: cache-bitcoind
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
with: with:
path: bitcoin.tar.gz path: bitcoin.tar.gz
key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }} key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

View File

@@ -7,20 +7,13 @@ runs:
- name: Remove unused packages - name: Remove unused packages
shell: bash shell: bash
run: | run: |
# Ensure the repositories are synced sudo apt remove -y "*msbuild*" "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
sudo apt update -y
# Actually perform the removals
sudo apt remove -y "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
sudo apt remove -y "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*" sudo apt remove -y "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*"
sudo apt remove -y "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*" sudo apt remove -y "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*"
sudo apt remove -y --allow-remove-essential -f shim-signed *python3*
# This removal command requires the prior removals due to unmet dependencies otherwise
sudo apt remove -y "*qemu*" "*sql*" "*texinfo*" "*imagemagick*" sudo apt remove -y "*qemu*" "*sql*" "*texinfo*" "*imagemagick*"
sudo apt autoremove -y
# Reinstall python3 as a general dependency of a functional operating system sudo apt clean
sudo apt install -y python3 --fix-missing docker system prune -a --volumes
if: runner.os == 'Linux' if: runner.os == 'Linux'
- name: Remove unused packages - name: Remove unused packages
@@ -38,48 +31,19 @@ runs:
shell: bash shell: bash
run: | run: |
if [ "$RUNNER_OS" == "Linux" ]; then if [ "$RUNNER_OS" == "Linux" ]; then
sudo apt install -y ca-certificates protobuf-compiler libclang-dev sudo apt install -y ca-certificates protobuf-compiler
elif [ "$RUNNER_OS" == "Windows" ]; then elif [ "$RUNNER_OS" == "Windows" ]; then
choco install protoc choco install protoc
elif [ "$RUNNER_OS" == "macOS" ]; then elif [ "$RUNNER_OS" == "macOS" ]; then
brew install protobuf llvm brew install protobuf
HOMEBREW_ROOT_PATH=/opt/homebrew # Apple Silicon
if [ $(uname -m) = "x86_64" ]; then HOMEBREW_ROOT_PATH=/usr/local; fi # Intel
ls $HOMEBREW_ROOT_PATH/opt/llvm/lib | grep "libclang.dylib" # Make sure this installed `libclang`
echo "DYLD_LIBRARY_PATH=$HOMEBREW_ROOT_PATH/opt/llvm/lib:$DYLD_LIBRARY_PATH" >> "$GITHUB_ENV"
fi fi
- name: Install solc - name: Install solc
shell: bash shell: bash
run: | run: |
cargo +1.89 install svm-rs --version =0.5.18 cargo install svm-rs
svm install 0.8.26 svm install 0.8.25
svm use 0.8.26 svm use 0.8.25
- name: Remove preinstalled Docker
shell: bash
run: |
docker system prune -a --volumes
sudo apt remove -y *docker*
# Install uidmap which will be required for the explicitly installed Docker
sudo apt install uidmap
if: runner.os == 'Linux'
- name: Update system dependencies
shell: bash
run: |
sudo apt update -y
sudo apt upgrade -y
sudo apt autoremove -y
sudo apt clean
if: runner.os == 'Linux'
- name: Install rootless Docker
uses: docker/setup-docker-action@b60f85385d03ac8acfca6d9996982511d8620a19
with:
rootless: true
set-host: true
if: runner.os == 'Linux'
# - name: Cache Rust # - name: Cache Rust
# uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 # uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43

View File

@@ -5,14 +5,14 @@ inputs:
version: version:
description: "Version to download and run" description: "Version to download and run"
required: false required: false
default: v0.18.3.4 default: v0.18.3.1
runs: runs:
using: "composite" using: "composite"
steps: steps:
- name: Monero Wallet RPC Cache - name: Monero Wallet RPC Cache
id: cache-monero-wallet-rpc id: cache-monero-wallet-rpc
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
with: with:
path: monero-wallet-rpc path: monero-wallet-rpc
key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }} key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

View File

@@ -5,14 +5,14 @@ inputs:
version: version:
description: "Version to download and run" description: "Version to download and run"
required: false required: false
default: v0.18.3.4 default: v0.18.3.1
runs: runs:
using: "composite" using: "composite"
steps: steps:
- name: Monero Daemon Cache - name: Monero Daemon Cache
id: cache-monerod id: cache-monerod
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
with: with:
path: /usr/bin/monerod path: /usr/bin/monerod
key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }} key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

View File

@@ -5,7 +5,7 @@ inputs:
monero-version: monero-version:
description: "Monero version to download and run as a regtest node" description: "Monero version to download and run as a regtest node"
required: false required: false
default: v0.18.3.4 default: v0.18.3.1
bitcoin-version: bitcoin-version:
description: "Bitcoin version to download and run as a regtest node" description: "Bitcoin version to download and run as a regtest node"

View File

@@ -1 +1 @@
nightly-2025-11-01 nightly-2024-07-01

View File

@@ -27,7 +27,6 @@ jobs:
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \ GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
-p std-shims \ -p std-shims \
-p zalloc \ -p zalloc \
-p patchable-async-sleep \
-p serai-db \ -p serai-db \
-p serai-env \ -p serai-env \
-p simple-request -p simple-request

View File

@@ -32,15 +32,13 @@ jobs:
-p dalek-ff-group \ -p dalek-ff-group \
-p minimal-ed448 \ -p minimal-ed448 \
-p ciphersuite \ -p ciphersuite \
-p ciphersuite-kp256 \
-p multiexp \ -p multiexp \
-p schnorr-signatures \ -p schnorr-signatures \
-p dleq \ -p dleq \
-p generalized-bulletproofs \
-p generalized-bulletproofs-circuit-abstraction \
-p ec-divisors \
-p generalized-bulletproofs-ec-gadgets \
-p dkg \ -p dkg \
-p dkg-recovery \
-p dkg-dealer \
-p dkg-promote \
-p dkg-musig \
-p dkg-pedpop \
-p modular-frost \ -p modular-frost \
-p frost-schnorrkel -p frost-schnorrkel

View File

@@ -12,13 +12,13 @@ jobs:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Advisory Cache - name: Advisory Cache
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
with: with:
path: ~/.cargo/advisory-db path: ~/.cargo/advisory-db
key: rust-advisory-db key: rust-advisory-db
- name: Install cargo deny - name: Install cargo deny
run: cargo +1.89 install cargo-deny --version =0.18.3 run: cargo install --locked cargo-deny
- name: Run cargo deny - name: Run cargo deny
run: cargo deny -L error --all-features check --hide-inclusion-graph run: cargo deny -L error --all-features check

View File

@@ -11,7 +11,7 @@ jobs:
clippy: clippy:
strategy: strategy:
matrix: matrix:
os: [ubuntu-latest, macos-15-intel, macos-latest, windows-latest] os: [ubuntu-latest, macos-13, macos-14, windows-latest]
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
steps: steps:
@@ -26,7 +26,7 @@ jobs:
uses: ./.github/actions/build-dependencies uses: ./.github/actions/build-dependencies
- name: Install nightly rust - name: Install nightly rust
run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-src -c clippy run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32-unknown-unknown -c clippy
- name: Run Clippy - name: Run Clippy
run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module
@@ -46,16 +46,16 @@ jobs:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Advisory Cache - name: Advisory Cache
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
with: with:
path: ~/.cargo/advisory-db path: ~/.cargo/advisory-db
key: rust-advisory-db key: rust-advisory-db
- name: Install cargo deny - name: Install cargo deny
run: cargo +1.89 install cargo-deny --version =0.18.4 run: cargo install --locked cargo-deny
- name: Run cargo deny - name: Run cargo deny
run: cargo deny -L error --all-features check --hide-inclusion-graph run: cargo deny -L error --all-features check
fmt: fmt:
runs-on: ubuntu-latest runs-on: ubuntu-latest
@@ -79,5 +79,5 @@ jobs:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Verify all dependencies are in use - name: Verify all dependencies are in use
run: | run: |
cargo +1.89 install cargo-machete --version =0.8.0 cargo install cargo-machete
cargo +1.89 machete cargo machete

77
.github/workflows/monero-tests.yaml vendored Normal file
View File

@@ -0,0 +1,77 @@
name: Monero Tests
on:
push:
branches:
- develop
paths:
- "networks/monero/**"
- "processor/**"
pull_request:
paths:
- "networks/monero/**"
- "processor/**"
workflow_dispatch:
jobs:
# Only run these once since they will be consistent regardless of any node
unit-tests:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Test Dependencies
uses: ./.github/actions/test-dependencies
- name: Run Unit Tests Without Features
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-io --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-generators --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-primitives --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-mlsag --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-clsag --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-borromean --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-bulletproofs --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-rpc --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-address --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-seed --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package polyseed --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --lib
# Doesn't run unit tests with features as the tests workflow will
integration-tests:
runs-on: ubuntu-latest
# Test against all supported protocol versions
strategy:
matrix:
version: [v0.17.3.2, v0.18.2.0]
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Test Dependencies
uses: ./.github/actions/test-dependencies
with:
monero-version: ${{ matrix.version }}
- name: Run Integration Tests Without Features
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --test '*'
- name: Run Integration Tests
# Don't run if the tests workflow also will
if: ${{ matrix.version != 'v0.18.2.0' }}
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --all-features --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --all-features --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --all-features --test '*'

View File

@@ -33,3 +33,19 @@ jobs:
-p alloy-simple-request-transport \ -p alloy-simple-request-transport \
-p ethereum-serai \ -p ethereum-serai \
-p serai-ethereum-relayer \ -p serai-ethereum-relayer \
-p monero-io \
-p monero-generators \
-p monero-primitives \
-p monero-mlsag \
-p monero-clsag \
-p monero-borromean \
-p monero-bulletproofs \
-p monero-serai \
-p monero-rpc \
-p monero-simple-request-rpc \
-p monero-address \
-p monero-wallet \
-p monero-seed \
-p polyseed \
-p monero-wallet-util \
-p monero-serai-verify-chain

View File

@@ -1,7 +1,6 @@
# MIT License # MIT License
# #
# Copyright (c) 2022 just-the-docs # Copyright (c) 2022 just-the-docs
# Copyright (c) 2022-2024 Luke Parker
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy # Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal # of this software and associated documentation files (the "Software"), to deal
@@ -21,21 +20,31 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE. # SOFTWARE.
name: Deploy Rust docs and Jekyll site to Pages # This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
# Sample workflow for building and deploying a Jekyll site to GitHub Pages
name: Deploy Jekyll site to Pages
on: on:
push: push:
branches: branches:
- "develop" - "develop"
paths:
- "docs/**"
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch: workflow_dispatch:
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions: permissions:
contents: read contents: read
pages: write pages: write
id-token: write id-token: write
# Only allow one concurrent deployment # Allow one concurrent deployment
concurrency: concurrency:
group: "pages" group: "pages"
cancel-in-progress: true cancel-in-progress: true
@@ -44,37 +53,27 @@ jobs:
# Build job # Build job
build: build:
runs-on: ubuntu-latest runs-on: ubuntu-latest
defaults:
run:
working-directory: docs
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac uses: actions/checkout@v3
- name: Setup Ruby - name: Setup Ruby
uses: ruby/setup-ruby@44511735964dcb71245e7e55f72539531f7bc0eb uses: ruby/setup-ruby@v1
with: with:
bundler-cache: true bundler-cache: true
cache-version: 0 cache-version: 0
working-directory: "${{ github.workspace }}/docs" working-directory: "${{ github.workspace }}/docs"
- name: Setup Pages - name: Setup Pages
id: pages id: pages
uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b uses: actions/configure-pages@v3
- name: Build with Jekyll - name: Build with Jekyll
run: cd ${{ github.workspace }}/docs && bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}" run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
env: env:
JEKYLL_ENV: production JEKYLL_ENV: production
- name: Get nightly version to use
id: nightly
shell: bash
run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT
- name: Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Build Rust docs
run: |
rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-docs
RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --no-deps --all-features
mv target/doc docs/_site/rust
- name: Upload artifact - name: Upload artifact
uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b uses: actions/upload-pages-artifact@v1
with: with:
path: "docs/_site/" path: "docs/_site/"
@@ -88,4 +87,4 @@ jobs:
steps: steps:
- name: Deploy to GitHub Pages - name: Deploy to GitHub Pages
id: deployment id: deployment
uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e uses: actions/deploy-pages@v2

View File

@@ -63,11 +63,6 @@ jobs:
-p serai-dex-pallet \ -p serai-dex-pallet \
-p serai-validator-sets-primitives \ -p serai-validator-sets-primitives \
-p serai-validator-sets-pallet \ -p serai-validator-sets-pallet \
-p serai-genesis-liquidity-primitives \
-p serai-genesis-liquidity-pallet \
-p serai-emissions-primitives \
-p serai-emissions-pallet \
-p serai-economic-security-pallet \
-p serai-in-instructions-primitives \ -p serai-in-instructions-primitives \
-p serai-in-instructions-pallet \ -p serai-in-instructions-pallet \
-p serai-signals-primitives \ -p serai-signals-primitives \

7
.gitignore vendored
View File

@@ -1,14 +1,7 @@
target target
# Don't commit any `Cargo.lock` which aren't the workspace's
Cargo.lock
!./Cargo.lock
# Don't commit any `Dockerfile`, as they're auto-generated, except the only one which isn't
Dockerfile Dockerfile
Dockerfile.fast-epoch Dockerfile.fast-epoch
!orchestration/runtime/Dockerfile !orchestration/runtime/Dockerfile
.test-logs .test-logs
.vscode .vscode

7039
Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -1,8 +1,16 @@
[workspace] [workspace]
resolver = "2" resolver = "2"
members = [ members = [
# Version patches
"patches/parking_lot_core",
"patches/parking_lot",
"patches/zstd",
"patches/rocksdb",
"patches/proc-macro-crate",
# std patches # std patches
"patches/matches", "patches/matches",
"patches/is-terminal",
# Rewrites/redirects # Rewrites/redirects
"patches/option-ext", "patches/option-ext",
@@ -10,7 +18,6 @@ members = [
"common/std-shims", "common/std-shims",
"common/zalloc", "common/zalloc",
"common/patchable-async-sleep",
"common/db", "common/db",
"common/env", "common/env",
"common/request", "common/request",
@@ -21,18 +28,19 @@ members = [
"crypto/dalek-ff-group", "crypto/dalek-ff-group",
"crypto/ed448", "crypto/ed448",
"crypto/ciphersuite", "crypto/ciphersuite",
"crypto/ciphersuite/kp256",
"crypto/multiexp", "crypto/multiexp",
"crypto/schnorr", "crypto/schnorr",
"crypto/dleq", "crypto/dleq",
"crypto/evrf/secq256k1",
"crypto/evrf/embedwards25519",
"crypto/evrf/generalized-bulletproofs",
"crypto/evrf/circuit-abstraction",
"crypto/evrf/divisors",
"crypto/evrf/ec-gadgets",
"crypto/dkg", "crypto/dkg",
"crypto/dkg/recovery",
"crypto/dkg/dealer",
"crypto/dkg/promote",
"crypto/dkg/musig",
"crypto/dkg/pedpop",
"crypto/frost", "crypto/frost",
"crypto/schnorrkel", "crypto/schnorrkel",
@@ -42,6 +50,23 @@ members = [
"networks/ethereum", "networks/ethereum",
"networks/ethereum/relayer", "networks/ethereum/relayer",
"networks/monero/io",
"networks/monero/generators",
"networks/monero/primitives",
"networks/monero/ringct/mlsag",
"networks/monero/ringct/clsag",
"networks/monero/ringct/borromean",
"networks/monero/ringct/bulletproofs",
"networks/monero",
"networks/monero/rpc",
"networks/monero/rpc/simple-request",
"networks/monero/wallet/address",
"networks/monero/wallet",
"networks/monero/wallet/seed",
"networks/monero/wallet/polyseed",
"networks/monero/wallet/util",
"networks/monero/verify-chain",
"message-queue", "message-queue",
"processor/messages", "processor/messages",
@@ -61,14 +86,6 @@ members = [
"substrate/validator-sets/primitives", "substrate/validator-sets/primitives",
"substrate/validator-sets/pallet", "substrate/validator-sets/pallet",
"substrate/genesis-liquidity/primitives",
"substrate/genesis-liquidity/pallet",
"substrate/emissions/primitives",
"substrate/emissions/pallet",
"substrate/economic-security/pallet",
"substrate/in-instructions/primitives", "substrate/in-instructions/primitives",
"substrate/in-instructions/pallet", "substrate/in-instructions/pallet",
@@ -100,37 +117,56 @@ members = [
# to the extensive operations required for Bulletproofs # to the extensive operations required for Bulletproofs
[profile.dev.package] [profile.dev.package]
subtle = { opt-level = 3 } subtle = { opt-level = 3 }
curve25519-dalek = { opt-level = 3 }
ff = { opt-level = 3 } ff = { opt-level = 3 }
group = { opt-level = 3 } group = { opt-level = 3 }
crypto-bigint = { opt-level = 3 } crypto-bigint = { opt-level = 3 }
secp256k1 = { opt-level = 3 }
curve25519-dalek = { opt-level = 3 }
dalek-ff-group = { opt-level = 3 } dalek-ff-group = { opt-level = 3 }
minimal-ed448 = { opt-level = 3 } minimal-ed448 = { opt-level = 3 }
multiexp = { opt-level = 3 } multiexp = { opt-level = 3 }
monero-oxide = { opt-level = 3 } secq256k1 = { opt-level = 3 }
embedwards25519 = { opt-level = 3 }
generalized-bulletproofs = { opt-level = 3 }
generalized-bulletproofs-circuit-abstraction = { opt-level = 3 }
ec-divisors = { opt-level = 3 }
generalized-bulletproofs-ec-gadgets = { opt-level = 3 }
dkg = { opt-level = 3 }
monero-generators = { opt-level = 3 }
monero-borromean = { opt-level = 3 }
monero-bulletproofs = { opt-level = 3 }
monero-mlsag = { opt-level = 3 }
monero-clsag = { opt-level = 3 }
[profile.release] [profile.release]
panic = "unwind" panic = "unwind"
overflow-checks = true
[patch.crates-io] [patch.crates-io]
# Dependencies from monero-oxide which originate from within our own tree
std-shims = { path = "common/std-shims" }
simple-request = { path = "common/request" }
dalek-ff-group = { path = "crypto/dalek-ff-group" }
flexible-transcript = { path = "crypto/transcript" }
modular-frost = { path = "crypto/frost" }
# https://github.com/rust-lang-nursery/lazy-static.rs/issues/201 # https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" } lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }
# These have `std` alternatives # Needed due to dockertest's usage of `Rc`s when we need `Arc`s
dockertest = { git = "https://github.com/orcalabs/dockertest-rs", rev = "4dd6ae24738aa6dc5c89444cc822ea4745517493" }
parking_lot_core = { path = "patches/parking_lot_core" }
parking_lot = { path = "patches/parking_lot" }
# wasmtime pulls in an old version for this
zstd = { path = "patches/zstd" }
# Needed for WAL compression
rocksdb = { path = "patches/rocksdb" }
# proc-macro-crate 2 binds to an old version of toml for msrv so we patch to 3
proc-macro-crate = { path = "patches/proc-macro-crate" }
# is-terminal now has an std-based solution with an equivalent API
is-terminal = { path = "patches/is-terminal" }
# So does matches
matches = { path = "patches/matches" } matches = { path = "patches/matches" }
home = { path = "patches/home" }
# directories-next was created because directories was unmaintained # directories-next was created because directories was unmaintained
# directories-next is now unmaintained while directories is maintained # directories-next is now unmaintained while directories is maintained
@@ -140,11 +176,11 @@ home = { path = "patches/home" }
option-ext = { path = "patches/option-ext" } option-ext = { path = "patches/option-ext" }
directories-next = { path = "patches/directories-next" } directories-next = { path = "patches/directories-next" }
# The official pasta_curves repo doesn't support Zeroize
pasta_curves = { git = "https://github.com/kayabaNerve/pasta_curves", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616" }
[workspace.lints.clippy] [workspace.lints.clippy]
uninlined_format_args = "allow" # TODO
unwrap_or_default = "allow" unwrap_or_default = "allow"
manual_is_multiple_of = "allow"
incompatible_msrv = "allow" # Manually verified with a GitHub workflow
borrow_as_ptr = "deny" borrow_as_ptr = "deny"
cast_lossless = "deny" cast_lossless = "deny"
cast_possible_truncation = "deny" cast_possible_truncation = "deny"
@@ -169,14 +205,14 @@ large_stack_arrays = "deny"
linkedlist = "deny" linkedlist = "deny"
macro_use_imports = "deny" macro_use_imports = "deny"
manual_instant_elapsed = "deny" manual_instant_elapsed = "deny"
# TODO manual_let_else = "deny" manual_let_else = "deny"
manual_ok_or = "deny" manual_ok_or = "deny"
manual_string_new = "deny" manual_string_new = "deny"
map_unwrap_or = "deny" map_unwrap_or = "deny"
match_bool = "deny" match_bool = "deny"
match_same_arms = "deny" match_same_arms = "deny"
missing_fields_in_debug = "deny" missing_fields_in_debug = "deny"
# TODO needless_continue = "deny" needless_continue = "deny"
needless_pass_by_value = "deny" needless_pass_by_value = "deny"
ptr_cast_constness = "deny" ptr_cast_constness = "deny"
range_minus_one = "deny" range_minus_one = "deny"
@@ -184,7 +220,8 @@ range_plus_one = "deny"
redundant_closure_for_method_calls = "deny" redundant_closure_for_method_calls = "deny"
redundant_else = "deny" redundant_else = "deny"
string_add_assign = "deny" string_add_assign = "deny"
unchecked_time_subtraction = "deny" unchecked_duration_subtraction = "deny"
uninlined_format_args = "deny"
unnecessary_box_returns = "deny" unnecessary_box_returns = "deny"
unnecessary_join = "deny" unnecessary_join = "deny"
unnecessary_wraps = "deny" unnecessary_wraps = "deny"
@@ -192,21 +229,3 @@ unnested_or_patterns = "deny"
unused_async = "deny" unused_async = "deny"
unused_self = "deny" unused_self = "deny"
zero_sized_map_values = "deny" zero_sized_map_values = "deny"
# TODO: These were incurred when updating Rust as necessary for compilation, yet aren't being fixed
# at this time due to the impacts it'd have throughout the repository (when this isn't actively the
# primary branch, `next` is)
needless_continue = "allow"
needless_lifetimes = "allow"
useless_conversion = "allow"
empty_line_after_doc_comments = "allow"
manual_div_ceil = "allow"
manual_let_else = "allow"
unnecessary_map_or = "allow"
result_large_err = "allow"
unneeded_struct_pattern = "allow"
[workspace.lints.rust]
unused = "allow" # TODO: https://github.com/rust-lang/rust/issues/147648
mismatched_lifetime_syntaxes = "allow"
unused_attributes = "allow"
unused_parens = "allow"

View File

@@ -5,4 +5,4 @@ a full copy of the AGPL-3.0 License is included in the root of this repository
as a reference text. This copy should be provided with any distribution of a as a reference text. This copy should be provided with any distribution of a
crate licensed under the AGPL-3.0, as per its terms. crate licensed under the AGPL-3.0, as per its terms.
The GitHub actions/workflows (`.github`) are licensed under the MIT license. The GitHub actions (`.github/actions`) are licensed under the MIT license.

View File

@@ -59,6 +59,7 @@ issued at the discretion of the Immunefi program managers.
- [Website](https://serai.exchange/): https://serai.exchange/ - [Website](https://serai.exchange/): https://serai.exchange/
- [Immunefi](https://immunefi.com/bounty/serai/): https://immunefi.com/bounty/serai/ - [Immunefi](https://immunefi.com/bounty/serai/): https://immunefi.com/bounty/serai/
- [Twitter](https://twitter.com/SeraiDEX): https://twitter.com/SeraiDEX - [Twitter](https://twitter.com/SeraiDEX): https://twitter.com/SeraiDEX
- [Mastodon](https://cryptodon.lol/@serai): https://cryptodon.lol/@serai
- [Discord](https://discord.gg/mpEUtJR3vz): https://discord.gg/mpEUtJR3vz - [Discord](https://discord.gg/mpEUtJR3vz): https://discord.gg/mpEUtJR3vz
- [Matrix](https://matrix.to/#/#serai:matrix.org): https://matrix.to/#/#serai:matrix.org - [Matrix](https://matrix.to/#/#serai:matrix.org): https://matrix.to/#/#serai:matrix.org
- [Reddit](https://www.reddit.com/r/SeraiDEX/): https://www.reddit.com/r/SeraiDEX/ - [Reddit](https://www.reddit.com/r/SeraiDEX/): https://www.reddit.com/r/SeraiDEX/

View File

@@ -18,7 +18,7 @@ workspace = true
[dependencies] [dependencies]
parity-db = { version = "0.4", default-features = false, optional = true } parity-db = { version = "0.4", default-features = false, optional = true }
rocksdb = { version = "0.24", default-features = false, features = ["zstd"], optional = true } rocksdb = { version = "0.21", default-features = false, features = ["zstd"], optional = true }
[features] [features]
parity-db = ["dep:parity-db"] parity-db = ["dep:parity-db"]

View File

@@ -1,5 +1,5 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
// Obtain a variable from the Serai environment/secret store. // Obtain a variable from the Serai environment/secret store.
pub fn var(variable: &str) -> Option<String> { pub fn var(variable: &str) -> Option<String> {

View File

@@ -1,19 +0,0 @@
[package]
name = "patchable-async-sleep"
version = "0.1.0"
description = "An async sleep function, patchable to the preferred runtime"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/patchable-async-sleep"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["async", "sleep", "tokio", "smol", "async-std"]
edition = "2021"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
tokio = { version = "1", default-features = false, features = [ "time"] }

View File

@@ -1,7 +0,0 @@
# Patchable Async Sleep
An async sleep function, patchable to the preferred runtime.
This crate is `tokio`-backed. Applications which don't want to use `tokio`
should patch this crate to one which works with their preferred runtime. The
point of it is to have a minimal API surface to trivially facilitate such work.

View File

@@ -1,10 +0,0 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
use core::time::Duration;
/// Sleep for the specified duration.
pub fn sleep(duration: Duration) -> impl core::future::Future<Output = ()> {
tokio::time::sleep(duration)
}

View File

@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/simple-requ
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["http", "https", "async", "request", "ssl"] keywords = ["http", "https", "async", "request", "ssl"]
edition = "2021" edition = "2021"
rust-version = "1.70" rust-version = "1.64"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true

View File

@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
use std::sync::Arc; use std::sync::Arc;

View File

@@ -1,13 +1,13 @@
[package] [package]
name = "std-shims" name = "std-shims"
version = "0.1.4" version = "0.1.1"
description = "A series of std shims to make alloc more feasible" description = "A series of std shims to make alloc more feasible"
license = "MIT" license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims" repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["nostd", "no_std", "alloc", "io"] keywords = ["nostd", "no_std", "alloc", "io"]
edition = "2021" edition = "2021"
rust-version = "1.64" rust-version = "1.70"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true
@@ -17,8 +17,7 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true workspace = true
[dependencies] [dependencies]
rustversion = { version = "1", default-features = false } spin = { version = "0.9", default-features = false, features = ["use_ticket_mutex", "lazy"] }
spin = { version = "0.10", default-features = false, features = ["use_ticket_mutex", "once", "lazy"] }
hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] } hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] }
[features] [features]

View File

@@ -3,9 +3,4 @@
A crate which passes through to std when the default `std` feature is enabled, A crate which passes through to std when the default `std` feature is enabled,
yet provides a series of shims when it isn't. yet provides a series of shims when it isn't.
No guarantee of one-to-one parity is provided. The shims provided aim to be sufficient for the average case. `HashSet` and `HashMap` are provided via `hashbrown`.
`HashSet` and `HashMap` are provided via `hashbrown`. Synchronization primitives are provided via `spin` (avoiding a requirement on `critical-section`).
types are not guaranteed to be

View File

@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), no_std)]
@@ -11,64 +11,3 @@ pub mod io;
pub use alloc::vec; pub use alloc::vec;
pub use alloc::str; pub use alloc::str;
pub use alloc::string; pub use alloc::string;
pub mod prelude {
#[rustversion::before(1.73)]
#[doc(hidden)]
pub trait StdShimsDivCeil {
fn div_ceil(self, rhs: Self) -> Self;
}
#[rustversion::before(1.73)]
mod impl_divceil {
use super::StdShimsDivCeil;
impl StdShimsDivCeil for u8 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for u16 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for u32 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for u64 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for u128 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for usize {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
}
#[cfg(feature = "std")]
#[rustversion::before(1.74)]
#[doc(hidden)]
pub trait StdShimsIoErrorOther {
fn other<E>(error: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>;
}
#[cfg(feature = "std")]
#[rustversion::before(1.74)]
impl StdShimsIoErrorOther for std::io::Error {
fn other<E>(error: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
std::io::Error::new(std::io::ErrorKind::Other, error)
}
}
}

View File

@@ -25,11 +25,7 @@ mod mutex_shim {
} }
pub use mutex_shim::{ShimMutex as Mutex, MutexGuard}; pub use mutex_shim::{ShimMutex as Mutex, MutexGuard};
#[cfg(not(feature = "std"))]
pub use spin::Lazy as LazyLock;
#[rustversion::before(1.80)]
#[cfg(feature = "std")]
pub use spin::Lazy as LazyLock;
#[rustversion::since(1.80)]
#[cfg(feature = "std")] #[cfg(feature = "std")]
pub use std::sync::LazyLock; pub use std::sync::LazyLock;
#[cfg(not(feature = "std"))]
pub use spin::Lazy as LazyLock;

View File

@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/zalloc"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = [] keywords = []
edition = "2021" edition = "2021"
rust-version = "1.77" rust-version = "1.77.0"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true

View File

@@ -1,5 +1,5 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))] #![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))]
//! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation. //! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation.

View File

@@ -20,15 +20,14 @@ workspace = true
async-trait = { version = "0.1", default-features = false } async-trait = { version = "0.1", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["std"] } zeroize = { version = "^1.5", default-features = false, features = ["std"] }
bitvec = { version = "1", default-features = false, features = ["std"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false, features = ["std"] }
blake2 = { version = "0.10", default-features = false, features = ["std"] } blake2 = { version = "0.10", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] } transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] }
dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"] }
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] } ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std", "aggregate"] } schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std"] }
dkg-musig = { path = "../crypto/dkg/musig", default-features = false, features = ["std"] }
frost = { package = "modular-frost", path = "../crypto/frost" } frost = { package = "modular-frost", path = "../crypto/frost" }
frost-schnorrkel = { path = "../crypto/schnorrkel" } frost-schnorrkel = { path = "../crypto/schnorrkel" }
@@ -42,7 +41,7 @@ processor-messages = { package = "serai-processor-messages", path = "../processo
message-queue = { package = "serai-message-queue", path = "../message-queue" } message-queue = { package = "serai-message-queue", path = "../message-queue" }
tributary = { package = "tributary-chain", path = "./tributary" } tributary = { package = "tributary-chain", path = "./tributary" }
sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] } sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] } serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }
hex = { version = "0.4", default-features = false, features = ["std"] } hex = { version = "0.4", default-features = false, features = ["std"] }
@@ -57,8 +56,8 @@ libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp
[dev-dependencies] [dev-dependencies]
tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] } tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }
sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] } sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] } sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
[features] [features]
longer-reattempts = [] longer-reattempts = []

View File

@@ -12,9 +12,9 @@ use tokio::{
use borsh::BorshSerialize; use borsh::BorshSerialize;
use sp_application_crypto::RuntimePublic; use sp_application_crypto::RuntimePublic;
use serai_client::{ use serai_client::{
primitives::{ExternalNetworkId, EXTERNAL_NETWORKS}, primitives::{NETWORKS, NetworkId, Signature},
validator_sets::primitives::{ExternalValidatorSet, Session}, validator_sets::primitives::{Session, ValidatorSet},
Serai, SeraiError, TemporalSerai, SeraiError, TemporalSerai, Serai,
}; };
use serai_db::{Get, DbTxn, Db, create_db}; use serai_db::{Get, DbTxn, Db, create_db};
@@ -28,17 +28,17 @@ use crate::{
create_db! { create_db! {
CosignDb { CosignDb {
ReceivedCosign: (set: ExternalValidatorSet, block: [u8; 32]) -> CosignedBlock, ReceivedCosign: (set: ValidatorSet, block: [u8; 32]) -> CosignedBlock,
LatestCosign: (network: ExternalNetworkId) -> CosignedBlock, LatestCosign: (network: NetworkId) -> CosignedBlock,
DistinctChain: (set: ExternalValidatorSet) -> (), DistinctChain: (set: ValidatorSet) -> (),
} }
} }
pub struct CosignEvaluator<D: Db> { pub struct CosignEvaluator<D: Db> {
db: Mutex<D>, db: Mutex<D>,
serai: Arc<Serai>, serai: Arc<Serai>,
stakes: RwLock<Option<HashMap<ExternalNetworkId, u64>>>, stakes: RwLock<Option<HashMap<NetworkId, u64>>>,
latest_cosigns: RwLock<HashMap<ExternalNetworkId, CosignedBlock>>, latest_cosigns: RwLock<HashMap<NetworkId, CosignedBlock>>,
} }
impl<D: Db> CosignEvaluator<D> { impl<D: Db> CosignEvaluator<D> {
@@ -79,7 +79,7 @@ impl<D: Db> CosignEvaluator<D> {
let serai = self.serai.as_of_latest_finalized_block().await?; let serai = self.serai.as_of_latest_finalized_block().await?;
let mut stakes = HashMap::new(); let mut stakes = HashMap::new();
for network in EXTERNAL_NETWORKS { for network in NETWORKS {
// Use if this network has published a Batch for a short-circuit of if they've ever set a key // Use if this network has published a Batch for a short-circuit of if they've ever set a key
let set_key = serai.in_instructions().last_batch_for_network(network).await?.is_some(); let set_key = serai.in_instructions().last_batch_for_network(network).await?.is_some();
if set_key { if set_key {
@@ -87,7 +87,7 @@ impl<D: Db> CosignEvaluator<D> {
network, network,
serai serai
.validator_sets() .validator_sets()
.total_allocated_stake(network.into()) .total_allocated_stake(network)
.await? .await?
.expect("network which published a batch didn't have a stake set") .expect("network which published a batch didn't have a stake set")
.0, .0,
@@ -126,9 +126,9 @@ impl<D: Db> CosignEvaluator<D> {
async fn set_with_keys_fn( async fn set_with_keys_fn(
serai: &TemporalSerai<'_>, serai: &TemporalSerai<'_>,
network: ExternalNetworkId, network: NetworkId,
) -> Result<Option<ExternalValidatorSet>, SeraiError> { ) -> Result<Option<ValidatorSet>, SeraiError> {
let Some(latest_session) = serai.validator_sets().session(network.into()).await? else { let Some(latest_session) = serai.validator_sets().session(network).await? else {
log::warn!("received cosign from {:?}, which doesn't yet have a session", network); log::warn!("received cosign from {:?}, which doesn't yet have a session", network);
return Ok(None); return Ok(None);
}; };
@@ -136,13 +136,13 @@ impl<D: Db> CosignEvaluator<D> {
Ok(Some( Ok(Some(
if serai if serai
.validator_sets() .validator_sets()
.keys(ExternalValidatorSet { network, session: prior_session }) .keys(ValidatorSet { network, session: prior_session })
.await? .await?
.is_some() .is_some()
{ {
ExternalValidatorSet { network, session: prior_session } ValidatorSet { network, session: prior_session }
} else { } else {
ExternalValidatorSet { network, session: latest_session } ValidatorSet { network, session: latest_session }
}, },
)) ))
} }
@@ -164,7 +164,7 @@ impl<D: Db> CosignEvaluator<D> {
if !keys if !keys
.0 .0
.verify(&cosign_block_msg(cosign.block_number, cosign.block), &cosign.signature.into()) .verify(&cosign_block_msg(cosign.block_number, cosign.block), &Signature(cosign.signature))
{ {
log::warn!("received cosigned block with an invalid signature"); log::warn!("received cosigned block with an invalid signature");
return Ok(()); return Ok(());
@@ -204,12 +204,16 @@ impl<D: Db> CosignEvaluator<D> {
let mut total_stake = 0; let mut total_stake = 0;
let mut total_on_distinct_chain = 0; let mut total_on_distinct_chain = 0;
for network in EXTERNAL_NETWORKS { for network in NETWORKS {
if network == NetworkId::Serai {
continue;
}
// Get the current set for this network // Get the current set for this network
let set_with_keys = { let set_with_keys = {
let mut res; let mut res;
while { while {
res = set_with_keys_fn(&serai, network).await; res = set_with_keys_fn(&serai, cosign.network).await;
res.is_err() res.is_err()
} { } {
log::error!( log::error!(
@@ -227,8 +231,7 @@ impl<D: Db> CosignEvaluator<D> {
let stake = { let stake = {
let mut res; let mut res;
while { while {
res = res = serai.validator_sets().total_allocated_stake(set_with_keys.network).await;
serai.validator_sets().total_allocated_stake(set_with_keys.network.into()).await;
res.is_err() res.is_err()
} { } {
log::error!( log::error!(
@@ -268,7 +271,7 @@ impl<D: Db> CosignEvaluator<D> {
#[allow(clippy::new_ret_no_self)] #[allow(clippy::new_ret_no_self)]
pub fn new<P: P2p>(db: D, p2p: P, serai: Arc<Serai>) -> mpsc::UnboundedSender<CosignedBlock> { pub fn new<P: P2p>(db: D, p2p: P, serai: Arc<Serai>) -> mpsc::UnboundedSender<CosignedBlock> {
let mut latest_cosigns = HashMap::new(); let mut latest_cosigns = HashMap::new();
for network in EXTERNAL_NETWORKS { for network in NETWORKS {
if let Some(cosign) = LatestCosign::get(&db, network) { if let Some(cosign) = LatestCosign::get(&db, network) {
latest_cosigns.insert(network, cosign); latest_cosigns.insert(network, cosign);
} }

View File

@@ -6,9 +6,9 @@ use blake2::{
use scale::Encode; use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize}; use borsh::{BorshSerialize, BorshDeserialize};
use serai_client::{ use serai_client::{
primitives::NetworkId,
validator_sets::primitives::{Session, ValidatorSet},
in_instructions::primitives::{Batch, SignedBatch}, in_instructions::primitives::{Batch, SignedBatch},
primitives::ExternalNetworkId,
validator_sets::primitives::{ExternalValidatorSet, Session},
}; };
pub use serai_db::*; pub use serai_db::*;
@@ -18,21 +18,21 @@ use crate::tributary::{TributarySpec, Transaction, scanner::RecognizedIdType};
create_db!( create_db!(
MainDb { MainDb {
HandledMessageDb: (network: ExternalNetworkId) -> u64, HandledMessageDb: (network: NetworkId) -> u64,
ActiveTributaryDb: () -> Vec<u8>, ActiveTributaryDb: () -> Vec<u8>,
RetiredTributaryDb: (set: ExternalValidatorSet) -> (), RetiredTributaryDb: (set: ValidatorSet) -> (),
FirstPreprocessDb: ( FirstPreprocessDb: (
network: ExternalNetworkId, network: NetworkId,
id_type: RecognizedIdType, id_type: RecognizedIdType,
id: &[u8] id: &[u8]
) -> Vec<Vec<u8>>, ) -> Vec<Vec<u8>>,
LastReceivedBatchDb: (network: ExternalNetworkId) -> u32, LastReceivedBatchDb: (network: NetworkId) -> u32,
ExpectedBatchDb: (network: ExternalNetworkId, id: u32) -> [u8; 32], ExpectedBatchDb: (network: NetworkId, id: u32) -> [u8; 32],
BatchDb: (network: ExternalNetworkId, id: u32) -> SignedBatch, BatchDb: (network: NetworkId, id: u32) -> SignedBatch,
LastVerifiedBatchDb: (network: ExternalNetworkId) -> u32, LastVerifiedBatchDb: (network: NetworkId) -> u32,
HandoverBatchDb: (set: ExternalValidatorSet) -> u32, HandoverBatchDb: (set: ValidatorSet) -> u32,
LookupHandoverBatchDb: (network: ExternalNetworkId, batch: u32) -> Session, LookupHandoverBatchDb: (network: NetworkId, batch: u32) -> Session,
QueuedBatchesDb: (set: ExternalValidatorSet) -> Vec<u8> QueuedBatchesDb: (set: ValidatorSet) -> Vec<u8>
} }
); );
@@ -61,7 +61,7 @@ impl ActiveTributaryDb {
ActiveTributaryDb::set(txn, &existing_bytes); ActiveTributaryDb::set(txn, &existing_bytes);
} }
pub fn retire_tributary(txn: &mut impl DbTxn, set: ExternalValidatorSet) { pub fn retire_tributary(txn: &mut impl DbTxn, set: ValidatorSet) {
let mut active = Self::active_tributaries(txn).1; let mut active = Self::active_tributaries(txn).1;
for i in 0 .. active.len() { for i in 0 .. active.len() {
if active[i].set() == set { if active[i].set() == set {
@@ -82,7 +82,7 @@ impl ActiveTributaryDb {
impl FirstPreprocessDb { impl FirstPreprocessDb {
pub fn save_first_preprocess( pub fn save_first_preprocess(
txn: &mut impl DbTxn, txn: &mut impl DbTxn,
network: ExternalNetworkId, network: NetworkId,
id_type: RecognizedIdType, id_type: RecognizedIdType,
id: &[u8], id: &[u8],
preprocess: &Vec<Vec<u8>>, preprocess: &Vec<Vec<u8>>,
@@ -108,19 +108,19 @@ impl ExpectedBatchDb {
} }
impl HandoverBatchDb { impl HandoverBatchDb {
pub fn set_handover_batch(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: u32) { pub fn set_handover_batch(txn: &mut impl DbTxn, set: ValidatorSet, batch: u32) {
Self::set(txn, set, &batch); Self::set(txn, set, &batch);
LookupHandoverBatchDb::set(txn, set.network, batch, &set.session); LookupHandoverBatchDb::set(txn, set.network, batch, &set.session);
} }
} }
impl QueuedBatchesDb { impl QueuedBatchesDb {
pub fn queue(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: &Transaction) { pub fn queue(txn: &mut impl DbTxn, set: ValidatorSet, batch: &Transaction) {
let mut batches = Self::get(txn, set).unwrap_or_default(); let mut batches = Self::get(txn, set).unwrap_or_default();
batch.write(&mut batches).unwrap(); batch.write(&mut batches).unwrap();
Self::set(txn, set, &batches); Self::set(txn, set, &batches);
} }
pub fn take(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec<Transaction> { pub fn take(txn: &mut impl DbTxn, set: ValidatorSet) -> Vec<Transaction> {
let batches_vec = Self::get(txn, set).unwrap_or_default(); let batches_vec = Self::get(txn, set).unwrap_or_default();
txn.del(Self::key(set)); txn.del(Self::key(set));

View File

@@ -1,5 +1,3 @@
#![expect(clippy::cast_possible_truncation)]
use core::ops::Deref; use core::ops::Deref;
use std::{ use std::{
sync::{OnceLock, Arc}, sync::{OnceLock, Arc},
@@ -10,24 +8,22 @@ use std::{
use zeroize::{Zeroize, Zeroizing}; use zeroize::{Zeroize, Zeroizing};
use rand_core::OsRng; use rand_core::OsRng;
use dalek_ff_group::Ristretto;
use ciphersuite::{ use ciphersuite::{
group::{ group::{
ff::{Field, PrimeField}, ff::{Field, PrimeField},
GroupEncoding, GroupEncoding,
}, },
Ciphersuite, Ciphersuite, Ristretto,
}; };
use schnorr::SchnorrSignature; use schnorr::SchnorrSignature;
use frost::Participant;
use serai_db::{DbTxn, Db}; use serai_db::{DbTxn, Db};
use scale::Encode; use scale::Encode;
use borsh::BorshSerialize; use borsh::BorshSerialize;
use serai_client::{ use serai_client::{
primitives::ExternalNetworkId, primitives::NetworkId,
validator_sets::primitives::{ExternalValidatorSet, KeyPair, Session}, validator_sets::primitives::{Session, ValidatorSet, KeyPair},
Public, Serai, SeraiInInstructions, Public, Serai, SeraiInInstructions,
}; };
@@ -82,7 +78,7 @@ pub struct ActiveTributary<D: Db, P: P2p> {
#[derive(Clone)] #[derive(Clone)]
pub enum TributaryEvent<D: Db, P: P2p> { pub enum TributaryEvent<D: Db, P: P2p> {
NewTributary(ActiveTributary<D, P>), NewTributary(ActiveTributary<D, P>),
TributaryRetired(ExternalValidatorSet), TributaryRetired(ValidatorSet),
} }
// Creates a new tributary and sends it to all listeners. // Creates a new tributary and sends it to all listeners.
@@ -117,16 +113,17 @@ async fn add_tributary<D: Db, Pro: Processors, P: P2p>(
   // If we're rebooting, we'll re-fire this message
   // This is safe due to the message-queue deduplicating based off the intent system
   let set = spec.set();
-  let our_i = spec
-    .i(&[], Ristretto::generator() * key.deref())
-    .expect("adding a tributary for a set we aren't in set for");
   processors
     .send(
       set.network,
       processor_messages::key_gen::CoordinatorMessage::GenerateKey {
-        id: processor_messages::key_gen::KeyGenId { session: set.session, attempt: 0 },
-        params: frost::ThresholdParams::new(spec.t(), spec.n(&[]), our_i.start).unwrap(),
-        shares: u16::from(our_i.end) - u16::from(our_i.start),
+        session: set.session,
+        threshold: spec.t(),
+        evrf_public_keys: spec.evrf_public_keys(),
+        // TODO
+        // params: frost::ThresholdParams::new(spec.t(), spec.n(&[]), our_i.start).unwrap(),
+        // shares: u16::from(our_i.end) - u16::from(our_i.start),
       },
     )
     .await;
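The hunk above drops the per-attempt KeyGenId/ThresholdParams pairing in favor of a single per-session GenerateKey message. A minimal sketch of the new payload's shape, with field types assumed for illustration (the authoritative definition lives in processor_messages):

// Hypothetical, simplified mirror of the new GenerateKey payload; types are assumptions.
pub struct Session(pub u32);

pub struct GenerateKey {
  pub session: Session,
  // `t`, the amount of key shares required to sign
  pub threshold: u16,
  // One (Embedwards25519 key, network-embedded key) pair per validator,
  // as returned by spec.evrf_public_keys()
  pub evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
}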
@@ -148,7 +145,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
   p2p: &P,
   cosign_channel: &mpsc::UnboundedSender<CosignedBlock>,
   tributaries: &HashMap<Session, ActiveTributary<D, P>>,
-  network: ExternalNetworkId,
+  network: NetworkId,
   msg: &processors::Message,
 ) -> bool {
   #[allow(clippy::nonminimal_bool)]
@@ -169,12 +166,9 @@ async fn handle_processor_message<D: Db, P: P2p>(
       // We'll only receive these if we fired GenerateKey, which we'll only do if if we're
       // in-set, making the Tributary relevant
       ProcessorMessage::KeyGen(inner_msg) => match inner_msg {
-        key_gen::ProcessorMessage::Commitments { id, .. } |
-        key_gen::ProcessorMessage::InvalidCommitments { id, .. } |
-        key_gen::ProcessorMessage::Shares { id, .. } |
-        key_gen::ProcessorMessage::InvalidShare { id, .. } |
-        key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } |
-        key_gen::ProcessorMessage::Blame { id, .. } => Some(id.session),
+        key_gen::ProcessorMessage::Participation { session, .. } |
+        key_gen::ProcessorMessage::GeneratedKeyPair { session, .. } |
+        key_gen::ProcessorMessage::Blame { session, .. } => Some(*session),
       },
       ProcessorMessage::Sign(inner_msg) => match inner_msg {
         // We'll only receive InvalidParticipant/Preprocess/Share if we're actively signing
@@ -196,8 +190,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
             .iter()
             .map(|plan| plan.session)
             .filter(|session| {
-              RetiredTributaryDb::get(&txn, ExternalValidatorSet { network, session: *session })
-                .is_none()
+              RetiredTributaryDb::get(&txn, ValidatorSet { network, session: *session }).is_none()
             })
             .collect::<HashSet<_>>();
@@ -269,17 +262,14 @@ async fn handle_processor_message<D: Db, P: P2p>(
         }
         // This causes an action on Substrate yet not on any Tributary
         coordinator::ProcessorMessage::SignedSlashReport { session, signature } => {
-          let set = ExternalValidatorSet { network, session: *session };
+          let set = ValidatorSet { network, session: *session };
           let signature: &[u8] = signature.as_ref();
-          let signature = <[u8; 64]>::try_from(signature).unwrap();
-          let signature: serai_client::Signature = signature.into();
+          let signature = serai_client::Signature(signature.try_into().unwrap());
           let slashes = crate::tributary::SlashReport::get(&txn, set)
             .expect("signed slash report despite not having slash report locally");
-          let slashes_pubs = slashes
-            .iter()
-            .map(|(address, points)| (Public::from(*address), *points))
-            .collect::<Vec<_>>();
+          let slashes_pubs =
+            slashes.iter().map(|(address, points)| (Public(*address), *points)).collect::<Vec<_>>();
           let tx = serai_client::SeraiValidatorSets::report_slashes(
             network,
@@ -289,7 +279,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
               .collect::<Vec<_>>()
               .try_into()
               .unwrap(),
-            signature,
+            signature.clone(),
           );

           loop {
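The signature handling above collapses a two-step conversion into a single expression. A self-contained sketch of the equivalent logic, assuming Signature is a newtype over a 64-byte array (as serai_client's type is used here):

pub struct Signature(pub [u8; 64]);

fn parse_signature(raw: &[u8]) -> Signature {
  // try_into() fails, and this panics, if the processor sent a wrongly-sized signature
  Signature(raw.try_into().expect("slash-report signature wasn't 64 bytes"))
}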
@@ -400,7 +390,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
   if let Some(relevant_tributary_value) = relevant_tributary {
     if RetiredTributaryDb::get(
       &txn,
-      ExternalValidatorSet { network: msg.network, session: relevant_tributary_value },
+      ValidatorSet { network: msg.network, session: relevant_tributary_value },
     )
     .is_some()
     {
@@ -428,125 +418,33 @@ async fn handle_processor_message<D: Db, P: P2p>(
     let txs = match msg.msg.clone() {
       ProcessorMessage::KeyGen(inner_msg) => match inner_msg {
-        key_gen::ProcessorMessage::Commitments { id, commitments } => {
-          vec![Transaction::DkgCommitments {
-            attempt: id.attempt,
-            commitments,
-            signed: Transaction::empty_signed(),
-          }]
-        }
-        key_gen::ProcessorMessage::InvalidCommitments { id, faulty } => {
-          // This doesn't have guaranteed timing
-          //
-          // While the party *should* be fatally slashed and not included in future attempts,
-          // they'll actually be fatally slashed (assuming liveness before the Tributary retires)
-          // and not included in future attempts *which begin after the latency window completes*
-          let participant = spec
-            .reverse_lookup_i(
-              &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
-                .expect("participating in DKG attempt yet we didn't save who was removed"),
-              faulty,
-            )
-            .unwrap();
-          vec![Transaction::RemoveParticipantDueToDkg {
-            participant,
-            signed: Transaction::empty_signed(),
-          }]
-        }
-        key_gen::ProcessorMessage::Shares { id, mut shares } => {
-          // Create a MuSig-based machine to inform Substrate of this key generation
-          let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, id.attempt);
-          let removed = crate::tributary::removed_as_of_dkg_attempt(&txn, genesis, id.attempt)
-            .expect("participating in a DKG attempt yet we didn't track who was removed yet?");
-          let our_i = spec
-            .i(&removed, pub_key)
-            .expect("processor message to DKG for an attempt we aren't a validator in");
-          // `tx_shares` needs to be done here as while it can be serialized from the HashMap
-          // without further context, it can't be deserialized without context
-          let mut tx_shares = Vec::with_capacity(shares.len());
-          for shares in &mut shares {
-            tx_shares.push(vec![]);
-            for i in 1 ..= spec.n(&removed) {
-              let i = Participant::new(i).unwrap();
-              if our_i.contains(&i) {
-                if shares.contains_key(&i) {
-                  panic!("processor sent us our own shares");
-                }
-                continue;
-              }
-              tx_shares.last_mut().unwrap().push(
-                shares.remove(&i).expect("processor didn't send share for another validator"),
-              );
-            }
-          }
-          vec![Transaction::DkgShares {
-            attempt: id.attempt,
-            shares: tx_shares,
-            confirmation_nonces: nonces,
-            signed: Transaction::empty_signed(),
-          }]
-        }
-        key_gen::ProcessorMessage::InvalidShare { id, accuser, faulty, blame } => {
-          vec![Transaction::InvalidDkgShare {
-            attempt: id.attempt,
-            accuser,
-            faulty,
-            blame,
-            signed: Transaction::empty_signed(),
-          }]
-        }
-        key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => {
-          // TODO2: Check the KeyGenId fields
-          // Tell the Tributary the key pair, get back the share for the MuSig signature
-          let share = crate::tributary::generated_key_pair::<D>(
-            &mut txn,
-            key,
-            spec,
-            &KeyPair(Public::from(substrate_key), network_key.try_into().unwrap()),
-            id.attempt,
-          );
-          // TODO: Move this into generated_key_pair?
-          match share {
-            Ok(share) => {
-              vec![Transaction::DkgConfirmed {
-                attempt: id.attempt,
-                confirmation_share: share,
-                signed: Transaction::empty_signed(),
-              }]
-            }
-            Err(p) => {
-              let participant = spec
-                .reverse_lookup_i(
-                  &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
-                    .expect("participating in DKG attempt yet we didn't save who was removed"),
-                  p,
-                )
-                .unwrap();
-              vec![Transaction::RemoveParticipantDueToDkg {
-                participant,
-                signed: Transaction::empty_signed(),
-              }]
-            }
-          }
-        }
-        key_gen::ProcessorMessage::Blame { id, participant } => {
-          let participant = spec
-            .reverse_lookup_i(
-              &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
-                .expect("participating in DKG attempt yet we didn't save who was removed"),
-              participant,
-            )
-            .unwrap();
-          vec![Transaction::RemoveParticipantDueToDkg {
-            participant,
-            signed: Transaction::empty_signed(),
-          }]
-        }
+        key_gen::ProcessorMessage::Participation { session, participation } => {
+          assert_eq!(session, spec.set().session);
+          vec![Transaction::DkgParticipation { participation, signed: Transaction::empty_signed() }]
+        }
+        key_gen::ProcessorMessage::GeneratedKeyPair { session, substrate_key, network_key } => {
+          assert_eq!(session, spec.set().session);
+          crate::tributary::generated_key_pair::<D>(
+            &mut txn,
+            genesis,
+            &KeyPair(Public(substrate_key), network_key.try_into().unwrap()),
+          );
+          // Create a MuSig-based machine to inform Substrate of this key generation
+          let confirmation_nonces =
+            crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, 0);
+          vec![Transaction::DkgConfirmationNonces {
+            attempt: 0,
+            confirmation_nonces,
+            signed: Transaction::empty_signed(),
+          }]
+        }
+        key_gen::ProcessorMessage::Blame { session, participant } => {
+          assert_eq!(session, spec.set().session);
+          let participant = spec.reverse_lookup_i(participant).unwrap();
+          vec![Transaction::RemoveParticipant { participant, signed: Transaction::empty_signed() }]
+        }
       },
       ProcessorMessage::Sign(msg) => match msg {
         sign::ProcessorMessage::InvalidParticipant { .. } => {
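After this rewrite, key-gen messages map onto just three Tributary transactions. A compact sketch of that mapping, using pseudotypes (the real transactions also carry a Signed and richer payloads):

// Pseudotypes for illustration only.
enum KeyGenMsg {
  Participation(Vec<u8>),
  GeneratedKeyPair,
  Blame(u16),
}
enum Tx {
  DkgParticipation(Vec<u8>),
  DkgConfirmationNonces { attempt: u32, nonces: [u8; 64] },
  RemoveParticipant(u16),
}

fn tx_for(msg: KeyGenMsg, nonces: [u8; 64]) -> Tx {
  match msg {
    KeyGenMsg::Participation(participation) => Tx::DkgParticipation(participation),
    // GeneratedKeyPair triggers the MuSig confirmation, always at attempt 0 here
    KeyGenMsg::GeneratedKeyPair => Tx::DkgConfirmationNonces { attempt: 0, nonces },
    KeyGenMsg::Blame(participant) => Tx::RemoveParticipant(participant),
  }
}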
@@ -789,7 +687,7 @@ async fn handle_processor_messages<D: Db, Pro: Processors, P: P2p>(
   processors: Pro,
   p2p: P,
   cosign_channel: mpsc::UnboundedSender<CosignedBlock>,
-  network: ExternalNetworkId,
+  network: NetworkId,
   mut tributary_event: mpsc::UnboundedReceiver<TributaryEvent<D, P>>,
 ) {
   let mut tributaries = HashMap::new();
@@ -838,7 +736,7 @@ async fn handle_processor_messages<D: Db, Pro: Processors, P: P2p>(
 #[allow(clippy::too_many_arguments)]
 async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
   mut db: D,
-  network: ExternalNetworkId,
+  network: NetworkId,
   mut tributary_event: mpsc::UnboundedReceiver<TributaryEvent<D, P>>,
 ) {
   let mut tributaries = HashMap::new();
@@ -912,7 +810,7 @@ async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
       for batch in start_id ..= last_id {
         let is_pre_handover = LookupHandoverBatchDb::get(&txn, network, batch + 1);
         if let Some(session) = is_pre_handover {
-          let set = ExternalValidatorSet { network, session };
+          let set = ValidatorSet { network, session };
           let mut queued = QueuedBatchesDb::take(&mut txn, set);
           // is_handover_batch is only set for handover `Batch`s we're participating in, making
           // this safe
@@ -930,8 +828,7 @@ async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
         let is_handover = LookupHandoverBatchDb::get(&txn, network, batch);
         if let Some(session) = is_handover {
-          for queued in QueuedBatchesDb::take(&mut txn, ExternalValidatorSet { network, session })
-          {
+          for queued in QueuedBatchesDb::take(&mut txn, ValidatorSet { network, session }) {
             to_publish.push((session, queued));
           }
         }
@@ -978,7 +875,10 @@ pub async fn handle_processors<D: Db, Pro: Processors, P: P2p>(
   mut tributary_event: broadcast::Receiver<TributaryEvent<D, P>>,
 ) {
   let mut channels = HashMap::new();
-  for network in serai_client::primitives::EXTERNAL_NETWORKS {
+  for network in serai_client::primitives::NETWORKS {
+    if network == NetworkId::Serai {
+      continue;
+    }
     let (processor_send, processor_recv) = mpsc::unbounded_channel();
     tokio::spawn(handle_processor_messages(
       db.clone(),
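The loop now iterates every network and skips Serai itself, rather than relying on a dedicated EXTERNAL_NETWORKS constant. An equivalent filter, with a local enum standing in for serai_client::primitives::NetworkId (an assumption made so the sketch is self-contained):

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum NetworkId {
  Serai,
  Bitcoin,
  Ethereum,
  Monero,
}
const NETWORKS: [NetworkId; 4] =
  [NetworkId::Serai, NetworkId::Bitcoin, NetworkId::Ethereum, NetworkId::Monero];

fn external_networks() -> impl Iterator<Item = NetworkId> {
  // Serai itself has no processor, so it gets no message-handling task
  NETWORKS.into_iter().filter(|network| *network != NetworkId::Serai)
}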
@@ -1200,7 +1100,7 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
     }
   });

-  move |set: ExternalValidatorSet, genesis, id_type, id: Vec<u8>| {
+  move |set: ValidatorSet, genesis, id_type, id: Vec<u8>| {
     log::debug!("recognized ID {:?} {}", id_type, hex::encode(&id));
     let mut raw_db = raw_db.clone();
     let key = key.clone();

View File

@@ -11,9 +11,7 @@ use rand_core::{RngCore, OsRng};
 use scale::{Decode, Encode};
 use borsh::{BorshSerialize, BorshDeserialize};

-use serai_client::{
-  primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet, Serai,
-};
+use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet, Serai};

 use serai_db::Db;
@@ -71,7 +69,7 @@ const BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;
 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]
 pub struct CosignedBlock {
-  pub network: ExternalNetworkId,
+  pub network: NetworkId,
   pub block_number: u64,
   pub block: [u8; 32],
   pub signature: [u8; 64],
@@ -210,8 +208,8 @@ pub struct HeartbeatBatch {
 pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p {
   type Id: Send + Sync + Clone + Copy + fmt::Debug;

-  async fn subscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]);
-  async fn unsubscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]);
+  async fn subscribe(&self, set: ValidatorSet, genesis: [u8; 32]);
+  async fn unsubscribe(&self, set: ValidatorSet, genesis: [u8; 32]);

   async fn send_raw(&self, to: Self::Id, msg: Vec<u8>);
   async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>);
@@ -311,7 +309,7 @@ struct Behavior {
 #[allow(clippy::type_complexity)]
 #[derive(Clone)]
 pub struct LibP2p {
-  subscribe: Arc<Mutex<mpsc::UnboundedSender<(bool, ExternalValidatorSet, [u8; 32])>>>,
+  subscribe: Arc<Mutex<mpsc::UnboundedSender<(bool, ValidatorSet, [u8; 32])>>>,
   send: Arc<Mutex<mpsc::UnboundedSender<(PeerId, Vec<u8>)>>>,
   broadcast: Arc<Mutex<mpsc::UnboundedSender<(P2pMessageKind, Vec<u8>)>>>,
   receive: Arc<Mutex<mpsc::UnboundedReceiver<Message<Self>>>>,
@@ -399,7 +397,7 @@ impl LibP2p {
     let (receive_send, receive_recv) = mpsc::unbounded_channel();
     let (subscribe_send, mut subscribe_recv) = mpsc::unbounded_channel();

-    fn topic_for_set(set: ExternalValidatorSet) -> IdentTopic {
+    fn topic_for_set(set: ValidatorSet) -> IdentTopic {
       IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode())))
     }
@@ -409,8 +407,7 @@ impl LibP2p {
     // The addrs we're currently dialing, and the networks associated with them
     let dialing_peers = Arc::new(RwLock::new(HashMap::new()));
     // The peers we're currently connected to, and the networks associated with them
-    let connected_peers =
-      Arc::new(RwLock::new(HashMap::<Multiaddr, HashSet<ExternalNetworkId>>::new()));
+    let connected_peers = Arc::new(RwLock::new(HashMap::<Multiaddr, HashSet<NetworkId>>::new()));

     // Find and connect to peers
     let (connect_to_network_send, mut connect_to_network_recv) =
@@ -423,7 +420,7 @@ impl LibP2p {
       let connect_to_network_send = connect_to_network_send.clone();
       async move {
         loop {
-          let connect = |network: ExternalNetworkId, addr: Multiaddr| {
+          let connect = |network: NetworkId, addr: Multiaddr| {
             let dialing_peers = dialing_peers.clone();
             let connected_peers = connected_peers.clone();
             let to_dial_send = to_dial_send.clone();
@@ -510,7 +507,7 @@ impl LibP2p {
             connect_to_network_networks.insert(network);
           }
           for network in connect_to_network_networks {
-            if let Ok(mut nodes) = serai.p2p_validators(network.into()).await {
+            if let Ok(mut nodes) = serai.p2p_validators(network).await {
               // If there's an insufficient amount of nodes known, connect to all yet add it
               // back and break
               if nodes.len() < TARGET_PEERS {
@@ -560,7 +557,7 @@ impl LibP2p {
         // Subscribe to any new topics
         set = subscribe_recv.recv() => {
-          let (subscribe, set, genesis): (_, ExternalValidatorSet, [u8; 32]) =
+          let (subscribe, set, genesis): (_, ValidatorSet, [u8; 32]) =
             set.expect("subscribe_recv closed. are we shutting down?");
           let topic = topic_for_set(set);
           if subscribe {
@@ -779,7 +776,7 @@ impl LibP2p {
 impl P2p for LibP2p {
   type Id = PeerId;

-  async fn subscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]) {
+  async fn subscribe(&self, set: ValidatorSet, genesis: [u8; 32]) {
     self
       .subscribe
       .lock()
@@ -788,7 +785,7 @@ impl P2p for LibP2p {
.expect("subscribe_send closed. are we shutting down?"); .expect("subscribe_send closed. are we shutting down?");
} }
async fn unsubscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]) { async fn unsubscribe(&self, set: ValidatorSet, genesis: [u8; 32]) {
self self
.subscribe .subscribe
.lock() .lock()

View File

@@ -1,6 +1,6 @@
 use std::sync::Arc;

-use serai_client::primitives::ExternalNetworkId;
+use serai_client::primitives::NetworkId;

 use processor_messages::{ProcessorMessage, CoordinatorMessage};

 use message_queue::{Service, Metadata, client::MessageQueue};
@@ -8,27 +8,27 @@ use message_queue::{Service, Metadata, client::MessageQueue};
 #[derive(Clone, PartialEq, Eq, Debug)]
 pub struct Message {
   pub id: u64,
-  pub network: ExternalNetworkId,
+  pub network: NetworkId,
   pub msg: ProcessorMessage,
 }

 #[async_trait::async_trait]
 pub trait Processors: 'static + Send + Sync + Clone {
-  async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>);
-  async fn recv(&self, network: ExternalNetworkId) -> Message;
+  async fn send(&self, network: NetworkId, msg: impl Send + Into<CoordinatorMessage>);
+  async fn recv(&self, network: NetworkId) -> Message;
   async fn ack(&self, msg: Message);
 }

 #[async_trait::async_trait]
 impl Processors for Arc<MessageQueue> {
-  async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>) {
+  async fn send(&self, network: NetworkId, msg: impl Send + Into<CoordinatorMessage>) {
     let msg: CoordinatorMessage = msg.into();
     let metadata =
       Metadata { from: self.service, to: Service::Processor(network), intent: msg.intent() };
     let msg = borsh::to_vec(&msg).unwrap();
     self.queue(metadata, msg).await;
   }
-  async fn recv(&self, network: ExternalNetworkId) -> Message {
+  async fn recv(&self, network: NetworkId) -> Message {
     let msg = self.next(Service::Processor(network)).await;
     assert_eq!(msg.from, Service::Processor(network));
View File

@@ -14,15 +14,14 @@
 use zeroize::Zeroizing;

-use dalek_ff_group::Ristretto;
-use ciphersuite::Ciphersuite;
+use ciphersuite::{Ciphersuite, Ristretto};

 use borsh::{BorshSerialize, BorshDeserialize};

 use serai_client::{
-  primitives::ExternalNetworkId,
-  validator_sets::primitives::{ExternalValidatorSet, Session},
-  Serai, SeraiError,
+  SeraiError, Serai,
+  primitives::NetworkId,
+  validator_sets::primitives::{Session, ValidatorSet},
 };

 use serai_db::*;
@@ -71,18 +70,13 @@ impl LatestCosignedBlock {
 db_channel! {
   SubstrateDbChannels {
-    CosignTransactions: (network: ExternalNetworkId) -> (Session, u64, [u8; 32]),
+    CosignTransactions: (network: NetworkId) -> (Session, u64, [u8; 32]),
   }
 }

 impl CosignTransactions {
   // Append a cosign transaction.
-  pub fn append_cosign(
-    txn: &mut impl DbTxn,
-    set: ExternalValidatorSet,
-    number: u64,
-    hash: [u8; 32],
-  ) {
+  pub fn append_cosign(txn: &mut impl DbTxn, set: ValidatorSet, number: u64, hash: [u8; 32]) {
     CosignTransactions::send(txn, set.network, &(set.session, number, hash))
   }
 }
@@ -262,22 +256,22 @@ async fn advance_cosign_protocol_inner(
     // Using the keys of the prior block ensures this deadlock isn't reached
     let serai = serai.as_of(actual_block.header.parent_hash.into());

-    for network in serai_client::primitives::EXTERNAL_NETWORKS {
+    for network in serai_client::primitives::NETWORKS {
       // Get the latest session to have set keys
       let set_with_keys = {
-        let Some(latest_session) = serai.validator_sets().session(network.into()).await? else {
+        let Some(latest_session) = serai.validator_sets().session(network).await? else {
           continue;
         };
         let prior_session = Session(latest_session.0.saturating_sub(1));
         if serai
           .validator_sets()
-          .keys(ExternalValidatorSet { network, session: prior_session })
+          .keys(ValidatorSet { network, session: prior_session })
           .await?
           .is_some()
         {
-          ExternalValidatorSet { network, session: prior_session }
+          ValidatorSet { network, session: prior_session }
         } else {
-          let set = ExternalValidatorSet { network, session: latest_session };
+          let set = ValidatorSet { network, session: latest_session };
           if serai.validator_sets().keys(set).await?.is_none() {
             continue;
           }
@@ -286,7 +280,7 @@ async fn advance_cosign_protocol_inner(
       };

       log::debug!("{:?} will be cosigning {block}", set_with_keys.network);
-      cosigning.push((set_with_keys, in_set(key, &serai, set_with_keys.into()).await?.unwrap()));
+      cosigning.push((set_with_keys, in_set(key, &serai, set_with_keys).await?.unwrap()));
     }

     break;
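The session-selection rule in this hunk prefers the prior session while its keys are still set (a handover is in progress), and otherwise uses the latest session once its keys are set. The same rule in isolation, with keys_set as an assumed lookup closure:

fn session_to_cosign(latest: u32, keys_set: impl Fn(u32) -> bool) -> Option<u32> {
  let prior = latest.saturating_sub(1);
  if keys_set(prior) {
    // Mid-handover: the prior session's keys still sign cosigns
    Some(prior)
  } else if keys_set(latest) {
    Some(latest)
  } else {
    // No session with keys yet; skip this network
    None
  }
}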

View File

@@ -1,4 +1,4 @@
-use serai_client::primitives::ExternalNetworkId;
+use serai_client::primitives::NetworkId;

 pub use serai_db::*;
@@ -9,7 +9,7 @@ mod inner_db {
     SubstrateDb {
       NextBlock: () -> u64,
       HandledEvent: (block: [u8; 32]) -> u32,
-      BatchInstructionsHashDb: (network: ExternalNetworkId, id: u32) -> [u8; 32]
+      BatchInstructionsHashDb: (network: NetworkId, id: u32) -> [u8; 32]
     }
   );
 }

View File

@@ -6,18 +6,14 @@ use std::{
 use zeroize::Zeroizing;

-use dalek_ff_group::Ristretto;
-use ciphersuite::{group::GroupEncoding, Ciphersuite};
+use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};

 use serai_client::{
-  coins::CoinsEvent,
+  SeraiError, Block, Serai, TemporalSerai,
+  primitives::{BlockHash, EmbeddedEllipticCurve, NetworkId},
+  validator_sets::{primitives::ValidatorSet, ValidatorSetsEvent},
   in_instructions::InInstructionsEvent,
-  primitives::{BlockHash, ExternalNetworkId},
-  validator_sets::{
-    primitives::{ExternalValidatorSet, ValidatorSet},
-    ValidatorSetsEvent,
-  },
-  Block, Serai, SeraiError, TemporalSerai,
+  coins::CoinsEvent,
 };

 use serai_db::DbTxn;
@@ -56,21 +52,54 @@ async fn handle_new_set<D: Db>(
   new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
   serai: &Serai,
   block: &Block,
-  set: ExternalValidatorSet,
+  set: ValidatorSet,
 ) -> Result<(), SeraiError> {
-  if in_set(key, &serai.as_of(block.hash()), set.into())
+  if in_set(key, &serai.as_of(block.hash()), set)
     .await?
     .expect("NewSet for set which doesn't exist")
   {
     log::info!("present in set {:?}", set);

-    let set_data = {
+    let validators;
+    let mut evrf_public_keys = vec![];
+    {
       let serai = serai.as_of(block.hash());
       let serai = serai.validator_sets();
       let set_participants =
-        serai.participants(set.network.into()).await?.expect("NewSet for set which doesn't exist");
-      set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::<Vec<_>>()
+        serai.participants(set.network).await?.expect("NewSet for set which doesn't exist");
+      validators = set_participants
+        .iter()
+        .map(|(k, w)| {
+          (
+            <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut k.0.as_ref())
+              .expect("invalid key registered as participant"),
+            u16::try_from(*w).unwrap(),
+          )
+        })
+        .collect::<Vec<_>>();
+      for (validator, _) in set_participants {
+        // This is only run for external networks which always do a DKG for Serai
+        let substrate = serai
+          .embedded_elliptic_curve_key(validator, EmbeddedEllipticCurve::Embedwards25519)
+          .await?
+          .expect("Serai called NewSet on a validator without an Embedwards25519 key");
+        // `embedded_elliptic_curves` is documented to have the second entry be the
+        // network-specific curve (if it exists and is distinct from Embedwards25519)
+        let network =
+          if let Some(embedded_elliptic_curve) = set.network.embedded_elliptic_curves().get(1) {
+            serai.embedded_elliptic_curve_key(validator, *embedded_elliptic_curve).await?.expect(
+              "Serai called NewSet on a validator without the embedded key required for the network",
+            )
+          } else {
+            substrate.clone()
+          };
+        evrf_public_keys.push((
+          <[u8; 32]>::try_from(substrate)
+            .expect("validator-sets pallet accepted a key of an invalid length"),
+          network,
+        ));
+      }
     };

     let time = if let Ok(time) = block.time() {
@@ -94,7 +123,7 @@ async fn handle_new_set<D: Db>(
     const SUBSTRATE_TO_TRIBUTARY_TIME_DELAY: u64 = 120;
     let time = time + SUBSTRATE_TO_TRIBUTARY_TIME_DELAY;

-    let spec = TributarySpec::new(block.hash(), time, set, set_data);
+    let spec = TributarySpec::new(block.hash(), time, set, validators, evrf_public_keys);

     log::info!("creating new tributary for {:?}", spec.set());
@@ -135,7 +164,7 @@ async fn handle_batch_and_burns<Pro: Processors>(
   };

   let mut batch_block = HashMap::new();
-  let mut batches = HashMap::<ExternalNetworkId, Vec<u32>>::new();
+  let mut batches = HashMap::<NetworkId, Vec<u32>>::new();
   let mut burns = HashMap::new();

   let serai = serai.as_of(block.hash());
@@ -209,8 +238,8 @@ async fn handle_block<D: Db, Pro: Processors>(
   db: &mut D,
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
-  perform_slash_report: &mpsc::UnboundedSender<ExternalValidatorSet>,
-  tributary_retired: &mpsc::UnboundedSender<ExternalValidatorSet>,
+  perform_slash_report: &mpsc::UnboundedSender<ValidatorSet>,
+  tributary_retired: &mpsc::UnboundedSender<ValidatorSet>,
   processors: &Pro,
   serai: &Serai,
   block: Block,
@@ -230,8 +259,12 @@ async fn handle_block<D: Db, Pro: Processors>(
panic!("NewSet event wasn't NewSet: {new_set:?}"); panic!("NewSet event wasn't NewSet: {new_set:?}");
}; };
// If this is Serai, do nothing
// We only coordinate/process external networks // We only coordinate/process external networks
let Ok(set) = ExternalValidatorSet::try_from(set) else { continue }; if set.network == NetworkId::Serai {
continue;
}
if HandledEvent::is_unhandled(db, hash, event_id) { if HandledEvent::is_unhandled(db, hash, event_id) {
log::info!("found fresh new set event {:?}", new_set); log::info!("found fresh new set event {:?}", new_set);
let mut txn = db.txn(); let mut txn = db.txn();
@@ -286,7 +319,10 @@ async fn handle_block<D: Db, Pro: Processors>(
panic!("AcceptedHandover event wasn't AcceptedHandover: {accepted_handover:?}"); panic!("AcceptedHandover event wasn't AcceptedHandover: {accepted_handover:?}");
}; };
let Ok(set) = ExternalValidatorSet::try_from(set) else { continue }; if set.network == NetworkId::Serai {
continue;
}
if HandledEvent::is_unhandled(db, hash, event_id) { if HandledEvent::is_unhandled(db, hash, event_id) {
log::info!("found fresh accepted handover event {:?}", accepted_handover); log::info!("found fresh accepted handover event {:?}", accepted_handover);
// TODO: This isn't atomic with the event handling // TODO: This isn't atomic with the event handling
@@ -304,7 +340,10 @@ async fn handle_block<D: Db, Pro: Processors>(
panic!("SetRetired event wasn't SetRetired: {retired_set:?}"); panic!("SetRetired event wasn't SetRetired: {retired_set:?}");
}; };
let Ok(set) = ExternalValidatorSet::try_from(set) else { continue }; if set.network == NetworkId::Serai {
continue;
}
if HandledEvent::is_unhandled(db, hash, event_id) { if HandledEvent::is_unhandled(db, hash, event_id) {
log::info!("found fresh set retired event {:?}", retired_set); log::info!("found fresh set retired event {:?}", retired_set);
let mut txn = db.txn(); let mut txn = db.txn();
@@ -334,8 +373,8 @@ async fn handle_new_blocks<D: Db, Pro: Processors>(
   db: &mut D,
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
-  perform_slash_report: &mpsc::UnboundedSender<ExternalValidatorSet>,
-  tributary_retired: &mpsc::UnboundedSender<ExternalValidatorSet>,
+  perform_slash_report: &mpsc::UnboundedSender<ValidatorSet>,
+  tributary_retired: &mpsc::UnboundedSender<ValidatorSet>,
   processors: &Pro,
   serai: &Serai,
   next_block: &mut u64,
@@ -389,8 +428,8 @@ pub async fn scan_task<D: Db, Pro: Processors>(
   processors: Pro,
   serai: Arc<Serai>,
   new_tributary_spec: mpsc::UnboundedSender<TributarySpec>,
-  perform_slash_report: mpsc::UnboundedSender<ExternalValidatorSet>,
-  tributary_retired: mpsc::UnboundedSender<ExternalValidatorSet>,
+  perform_slash_report: mpsc::UnboundedSender<ValidatorSet>,
+  tributary_retired: mpsc::UnboundedSender<ValidatorSet>,
 ) {
   log::info!("scanning substrate");
   let mut next_substrate_block = NextBlock::get(&db).unwrap_or_default();
@@ -488,12 +527,9 @@ pub async fn scan_task<D: Db, Pro: Processors>(
 /// retry.
 pub(crate) async fn expected_next_batch(
   serai: &Serai,
-  network: ExternalNetworkId,
+  network: NetworkId,
 ) -> Result<u32, SeraiError> {
-  async fn expected_next_batch_inner(
-    serai: &Serai,
-    network: ExternalNetworkId,
-  ) -> Result<u32, SeraiError> {
+  async fn expected_next_batch_inner(serai: &Serai, network: NetworkId) -> Result<u32, SeraiError> {
     let serai = serai.as_of_latest_finalized_block().await?;
     let last = serai.in_instructions().last_batch_for_network(network).await?;
     Ok(if let Some(last) = last { last + 1 } else { 0 })
@@ -516,7 +552,7 @@ pub(crate) async fn expected_next_batch(
 /// This is deemed fine.
 pub(crate) async fn verify_published_batches<D: Db>(
   txn: &mut D::Transaction<'_>,
-  network: ExternalNetworkId,
+  network: NetworkId,
   optimistic_up_to: u32,
 ) -> Option<u32> {
   // TODO: Localize from MainDb to SubstrateDb

View File

@@ -4,7 +4,7 @@ use std::{
   collections::{VecDeque, HashSet, HashMap},
 };

-use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet};
+use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};

 use processor_messages::CoordinatorMessage;
@@ -20,7 +20,7 @@ use crate::{
 pub mod tributary;

 #[derive(Clone)]
-pub struct MemProcessors(pub Arc<RwLock<HashMap<ExternalNetworkId, VecDeque<CoordinatorMessage>>>>);
+pub struct MemProcessors(pub Arc<RwLock<HashMap<NetworkId, VecDeque<CoordinatorMessage>>>>);
 impl MemProcessors {
   #[allow(clippy::new_without_default)]
   pub fn new() -> MemProcessors {
@@ -30,12 +30,12 @@ impl MemProcessors {
 #[async_trait::async_trait]
 impl Processors for MemProcessors {
-  async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>) {
+  async fn send(&self, network: NetworkId, msg: impl Send + Into<CoordinatorMessage>) {
     let mut processors = self.0.write().await;
     let processor = processors.entry(network).or_insert_with(VecDeque::new);
     processor.push_back(msg.into());
   }
-  async fn recv(&self, _: ExternalNetworkId) -> Message {
+  async fn recv(&self, _: NetworkId) -> Message {
     todo!()
   }
   async fn ack(&self, _: Message) {
@@ -65,8 +65,8 @@ impl LocalP2p {
 impl P2p for LocalP2p {
   type Id = usize;

-  async fn subscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {}
-  async fn unsubscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {}
+  async fn subscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
+  async fn unsubscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}

   async fn send_raw(&self, to: Self::Id, msg: Vec<u8>) {
     let mut msg_ref = msg.as_slice();

View File

@@ -7,17 +7,12 @@ use zeroize::Zeroizing;
 use rand_core::{RngCore, CryptoRng, OsRng};

 use futures_util::{task::Poll, poll};

-use dalek_ff_group::Ristretto;
-use ciphersuite::{
-  group::{ff::Field, GroupEncoding},
-  Ciphersuite,
-};
-use sp_application_crypto::sr25519;
+use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};

 use borsh::BorshDeserialize;
 use serai_client::{
-  primitives::ExternalNetworkId,
-  validator_sets::primitives::{ExternalValidatorSet, Session},
+  primitives::NetworkId,
+  validator_sets::primitives::{Session, ValidatorSet},
 };

 use tokio::time::sleep;
@@ -51,16 +46,24 @@ pub fn new_spec<R: RngCore + CryptoRng>(
   let start_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();

-  let set = ExternalValidatorSet { session: Session(0), network: ExternalNetworkId::Bitcoin };
+  let set = ValidatorSet { session: Session(0), network: NetworkId::Bitcoin };
-  let set_participants = keys
+  let validators = keys
     .iter()
-    .map(|key| {
-      (sr25519::Public::from((<Ristretto as Ciphersuite>::generator() * **key).to_bytes()), 1)
-    })
+    .map(|key| ((<Ristretto as Ciphersuite>::generator() * **key), 1))
     .collect::<Vec<_>>();

-  let res = TributarySpec::new(serai_block, start_time, set, set_participants);
+  // Generate random eVRF keys as none of these test rely on them to have any structure
+  let mut evrf_keys = vec![];
+  for _ in 0 .. keys.len() {
+    let mut substrate = [0; 32];
+    OsRng.fill_bytes(&mut substrate);
+    let mut network = vec![0; 64];
+    OsRng.fill_bytes(&mut network);
+    evrf_keys.push((substrate, network));
+  }
+
+  let res = TributarySpec::new(serai_block, start_time, set, validators, evrf_keys);
   assert_eq!(
     TributarySpec::deserialize_reader(&mut borsh::to_vec(&res).unwrap().as_slice()).unwrap(),
     res,
View File

@@ -1,27 +1,22 @@
 use core::time::Duration;
-use std::collections::HashMap;

 use zeroize::Zeroizing;
 use rand_core::{RngCore, OsRng};

-use dalek_ff_group::Ristretto;
-use ciphersuite::{group::GroupEncoding, Ciphersuite};
+use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
 use frost::Participant;

 use sp_runtime::traits::Verify;
 use serai_client::{
-  primitives::{SeraiAddress, Signature},
-  validator_sets::primitives::{ExternalValidatorSet, KeyPair},
+  primitives::Signature,
+  validator_sets::primitives::{ValidatorSet, KeyPair},
 };

 use tokio::time::sleep;

 use serai_db::{Get, DbTxn, Db, MemDb};

-use processor_messages::{
-  key_gen::{self, KeyGenId},
-  CoordinatorMessage,
-};
+use processor_messages::{key_gen, CoordinatorMessage};

 use tributary::{TransactionTrait, Tributary};
@@ -55,44 +50,41 @@ async fn dkg_test() {
   tokio::spawn(run_tributaries(tributaries.clone()));

   let mut txs = vec![];
-  // Create DKG commitments for each key
+  // Create DKG participation for each key
   for key in &keys {
-    let attempt = 0;
-    let mut commitments = vec![0; 256];
-    OsRng.fill_bytes(&mut commitments);
-
-    let mut tx = Transaction::DkgCommitments {
-      attempt,
-      commitments: vec![commitments],
-      signed: Transaction::empty_signed(),
-    };
+    let mut participation = vec![0; 4096];
+    OsRng.fill_bytes(&mut participation);
+
+    let mut tx =
+      Transaction::DkgParticipation { participation, signed: Transaction::empty_signed() };
     tx.sign(&mut OsRng, spec.genesis(), key);
     txs.push(tx);
   }

   let block_before_tx = tributaries[0].1.tip().await;
-  // Publish all commitments but one
-  for (i, tx) in txs.iter().enumerate().skip(1) {
-    assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
-  }
-  // Wait until these are included
-  for tx in txs.iter().skip(1) {
-    wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
-  }
+  // Publish t-1 participations
+  let t = ((keys.len() * 2) / 3) + 1;
+  for (i, tx) in txs.iter().take(t - 1).enumerate() {
+    assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
+    wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
+  }

-  let expected_commitments: HashMap<_, _> = txs
+  let expected_participations = txs
     .iter()
     .enumerate()
     .map(|(i, tx)| {
-      if let Transaction::DkgCommitments { commitments, .. } = tx {
-        (Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments[0].clone())
+      if let Transaction::DkgParticipation { participation, .. } = tx {
+        CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Participation {
+          session: spec.set().session,
+          participant: Participant::new((i + 1).try_into().unwrap()).unwrap(),
+          participation: participation.clone(),
+        })
       } else {
-        panic!("txs had non-commitments");
+        panic!("txs wasn't a DkgParticipation");
       }
     })
-    .collect();
+    .collect::<Vec<_>>();
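The threshold used above is the usual 67% rule. Worked through, assuming five validators with one key share each (an assumption for illustration, not a claim about this test's exact validator count):

let n: usize = 5;
let t = ((n * 2) / 3) + 1; // (10 / 3) + 1 = 4
assert_eq!(t - 1, 3); // three participations land on-chain before the scanner is first checked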
   async fn new_processors(
     db: &mut MemDb,
@@ -121,28 +113,30 @@ async fn dkg_test() {
     processors
   }
-  // Instantiate a scanner and verify it has nothing to report
+  // Instantiate a scanner and verify it has the first two participations to report (and isn't
+  // waiting for `t`)
   let processors = new_processors(&mut dbs[0], &keys[0], &spec, &tributaries[0].1).await;
-  assert!(processors.0.read().await.is_empty());
+  assert_eq!(processors.0.read().await.get(&spec.set().network).unwrap().len(), t - 1);

-  // Publish the last commitment
+  // Publish the rest of the participations
   let block_before_tx = tributaries[0].1.tip().await;
-  assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));
-  wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
-  sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;
+  for tx in txs.iter().skip(t - 1) {
+    assert_eq!(tributaries[0].1.add_transaction(tx.clone()).await, Ok(true));
+    wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
+  }
-  // Verify the scanner emits a KeyGen::Commitments message
+  // Verify the scanner emits all KeyGen::Participations messages
   handle_new_blocks::<_, _, _, _, _, LocalP2p>(
     &mut dbs[0],
     &keys[0],
     &|_, _, _, _| async {
-      panic!("provided TX caused recognized_id to be called after Commitments")
+      panic!("provided TX caused recognized_id to be called after DkgParticipation")
     },
     &processors,
     &(),
     &|_| async {
       panic!(
-        "test tried to publish a new Tributary TX from handle_application_tx after Commitments"
+        "test tried to publish a new Tributary TX from handle_application_tx after DkgParticipation"
       )
     },
     &spec,
@@ -151,17 +145,11 @@ async fn dkg_test() {
   .await;

   {
     let mut msgs = processors.0.write().await;
-    assert_eq!(msgs.len(), 1);
     let msgs = msgs.get_mut(&spec.set().network).unwrap();
-    let mut expected_commitments = expected_commitments.clone();
-    expected_commitments.remove(&Participant::new((1).try_into().unwrap()).unwrap());
-    assert_eq!(
-      msgs.pop_front().unwrap(),
-      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
-        id: KeyGenId { session: spec.set().session, attempt: 0 },
-        commitments: expected_commitments
-      })
-    );
+    assert_eq!(msgs.len(), keys.len());
+    for expected in &expected_participations {
+      assert_eq!(&msgs.pop_front().unwrap(), expected);
+    }
     assert!(msgs.is_empty());
   }
@@ -169,169 +157,35 @@ async fn dkg_test() {
   for (i, key) in keys.iter().enumerate().skip(1) {
     let processors = new_processors(&mut dbs[i], key, &spec, &tributaries[i].1).await;
     let mut msgs = processors.0.write().await;
-    assert_eq!(msgs.len(), 1);
     let msgs = msgs.get_mut(&spec.set().network).unwrap();
-    let mut expected_commitments = expected_commitments.clone();
-    expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
-    assert_eq!(
-      msgs.pop_front().unwrap(),
-      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
-        id: KeyGenId { session: spec.set().session, attempt: 0 },
-        commitments: expected_commitments
-      })
-    );
+    assert_eq!(msgs.len(), keys.len());
+    for expected in &expected_participations {
+      assert_eq!(&msgs.pop_front().unwrap(), expected);
+    }
     assert!(msgs.is_empty());
   }

-  // Now do shares
-  let mut txs = vec![];
-  for (k, key) in keys.iter().enumerate() {
-    let attempt = 0;
-
-    let mut shares = vec![vec![]];
-    for i in 0 .. keys.len() {
-      if i != k {
-        let mut share = vec![0; 256];
-        OsRng.fill_bytes(&mut share);
-        shares.last_mut().unwrap().push(share);
-      }
-    }
-
-    let mut txn = dbs[k].txn();
-    let mut tx = Transaction::DkgShares {
-      attempt,
-      shares,
-      confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0),
-      signed: Transaction::empty_signed(),
-    };
-    txn.commit();
-    tx.sign(&mut OsRng, spec.genesis(), key);
-    txs.push(tx);
-  }
-
-  let block_before_tx = tributaries[0].1.tip().await;
-  for (i, tx) in txs.iter().enumerate().skip(1) {
-    assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
-  }
-  for tx in txs.iter().skip(1) {
-    wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
-  }
-
-  // With just 4 sets of shares, nothing should happen yet
-  handle_new_blocks::<_, _, _, _, _, LocalP2p>(
-    &mut dbs[0],
-    &keys[0],
-    &|_, _, _, _| async {
-      panic!("provided TX caused recognized_id to be called after some shares")
-    },
-    &processors,
-    &(),
-    &|_| async {
-      panic!(
-        "test tried to publish a new Tributary TX from handle_application_tx after some shares"
-      )
-    },
-    &spec,
-    &tributaries[0].1.reader(),
-  )
-  .await;
-  assert_eq!(processors.0.read().await.len(), 1);
-  assert!(processors.0.read().await[&spec.set().network].is_empty());
-
-  // Publish the final set of shares
-  let block_before_tx = tributaries[0].1.tip().await;
-  assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));
-  wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
-  sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;
-
-  // Each scanner should emit a distinct shares message
-  let shares_for = |i: usize| {
-    CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares {
-      id: KeyGenId { session: spec.set().session, attempt: 0 },
-      shares: vec![txs
-        .iter()
-        .enumerate()
-        .filter_map(|(l, tx)| {
-          if let Transaction::DkgShares { shares, .. } = tx {
-            if i == l {
-              None
-            } else {
-              let relative_i = i - (if i > l { 1 } else { 0 });
-              Some((
-                Participant::new((l + 1).try_into().unwrap()).unwrap(),
-                shares[0][relative_i].clone(),
-              ))
-            }
-          } else {
-            panic!("txs had non-shares");
-          }
-        })
-        .collect::<HashMap<_, _>>()],
-    })
-  };
-
-  // Any scanner which has handled the prior blocks should only emit the new event
-  for (i, key) in keys.iter().enumerate() {
-    handle_new_blocks::<_, _, _, _, _, LocalP2p>(
-      &mut dbs[i],
-      key,
-      &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") },
-      &processors,
-      &(),
-      &|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") },
-      &spec,
-      &tributaries[i].1.reader(),
-    )
-    .await;
-    {
-      let mut msgs = processors.0.write().await;
-      assert_eq!(msgs.len(), 1);
-      let msgs = msgs.get_mut(&spec.set().network).unwrap();
-      assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
-      assert!(msgs.is_empty());
-    }
-  }
-
-  // Yet new scanners should emit all events
-  for (i, key) in keys.iter().enumerate() {
-    let processors = new_processors(&mut MemDb::new(), key, &spec, &tributaries[i].1).await;
-    let mut msgs = processors.0.write().await;
-    assert_eq!(msgs.len(), 1);
-    let msgs = msgs.get_mut(&spec.set().network).unwrap();
-    let mut expected_commitments = expected_commitments.clone();
-    expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
-    assert_eq!(
-      msgs.pop_front().unwrap(),
-      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
-        id: KeyGenId { session: spec.set().session, attempt: 0 },
-        commitments: expected_commitments
-      })
-    );
-    assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
-    assert!(msgs.is_empty());
-  }
-
-  // Send DkgConfirmed
   let mut substrate_key = [0; 32];
   OsRng.fill_bytes(&mut substrate_key);
   let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()];
   OsRng.fill_bytes(&mut network_key);
-  let key_pair =
-    KeyPair(serai_client::Public::from(substrate_key), network_key.try_into().unwrap());
+  let key_pair = KeyPair(serai_client::Public(substrate_key), network_key.try_into().unwrap());

   let mut txs = vec![];
   for (i, key) in keys.iter().enumerate() {
-    let attempt = 0;
     let mut txn = dbs[i].txn();
-    let share =
-      crate::tributary::generated_key_pair::<MemDb>(&mut txn, key, &spec, &key_pair, 0).unwrap();
-    txn.commit();

-    let mut tx = Transaction::DkgConfirmed {
+    // Claim we've generated the key pair
+    crate::tributary::generated_key_pair::<MemDb>(&mut txn, spec.genesis(), &key_pair);
+
+    // Publish the nonces
+    let attempt = 0;
+    let mut tx = Transaction::DkgConfirmationNonces {
       attempt,
-      confirmation_share: share,
+      confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0),
       signed: Transaction::empty_signed(),
     };
+    txn.commit();
     tx.sign(&mut OsRng, spec.genesis(), key);
     txs.push(tx);
   }
@@ -343,6 +197,35 @@ async fn dkg_test() {
     wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
   }

+  // This should not cause any new processor event as the processor doesn't handle DKG confirming
+  for (i, key) in keys.iter().enumerate() {
+    handle_new_blocks::<_, _, _, _, _, LocalP2p>(
+      &mut dbs[i],
+      key,
+      &|_, _, _, _| async {
+        panic!("provided TX caused recognized_id to be called after DkgConfirmationNonces")
+      },
+      &processors,
+      &(),
+      // The Tributary handler should publish ConfirmationShare itself after ConfirmationNonces
+      &|tx| async { assert_eq!(tributaries[i].1.add_transaction(tx).await, Ok(true)) },
+      &spec,
+      &tributaries[i].1.reader(),
+    )
+    .await;
+    {
+      assert!(processors.0.read().await.get(&spec.set().network).unwrap().is_empty());
+    }
+  }
+
+  // Yet once these TXs are on-chain, the tributary should itself publish the confirmation shares
+  // This means in the block after the next block, the keys should be set onto Serai
+  // Sleep twice as long as two blocks, in case there's some stability issue
+  sleep(Duration::from_secs(
+    2 * 2 * u64::from(Tributary::<MemDb, Transaction, LocalP2p>::block_time()),
+  ))
+  .await;
+
   struct CheckPublishSetKeys {
     spec: TributarySpec,
     key_pair: KeyPair,
@@ -352,20 +235,25 @@ async fn dkg_test() {
     async fn publish_set_keys(
       &self,
       _db: &(impl Sync + Get),
-      set: ExternalValidatorSet,
-      removed: Vec<SeraiAddress>,
+      set: ValidatorSet,
       key_pair: KeyPair,
+      signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
       signature: Signature,
     ) {
       assert_eq!(set, self.spec.set());
-      assert!(removed.is_empty());
       assert_eq!(self.key_pair, key_pair);
       assert!(signature.verify(
-        &*serai_client::validator_sets::primitives::set_keys_message(&set, &[], &key_pair),
-        &serai_client::Public::from(
-          dkg_musig::musig_key_vartime::<Ristretto>(
-            serai_client::validator_sets::primitives::musig_context(set.into()),
-            &self.spec.validators().into_iter().map(|(validator, _)| validator).collect::<Vec<_>>()
+        &*serai_client::validator_sets::primitives::set_keys_message(&set, &key_pair),
+        &serai_client::Public(
+          frost::dkg::musig::musig_key::<Ristretto>(
+            &serai_client::validator_sets::primitives::musig_context(set),
+            &self
+              .spec
+              .validators()
+              .into_iter()
+              .zip(signature_participants)
+              .filter_map(|((validator, _), included)| included.then_some(validator))
+              .collect::<Vec<_>>()
           )
           .unwrap()
           .to_bytes()
View File

@@ -2,13 +2,12 @@ use core::fmt::Debug;
 use rand_core::{RngCore, OsRng};

-use dalek_ff_group::Ristretto;
-use ciphersuite::{group::Group, Ciphersuite};
+use ciphersuite::{group::Group, Ciphersuite, Ristretto};

 use scale::{Encode, Decode};
 use serai_client::{
-  primitives::{SeraiAddress, Signature},
-  validator_sets::primitives::{ExternalValidatorSet, KeyPair, MAX_KEY_SHARES_PER_SET},
+  primitives::Signature,
+  validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet, KeyPair},
 };

 use processor_messages::coordinator::SubstrateSignableId;
@@ -32,9 +31,9 @@ impl PublishSeraiTransaction for () {
   async fn publish_set_keys(
     &self,
     _db: &(impl Sync + serai_db::Get),
-    _set: ExternalValidatorSet,
-    _removed: Vec<SeraiAddress>,
+    _set: ValidatorSet,
     _key_pair: KeyPair,
+    _signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
     _signature: Signature,
   ) {
     panic!("publish_set_keys was called in test")
@@ -144,84 +143,34 @@ fn serialize_sign_data() {
 #[test]
 fn serialize_transaction() {
-  test_read_write(&Transaction::RemoveParticipantDueToDkg {
+  test_read_write(&Transaction::RemoveParticipant {
     participant: <Ristretto as Ciphersuite>::G::random(&mut OsRng),
     signed: random_signed_with_nonce(&mut OsRng, 0),
   });

-  {
-    let mut commitments = vec![random_vec(&mut OsRng, 512)];
-    for _ in 0 .. (OsRng.next_u64() % 100) {
-      let mut temp = commitments[0].clone();
-      OsRng.fill_bytes(&mut temp);
-      commitments.push(temp);
-    }
-    test_read_write(&Transaction::DkgCommitments {
-      attempt: random_u32(&mut OsRng),
-      commitments,
-      signed: random_signed_with_nonce(&mut OsRng, 0),
-    });
-  }
-
-  {
-    // This supports a variable share length, and variable amount of sent shares, yet share length
-    // and sent shares is expected to be constant among recipients
-    let share_len = usize::try_from((OsRng.next_u64() % 512) + 1).unwrap();
-    let amount_of_shares = usize::try_from((OsRng.next_u64() % 3) + 1).unwrap();
-    // Create a valid vec of shares
-    let mut shares = vec![];
-    // Create up to 150 participants
-    for _ in 0 ..= (OsRng.next_u64() % 150) {
-      // Give each sender multiple shares
-      let mut sender_shares = vec![];
-      for _ in 0 .. amount_of_shares {
-        let mut share = vec![0; share_len];
-        OsRng.fill_bytes(&mut share);
-        sender_shares.push(share);
-      }
-      shares.push(sender_shares);
-    }
-
-    test_read_write(&Transaction::DkgShares {
-      attempt: random_u32(&mut OsRng),
-      shares,
-      confirmation_nonces: {
-        let mut nonces = [0; 64];
-        OsRng.fill_bytes(&mut nonces);
-        nonces
-      },
-      signed: random_signed_with_nonce(&mut OsRng, 1),
-    });
-  }
-
-  for i in 0 .. 2 {
-    test_read_write(&Transaction::InvalidDkgShare {
-      attempt: random_u32(&mut OsRng),
-      accuser: frost::Participant::new(
-        u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
-      )
-      .unwrap(),
-      faulty: frost::Participant::new(
-        u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
-      )
-      .unwrap(),
-      blame: if i == 0 {
-        None
-      } else {
-        Some(random_vec(&mut OsRng, 500)).filter(|blame| !blame.is_empty())
-      },
-      signed: random_signed_with_nonce(&mut OsRng, 2),
-    });
-  }
-
-  test_read_write(&Transaction::DkgConfirmed {
+  test_read_write(&Transaction::DkgParticipation {
+    participation: random_vec(&mut OsRng, 4096),
+    signed: random_signed_with_nonce(&mut OsRng, 0),
+  });
+
+  test_read_write(&Transaction::DkgConfirmationNonces {
+    attempt: random_u32(&mut OsRng),
+    confirmation_nonces: {
+      let mut nonces = [0; 64];
+      OsRng.fill_bytes(&mut nonces);
+      nonces
+    },
+    signed: random_signed_with_nonce(&mut OsRng, 0),
+  });
+
+  test_read_write(&Transaction::DkgConfirmationShare {
     attempt: random_u32(&mut OsRng),
     confirmation_share: {
       let mut share = [0; 32];
       OsRng.fill_bytes(&mut share);
       share
     },
-    signed: random_signed_with_nonce(&mut OsRng, 2),
+    signed: random_signed_with_nonce(&mut OsRng, 1),
   });

   {

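Editor's note: the `test_read_write` helper these cases rely on is, in essence, a serialization round-trip assertion. A minimal sketch, assuming tributary's `ReadWrite` trait (with `read` and a provided `serialize`); the bound set here is illustrative:

use tributary::ReadWrite;

// Round-trip: serializing then reading back must yield an equal value.
fn test_read_write<T: ReadWrite + PartialEq + core::fmt::Debug>(value: &T) {
  assert_eq!(value, &T::read::<&[u8]>(&mut value.serialize().as_ref()).unwrap());
}
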

@@ -3,8 +3,7 @@ use std::{sync::Arc, collections::HashSet};
  use rand_core::OsRng;

- use dalek_ff_group::Ristretto;
- use ciphersuite::{group::GroupEncoding, Ciphersuite};
+ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};

  use tokio::{
    sync::{mpsc, broadcast},
@@ -30,7 +29,7 @@ async fn sync_test() {
  let mut keys = new_keys(&mut OsRng);
  let spec = new_spec(&mut OsRng, &keys);
  // Ensure this can have a node fail
- assert!(spec.n(&[]) > spec.t());
+ assert!(spec.n() > spec.t());

  let mut tributaries = new_tributaries(&keys, &spec)
    .await
@@ -143,7 +142,7 @@ async fn sync_test() {
  // Because only `t` validators are used in a commit, take n - t nodes offline
  // leaving only `t` nodes. Which should force it to participate in the consensus
  // of next blocks.
- let spares = usize::from(spec.n(&[]) - spec.t());
+ let spares = usize::from(spec.n() - spec.t());
  for thread in p2p_threads.iter().take(spares) {
    thread.abort();
  }


@@ -37,15 +37,14 @@ async fn tx_test() {
    usize::try_from(OsRng.next_u64() % u64::try_from(tributaries.len()).unwrap()).unwrap();
  let key = keys[sender].clone();

- let attempt = 0;
- let mut commitments = vec![0; 256];
- OsRng.fill_bytes(&mut commitments);
-
- // Create the TX with a null signature so we can get its sig hash
  let block_before_tx = tributaries[sender].1.tip().await;
- let mut tx = Transaction::DkgCommitments {
-   attempt,
-   commitments: vec![commitments.clone()],
+ // Create the TX with a null signature so we can get its sig hash
+ let mut tx = Transaction::DkgParticipation {
+   participation: {
+     let mut participation = vec![0; 4096];
+     OsRng.fill_bytes(&mut participation);
+     participation
+   },
    signed: Transaction::empty_signed(),
  };
  tx.sign(&mut OsRng, spec.genesis(), &key);


@@ -3,11 +3,10 @@ use std::collections::HashMap;
  use scale::Encode;
  use borsh::{BorshSerialize, BorshDeserialize};

- use dalek_ff_group::Ristretto;
- use ciphersuite::{group::GroupEncoding, Ciphersuite};
+ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
  use frost::Participant;

- use serai_client::validator_sets::primitives::{KeyPair, ExternalValidatorSet};
+ use serai_client::validator_sets::primitives::{KeyPair, ValidatorSet};

  use processor_messages::coordinator::SubstrateSignableId;
@@ -19,7 +18,6 @@ use crate::tributary::{Label, Transaction};
  #[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
  pub enum Topic {
-   Dkg,
    DkgConfirmation,
    SubstrateSign(SubstrateSignableId),
    Sign([u8; 32]),
@@ -47,15 +45,13 @@ pub enum Accumulation {
  create_db!(
    Tributary {
      SeraiBlockNumber: (hash: [u8; 32]) -> u64,
-     SeraiDkgCompleted: (spec: ExternalValidatorSet) -> [u8; 32],
+     SeraiDkgCompleted: (set: ValidatorSet) -> [u8; 32],

      TributaryBlockNumber: (block: [u8; 32]) -> u32,
      LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32],
      // TODO: Revisit the point of this
      FatalSlashes: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
-     RemovedAsOfDkgAttempt: (genesis: [u8; 32], attempt: u32) -> Vec<[u8; 32]>,
-     OfflineDuringDkg: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
      // TODO: Combine these two
      FatallySlashed: (genesis: [u8; 32], account: [u8; 32]) -> (),
      SlashPoints: (genesis: [u8; 32], account: [u8; 32]) -> u32,
@@ -68,11 +64,9 @@ create_db!(
      DataReceived: (genesis: [u8; 32], data_spec: &DataSpecification) -> u16,
      DataDb: (genesis: [u8; 32], data_spec: &DataSpecification, signer_bytes: &[u8; 32]) -> Vec<u8>,

-     DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec<u8>,
+     DkgParticipation: (genesis: [u8; 32], from: u16) -> Vec<u8>,
      ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
-     DkgKeyPair: (genesis: [u8; 32], attempt: u32) -> KeyPair,
-     KeyToDkgAttempt: (key: [u8; 32]) -> u32,
-     DkgLocallyCompleted: (genesis: [u8; 32]) -> (),
+     DkgKeyPair: (genesis: [u8; 32]) -> KeyPair,

      PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>,
@@ -81,7 +75,7 @@ create_db!(
      SlashReports: (genesis: [u8; 32], signer: [u8; 32]) -> Vec<u32>,
      SlashReported: (genesis: [u8; 32]) -> u16,
      SlashReportCutOff: (genesis: [u8; 32]) -> u64,
-     SlashReport: (set: ExternalValidatorSet) -> Vec<([u8; 32], u32)>,
+     SlashReport: (set: ValidatorSet) -> Vec<([u8; 32], u32)>,
    }
  );
@@ -124,12 +118,12 @@ impl AttemptDb {
    pub fn attempt(getter: &impl Get, genesis: [u8; 32], topic: Topic) -> Option<u32> {
      let attempt = Self::get(getter, genesis, &topic);
-     // Don't require explicit recognition of the Dkg topic as it starts when the chain does
+     // Don't require explicit recognition of the DkgConfirmation topic as it starts when the chain
+     // does
      // Don't require explicit recognition of the SlashReport topic as it isn't a DoS risk and it
      // should always happen (eventually)
      if attempt.is_none() &&
-       ((topic == Topic::Dkg) ||
-         (topic == Topic::DkgConfirmation) ||
+       ((topic == Topic::DkgConfirmation) ||
          (topic == Topic::SubstrateSign(SubstrateSignableId::SlashReport)))
      {
        return Some(0);
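
Editor's note: a minimal sketch of the rule above (stand-in logic, not the crate's API): the two always-live topics resolve to attempt 0 even before explicit recognition, while every other topic stays unrecognized until an attempt is recorded:

fn attempt_or_default(recognized: Option<u32>, always_live_topic: bool) -> Option<u32> {
  // DkgConfirmation and SlashReport are "always live": default their attempt to 0
  recognized.or(if always_live_topic { Some(0) } else { None })
}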
@@ -156,16 +150,12 @@ impl ReattemptDb {
    // 5 minutes for attempts 0 ..= 2, 10 minutes for attempts 3 ..= 5, 15 minutes for attempts > 5
    // Assumes no event will take longer than 15 minutes, yet grows the time in case there are
    // network bandwidth issues
-   let mut reattempt_delay = BASE_REATTEMPT_DELAY *
+   let reattempt_delay = BASE_REATTEMPT_DELAY *
      ((AttemptDb::attempt(txn, genesis, topic)
        .expect("scheduling re-attempt for unknown topic") /
        3) +
        1)
      .min(3);
-   // Allow more time for DKGs since they have an extra round and much more data
-   if matches!(topic, Topic::Dkg) {
-     reattempt_delay *= 4;
-   }
    let upon_block = current_block_number + reattempt_delay;

    let mut reattempts = Self::get(txn, genesis, upon_block).unwrap_or(vec![]);
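
Editor's note: the resulting schedule is one base delay for attempts 0 through 2, two for 3 through 5, and a cap of three thereafter. A minimal standalone sketch; the constant's value here is illustrative, not the crate's:

const BASE_REATTEMPT_DELAY: u32 = 100; // illustrative block count

fn reattempt_delay(attempt: u32) -> u32 {
  BASE_REATTEMPT_DELAY * ((attempt / 3) + 1).min(3)
}

fn main() {
  assert_eq!(reattempt_delay(2), BASE_REATTEMPT_DELAY);      // "5 minutes"
  assert_eq!(reattempt_delay(5), 2 * BASE_REATTEMPT_DELAY);  // "10 minutes"
  assert_eq!(reattempt_delay(9), 3 * BASE_REATTEMPT_DELAY);  // capped at "15 minutes"
}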


@@ -4,17 +4,16 @@ use std::collections::HashMap;
  use zeroize::Zeroizing;
  use rand_core::OsRng;

- use dalek_ff_group::Ristretto;
- use ciphersuite::{group::GroupEncoding, Ciphersuite};
+ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
  use frost::dkg::Participant;

  use scale::{Encode, Decode};
- use serai_client::validator_sets::primitives::KeyPair;
+ use serai_client::{Signature, validator_sets::primitives::KeyPair};

  use tributary::{Signed, TransactionKind, TransactionTrait};

  use processor_messages::{
-   key_gen::{self, KeyGenId},
+   key_gen::self,
    coordinator::{self, SubstrateSignableId, SubstrateSignId},
    sign::{self, SignId},
  };
@@ -39,33 +38,20 @@ pub fn dkg_confirmation_nonces(
    txn: &mut impl DbTxn,
    attempt: u32,
  ) -> [u8; 64] {
-   DkgConfirmer::new(key, spec, txn, attempt)
-     .expect("getting DKG confirmation nonces for unknown attempt")
-     .preprocess()
+   DkgConfirmer::new(key, spec, txn, attempt).preprocess()
  }

  pub fn generated_key_pair<D: Db>(
    txn: &mut D::Transaction<'_>,
-   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-   spec: &TributarySpec,
+   genesis: [u8; 32],
    key_pair: &KeyPair,
-   attempt: u32,
- ) -> Result<[u8; 32], Participant> {
-   DkgKeyPair::set(txn, spec.genesis(), attempt, key_pair);
-   KeyToDkgAttempt::set(txn, key_pair.0 .0, &attempt);
-   let preprocesses = ConfirmationNonces::get(txn, spec.genesis(), attempt).unwrap();
-   DkgConfirmer::new(key, spec, txn, attempt)
-     .expect("claiming to have generated a key pair for an unrecognized attempt")
-     .share(preprocesses, key_pair)
+ ) {
+   DkgKeyPair::set(txn, genesis, key_pair);
  }

- fn unflatten(
-   spec: &TributarySpec,
-   removed: &[<Ristretto as Ciphersuite>::G],
-   data: &mut HashMap<Participant, Vec<u8>>,
- ) {
+ fn unflatten(spec: &TributarySpec, data: &mut HashMap<Participant, Vec<u8>>) {
    for (validator, _) in spec.validators() {
-     let Some(range) = spec.i(removed, validator) else { continue };
+     let Some(range) = spec.i(validator) else { continue };
      let Some(all_segments) = data.remove(&range.start) else {
        continue;
      };
@@ -89,7 +75,6 @@ impl<
  {
    fn accumulate(
      &mut self,
-     removed: &[<Ristretto as Ciphersuite>::G],
      data_spec: &DataSpecification,
      signer: <Ristretto as Ciphersuite>::G,
      data: &Vec<u8>,
@@ -100,10 +85,7 @@ impl<
      panic!("accumulating data for a participant multiple times");
    }
    let signer_shares = {
-     let Some(signer_i) = self.spec.i(removed, signer) else {
-       log::warn!("accumulating data from {} who was removed", hex::encode(signer.to_bytes()));
-       return Accumulation::NotReady;
-     };
+     let signer_i = self.spec.i(signer).expect("transaction signer wasn't a member of the set");
      u16::from(signer_i.end) - u16::from(signer_i.start)
    };
@@ -116,11 +98,7 @@ impl<
      // If 2/3rds of the network participated in this preprocess, queue it for an automatic
      // re-attempt
-     // DkgConfirmation doesn't have a re-attempt as it's just an extension for Dkg
-     if (data_spec.label == Label::Preprocess) &&
-       received_range.contains(&self.spec.t()) &&
-       (data_spec.topic != Topic::DkgConfirmation)
-     {
+     if (data_spec.label == Label::Preprocess) && received_range.contains(&self.spec.t()) {
        // Double check the attempt on this entry, as we don't want to schedule a re-attempt if this
        // is an old entry
        // This is an assert, not part of the if check, as old data shouldn't be here in the first
@@ -130,10 +108,7 @@ impl<
      }

      // If we have all the needed commitments/preprocesses/shares, tell the processor
-     let needs_everyone =
-       (data_spec.topic == Topic::Dkg) || (data_spec.topic == Topic::DkgConfirmation);
-     let needed = if needs_everyone { self.spec.n(removed) } else { self.spec.t() };
-     if received_range.contains(&needed) {
+     if received_range.contains(&self.spec.t()) {
        log::debug!(
          "accumulation for entry {:?} attempt #{} is ready",
          &data_spec.topic,
@@ -142,7 +117,7 @@ impl<
        let mut data = HashMap::new();
        for validator in self.spec.validators().iter().map(|validator| validator.0) {
-         let Some(i) = self.spec.i(removed, validator) else { continue };
+         let Some(i) = self.spec.i(validator) else { continue };
          data.insert(
            i.start,
            if let Some(data) = DataDb::get(self.txn, genesis, data_spec, &validator.to_bytes()) {
@@ -153,10 +128,10 @@ impl<
          );
        }
-       assert_eq!(data.len(), usize::from(needed));
+       assert_eq!(data.len(), usize::from(self.spec.t()));

        // Remove our own piece of data, if we were involved
-       if let Some(i) = self.spec.i(removed, Ristretto::generator() * self.our_key.deref()) {
+       if let Some(i) = self.spec.i(Ristretto::generator() * self.our_key.deref()) {
          if data.remove(&i.start).is_some() {
            return Accumulation::Ready(DataSet::Participating(data));
          }
@@ -168,7 +143,6 @@ impl<
    fn handle_data(
      &mut self,
-     removed: &[<Ristretto as Ciphersuite>::G],
      data_spec: &DataSpecification,
      bytes: &Vec<u8>,
      signed: &Signed,
@@ -214,21 +188,15 @@ impl<
      // TODO: If this is shares, we need to check they are part of the selected signing set

      // Accumulate this data
-     self.accumulate(removed, data_spec, signed.signer, bytes)
+     self.accumulate(data_spec, signed.signer, bytes)
    }

    fn check_sign_data_len(
      &mut self,
-     removed: &[<Ristretto as Ciphersuite>::G],
      signer: <Ristretto as Ciphersuite>::G,
      len: usize,
    ) -> Result<(), ()> {
-     let Some(signer_i) = self.spec.i(removed, signer) else {
-       // TODO: Ensure processor doesn't so participate/check how it handles removals for being
-       // offline
-       self.fatal_slash(signer.to_bytes(), "signer participated despite being removed");
-       Err(())?
-     };
+     let signer_i = self.spec.i(signer).expect("signer wasn't a member of the set");
      if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) {
        self.fatal_slash(
          signer.to_bytes(),
@@ -255,12 +223,9 @@ impl<
      }

      match tx {
-       Transaction::RemoveParticipantDueToDkg { participant, signed } => {
-         if self.spec.i(&[], participant).is_none() {
-           self.fatal_slash(
-             participant.to_bytes(),
-             "RemoveParticipantDueToDkg vote for non-validator",
-           );
+       Transaction::RemoveParticipant { participant, signed } => {
+         if self.spec.i(participant).is_none() {
+           self.fatal_slash(participant.to_bytes(), "RemoveParticipant vote for non-validator");
            return;
          }
@@ -275,268 +240,106 @@ impl<
let prior_votes = VotesToRemove::get(self.txn, genesis, participant).unwrap_or(0); let prior_votes = VotesToRemove::get(self.txn, genesis, participant).unwrap_or(0);
let signer_votes = let signer_votes =
self.spec.i(&[], signed.signer).expect("signer wasn't a validator for this network?"); self.spec.i(signed.signer).expect("signer wasn't a validator for this network?");
let new_votes = prior_votes + u16::from(signer_votes.end) - u16::from(signer_votes.start); let new_votes = prior_votes + u16::from(signer_votes.end) - u16::from(signer_votes.start);
VotesToRemove::set(self.txn, genesis, participant, &new_votes); VotesToRemove::set(self.txn, genesis, participant, &new_votes);
if ((prior_votes + 1) ..= new_votes).contains(&self.spec.t()) { if ((prior_votes + 1) ..= new_votes).contains(&self.spec.t()) {
self.fatal_slash(participant, "RemoveParticipantDueToDkg vote") self.fatal_slash(participant, "RemoveParticipant vote")
} }
} }
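
Editor's note: the range check above fires exactly once, on the vote which moves the tally from below `t` to `t` or beyond, even when one signer carries several key shares. A minimal standalone sketch:

fn crossed_threshold(prior_votes: u16, new_votes: u16, t: u16) -> bool {
  // True iff t lies within the votes added by this transaction
  ((prior_votes + 1) ..= new_votes).contains(&t)
}

fn main() {
  assert!(crossed_threshold(2, 4, 3)); // this vote crossed t = 3
  assert!(!crossed_threshold(3, 5, 3)); // t was already met beforehand
}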
-     Transaction::DkgCommitments { attempt, commitments, signed } => {
-       let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
-         self.fatal_slash(signed.signer.to_bytes(), "DkgCommitments with an unrecognized attempt");
-         return;
-       };
-       let Ok(()) = self.check_sign_data_len(&removed, signed.signer, commitments.len()) else {
-         return;
-       };
-       let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Preprocess, attempt };
-       match self.handle_data(&removed, &data_spec, &commitments.encode(), &signed) {
-         Accumulation::Ready(DataSet::Participating(mut commitments)) => {
-           log::info!("got all DkgCommitments for {}", hex::encode(genesis));
-           unflatten(self.spec, &removed, &mut commitments);
-           self
-             .processors
-             .send(
-               self.spec.set().network,
-               key_gen::CoordinatorMessage::Commitments {
-                 id: KeyGenId { session: self.spec.set().session, attempt },
-                 commitments,
-               },
-             )
-             .await;
-         }
-         Accumulation::Ready(DataSet::NotParticipating) => {
-           assert!(
-             removed.contains(&(Ristretto::generator() * self.our_key.deref())),
-             "NotParticipating in a DkgCommitments we weren't removed for"
-           );
-         }
-         Accumulation::NotReady => {}
-       }
-     }
-
-     Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => {
-       let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
-         self.fatal_slash(signed.signer.to_bytes(), "DkgShares with an unrecognized attempt");
-         return;
-       };
-       let not_participating = removed.contains(&(Ristretto::generator() * self.our_key.deref()));
-       let Ok(()) = self.check_sign_data_len(&removed, signed.signer, shares.len()) else {
-         return;
-       };
-       let Some(sender_i) = self.spec.i(&removed, signed.signer) else {
-         self.fatal_slash(
-           signed.signer.to_bytes(),
-           "DkgShares for a DKG they aren't participating in",
-         );
-         return;
-       };
-       let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start);
-       for shares in &shares {
-         if shares.len() != (usize::from(self.spec.n(&removed) - sender_is_len)) {
-           self.fatal_slash(signed.signer.to_bytes(), "invalid amount of DKG shares");
-           return;
-         }
-       }
-
-       // Save each share as needed for blame
-       for (from_offset, shares) in shares.iter().enumerate() {
-         let from =
-           Participant::new(u16::from(sender_i.start) + u16::try_from(from_offset).unwrap())
-             .unwrap();
-         for (to_offset, share) in shares.iter().enumerate() {
-           // 0-indexed (the enumeration) to 1-indexed (Participant)
-           let mut to = u16::try_from(to_offset).unwrap() + 1;
-           // Adjust for the omission of the sender's own shares
-           if to >= u16::from(sender_i.start) {
-             to += u16::from(sender_i.end) - u16::from(sender_i.start);
-           }
-           let to = Participant::new(to).unwrap();
-           DkgShare::set(self.txn, genesis, from.into(), to.into(), share);
-         }
-       }
-
-       // Filter down to only our share's bytes for handle
-       let our_shares = if let Some(our_i) =
-         self.spec.i(&removed, Ristretto::generator() * self.our_key.deref())
-       {
-         if sender_i == our_i {
-           vec![]
-         } else {
-           // 1-indexed to 0-indexed
-           let mut our_i_pos = u16::from(our_i.start) - 1;
-           // Handle the omission of the sender's own data
-           if u16::from(our_i.start) > u16::from(sender_i.start) {
-             our_i_pos -= sender_is_len;
-           }
-           let our_i_pos = usize::from(our_i_pos);
-           shares
-             .iter_mut()
-             .map(|shares| {
-               shares
-                 .drain(
-                   our_i_pos ..
-                     (our_i_pos + usize::from(u16::from(our_i.end) - u16::from(our_i.start))),
-                 )
-                 .collect::<Vec<_>>()
-             })
-             .collect()
-         }
-       } else {
-         assert!(
-           not_participating,
-           "we didn't have an i while handling DkgShares we weren't removed for"
-         );
-         // Since we're not participating, simply save vec![] for our shares
-         vec![]
-       };
-       // Drop shares as it's presumably been mutated into invalidity
-       drop(shares);
-
-       let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt };
-       let encoded_data = (confirmation_nonces.to_vec(), our_shares.encode()).encode();
-       match self.handle_data(&removed, &data_spec, &encoded_data, &signed) {
-         Accumulation::Ready(DataSet::Participating(confirmation_nonces_and_shares)) => {
-           log::info!("got all DkgShares for {}", hex::encode(genesis));
-
-           let mut confirmation_nonces = HashMap::new();
-           let mut shares = HashMap::new();
-           for (participant, confirmation_nonces_and_shares) in confirmation_nonces_and_shares {
-             let (these_confirmation_nonces, these_shares) =
-               <(Vec<u8>, Vec<u8>)>::decode(&mut confirmation_nonces_and_shares.as_slice())
-                 .unwrap();
-             confirmation_nonces.insert(participant, these_confirmation_nonces);
-             shares.insert(participant, these_shares);
-           }
-           ConfirmationNonces::set(self.txn, genesis, attempt, &confirmation_nonces);
-
-           // shares is a HashMap<Participant, Vec<Vec<Vec<u8>>>>, with the values representing:
-           // - Each of the sender's shares
-           // - Each of the our shares
-           // - Each share
-           // We need a Vec<HashMap<Participant, Vec<u8>>>, with the outer being each of ours
-           let mut expanded_shares = vec![];
-           for (sender_start_i, shares) in shares {
-             let shares: Vec<Vec<Vec<u8>>> = Vec::<_>::decode(&mut shares.as_slice()).unwrap();
-             for (sender_i_offset, our_shares) in shares.into_iter().enumerate() {
-               for (our_share_i, our_share) in our_shares.into_iter().enumerate() {
-                 if expanded_shares.len() <= our_share_i {
-                   expanded_shares.push(HashMap::new());
-                 }
-                 expanded_shares[our_share_i].insert(
-                   Participant::new(
-                     u16::from(sender_start_i) + u16::try_from(sender_i_offset).unwrap(),
-                   )
-                   .unwrap(),
-                   our_share,
-                 );
-               }
-             }
-           }
-           self
-             .processors
-             .send(
-               self.spec.set().network,
-               key_gen::CoordinatorMessage::Shares {
-                 id: KeyGenId { session: self.spec.set().session, attempt },
-                 shares: expanded_shares,
-               },
-             )
-             .await;
-         }
-         Accumulation::Ready(DataSet::NotParticipating) => {
-           assert!(not_participating, "NotParticipating in a DkgShares we weren't removed for");
-         }
-         Accumulation::NotReady => {}
-       }
-     }
-
-     Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => {
-       let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
-         self
-           .fatal_slash(signed.signer.to_bytes(), "InvalidDkgShare with an unrecognized attempt");
-         return;
-       };
-       let Some(range) = self.spec.i(&removed, signed.signer) else {
-         self.fatal_slash(
-           signed.signer.to_bytes(),
-           "InvalidDkgShare for a DKG they aren't participating in",
-         );
-         return;
-       };
-       if !range.contains(&accuser) {
-         self.fatal_slash(
-           signed.signer.to_bytes(),
-           "accused with a Participant index which wasn't theirs",
-         );
-         return;
-       }
-       if range.contains(&faulty) {
-         self.fatal_slash(signed.signer.to_bytes(), "accused self of having an InvalidDkgShare");
-         return;
-       }
-       let Some(share) = DkgShare::get(self.txn, genesis, accuser.into(), faulty.into()) else {
-         self.fatal_slash(
-           signed.signer.to_bytes(),
-           "InvalidDkgShare had a non-existent faulty participant",
-         );
-         return;
-       };
+     Transaction::DkgParticipation { participation, signed } => {
+       // Send the participation to the processor
        self
          .processors
          .send(
            self.spec.set().network,
-           key_gen::CoordinatorMessage::VerifyBlame {
-             id: KeyGenId { session: self.spec.set().session, attempt },
-             accuser,
-             accused: faulty,
-             share,
-             blame,
+           key_gen::CoordinatorMessage::Participation {
+             session: self.spec.set().session,
+             participant: self
+               .spec
+               .i(signed.signer)
+               .expect("signer wasn't a validator for this network?")
+               .start,
+             participation,
            },
          )
          .await;
      }

-     Transaction::DkgConfirmed { attempt, confirmation_share, signed } => {
-       let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
-         self.fatal_slash(signed.signer.to_bytes(), "DkgConfirmed with an unrecognized attempt");
-         return;
-       };
+     Transaction::DkgConfirmationNonces { attempt, confirmation_nonces, signed } => {
+       let data_spec =
+         DataSpecification { topic: Topic::DkgConfirmation, label: Label::Preprocess, attempt };
+       match self.handle_data(&data_spec, &confirmation_nonces.to_vec(), &signed) {
+         Accumulation::Ready(DataSet::Participating(confirmation_nonces)) => {
+           log::info!(
+             "got all DkgConfirmationNonces for {}, attempt {attempt}",
+             hex::encode(genesis)
+           );
+           ConfirmationNonces::set(self.txn, genesis, attempt, &confirmation_nonces);
+
+           // Send the expected DkgConfirmationShare
+           // TODO: Slight race condition here due to set, publish tx, then commit txn
+           let key_pair = DkgKeyPair::get(self.txn, genesis)
+             .expect("participating in confirming key we don't have");
+           let mut tx = match DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt)
+             .share(confirmation_nonces, &key_pair)
+           {
+             Ok(confirmation_share) => Transaction::DkgConfirmationShare {
+               attempt,
+               confirmation_share,
+               signed: Transaction::empty_signed(),
+             },
+             Err(participant) => Transaction::RemoveParticipant {
+               participant: self.spec.reverse_lookup_i(participant).unwrap(),
+               signed: Transaction::empty_signed(),
+             },
+           };
+           tx.sign(&mut OsRng, genesis, self.our_key);
+           self.publish_tributary_tx.publish_tributary_tx(tx).await;
+         }
+         Accumulation::Ready(DataSet::NotParticipating) | Accumulation::NotReady => {}
+       }
+     }
+
+     Transaction::DkgConfirmationShare { attempt, confirmation_share, signed } => {
        let data_spec =
          DataSpecification { topic: Topic::DkgConfirmation, label: Label::Share, attempt };
-       match self.handle_data(&removed, &data_spec, &confirmation_share.to_vec(), &signed) {
+       match self.handle_data(&data_spec, &confirmation_share.to_vec(), &signed) {
          Accumulation::Ready(DataSet::Participating(shares)) => {
-           log::info!("got all DkgConfirmed for {}", hex::encode(genesis));
-
-           let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
-             panic!(
-               "DkgConfirmed for everyone yet didn't have the removed parties for this attempt",
-             );
-           };
+           log::info!(
+             "got all DkgConfirmationShare for {}, attempt {attempt}",
+             hex::encode(genesis)
+           );
            let preprocesses = ConfirmationNonces::get(self.txn, genesis, attempt).unwrap();
            // TODO: This can technically happen under very very very specific timing as the txn
-           // put happens before DkgConfirmed, yet the txn commit isn't guaranteed to
-           let key_pair = DkgKeyPair::get(self.txn, genesis, attempt).expect(
-             "in DkgConfirmed handling, which happens after everyone \
-             (including us) fires DkgConfirmed, yet no confirming key pair",
+           // put happens before DkgConfirmationShare, yet the txn isn't guaranteed to be
+           // committed
+           let key_pair = DkgKeyPair::get(self.txn, genesis).expect(
+             "in DkgConfirmationShare handling, which happens after everyone \
+             (including us) fires DkgConfirmationShare, yet no confirming key pair",
            );
-           let mut confirmer = DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt)
-             .expect("confirming DKG for unrecognized attempt");
+
+           // Determine the bitstring representing who participated before we move `shares`
+           let validators = self.spec.validators();
+           let mut signature_participants = bitvec::vec::BitVec::with_capacity(validators.len());
+           for (participant, _) in validators {
+             signature_participants.push(
+               (participant == (<Ristretto as Ciphersuite>::generator() * self.our_key.deref())) ||
+                 shares.contains_key(&self.spec.i(participant).unwrap().start),
+             );
+           }
+
+           // Produce the final signature
+           let mut confirmer = DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt);
            let sig = match confirmer.complete(preprocesses, &key_pair, shares) {
              Ok(sig) => sig,
              Err(p) => {
-               let mut tx = Transaction::RemoveParticipantDueToDkg {
-                 participant: self.spec.reverse_lookup_i(&removed, p).unwrap(),
+               let mut tx = Transaction::RemoveParticipant {
+                 participant: self.spec.reverse_lookup_i(p).unwrap(),
                  signed: Transaction::empty_signed(),
                };
                tx.sign(&mut OsRng, genesis, self.our_key);
@@ -545,23 +348,18 @@ impl<
              }
            };

-           DkgLocallyCompleted::set(self.txn, genesis, &());
-
            self
              .publish_serai_tx
              .publish_set_keys(
                self.db,
                self.spec.set(),
-               removed.into_iter().map(|key| key.to_bytes().into()).collect(),
                key_pair,
-               sig.into(),
+               signature_participants,
+               Signature(sig),
              )
              .await;
          }
-         Accumulation::Ready(DataSet::NotParticipating) => {
-           panic!("wasn't a participant in DKG confirmination shares")
-         }
-         Accumulation::NotReady => {}
+         Accumulation::Ready(DataSet::NotParticipating) | Accumulation::NotReady => {}
        }
      }
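
Editor's note: how the bitstring handed to `publish_set_keys` lines up with the accumulated shares, as a sketch with stand-in types (threshold indices as plain `u16`s): a validator is marked if they're us, since we always contribute our own share, or if we accumulated a confirmation share from them.

use std::collections::HashSet;

fn signature_participants(
  validator_start_is: &[u16],
  our_start_i: u16,
  share_senders: &HashSet<u16>,
) -> Vec<bool> {
  validator_start_is
    .iter()
    .map(|start_i| (*start_i == our_start_i) || share_senders.contains(start_i))
    .collect()
}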
@@ -619,19 +417,8 @@ impl<
      }

      Transaction::SubstrateSign(data) => {
-       // Provided transactions ensure synchrony on any signing protocol, and we won't start
-       // signing with threshold keys before we've confirmed them on-chain
-       let Some(removed) =
-         crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
-       else {
-         self.fatal_slash(
-           data.signed.signer.to_bytes(),
-           "signing despite not having set keys on substrate",
-         );
-         return;
-       };
        let signer = data.signed.signer;
-       let Ok(()) = self.check_sign_data_len(&removed, signer, data.data.len()) else {
+       let Ok(()) = self.check_sign_data_len(signer, data.data.len()) else {
          return;
        };
        let expected_len = match data.label {
@@ -654,11 +441,11 @@ impl<
          attempt: data.attempt,
        };
        let Accumulation::Ready(DataSet::Participating(mut results)) =
-         self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed)
+         self.handle_data(&data_spec, &data.data.encode(), &data.signed)
        else {
          return;
        };
-       unflatten(self.spec, &removed, &mut results);
+       unflatten(self.spec, &mut results);

        let id = SubstrateSignId {
          session: self.spec.set().session,
@@ -679,16 +466,7 @@ impl<
      }

      Transaction::Sign(data) => {
-       let Some(removed) =
-         crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
-       else {
-         self.fatal_slash(
-           data.signed.signer.to_bytes(),
-           "signing despite not having set keys on substrate",
-         );
-         return;
-       };
-       let Ok(()) = self.check_sign_data_len(&removed, data.signed.signer, data.data.len()) else {
+       let Ok(()) = self.check_sign_data_len(data.signed.signer, data.data.len()) else {
          return;
        };
@@ -698,9 +476,9 @@ impl<
          attempt: data.attempt,
        };
        if let Accumulation::Ready(DataSet::Participating(mut results)) =
-         self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed)
+         self.handle_data(&data_spec, &data.data.encode(), &data.signed)
        {
-         unflatten(self.spec, &removed, &mut results);
+         unflatten(self.spec, &mut results);
          let id =
            SignId { session: self.spec.set().session, id: data.plan, attempt: data.attempt };
          self
@@ -741,8 +519,7 @@ impl<
      }

      Transaction::SlashReport(points, signed) => {
-       // Uses &[] as we only need the length which is independent to who else was removed
-       let signer_range = self.spec.i(&[], signed.signer).unwrap();
+       let signer_range = self.spec.i(signed.signer).unwrap();
        let signer_len = u16::from(signer_range.end) - u16::from(signer_range.start);
        if points.len() != (self.spec.validators().len() - 1) {
          self.fatal_slash(


@@ -1,8 +1,3 @@
- use dalek_ff_group::Ristretto;
- use ciphersuite::{group::GroupEncoding, Ciphersuite};
-
- use serai_client::validator_sets::primitives::ExternalValidatorSet;
-
  use tributary::{
    ReadWrite,
    transaction::{TransactionError, TransactionKind, Transaction as TransactionTrait},
@@ -25,39 +20,6 @@ pub use handle::*;
  pub mod scanner;

- pub fn removed_as_of_dkg_attempt(
-   getter: &impl Get,
-   genesis: [u8; 32],
-   attempt: u32,
- ) -> Option<Vec<<Ristretto as Ciphersuite>::G>> {
-   if attempt == 0 {
-     Some(vec![])
-   } else {
-     RemovedAsOfDkgAttempt::get(getter, genesis, attempt).map(|keys| {
-       keys.iter().map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap()).collect()
-     })
-   }
- }
-
- pub fn removed_as_of_set_keys(
-   getter: &impl Get,
-   set: ExternalValidatorSet,
-   genesis: [u8; 32],
- ) -> Option<Vec<<Ristretto as Ciphersuite>::G>> {
-   // SeraiDkgCompleted has the key placed on-chain.
-   // This key can be uniquely mapped to an attempt so long as one participant was honest, which we
-   // assume as a presumably honest participant.
-   // Resolve from generated key to attempt to fatally slashed as of attempt.
-   // This expect will trigger if this is prematurely called and Substrate has tracked the keys yet
-   // we haven't locally synced and handled the Tributary
-   // All callers of this, at the time of writing, ensure the Tributary has sufficiently synced
-   // making the panic with context more desirable than the None
-   let attempt = KeyToDkgAttempt::get(getter, SeraiDkgCompleted::get(getter, set)?)
-     .expect("key completed on-chain didn't have an attempt related");
-   removed_as_of_dkg_attempt(getter, genesis, attempt)
- }
-
  pub async fn publish_signed_transaction<D: Db, P: crate::P2p>(
    txn: &mut D::Transaction<'_>,
    tributary: &Tributary<D, Transaction, P>,

View File

@@ -1,17 +1,18 @@
- use core::{marker::PhantomData, ops::Deref, future::Future, time::Duration};
- use std::{sync::Arc, collections::HashSet};
+ use core::{marker::PhantomData, future::Future, time::Duration};
+ use std::sync::Arc;

  use zeroize::Zeroizing;
+ use rand_core::OsRng;

- use dalek_ff_group::Ristretto;
- use ciphersuite::{group::GroupEncoding, Ciphersuite};
+ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};

  use tokio::sync::broadcast;

  use scale::{Encode, Decode};
  use serai_client::{
-   primitives::{SeraiAddress, Signature},
-   validator_sets::primitives::{ExternalValidatorSet, KeyPair},
+   primitives::Signature,
+   validator_sets::primitives::{KeyPair, ValidatorSet},
    Serai,
  };
@@ -39,7 +40,7 @@ pub enum RecognizedIdType {
  pub trait RIDTrait {
    async fn recognized_id(
      &self,
-     set: ExternalValidatorSet,
+     set: ValidatorSet,
      genesis: [u8; 32],
      kind: RecognizedIdType,
      id: Vec<u8>,
@@ -48,12 +49,12 @@ pub trait RIDTrait {
  #[async_trait::async_trait]
  impl<
      FRid: Send + Future<Output = ()>,
-     F: Sync + Fn(ExternalValidatorSet, [u8; 32], RecognizedIdType, Vec<u8>) -> FRid,
+     F: Sync + Fn(ValidatorSet, [u8; 32], RecognizedIdType, Vec<u8>) -> FRid,
    > RIDTrait for F
  {
    async fn recognized_id(
      &self,
-     set: ExternalValidatorSet,
+     set: ValidatorSet,
      genesis: [u8; 32],
      kind: RecognizedIdType,
      id: Vec<u8>,
@@ -67,9 +68,9 @@ pub trait PublishSeraiTransaction {
    async fn publish_set_keys(
      &self,
      db: &(impl Sync + Get),
-     set: ExternalValidatorSet,
-     removed: Vec<SeraiAddress>,
+     set: ValidatorSet,
      key_pair: KeyPair,
+     signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
      signature: Signature,
    );
  }
@@ -87,7 +88,7 @@ mod impl_pst_for_serai {
    async fn publish(
      serai: &Serai,
      db: &impl Get,
-     set: ExternalValidatorSet,
+     set: ValidatorSet,
      tx: serai_client::Transaction,
      meta: $Meta,
    ) -> bool {
@@ -129,19 +130,14 @@ mod impl_pst_for_serai {
      async fn publish_set_keys(
        &self,
        db: &(impl Sync + Get),
-       set: ExternalValidatorSet,
-       removed: Vec<SeraiAddress>,
+       set: ValidatorSet,
        key_pair: KeyPair,
+       signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
        signature: Signature,
      ) {
-       // TODO: BoundedVec as an arg to avoid this expect
-       let tx = SeraiValidatorSets::set_keys(
-         set.network,
-         removed.try_into().expect("removing more than allowed"),
-         key_pair,
-         signature,
-       );
-       async fn check(serai: SeraiValidatorSets<'_>, set: ExternalValidatorSet, (): ()) -> bool {
+       let tx =
+         SeraiValidatorSets::set_keys(set.network, key_pair, signature_participants, signature);
+       async fn check(serai: SeraiValidatorSets<'_>, set: ValidatorSet, (): ()) -> bool {
          if matches!(serai.keys(set).await, Ok(Some(_))) {
            log::info!("another coordinator set key pair for {:?}", set);
            return true;
@@ -250,18 +246,15 @@ impl<
      let genesis = self.spec.genesis();

-     let current_fatal_slashes = FatalSlashes::get_as_keys(self.txn, genesis);
-
      // Calculate the shares still present, spinning if not enough are
-     // still_present_shares is used by a below branch, yet it's a natural byproduct of checking if
-     // we should spin, hence storing it in a variable here
-     let still_present_shares = {
+     {
        // Start with the original n value
-       let mut present_shares = self.spec.n(&[]);
+       let mut present_shares = self.spec.n();
        // Remove everyone fatally slashed
+       let current_fatal_slashes = FatalSlashes::get_as_keys(self.txn, genesis);
        for removed in &current_fatal_slashes {
          let original_i_for_removed =
-           self.spec.i(&[], *removed).expect("removed party was never present");
+           self.spec.i(*removed).expect("removed party was never present");
          let removed_shares =
            u16::from(original_i_for_removed.end) - u16::from(original_i_for_removed.start);
          present_shares -= removed_shares;
@@ -277,79 +270,17 @@ impl<
            tokio::time::sleep(core::time::Duration::from_secs(60)).await;
          }
        }
-       }
-
-       present_shares
-     };

      for topic in ReattemptDb::take(self.txn, genesis, self.block_number) {
        let attempt = AttemptDb::start_next_attempt(self.txn, genesis, topic);
-       log::info!("re-attempting {topic:?} with attempt {attempt}");
+       log::info!("potentially re-attempting {topic:?} with attempt {attempt}");

        // Slash people who failed to participate as expected in the prior attempt
        {
          let prior_attempt = attempt - 1;
-         let (removed, expected_participants) = match topic {
-           Topic::Dkg => {
-             // Every validator who wasn't removed is expected to have participated
-             let removed =
-               crate::tributary::removed_as_of_dkg_attempt(self.txn, genesis, prior_attempt)
-                 .expect("prior attempt didn't have its removed saved to disk");
-             let removed_set = removed.iter().copied().collect::<HashSet<_>>();
-             (
-               removed,
-               self
-                 .spec
-                 .validators()
-                 .into_iter()
-                 .filter_map(|(validator, _)| {
-                   Some(validator).filter(|validator| !removed_set.contains(validator))
-                 })
-                 .collect(),
-             )
-           }
-           Topic::DkgConfirmation => {
-             panic!("TODO: re-attempting DkgConfirmation when we should be re-attempting the Dkg")
-           }
-           Topic::SubstrateSign(_) | Topic::Sign(_) => {
-             let removed =
-               crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
-                 .expect("SubstrateSign/Sign yet have yet to set keys");
-             // TODO: If 67% sent preprocesses, this should be them. Else, this should be vec![]
-             let expected_participants = vec![];
-             (removed, expected_participants)
-           }
-         };
-
-         let (expected_topic, expected_label) = match topic {
-           Topic::Dkg => {
-             let n = self.spec.n(&removed);
-             // If we got all the DKG shares, we should be on DKG confirmation
-             let share_spec =
-               DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt: prior_attempt };
-             if DataReceived::get(self.txn, genesis, &share_spec).unwrap_or(0) == n {
-               // Label::Share since there is no Label::Preprocess for DkgConfirmation since the
-               // preprocess is part of Topic::Dkg Label::Share
-               (Topic::DkgConfirmation, Label::Share)
-             } else {
-               let preprocess_spec = DataSpecification {
-                 topic: Topic::Dkg,
-                 label: Label::Preprocess,
-                 attempt: prior_attempt,
-               };
-               // If we got all the DKG preprocesses, DKG shares
-               if DataReceived::get(self.txn, genesis, &preprocess_spec).unwrap_or(0) == n {
-                 // Label::Share since there is no Label::Preprocess for DkgConfirmation since the
-                 // preprocess is part of Topic::Dkg Label::Share
-                 (Topic::Dkg, Label::Share)
-               } else {
-                 (Topic::Dkg, Label::Preprocess)
-               }
-             }
-           }
-           Topic::DkgConfirmation => unreachable!(),
-           // If we got enough participants to move forward, then we expect shares from them all
-           Topic::SubstrateSign(_) | Topic::Sign(_) => (topic, Label::Share),
-         };
+         // TODO: If 67% sent preprocesses, this should be them. Else, this should be vec![]
+         let expected_participants: Vec<<Ristretto as Ciphersuite>::G> = vec![];

          let mut did_not_participate = vec![];
          for expected_participant in expected_participants {
@@ -357,8 +288,9 @@ impl<
            self.txn,
            genesis,
            &DataSpecification {
-             topic: expected_topic,
-             label: expected_label,
+             topic,
+             // Since we got the preprocesses, we were supposed to get the shares
+             label: Label::Share,
              attempt: prior_attempt,
            },
            &expected_participant.to_bytes(),
@@ -374,15 +306,8 @@ impl<
          // Accordingly, clear did_not_participate
          // TODO

-         // If during the DKG, explicitly mark these people as having been offline
-         // TODO: If they were offline sufficiently long ago, don't strike them off
-         if topic == Topic::Dkg {
-           let mut existing = OfflineDuringDkg::get(self.txn, genesis).unwrap_or(vec![]);
-           for did_not_participate in did_not_participate {
-             existing.push(did_not_participate.to_bytes());
-           }
-           OfflineDuringDkg::set(self.txn, genesis, &existing);
-         }
+         // TODO: Increment the slash points of people who didn't preprocess in some expected window
+         // of time

          // Slash everyone who didn't participate as expected
          // This may be overzealous as if a minority detects a completion, they'll abort yet the
@@ -412,75 +337,22 @@ impl<
          then preprocesses. This only sends preprocesses).
        */
        match topic {
-         Topic::Dkg => {
-           let mut removed = current_fatal_slashes.clone();
-
-           let t = self.spec.t();
-           {
-             let mut present_shares = still_present_shares;
-
-             // Load the parties marked as offline across the various attempts
-             let mut offline = OfflineDuringDkg::get(self.txn, genesis)
-               .unwrap_or(vec![])
-               .iter()
-               .map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap())
-               .collect::<Vec<_>>();
-             // Pop from the list to prioritize the removal of those recently offline
-             while let Some(offline) = offline.pop() {
-               // Make sure they weren't removed already (such as due to being fatally slashed)
-               // This also may trigger if they were offline across multiple attempts
-               if removed.contains(&offline) {
-                 continue;
-               }
-
-               // If we can remove them and still meet the threshold, do so
-               let original_i_for_offline =
-                 self.spec.i(&[], offline).expect("offline was never present?");
-               let offline_shares =
-                 u16::from(original_i_for_offline.end) - u16::from(original_i_for_offline.start);
-               if (present_shares - offline_shares) >= t {
-                 present_shares -= offline_shares;
-                 removed.push(offline);
-               }
-
-               // If we've removed as many people as we can, break
-               if present_shares == t {
-                 break;
-               }
-             }
-           }
-
-           RemovedAsOfDkgAttempt::set(
-             self.txn,
-             genesis,
-             attempt,
-             &removed.iter().map(<Ristretto as Ciphersuite>::G::to_bytes).collect(),
-           );
-
-           if DkgLocallyCompleted::get(self.txn, genesis).is_none() {
-             let Some(our_i) = self.spec.i(&removed, Ristretto::generator() * self.our_key.deref())
-             else {
-               continue;
-             };
-
-             // Since it wasn't completed, instruct the processor to start the next attempt
-             let id =
-               processor_messages::key_gen::KeyGenId { session: self.spec.set().session, attempt };
-             let params =
-               frost::ThresholdParams::new(t, self.spec.n(&removed), our_i.start).unwrap();
-             let shares = u16::from(our_i.end) - u16::from(our_i.start);
-
-             self
-               .processors
-               .send(
-                 self.spec.set().network,
-                 processor_messages::key_gen::CoordinatorMessage::GenerateKey { id, params, shares },
-               )
-               .await;
+         Topic::DkgConfirmation => {
+           if SeraiDkgCompleted::get(self.txn, self.spec.set()).is_none() {
+             log::info!("re-attempting DKG confirmation with attempt {attempt}");
+             // Since it wasn't completed, publish our nonces for the next attempt
+             let confirmation_nonces =
+               crate::tributary::dkg_confirmation_nonces(self.our_key, self.spec, self.txn, attempt);
+             let mut tx = Transaction::DkgConfirmationNonces {
+               attempt,
+               confirmation_nonces,
+               signed: Transaction::empty_signed(),
+             };
+             tx.sign(&mut OsRng, genesis, self.our_key);
+             self.publish_tributary_tx.publish_tributary_tx(tx).await;
            }
          }
-         Topic::DkgConfirmation => unreachable!(),

          Topic::SubstrateSign(inner_id) => {
            let id = processor_messages::coordinator::SubstrateSignId {
              session: self.spec.set().session,
@@ -497,6 +369,8 @@ impl<
              crate::cosign_evaluator::LatestCosign::get(self.txn, self.spec.set().network)
                .map_or(0, |cosign| cosign.block_number);
            if latest_cosign < block_number {
+             log::info!("re-attempting cosigning {block_number:?} with attempt {attempt}");
+
              // Instruct the processor to start the next attempt
              self
                .processors
@@ -513,6 +387,8 @@ impl<
            SubstrateSignableId::Batch(batch) => {
              // If the Batch hasn't appeared on-chain...
              if BatchInstructionsHashDb::get(self.txn, self.spec.set().network, batch).is_none() {
+               log::info!("re-attempting signing batch {batch:?} with attempt {attempt}");
+
                // Instruct the processor to start the next attempt
                // The processor won't continue if it's already signed a Batch
                // Prior checking if the Batch is on-chain just may reduce the non-participating
@@ -530,6 +406,11 @@ impl<
            // If this Tributary hasn't been retired...
            // (published SlashReport/took too long to do so)
            if crate::RetiredTributaryDb::get(self.txn, self.spec.set()).is_none() {
+             log::info!(
+               "re-attempting signing slash report for {:?} with attempt {attempt}",
+               self.spec.set()
+             );
+
              let report = SlashReport::get(self.txn, self.spec.set())
                .expect("re-attempting signing a SlashReport we don't have?");
              self
@@ -576,8 +457,7 @@ impl<
        };
        // Assign them 0 points for themselves
        report.insert(i, 0);
-       // Uses &[] as we only need the length which is independent to who else was removed
-       let signer_i = self.spec.i(&[], validator).unwrap();
+       let signer_i = self.spec.i(validator).unwrap();
        let signer_len = u16::from(signer_i.end) - u16::from(signer_i.start);
        // Push `n` copies, one for each of their shares
        for _ in 0 .. signer_len {


@@ -55,7 +55,7 @@
  */

  use core::ops::Deref;
- use std::collections::HashMap;
+ use std::collections::{HashSet, HashMap};

  use zeroize::{Zeroize, Zeroizing};
@@ -63,21 +63,18 @@ use rand_core::OsRng;
  use blake2::{Digest, Blake2s256};

- use dalek_ff_group::Ristretto;
- use ciphersuite::{
-   group::{ff::PrimeField, GroupEncoding},
-   Ciphersuite,
- };
- use dkg_musig::musig;
- use frost::{FrostError, dkg::Participant, ThresholdKeys, sign::*};
+ use ciphersuite::{group::ff::PrimeField, Ciphersuite, Ristretto};
+ use frost::{
+   FrostError,
+   dkg::{Participant, musig::musig},
+   ThresholdKeys,
+   sign::*,
+ };
  use frost_schnorrkel::Schnorrkel;

  use scale::Encode;
- use serai_client::{
-   Public,
-   validator_sets::primitives::{KeyPair, musig_context, set_keys_message},
- };
+ use serai_client::validator_sets::primitives::{KeyPair, musig_context, set_keys_message};

  use serai_db::*;
@@ -86,6 +83,7 @@ use crate::tributary::TributarySpec;
  create_db!(
    SigningProtocolDb {
      CachedPreprocesses: (context: &impl Encode) -> [u8; 32]
+     DataSignedWith: (context: &impl Encode) -> (Vec<u8>, HashMap<Participant, Vec<u8>>),
    }
  );
@@ -114,16 +112,22 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
    };
    let encryption_key_slice: &mut [u8] = encryption_key.as_mut();

-   let algorithm = Schnorrkel::new(b"substrate");
+   // Create the MuSig keys
    let keys: ThresholdKeys<Ristretto> =
-     musig(musig_context(self.spec.set().into()), self.key.clone(), participants)
+     musig(&musig_context(self.spec.set()), self.key, participants)
        .expect("signing for a set we aren't in/validator present multiple times")
        .into();
+   // Define the algorithm
+   let algorithm = Schnorrkel::new(b"substrate");

+   // Check if we've prior preprocessed
    if CachedPreprocesses::get(self.txn, &self.context).is_none() {
+     // If we haven't, we create a machine solely to obtain the preprocess with
      let (machine, _) =
        AlgorithmMachine::new(algorithm.clone(), keys.clone()).preprocess(&mut OsRng);
+     // Cache and save the preprocess to disk
      let mut cache = machine.cache();
      assert_eq!(cache.0.len(), 32);
      #[allow(clippy::needless_range_loop)]
@@ -134,13 +138,15 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
      CachedPreprocesses::set(self.txn, &self.context, &cache.0);
    }

+   // We're now guaranteed to have the preprocess, hence why this `unwrap` is safe
    let cached = CachedPreprocesses::get(self.txn, &self.context).unwrap();
-   let mut cached: Zeroizing<[u8; 32]> = Zeroizing::new(cached);
+   let mut cached = Zeroizing::new(cached);
    #[allow(clippy::needless_range_loop)]
    for b in 0 .. 32 {
      cached[b] ^= encryption_key_slice[b];
    }
    encryption_key_slice.zeroize();

+   // Create the machine from the cached preprocess
    let (machine, preprocess) =
      AlgorithmSignMachine::from_cache(algorithm, keys, CachedPreprocess(cached));
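
Editor's note: the XOR loop doubles as both the encryption and the decryption of the cached preprocess, since masking twice with the same key is the identity. A minimal sketch:

use zeroize::Zeroizing;

// (x ^ k) ^ k == x, so one routine both protects and recovers the cached bytes
fn xor_mask(cached: &mut Zeroizing<[u8; 32]>, key: &[u8; 32]) {
  for b in 0 .. 32 {
    cached[b] ^= key[b];
  }
}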
@@ -153,8 +159,29 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
mut serialized_preprocesses: HashMap<Participant, Vec<u8>>, mut serialized_preprocesses: HashMap<Participant, Vec<u8>>,
msg: &[u8], msg: &[u8],
) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> { ) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
let machine = self.preprocess_internal(participants).0; // We can't clear the preprocess as we sitll need it to accumulate all of the shares
// We do save the message we signed so any future calls with distinct messages panic
// This assumes the txn deciding this data is committed before the share is broaadcast
if let Some((existing_msg, existing_preprocesses)) =
DataSignedWith::get(self.txn, &self.context)
{
assert_eq!(msg, &existing_msg, "obtaining a signature share for a distinct message");
assert_eq!(
&serialized_preprocesses, &existing_preprocesses,
"obtaining a signature share with a distinct set of preprocesses"
);
} else {
DataSignedWith::set(
self.txn,
&self.context,
&(msg.to_vec(), serialized_preprocesses.clone()),
);
}
// Get the preprocessed machine
let (machine, _) = self.preprocess_internal(participants);
// Deserialize all the preprocesses
let mut participants = serialized_preprocesses.keys().copied().collect::<Vec<_>>(); let mut participants = serialized_preprocesses.keys().copied().collect::<Vec<_>>();
participants.sort(); participants.sort();
let mut preprocesses = HashMap::new(); let mut preprocesses = HashMap::new();
@@ -167,13 +194,14 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
); );
} }
// Sign the share
let (machine, share) = machine.sign(preprocesses, msg).map_err(|e| match e { let (machine, share) = machine.sign(preprocesses, msg).map_err(|e| match e {
FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"),
FrostError::InvalidParticipant(_, _) | FrostError::InvalidParticipant(_, _) |
FrostError::InvalidSigningSet(_) | FrostError::InvalidSigningSet(_) |
FrostError::InvalidParticipantQuantity(_, _) | FrostError::InvalidParticipantQuantity(_, _) |
FrostError::DuplicatedParticipant(_) | FrostError::DuplicatedParticipant(_) |
FrostError::MissingParticipant(_) => unreachable!("{e:?}"), FrostError::MissingParticipant(_) => panic!("unexpected error during sign: {e:?}"),
FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,
})?; })?;
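A standalone sketch of the guard introduced above, with a `HashMap` standing in for the database: the first call pins the message and preprocesses for a context, and any later call with different data panics rather than producing a second, conflicting share.

  use std::collections::HashMap;

  // Hedged stand-in for DataSignedWith; the key is the signing context.
  struct SignedWithGuard(HashMap<Vec<u8>, (Vec<u8>, Vec<Vec<u8>>)>);

  impl SignedWithGuard {
    // Pins (msg, preprocesses) on first use, panics on later divergence
    fn check_or_set(&mut self, context: &[u8], msg: &[u8], preprocesses: &[Vec<u8>]) {
      if let Some((existing_msg, existing_preprocesses)) = self.0.get(context) {
        assert_eq!(msg, existing_msg.as_slice(), "distinct message");
        assert_eq!(preprocesses, existing_preprocesses.as_slice(), "distinct preprocesses");
      } else {
        self.0.insert(context.to_vec(), (msg.to_vec(), preprocesses.to_vec()));
      }
    }
  }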
@@ -204,24 +232,24 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
} }
// Get the keys of the participants, keyed by their threshold i's, and return a new map indexed by // Get the keys of the participants, keyed by their threshold i's, and return a new map indexed by
// the MuSig i's. // their MuSig i's.
fn threshold_i_map_to_keys_and_musig_i_map( fn threshold_i_map_to_keys_and_musig_i_map(
spec: &TributarySpec, spec: &TributarySpec,
removed: &[<Ristretto as Ciphersuite>::G],
our_key: &Zeroizing<<Ristretto as Ciphersuite>::F>, our_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
mut map: HashMap<Participant, Vec<u8>>, mut map: HashMap<Participant, Vec<u8>>,
) -> (Vec<<Ristretto as Ciphersuite>::G>, HashMap<Participant, Vec<u8>>) { ) -> (Vec<<Ristretto as Ciphersuite>::G>, HashMap<Participant, Vec<u8>>) {
// Insert our own index so calculations aren't offset // Insert our own index so calculations aren't offset
let our_threshold_i = spec let our_threshold_i = spec
.i(removed, <Ristretto as Ciphersuite>::generator() * our_key.deref()) .i(<Ristretto as Ciphersuite>::generator() * our_key.deref())
.expect("MuSig t-of-n signing a for a protocol we were removed from") .expect("not in a set we're signing for")
.start; .start;
// Asserts we weren't unexpectedly already present
assert!(map.insert(our_threshold_i, vec![]).is_none()); assert!(map.insert(our_threshold_i, vec![]).is_none());
let spec_validators = spec.validators(); let spec_validators = spec.validators();
let key_from_threshold_i = |threshold_i| { let key_from_threshold_i = |threshold_i| {
for (key, _) in &spec_validators { for (key, _) in &spec_validators {
if threshold_i == spec.i(removed, *key).expect("MuSig t-of-n participant was removed").start { if threshold_i == spec.i(*key).expect("validator wasn't in a set they're in").start {
return *key; return *key;
} }
} }
@@ -232,19 +260,27 @@ fn threshold_i_map_to_keys_and_musig_i_map(
let mut threshold_is = map.keys().copied().collect::<Vec<_>>(); let mut threshold_is = map.keys().copied().collect::<Vec<_>>();
threshold_is.sort(); threshold_is.sort();
for threshold_i in threshold_is { for threshold_i in threshold_is {
sorted.push((key_from_threshold_i(threshold_i), map.remove(&threshold_i).unwrap())); sorted.push((
threshold_i,
key_from_threshold_i(threshold_i),
map.remove(&threshold_i).unwrap(),
));
} }
// Now that signers are sorted, with their shares, create a map with the i's needed for MuSig // Now that signers are sorted, with their shares, create a map with the i's needed for MuSig
let mut participants = vec![]; let mut participants = vec![];
let mut map = HashMap::new(); let mut map = HashMap::new();
for (raw_i, (key, share)) in sorted.into_iter().enumerate() { let mut our_musig_i = None;
let musig_i = u16::try_from(raw_i).unwrap() + 1; for (raw_i, (threshold_i, key, share)) in sorted.into_iter().enumerate() {
let musig_i = Participant::new(u16::try_from(raw_i).unwrap() + 1).unwrap();
if threshold_i == our_threshold_i {
our_musig_i = Some(musig_i);
}
participants.push(key); participants.push(key);
map.insert(Participant::new(musig_i).unwrap(), share); map.insert(musig_i, share);
} }
map.remove(&our_threshold_i).unwrap(); map.remove(&our_musig_i.unwrap()).unwrap();
(participants, map) (participants, map)
} }
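A worked sketch of the re-indexing this function performs: threshold `i`s are sparse (only the participants who showed up are present), while MuSig expects the contiguous indices 1..=n, so the sorted position of each threshold `i` becomes its MuSig `i`.

  // Hedged sketch: sparse threshold indices, sorted, re-numbered contiguously.
  fn to_musig_indices(mut threshold_is: Vec<u16>) -> Vec<(u16, u16)> {
    threshold_is.sort();
    threshold_is
      .into_iter()
      .enumerate()
      // (threshold i, MuSig i): the MuSig i is the sorted position plus one
      .map(|(pos, threshold_i)| (threshold_i, u16::try_from(pos).unwrap() + 1))
      .collect()
  }

  // e.g. threshold i's [5, 1, 9] map to [(1, 1), (5, 2), (9, 3)]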
@@ -254,7 +290,6 @@ type DkgConfirmerSigningProtocol<'a, T> = SigningProtocol<'a, T, (&'static [u8;
pub(crate) struct DkgConfirmer<'a, T: DbTxn> { pub(crate) struct DkgConfirmer<'a, T: DbTxn> {
key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>, key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
spec: &'a TributarySpec, spec: &'a TributarySpec,
removed: Vec<<Ristretto as Ciphersuite>::G>,
txn: &'a mut T, txn: &'a mut T,
attempt: u32, attempt: u32,
} }
@@ -265,19 +300,19 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
spec: &'a TributarySpec, spec: &'a TributarySpec,
txn: &'a mut T, txn: &'a mut T,
attempt: u32, attempt: u32,
) -> Option<DkgConfirmer<'a, T>> { ) -> DkgConfirmer<'a, T> {
// This relies on how confirmations are inlined into the DKG protocol and they accordingly DkgConfirmer { key, spec, txn, attempt }
// share attempts
let removed = crate::tributary::removed_as_of_dkg_attempt(txn, spec.genesis(), attempt)?;
Some(DkgConfirmer { key, spec, removed, txn, attempt })
} }
fn signing_protocol(&mut self) -> DkgConfirmerSigningProtocol<'_, T> { fn signing_protocol(&mut self) -> DkgConfirmerSigningProtocol<'_, T> {
let context = (b"DkgConfirmer", self.attempt); let context = (b"DkgConfirmer", self.attempt);
SigningProtocol { key: self.key, spec: self.spec, txn: self.txn, context } SigningProtocol { key: self.key, spec: self.spec, txn: self.txn, context }
} }
fn preprocess_internal(&mut self) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) { fn preprocess_internal(&mut self) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {
let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>(); // This preprocesses with just us as we only decide the participants after obtaining
// preprocesses
let participants = vec![<Ristretto as Ciphersuite>::generator() * self.key.deref()];
self.signing_protocol().preprocess_internal(&participants) self.signing_protocol().preprocess_internal(&participants)
} }
// Get the preprocess for this confirmation. // Get the preprocess for this confirmation.
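The comment in `preprocess_internal` captures an ordering subtlety: a FROST preprocess is generated before the signing set is known, so the confirmer preprocesses against only its own key and binds the actual participant list at share time. A hedged sketch of the flow, using the names from this diff:

  // 1. Preprocess with just our key; the participants aren't decided yet
  //    let (machine, preprocess) = self.signing_protocol().preprocess_internal(&[our_key]);
  // 2. Broadcast our preprocess and collect the others'
  // 3. Whoever published preprocesses forms the signing set
  //    let (participants, preprocesses) =
  //      threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, preprocesses);
  // 4. Produce our share against that full set
  //    let (machine, share) =
  //      self.signing_protocol().share_internal(&participants, preprocesses, &msg);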
@@ -290,14 +325,9 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
preprocesses: HashMap<Participant, Vec<u8>>, preprocesses: HashMap<Participant, Vec<u8>>,
key_pair: &KeyPair, key_pair: &KeyPair,
) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> { ) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>(); let (participants, preprocesses) =
let preprocesses = threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, preprocesses);
threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, preprocesses).1; let msg = set_keys_message(&self.spec.set(), key_pair);
let msg = set_keys_message(
&self.spec.set(),
&self.removed.iter().map(|key| Public::from(key.to_bytes())).collect::<Vec<_>>(),
key_pair,
);
self.signing_protocol().share_internal(&participants, preprocesses, &msg) self.signing_protocol().share_internal(&participants, preprocesses, &msg)
} }
// Get the share for this confirmation, if the preprocesses are valid. // Get the share for this confirmation, if the preprocesses are valid.
@@ -315,8 +345,9 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
key_pair: &KeyPair, key_pair: &KeyPair,
shares: HashMap<Participant, Vec<u8>>, shares: HashMap<Participant, Vec<u8>>,
) -> Result<[u8; 64], Participant> { ) -> Result<[u8; 64], Participant> {
let shares = assert_eq!(preprocesses.keys().collect::<HashSet<_>>(), shares.keys().collect::<HashSet<_>>());
threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, shares).1;
let shares = threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, shares).1;
let machine = self let machine = self
.share_internal(preprocesses, key_pair) .share_internal(preprocesses, key_pair)

View File

@@ -3,14 +3,13 @@ use std::{io, collections::HashMap};
use transcript::{Transcript, RecommendedTranscript}; use transcript::{Transcript, RecommendedTranscript};
use dalek_ff_group::Ristretto; use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use ciphersuite::{group::GroupEncoding, Ciphersuite};
use frost::Participant; use frost::Participant;
use scale::Encode; use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize}; use borsh::{BorshSerialize, BorshDeserialize};
use serai_client::{primitives::PublicKey, validator_sets::primitives::ExternalValidatorSet}; use serai_client::validator_sets::primitives::ValidatorSet;
fn borsh_serialize_validators<W: io::Write>( fn borsh_serialize_validators<W: io::Write>(
validators: &Vec<(<Ristretto as Ciphersuite>::G, u16)>, validators: &Vec<(<Ristretto as Ciphersuite>::G, u16)>,
@@ -44,32 +43,27 @@ fn borsh_deserialize_validators<R: io::Read>(
pub struct TributarySpec { pub struct TributarySpec {
serai_block: [u8; 32], serai_block: [u8; 32],
start_time: u64, start_time: u64,
set: ExternalValidatorSet, set: ValidatorSet,
#[borsh( #[borsh(
serialize_with = "borsh_serialize_validators", serialize_with = "borsh_serialize_validators",
deserialize_with = "borsh_deserialize_validators" deserialize_with = "borsh_deserialize_validators"
)] )]
validators: Vec<(<Ristretto as Ciphersuite>::G, u16)>, validators: Vec<(<Ristretto as Ciphersuite>::G, u16)>,
evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
} }
impl TributarySpec { impl TributarySpec {
pub fn new( pub fn new(
serai_block: [u8; 32], serai_block: [u8; 32],
start_time: u64, start_time: u64,
set: ExternalValidatorSet, set: ValidatorSet,
set_participants: Vec<(PublicKey, u16)>, validators: Vec<(<Ristretto as Ciphersuite>::G, u16)>,
evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
) -> TributarySpec { ) -> TributarySpec {
let mut validators = vec![]; Self { serai_block, start_time, set, validators, evrf_public_keys }
for (participant, shares) in set_participants {
let participant = <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut participant.0.as_ref())
.expect("invalid key registered as participant");
validators.push((participant, shares));
}
Self { serai_block, start_time, set, validators }
} }
pub fn set(&self) -> ExternalValidatorSet { pub fn set(&self) -> ValidatorSet {
self.set self.set
} }
@@ -89,24 +83,15 @@ impl TributarySpec {
self.start_time self.start_time
} }
pub fn n(&self, removed_validators: &[<Ristretto as Ciphersuite>::G]) -> u16 { pub fn n(&self) -> u16 {
self self.validators.iter().map(|(_, weight)| *weight).sum()
.validators
.iter()
.map(|(validator, weight)| if removed_validators.contains(validator) { 0 } else { *weight })
.sum()
} }
pub fn t(&self) -> u16 { pub fn t(&self) -> u16 {
// t doesn't change with regards to the amount of removed validators ((2 * self.n()) / 3) + 1
((2 * self.n(&[])) / 3) + 1
} }
pub fn i( pub fn i(&self, key: <Ristretto as Ciphersuite>::G) -> Option<Range<Participant>> {
&self,
removed_validators: &[<Ristretto as Ciphersuite>::G],
key: <Ristretto as Ciphersuite>::G,
) -> Option<Range<Participant>> {
let mut all_is = HashMap::new(); let mut all_is = HashMap::new();
let mut i = 1; let mut i = 1;
for (validator, weight) in &self.validators { for (validator, weight) in &self.validators {
@@ -117,34 +102,12 @@ impl TributarySpec {
i += weight; i += weight;
} }
let original_i = all_is.get(&key)?.clone(); Some(all_is.get(&key)?.clone())
let mut result_i = original_i.clone();
for removed_validator in removed_validators {
let removed_i = all_is
.get(removed_validator)
.expect("removed validator wasn't present in set to begin with");
// If the queried key was removed, return None
if &original_i == removed_i {
return None;
}
// If the removed was before the queried, shift the queried down accordingly
if removed_i.start < original_i.start {
let removed_shares = u16::from(removed_i.end) - u16::from(removed_i.start);
result_i.start = Participant::new(u16::from(original_i.start) - removed_shares).unwrap();
result_i.end = Participant::new(u16::from(original_i.end) - removed_shares).unwrap();
}
}
Some(result_i)
} }
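A standalone sketch of the simplified `i` above: each validator occupies `weight` consecutive share indices starting from 1, with no shifting for removed validators any longer.

  use std::{collections::HashMap, ops::Range};

  // Hedged sketch mirroring the loop above, with &str keys for brevity.
  fn index_ranges(validators: &[(&'static str, u16)]) -> HashMap<&'static str, Range<u16>> {
    let mut all_is = HashMap::new();
    let mut i = 1;
    for (validator, weight) in validators {
      all_is.insert(*validator, i .. (i + weight));
      i += weight;
    }
    all_is
  }

  // With weights [("a", 2), ("b", 1), ("c", 3)]: n = 6, t = ((2 * 6) / 3) + 1 = 5,
  // and the ranges are a: 1..3, b: 3..4, c: 4..7.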
pub fn reverse_lookup_i( pub fn reverse_lookup_i(&self, i: Participant) -> Option<<Ristretto as Ciphersuite>::G> {
&self,
removed_validators: &[<Ristretto as Ciphersuite>::G],
i: Participant,
) -> Option<<Ristretto as Ciphersuite>::G> {
for (validator, _) in &self.validators { for (validator, _) in &self.validators {
if self.i(removed_validators, *validator).map_or(false, |range| range.contains(&i)) { if self.i(*validator).map_or(false, |range| range.contains(&i)) {
return Some(*validator); return Some(*validator);
} }
} }
@@ -154,4 +117,8 @@ impl TributarySpec {
pub fn validators(&self) -> Vec<(<Ristretto as Ciphersuite>::G, u64)> { pub fn validators(&self) -> Vec<(<Ristretto as Ciphersuite>::G, u64)> {
self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect() self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect()
} }
pub fn evrf_public_keys(&self) -> Vec<([u8; 32], Vec<u8>)> {
self.evrf_public_keys.clone()
}
} }

View File

@@ -7,13 +7,11 @@ use rand_core::{RngCore, CryptoRng};
use blake2::{Digest, Blake2s256}; use blake2::{Digest, Blake2s256};
use transcript::{Transcript, RecommendedTranscript}; use transcript::{Transcript, RecommendedTranscript};
use dalek_ff_group::Ristretto;
use ciphersuite::{ use ciphersuite::{
group::{ff::Field, GroupEncoding}, group::{ff::Field, GroupEncoding},
Ciphersuite, Ciphersuite, Ristretto,
}; };
use schnorr::SchnorrSignature; use schnorr::SchnorrSignature;
use frost::Participant;
use scale::{Encode, Decode}; use scale::{Encode, Decode};
use processor_messages::coordinator::SubstrateSignableId; use processor_messages::coordinator::SubstrateSignableId;
@@ -131,32 +129,26 @@ impl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> SignData<Id> {
#[derive(Clone, PartialEq, Eq)] #[derive(Clone, PartialEq, Eq)]
pub enum Transaction { pub enum Transaction {
RemoveParticipantDueToDkg { RemoveParticipant {
participant: <Ristretto as Ciphersuite>::G, participant: <Ristretto as Ciphersuite>::G,
signed: Signed, signed: Signed,
}, },
DkgCommitments { DkgParticipation {
attempt: u32, participation: Vec<u8>,
commitments: Vec<Vec<u8>>,
signed: Signed, signed: Signed,
}, },
DkgShares { DkgConfirmationNonces {
// The confirmation attempt
attempt: u32, attempt: u32,
// Sending Participant, Receiving Participant, Share // The nonces for DKG confirmation attempt #attempt
shares: Vec<Vec<Vec<u8>>>,
confirmation_nonces: [u8; 64], confirmation_nonces: [u8; 64],
signed: Signed, signed: Signed,
}, },
InvalidDkgShare { DkgConfirmationShare {
attempt: u32, // The confirmation attempt
accuser: Participant,
faulty: Participant,
blame: Option<Vec<u8>>,
signed: Signed,
},
DkgConfirmed {
attempt: u32, attempt: u32,
// The share for DKG confirmation attempt #attempt
confirmation_share: [u8; 32], confirmation_share: [u8; 32],
signed: Signed, signed: Signed,
}, },
@@ -198,29 +190,22 @@ pub enum Transaction {
impl Debug for Transaction { impl Debug for Transaction {
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
match self { match self {
Transaction::RemoveParticipantDueToDkg { participant, signed } => fmt Transaction::RemoveParticipant { participant, signed } => fmt
.debug_struct("Transaction::RemoveParticipantDueToDkg") .debug_struct("Transaction::RemoveParticipant")
.field("participant", &hex::encode(participant.to_bytes())) .field("participant", &hex::encode(participant.to_bytes()))
.field("signer", &hex::encode(signed.signer.to_bytes())) .field("signer", &hex::encode(signed.signer.to_bytes()))
.finish_non_exhaustive(), .finish_non_exhaustive(),
Transaction::DkgCommitments { attempt, commitments: _, signed } => fmt Transaction::DkgParticipation { signed, .. } => fmt
.debug_struct("Transaction::DkgCommitments") .debug_struct("Transaction::DkgParticipation")
.field("signer", &hex::encode(signed.signer.to_bytes()))
.finish_non_exhaustive(),
Transaction::DkgConfirmationNonces { attempt, signed, .. } => fmt
.debug_struct("Transaction::DkgConfirmationNonces")
.field("attempt", attempt) .field("attempt", attempt)
.field("signer", &hex::encode(signed.signer.to_bytes())) .field("signer", &hex::encode(signed.signer.to_bytes()))
.finish_non_exhaustive(), .finish_non_exhaustive(),
Transaction::DkgShares { attempt, signed, .. } => fmt Transaction::DkgConfirmationShare { attempt, signed, .. } => fmt
.debug_struct("Transaction::DkgShares") .debug_struct("Transaction::DkgConfirmationShare")
.field("attempt", attempt)
.field("signer", &hex::encode(signed.signer.to_bytes()))
.finish_non_exhaustive(),
Transaction::InvalidDkgShare { attempt, accuser, faulty, .. } => fmt
.debug_struct("Transaction::InvalidDkgShare")
.field("attempt", attempt)
.field("accuser", accuser)
.field("faulty", faulty)
.finish_non_exhaustive(),
Transaction::DkgConfirmed { attempt, confirmation_share: _, signed } => fmt
.debug_struct("Transaction::DkgConfirmed")
.field("attempt", attempt) .field("attempt", attempt)
.field("signer", &hex::encode(signed.signer.to_bytes())) .field("signer", &hex::encode(signed.signer.to_bytes()))
.finish_non_exhaustive(), .finish_non_exhaustive(),
@@ -262,43 +247,32 @@ impl ReadWrite for Transaction {
reader.read_exact(&mut kind)?; reader.read_exact(&mut kind)?;
match kind[0] { match kind[0] {
0 => Ok(Transaction::RemoveParticipantDueToDkg { 0 => Ok(Transaction::RemoveParticipant {
participant: Ristretto::read_G(reader)?, participant: Ristretto::read_G(reader)?,
signed: Signed::read_without_nonce(reader, 0)?, signed: Signed::read_without_nonce(reader, 0)?,
}), }),
1 => { 1 => {
let mut attempt = [0; 4]; let participation = {
reader.read_exact(&mut attempt)?; let mut participation_len = [0; 4];
let attempt = u32::from_le_bytes(attempt); reader.read_exact(&mut participation_len)?;
let participation_len = u32::from_le_bytes(participation_len);
let commitments = { if participation_len > u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() {
let mut commitments_len = [0; 1];
reader.read_exact(&mut commitments_len)?;
let commitments_len = usize::from(commitments_len[0]);
if commitments_len == 0 {
Err(io::Error::other("zero commitments in DkgCommitments"))?;
}
let mut each_commitments_len = [0; 2];
reader.read_exact(&mut each_commitments_len)?;
let each_commitments_len = usize::from(u16::from_le_bytes(each_commitments_len));
if (commitments_len * each_commitments_len) > TRANSACTION_SIZE_LIMIT {
Err(io::Error::other( Err(io::Error::other(
"commitments present in transaction exceeded transaction size limit", "participation present in transaction exceeded transaction size limit",
))?; ))?;
} }
let mut commitments = vec![vec![]; commitments_len]; let participation_len = usize::try_from(participation_len).unwrap();
for commitments in &mut commitments {
*commitments = vec![0; each_commitments_len]; let mut participation = vec![0; participation_len];
reader.read_exact(commitments)?; reader.read_exact(&mut participation)?;
} participation
commitments
}; };
let signed = Signed::read_without_nonce(reader, 0)?; let signed = Signed::read_without_nonce(reader, 0)?;
Ok(Transaction::DkgCommitments { attempt, commitments, signed }) Ok(Transaction::DkgParticipation { participation, signed })
} }
2 => { 2 => {
@@ -306,36 +280,12 @@ impl ReadWrite for Transaction {
reader.read_exact(&mut attempt)?; reader.read_exact(&mut attempt)?;
let attempt = u32::from_le_bytes(attempt); let attempt = u32::from_le_bytes(attempt);
let shares = {
let mut share_quantity = [0; 1];
reader.read_exact(&mut share_quantity)?;
let mut key_share_quantity = [0; 1];
reader.read_exact(&mut key_share_quantity)?;
let mut share_len = [0; 2];
reader.read_exact(&mut share_len)?;
let share_len = usize::from(u16::from_le_bytes(share_len));
let mut all_shares = vec![];
for _ in 0 .. share_quantity[0] {
let mut shares = vec![];
for _ in 0 .. key_share_quantity[0] {
let mut share = vec![0; share_len];
reader.read_exact(&mut share)?;
shares.push(share);
}
all_shares.push(shares);
}
all_shares
};
let mut confirmation_nonces = [0; 64]; let mut confirmation_nonces = [0; 64];
reader.read_exact(&mut confirmation_nonces)?; reader.read_exact(&mut confirmation_nonces)?;
let signed = Signed::read_without_nonce(reader, 1)?; let signed = Signed::read_without_nonce(reader, 0)?;
Ok(Transaction::DkgShares { attempt, shares, confirmation_nonces, signed }) Ok(Transaction::DkgConfirmationNonces { attempt, confirmation_nonces, signed })
} }
3 => { 3 => {
@@ -343,53 +293,21 @@ impl ReadWrite for Transaction {
reader.read_exact(&mut attempt)?; reader.read_exact(&mut attempt)?;
let attempt = u32::from_le_bytes(attempt); let attempt = u32::from_le_bytes(attempt);
let mut accuser = [0; 2];
reader.read_exact(&mut accuser)?;
let accuser = Participant::new(u16::from_le_bytes(accuser))
.ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?;
let mut faulty = [0; 2];
reader.read_exact(&mut faulty)?;
let faulty = Participant::new(u16::from_le_bytes(faulty))
.ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?;
let mut blame_len = [0; 2];
reader.read_exact(&mut blame_len)?;
let mut blame = vec![0; u16::from_le_bytes(blame_len).into()];
reader.read_exact(&mut blame)?;
// This shares a nonce with DkgConfirmed as only one is expected
let signed = Signed::read_without_nonce(reader, 2)?;
Ok(Transaction::InvalidDkgShare {
attempt,
accuser,
faulty,
blame: Some(blame).filter(|blame| !blame.is_empty()),
signed,
})
}
4 => {
let mut attempt = [0; 4];
reader.read_exact(&mut attempt)?;
let attempt = u32::from_le_bytes(attempt);
let mut confirmation_share = [0; 32]; let mut confirmation_share = [0; 32];
reader.read_exact(&mut confirmation_share)?; reader.read_exact(&mut confirmation_share)?;
let signed = Signed::read_without_nonce(reader, 2)?; let signed = Signed::read_without_nonce(reader, 1)?;
Ok(Transaction::DkgConfirmed { attempt, confirmation_share, signed }) Ok(Transaction::DkgConfirmationShare { attempt, confirmation_share, signed })
} }
5 => { 4 => {
let mut block = [0; 32]; let mut block = [0; 32];
reader.read_exact(&mut block)?; reader.read_exact(&mut block)?;
Ok(Transaction::CosignSubstrateBlock(block)) Ok(Transaction::CosignSubstrateBlock(block))
} }
6 => { 5 => {
let mut block = [0; 32]; let mut block = [0; 32];
reader.read_exact(&mut block)?; reader.read_exact(&mut block)?;
let mut batch = [0; 4]; let mut batch = [0; 4];
@@ -397,16 +315,16 @@ impl ReadWrite for Transaction {
Ok(Transaction::Batch { block, batch: u32::from_le_bytes(batch) }) Ok(Transaction::Batch { block, batch: u32::from_le_bytes(batch) })
} }
7 => { 6 => {
let mut block = [0; 8]; let mut block = [0; 8];
reader.read_exact(&mut block)?; reader.read_exact(&mut block)?;
Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block))) Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block)))
} }
8 => SignData::read(reader).map(Transaction::SubstrateSign), 7 => SignData::read(reader).map(Transaction::SubstrateSign),
9 => SignData::read(reader).map(Transaction::Sign), 8 => SignData::read(reader).map(Transaction::Sign),
10 => { 9 => {
let mut plan = [0; 32]; let mut plan = [0; 32];
reader.read_exact(&mut plan)?; reader.read_exact(&mut plan)?;
@@ -421,7 +339,7 @@ impl ReadWrite for Transaction {
Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature }) Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature })
} }
11 => { 10 => {
let mut len = [0]; let mut len = [0];
reader.read_exact(&mut len)?; reader.read_exact(&mut len)?;
let len = len[0]; let len = len[0];
@@ -446,109 +364,59 @@ impl ReadWrite for Transaction {
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> { fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
match self { match self {
Transaction::RemoveParticipantDueToDkg { participant, signed } => { Transaction::RemoveParticipant { participant, signed } => {
writer.write_all(&[0])?; writer.write_all(&[0])?;
writer.write_all(&participant.to_bytes())?; writer.write_all(&participant.to_bytes())?;
signed.write_without_nonce(writer) signed.write_without_nonce(writer)
} }
Transaction::DkgCommitments { attempt, commitments, signed } => { Transaction::DkgParticipation { participation, signed } => {
writer.write_all(&[1])?; writer.write_all(&[1])?;
writer.write_all(&attempt.to_le_bytes())?; writer.write_all(&u32::try_from(participation.len()).unwrap().to_le_bytes())?;
if commitments.is_empty() { writer.write_all(participation)?;
Err(io::Error::other("zero commitments in DkgCommitments"))?
}
writer.write_all(&[u8::try_from(commitments.len()).unwrap()])?;
for commitments_i in commitments {
if commitments_i.len() != commitments[0].len() {
Err(io::Error::other("commitments of differing sizes in DkgCommitments"))?
}
}
writer.write_all(&u16::try_from(commitments[0].len()).unwrap().to_le_bytes())?;
for commitments in commitments {
writer.write_all(commitments)?;
}
signed.write_without_nonce(writer) signed.write_without_nonce(writer)
} }
Transaction::DkgShares { attempt, shares, confirmation_nonces, signed } => { Transaction::DkgConfirmationNonces { attempt, confirmation_nonces, signed } => {
writer.write_all(&[2])?; writer.write_all(&[2])?;
writer.write_all(&attempt.to_le_bytes())?; writer.write_all(&attempt.to_le_bytes())?;
// `shares` is a Vec which is supposed to map to a HashMap<Participant, Vec<u8>>. Since we
// bound participants to 150, this conversion is safe if a valid in-memory transaction.
writer.write_all(&[u8::try_from(shares.len()).unwrap()])?;
// This assumes at least one share is being sent to another party
writer.write_all(&[u8::try_from(shares[0].len()).unwrap()])?;
let share_len = shares[0][0].len();
// For BLS12-381 G2, this would be:
// - A 32-byte share
// - A 96-byte ephemeral key
// - A 128-byte signature
// Hence why this has to be u16
writer.write_all(&u16::try_from(share_len).unwrap().to_le_bytes())?;
for these_shares in shares {
assert_eq!(these_shares.len(), shares[0].len(), "amount of sent shares was variable");
for share in these_shares {
assert_eq!(share.len(), share_len, "sent shares were of variable length");
writer.write_all(share)?;
}
}
writer.write_all(confirmation_nonces)?; writer.write_all(confirmation_nonces)?;
signed.write_without_nonce(writer) signed.write_without_nonce(writer)
} }
Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => { Transaction::DkgConfirmationShare { attempt, confirmation_share, signed } => {
writer.write_all(&[3])?; writer.write_all(&[3])?;
writer.write_all(&attempt.to_le_bytes())?; writer.write_all(&attempt.to_le_bytes())?;
writer.write_all(&u16::from(*accuser).to_le_bytes())?;
writer.write_all(&u16::from(*faulty).to_le_bytes())?;
// Flattens Some(vec![]) to None on the expectation no actual blame will be 0-length
assert!(blame.as_ref().map_or(1, Vec::len) != 0);
let blame_len =
u16::try_from(blame.as_ref().unwrap_or(&vec![]).len()).expect("blame exceeded 64 KB");
writer.write_all(&blame_len.to_le_bytes())?;
writer.write_all(blame.as_ref().unwrap_or(&vec![]))?;
signed.write_without_nonce(writer)
}
Transaction::DkgConfirmed { attempt, confirmation_share, signed } => {
writer.write_all(&[4])?;
writer.write_all(&attempt.to_le_bytes())?;
writer.write_all(confirmation_share)?; writer.write_all(confirmation_share)?;
signed.write_without_nonce(writer) signed.write_without_nonce(writer)
} }
Transaction::CosignSubstrateBlock(block) => { Transaction::CosignSubstrateBlock(block) => {
writer.write_all(&[5])?; writer.write_all(&[4])?;
writer.write_all(block) writer.write_all(block)
} }
Transaction::Batch { block, batch } => { Transaction::Batch { block, batch } => {
writer.write_all(&[6])?; writer.write_all(&[5])?;
writer.write_all(block)?; writer.write_all(block)?;
writer.write_all(&batch.to_le_bytes()) writer.write_all(&batch.to_le_bytes())
} }
Transaction::SubstrateBlock(block) => { Transaction::SubstrateBlock(block) => {
writer.write_all(&[7])?; writer.write_all(&[6])?;
writer.write_all(&block.to_le_bytes()) writer.write_all(&block.to_le_bytes())
} }
Transaction::SubstrateSign(data) => { Transaction::SubstrateSign(data) => {
writer.write_all(&[8])?; writer.write_all(&[7])?;
data.write(writer) data.write(writer)
} }
Transaction::Sign(data) => { Transaction::Sign(data) => {
writer.write_all(&[9])?; writer.write_all(&[8])?;
data.write(writer) data.write(writer)
} }
Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => { Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => {
writer.write_all(&[10])?; writer.write_all(&[9])?;
writer.write_all(plan)?; writer.write_all(plan)?;
writer writer
.write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceeded 255 bytes")])?; .write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceeded 255 bytes")])?;
@@ -557,7 +425,7 @@ impl ReadWrite for Transaction {
signature.write(writer) signature.write(writer)
} }
Transaction::SlashReport(points, signed) => { Transaction::SlashReport(points, signed) => {
writer.write_all(&[11])?; writer.write_all(&[10])?;
writer.write_all(&[u8::try_from(points.len()).unwrap()])?; writer.write_all(&[u8::try_from(points.len()).unwrap()])?;
for points in points { for points in points {
writer.write_all(&points.to_le_bytes())?; writer.write_all(&points.to_le_bytes())?;
@@ -571,15 +439,16 @@ impl ReadWrite for Transaction {
impl TransactionTrait for Transaction { impl TransactionTrait for Transaction {
fn kind(&self) -> TransactionKind<'_> { fn kind(&self) -> TransactionKind<'_> {
match self { match self {
Transaction::RemoveParticipantDueToDkg { participant, signed } => { Transaction::RemoveParticipant { participant, signed } => {
TransactionKind::Signed((b"remove", participant.to_bytes()).encode(), signed) TransactionKind::Signed((b"remove", participant.to_bytes()).encode(), signed)
} }
Transaction::DkgCommitments { attempt, commitments: _, signed } | Transaction::DkgParticipation { signed, .. } => {
Transaction::DkgShares { attempt, signed, .. } | TransactionKind::Signed(b"dkg".to_vec(), signed)
Transaction::InvalidDkgShare { attempt, signed, .. } | }
Transaction::DkgConfirmed { attempt, signed, .. } => { Transaction::DkgConfirmationNonces { attempt, signed, .. } |
TransactionKind::Signed((b"dkg", attempt).encode(), signed) Transaction::DkgConfirmationShare { attempt, signed, .. } => {
TransactionKind::Signed((b"dkg_confirmation", attempt).encode(), signed)
} }
Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"), Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"),
@@ -646,11 +515,14 @@ impl Transaction {
fn signed(tx: &mut Transaction) -> (u32, &mut Signed) { fn signed(tx: &mut Transaction) -> (u32, &mut Signed) {
#[allow(clippy::match_same_arms)] // Doesn't make semantic sense here #[allow(clippy::match_same_arms)] // Doesn't make semantic sense here
let nonce = match tx { let nonce = match tx {
Transaction::RemoveParticipantDueToDkg { .. } => 0, Transaction::RemoveParticipant { .. } => 0,
Transaction::DkgCommitments { .. } => 0, Transaction::DkgParticipation { .. } => 0,
Transaction::DkgShares { .. } => 1, // Uses a nonce of 0 as it has an internal attempt counter we distinguish by
Transaction::InvalidDkgShare { .. } | Transaction::DkgConfirmed { .. } => 2, Transaction::DkgConfirmationNonces { .. } => 0,
// Uses a nonce of 1 due to its internal attempt counter and due to following
// DkgConfirmationNonces
Transaction::DkgConfirmationShare { .. } => 1,
Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"), Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
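A minimal sketch of the nonce scheme those comments describe, with a hypothetical trimmed-down enum: since both confirmation transactions key their signed kind by attempt, nonces restart per attempt, and the share simply follows the nonces within it.

  // Hedged sketch; the enum is a hypothetical subset of Transaction.
  enum Tx {
    RemoveParticipant,
    DkgParticipation,
    DkgConfirmationNonces { attempt: u32 },
    DkgConfirmationShare { attempt: u32 },
  }

  fn nonce(tx: &Tx) -> u32 {
    match tx {
      // Each of these is the sole signed tx for its kind
      Tx::RemoveParticipant | Tx::DkgParticipation => 0,
      // The (kind, attempt) key partitions by attempt, so this restarts at 0
      Tx::DkgConfirmationNonces { .. } => 0,
      // Follows DkgConfirmationNonces within the same attempt
      Tx::DkgConfirmationShare { .. } => 1,
    }
  }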
@@ -669,11 +541,10 @@ impl Transaction {
nonce, nonce,
#[allow(clippy::match_same_arms)] #[allow(clippy::match_same_arms)]
match tx { match tx {
Transaction::RemoveParticipantDueToDkg { ref mut signed, .. } | Transaction::RemoveParticipant { ref mut signed, .. } |
Transaction::DkgCommitments { ref mut signed, .. } | Transaction::DkgParticipation { ref mut signed, .. } |
Transaction::DkgShares { ref mut signed, .. } | Transaction::DkgConfirmationNonces { ref mut signed, .. } => signed,
Transaction::InvalidDkgShare { ref mut signed, .. } | Transaction::DkgConfirmationShare { ref mut signed, .. } => signed,
Transaction::DkgConfirmed { ref mut signed, .. } => signed,
Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"), Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
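A standalone sketch of the wire pattern the new `DkgParticipation` arms follow: a kind byte, a little-endian u32 length prefix, then the opaque participation bytes, with the size cap enforced before allocating on read. `TRANSACTION_SIZE_LIMIT` is a placeholder constant here.

  use std::io::{self, Read, Write};

  // Placeholder; the real constant lives alongside the Transaction type.
  const TRANSACTION_SIZE_LIMIT: u32 = 1 << 20;

  fn write_participation<W: Write>(writer: &mut W, participation: &[u8]) -> io::Result<()> {
    writer.write_all(&[1])?; // kind byte for DkgParticipation
    writer.write_all(&u32::try_from(participation.len()).unwrap().to_le_bytes())?;
    writer.write_all(participation)
  }

  // Assumes the kind byte was already consumed by the caller's match
  fn read_participation<R: Read>(reader: &mut R) -> io::Result<Vec<u8>> {
    let mut len = [0; 4];
    reader.read_exact(&mut len)?;
    let len = u32::from_le_bytes(len);
    // Reject oversized payloads before allocating, as the read arm above does
    if len > TRANSACTION_SIZE_LIMIT {
      Err(io::Error::other("participation exceeded transaction size limit"))?;
    }
    let mut participation = vec![0; usize::try_from(len).unwrap()];
    reader.read_exact(&mut participation)?;
    Ok(participation)
  }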

View File

@@ -27,8 +27,7 @@ rand_chacha = { version = "0.3", default-features = false, features = ["std"] }
blake2 = { version = "0.10", default-features = false, features = ["std"] } blake2 = { version = "0.10", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] } transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] }
dalek-ff-group = { path = "../../crypto/dalek-ff-group" } ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] }
ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", default-features = false, features = ["std"] } schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", default-features = false, features = ["std"] }
hex = { version = "0.4", default-features = false, features = ["std"] } hex = { version = "0.4", default-features = false, features = ["std"] }

View File

@@ -1,7 +1,6 @@
use std::collections::{VecDeque, HashSet}; use std::collections::{VecDeque, HashSet};
use dalek_ff_group::Ristretto; use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use ciphersuite::{group::GroupEncoding, Ciphersuite};
use serai_db::{Get, DbTxn, Db}; use serai_db::{Get, DbTxn, Db};

View File

@@ -5,8 +5,7 @@ use async_trait::async_trait;
use zeroize::Zeroizing; use zeroize::Zeroizing;
use dalek_ff_group::Ristretto; use ciphersuite::{Ciphersuite, Ristretto};
use ciphersuite::Ciphersuite;
use scale::Decode; use scale::Decode;
use futures_channel::mpsc::UnboundedReceiver; use futures_channel::mpsc::UnboundedReceiver;

View File

@@ -1,7 +1,6 @@
use std::collections::HashMap; use std::collections::HashMap;
use dalek_ff_group::Ristretto; use ciphersuite::{Ciphersuite, Ristretto};
use ciphersuite::Ciphersuite;
use serai_db::{DbTxn, Db}; use serai_db::{DbTxn, Db};
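Several of the import diffs above and below are the same mechanical migration: `Ristretto` now comes from `ciphersuite` (behind its new `ristretto` feature) rather than from `dalek-ff-group` directly. A hedged before/after sketch:

  // Before: curve types and the Ciphersuite trait came from separate crates.
  // use dalek_ff_group::Ristretto;
  // use ciphersuite::Ciphersuite;

  // After: ciphersuite defines (and feature-gates) the curve itself.
  use ciphersuite::{Ciphersuite, Ristretto};

  fn generator() -> <Ristretto as Ciphersuite>::G {
    <Ristretto as Ciphersuite>::generator()
  }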

View File

@@ -11,13 +11,12 @@ use rand_chacha::ChaCha12Rng;
use transcript::{Transcript, RecommendedTranscript}; use transcript::{Transcript, RecommendedTranscript};
use dalek_ff_group::Ristretto;
use ciphersuite::{ use ciphersuite::{
group::{ group::{
GroupEncoding, GroupEncoding,
ff::{Field, PrimeField}, ff::{Field, PrimeField},
}, },
Ciphersuite, Ciphersuite, Ristretto,
}; };
use schnorr::{ use schnorr::{
SchnorrSignature, SchnorrSignature,

View File

@@ -4,8 +4,7 @@ use scale::{Encode, Decode, IoReader};
use blake2::{Digest, Blake2s256}; use blake2::{Digest, Blake2s256};
use dalek_ff_group::Ristretto; use ciphersuite::{Ciphersuite, Ristretto};
use ciphersuite::Ciphersuite;
use crate::{ use crate::{
transaction::{Transaction, TransactionKind, TransactionError}, transaction::{Transaction, TransactionKind, TransactionError},

View File

@@ -1,11 +1,9 @@
use std::{sync::Arc, io, collections::HashMap, fmt::Debug}; use std::{sync::Arc, io, collections::HashMap, fmt::Debug};
use blake2::{Digest, Blake2s256}; use blake2::{Digest, Blake2s256};
use dalek_ff_group::Ristretto;
use ciphersuite::{ use ciphersuite::{
group::{ff::Field, Group}, group::{ff::Field, Group},
Ciphersuite, Ciphersuite, Ristretto,
}; };
use schnorr::SchnorrSignature; use schnorr::SchnorrSignature;

View File

@@ -10,8 +10,7 @@ use rand::rngs::OsRng;
use blake2::{Digest, Blake2s256}; use blake2::{Digest, Blake2s256};
use dalek_ff_group::Ristretto; use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
use ciphersuite::{group::ff::Field, Ciphersuite};
use serai_db::{DbTxn, Db, MemDb}; use serai_db::{DbTxn, Db, MemDb};

View File

@@ -3,8 +3,7 @@ use std::{sync::Arc, collections::HashMap};
use zeroize::Zeroizing; use zeroize::Zeroizing;
use rand::{RngCore, rngs::OsRng}; use rand::{RngCore, rngs::OsRng};
use dalek_ff_group::Ristretto; use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
use ciphersuite::{group::ff::Field, Ciphersuite};
use tendermint::ext::Commit; use tendermint::ext::Commit;

View File

@@ -6,10 +6,9 @@ use rand::{RngCore, CryptoRng, rngs::OsRng};
use blake2::{Digest, Blake2s256}; use blake2::{Digest, Blake2s256};
use dalek_ff_group::Ristretto;
use ciphersuite::{ use ciphersuite::{
group::{ff::Field, Group}, group::{ff::Field, Group},
Ciphersuite, Ciphersuite, Ristretto,
}; };
use schnorr::SchnorrSignature; use schnorr::SchnorrSignature;

View File

@@ -2,8 +2,7 @@ use rand::rngs::OsRng;
use blake2::{Digest, Blake2s256}; use blake2::{Digest, Blake2s256};
use dalek_ff_group::Ristretto; use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
use ciphersuite::{group::ff::Field, Ciphersuite};
use crate::{ use crate::{
ReadWrite, ReadWrite,

View File

@@ -3,8 +3,7 @@ use std::sync::Arc;
use zeroize::Zeroizing; use zeroize::Zeroizing;
use rand::{RngCore, rngs::OsRng}; use rand::{RngCore, rngs::OsRng};
use dalek_ff_group::Ristretto; use ciphersuite::{Ristretto, Ciphersuite, group::ff::Field};
use ciphersuite::{Ciphersuite, group::ff::Field};
use scale::Encode; use scale::Encode;

View File

@@ -6,10 +6,9 @@ use thiserror::Error;
use blake2::{Digest, Blake2b512}; use blake2::{Digest, Blake2b512};
use dalek_ff_group::Ristretto;
use ciphersuite::{ use ciphersuite::{
group::{Group, GroupEncoding}, group::{Group, GroupEncoding},
Ciphersuite, Ciphersuite, Ristretto,
}; };
use schnorr::SchnorrSignature; use schnorr::SchnorrSignature;

View File

@@ -25,7 +25,7 @@ parity-scale-codec = { version = "3", default-features = false, features = ["std
futures-util = { version = "0.3", default-features = false, features = ["std", "async-await-macro", "sink", "channel"] } futures-util = { version = "0.3", default-features = false, features = ["std", "async-await-macro", "sink", "channel"] }
futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] } futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }
patchable-async-sleep = { version = "0.1", path = "../../../common/patchable-async-sleep", default-features = false } tokio = { version = "1", default-features = false, features = ["time"] }
serai-db = { path = "../../../common/db", version = "0.1", default-features = false } serai-db = { path = "../../../common/db", version = "0.1", default-features = false }

View File

@@ -1,5 +1,3 @@
#![expect(clippy::cast_possible_truncation)]
use core::fmt::Debug; use core::fmt::Debug;
use std::{ use std::{
@@ -15,7 +13,7 @@ use futures_util::{
FutureExt, StreamExt, SinkExt, FutureExt, StreamExt, SinkExt,
future::{self, Fuse}, future::{self, Fuse},
}; };
use patchable_async_sleep::sleep; use tokio::time::sleep;
use serai_db::{Get, DbTxn, Db}; use serai_db::{Get, DbTxn, Db};
@@ -710,17 +708,9 @@ impl<N: Network + 'static> TendermintMachine<N> {
if let Data::Proposal(_, block) = &msg.data { if let Data::Proposal(_, block) = &msg.data {
match self.network.validate(block).await { match self.network.validate(block).await {
Ok(()) => {} Ok(()) => {}
Err(BlockError::Temporal) => { Err(BlockError::Temporal) => return Err(TendermintError::Temporal),
if self.block.round().step == Step::Propose {
self.broadcast(Data::Prevote(None));
}
Err(TendermintError::Temporal)?;
}
Err(BlockError::Fatal) => { Err(BlockError::Fatal) => {
log::warn!(target: "tendermint", "validator proposed a fatally invalid block"); log::warn!(target: "tendermint", "validator proposed a fatally invalid block");
if self.block.round().step == Step::Propose {
self.broadcast(Data::Prevote(None));
}
self self
.slash( .slash(
msg.sender, msg.sender,
@@ -739,9 +729,6 @@ impl<N: Network + 'static> TendermintMachine<N> {
target: "tendermint", target: "tendermint",
"proposed proposed with a syntactically invalid valid round", "proposed proposed with a syntactically invalid valid round",
); );
if self.block.round().step == Step::Propose {
self.broadcast(Data::Prevote(None));
}
self self
.slash(msg.sender, SlashEvent::WithEvidence(Evidence::InvalidValidRound(msg.encode()))) .slash(msg.sender, SlashEvent::WithEvidence(Evidence::InvalidValidRound(msg.encode())))
.await; .await;

View File

@@ -5,7 +5,7 @@ use std::{
}; };
use futures_util::{FutureExt, future}; use futures_util::{FutureExt, future};
use patchable_async_sleep::sleep; use tokio::time::sleep;
use crate::{ use crate::{
time::CanonicalInstant, time::CanonicalInstant,

View File

@@ -1,13 +1,13 @@
[package] [package]
name = "ciphersuite" name = "ciphersuite"
version = "0.4.2" version = "0.4.1"
description = "Ciphersuites built around ff/group" description = "Ciphersuites built around ff/group"
license = "MIT" license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ciphersuite" repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ciphersuite"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["ciphersuite", "ff", "group"] keywords = ["ciphersuite", "ff", "group"]
edition = "2021" edition = "2021"
rust-version = "1.66" rust-version = "1.74"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true
@@ -24,12 +24,22 @@ rand_core = { version = "0.6", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["derive"] } zeroize = { version = "^1.5", default-features = false, features = ["derive"] }
subtle = { version = "^2.4", default-features = false } subtle = { version = "^2.4", default-features = false }
digest = { version = "0.10", default-features = false, features = ["core-api"] } digest = { version = "0.10", default-features = false }
transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false } transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false }
sha2 = { version = "0.10", default-features = false, optional = true }
sha3 = { version = "0.10", default-features = false, optional = true }
ff = { version = "0.13", default-features = false, features = ["bits"] } ff = { version = "0.13", default-features = false, features = ["bits"] }
group = { version = "0.13", default-features = false } group = { version = "0.13", default-features = false }
dalek-ff-group = { path = "../dalek-ff-group", version = "0.4", default-features = false, optional = true }
elliptic-curve = { version = "0.13", default-features = false, features = ["hash2curve"], optional = true }
p256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"], optional = true }
k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"], optional = true }
minimal-ed448 = { path = "../ed448", version = "0.4", default-features = false, optional = true }
[dev-dependencies] [dev-dependencies]
hex = { version = "0.4", default-features = false, features = ["std"] } hex = { version = "0.4", default-features = false, features = ["std"] }
@@ -38,7 +48,7 @@ rand_core = { version = "0.6", default-features = false, features = ["std"] }
ff-group-tests = { version = "0.13", path = "../ff-group-tests" } ff-group-tests = { version = "0.13", path = "../ff-group-tests" }
[features] [features]
alloc = ["std-shims", "ff/alloc"] alloc = ["std-shims"]
std = [ std = [
"std-shims/std", "std-shims/std",
@@ -49,8 +59,27 @@ std = [
"digest/std", "digest/std",
"transcript/std", "transcript/std",
"sha2?/std",
"sha3?/std",
"ff/std", "ff/std",
"dalek-ff-group?/std",
"elliptic-curve?/std",
"p256?/std",
"k256?/std",
"minimal-ed448?/std",
] ]
dalek = ["sha2", "dalek-ff-group"]
ed25519 = ["dalek"]
ristretto = ["dalek"]
kp256 = ["sha2", "elliptic-curve"]
p256 = ["kp256", "dep:p256"]
secp256k1 = ["kp256", "k256"]
ed448 = ["sha3", "minimal-ed448"]
default = ["std"] default = ["std"]

View File

@@ -21,8 +21,6 @@ Their `hash_to_F` is the
[IETF's hash to curve](https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html), [IETF's hash to curve](https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html),
yet applied to their scalar field. yet applied to their scalar field.
Please see the [`ciphersuite-kp256`](https://docs.rs/ciphersuite-kp256) crate for more info.
### Ed25519/Ristretto ### Ed25519/Ristretto
Ed25519/Ristretto are offered via Ed25519/Ristretto are offered via
@@ -35,8 +33,6 @@ the draft
[RFC-RISTRETTO](https://www.ietf.org/archive/id/draft-irtf-cfrg-ristretto255-decaf448-05.html). [RFC-RISTRETTO](https://www.ietf.org/archive/id/draft-irtf-cfrg-ristretto255-decaf448-05.html).
The domain-separation tag is naively prefixed to the message. The domain-separation tag is naively prefixed to the message.
Please see the [`dalek-ff-group`](https://docs.rs/dalek-ff-group) crate for more info.
### Ed448 ### Ed448
Ed448 is offered via [minimal-ed448](https://crates.io/crates/minimal-ed448), an Ed448 is offered via [minimal-ed448](https://crates.io/crates/minimal-ed448), an
@@ -46,5 +42,3 @@ to its prime-order subgroup.
Its `hash_to_F` is the wide reduction of SHAKE256, with a 114-byte output, as Its `hash_to_F` is the wide reduction of SHAKE256, with a 114-byte output, as
used in [RFC-8032](https://www.rfc-editor.org/rfc/rfc8032). The used in [RFC-8032](https://www.rfc-editor.org/rfc/rfc8032). The
domain-separation tag is naively prefixed to the message. domain-separation tag is naively prefixed to the message.
Please see the [`minimal-ed448`](https://docs.rs/minimal-ed448) crate for more info.

View File

@@ -1,55 +0,0 @@
[package]
name = "ciphersuite-kp256"
version = "0.4.0"
description = "Ciphersuites built around ff/group"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ciphersuite/kp256"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["ciphersuite", "ff", "group"]
edition = "2021"
rust-version = "1.66"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
rand_core = { version = "0.6", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["derive"] }
sha2 = { version = "0.10", default-features = false }
elliptic-curve = { version = "0.13", default-features = false, features = ["hash2curve"] }
p256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"] }
k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"] }
ciphersuite = { path = "../", version = "0.4", default-features = false }
[dev-dependencies]
hex = { version = "0.4", default-features = false, features = ["std"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }
ff-group-tests = { version = "0.13", path = "../../ff-group-tests" }
[features]
alloc = ["ciphersuite/alloc"]
std = [
"rand_core/std",
"zeroize/std",
"sha2/std",
"elliptic-curve/std",
"p256/std",
"k256/std",
"ciphersuite/std",
]
default = ["std"]

View File

@@ -1,3 +0,0 @@
# Ciphersuite {k, p}256
SECP256k1 and P-256 Ciphersuites around k256 and p256.

View File

@@ -3,9 +3,9 @@ use zeroize::Zeroize;
use sha2::{Digest, Sha512}; use sha2::{Digest, Sha512};
use group::Group; use group::Group;
use crate::Scalar; use dalek_ff_group::Scalar;
use ciphersuite::Ciphersuite; use crate::Ciphersuite;
macro_rules! dalek_curve { macro_rules! dalek_curve {
( (
@@ -15,7 +15,7 @@ macro_rules! dalek_curve {
$Point: ident, $Point: ident,
$ID: literal $ID: literal
) => { ) => {
use crate::$Point; use dalek_ff_group::$Point;
impl Ciphersuite for $Ciphersuite { impl Ciphersuite for $Ciphersuite {
type F = Scalar; type F = Scalar;
@@ -40,9 +40,12 @@ macro_rules! dalek_curve {
/// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition /// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition
/// between the two. This means `dst: b"abc", data: b"def"`, will produce the same scalar as /// between the two. This means `dst: b"abc", data: b"def"`, will produce the same scalar as
/// `dst: "abcdef", data: b""`. Please use carefully, not letting dsts be substrings of each other. /// `dst: "abcdef", data: b""`. Please use carefully, not letting dsts be substrings of each other.
#[cfg(any(test, feature = "ristretto"))]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)] #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct Ristretto; pub struct Ristretto;
#[cfg(any(test, feature = "ristretto"))]
dalek_curve!("ristretto", Ristretto, RistrettoPoint, b"ristretto"); dalek_curve!("ristretto", Ristretto, RistrettoPoint, b"ristretto");
#[cfg(any(test, feature = "ristretto"))]
#[test] #[test]
fn test_ristretto() { fn test_ristretto() {
ff_group_tests::group::test_prime_group_bits::<_, RistrettoPoint>(&mut rand_core::OsRng); ff_group_tests::group::test_prime_group_bits::<_, RistrettoPoint>(&mut rand_core::OsRng);
@@ -68,9 +71,12 @@ fn test_ristretto() {
/// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition /// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition
/// between the two. This means `dst: b"abc", data: b"def"`, will produce the same scalar as /// between the two. This means `dst: b"abc", data: b"def"`, will produce the same scalar as
/// `dst: "abcdef", data: b""`. Please use carefully, not letting dsts be substrings of each other. /// `dst: "abcdef", data: b""`. Please use carefully, not letting dsts be substrings of each other.
#[cfg(feature = "ed25519")]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)] #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct Ed25519; pub struct Ed25519;
#[cfg(feature = "ed25519")]
dalek_curve!("ed25519", Ed25519, EdwardsPoint, b"edwards25519"); dalek_curve!("ed25519", Ed25519, EdwardsPoint, b"edwards25519");
#[cfg(feature = "ed25519")]
#[test] #[test]
fn test_ed25519() { fn test_ed25519() {
ff_group_tests::group::test_prime_group_bits::<_, EdwardsPoint>(&mut rand_core::OsRng); ff_group_tests::group::test_prime_group_bits::<_, EdwardsPoint>(&mut rand_core::OsRng);

View File

@@ -1,17 +1,15 @@
use zeroize::Zeroize; use zeroize::Zeroize;
use sha3::{ use digest::{
digest::{ typenum::U114, core_api::BlockSizeUser, Update, Output, OutputSizeUser, FixedOutput,
typenum::U114, core_api::BlockSizeUser, Update, Output, OutputSizeUser, FixedOutput, ExtendableOutput, XofReader, HashMarker, Digest,
ExtendableOutput, XofReader, HashMarker, Digest,
},
Shake256,
}; };
use sha3::Shake256;
use group::Group; use group::Group;
use crate::{Scalar, Point}; use minimal_ed448::{Scalar, Point};
use ciphersuite::Ciphersuite; use crate::Ciphersuite;
/// Shake256, fixed to a 114-byte output, as used by Ed448. /// Shake256, fixed to a 114-byte output, as used by Ed448.
#[derive(Clone, Default)] #[derive(Clone, Default)]

View File

@@ -1,17 +1,16 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
use zeroize::Zeroize; use zeroize::Zeroize;
use sha2::Sha256; use sha2::Sha256;
use group::ff::PrimeField;
use elliptic_curve::{ use elliptic_curve::{
generic_array::GenericArray, generic_array::GenericArray,
bigint::{NonZero, CheckedAdd, Encoding, U384}, bigint::{NonZero, CheckedAdd, Encoding, U384},
hash2curve::{Expander, ExpandMsg, ExpandMsgXmd}, hash2curve::{Expander, ExpandMsg, ExpandMsgXmd},
}; };
use ciphersuite::{group::ff::PrimeField, Ciphersuite}; use crate::Ciphersuite;
macro_rules! kp_curve { macro_rules! kp_curve {
( (
@@ -108,9 +107,12 @@ fn test_oversize_dst<C: Ciphersuite>() {
/// Ciphersuite for Secp256k1. /// Ciphersuite for Secp256k1.
/// ///
/// hash_to_F is implemented via the IETF draft for hash to curve's hash_to_field (v16). /// hash_to_F is implemented via the IETF draft for hash to curve's hash_to_field (v16).
#[cfg(feature = "secp256k1")]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)] #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct Secp256k1; pub struct Secp256k1;
#[cfg(feature = "secp256k1")]
kp_curve!("secp256k1", k256, Secp256k1, b"secp256k1"); kp_curve!("secp256k1", k256, Secp256k1, b"secp256k1");
#[cfg(feature = "secp256k1")]
#[test] #[test]
fn test_secp256k1() { fn test_secp256k1() {
ff_group_tests::group::test_prime_group_bits::<_, k256::ProjectivePoint>(&mut rand_core::OsRng); ff_group_tests::group::test_prime_group_bits::<_, k256::ProjectivePoint>(&mut rand_core::OsRng);
@@ -143,9 +145,12 @@ fn test_secp256k1() {
/// Ciphersuite for P-256. /// Ciphersuite for P-256.
/// ///
/// hash_to_F is implemented via the IETF draft for hash to curve's hash_to_field (v16). /// hash_to_F is implemented via the IETF draft for hash to curve's hash_to_field (v16).
#[cfg(feature = "p256")]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)] #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct P256; pub struct P256;
#[cfg(feature = "p256")]
kp_curve!("p256", p256, P256, b"P-256"); kp_curve!("p256", p256, P256, b"P-256");
#[cfg(feature = "p256")]
#[test] #[test]
fn test_p256() { fn test_p256() {
ff_group_tests::group::test_prime_group_bits::<_, p256::ProjectivePoint>(&mut rand_core::OsRng); ff_group_tests::group::test_prime_group_bits::<_, p256::ProjectivePoint>(&mut rand_core::OsRng);

View File

@@ -2,7 +2,7 @@
Ciphersuites for elliptic curves premised on ff/group. Ciphersuites for elliptic curves premised on ff/group.
This library was This library, except for the not recommended Ed448 ciphersuite, was
[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf), [audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),
culminating in commit culminating in commit
[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06). [669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).

View File

@@ -1,12 +1,9 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("lib.md")]
#![cfg_attr(not(feature = "std"), no_std)]
use core::fmt::Debug;
#[cfg(any(feature = "alloc", feature = "std"))]
-#[allow(unused_imports)]
-use std_shims::prelude::*;
-#[cfg(any(feature = "alloc", feature = "std"))]
use std_shims::io::{self, Read};
use rand_core::{RngCore, CryptoRng};
@@ -26,6 +23,25 @@ use group::{
#[cfg(any(feature = "alloc", feature = "std"))]
use group::GroupEncoding;
+#[cfg(feature = "dalek")]
+mod dalek;
+#[cfg(feature = "ristretto")]
+pub use dalek::Ristretto;
+#[cfg(feature = "ed25519")]
+pub use dalek::Ed25519;
+#[cfg(feature = "kp256")]
+mod kp256;
+#[cfg(feature = "secp256k1")]
+pub use kp256::Secp256k1;
+#[cfg(feature = "p256")]
+pub use kp256::P256;
+#[cfg(feature = "ed448")]
+mod ed448;
+#[cfg(feature = "ed448")]
+pub use ed448::*;
/// Unified trait defining a ciphersuite around an elliptic curve.
pub trait Ciphersuite:
'static + Send + Sync + Clone + Copy + PartialEq + Eq + Debug + Zeroize
@@ -83,9 +99,6 @@ pub trait Ciphersuite:
}
/// Read a canonical point from something implementing std::io::Read.
-///
-/// The provided implementation is safe so long as `GroupEncoding::to_bytes` always returns a
-/// canonical serialization.
#[cfg(any(feature = "alloc", feature = "std"))]
#[allow(non_snake_case)]
fn read_G<R: Read>(reader: &mut R) -> io::Result<Self::G> {


@@ -1,13 +1,13 @@
[package]
name = "dalek-ff-group"
-version = "0.4.4"
+version = "0.4.1"
description = "ff/group bindings around curve25519-dalek"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dalek-ff-group"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["curve25519", "ed25519", "ristretto", "dalek", "group"]
edition = "2021"
-rust-version = "1.65"
+rust-version = "1.66"
[package.metadata.docs.rs]
all-features = true
@@ -25,22 +25,18 @@ subtle = { version = "^2.4", default-features = false }
rand_core = { version = "0.6", default-features = false }
digest = { version = "0.10", default-features = false }
-sha2 = { version = "0.10", default-features = false }
ff = { version = "0.13", default-features = false, features = ["bits"] }
group = { version = "0.13", default-features = false }
-ciphersuite = { path = "../ciphersuite", default-features = false }
crypto-bigint = { version = "0.5", default-features = false, features = ["zeroize"] }
curve25519-dalek = { version = ">= 4.0, < 4.2", default-features = false, features = ["alloc", "zeroize", "digest", "group", "precomputed-tables"] }
[dev-dependencies]
-hex = "0.4"
rand_core = { version = "0.6", default-features = false, features = ["std"] }
ff-group-tests = { path = "../ff-group-tests" }
[features]
-alloc = ["zeroize/alloc", "ciphersuite/alloc"]
-std = ["alloc", "zeroize/std", "subtle/std", "rand_core/std", "digest/std", "sha2/std", "ciphersuite/std"]
+std = ["zeroize/std", "subtle/std", "rand_core/std", "digest/std"]
default = ["std"]


@@ -17,7 +17,7 @@ use crypto_bigint::{
impl_modulus,
};
-use group::ff::{Field, PrimeField, FieldBits, PrimeFieldBits, FromUniformBytes};
+use group::ff::{Field, PrimeField, FieldBits, PrimeFieldBits};
use crate::{u8_from_bool, constant_time, math_op, math};
@@ -35,8 +35,7 @@ impl_modulus!(
type ResidueType = Residue<FieldModulus, { FieldModulus::LIMBS }>;
/// A constant-time implementation of the Ed25519 field.
-#[derive(Clone, Copy, PartialEq, Eq, Default, Debug, Zeroize)]
-#[repr(transparent)]
+#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)]
pub struct FieldElement(ResidueType);
// Square root of -1.
@@ -93,7 +92,7 @@ impl Neg for FieldElement {
}
}
-impl Neg for &FieldElement {
+impl<'a> Neg for &'a FieldElement {
type Output = FieldElement;
fn neg(self) -> Self::Output {
(*self).neg()
@@ -217,18 +216,10 @@ impl PrimeFieldBits for FieldElement {
}
impl FieldElement {
-/// Create a FieldElement from a `crypto_bigint::U256`.
-///
-/// This will reduce the `U256` by the modulus, into a member of the field.
-pub const fn from_u256(u256: &U256) -> Self {
-FieldElement(Residue::new(u256))
-}
-/// Create a `FieldElement` from the reduction of a 512-bit number.
-///
-/// The bytes are interpreted in little-endian format.
-pub fn wide_reduce(value: [u8; 64]) -> Self {
-FieldElement(reduce(U512::from_le_bytes(value)))
+/// Interpret the value as a little-endian integer, square it, and reduce it into a FieldElement.
+pub fn from_square(value: [u8; 32]) -> FieldElement {
+let value = U256::from_le_bytes(value);
+FieldElement(reduce(U512::from(value.mul_wide(&value))))
}
/// Perform an exponentiation.
@@ -253,16 +244,7 @@ impl FieldElement {
res *= res;
}
}
-let mut scale_by = FieldElement::ONE;
-#[allow(clippy::needless_range_loop)]
-for i in 0 .. 16 {
-#[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 16
-{
-scale_by = <_>::conditional_select(&scale_by, &table[i], bits.ct_eq(&(i as u8)));
-}
-}
-res *= scale_by;
+res *= table[usize::from(bits)];
bits = 0;
}
}
@@ -306,12 +288,6 @@ impl FieldElement {
}
}
-impl FromUniformBytes<64> for FieldElement {
-fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {
-Self::wide_reduce(*bytes)
-}
-}
impl Sum<FieldElement> for FieldElement {
fn sum<I: Iterator<Item = FieldElement>>(iter: I) -> FieldElement {
let mut res = FieldElement::ZERO;


@@ -1,5 +1,5 @@
#![allow(deprecated)]
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![no_std] // Prevents writing new code, in what should be a simple wrapper, which requires std
#![doc = include_str!("../README.md")]
#![allow(clippy::redundant_closure_call)]
@@ -30,7 +30,7 @@ use dalek::{
pub use constants::{ED25519_BASEPOINT_TABLE, RISTRETTO_BASEPOINT_TABLE};
use group::{
-ff::{Field, PrimeField, FieldBits, PrimeFieldBits, FromUniformBytes},
+ff::{Field, PrimeField, FieldBits, PrimeFieldBits},
Group, GroupEncoding,
prime::PrimeGroup,
};
@@ -38,24 +38,13 @@ use group::{
mod field;
pub use field::FieldElement;
-mod ciphersuite;
-pub use crate::ciphersuite::{Ed25519, Ristretto};
// Use black_box when possible
#[rustversion::since(1.66)]
-mod black_box {
-pub(crate) fn black_box<T>(val: T) -> T {
-#[allow(clippy::incompatible_msrv)]
-core::hint::black_box(val)
-}
-}
+use core::hint::black_box;
#[rustversion::before(1.66)]
-mod black_box {
-pub(crate) fn black_box<T>(val: T) -> T {
-val
-}
+fn black_box<T>(val: T) -> T {
+val
}
-use black_box::black_box;
fn u8_from_bool(bit_ref: &mut bool) -> u8 {
let bit_ref = black_box(bit_ref);
@@ -219,16 +208,7 @@ impl Scalar {
res *= res;
}
}
-let mut scale_by = Scalar::ONE;
-#[allow(clippy::needless_range_loop)]
-for i in 0 .. 16 {
-#[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 16
-{
-scale_by = <_>::conditional_select(&scale_by, &table[i], bits.ct_eq(&(i as u8)));
-}
-}
-res *= scale_by;
+res *= table[usize::from(bits)];
bits = 0;
}
}
@@ -325,12 +305,6 @@ impl PrimeFieldBits for Scalar {
}
}
-impl FromUniformBytes<64> for Scalar {
-fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {
-Self::from_bytes_mod_order_wide(bytes)
-}
-}
impl Sum<Scalar> for Scalar {
fn sum<I: Iterator<Item = Scalar>>(iter: I) -> Scalar {
Self(DScalar::sum(iter))
@@ -368,12 +342,7 @@ macro_rules! dalek_group {
$BASEPOINT_POINT: ident,
$BASEPOINT_TABLE: ident
) => {
-/// Wrapper around the dalek Point type.
-///
-/// All operations will be restricted to a prime-order subgroup (equivalent to the group itself
-/// in the case of Ristretto). The exposure of the internal element does allow bypassing this
-/// however, which may lead to undefined/computationally-unsafe behavior, and is entirely at
-/// the user's risk.
+/// Wrapper around the dalek Point type. For Ed25519, this is restricted to the prime subgroup.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct $Point(pub $DPoint);
deref_borrow!($Point, $DPoint);


@@ -1,13 +1,13 @@
[package]
name = "dkg"
-version = "0.6.1"
+version = "0.5.1"
description = "Distributed key generation over ff/group"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
-rust-version = "1.66"
+rust-version = "1.79"
[package.metadata.docs.rs]
all-features = true
@@ -17,25 +17,84 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true
[dependencies]
-zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive", "alloc"] }
-thiserror = { version = "2", default-features = false }
+thiserror = { version = "1", default-features = false, optional = true }
+rand_core = { version = "0.6", default-features = false }
+zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false }
borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true }
-ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false, features = ["alloc"] }
+transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false, features = ["recommended"] }
+chacha20 = { version = "0.9", default-features = false, features = ["zeroize"] }
+ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false }
+multiexp = { path = "../multiexp", version = "0.4", default-features = false }
+schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false }
+dleq = { path = "../dleq", version = "^0.4.1", default-features = false }
+# eVRF DKG dependencies
+subtle = { version = "2", default-features = false, features = ["std"], optional = true }
+generic-array = { version = "1", default-features = false, features = ["alloc"], optional = true }
+blake2 = { version = "0.10", default-features = false, features = ["std"], optional = true }
+rand_chacha = { version = "0.3", default-features = false, features = ["std"], optional = true }
+generalized-bulletproofs = { path = "../evrf/generalized-bulletproofs", default-features = false, optional = true }
+ec-divisors = { path = "../evrf/divisors", default-features = false, optional = true }
+generalized-bulletproofs-circuit-abstraction = { path = "../evrf/circuit-abstraction", optional = true }
+generalized-bulletproofs-ec-gadgets = { path = "../evrf/ec-gadgets", optional = true }
+secq256k1 = { path = "../evrf/secq256k1", optional = true }
+embedwards25519 = { path = "../evrf/embedwards25519", optional = true }
+[dev-dependencies]
+rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
+rand = { version = "0.8", default-features = false, features = ["std"] }
+ciphersuite = { path = "../ciphersuite", default-features = false, features = ["ristretto"] }
+generalized-bulletproofs = { path = "../evrf/generalized-bulletproofs", features = ["tests"] }
+ec-divisors = { path = "../evrf/divisors", features = ["pasta"] }
+pasta_curves = "0.5"
[features]
std = [
-"thiserror/std",
+"thiserror",
+"rand_core/std",
"std-shims/std",
"borsh?/std",
+"transcript/std",
+"chacha20/std",
"ciphersuite/std",
+"multiexp/std",
+"multiexp/batch",
+"schnorr/std",
+"dleq/std",
+"dleq/serialize"
]
borsh = ["dep:borsh"]
+evrf = [
+"std",
+"dep:subtle",
+"dep:generic-array",
+"dep:blake2",
+"dep:rand_chacha",
+"dep:generalized-bulletproofs",
+"dep:ec-divisors",
+"dep:generalized-bulletproofs-circuit-abstraction",
+"dep:generalized-bulletproofs-ec-gadgets",
+]
+evrf-secp256k1 = ["evrf", "ciphersuite/secp256k1", "secq256k1"]
+evrf-ed25519 = ["evrf", "ciphersuite/ed25519", "embedwards25519"]
+evrf-ristretto = ["evrf", "ciphersuite/ristretto", "embedwards25519"]
+tests = ["rand_core/getrandom"]
default = ["std"]


@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2021-2025 Luke Parker
+Copyright (c) 2021-2023 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal


@@ -1,15 +1,16 @@
# Distributed Key Generation
-A crate implementing a type for keys, presumably the result of a distributed
-key generation protocol, and utilities from there.
-This crate used to host implementations of distributed key generation protocols
-as well (hence the name). Those have been smashed into their own crates, such
-as [`dkg-musig`](https://docs.rs/dkg-musig) and
-[`dkg-pedpop`](https://docs.rs/dkg-pedpop).
-Before being smashed, this crate was [audited by Cypher Stack in March 2023](
-https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf
-), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](
-https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06
-). Any subsequent changes have not undergone auditing.
+A collection of implementations of various distributed key generation protocols.
+All included protocols resolve into the provided `Threshold` types, intended to
+enable their modularity. Additional utilities around these types, such as
+promotion from one generator to another, are also provided.
+Currently, the only included protocol is the two-round protocol from the
+[FROST paper](https://eprint.iacr.org/2020/852).
+This library was
+[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),
+culminating in commit
+[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).
+Any subsequent changes have not undergone auditing.
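To make the `Threshold` types concrete, here is a minimal sketch using the constructors visible in the file diffs below (the 3-of-5 values are arbitrary; error handling via `unwrap` matches the crate's tests):

```rust
use dkg::{Participant, ThresholdParams};

// Parameters for participant 1 of a 3-of-5 multisig.
// `Participant` is a one-indexed, explicitly non-zero identifier.
let i = Participant::new(1).unwrap();
let params = ThresholdParams::new(3, 5, i).unwrap();
assert_eq!(params.t(), 3);
assert_eq!(params.n(), 5);
```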


@@ -1,36 +0,0 @@
[package]
name = "dkg-dealer"
version = "0.6.0"
description = "Produce dkg::ThresholdKeys with a dealer key generation"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/dealer"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
rust-version = "1.66"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
zeroize = { version = "^1.5", default-features = false }
rand_core = { version = "0.6", default-features = false }
std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false }
ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false }
dkg = { path = "../", version = "0.6", default-features = false }
[features]
std = [
"zeroize/std",
"rand_core/std",
"std-shims/std",
"ciphersuite/std",
"dkg/std",
]
default = ["std"]


@@ -1,13 +0,0 @@
# Distributed Key Generation - Dealer
This crate implements a dealer key generation protocol for the
[`dkg`](https://docs.rs/dkg) crate's types. The dealer is a single point of
failure during key generation, so this is NOT recommended for use outside of
tests.
This crate was originally part of (in some form) the `dkg` crate, which was
[audited by Cypher Stack in March 2023](
https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf
), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](
https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06
). Any subsequent changes have not undergone auditing.


@@ -1,68 +0,0 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![no_std]
use core::ops::Deref;
use std_shims::{vec::Vec, collections::HashMap};
use zeroize::{Zeroize, Zeroizing};
use rand_core::{RngCore, CryptoRng};
use ciphersuite::{
group::ff::{Field, PrimeField},
Ciphersuite,
};
pub use dkg::*;
/// Create a key via a dealer key generation protocol.
pub fn key_gen<R: RngCore + CryptoRng, C: Ciphersuite>(
rng: &mut R,
threshold: u16,
participants: u16,
) -> Result<HashMap<Participant, ThresholdKeys<C>>, DkgError> {
let mut coefficients = Vec::with_capacity(usize::from(participants));
// `.max(1)` so we always generate the 0th coefficient which we'll share
for _ in 0 .. threshold.max(1) {
coefficients.push(Zeroizing::new(C::F::random(&mut *rng)));
}
fn polynomial<F: PrimeField + Zeroize>(
coefficients: &[Zeroizing<F>],
l: Participant,
) -> Zeroizing<F> {
let l = F::from(u64::from(u16::from(l)));
// This should never be reached since Participant is explicitly non-zero
assert!(l != F::ZERO, "zero participant passed to polynomial");
let mut share = Zeroizing::new(F::ZERO);
for (idx, coefficient) in coefficients.iter().rev().enumerate() {
*share += coefficient.deref();
if idx != (coefficients.len() - 1) {
*share *= l;
}
}
share
}
let group_key = C::generator() * coefficients[0].deref();
let mut secret_shares = HashMap::with_capacity(participants as usize);
let mut verification_shares = HashMap::with_capacity(participants as usize);
for i in 1 ..= participants {
let i = Participant::new(i).expect("non-zero u16 wasn't a valid Participant index");
let secret_share = polynomial(&coefficients, i);
secret_shares.insert(i, secret_share.clone());
verification_shares.insert(i, C::generator() * *secret_share);
}
let mut res = HashMap::with_capacity(participants as usize);
for (i, secret_share) in secret_shares {
let keys = ThresholdKeys::new(
ThresholdParams::new(threshold, participants, i)?,
Interpolation::Lagrange,
secret_share,
verification_shares.clone(),
)?;
debug_assert_eq!(keys.group_key(), group_key);
res.insert(i, keys);
}
Ok(res)
}
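A usage sketch of the `key_gen` function above (the `Ristretto` ciphersuite from `dalek-ff-group` is an assumed choice; any `Ciphersuite` implementor works):

```rust
use rand_core::OsRng;
use dalek_ff_group::Ristretto;
use dkg_dealer::{key_gen, Participant};

// Deal a 3-of-5 key. The dealer sees every secret share, hence tests-only.
let keys = key_gen::<_, Ristretto>(&mut OsRng, 3, 5).unwrap();
assert_eq!(keys.len(), 5);
// Every participant's view agrees on the group key.
let group_key = keys[&Participant::new(1).unwrap()].group_key();
for keys in keys.values() {
  assert_eq!(keys.group_key(), group_key);
}
```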


@@ -1,49 +0,0 @@
[package]
name = "dkg-musig"
version = "0.6.0"
description = "The MuSig key aggregation protocol"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/musig"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
rust-version = "1.79"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
thiserror = { version = "2", default-features = false }
rand_core = { version = "0.6", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false }
multiexp = { path = "../../multiexp", version = "0.4", default-features = false }
ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false }
dkg = { path = "../", version = "0.6", default-features = false }
[dev-dependencies]
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
dalek-ff-group = { path = "../../dalek-ff-group" }
dkg-recovery = { path = "../recovery", default-features = false, features = ["std"] }
[features]
std = [
"thiserror/std",
"rand_core/std",
"std-shims/std",
"multiexp/std",
"ciphersuite/std",
"dkg/std",
]
default = ["std"]


@@ -1,12 +0,0 @@
# Distributed Key Generation - MuSig
This implements the MuSig key aggregation protocol for the
[`dkg`](https://docs.rs/dkg) crate's types.
This crate was originally part of (in some form) the `dkg` crate, which was
[audited by Cypher Stack in March 2023](
https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf
), culminating in commit
[669d2dbffc1dafb82a09d9419ea182667115df06](
https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06
). Any subsequent changes have not undergone auditing.


@@ -1,162 +0,0 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]
use core::ops::Deref;
use std_shims::{
vec,
vec::Vec,
collections::{HashSet, HashMap},
};
use zeroize::Zeroizing;
use ciphersuite::{group::GroupEncoding, Ciphersuite};
pub use dkg::*;
#[cfg(test)]
mod tests;
/// Errors encountered when working with threshold keys.
#[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)]
pub enum MusigError<C: Ciphersuite> {
/// No keys were provided.
#[error("no keys provided")]
NoKeysProvided,
/// Too many keys were provided.
#[error("too many keys (allowed {max}, provided {provided})")]
TooManyKeysProvided {
/// The maximum amount of keys allowed.
max: u16,
/// The amount of keys provided.
provided: usize,
},
/// A participant was duplicated.
#[error("a participant was duplicated")]
DuplicatedParticipant(C::G),
/// Participating, yet our public key wasn't found in the list of keys.
#[error("private key's public key wasn't present in the list of public keys")]
NotPresent,
/// An error propagated from the underlying `dkg` crate.
#[error("error from dkg ({0})")]
DkgError(DkgError),
}
fn check_keys<C: Ciphersuite>(keys: &[C::G]) -> Result<u16, MusigError<C>> {
if keys.is_empty() {
Err(MusigError::NoKeysProvided)?;
}
let keys_len = u16::try_from(keys.len())
.map_err(|_| MusigError::TooManyKeysProvided { max: u16::MAX, provided: keys.len() })?;
let mut set = HashSet::with_capacity(keys.len());
for key in keys {
let bytes = key.to_bytes().as_ref().to_vec();
if !set.insert(bytes) {
Err(MusigError::DuplicatedParticipant(*key))?;
}
}
Ok(keys_len)
}
fn binding_factor_transcript<C: Ciphersuite>(
context: [u8; 32],
keys_len: u16,
keys: &[C::G],
) -> Vec<u8> {
debug_assert_eq!(usize::from(keys_len), keys.len());
let mut transcript = vec![];
transcript.extend(&context);
transcript.extend(keys_len.to_le_bytes());
for key in keys {
transcript.extend(key.to_bytes().as_ref());
}
transcript
}
fn binding_factor<C: Ciphersuite>(mut transcript: Vec<u8>, i: u16) -> C::F {
transcript.extend(i.to_le_bytes());
C::hash_to_F(b"dkg-musig", &transcript)
}
#[allow(clippy::type_complexity)]
fn musig_key_multiexp<C: Ciphersuite>(
context: [u8; 32],
keys: &[C::G],
) -> Result<Vec<(C::F, C::G)>, MusigError<C>> {
let keys_len = check_keys::<C>(keys)?;
let transcript = binding_factor_transcript::<C>(context, keys_len, keys);
let mut multiexp = Vec::with_capacity(keys.len());
for i in 1 ..= keys_len {
multiexp.push((binding_factor::<C>(transcript.clone(), i), keys[usize::from(i - 1)]));
}
Ok(multiexp)
}
/// The group key resulting from using this library's MuSig key aggregation.
///
/// This function executes in variable time and MUST NOT be used with secret data.
pub fn musig_key_vartime<C: Ciphersuite>(
context: [u8; 32],
keys: &[C::G],
) -> Result<C::G, MusigError<C>> {
Ok(multiexp::multiexp_vartime(&musig_key_multiexp(context, keys)?))
}
/// The group key resulting from using this library's MuSig key aggregation.
pub fn musig_key<C: Ciphersuite>(context: [u8; 32], keys: &[C::G]) -> Result<C::G, MusigError<C>> {
Ok(multiexp::multiexp(&musig_key_multiexp(context, keys)?))
}
/// An n-of-n non-interactive DKG which does not guarantee the usability of the resulting key.
pub fn musig<C: Ciphersuite>(
context: [u8; 32],
private_key: Zeroizing<C::F>,
keys: &[C::G],
) -> Result<ThresholdKeys<C>, MusigError<C>> {
let our_pub_key = C::generator() * private_key.deref();
let Some(our_i) = keys.iter().position(|key| *key == our_pub_key) else {
Err(MusigError::DkgError(DkgError::NotParticipating))?
};
let keys_len: u16 = check_keys::<C>(keys)?;
let params = ThresholdParams::new(
keys_len,
keys_len,
// The `+ 1` won't fail as `keys.len() <= u16::MAX`, so any index is `< u16::MAX`
Participant::new(
u16::try_from(our_i).expect("keys.len() <= u16::MAX yet index of keys > u16::MAX?") + 1,
)
.expect("i + 1 != 0"),
)
.map_err(MusigError::DkgError)?;
let transcript = binding_factor_transcript::<C>(context, keys_len, keys);
let mut binding_factors = Vec::with_capacity(keys.len());
let mut multiexp = Vec::with_capacity(keys.len());
let mut verification_shares = HashMap::with_capacity(keys.len());
for (i, key) in (1 ..= keys_len).zip(keys.iter().copied()) {
let binding_factor = binding_factor::<C>(transcript.clone(), i);
binding_factors.push(binding_factor);
multiexp.push((binding_factor, key));
let i = Participant::new(i).expect("non-zero u16 wasn't a valid Participant index?");
verification_shares.insert(i, key);
}
let group_key = multiexp::multiexp(&multiexp);
debug_assert_eq!(our_pub_key, verification_shares[&params.i()]);
debug_assert_eq!(musig_key_vartime::<C>(context, keys), Ok(group_key));
ThresholdKeys::new(
params,
Interpolation::Constant(binding_factors),
private_key,
verification_shares,
)
.map_err(MusigError::DkgError)
}
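To make the aggregation rule explicit: the group key is the sum of each provided key scaled by its binding factor. A naive sketch of what `musig_key` computes, as if written inside this module (illustrative only; the actual implementation uses `multiexp` for performance):

```rust
use ciphersuite::group::Group;

fn naive_musig_key<C: Ciphersuite>(
  context: [u8; 32],
  keys: &[C::G],
) -> Result<C::G, MusigError<C>> {
  let keys_len = check_keys::<C>(keys)?;
  let transcript = binding_factor_transcript::<C>(context, keys_len, keys);
  let mut res = C::G::identity();
  // Weight each key by H(context, n, keys..., i), binding it to this exact set.
  for (i, key) in (1 ..= keys_len).zip(keys.iter().copied()) {
    res += key * binding_factor::<C>(transcript.clone(), i);
  }
  Ok(res)
}
```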


@@ -1,71 +0,0 @@
use std::collections::HashMap;
use zeroize::Zeroizing;
use rand_core::OsRng;
use dalek_ff_group::Ristretto;
use ciphersuite::{group::ff::Field, Ciphersuite};
use dkg_recovery::recover_key;
use crate::*;
/// Tests MuSig key generation.
#[test]
pub fn test_musig() {
const PARTICIPANTS: u16 = 5;
let mut keys = vec![];
let mut pub_keys = vec![];
for _ in 0 .. PARTICIPANTS {
let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
pub_keys.push(<Ristretto as Ciphersuite>::generator() * *key);
keys.push(key);
}
const CONTEXT: [u8; 32] = *b"MuSig Test ";
// Empty signing set
musig::<Ristretto>(CONTEXT, Zeroizing::new(<Ristretto as Ciphersuite>::F::ZERO), &[])
.unwrap_err();
// Signing set we're not part of
musig::<Ristretto>(
CONTEXT,
Zeroizing::new(<Ristretto as Ciphersuite>::F::ZERO),
&[<Ristretto as Ciphersuite>::generator()],
)
.unwrap_err();
// Test with n keys
{
let mut created_keys = HashMap::new();
let mut verification_shares = HashMap::new();
let group_key = musig_key::<Ristretto>(CONTEXT, &pub_keys).unwrap();
for (i, key) in keys.iter().enumerate() {
let these_keys = musig::<Ristretto>(CONTEXT, key.clone(), &pub_keys).unwrap();
assert_eq!(these_keys.params().t(), PARTICIPANTS);
assert_eq!(these_keys.params().n(), PARTICIPANTS);
assert_eq!(usize::from(u16::from(these_keys.params().i())), i + 1);
verification_shares.insert(
these_keys.params().i(),
<Ristretto as Ciphersuite>::generator() * **these_keys.original_secret_share(),
);
assert_eq!(these_keys.group_key(), group_key);
created_keys.insert(these_keys.params().i(), these_keys);
}
for keys in created_keys.values() {
for (l, verification_share) in &verification_shares {
assert_eq!(keys.original_verification_share(*l), *verification_share);
}
}
assert_eq!(
<Ristretto as Ciphersuite>::generator() *
*recover_key(&created_keys.values().cloned().collect::<Vec<_>>()).unwrap(),
group_key
);
}
}


@@ -1,37 +0,0 @@
[package]
name = "dkg-pedpop"
version = "0.6.0"
description = "The PedPoP distributed key generation protocol"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/pedpop"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
rust-version = "1.80"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
thiserror = { version = "2", default-features = false, features = ["std"] }
zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../../transcript", version = "^0.3.3", default-features = false, features = ["std", "recommended"] }
chacha20 = { version = "0.9", default-features = false, features = ["std", "zeroize"] }
multiexp = { path = "../../multiexp", version = "0.4", default-features = false, features = ["std"] }
ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../../schnorr", version = "^0.5.1", default-features = false, features = ["std"] }
dleq = { path = "../../dleq", version = "^0.4.1", default-features = false, features = ["std", "serialize"] }
dkg = { path = "../", version = "0.6", default-features = false, features = ["std"] }
[dev-dependencies]
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
dalek-ff-group = { path = "../../dalek-ff-group", default-features = false }


@@ -1,12 +0,0 @@
# Distributed Key Generation - PedPoP
This implements the PedPoP distributed key generation protocol for the
[`dkg`](https://docs.rs/dkg) crate's types.
This crate was originally part of the `dkg` crate, which was
[audited by Cypher Stack in March 2023](
https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf
), culminating in commit
[669d2dbffc1dafb82a09d9419ea182667115df06](
https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06
). Any subsequent changes have not undergone auditing.
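For orientation, a condensed sketch of the two-round flow, using the machine API exercised by the tests below (message distribution between participants is elided; `unwrap` is used as the tests do):

```rust
// Round 1: generate coefficients, broadcasting a commitment message
// (which also carries an encryption key and a proof of possession).
let machine = KeyGenMachine::<Ristretto>::new(params, CONTEXT);
let (machine, our_commitments) = machine.generate_coefficients(&mut OsRng);
// ... broadcast `our_commitments`, collect everyone else's into `their_commitments` ...

// Round 2: produce an encrypted secret share for every other participant.
let (machine, shares) = machine.generate_secret_shares(&mut OsRng, their_commitments).unwrap();
// ... send each share to its recipient, collect the shares sent to us ...

// Finalize: verify the received shares, yielding our ThresholdKeys.
let keys = machine.calculate_share(&mut OsRng, our_secret_shares).unwrap().complete();
```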


@@ -1,346 +0,0 @@
use std::collections::HashMap;
use rand_core::{RngCore, CryptoRng, OsRng};
use dalek_ff_group::Ristretto;
use ciphersuite::Ciphersuite;
use crate::*;
const THRESHOLD: u16 = 3;
const PARTICIPANTS: u16 = 5;
/// Clone a map without a specific value.
fn clone_without<K: Clone + core::cmp::Eq + core::hash::Hash, V: Clone>(
map: &HashMap<K, V>,
without: &K,
) -> HashMap<K, V> {
let mut res = map.clone();
res.remove(without).unwrap();
res
}
type PedPoPEncryptedMessage<C> = EncryptedMessage<C, SecretShare<<C as Ciphersuite>::F>>;
type PedPoPSecretShares<C> = HashMap<Participant, PedPoPEncryptedMessage<C>>;
const CONTEXT: [u8; 32] = *b"DKG Test Key Generation ";
// Commit, then return commitment messages, enc keys, and shares
#[allow(clippy::type_complexity)]
fn commit_enc_keys_and_shares<R: RngCore + CryptoRng, C: Ciphersuite>(
rng: &mut R,
) -> (
HashMap<Participant, KeyMachine<C>>,
HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
HashMap<Participant, C::G>,
HashMap<Participant, PedPoPSecretShares<C>>,
) {
let mut machines = HashMap::new();
let mut commitments = HashMap::new();
let mut enc_keys = HashMap::new();
for i in (1 ..= PARTICIPANTS).map(|i| Participant::new(i).unwrap()) {
let params = ThresholdParams::new(THRESHOLD, PARTICIPANTS, i).unwrap();
let machine = KeyGenMachine::<C>::new(params, CONTEXT);
let (machine, these_commitments) = machine.generate_coefficients(rng);
machines.insert(i, machine);
commitments.insert(
i,
EncryptionKeyMessage::read::<&[u8]>(&mut these_commitments.serialize().as_ref(), params)
.unwrap(),
);
enc_keys.insert(i, commitments[&i].enc_key());
}
let mut secret_shares = HashMap::new();
let machines = machines
.drain()
.map(|(l, machine)| {
let (machine, mut shares) =
machine.generate_secret_shares(rng, clone_without(&commitments, &l)).unwrap();
let shares = shares
.drain()
.map(|(l, share)| {
(
l,
EncryptedMessage::read::<&[u8]>(
&mut share.serialize().as_ref(),
// Only t/n actually matters, so hardcode i to 1 here
ThresholdParams::new(THRESHOLD, PARTICIPANTS, Participant::new(1).unwrap()).unwrap(),
)
.unwrap(),
)
})
.collect::<HashMap<_, _>>();
secret_shares.insert(l, shares);
(l, machine)
})
.collect::<HashMap<_, _>>();
(machines, commitments, enc_keys, secret_shares)
}
fn generate_secret_shares<C: Ciphersuite>(
shares: &HashMap<Participant, PedPoPSecretShares<C>>,
recipient: Participant,
) -> PedPoPSecretShares<C> {
let mut our_secret_shares = HashMap::new();
for (i, shares) in shares {
if recipient == *i {
continue;
}
our_secret_shares.insert(*i, shares[&recipient].clone());
}
our_secret_shares
}
/// Fully perform the PedPoP key generation algorithm.
fn pedpop_gen<R: RngCore + CryptoRng, C: Ciphersuite>(
rng: &mut R,
) -> HashMap<Participant, ThresholdKeys<C>> {
let (mut machines, _, _, secret_shares) = commit_enc_keys_and_shares::<_, C>(rng);
let mut verification_shares = None;
let mut group_key = None;
machines
.drain()
.map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i);
let these_keys = machine.calculate_share(rng, our_secret_shares).unwrap().complete();
// Verify the verification_shares are agreed upon
if verification_shares.is_none() {
verification_shares = Some(
these_keys
.params()
.all_participant_indexes()
.map(|i| (i, these_keys.original_verification_share(i)))
.collect::<HashMap<_, _>>(),
);
}
assert_eq!(
verification_shares.as_ref().unwrap(),
&these_keys
.params()
.all_participant_indexes()
.map(|i| (i, these_keys.original_verification_share(i)))
.collect::<HashMap<_, _>>()
);
// Verify the group keys are agreed upon
if group_key.is_none() {
group_key = Some(these_keys.group_key());
}
assert_eq!(group_key.unwrap(), these_keys.group_key());
(i, these_keys)
})
.collect::<HashMap<_, _>>()
}
const ONE: Participant = Participant::new(1).unwrap();
const TWO: Participant = Participant::new(2).unwrap();
#[test]
fn test_pedpop() {
let _ = core::hint::black_box(pedpop_gen::<_, Ristretto>(&mut OsRng));
}
fn test_blame(
commitment_msgs: &HashMap<Participant, EncryptionKeyMessage<Ristretto, Commitments<Ristretto>>>,
machines: Vec<BlameMachine<Ristretto>>,
msg: &PedPoPEncryptedMessage<Ristretto>,
blame: &Option<EncryptionKeyProof<Ristretto>>,
) {
for machine in machines {
let (additional, blamed) = machine.blame(ONE, TWO, msg.clone(), blame.clone());
assert_eq!(blamed, ONE);
// Verify additional blame also works
assert_eq!(additional.blame(ONE, TWO, msg.clone(), blame.clone()), ONE);
// Verify machines constructed with AdditionalBlameMachine::new work
assert_eq!(
AdditionalBlameMachine::new(CONTEXT, PARTICIPANTS, commitment_msgs.clone()).unwrap().blame(
ONE,
TWO,
msg.clone(),
blame.clone()
),
ONE,
);
}
}
// TODO: Write a macro which expands to the following
#[test]
fn invalid_encryption_pop_blame() {
let (mut machines, commitment_msgs, _, mut secret_shares) =
commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
// Mutate the PoP of the encrypted message from 1 to 2
secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_pop();
let mut blame = None;
let machines = machines
.drain()
.filter_map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i);
let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
if i == TWO {
assert_eq!(
machine.err(),
Some(PedPoPError::InvalidShare { participant: ONE, blame: None })
);
// Explicitly declare we have a blame object, which happens to be None since an invalid PoP
// is self-evident
blame = Some(None);
None
} else {
Some(machine.unwrap())
}
})
.collect::<Vec<_>>();
test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap());
}
#[test]
fn invalid_ecdh_blame() {
let (mut machines, commitment_msgs, _, mut secret_shares) =
commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
// Mutate the share to trigger a blame event
// Mutate the share sent from 2 to 1, as 1 is expected to end up blamed for test_blame to pass
// While 2 is the malicious party here, this ensures 1 creates the blame proof
// We then malleate 1's blame proof, so 1 ends up the party blamed
// We don't simply invalidate the PoP, as that wouldn't produce a blame statement
// By mutating the encrypted data, we ensure a blame statement is created
secret_shares
.get_mut(&TWO)
.unwrap()
.get_mut(&ONE)
.unwrap()
.invalidate_msg(&mut OsRng, CONTEXT, TWO);
let mut blame = None;
let machines = machines
.drain()
.filter_map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i);
let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
if i == ONE {
blame = Some(match machine.err() {
Some(PedPoPError::InvalidShare { participant: TWO, blame: Some(blame) }) => Some(blame),
_ => panic!(),
});
None
} else {
Some(machine.unwrap())
}
})
.collect::<Vec<_>>();
blame.as_mut().unwrap().as_mut().unwrap().invalidate_key();
test_blame(&commitment_msgs, machines, &secret_shares[&TWO][&ONE].clone(), &blame.unwrap());
}
// This should be largely equivalent to the prior test
#[test]
fn invalid_dleq_blame() {
let (mut machines, commitment_msgs, _, mut secret_shares) =
commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
secret_shares
.get_mut(&TWO)
.unwrap()
.get_mut(&ONE)
.unwrap()
.invalidate_msg(&mut OsRng, CONTEXT, TWO);
let mut blame = None;
let machines = machines
.drain()
.filter_map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i);
let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
if i == ONE {
blame = Some(match machine.err() {
Some(PedPoPError::InvalidShare { participant: TWO, blame: Some(blame) }) => Some(blame),
_ => panic!(),
});
None
} else {
Some(machine.unwrap())
}
})
.collect::<Vec<_>>();
blame.as_mut().unwrap().as_mut().unwrap().invalidate_dleq();
test_blame(&commitment_msgs, machines, &secret_shares[&TWO][&ONE].clone(), &blame.unwrap());
}
#[test]
fn invalid_share_serialization_blame() {
let (mut machines, commitment_msgs, enc_keys, mut secret_shares) =
commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_share_serialization(
&mut OsRng,
CONTEXT,
ONE,
enc_keys[&TWO],
);
let mut blame = None;
let machines = machines
.drain()
.filter_map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i);
let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
if i == TWO {
blame = Some(match machine.err() {
Some(PedPoPError::InvalidShare { participant: ONE, blame: Some(blame) }) => Some(blame),
_ => panic!(),
});
None
} else {
Some(machine.unwrap())
}
})
.collect::<Vec<_>>();
test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap());
}
#[test]
fn invalid_share_value_blame() {
let (mut machines, commitment_msgs, enc_keys, mut secret_shares) =
commit_enc_keys_and_shares::<_, Ristretto>(&mut OsRng);
secret_shares.get_mut(&ONE).unwrap().get_mut(&TWO).unwrap().invalidate_share_value(
&mut OsRng,
CONTEXT,
ONE,
enc_keys[&TWO],
);
let mut blame = None;
let machines = machines
.drain()
.filter_map(|(i, machine)| {
let our_secret_shares = generate_secret_shares(&secret_shares, i);
let machine = machine.calculate_share(&mut OsRng, our_secret_shares);
if i == TWO {
blame = Some(match machine.err() {
Some(PedPoPError::InvalidShare { participant: ONE, blame: Some(blame) }) => Some(blame),
_ => panic!(),
});
None
} else {
Some(machine.unwrap())
}
})
.collect::<Vec<_>>();
test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap());
}


@@ -1,34 +0,0 @@
[package]
name = "dkg-promote"
version = "0.6.1"
description = "Promotions for keys from the dkg crate"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/promote"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
rust-version = "1.80"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
thiserror = { version = "2", default-features = false, features = ["std"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../../transcript", version = "^0.3.2", default-features = false, features = ["std", "recommended"] }
ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false, features = ["std"] }
dleq = { path = "../../dleq", version = "^0.4.1", default-features = false, features = ["std", "serialize"] }
dkg = { path = "../", version = "0.6.1", default-features = false, features = ["std"] }
[dev-dependencies]
zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] }
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
dalek-ff-group = { path = "../../dalek-ff-group" }
dkg-recovery = { path = "../recovery", default-features = false, features = ["std"] }


@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2021-2025 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -1,13 +0,0 @@
# Distributed Key Generation - Promote
This crate implements 'promotions' for keys from the
[`dkg`](https://docs.rs/dkg) crate. A promotion takes a set of keys and maps it
to a different `Ciphersuite`.
This crate was originally part of the `dkg` crate, which was
[audited by Cypher Stack in March 2023](
https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf
), culminating in commit
[669d2dbffc1dafb82a09d9419ea182667115df06](
https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06
). Any subsequent changes have not undergone auditing.
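For orientation, a condensed sketch of a promotion, per the test below (collecting the other participants' proofs is elided; `AltGenerator` is the test's alternate-generator ciphersuite):

```rust
// Prove our secret share is consistent under the new generator.
let (promotion, our_proof) =
  GeneratorPromotion::<_, AltGenerator<Ristretto>>::promote(&mut OsRng, keys.clone());
// ... broadcast `our_proof`, collect the other participants' proofs into `their_proofs` ...
let promoted = promotion.complete(&their_proofs).unwrap();
// The parameters carry over; only the generator (and thus the group key) differs.
assert_eq!(keys.params(), promoted.params());
```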


@@ -1,113 +0,0 @@
use core::marker::PhantomData;
use std::collections::HashMap;
use zeroize::{Zeroize, Zeroizing};
use rand_core::OsRng;
use dalek_ff_group::Ristretto;
use ciphersuite::{
group::{ff::Field, Group},
Ciphersuite,
};
use dkg::*;
use dkg_recovery::recover_key;
use crate::{GeneratorPromotion, GeneratorProof};
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
struct AltGenerator<C: Ciphersuite> {
_curve: PhantomData<C>,
}
impl<C: Ciphersuite> Ciphersuite for AltGenerator<C> {
type F = C::F;
type G = C::G;
type H = C::H;
const ID: &'static [u8] = b"Alternate Ciphersuite";
fn generator() -> Self::G {
C::G::generator() * <C as Ciphersuite>::hash_to_F(b"DKG Promotion Test", b"generator")
}
fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
<C as Ciphersuite>::hash_to_F(dst, data)
}
}
/// Clone a map without a specific value.
pub fn clone_without<K: Clone + core::cmp::Eq + core::hash::Hash, V: Clone>(
map: &HashMap<K, V>,
without: &K,
) -> HashMap<K, V> {
let mut res = map.clone();
res.remove(without).unwrap();
res
}
// Test promotion of threshold keys to another generator
#[test]
fn test_generator_promotion() {
// Generate a set of `ThresholdKeys`
const PARTICIPANTS: u16 = 5;
let keys: [ThresholdKeys<_>; PARTICIPANTS as usize] = {
let shares: [<Ristretto as Ciphersuite>::F; PARTICIPANTS as usize] =
core::array::from_fn(|_| <Ristretto as Ciphersuite>::F::random(&mut OsRng));
let verification_shares = (0 .. PARTICIPANTS)
.map(|i| {
(
Participant::new(i + 1).unwrap(),
<Ristretto as Ciphersuite>::generator() * shares[usize::from(i)],
)
})
.collect::<HashMap<_, _>>();
core::array::from_fn(|i| {
ThresholdKeys::new(
ThresholdParams::new(
PARTICIPANTS,
PARTICIPANTS,
Participant::new(u16::try_from(i + 1).unwrap()).unwrap(),
)
.unwrap(),
Interpolation::Constant(vec![<Ristretto as Ciphersuite>::F::ONE; PARTICIPANTS as usize]),
Zeroizing::new(shares[i]),
verification_shares.clone(),
)
.unwrap()
})
};
// Perform the promotion
let mut promotions = HashMap::new();
let mut proofs = HashMap::new();
for keys in &keys {
let i = keys.params().i();
let (promotion, proof) =
GeneratorPromotion::<_, AltGenerator<Ristretto>>::promote(&mut OsRng, keys.clone());
promotions.insert(i, promotion);
proofs.insert(
i,
GeneratorProof::<Ristretto>::read::<&[u8]>(&mut proof.serialize().as_ref()).unwrap(),
);
}
// Complete the promotion, and verify it worked
let new_group_key = AltGenerator::<Ristretto>::generator() * *recover_key(&keys).unwrap();
for (i, promoting) in promotions.drain() {
let promoted = promoting.complete(&clone_without(&proofs, &i)).unwrap();
assert_eq!(keys[usize::from(u16::from(i) - 1)].params(), promoted.params());
assert_eq!(
keys[usize::from(u16::from(i) - 1)].original_secret_share(),
promoted.original_secret_share()
);
assert_eq!(new_group_key, promoted.group_key());
for l in 0 .. PARTICIPANTS {
let verification_share =
promoted.original_verification_share(Participant::new(l + 1).unwrap());
assert_eq!(
AltGenerator::<Ristretto>::generator() * **keys[usize::from(l)].original_secret_share(),
verification_share
);
}
}
}

Some files were not shown because too many files have changed in this diff.