mirror of
https://github.com/serai-dex/serai.git
synced 2025-12-12 14:09:25 +00:00
Compare commits
156 Commits
1de8136739
...
develop
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c24768f922 | ||
|
|
5818f1a41c | ||
|
|
1b781b4b57 | ||
|
|
63f7e220c0 | ||
|
|
7d49366373 | ||
|
|
55ed33d2d1 | ||
|
|
0066b94d38 | ||
|
|
7d54c02ec6 | ||
|
|
568324f631 | ||
|
|
eaa9a0e5a6 | ||
|
|
251996c1b0 | ||
|
|
98b9cc82a7 | ||
|
|
f8adfb56ad | ||
|
|
7a790f3a20 | ||
|
|
a7c77f8b5f | ||
|
|
da3095ed15 | ||
|
|
758d422595 | ||
|
|
9841061b49 | ||
|
|
4122a0135f | ||
|
|
b63ef32864 | ||
|
|
8be03a8fc2 | ||
|
|
677a2e5749 | ||
|
|
38bda1d586 | ||
|
|
2bc2ca6906 | ||
|
|
900a6612d7 | ||
|
|
17c1d5cd6b | ||
|
|
8a1b56a928 | ||
|
|
75964cf6da | ||
|
|
d407e35cee | ||
|
|
c8ef044acb | ||
|
|
ddbc32de4d | ||
|
|
e5ccfac19e | ||
|
|
432daae1d1 | ||
|
|
da3a85efe5 | ||
|
|
1e0240123d | ||
|
|
f6d4d1b084 | ||
|
|
1b37dd2951 | ||
|
|
f32e0609f1 | ||
|
|
ca85f9ba0c | ||
|
|
cfd1cb3a37 | ||
|
|
f2c13a0040 | ||
|
|
961f46bc04 | ||
|
|
2c4de3bab4 | ||
|
|
95c30720d2 | ||
|
|
ceede14f5c | ||
|
|
5e60ea9718 | ||
|
|
153f6f2f2f | ||
|
|
104c0d4492 | ||
|
|
7c8f13ab28 | ||
|
|
cb0deadf9a | ||
|
|
cb489f9cef | ||
|
|
cc662cb591 | ||
|
|
a8b8844e3f | ||
|
|
82b543ef75 | ||
|
|
72e80c1a3d | ||
|
|
b6edc94bcd | ||
|
|
cfce2b26e2 | ||
|
|
e87bbcda64 | ||
|
|
9f84adf8b3 | ||
|
|
3919cf55ae | ||
|
|
38dd8cb191 | ||
|
|
f2563d39cb | ||
|
|
15a9cbef40 | ||
|
|
078d6e51e5 | ||
|
|
6c33e18745 | ||
|
|
b743c9a43e | ||
|
|
0c2f2979a9 | ||
|
|
971951a1a6 | ||
|
|
92d9e908cb | ||
|
|
a32b97be88 | ||
|
|
e3809b2ff1 | ||
|
|
fd2d8b4f0a | ||
|
|
bc81614894 | ||
|
|
8df5aa2e2d | ||
|
|
b000740470 | ||
|
|
b9f554111d | ||
|
|
354c408e3e | ||
|
|
df3b60376a | ||
|
|
8d209c652e | ||
|
|
9ddad794b4 | ||
|
|
b934e484cc | ||
|
|
f8aee9b3c8 | ||
|
|
f51d77d26a | ||
|
|
0780deb643 | ||
|
|
75c38560f4 | ||
|
|
9f1c5268a5 | ||
|
|
35b113768b | ||
|
|
f2595c4939 | ||
|
|
8fcfa6d3d5 | ||
|
|
54c9d19726 | ||
|
|
25324c3cd5 | ||
|
|
ecb7df85b0 | ||
|
|
68c7acdbef | ||
|
|
8b60feed92 | ||
|
|
5c895efcd0 | ||
|
|
60e55656aa | ||
|
|
9536282418 | ||
|
|
8297d0679d | ||
|
|
d9f854b08a | ||
|
|
8aaf7f7dc6 | ||
|
|
ce447558ac | ||
|
|
fc850da30e | ||
|
|
d6f6cf1965 | ||
|
|
4438b51881 | ||
|
|
6ae0d9fad7 | ||
|
|
ad08b410a8 | ||
|
|
ec3cfd3ab7 | ||
|
|
01eb2daa0b | ||
|
|
885000f970 | ||
|
|
4be506414b | ||
|
|
1143d84e1d | ||
|
|
336922101f | ||
|
|
ffa033d978 | ||
|
|
23f986f57a | ||
|
|
bb726b58af | ||
|
|
387615705c | ||
|
|
c7f825a192 | ||
|
|
d363b1c173 | ||
|
|
d5077ae966 | ||
|
|
188fcc3cb4 | ||
|
|
cbab9486c6 | ||
|
|
a5f4c450c6 | ||
|
|
4f65a0b147 | ||
|
|
feb18d64a7 | ||
|
|
cb1e6535cb | ||
|
|
6b8cf6653a | ||
|
|
b426bfcfe8 | ||
|
|
21ce50ecf7 | ||
|
|
a4ceb2e756 | ||
|
|
eab5d9e64f | ||
|
|
e9c1235b76 | ||
|
|
dc1b8dfccd | ||
|
|
d0201cf2e5 | ||
|
|
f3d20e60b3 | ||
|
|
dafba81b40 | ||
|
|
91f8ec53d9 | ||
|
|
fc9a4a08b8 | ||
|
|
45fadb21ac | ||
|
|
28619fbee1 | ||
|
|
bbe014c3a7 | ||
|
|
fb3fadb3d3 | ||
|
|
f481d20773 | ||
|
|
599b2dec8f | ||
|
|
435f1d9ae1 | ||
|
|
d7ecab605e | ||
|
|
805fea52ec | ||
|
|
48db06f901 | ||
|
|
e9d0a5e0ed | ||
|
|
44d05518aa | ||
|
|
23b433fe6c | ||
|
|
2e57168a97 | ||
|
|
5c6160c398 | ||
|
|
9eee1d971e | ||
|
|
e6300847d6 | ||
|
|
e0a3e7bea6 | ||
|
|
cbebaa1349 |
4
.github/actions/bitcoin/action.yml
vendored
4
.github/actions/bitcoin/action.yml
vendored
@@ -12,7 +12,7 @@ runs:
|
||||
steps:
|
||||
- name: Bitcoin Daemon Cache
|
||||
id: cache-bitcoind
|
||||
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
|
||||
with:
|
||||
path: bitcoin.tar.gz
|
||||
key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
|
||||
@@ -37,4 +37,4 @@ runs:
|
||||
|
||||
- name: Bitcoin Regtest Daemon
|
||||
shell: bash
|
||||
run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -txindex -daemon
|
||||
run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -daemon
|
||||
|
||||
50
.github/actions/build-dependencies/action.yml
vendored
50
.github/actions/build-dependencies/action.yml
vendored
@@ -7,13 +7,20 @@ runs:
|
||||
- name: Remove unused packages
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt remove -y "*msbuild*" "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
|
||||
# Ensure the repositories are synced
|
||||
sudo apt update -y
|
||||
|
||||
# Actually perform the removals
|
||||
sudo apt remove -y "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
|
||||
sudo apt remove -y "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*"
|
||||
sudo apt remove -y "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*"
|
||||
|
||||
sudo apt remove -y --allow-remove-essential -f shim-signed *python3*
|
||||
# This removal command requires the prior removals due to unmet dependencies otherwise
|
||||
sudo apt remove -y "*qemu*" "*sql*" "*texinfo*" "*imagemagick*"
|
||||
sudo apt autoremove -y
|
||||
sudo apt clean
|
||||
docker system prune -a --volumes
|
||||
|
||||
# Reinstall python3 as a general dependency of a functional operating system
|
||||
sudo apt install -y python3 --fix-missing
|
||||
if: runner.os == 'Linux'
|
||||
|
||||
- name: Remove unused packages
|
||||
@@ -31,19 +38,48 @@ runs:
|
||||
shell: bash
|
||||
run: |
|
||||
if [ "$RUNNER_OS" == "Linux" ]; then
|
||||
sudo apt install -y ca-certificates protobuf-compiler
|
||||
sudo apt install -y ca-certificates protobuf-compiler libclang-dev
|
||||
elif [ "$RUNNER_OS" == "Windows" ]; then
|
||||
choco install protoc
|
||||
elif [ "$RUNNER_OS" == "macOS" ]; then
|
||||
brew install protobuf
|
||||
brew install protobuf llvm
|
||||
HOMEBREW_ROOT_PATH=/opt/homebrew # Apple Silicon
|
||||
if [ $(uname -m) = "x86_64" ]; then HOMEBREW_ROOT_PATH=/usr/local; fi # Intel
|
||||
ls $HOMEBREW_ROOT_PATH/opt/llvm/lib | grep "libclang.dylib" # Make sure this installed `libclang`
|
||||
echo "DYLD_LIBRARY_PATH=$HOMEBREW_ROOT_PATH/opt/llvm/lib:$DYLD_LIBRARY_PATH" >> "$GITHUB_ENV"
|
||||
fi
|
||||
|
||||
- name: Install solc
|
||||
shell: bash
|
||||
run: |
|
||||
cargo install svm-rs
|
||||
cargo +1.89 install svm-rs --version =0.5.18
|
||||
svm install 0.8.26
|
||||
svm use 0.8.26
|
||||
|
||||
- name: Remove preinstalled Docker
|
||||
shell: bash
|
||||
run: |
|
||||
docker system prune -a --volumes
|
||||
sudo apt remove -y *docker*
|
||||
# Install uidmap which will be required for the explicitly installed Docker
|
||||
sudo apt install uidmap
|
||||
if: runner.os == 'Linux'
|
||||
|
||||
- name: Update system dependencies
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt update -y
|
||||
sudo apt upgrade -y
|
||||
sudo apt autoremove -y
|
||||
sudo apt clean
|
||||
if: runner.os == 'Linux'
|
||||
|
||||
- name: Install rootless Docker
|
||||
uses: docker/setup-docker-action@b60f85385d03ac8acfca6d9996982511d8620a19
|
||||
with:
|
||||
rootless: true
|
||||
set-host: true
|
||||
if: runner.os == 'Linux'
|
||||
|
||||
# - name: Cache Rust
|
||||
# uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43
|
||||
|
||||
2
.github/actions/monero-wallet-rpc/action.yml
vendored
2
.github/actions/monero-wallet-rpc/action.yml
vendored
@@ -12,7 +12,7 @@ runs:
|
||||
steps:
|
||||
- name: Monero Wallet RPC Cache
|
||||
id: cache-monero-wallet-rpc
|
||||
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
|
||||
with:
|
||||
path: monero-wallet-rpc
|
||||
key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
|
||||
|
||||
2
.github/actions/monero/action.yml
vendored
2
.github/actions/monero/action.yml
vendored
@@ -12,7 +12,7 @@ runs:
|
||||
steps:
|
||||
- name: Monero Daemon Cache
|
||||
id: cache-monerod
|
||||
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
|
||||
with:
|
||||
path: /usr/bin/monerod
|
||||
key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
|
||||
|
||||
2
.github/nightly-version
vendored
2
.github/nightly-version
vendored
@@ -1 +1 @@
|
||||
nightly-2024-07-01
|
||||
nightly-2025-11-01
|
||||
|
||||
1
.github/workflows/common-tests.yml
vendored
1
.github/workflows/common-tests.yml
vendored
@@ -30,5 +30,4 @@ jobs:
|
||||
-p patchable-async-sleep \
|
||||
-p serai-db \
|
||||
-p serai-env \
|
||||
-p serai-task \
|
||||
-p simple-request
|
||||
|
||||
10
.github/workflows/crypto-tests.yml
vendored
10
.github/workflows/crypto-tests.yml
vendored
@@ -32,13 +32,15 @@ jobs:
|
||||
-p dalek-ff-group \
|
||||
-p minimal-ed448 \
|
||||
-p ciphersuite \
|
||||
-p ciphersuite-kp256 \
|
||||
-p multiexp \
|
||||
-p schnorr-signatures \
|
||||
-p dleq \
|
||||
-p generalized-bulletproofs \
|
||||
-p generalized-bulletproofs-circuit-abstraction \
|
||||
-p ec-divisors \
|
||||
-p generalized-bulletproofs-ec-gadgets \
|
||||
-p dkg \
|
||||
-p dkg-recovery \
|
||||
-p dkg-dealer \
|
||||
-p dkg-promote \
|
||||
-p dkg-musig \
|
||||
-p dkg-pedpop \
|
||||
-p modular-frost \
|
||||
-p frost-schnorrkel
|
||||
|
||||
6
.github/workflows/daily-deny.yml
vendored
6
.github/workflows/daily-deny.yml
vendored
@@ -12,13 +12,13 @@ jobs:
|
||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||
|
||||
- name: Advisory Cache
|
||||
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
|
||||
with:
|
||||
path: ~/.cargo/advisory-db
|
||||
key: rust-advisory-db
|
||||
|
||||
- name: Install cargo deny
|
||||
run: cargo install --locked cargo-deny
|
||||
run: cargo +1.89 install cargo-deny --version =0.18.3
|
||||
|
||||
- name: Run cargo deny
|
||||
run: cargo deny -L error --all-features check
|
||||
run: cargo deny -L error --all-features check --hide-inclusion-graph
|
||||
|
||||
45
.github/workflows/lint.yml
vendored
45
.github/workflows/lint.yml
vendored
@@ -11,7 +11,7 @@ jobs:
|
||||
clippy:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-latest, macos-13, macos-14, windows-latest]
|
||||
os: [ubuntu-latest, macos-15-intel, macos-latest, windows-latest]
|
||||
runs-on: ${{ matrix.os }}
|
||||
|
||||
steps:
|
||||
@@ -26,7 +26,7 @@ jobs:
|
||||
uses: ./.github/actions/build-dependencies
|
||||
|
||||
- name: Install nightly rust
|
||||
run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32-unknown-unknown -c clippy
|
||||
run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-src -c clippy
|
||||
|
||||
- name: Run Clippy
|
||||
run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module
|
||||
@@ -46,16 +46,16 @@ jobs:
|
||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||
|
||||
- name: Advisory Cache
|
||||
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
|
||||
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
|
||||
with:
|
||||
path: ~/.cargo/advisory-db
|
||||
key: rust-advisory-db
|
||||
|
||||
- name: Install cargo deny
|
||||
run: cargo install --locked cargo-deny
|
||||
run: cargo +1.89 install cargo-deny --version =0.18.4
|
||||
|
||||
- name: Run cargo deny
|
||||
run: cargo deny -L error --all-features check
|
||||
run: cargo deny -L error --all-features check --hide-inclusion-graph
|
||||
|
||||
fmt:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -73,42 +73,11 @@ jobs:
|
||||
- name: Run rustfmt
|
||||
run: cargo +${{ steps.nightly.outputs.version }} fmt -- --check
|
||||
|
||||
- name: Install foundry
|
||||
uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
|
||||
with:
|
||||
version: nightly-41d4e5437107f6f42c7711123890147bc736a609
|
||||
cache: false
|
||||
|
||||
- name: Run forge fmt
|
||||
run: FOUNDRY_FMT_SORT_INPUTS=false FOUNDRY_FMT_LINE_LENGTH=100 FOUNDRY_FMT_TAB_WIDTH=2 FOUNDRY_FMT_BRACKET_SPACING=true FOUNDRY_FMT_INT_TYPES=preserve forge fmt --check $(find . -iname "*.sol")
|
||||
|
||||
machete:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||
- name: Verify all dependencies are in use
|
||||
run: |
|
||||
cargo install cargo-machete
|
||||
cargo machete
|
||||
|
||||
slither:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||
- name: Slither
|
||||
run: |
|
||||
python3 -m pip install solc-select
|
||||
solc-select install 0.8.26
|
||||
solc-select use 0.8.26
|
||||
|
||||
python3 -m pip install slither-analyzer
|
||||
|
||||
slither --include-paths ./networks/ethereum/schnorr/contracts/Schnorr.sol
|
||||
slither --include-paths ./networks/ethereum/schnorr/contracts ./networks/ethereum/schnorr/contracts/tests/Schnorr.sol
|
||||
slither processor/ethereum/deployer/contracts/Deployer.sol
|
||||
slither processor/ethereum/erc20/contracts/IERC20.sol
|
||||
|
||||
cp networks/ethereum/schnorr/contracts/Schnorr.sol processor/ethereum/router/contracts/
|
||||
cp processor/ethereum/erc20/contracts/IERC20.sol processor/ethereum/router/contracts/
|
||||
cd processor/ethereum/router/contracts
|
||||
slither Router.sol
|
||||
cargo +1.89 install cargo-machete --version =0.8.0
|
||||
cargo +1.89 machete
|
||||
|
||||
77
.github/workflows/monero-tests.yaml
vendored
77
.github/workflows/monero-tests.yaml
vendored
@@ -1,77 +0,0 @@
|
||||
name: Monero Tests
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- develop
|
||||
paths:
|
||||
- "networks/monero/**"
|
||||
- "processor/**"
|
||||
|
||||
pull_request:
|
||||
paths:
|
||||
- "networks/monero/**"
|
||||
- "processor/**"
|
||||
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
# Only run these once since they will be consistent regardless of any node
|
||||
unit-tests:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||
|
||||
- name: Test Dependencies
|
||||
uses: ./.github/actions/test-dependencies
|
||||
|
||||
- name: Run Unit Tests Without Features
|
||||
run: |
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-io --lib
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-generators --lib
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-primitives --lib
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-mlsag --lib
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-clsag --lib
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-borromean --lib
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-bulletproofs --lib
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --lib
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-rpc --lib
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --lib
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-address --lib
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --lib
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-seed --lib
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package polyseed --lib
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --lib
|
||||
|
||||
# Doesn't run unit tests with features as the tests workflow will
|
||||
|
||||
integration-tests:
|
||||
runs-on: ubuntu-latest
|
||||
# Test against all supported protocol versions
|
||||
strategy:
|
||||
matrix:
|
||||
version: [v0.17.3.2, v0.18.3.4]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||
|
||||
- name: Test Dependencies
|
||||
uses: ./.github/actions/test-dependencies
|
||||
with:
|
||||
monero-version: ${{ matrix.version }}
|
||||
|
||||
- name: Run Integration Tests Without Features
|
||||
run: |
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --test '*'
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --test '*'
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --test '*'
|
||||
|
||||
- name: Run Integration Tests
|
||||
# Don't run if the the tests workflow also will
|
||||
if: ${{ matrix.version != 'v0.18.3.4' }}
|
||||
run: |
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --all-features --test '*'
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --all-features --test '*'
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --all-features --test '*'
|
||||
255
.github/workflows/msrv.yml
vendored
255
.github/workflows/msrv.yml
vendored
@@ -1,255 +0,0 @@
|
||||
name: Weekly MSRV Check
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 0 * * 0"
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
msrv-common:
|
||||
name: Run cargo msrv on common
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||
|
||||
- name: Install Build Dependencies
|
||||
uses: ./.github/actions/build-dependencies
|
||||
|
||||
- name: Install cargo msrv
|
||||
run: cargo install --locked cargo-msrv
|
||||
|
||||
- name: Run cargo msrv on common
|
||||
run: |
|
||||
cargo msrv verify --manifest-path common/zalloc/Cargo.toml
|
||||
cargo msrv verify --manifest-path common/std-shims/Cargo.toml
|
||||
cargo msrv verify --manifest-path common/env/Cargo.toml
|
||||
cargo msrv verify --manifest-path common/db/Cargo.toml
|
||||
cargo msrv verify --manifest-path common/task/Cargo.toml
|
||||
cargo msrv verify --manifest-path common/request/Cargo.toml
|
||||
cargo msrv verify --manifest-path common/patchable-async-sleep/Cargo.toml
|
||||
|
||||
msrv-crypto:
|
||||
name: Run cargo msrv on crypto
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||
|
||||
- name: Install Build Dependencies
|
||||
uses: ./.github/actions/build-dependencies
|
||||
|
||||
- name: Install cargo msrv
|
||||
run: cargo install --locked cargo-msrv
|
||||
|
||||
- name: Run cargo msrv on crypto
|
||||
run: |
|
||||
cargo msrv verify --manifest-path crypto/transcript/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path crypto/ff-group-tests/Cargo.toml
|
||||
cargo msrv verify --manifest-path crypto/dalek-ff-group/Cargo.toml
|
||||
cargo msrv verify --manifest-path crypto/ed448/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path crypto/multiexp/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path crypto/dleq/Cargo.toml
|
||||
cargo msrv verify --manifest-path crypto/ciphersuite/Cargo.toml
|
||||
cargo msrv verify --manifest-path crypto/schnorr/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path crypto/evrf/generalized-bulletproofs/Cargo.toml
|
||||
cargo msrv verify --manifest-path crypto/evrf/circuit-abstraction/Cargo.toml
|
||||
cargo msrv verify --manifest-path crypto/evrf/divisors/Cargo.toml
|
||||
cargo msrv verify --manifest-path crypto/evrf/ec-gadgets/Cargo.toml
|
||||
cargo msrv verify --manifest-path crypto/evrf/embedwards25519/Cargo.toml
|
||||
cargo msrv verify --manifest-path crypto/evrf/secq256k1/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path crypto/dkg/Cargo.toml
|
||||
cargo msrv verify --manifest-path crypto/frost/Cargo.toml
|
||||
cargo msrv verify --manifest-path crypto/schnorrkel/Cargo.toml
|
||||
|
||||
msrv-networks:
|
||||
name: Run cargo msrv on networks
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||
|
||||
- name: Install Build Dependencies
|
||||
uses: ./.github/actions/build-dependencies
|
||||
|
||||
- name: Install cargo msrv
|
||||
run: cargo install --locked cargo-msrv
|
||||
|
||||
- name: Run cargo msrv on networks
|
||||
run: |
|
||||
cargo msrv verify --manifest-path networks/bitcoin/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path networks/ethereum/build-contracts/Cargo.toml
|
||||
cargo msrv verify --manifest-path networks/ethereum/schnorr/Cargo.toml
|
||||
cargo msrv verify --manifest-path networks/ethereum/alloy-simple-request-transport/Cargo.toml
|
||||
cargo msrv verify --manifest-path networks/ethereum/relayer/Cargo.toml --features parity-db
|
||||
|
||||
cargo msrv verify --manifest-path networks/monero/io/Cargo.toml
|
||||
cargo msrv verify --manifest-path networks/monero/generators/Cargo.toml
|
||||
cargo msrv verify --manifest-path networks/monero/primitives/Cargo.toml
|
||||
cargo msrv verify --manifest-path networks/monero/ringct/mlsag/Cargo.toml
|
||||
cargo msrv verify --manifest-path networks/monero/ringct/clsag/Cargo.toml
|
||||
cargo msrv verify --manifest-path networks/monero/ringct/borromean/Cargo.toml
|
||||
cargo msrv verify --manifest-path networks/monero/ringct/bulletproofs/Cargo.toml
|
||||
cargo msrv verify --manifest-path networks/monero/Cargo.toml
|
||||
cargo msrv verify --manifest-path networks/monero/rpc/Cargo.toml
|
||||
cargo msrv verify --manifest-path networks/monero/rpc/simple-request/Cargo.toml
|
||||
cargo msrv verify --manifest-path networks/monero/wallet/address/Cargo.toml
|
||||
cargo msrv verify --manifest-path networks/monero/wallet/Cargo.toml
|
||||
cargo msrv verify --manifest-path networks/monero/verify-chain/Cargo.toml
|
||||
|
||||
msrv-message-queue:
|
||||
name: Run cargo msrv on message-queue
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||
|
||||
- name: Install Build Dependencies
|
||||
uses: ./.github/actions/build-dependencies
|
||||
|
||||
- name: Install cargo msrv
|
||||
run: cargo install --locked cargo-msrv
|
||||
|
||||
- name: Run cargo msrv on message-queue
|
||||
run: |
|
||||
cargo msrv verify --manifest-path message-queue/Cargo.toml --features parity-db
|
||||
|
||||
msrv-processor:
|
||||
name: Run cargo msrv on processor
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||
|
||||
- name: Install Build Dependencies
|
||||
uses: ./.github/actions/build-dependencies
|
||||
|
||||
- name: Install cargo msrv
|
||||
run: cargo install --locked cargo-msrv
|
||||
|
||||
- name: Run cargo msrv on processor
|
||||
run: |
|
||||
cargo msrv verify --manifest-path processor/view-keys/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path processor/primitives/Cargo.toml
|
||||
cargo msrv verify --manifest-path processor/messages/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path processor/scanner/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path processor/scheduler/primitives/Cargo.toml
|
||||
cargo msrv verify --manifest-path processor/scheduler/smart-contract/Cargo.toml
|
||||
cargo msrv verify --manifest-path processor/scheduler/utxo/primitives/Cargo.toml
|
||||
cargo msrv verify --manifest-path processor/scheduler/utxo/standard/Cargo.toml
|
||||
cargo msrv verify --manifest-path processor/scheduler/utxo/transaction-chaining/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path processor/key-gen/Cargo.toml
|
||||
cargo msrv verify --manifest-path processor/frost-attempt-manager/Cargo.toml
|
||||
cargo msrv verify --manifest-path processor/signers/Cargo.toml
|
||||
cargo msrv verify --manifest-path processor/bin/Cargo.toml --features parity-db
|
||||
|
||||
cargo msrv verify --manifest-path processor/bitcoin/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path processor/ethereum/primitives/Cargo.toml
|
||||
cargo msrv verify --manifest-path processor/ethereum/test-primitives/Cargo.toml
|
||||
cargo msrv verify --manifest-path processor/ethereum/erc20/Cargo.toml
|
||||
cargo msrv verify --manifest-path processor/ethereum/deployer/Cargo.toml
|
||||
cargo msrv verify --manifest-path processor/ethereum/router/Cargo.toml
|
||||
cargo msrv verify --manifest-path processor/ethereum/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path processor/monero/Cargo.toml
|
||||
|
||||
msrv-coordinator:
|
||||
name: Run cargo msrv on coordinator
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||
|
||||
- name: Install Build Dependencies
|
||||
uses: ./.github/actions/build-dependencies
|
||||
|
||||
- name: Install cargo msrv
|
||||
run: cargo install --locked cargo-msrv
|
||||
|
||||
- name: Run cargo msrv on coordinator
|
||||
run: |
|
||||
cargo msrv verify --manifest-path coordinator/tributary/tendermint/Cargo.toml
|
||||
cargo msrv verify --manifest-path coordinator/tributary/Cargo.toml
|
||||
cargo msrv verify --manifest-path coordinator/cosign/Cargo.toml
|
||||
cargo msrv verify --manifest-path coordinator/Cargo.toml
|
||||
|
||||
msrv-substrate:
|
||||
name: Run cargo msrv on substrate
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||
|
||||
- name: Install Build Dependencies
|
||||
uses: ./.github/actions/build-dependencies
|
||||
|
||||
- name: Install cargo msrv
|
||||
run: cargo install --locked cargo-msrv
|
||||
|
||||
- name: Run cargo msrv on substrate
|
||||
run: |
|
||||
cargo msrv verify --manifest-path substrate/primitives/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path substrate/coins/primitives/Cargo.toml
|
||||
cargo msrv verify --manifest-path substrate/coins/pallet/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path substrate/dex/pallet/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path substrate/economic-security/pallet/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path substrate/genesis-liquidity/primitives/Cargo.toml
|
||||
cargo msrv verify --manifest-path substrate/genesis-liquidity/pallet/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path substrate/in-instructions/primitives/Cargo.toml
|
||||
cargo msrv verify --manifest-path substrate/in-instructions/pallet/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path substrate/validator-sets/pallet/Cargo.toml
|
||||
cargo msrv verify --manifest-path substrate/validator-sets/primitives/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path substrate/emissions/primitives/Cargo.toml
|
||||
cargo msrv verify --manifest-path substrate/emissions/pallet/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path substrate/signals/primitives/Cargo.toml
|
||||
cargo msrv verify --manifest-path substrate/signals/pallet/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path substrate/abi/Cargo.toml
|
||||
cargo msrv verify --manifest-path substrate/client/Cargo.toml
|
||||
|
||||
cargo msrv verify --manifest-path substrate/runtime/Cargo.toml
|
||||
cargo msrv verify --manifest-path substrate/node/Cargo.toml
|
||||
|
||||
msrv-orchestration:
|
||||
name: Run cargo msrv on orchestration
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||
|
||||
- name: Install Build Dependencies
|
||||
uses: ./.github/actions/build-dependencies
|
||||
|
||||
- name: Install cargo msrv
|
||||
run: cargo install --locked cargo-msrv
|
||||
|
||||
- name: Run cargo msrv on message-queue
|
||||
run: |
|
||||
cargo msrv verify --manifest-path orchestration/Cargo.toml
|
||||
|
||||
msrv-mini:
|
||||
name: Run cargo msrv on mini
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||
|
||||
- name: Install Build Dependencies
|
||||
uses: ./.github/actions/build-dependencies
|
||||
|
||||
- name: Install cargo msrv
|
||||
run: cargo install --locked cargo-msrv
|
||||
|
||||
- name: Run cargo msrv on mini
|
||||
run: |
|
||||
cargo msrv verify --manifest-path mini/Cargo.toml
|
||||
19
.github/workflows/networks-tests.yml
vendored
19
.github/workflows/networks-tests.yml
vendored
@@ -30,23 +30,6 @@ jobs:
|
||||
run: |
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
|
||||
-p bitcoin-serai \
|
||||
-p build-solidity-contracts \
|
||||
-p ethereum-schnorr-contract \
|
||||
-p alloy-simple-request-transport \
|
||||
-p ethereum-serai \
|
||||
-p serai-ethereum-relayer \
|
||||
-p monero-io \
|
||||
-p monero-generators \
|
||||
-p monero-primitives \
|
||||
-p monero-mlsag \
|
||||
-p monero-clsag \
|
||||
-p monero-borromean \
|
||||
-p monero-bulletproofs \
|
||||
-p monero-serai \
|
||||
-p monero-rpc \
|
||||
-p monero-simple-request-rpc \
|
||||
-p monero-address \
|
||||
-p monero-wallet \
|
||||
-p monero-seed \
|
||||
-p polyseed \
|
||||
-p monero-wallet-util \
|
||||
-p monero-serai-verify-chain
|
||||
|
||||
43
.github/workflows/pages.yml
vendored
43
.github/workflows/pages.yml
vendored
@@ -1,6 +1,7 @@
|
||||
# MIT License
|
||||
#
|
||||
# Copyright (c) 2022 just-the-docs
|
||||
# Copyright (c) 2022-2024 Luke Parker
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
@@ -20,31 +21,21 @@
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
|
||||
# This workflow uses actions that are not certified by GitHub.
|
||||
# They are provided by a third-party and are governed by
|
||||
# separate terms of service, privacy policy, and support
|
||||
# documentation.
|
||||
|
||||
# Sample workflow for building and deploying a Jekyll site to GitHub Pages
|
||||
name: Deploy Jekyll site to Pages
|
||||
name: Deploy Rust docs and Jekyll site to Pages
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "develop"
|
||||
paths:
|
||||
- "docs/**"
|
||||
|
||||
# Allows you to run this workflow manually from the Actions tab
|
||||
workflow_dispatch:
|
||||
|
||||
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
|
||||
permissions:
|
||||
contents: read
|
||||
pages: write
|
||||
id-token: write
|
||||
|
||||
# Allow one concurrent deployment
|
||||
# Only allow one concurrent deployment
|
||||
concurrency:
|
||||
group: "pages"
|
||||
cancel-in-progress: true
|
||||
@@ -53,27 +44,37 @@ jobs:
|
||||
# Build job
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
defaults:
|
||||
run:
|
||||
working-directory: docs
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||
- name: Setup Ruby
|
||||
uses: ruby/setup-ruby@v1
|
||||
uses: ruby/setup-ruby@44511735964dcb71245e7e55f72539531f7bc0eb
|
||||
with:
|
||||
bundler-cache: true
|
||||
cache-version: 0
|
||||
working-directory: "${{ github.workspace }}/docs"
|
||||
- name: Setup Pages
|
||||
id: pages
|
||||
uses: actions/configure-pages@v3
|
||||
uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b
|
||||
- name: Build with Jekyll
|
||||
run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
|
||||
run: cd ${{ github.workspace }}/docs && bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
|
||||
env:
|
||||
JEKYLL_ENV: production
|
||||
|
||||
- name: Get nightly version to use
|
||||
id: nightly
|
||||
shell: bash
|
||||
run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT
|
||||
- name: Build Dependencies
|
||||
uses: ./.github/actions/build-dependencies
|
||||
- name: Buld Rust docs
|
||||
run: |
|
||||
rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-docs
|
||||
RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --no-deps --all-features
|
||||
mv target/doc docs/_site/rust
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-pages-artifact@v1
|
||||
uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b
|
||||
with:
|
||||
path: "docs/_site/"
|
||||
|
||||
@@ -87,4 +88,4 @@ jobs:
|
||||
steps:
|
||||
- name: Deploy to GitHub Pages
|
||||
id: deployment
|
||||
uses: actions/deploy-pages@v2
|
||||
uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e
|
||||
|
||||
22
.github/workflows/tests.yml
vendored
22
.github/workflows/tests.yml
vendored
@@ -39,29 +39,9 @@ jobs:
|
||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
|
||||
-p serai-message-queue \
|
||||
-p serai-processor-messages \
|
||||
-p serai-processor-key-gen \
|
||||
-p serai-processor-view-keys \
|
||||
-p serai-processor-frost-attempt-manager \
|
||||
-p serai-processor-primitives \
|
||||
-p serai-processor-scanner \
|
||||
-p serai-processor-scheduler-primitives \
|
||||
-p serai-processor-utxo-scheduler-primitives \
|
||||
-p serai-processor-utxo-scheduler \
|
||||
-p serai-processor-transaction-chaining-scheduler \
|
||||
-p serai-processor-smart-contract-scheduler \
|
||||
-p serai-processor-signers \
|
||||
-p serai-processor-bin \
|
||||
-p serai-bitcoin-processor \
|
||||
-p serai-processor-ethereum-primitives \
|
||||
-p serai-processor-ethereum-test-primitives \
|
||||
-p serai-processor-ethereum-deployer \
|
||||
-p serai-processor-ethereum-router \
|
||||
-p serai-processor-ethereum-erc20 \
|
||||
-p serai-ethereum-processor \
|
||||
-p serai-monero-processor \
|
||||
-p serai-processor \
|
||||
-p tendermint-machine \
|
||||
-p tributary-chain \
|
||||
-p serai-cosign \
|
||||
-p serai-coordinator \
|
||||
-p serai-orchestrator \
|
||||
-p serai-docker-tests
|
||||
|
||||
7
.gitignore
vendored
7
.gitignore
vendored
@@ -1,7 +1,14 @@
|
||||
target
|
||||
|
||||
# Don't commit any `Cargo.lock` which aren't the workspace's
|
||||
Cargo.lock
|
||||
!./Cargo.lock
|
||||
|
||||
# Don't commit any `Dockerfile`, as they're auto-generated, except the only one which isn't
|
||||
Dockerfile
|
||||
Dockerfile.fast-epoch
|
||||
!orchestration/runtime/Dockerfile
|
||||
|
||||
.test-logs
|
||||
|
||||
.vscode
|
||||
|
||||
6876
Cargo.lock
generated
6876
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
140
Cargo.toml
140
Cargo.toml
@@ -1,15 +1,8 @@
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
members = [
|
||||
# Version patches
|
||||
"patches/parking_lot_core",
|
||||
"patches/parking_lot",
|
||||
"patches/zstd",
|
||||
"patches/rocksdb",
|
||||
|
||||
# std patches
|
||||
"patches/matches",
|
||||
"patches/is-terminal",
|
||||
|
||||
# Rewrites/redirects
|
||||
"patches/option-ext",
|
||||
@@ -20,7 +13,6 @@ members = [
|
||||
"common/patchable-async-sleep",
|
||||
"common/db",
|
||||
"common/env",
|
||||
"common/task",
|
||||
"common/request",
|
||||
|
||||
"crypto/transcript",
|
||||
@@ -29,76 +21,34 @@ members = [
|
||||
"crypto/dalek-ff-group",
|
||||
"crypto/ed448",
|
||||
"crypto/ciphersuite",
|
||||
"crypto/ciphersuite/kp256",
|
||||
|
||||
"crypto/multiexp",
|
||||
|
||||
"crypto/schnorr",
|
||||
"crypto/dleq",
|
||||
|
||||
"crypto/evrf/secq256k1",
|
||||
"crypto/evrf/embedwards25519",
|
||||
"crypto/evrf/generalized-bulletproofs",
|
||||
"crypto/evrf/circuit-abstraction",
|
||||
"crypto/evrf/divisors",
|
||||
"crypto/evrf/ec-gadgets",
|
||||
|
||||
"crypto/dkg",
|
||||
"crypto/dkg/recovery",
|
||||
"crypto/dkg/dealer",
|
||||
"crypto/dkg/promote",
|
||||
"crypto/dkg/musig",
|
||||
"crypto/dkg/pedpop",
|
||||
"crypto/frost",
|
||||
"crypto/schnorrkel",
|
||||
|
||||
"networks/bitcoin",
|
||||
|
||||
"networks/ethereum/build-contracts",
|
||||
"networks/ethereum/schnorr",
|
||||
"networks/ethereum/alloy-simple-request-transport",
|
||||
"networks/ethereum",
|
||||
"networks/ethereum/relayer",
|
||||
|
||||
"networks/monero/io",
|
||||
"networks/monero/generators",
|
||||
"networks/monero/primitives",
|
||||
"networks/monero/ringct/mlsag",
|
||||
"networks/monero/ringct/clsag",
|
||||
"networks/monero/ringct/borromean",
|
||||
"networks/monero/ringct/bulletproofs",
|
||||
"networks/monero",
|
||||
"networks/monero/rpc",
|
||||
"networks/monero/rpc/simple-request",
|
||||
"networks/monero/wallet/address",
|
||||
"networks/monero/wallet",
|
||||
"networks/monero/wallet/seed",
|
||||
"networks/monero/wallet/polyseed",
|
||||
"networks/monero/wallet/util",
|
||||
"networks/monero/verify-chain",
|
||||
|
||||
"message-queue",
|
||||
|
||||
"processor/messages",
|
||||
|
||||
"processor/key-gen",
|
||||
"processor/view-keys",
|
||||
"processor/frost-attempt-manager",
|
||||
|
||||
"processor/primitives",
|
||||
"processor/scanner",
|
||||
"processor/scheduler/primitives",
|
||||
"processor/scheduler/utxo/primitives",
|
||||
"processor/scheduler/utxo/standard",
|
||||
"processor/scheduler/utxo/transaction-chaining",
|
||||
"processor/scheduler/smart-contract",
|
||||
"processor/signers",
|
||||
|
||||
"processor/bin",
|
||||
"processor/bitcoin",
|
||||
"processor/ethereum/primitives",
|
||||
"processor/ethereum/test-primitives",
|
||||
"processor/ethereum/deployer",
|
||||
"processor/ethereum/router",
|
||||
"processor/ethereum/erc20",
|
||||
"processor/ethereum",
|
||||
"processor/monero",
|
||||
"processor",
|
||||
|
||||
"coordinator/tributary/tendermint",
|
||||
"coordinator/tributary",
|
||||
"coordinator/cosign",
|
||||
"coordinator",
|
||||
|
||||
"substrate/primitives",
|
||||
@@ -150,51 +100,37 @@ members = [
|
||||
# to the extensive operations required for Bulletproofs
|
||||
[profile.dev.package]
|
||||
subtle = { opt-level = 3 }
|
||||
curve25519-dalek = { opt-level = 3 }
|
||||
|
||||
ff = { opt-level = 3 }
|
||||
group = { opt-level = 3 }
|
||||
|
||||
crypto-bigint = { opt-level = 3 }
|
||||
secp256k1 = { opt-level = 3 }
|
||||
curve25519-dalek = { opt-level = 3 }
|
||||
dalek-ff-group = { opt-level = 3 }
|
||||
minimal-ed448 = { opt-level = 3 }
|
||||
|
||||
multiexp = { opt-level = 3 }
|
||||
|
||||
secq256k1 = { opt-level = 3 }
|
||||
embedwards25519 = { opt-level = 3 }
|
||||
generalized-bulletproofs = { opt-level = 3 }
|
||||
generalized-bulletproofs-circuit-abstraction = { opt-level = 3 }
|
||||
ec-divisors = { opt-level = 3 }
|
||||
generalized-bulletproofs-ec-gadgets = { opt-level = 3 }
|
||||
|
||||
dkg = { opt-level = 3 }
|
||||
|
||||
monero-generators = { opt-level = 3 }
|
||||
monero-borromean = { opt-level = 3 }
|
||||
monero-bulletproofs = { opt-level = 3 }
|
||||
monero-mlsag = { opt-level = 3 }
|
||||
monero-clsag = { opt-level = 3 }
|
||||
monero-oxide = { opt-level = 3 }
|
||||
|
||||
[profile.release]
|
||||
panic = "unwind"
|
||||
overflow-checks = true
|
||||
|
||||
[patch.crates-io]
|
||||
# Dependencies from monero-oxide which originate from within our own tree
|
||||
std-shims = { path = "common/std-shims" }
|
||||
simple-request = { path = "common/request" }
|
||||
dalek-ff-group = { path = "crypto/dalek-ff-group" }
|
||||
flexible-transcript = { path = "crypto/transcript" }
|
||||
modular-frost = { path = "crypto/frost" }
|
||||
|
||||
# https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
|
||||
lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }
|
||||
|
||||
parking_lot_core = { path = "patches/parking_lot_core" }
|
||||
parking_lot = { path = "patches/parking_lot" }
|
||||
# wasmtime pulls in an old version for this
|
||||
zstd = { path = "patches/zstd" }
|
||||
# Needed for WAL compression
|
||||
rocksdb = { path = "patches/rocksdb" }
|
||||
|
||||
# is-terminal now has an std-based solution with an equivalent API
|
||||
is-terminal = { path = "patches/is-terminal" }
|
||||
# So does matches
|
||||
# These have `std` alternatives
|
||||
matches = { path = "patches/matches" }
|
||||
home = { path = "patches/home" }
|
||||
|
||||
# directories-next was created because directories was unmaintained
|
||||
# directories-next is now unmaintained while directories is maintained
|
||||
@@ -204,12 +140,11 @@ matches = { path = "patches/matches" }
|
||||
option-ext = { path = "patches/option-ext" }
|
||||
directories-next = { path = "patches/directories-next" }
|
||||
|
||||
# The official pasta_curves repo doesn't support Zeroize
|
||||
pasta_curves = { git = "https://github.com/kayabaNerve/pasta_curves", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616" }
|
||||
|
||||
[workspace.lints.clippy]
|
||||
uninlined_format_args = "allow" # TODO
|
||||
unwrap_or_default = "allow"
|
||||
map_unwrap_or = "allow"
|
||||
manual_is_multiple_of = "allow"
|
||||
incompatible_msrv = "allow" # Manually verified with a GitHub workflow
|
||||
borrow_as_ptr = "deny"
|
||||
cast_lossless = "deny"
|
||||
cast_possible_truncation = "deny"
|
||||
@@ -234,13 +169,14 @@ large_stack_arrays = "deny"
|
||||
linkedlist = "deny"
|
||||
macro_use_imports = "deny"
|
||||
manual_instant_elapsed = "deny"
|
||||
manual_let_else = "deny"
|
||||
# TODO manual_let_else = "deny"
|
||||
manual_ok_or = "deny"
|
||||
manual_string_new = "deny"
|
||||
map_unwrap_or = "deny"
|
||||
match_bool = "deny"
|
||||
match_same_arms = "deny"
|
||||
missing_fields_in_debug = "deny"
|
||||
needless_continue = "deny"
|
||||
# TODO needless_continue = "deny"
|
||||
needless_pass_by_value = "deny"
|
||||
ptr_cast_constness = "deny"
|
||||
range_minus_one = "deny"
|
||||
@@ -248,9 +184,7 @@ range_plus_one = "deny"
|
||||
redundant_closure_for_method_calls = "deny"
|
||||
redundant_else = "deny"
|
||||
string_add_assign = "deny"
|
||||
string_slice = "deny"
|
||||
unchecked_duration_subtraction = "deny"
|
||||
uninlined_format_args = "deny"
|
||||
unchecked_time_subtraction = "deny"
|
||||
unnecessary_box_returns = "deny"
|
||||
unnecessary_join = "deny"
|
||||
unnecessary_wraps = "deny"
|
||||
@@ -258,3 +192,21 @@ unnested_or_patterns = "deny"
|
||||
unused_async = "deny"
|
||||
unused_self = "deny"
|
||||
zero_sized_map_values = "deny"
|
||||
|
||||
# TODO: These were incurred when updating Rust as necessary for compilation, yet aren't being fixed
|
||||
# at this time due to the impacts it'd have throughout the repository (when this isn't actively the
|
||||
# primary branch, `next` is)
|
||||
needless_continue = "allow"
|
||||
needless_lifetimes = "allow"
|
||||
useless_conversion = "allow"
|
||||
empty_line_after_doc_comments = "allow"
|
||||
manual_div_ceil = "allow"
|
||||
manual_let_else = "allow"
|
||||
unnecessary_map_or = "allow"
|
||||
result_large_err = "allow"
|
||||
unneeded_struct_pattern = "allow"
|
||||
[workspace.lints.rust]
|
||||
unused = "allow" # TODO: https://github.com/rust-lang/rust/issues/147648
|
||||
mismatched_lifetime_syntaxes = "allow"
|
||||
unused_attributes = "allow"
|
||||
unused_parens = "allow"
|
||||
|
||||
2
LICENSE
2
LICENSE
@@ -5,4 +5,4 @@ a full copy of the AGPL-3.0 License is included in the root of this repository
|
||||
as a reference text. This copy should be provided with any distribution of a
|
||||
crate licensed under the AGPL-3.0, as per its terms.
|
||||
|
||||
The GitHub actions (`.github/actions`) are licensed under the MIT license.
|
||||
The GitHub actions/workflows (`.github`) are licensed under the MIT license.
|
||||
|
||||
@@ -59,7 +59,6 @@ issued at the discretion of the Immunefi program managers.
|
||||
- [Website](https://serai.exchange/): https://serai.exchange/
|
||||
- [Immunefi](https://immunefi.com/bounty/serai/): https://immunefi.com/bounty/serai/
|
||||
- [Twitter](https://twitter.com/SeraiDEX): https://twitter.com/SeraiDEX
|
||||
- [Mastodon](https://cryptodon.lol/@serai): https://cryptodon.lol/@serai
|
||||
- [Discord](https://discord.gg/mpEUtJR3vz): https://discord.gg/mpEUtJR3vz
|
||||
- [Matrix](https://matrix.to/#/#serai:matrix.org): https://matrix.to/#/#serai:matrix.org
|
||||
- [Reddit](https://www.reddit.com/r/SeraiDEX/): https://www.reddit.com/r/SeraiDEX/
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
[package]
|
||||
name = "serai-db"
|
||||
version = "0.1.1"
|
||||
version = "0.1.0"
|
||||
description = "A simple database trait and backends for it"
|
||||
license = "MIT"
|
||||
repository = "https://github.com/serai-dex/serai/tree/develop/common/db"
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
keywords = []
|
||||
edition = "2021"
|
||||
rust-version = "1.71"
|
||||
rust-version = "1.65"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
@@ -18,7 +18,7 @@ workspace = true
|
||||
|
||||
[dependencies]
|
||||
parity-db = { version = "0.4", default-features = false, optional = true }
|
||||
rocksdb = { version = "0.23", default-features = false, features = ["zstd"], optional = true }
|
||||
rocksdb = { version = "0.24", default-features = false, features = ["zstd"], optional = true }
|
||||
|
||||
[features]
|
||||
parity-db = ["dep:parity-db"]
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
# Serai DB
|
||||
|
||||
An inefficient, minimal abstraction around databases.
|
||||
|
||||
The abstraction offers `get`, `put`, and `del` with helper functions and macros
|
||||
built on top. Database iteration is not offered, forcing the caller to manually
|
||||
implement indexing schemes. This ensures wide compatibility across abstracted
|
||||
databases.
|
||||
@@ -38,21 +38,12 @@ pub fn serai_db_key(
|
||||
#[macro_export]
|
||||
macro_rules! create_db {
|
||||
($db_name: ident {
|
||||
$(
|
||||
$field_name: ident:
|
||||
$(<$($generic_name: tt: $generic_type: tt),+>)?(
|
||||
$($arg: ident: $arg_type: ty),*
|
||||
) -> $field_type: ty$(,)?
|
||||
)*
|
||||
$($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*
|
||||
}) => {
|
||||
$(
|
||||
#[derive(Clone, Debug)]
|
||||
pub(crate) struct $field_name$(
|
||||
<$($generic_name: $generic_type),+>
|
||||
)?$(
|
||||
(core::marker::PhantomData<($($generic_name),+)>)
|
||||
)?;
|
||||
impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? {
|
||||
pub(crate) struct $field_name;
|
||||
impl $field_name {
|
||||
pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> {
|
||||
use scale::Encode;
|
||||
$crate::serai_db_key(
|
||||
@@ -61,43 +52,18 @@ macro_rules! create_db {
|
||||
($($arg),*).encode()
|
||||
)
|
||||
}
|
||||
pub(crate) fn set(
|
||||
txn: &mut impl DbTxn
|
||||
$(, $arg: $arg_type)*,
|
||||
data: &$field_type
|
||||
) {
|
||||
let key = Self::key($($arg),*);
|
||||
pub(crate) fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type) {
|
||||
let key = $field_name::key($($arg),*);
|
||||
txn.put(&key, borsh::to_vec(data).unwrap());
|
||||
}
|
||||
pub(crate) fn get(
|
||||
getter: &impl Get,
|
||||
$($arg: $arg_type),*
|
||||
) -> Option<$field_type> {
|
||||
getter.get(Self::key($($arg),*)).map(|data| {
|
||||
pub(crate) fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> {
|
||||
getter.get($field_name::key($($arg),*)).map(|data| {
|
||||
borsh::from_slice(data.as_ref()).unwrap()
|
||||
})
|
||||
}
|
||||
// Returns a PhantomData of all generic types so if the generic was only used in the value,
|
||||
// not the keys, this doesn't have unused generic types
|
||||
#[allow(dead_code)]
|
||||
pub(crate) fn del(
|
||||
txn: &mut impl DbTxn
|
||||
$(, $arg: $arg_type)*
|
||||
) -> core::marker::PhantomData<($($($generic_name),+)?)> {
|
||||
txn.del(&Self::key($($arg),*));
|
||||
core::marker::PhantomData
|
||||
}
|
||||
|
||||
pub(crate) fn take(
|
||||
txn: &mut impl DbTxn
|
||||
$(, $arg: $arg_type)*
|
||||
) -> Option<$field_type> {
|
||||
let key = Self::key($($arg),*);
|
||||
let res = txn.get(&key).map(|data| borsh::from_slice(data.as_ref()).unwrap());
|
||||
if res.is_some() {
|
||||
txn.del(key);
|
||||
}
|
||||
res
|
||||
pub(crate) fn del(txn: &mut impl DbTxn $(, $arg: $arg_type)*) {
|
||||
txn.del(&$field_name::key($($arg),*))
|
||||
}
|
||||
}
|
||||
)*
|
||||
@@ -107,30 +73,19 @@ macro_rules! create_db {
|
||||
#[macro_export]
|
||||
macro_rules! db_channel {
|
||||
($db_name: ident {
|
||||
$($field_name: ident:
|
||||
$(<$($generic_name: tt: $generic_type: tt),+>)?(
|
||||
$($arg: ident: $arg_type: ty),*
|
||||
) -> $field_type: ty$(,)?
|
||||
)*
|
||||
$($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*
|
||||
}) => {
|
||||
$(
|
||||
create_db! {
|
||||
$db_name {
|
||||
$field_name: $(<$($generic_name: $generic_type),+>)?(
|
||||
$($arg: $arg_type,)*
|
||||
index: u32
|
||||
) -> $field_type
|
||||
$field_name: ($($arg: $arg_type,)* index: u32) -> $field_type,
|
||||
}
|
||||
}
|
||||
|
||||
impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? {
|
||||
pub(crate) fn send(
|
||||
txn: &mut impl DbTxn
|
||||
$(, $arg: $arg_type)*
|
||||
, value: &$field_type
|
||||
) {
|
||||
impl $field_name {
|
||||
pub(crate) fn send(txn: &mut impl DbTxn $(, $arg: $arg_type)*, value: &$field_type) {
|
||||
// Use index 0 to store the amount of messages
|
||||
let messages_sent_key = Self::key($($arg,)* 0);
|
||||
let messages_sent_key = $field_name::key($($arg),*, 0);
|
||||
let messages_sent = txn.get(&messages_sent_key).map(|counter| {
|
||||
u32::from_le_bytes(counter.try_into().unwrap())
|
||||
}).unwrap_or(0);
|
||||
@@ -141,35 +96,19 @@ macro_rules! db_channel {
|
||||
// at the same time
|
||||
let index_to_use = messages_sent + 2;
|
||||
|
||||
Self::set(txn, $($arg,)* index_to_use, value);
|
||||
$field_name::set(txn, $($arg),*, index_to_use, value);
|
||||
}
|
||||
pub(crate) fn peek(
|
||||
getter: &impl Get
|
||||
$(, $arg: $arg_type)*
|
||||
) -> Option<$field_type> {
|
||||
let messages_recvd_key = Self::key($($arg,)* 1);
|
||||
let messages_recvd = getter.get(&messages_recvd_key).map(|counter| {
|
||||
u32::from_le_bytes(counter.try_into().unwrap())
|
||||
}).unwrap_or(0);
|
||||
|
||||
let index_to_read = messages_recvd + 2;
|
||||
|
||||
Self::get(getter, $($arg,)* index_to_read)
|
||||
}
|
||||
pub(crate) fn try_recv(
|
||||
txn: &mut impl DbTxn
|
||||
$(, $arg: $arg_type)*
|
||||
) -> Option<$field_type> {
|
||||
let messages_recvd_key = Self::key($($arg,)* 1);
|
||||
pub(crate) fn try_recv(txn: &mut impl DbTxn $(, $arg: $arg_type)*) -> Option<$field_type> {
|
||||
let messages_recvd_key = $field_name::key($($arg),*, 1);
|
||||
let messages_recvd = txn.get(&messages_recvd_key).map(|counter| {
|
||||
u32::from_le_bytes(counter.try_into().unwrap())
|
||||
}).unwrap_or(0);
|
||||
|
||||
let index_to_read = messages_recvd + 2;
|
||||
|
||||
let res = Self::get(txn, $($arg,)* index_to_read);
|
||||
let res = $field_name::get(txn, $($arg),*, index_to_read);
|
||||
if res.is_some() {
|
||||
Self::del(txn, $($arg,)* index_to_read);
|
||||
$field_name::del(txn, $($arg),*, index_to_read);
|
||||
txn.put(&messages_recvd_key, (messages_recvd + 1).to_le_bytes());
|
||||
}
|
||||
res
|
||||
|
||||
@@ -14,43 +14,26 @@ mod parity_db;
|
||||
#[cfg(feature = "parity-db")]
|
||||
pub use parity_db::{ParityDb, new_parity_db};
|
||||
|
||||
/// An object implementing `get`.
|
||||
/// An object implementing get.
|
||||
pub trait Get {
|
||||
/// Get a value from the database.
|
||||
fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>>;
|
||||
}
|
||||
|
||||
/// An atomic database transaction.
|
||||
///
|
||||
/// A transaction is only required to atomically commit. It is not required that two `Get` calls
|
||||
/// made with the same transaction return the same result, if another transaction wrote to that
|
||||
/// key.
|
||||
///
|
||||
/// If two transactions are created, and both write (including deletions) to the same key, behavior
|
||||
/// is undefined. The transaction may block, deadlock, panic, overwrite one of the two values
|
||||
/// randomly, or any other action, at time of write or at time of commit.
|
||||
/// An atomic database operation.
|
||||
#[must_use]
|
||||
pub trait DbTxn: Send + Get {
|
||||
/// Write a value to this key.
|
||||
fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>);
|
||||
/// Delete the value from this key.
|
||||
fn del(&mut self, key: impl AsRef<[u8]>);
|
||||
/// Commit this transaction.
|
||||
fn commit(self);
|
||||
}
|
||||
|
||||
/// A database supporting atomic transaction.
|
||||
/// A database supporting atomic operations.
|
||||
pub trait Db: 'static + Send + Sync + Clone + Get {
|
||||
/// The type representing a database transaction.
|
||||
type Transaction<'a>: DbTxn;
|
||||
/// Calculate a key for a database entry.
|
||||
///
|
||||
/// Keys are separated by the database, the item within the database, and the item's key itself.
|
||||
fn key(db_dst: &'static [u8], item_dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
|
||||
let db_len = u8::try_from(db_dst.len()).unwrap();
|
||||
let dst_len = u8::try_from(item_dst.len()).unwrap();
|
||||
[[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()
|
||||
}
|
||||
/// Open a new transaction.
|
||||
fn txn(&mut self) -> Self::Transaction<'_>;
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@ use crate::*;
|
||||
#[derive(PartialEq, Eq, Debug)]
|
||||
pub struct MemDbTxn<'a>(&'a MemDb, HashMap<Vec<u8>, Vec<u8>>, HashSet<Vec<u8>>);
|
||||
|
||||
impl Get for MemDbTxn<'_> {
|
||||
impl<'a> Get for MemDbTxn<'a> {
|
||||
fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
|
||||
if self.2.contains(key.as_ref()) {
|
||||
return None;
|
||||
@@ -23,7 +23,7 @@ impl Get for MemDbTxn<'_> {
|
||||
.or_else(|| self.0 .0.read().unwrap().get(key.as_ref()).cloned())
|
||||
}
|
||||
}
|
||||
impl DbTxn for MemDbTxn<'_> {
|
||||
impl<'a> DbTxn for MemDbTxn<'a> {
|
||||
fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
|
||||
self.2.remove(key.as_ref());
|
||||
self.1.insert(key.as_ref().to_vec(), value.as_ref().to_vec());
|
||||
|
||||
2
common/env/Cargo.toml
vendored
2
common/env/Cargo.toml
vendored
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/env"
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
keywords = []
|
||||
edition = "2021"
|
||||
rust-version = "1.71"
|
||||
rust-version = "1.60"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
|
||||
2
common/env/src/lib.rs
vendored
2
common/env/src/lib.rs
vendored
@@ -1,5 +1,5 @@
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
|
||||
// Obtain a variable from the Serai environment/secret store.
|
||||
pub fn var(variable: &str) -> Option<String> {
|
||||
|
||||
@@ -7,7 +7,6 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/patchable-a
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
keywords = ["async", "sleep", "tokio", "smol", "async-std"]
|
||||
edition = "2021"
|
||||
rust-version = "1.71"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
#![doc = include_str!("../README.md")]
|
||||
#![deny(missing_docs)]
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/simple-requ
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
keywords = ["http", "https", "async", "request", "ssl"]
|
||||
edition = "2021"
|
||||
rust-version = "1.71"
|
||||
rust-version = "1.70"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
#![doc = include_str!("../README.md")]
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
[package]
|
||||
name = "std-shims"
|
||||
version = "0.1.1"
|
||||
version = "0.1.4"
|
||||
description = "A series of std shims to make alloc more feasible"
|
||||
license = "MIT"
|
||||
repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims"
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
keywords = ["nostd", "no_std", "alloc", "io"]
|
||||
edition = "2021"
|
||||
rust-version = "1.80"
|
||||
rust-version = "1.64"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
@@ -17,8 +17,9 @@ rustdoc-args = ["--cfg", "docsrs"]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
spin = { version = "0.9", default-features = false, features = ["use_ticket_mutex", "lazy"] }
|
||||
hashbrown = { version = "0.15", default-features = false, features = ["default-hasher", "inline-more"] }
|
||||
rustversion = { version = "1", default-features = false }
|
||||
spin = { version = "0.10", default-features = false, features = ["use_ticket_mutex", "once", "lazy"] }
|
||||
hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] }
|
||||
|
||||
[features]
|
||||
std = []
|
||||
|
||||
@@ -3,4 +3,9 @@
|
||||
A crate which passes through to std when the default `std` feature is enabled,
|
||||
yet provides a series of shims when it isn't.
|
||||
|
||||
`HashSet` and `HashMap` are provided via `hashbrown`.
|
||||
No guarantee of one-to-one parity is provided. The shims provided aim to be sufficient for the
|
||||
average case.
|
||||
|
||||
`HashSet` and `HashMap` are provided via `hashbrown`. Synchronization primitives are provided via
|
||||
`spin` (avoiding a requirement on `critical-section`).
|
||||
types are not guaranteed to be
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
#![doc = include_str!("../README.md")]
|
||||
#![cfg_attr(not(feature = "std"), no_std)]
|
||||
|
||||
@@ -11,3 +11,64 @@ pub mod io;
|
||||
pub use alloc::vec;
|
||||
pub use alloc::str;
|
||||
pub use alloc::string;
|
||||
|
||||
pub mod prelude {
|
||||
#[rustversion::before(1.73)]
|
||||
#[doc(hidden)]
|
||||
pub trait StdShimsDivCeil {
|
||||
fn div_ceil(self, rhs: Self) -> Self;
|
||||
}
|
||||
#[rustversion::before(1.73)]
|
||||
mod impl_divceil {
|
||||
use super::StdShimsDivCeil;
|
||||
impl StdShimsDivCeil for u8 {
|
||||
fn div_ceil(self, rhs: Self) -> Self {
|
||||
(self + (rhs - 1)) / rhs
|
||||
}
|
||||
}
|
||||
impl StdShimsDivCeil for u16 {
|
||||
fn div_ceil(self, rhs: Self) -> Self {
|
||||
(self + (rhs - 1)) / rhs
|
||||
}
|
||||
}
|
||||
impl StdShimsDivCeil for u32 {
|
||||
fn div_ceil(self, rhs: Self) -> Self {
|
||||
(self + (rhs - 1)) / rhs
|
||||
}
|
||||
}
|
||||
impl StdShimsDivCeil for u64 {
|
||||
fn div_ceil(self, rhs: Self) -> Self {
|
||||
(self + (rhs - 1)) / rhs
|
||||
}
|
||||
}
|
||||
impl StdShimsDivCeil for u128 {
|
||||
fn div_ceil(self, rhs: Self) -> Self {
|
||||
(self + (rhs - 1)) / rhs
|
||||
}
|
||||
}
|
||||
impl StdShimsDivCeil for usize {
|
||||
fn div_ceil(self, rhs: Self) -> Self {
|
||||
(self + (rhs - 1)) / rhs
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
#[rustversion::before(1.74)]
|
||||
#[doc(hidden)]
|
||||
pub trait StdShimsIoErrorOther {
|
||||
fn other<E>(error: E) -> Self
|
||||
where
|
||||
E: Into<Box<dyn std::error::Error + Send + Sync>>;
|
||||
}
|
||||
#[cfg(feature = "std")]
|
||||
#[rustversion::before(1.74)]
|
||||
impl StdShimsIoErrorOther for std::io::Error {
|
||||
fn other<E>(error: E) -> Self
|
||||
where
|
||||
E: Into<Box<dyn std::error::Error + Send + Sync>>,
|
||||
{
|
||||
std::io::Error::new(std::io::ErrorKind::Other, error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,7 +25,11 @@ mod mutex_shim {
|
||||
}
|
||||
pub use mutex_shim::{ShimMutex as Mutex, MutexGuard};
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
pub use std::sync::LazyLock;
|
||||
#[cfg(not(feature = "std"))]
|
||||
pub use spin::Lazy as LazyLock;
|
||||
#[rustversion::before(1.80)]
|
||||
#[cfg(feature = "std")]
|
||||
pub use spin::Lazy as LazyLock;
|
||||
#[rustversion::since(1.80)]
|
||||
#[cfg(feature = "std")]
|
||||
pub use std::sync::LazyLock;
|
||||
|
||||
@@ -1,22 +0,0 @@
|
||||
[package]
|
||||
name = "serai-task"
|
||||
version = "0.1.0"
|
||||
description = "A task schema for Serai services"
|
||||
license = "AGPL-3.0-only"
|
||||
repository = "https://github.com/serai-dex/serai/tree/develop/common/task"
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
keywords = []
|
||||
edition = "2021"
|
||||
publish = false
|
||||
rust-version = "1.75"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
rustdoc-args = ["--cfg", "docsrs"]
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
log = { version = "0.4", default-features = false, features = ["std"] }
|
||||
tokio = { version = "1", default-features = false, features = ["macros", "sync", "time"] }
|
||||
@@ -1,3 +0,0 @@
|
||||
# Task
|
||||
|
||||
A schema to define tasks to be run ad infinitum.
|
||||
@@ -1,159 +0,0 @@
|
||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
||||
#![doc = include_str!("../README.md")]
|
||||
#![deny(missing_docs)]
|
||||
|
||||
use core::{future::Future, time::Duration};
|
||||
use std::sync::Arc;
|
||||
|
||||
use tokio::sync::{mpsc, oneshot, Mutex};
|
||||
|
||||
enum Closed {
|
||||
NotClosed(Option<oneshot::Receiver<()>>),
|
||||
Closed,
|
||||
}
|
||||
|
||||
/// A handle for a task.
|
||||
#[derive(Clone)]
|
||||
pub struct TaskHandle {
|
||||
run_now: mpsc::Sender<()>,
|
||||
close: mpsc::Sender<()>,
|
||||
closed: Arc<Mutex<Closed>>,
|
||||
}
|
||||
/// A task's internal structures.
|
||||
pub struct Task {
|
||||
run_now: mpsc::Receiver<()>,
|
||||
close: mpsc::Receiver<()>,
|
||||
closed: oneshot::Sender<()>,
|
||||
}
|
||||
|
||||
impl Task {
|
||||
/// Create a new task definition.
|
||||
pub fn new() -> (Self, TaskHandle) {
|
||||
// Uses a capacity of 1 as any call to run as soon as possible satisfies all calls to run as
|
||||
// soon as possible
|
||||
let (run_now_send, run_now_recv) = mpsc::channel(1);
|
||||
// And any call to close satisfies all calls to close
|
||||
let (close_send, close_recv) = mpsc::channel(1);
|
||||
let (closed_send, closed_recv) = oneshot::channel();
|
||||
(
|
||||
Self { run_now: run_now_recv, close: close_recv, closed: closed_send },
|
||||
TaskHandle {
|
||||
run_now: run_now_send,
|
||||
close: close_send,
|
||||
closed: Arc::new(Mutex::new(Closed::NotClosed(Some(closed_recv)))),
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl TaskHandle {
|
||||
/// Tell the task to run now (and not whenever its next iteration on a timer is).
|
||||
///
|
||||
/// Panics if the task has been dropped.
|
||||
pub fn run_now(&self) {
|
||||
#[allow(clippy::match_same_arms)]
|
||||
match self.run_now.try_send(()) {
|
||||
Ok(()) => {}
|
||||
// NOP on full, as this task will already be ran as soon as possible
|
||||
Err(mpsc::error::TrySendError::Full(())) => {}
|
||||
Err(mpsc::error::TrySendError::Closed(())) => {
|
||||
panic!("task was unexpectedly closed when calling run_now")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Close the task.
|
||||
///
|
||||
/// Returns once the task shuts down after it finishes its current iteration (which may be of
|
||||
/// unbounded time).
|
||||
pub async fn close(self) {
|
||||
// If another instance of the handle called tfhis, don't error
|
||||
let _ = self.close.send(()).await;
|
||||
// Wait until we receive the closed message
|
||||
let mut closed = self.closed.lock().await;
|
||||
match &mut *closed {
|
||||
Closed::NotClosed(ref mut recv) => {
|
||||
assert_eq!(recv.take().unwrap().await, Ok(()), "continually ran task dropped itself?");
|
||||
*closed = Closed::Closed;
|
||||
}
|
||||
Closed::Closed => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A task to be continually ran.
|
||||
pub trait ContinuallyRan: Sized + Send {
|
||||
/// The amount of seconds before this task should be polled again.
|
||||
const DELAY_BETWEEN_ITERATIONS: u64 = 5;
|
||||
/// The maximum amount of seconds before this task should be run again.
|
||||
///
|
||||
/// Upon error, the amount of time waited will be linearly increased until this limit.
|
||||
const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 120;
|
||||
|
||||
/// Run an iteration of the task.
|
||||
///
|
||||
/// If this returns `true`, all dependents of the task will immediately have a new iteration ran
|
||||
/// (without waiting for whatever timer they were already on).
|
||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>>;
|
||||
|
||||
/// Continually run the task.
|
||||
fn continually_run(
|
||||
mut self,
|
||||
mut task: Task,
|
||||
dependents: Vec<TaskHandle>,
|
||||
) -> impl Send + Future<Output = ()> {
|
||||
async move {
|
||||
// The default number of seconds to sleep before running the task again
|
||||
let default_sleep_before_next_task = Self::DELAY_BETWEEN_ITERATIONS;
|
||||
// The current number of seconds to sleep before running the task again
|
||||
// We increment this upon errors in order to not flood the logs with errors
|
||||
let mut current_sleep_before_next_task = default_sleep_before_next_task;
|
||||
let increase_sleep_before_next_task = |current_sleep_before_next_task: &mut u64| {
|
||||
let new_sleep = *current_sleep_before_next_task + default_sleep_before_next_task;
|
||||
// Set a limit of sleeping for two minutes
|
||||
*current_sleep_before_next_task = new_sleep.max(Self::MAX_DELAY_BETWEEN_ITERATIONS);
|
||||
};
|
||||
|
||||
loop {
|
||||
// If we were told to close/all handles were dropped, drop it
|
||||
{
|
||||
let should_close = task.close.try_recv();
|
||||
match should_close {
|
||||
Ok(()) | Err(mpsc::error::TryRecvError::Disconnected) => break,
|
||||
Err(mpsc::error::TryRecvError::Empty) => {}
|
||||
}
|
||||
}
|
||||
|
||||
match self.run_iteration().await {
|
||||
Ok(run_dependents) => {
|
||||
// Upon a successful (error-free) loop iteration, reset the amount of time we sleep
|
||||
current_sleep_before_next_task = default_sleep_before_next_task;
|
||||
|
||||
if run_dependents {
|
||||
for dependent in &dependents {
|
||||
dependent.run_now();
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
log::warn!("{}", e);
|
||||
increase_sleep_before_next_task(&mut current_sleep_before_next_task);
|
||||
}
|
||||
}
|
||||
|
||||
// Don't run the task again for another few seconds UNLESS told to run now
|
||||
tokio::select! {
|
||||
() = tokio::time::sleep(Duration::from_secs(current_sleep_before_next_task)) => {},
|
||||
msg = task.run_now.recv() => {
|
||||
// Check if this is firing because the handle was dropped
|
||||
if msg.is_none() {
|
||||
break;
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
task.closed.send(()).unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
#![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))]
|
||||
|
||||
//! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation.
|
||||
|
||||
@@ -8,7 +8,6 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
keywords = []
|
||||
edition = "2021"
|
||||
publish = false
|
||||
rust-version = "1.81"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
@@ -21,14 +20,15 @@ workspace = true
|
||||
async-trait = { version = "0.1", default-features = false }
|
||||
|
||||
zeroize = { version = "^1.5", default-features = false, features = ["std"] }
|
||||
bitvec = { version = "1", default-features = false, features = ["std"] }
|
||||
rand_core = { version = "0.6", default-features = false, features = ["std"] }
|
||||
|
||||
blake2 = { version = "0.10", default-features = false, features = ["std"] }
|
||||
|
||||
transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] }
|
||||
dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"] }
|
||||
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
|
||||
schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std"] }
|
||||
schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std", "aggregate"] }
|
||||
dkg-musig = { path = "../crypto/dkg/musig", default-features = false, features = ["std"] }
|
||||
frost = { package = "modular-frost", path = "../crypto/frost" }
|
||||
frost-schnorrkel = { path = "../crypto/schnorrkel" }
|
||||
|
||||
@@ -42,7 +42,7 @@ processor-messages = { package = "serai-processor-messages", path = "../processo
|
||||
message-queue = { package = "serai-message-queue", path = "../message-queue" }
|
||||
tributary = { package = "tributary-chain", path = "./tributary" }
|
||||
|
||||
sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
|
||||
sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] }
|
||||
serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }
|
||||
|
||||
hex = { version = "0.4", default-features = false, features = ["std"] }
|
||||
@@ -57,8 +57,8 @@ libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp
|
||||
|
||||
[dev-dependencies]
|
||||
tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }
|
||||
sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
|
||||
sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
|
||||
sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] }
|
||||
sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] }
|
||||
|
||||
[features]
|
||||
longer-reattempts = []
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
[package]
|
||||
name = "serai-cosign"
|
||||
version = "0.1.0"
|
||||
description = "Evaluator of cosigns for the Serai network"
|
||||
license = "AGPL-3.0-only"
|
||||
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/cosign"
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
keywords = []
|
||||
edition = "2021"
|
||||
publish = false
|
||||
rust-version = "1.81"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
rustdoc-args = ["--cfg", "docsrs"]
|
||||
|
||||
[package.metadata.cargo-machete]
|
||||
ignored = ["scale"]
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
blake2 = { version = "0.10", default-features = false, features = ["std"] }
|
||||
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
|
||||
|
||||
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
|
||||
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
|
||||
serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
|
||||
|
||||
log = { version = "0.4", default-features = false, features = ["std"] }
|
||||
|
||||
tokio = { version = "1", default-features = false, features = [] }
|
||||
|
||||
serai-db = { path = "../../common/db" }
|
||||
serai-task = { path = "../../common/task" }
|
||||
@@ -1,15 +0,0 @@
|
||||
AGPL-3.0-only license
|
||||
|
||||
Copyright (c) 2023-2024 Luke Parker
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License Version 3 as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
@@ -1,121 +0,0 @@
|
||||
# Serai Cosign
|
||||
|
||||
The Serai blockchain is controlled by a set of validators referred to as the
|
||||
Serai validators. These validators could attempt to double-spend, even if every
|
||||
node on the network is a full node, via equivocating.
|
||||
|
||||
Posit:
|
||||
- The Serai validators control X SRI
|
||||
- The Serai validators produce block A swapping X SRI to Y XYZ
|
||||
- The Serai validators produce block B swapping X SRI to Z ABC
|
||||
- The Serai validators finalize block A and send to the validators for XYZ
|
||||
- The Serai validators finalize block B and send to the validators for ABC
|
||||
|
||||
This is solved via the cosigning protocol. The validators for XYZ and the
|
||||
validators for ABC each sign their view of the Serai blockchain, communicating
|
||||
amongst each other to ensure consistency.
|
||||
|
||||
The security of the cosigning protocol is not formally proven, and there are no
|
||||
claims it achieves Byzantine Fault Tolerance. This protocol is meant to be
|
||||
practical and make such attacks infeasible, when they could already be argued
|
||||
difficult to perform.
|
||||
|
||||
### Definitions
|
||||
|
||||
- Cosign: A signature from a non-Serai validator set for a Serai block
|
||||
- Cosign Commit: A collection of cosigns which achieve the necessary weight
|
||||
|
||||
### Methodology
|
||||
|
||||
Finalized blocks from the Serai network are intended to be cosigned if they
|
||||
contain burn events. Only once cosigned should non-Serai validators process
|
||||
them.
|
||||
|
||||
Cosigning occurs by a non-Serai validator set, using their threshold keys
|
||||
declared on the Serai blockchain. Once 83% of non-Serai validator sets, by
|
||||
weight, cosign a block, a cosign commit is formed. A cosign commit for a block
|
||||
is considered to also cosign for all blocks preceding it.
|
||||
|
||||
### Bounds Under Asynchrony
|
||||
|
||||
Assuming an asynchronous environment fully controlled by the adversary, 34% of
|
||||
a validator set may cause an equivocation. Control of 67% of non-Serai
|
||||
validator sets, by weight, is sufficient to produce two distinct cosign commits
|
||||
at the same position. This is due to the honest stake, 33%, being split across
|
||||
the two candidates (67% + 16.5% = 83.5%, just over the threshold). This means
|
||||
the cosigning protocol may produce multiple cosign commits if 34% of 67%, just
|
||||
22.78%, of the non-Serai validator sets, is malicious. This would be in
|
||||
conjunction with 34% of the Serai validator set (assumed 20% of total stake),
|
||||
for a total stake requirement of 34% of 20% + 22.78% of 80% (25.024%). This is
|
||||
an increase from the 6.8% required without the cosigning protocol.
|
||||
|
||||
### Bounds Under Synchrony
|
||||
|
||||
Assuming the honest stake within the non-Serai validator sets detect the
|
||||
malicious stake within their set prior to assisting in producing a cosign for
|
||||
their set, for which there is a multi-second window, 67% of 67% of non-Serai
|
||||
validator sets is required to produce cosigns for those sets. This raises the
|
||||
total stake requirement to 42.712% (past the usual 34% threshold).
|
||||
|
||||
### Behavior Reliant on Synchrony
|
||||
|
||||
If the Serai blockchain node detects an equivocation, it will stop responding
|
||||
to all RPC requests and stop participating in finalizing further blocks. This
|
||||
lets the node communicate the equivocating commits to other nodes (causing them
|
||||
to exhibit the same behavior), yet prevents interaction with it.
|
||||
|
||||
If cosigns representing 17% of the non-Serai validators sets by weight are
|
||||
detected for distinct blocks at the same position, the protocol halts. An
|
||||
explicit latency period of seventy seconds is enacted after receiving a cosign
|
||||
commit for the detection of such an equivocation. This is largely redundant
|
||||
given how the Serai blockchain node will presumably have halted itself by this
|
||||
time.
|
||||
|
||||
### Equivocation-Detection Avoidance
|
||||
|
||||
Malicious Serai validators could avoid detection of their equivocating if they
|
||||
produced two distinct blockchains, A and B, with different keys declared for
|
||||
the same non-Serai validator set. While the validators following A may detect
|
||||
the cosigns for distinct blocks by validators following B, the cosigns would be
|
||||
assumed invalid due to their signatures being verified against distinct keys.
|
||||
|
||||
This is prevented by requiring cosigns on the blocks which declare new keys,
|
||||
ensuring all validators have a consistent view of the keys used within the
|
||||
cosigning protocol (per the bounds of the cosigning protocol). These blocks are
|
||||
exempt from the general policy of cosign commits cosigning all prior blocks,
|
||||
preventing the newly declared keys (which aren't yet cosigned) from being used
|
||||
to cosign themselves. These cosigns are flagged as "notable", are permanently
|
||||
archived, and must be synced before a validator will move forward.
|
||||
|
||||
Cosigning the block which declares new keys also ensures agreement on the
|
||||
preceding block which declared the new set, with an exact specification of the
|
||||
participants and their weight, before it impacts the cosigning protocol.
|
||||
|
||||
### Denial of Service Concerns
|
||||
|
||||
Any historical Serai validator set may trigger a chain halt by producing an
|
||||
equivocation after their retiry. This requires 67% to be malicious. 34% of the
|
||||
active Serai validator set may also trigger a chain halt.
|
||||
|
||||
17% of non-Serai validator sets equivocating causing a halt means 5.67% of
|
||||
non-Serai validator sets' stake may cause a halt (in an asynchronous
|
||||
environment fully controlled by the adversary). In a synchronous environment
|
||||
where the honest stake cannot be split across two candidates, 11.33% of
|
||||
non-Serai validator sets' stake is required.
|
||||
|
||||
The more practical attack is for one to obtain 5.67% of non-Serai validator
|
||||
sets' stake, under any network conditions, and simply go offline. This will
|
||||
take 17% of validator sets offline with it, preventing any cosign commits
|
||||
from being performed. A fallback protocol where validators individually produce
|
||||
cosigns, removing the network's horizontal scalability but ensuring liveness,
|
||||
prevents this, restoring the additional requirements for control of an
|
||||
asynchronous network or 11.33% of non-Serai validator sets' stake.
|
||||
|
||||
### TODO
|
||||
|
||||
The Serai node no longer responding to RPC requests upon detecting any
|
||||
equivocation, and the fallback protocol where validators individually produce
|
||||
signatures, are not implemented at this time. The former means the detection of
|
||||
equivocating cosigns is not redundant and the latter makes 5.67% of non-Serai
|
||||
validator sets' stake the DoS threshold, even without control of an
|
||||
asynchronous network.
|
||||
@@ -1,55 +0,0 @@
|
||||
use core::future::Future;
|
||||
use std::time::{Duration, SystemTime};
|
||||
|
||||
use serai_db::*;
|
||||
use serai_task::ContinuallyRan;
|
||||
|
||||
use crate::evaluator::CosignedBlocks;
|
||||
|
||||
/// How often callers should broadcast the cosigns flagged for rebroadcasting.
|
||||
pub const BROADCAST_FREQUENCY: Duration = Duration::from_secs(60);
|
||||
const SYNCHRONY_EXPECTATION: Duration = Duration::from_secs(10);
|
||||
const ACKNOWLEDGEMENT_DELAY: Duration =
|
||||
Duration::from_secs(BROADCAST_FREQUENCY.as_secs() + SYNCHRONY_EXPECTATION.as_secs());
|
||||
|
||||
create_db!(
|
||||
SubstrateCosignDelay {
|
||||
// The latest cosigned block number.
|
||||
LatestCosignedBlockNumber: () -> u64,
|
||||
}
|
||||
);
|
||||
|
||||
/// A task to delay acknowledgement of cosigns.
|
||||
pub(crate) struct CosignDelayTask<D: Db> {
|
||||
pub(crate) db: D,
|
||||
}
|
||||
|
||||
impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
|
||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
||||
async move {
|
||||
let mut made_progress = false;
|
||||
loop {
|
||||
let mut txn = self.db.txn();
|
||||
|
||||
// Receive the next block to mark as cosigned
|
||||
let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else {
|
||||
break;
|
||||
};
|
||||
// Calculate when we should mark it as valid
|
||||
let time_valid =
|
||||
SystemTime::UNIX_EPOCH + Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY;
|
||||
// Sleep until then
|
||||
tokio::time::sleep(SystemTime::now().duration_since(time_valid).unwrap_or(Duration::ZERO))
|
||||
.await;
|
||||
|
||||
// Set the cosigned block
|
||||
LatestCosignedBlockNumber::set(&mut txn, &block_number);
|
||||
txn.commit();
|
||||
|
||||
made_progress = true;
|
||||
}
|
||||
|
||||
Ok(made_progress)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,222 +0,0 @@
|
||||
use core::future::Future;
|
||||
use std::time::{Duration, SystemTime};
|
||||
|
||||
use serai_db::*;
|
||||
use serai_task::ContinuallyRan;
|
||||
|
||||
use crate::{
|
||||
HasEvents, GlobalSession, NetworksLatestCosignedBlock, RequestNotableCosigns,
|
||||
intend::{GlobalSessionsChannel, BlockEventData, BlockEvents},
|
||||
};
|
||||
|
||||
create_db!(
|
||||
SubstrateCosignEvaluator {
|
||||
// The global session currently being evaluated.
|
||||
CurrentlyEvaluatedGlobalSession: () -> ([u8; 32], GlobalSession),
|
||||
}
|
||||
);
|
||||
|
||||
db_channel!(
|
||||
SubstrateCosignEvaluatorChannels {
|
||||
// (cosigned block, time cosign was evaluated)
|
||||
CosignedBlocks: () -> (u64, u64),
|
||||
}
|
||||
);
|
||||
|
||||
// This is a strict function which won't panic, even with a malicious Serai node, so long as:
|
||||
// - It's called incrementally
|
||||
// - It's only called for block numbers we've completed indexing on within the intend task
|
||||
// - It's only called for block numbers after a global session has started
|
||||
// - The global sessions channel is populated as the block declaring the session is indexed
|
||||
// Which all hold true within the context of this task and the intend task.
|
||||
//
|
||||
// This function will also ensure the currently evaluated global session is incremented once we
|
||||
// finish evaluation of the prior session.
|
||||
fn currently_evaluated_global_session_strict(
|
||||
txn: &mut impl DbTxn,
|
||||
block_number: u64,
|
||||
) -> ([u8; 32], GlobalSession) {
|
||||
let mut res = {
|
||||
let existing = match CurrentlyEvaluatedGlobalSession::get(txn) {
|
||||
Some(existing) => existing,
|
||||
None => {
|
||||
let first = GlobalSessionsChannel::try_recv(txn)
|
||||
.expect("fetching latest global session yet none declared");
|
||||
CurrentlyEvaluatedGlobalSession::set(txn, &first);
|
||||
first
|
||||
}
|
||||
};
|
||||
assert!(
|
||||
existing.1.start_block_number <= block_number,
|
||||
"candidate's start block number exceeds our block number"
|
||||
);
|
||||
existing
|
||||
};
|
||||
|
||||
if let Some(next) = GlobalSessionsChannel::peek(txn) {
|
||||
assert!(
|
||||
block_number <= next.1.start_block_number,
|
||||
"currently_evaluated_global_session_strict wasn't called incrementally"
|
||||
);
|
||||
// If it's time for this session to activate, take it from the channel and set it
|
||||
if block_number == next.1.start_block_number {
|
||||
GlobalSessionsChannel::try_recv(txn).unwrap();
|
||||
CurrentlyEvaluatedGlobalSession::set(txn, &next);
|
||||
res = next;
|
||||
}
|
||||
}
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
/// A task to determine if a block has been cosigned and we should handle it.
|
||||
pub(crate) struct CosignEvaluatorTask<D: Db, R: RequestNotableCosigns> {
|
||||
pub(crate) db: D,
|
||||
pub(crate) request: R,
|
||||
}
|
||||
|
||||
impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D, R> {
|
||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
||||
async move {
|
||||
let mut known_cosign = None;
|
||||
let mut made_progress = false;
|
||||
loop {
|
||||
let mut txn = self.db.txn();
|
||||
let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn)
|
||||
else {
|
||||
break;
|
||||
};
|
||||
|
||||
match has_events {
|
||||
// Because this had notable events, we require an explicit cosign for this block by a
|
||||
// supermajority of the prior block's validator sets
|
||||
HasEvents::Notable => {
|
||||
let (global_session, global_session_info) =
|
||||
currently_evaluated_global_session_strict(&mut txn, block_number);
|
||||
|
||||
let mut weight_cosigned = 0;
|
||||
for set in global_session_info.sets {
|
||||
// Check if we have the cosign from this set
|
||||
if NetworksLatestCosignedBlock::get(&txn, global_session, set.network)
|
||||
.map(|signed_cosign| signed_cosign.cosign.block_number) ==
|
||||
Some(block_number)
|
||||
{
|
||||
// Since have this cosign, add the set's weight to the weight which has cosigned
|
||||
weight_cosigned +=
|
||||
global_session_info.stakes.get(&set.network).ok_or_else(|| {
|
||||
"ValidatorSet in global session yet didn't have its stake".to_string()
|
||||
})?;
|
||||
}
|
||||
}
|
||||
// Check if the sum weight doesn't cross the required threshold
|
||||
if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) {
|
||||
// Request the necessary cosigns over the network
|
||||
// TODO: Add a timer to ensure this isn't called too often
|
||||
self
|
||||
.request
|
||||
.request_notable_cosigns(global_session)
|
||||
.await
|
||||
.map_err(|e| format!("{e:?}"))?;
|
||||
// We return an error so the delay before this task is run again increases
|
||||
return Err(format!(
|
||||
"notable block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
|
||||
));
|
||||
}
|
||||
}
|
||||
// Since this block didn't have any notable events, we simply require a cosign for this
|
||||
// block or a greater block by the current validator sets
|
||||
HasEvents::NonNotable => {
|
||||
// Check if this was satisfied by a cached result which wasn't calculated incrementally
|
||||
let known_cosigned = if let Some(known_cosign) = known_cosign {
|
||||
known_cosign >= block_number
|
||||
} else {
|
||||
// Clear `known_cosign` which is no longer helpful
|
||||
known_cosign = None;
|
||||
false
|
||||
};
|
||||
|
||||
// If it isn't already known to be cosigned, evaluate the latest cosigns
|
||||
if !known_cosigned {
|
||||
/*
|
||||
LatestCosign is populated with the latest cosigns for each network which don't
|
||||
exceed the latest global session we've evaluated the start of. This current block
|
||||
is during the latest global session we've evaluated the start of.
|
||||
*/
|
||||
|
||||
// Get the global session for this block
|
||||
let (global_session, global_session_info) =
|
||||
currently_evaluated_global_session_strict(&mut txn, block_number);
|
||||
|
||||
let mut weight_cosigned = 0;
|
||||
let mut lowest_common_block: Option<u64> = None;
|
||||
for set in global_session_info.sets {
|
||||
// Check if this set cosigned this block or not
|
||||
let Some(cosign) =
|
||||
NetworksLatestCosignedBlock::get(&txn, global_session, set.network)
|
||||
else {
|
||||
continue;
|
||||
};
|
||||
if cosign.cosign.block_number >= block_number {
|
||||
weight_cosigned +=
|
||||
global_session_info.stakes.get(&set.network).ok_or_else(|| {
|
||||
"ValidatorSet in global session yet didn't have its stake".to_string()
|
||||
})?;
|
||||
}
|
||||
|
||||
// Update the lowest block common to all of these cosigns
|
||||
lowest_common_block = lowest_common_block
|
||||
.map(|existing| existing.min(cosign.cosign.block_number))
|
||||
.or(Some(cosign.cosign.block_number));
|
||||
}
|
||||
|
||||
// Check if the sum weight doesn't cross the required threshold
|
||||
if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) {
|
||||
// Request the superseding notable cosigns over the network
|
||||
// If this session hasn't yet produced notable cosigns, then we presume we'll see
|
||||
// the desired non-notable cosigns as part of normal operations, without needing to
|
||||
// explicitly request them
|
||||
self
|
||||
.request
|
||||
.request_notable_cosigns(global_session)
|
||||
.await
|
||||
.map_err(|e| format!("{e:?}"))?;
|
||||
// We return an error so the delay before this task is run again increases
|
||||
return Err(format!(
|
||||
"block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
|
||||
));
|
||||
}
|
||||
|
||||
// Update the cached result for the block we know is cosigned
|
||||
/*
|
||||
There may be a higher block which was cosigned, but once we get to this block,
|
||||
we'll re-evaluate and find it then. The alternative would be an optimistic
|
||||
re-evaluation now. Both are fine, so the lower-complexity option is preferred.
|
||||
*/
|
||||
known_cosign = lowest_common_block;
|
||||
}
|
||||
}
|
||||
// If this block has no events necessitating cosigning, we can immediately consider the
|
||||
// block cosigned (making this block a NOP)
|
||||
HasEvents::No => {}
|
||||
}
|
||||
|
||||
// Since we checked we had the necessary cosigns, send it for delay before acknowledgement
|
||||
CosignedBlocks::send(
|
||||
&mut txn,
|
||||
&(
|
||||
block_number,
|
||||
SystemTime::now()
|
||||
.duration_since(SystemTime::UNIX_EPOCH)
|
||||
.unwrap_or(Duration::ZERO)
|
||||
.as_secs(),
|
||||
),
|
||||
);
|
||||
txn.commit();
|
||||
|
||||
made_progress = true;
|
||||
}
|
||||
|
||||
Ok(made_progress)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,181 +0,0 @@
|
||||
use core::future::Future;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use serai_client::{
|
||||
primitives::{SeraiAddress, Amount},
|
||||
validator_sets::primitives::ValidatorSet,
|
||||
Serai,
|
||||
};
|
||||
|
||||
use serai_db::*;
|
||||
use serai_task::ContinuallyRan;
|
||||
|
||||
use crate::*;
|
||||
|
||||
create_db!(
|
||||
CosignIntend {
|
||||
ScanCosignFrom: () -> u64,
|
||||
}
|
||||
);
|
||||
|
||||
#[derive(Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub(crate) struct BlockEventData {
|
||||
pub(crate) block_number: u64,
|
||||
pub(crate) has_events: HasEvents,
|
||||
}
|
||||
|
||||
db_channel! {
|
||||
CosignIntendChannels {
|
||||
GlobalSessionsChannel: () -> ([u8; 32], GlobalSession),
|
||||
BlockEvents: () -> BlockEventData,
|
||||
IntendedCosigns: (set: ValidatorSet) -> CosignIntent,
|
||||
}
|
||||
}
|
||||
|
||||
async fn block_has_events_justifying_a_cosign(
|
||||
serai: &Serai,
|
||||
block_number: u64,
|
||||
) -> Result<(Block, HasEvents), String> {
|
||||
let block = serai
|
||||
.finalized_block_by_number(block_number)
|
||||
.await
|
||||
.map_err(|e| format!("{e:?}"))?
|
||||
.ok_or_else(|| "couldn't get block which should've been finalized".to_string())?;
|
||||
let serai = serai.as_of(block.hash());
|
||||
|
||||
if !serai.validator_sets().key_gen_events().await.map_err(|e| format!("{e:?}"))?.is_empty() {
|
||||
return Ok((block, HasEvents::Notable));
|
||||
}
|
||||
|
||||
if !serai.coins().burn_with_instruction_events().await.map_err(|e| format!("{e:?}"))?.is_empty() {
|
||||
return Ok((block, HasEvents::NonNotable));
|
||||
}
|
||||
|
||||
Ok((block, HasEvents::No))
|
||||
}
|
||||
|
||||
/// A task to determine which blocks we should intend to cosign.
|
||||
pub(crate) struct CosignIntendTask<D: Db> {
|
||||
pub(crate) db: D,
|
||||
pub(crate) serai: Serai,
|
||||
}
|
||||
|
||||
impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
|
||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
||||
async move {
|
||||
let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1);
|
||||
let latest_block_number =
|
||||
self.serai.latest_finalized_block().await.map_err(|e| format!("{e:?}"))?.number();
|
||||
|
||||
for block_number in start_block_number ..= latest_block_number {
|
||||
let mut txn = self.db.txn();
|
||||
|
||||
let (block, mut has_events) =
|
||||
block_has_events_justifying_a_cosign(&self.serai, block_number)
|
||||
.await
|
||||
.map_err(|e| format!("{e:?}"))?;
|
||||
|
||||
// Check we are indexing a linear chain
|
||||
if (block_number > 1) &&
|
||||
(<[u8; 32]>::from(block.header.parent_hash) !=
|
||||
SubstrateBlocks::get(&txn, block_number - 1)
|
||||
.expect("indexing a block but haven't indexed its parent"))
|
||||
{
|
||||
Err(format!(
|
||||
"node's block #{block_number} doesn't build upon the block #{} prior indexed",
|
||||
block_number - 1
|
||||
))?;
|
||||
}
|
||||
SubstrateBlocks::set(&mut txn, block_number, &block.hash());
|
||||
|
||||
let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn);
|
||||
|
||||
// If this is notable, it creates a new global session, which we index into the database
|
||||
// now
|
||||
if has_events == HasEvents::Notable {
|
||||
let serai = self.serai.as_of(block.hash());
|
||||
let sets_and_keys = cosigning_sets(&serai).await?;
|
||||
let global_session =
|
||||
GlobalSession::id(sets_and_keys.iter().map(|(set, _key)| *set).collect());
|
||||
|
||||
let mut sets = Vec::with_capacity(sets_and_keys.len());
|
||||
let mut keys = HashMap::with_capacity(sets_and_keys.len());
|
||||
let mut stakes = HashMap::with_capacity(sets_and_keys.len());
|
||||
let mut total_stake = 0;
|
||||
for (set, key) in &sets_and_keys {
|
||||
sets.push(*set);
|
||||
keys.insert(set.network, SeraiAddress::from(*key));
|
||||
let stake = serai
|
||||
.validator_sets()
|
||||
.total_allocated_stake(set.network)
|
||||
.await
|
||||
.map_err(|e| format!("{e:?}"))?
|
||||
.unwrap_or(Amount(0))
|
||||
.0;
|
||||
stakes.insert(set.network, stake);
|
||||
total_stake += stake;
|
||||
}
|
||||
if total_stake == 0 {
|
||||
Err(format!("cosigning sets for block #{block_number} had 0 stake in total"))?;
|
||||
}
|
||||
|
||||
let global_session_info = GlobalSession {
|
||||
// This session starts cosigning after this block, as this block must be cosigned by
|
||||
// the existing validators
|
||||
start_block_number: block_number + 1,
|
||||
sets,
|
||||
keys,
|
||||
stakes,
|
||||
total_stake,
|
||||
};
|
||||
GlobalSessions::set(&mut txn, global_session, &global_session_info);
|
||||
if let Some(ending_global_session) = global_session_for_this_block {
|
||||
GlobalSessionsLastBlock::set(&mut txn, ending_global_session, &block_number);
|
||||
}
|
||||
LatestGlobalSessionIntended::set(&mut txn, &global_session);
|
||||
GlobalSessionsChannel::send(&mut txn, &(global_session, global_session_info));
|
||||
}
|
||||
|
||||
// If there isn't anyone available to cosign this block, meaning it'll never be cosigned,
|
||||
// we flag it as not having any events requiring cosigning so we don't attempt to
|
||||
// sign/require a cosign for it
|
||||
if global_session_for_this_block.is_none() {
|
||||
has_events = HasEvents::No;
|
||||
}
|
||||
|
||||
match has_events {
|
||||
HasEvents::Notable | HasEvents::NonNotable => {
|
||||
let global_session_for_this_block = global_session_for_this_block
|
||||
.expect("global session for this block was None but still attempting to cosign it");
|
||||
let global_session_info = GlobalSessions::get(&txn, global_session_for_this_block)
|
||||
.expect("last global session intended wasn't saved to the database");
|
||||
|
||||
// Tell each set of their expectation to cosign this block
|
||||
for set in global_session_info.sets {
|
||||
log::debug!("{:?} will be cosigning block #{block_number}", set);
|
||||
IntendedCosigns::send(
|
||||
&mut txn,
|
||||
set,
|
||||
&CosignIntent {
|
||||
global_session: global_session_for_this_block,
|
||||
block_number,
|
||||
block_hash: block.hash(),
|
||||
notable: has_events == HasEvents::Notable,
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
HasEvents::No => {}
|
||||
}
|
||||
|
||||
// Populate a singular feed with every block's status for the evluator to work off of
|
||||
BlockEvents::send(&mut txn, &(BlockEventData { block_number, has_events }));
|
||||
// Mark this block as handled, meaning we should scan from the next block moving on
|
||||
ScanCosignFrom::set(&mut txn, &(block_number + 1));
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
Ok(start_block_number <= latest_block_number)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,425 +0,0 @@
|
||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
||||
#![doc = include_str!("../README.md")]
|
||||
#![deny(missing_docs)]
|
||||
|
||||
use core::{fmt::Debug, future::Future};
|
||||
use std::collections::HashMap;
|
||||
|
||||
use blake2::{Digest, Blake2s256};
|
||||
|
||||
use borsh::{BorshSerialize, BorshDeserialize};
|
||||
|
||||
use serai_client::{
|
||||
primitives::{NetworkId, SeraiAddress},
|
||||
validator_sets::primitives::{Session, ValidatorSet, KeyPair},
|
||||
Public, Block, Serai, TemporalSerai,
|
||||
};
|
||||
|
||||
use serai_db::*;
|
||||
use serai_task::*;
|
||||
|
||||
/// The cosigns which are intended to be performed.
|
||||
mod intend;
|
||||
/// The evaluator of the cosigns.
|
||||
mod evaluator;
|
||||
/// The task to delay acknowledgement of the cosigns.
|
||||
mod delay;
|
||||
pub use delay::BROADCAST_FREQUENCY;
|
||||
use delay::LatestCosignedBlockNumber;
|
||||
|
||||
/// The schnorrkel context to used when signing a cosign.
|
||||
pub const COSIGN_CONTEXT: &[u8] = b"serai-cosign";
|
||||
|
||||
/// A 'global session', defined as all validator sets used for cosigning at a given moment.
|
||||
///
|
||||
/// We evaluate cosign faults within a global session. This ensures even if cosigners cosign
|
||||
/// distinct blocks at distinct positions within a global session, we still identify the faults.
|
||||
/*
|
||||
There is the attack where a validator set is given an alternate blockchain with a key generation
|
||||
event at block #n, while most validator sets are given a blockchain with a key generation event
|
||||
at block number #(n+1). This prevents whoever has the alternate blockchain from verifying the
|
||||
cosigns on the primary blockchain, and detecting the faults, if they use the keys as of the block
|
||||
prior to the block being cosigned.
|
||||
|
||||
We solve this by binding cosigns to a global session ID, which has a specific start block, and
|
||||
reading the keys from the start block. This means that so long as all validator sets agree on the
|
||||
start of a global session, they can verify all cosigns produced by that session, regardless of
|
||||
how it advances. Since agreeing on the start of a global session is mandated, there's no way to
|
||||
have validator sets follow two distinct global sessions without breaking the bounds of the
|
||||
cosigning protocol.
|
||||
*/
|
||||
#[derive(Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub(crate) struct GlobalSession {
|
||||
pub(crate) start_block_number: u64,
|
||||
pub(crate) sets: Vec<ValidatorSet>,
|
||||
pub(crate) keys: HashMap<NetworkId, SeraiAddress>,
|
||||
pub(crate) stakes: HashMap<NetworkId, u64>,
|
||||
pub(crate) total_stake: u64,
|
||||
}
|
||||
impl GlobalSession {
|
||||
fn id(mut cosigners: Vec<ValidatorSet>) -> [u8; 32] {
|
||||
cosigners.sort_by_key(|a| borsh::to_vec(a).unwrap());
|
||||
Blake2s256::digest(borsh::to_vec(&cosigners).unwrap()).into()
|
||||
}
|
||||
}
|
||||
|
||||
create_db! {
|
||||
Cosign {
|
||||
// The following are populated by the intend task and used throughout the library
|
||||
|
||||
// An index of Substrate blocks
|
||||
SubstrateBlocks: (block_number: u64) -> [u8; 32],
|
||||
// A mapping from a global session's ID to its relevant information.
|
||||
GlobalSessions: (global_session: [u8; 32]) -> GlobalSession,
|
||||
// The last block to be cosigned by a global session.
|
||||
GlobalSessionsLastBlock: (global_session: [u8; 32]) -> u64,
|
||||
// The latest global session intended.
|
||||
//
|
||||
// This is distinct from the latest global session for which we've evaluated the cosigns for.
|
||||
LatestGlobalSessionIntended: () -> [u8; 32],
|
||||
|
||||
// The following are managed by the `intake_cosign` function present in this file
|
||||
|
||||
// The latest cosigned block for each network.
|
||||
//
|
||||
// This will only be populated with cosigns predating or during the most recent global session
|
||||
// to have its start cosigned.
|
||||
//
|
||||
// The global session changes upon a notable block, causing each global session to have exactly
|
||||
// one notable block. All validator sets will explicitly produce a cosign for their notable
|
||||
// block, causing the latest cosigned block for a global session to either be the global
|
||||
// session's notable cosigns or the network's latest cosigns.
|
||||
NetworksLatestCosignedBlock: (global_session: [u8; 32], network: NetworkId) -> SignedCosign,
|
||||
// Cosigns received for blocks not locally recognized as finalized.
|
||||
Faults: (global_session: [u8; 32]) -> Vec<SignedCosign>,
|
||||
// The global session which faulted.
|
||||
FaultedSession: () -> [u8; 32],
|
||||
}
|
||||
}
|
||||
|
||||
/// If the block has events.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||
enum HasEvents {
|
||||
/// The block had a notable event.
|
||||
///
|
||||
/// This is a special case as blocks with key gen events change the keys used for cosigning, and
|
||||
/// accordingly must be cosigned before we advance past them.
|
||||
Notable,
|
||||
/// The block had an non-notable event justifying a cosign.
|
||||
NonNotable,
|
||||
/// The block didn't have an event justifying a cosign.
|
||||
No,
|
||||
}
|
||||
|
||||
/// An intended cosign.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||
struct CosignIntent {
|
||||
/// The global session this cosign is being performed under.
|
||||
global_session: [u8; 32],
|
||||
/// The number of the block to cosign.
|
||||
block_number: u64,
|
||||
/// The hash of the block to cosign.
|
||||
block_hash: [u8; 32],
|
||||
/// If this cosign must be handled before further cosigns are.
|
||||
notable: bool,
|
||||
}
|
||||
|
||||
/// A cosign.
|
||||
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub struct Cosign {
|
||||
/// The global session this cosign is being performed under.
|
||||
pub global_session: [u8; 32],
|
||||
/// The number of the block to cosign.
|
||||
pub block_number: u64,
|
||||
/// The hash of the block to cosign.
|
||||
pub block_hash: [u8; 32],
|
||||
/// The actual cosigner.
|
||||
pub cosigner: NetworkId,
|
||||
}
|
||||
|
||||
/// A signed cosign.
|
||||
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub struct SignedCosign {
|
||||
/// The cosign.
|
||||
pub cosign: Cosign,
|
||||
/// The signature for the cosign.
|
||||
pub signature: [u8; 64],
|
||||
}
|
||||
|
||||
impl SignedCosign {
|
||||
fn verify_signature(&self, signer: serai_client::Public) -> bool {
|
||||
let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
|
||||
let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };
|
||||
|
||||
signer.verify_simple(COSIGN_CONTEXT, &borsh::to_vec(&self.cosign).unwrap(), &signature).is_ok()
|
||||
}
|
||||
}
|
||||
|
||||
/// Fetch the keys used for cosigning by a specific network.
|
||||
async fn keys_for_network(
|
||||
serai: &TemporalSerai<'_>,
|
||||
network: NetworkId,
|
||||
) -> Result<Option<(Session, KeyPair)>, String> {
|
||||
let Some(latest_session) =
|
||||
serai.validator_sets().session(network).await.map_err(|e| format!("{e:?}"))?
|
||||
else {
|
||||
// If this network hasn't had a session declared, move on
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
// Get the keys for the latest session
|
||||
if let Some(keys) = serai
|
||||
.validator_sets()
|
||||
.keys(ValidatorSet { network, session: latest_session })
|
||||
.await
|
||||
.map_err(|e| format!("{e:?}"))?
|
||||
{
|
||||
return Ok(Some((latest_session, keys)));
|
||||
}
|
||||
|
||||
// If the latest session has yet to set keys, use the prior session
|
||||
if let Some(prior_session) = latest_session.0.checked_sub(1).map(Session) {
|
||||
if let Some(keys) = serai
|
||||
.validator_sets()
|
||||
.keys(ValidatorSet { network, session: prior_session })
|
||||
.await
|
||||
.map_err(|e| format!("{e:?}"))?
|
||||
{
|
||||
return Ok(Some((prior_session, keys)));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
/// Fetch the `ValidatorSet`s, and their associated keys, used for cosigning as of this block.
|
||||
async fn cosigning_sets(serai: &TemporalSerai<'_>) -> Result<Vec<(ValidatorSet, Public)>, String> {
|
||||
let mut sets = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
|
||||
for network in serai_client::primitives::NETWORKS {
|
||||
let Some((session, keys)) = keys_for_network(serai, network).await? else {
|
||||
// If this network doesn't have usable keys, move on
|
||||
continue;
|
||||
};
|
||||
|
||||
sets.push((ValidatorSet { network, session }, keys.0));
|
||||
}
|
||||
Ok(sets)
|
||||
}
|
||||
|
||||
/// An object usable to request notable cosigns for a block.
|
||||
pub trait RequestNotableCosigns: 'static + Send {
|
||||
/// The error type which may be encountered when requesting notable cosigns.
|
||||
type Error: Debug;
|
||||
|
||||
/// Request the notable cosigns for this global session.
|
||||
fn request_notable_cosigns(
|
||||
&self,
|
||||
global_session: [u8; 32],
|
||||
) -> impl Send + Future<Output = Result<(), Self::Error>>;
|
||||
}
|
||||
|
||||
/// An error used to indicate the cosigning protocol has faulted.
|
||||
pub struct Faulted;
|
||||
|
||||
/// The interface to manage cosigning with.
|
||||
pub struct Cosigning<D: Db> {
|
||||
db: D,
|
||||
}
|
||||
impl<D: Db> Cosigning<D> {
|
||||
/// Spawn the tasks to intend and evaluate cosigns.
|
||||
///
|
||||
/// The database specified must only be used with a singular instance of the Serai network, and
|
||||
/// only used once at any given time.
|
||||
pub fn spawn<R: RequestNotableCosigns>(
|
||||
db: D,
|
||||
serai: Serai,
|
||||
request: R,
|
||||
tasks_to_run_upon_cosigning: Vec<TaskHandle>,
|
||||
) -> Self {
|
||||
let (intend_task, _intend_task_handle) = Task::new();
|
||||
let (evaluator_task, evaluator_task_handle) = Task::new();
|
||||
let (delay_task, delay_task_handle) = Task::new();
|
||||
tokio::spawn(
|
||||
(intend::CosignIntendTask { db: db.clone(), serai })
|
||||
.continually_run(intend_task, vec![evaluator_task_handle]),
|
||||
);
|
||||
tokio::spawn(
|
||||
(evaluator::CosignEvaluatorTask { db: db.clone(), request })
|
||||
.continually_run(evaluator_task, vec![delay_task_handle]),
|
||||
);
|
||||
tokio::spawn(
|
||||
(delay::CosignDelayTask { db: db.clone() })
|
||||
.continually_run(delay_task, tasks_to_run_upon_cosigning),
|
||||
);
|
||||
Self { db }
|
||||
}
|
||||
|
||||
/// The latest cosigned block number.
|
||||
pub fn latest_cosigned_block_number(&self) -> Result<u64, Faulted> {
|
||||
if FaultedSession::get(&self.db).is_some() {
|
||||
Err(Faulted)?;
|
||||
}
|
||||
|
||||
Ok(LatestCosignedBlockNumber::get(&self.db).unwrap_or(0))
|
||||
}
|
||||
|
||||
/// Fetch the notable cosigns for a global session in order to respond to requests.
|
||||
///
|
||||
/// If this global session hasn't produced any notable cosigns, this will return the latest
|
||||
/// cosigns for this session.
|
||||
pub fn notable_cosigns(&self, global_session: [u8; 32]) -> Vec<SignedCosign> {
|
||||
let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
|
||||
for network in serai_client::primitives::NETWORKS {
|
||||
if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, global_session, network) {
|
||||
cosigns.push(cosign);
|
||||
}
|
||||
}
|
||||
cosigns
|
||||
}
|
||||
|
||||
/// The cosigns to rebroadcast every `BROADCAST_FREQUENCY` seconds.
|
||||
///
|
||||
/// This will be the most recent cosigns, in case the initial broadcast failed, or the faulty
|
||||
/// cosigns, in case of a fault, to induce identification of the fault by others.
|
||||
pub fn cosigns_to_rebroadcast(&self) -> Vec<SignedCosign> {
|
||||
if let Some(faulted) = FaultedSession::get(&self.db) {
|
||||
let mut cosigns = Faults::get(&self.db, faulted).expect("faulted with no faults");
|
||||
// Also include all of our recognized-as-honest cosigns in an attempt to induce fault
|
||||
// identification in those who see the faulty cosigns as honest
|
||||
for network in serai_client::primitives::NETWORKS {
|
||||
if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, faulted, network) {
|
||||
if cosign.cosign.global_session == faulted {
|
||||
cosigns.push(cosign);
|
||||
}
|
||||
}
|
||||
}
|
||||
cosigns
|
||||
} else {
|
||||
let Some(latest_global_session) = LatestGlobalSessionIntended::get(&self.db) else {
|
||||
return vec![];
|
||||
};
|
||||
let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
|
||||
for network in serai_client::primitives::NETWORKS {
|
||||
if let Some(cosign) =
|
||||
NetworksLatestCosignedBlock::get(&self.db, latest_global_session, network)
|
||||
{
|
||||
cosigns.push(cosign);
|
||||
}
|
||||
}
|
||||
cosigns
|
||||
}
|
||||
}
|
||||
|
||||
/// Intake a cosign from the Serai network.
|
||||
///
|
||||
/// - Returns Err(_) if there was an error trying to validate the cosign and it should be retired
|
||||
/// later.
|
||||
/// - Returns Ok(true) if the cosign was successfully handled or could not be handled at this
|
||||
/// time.
|
||||
/// - Returns Ok(false) if the cosign was invalid.
|
||||
//
|
||||
// We collapse a cosign which shouldn't be handled yet into a valid cosign (`Ok(true)`) as we
|
||||
// assume we'll either explicitly request it if we need it or we'll naturally see it (or a later,
|
||||
// more relevant, cosign) again.
|
||||
//
|
||||
// Takes `&mut self` as this should only be called once at any given moment.
|
||||
// TODO: Don't overload bool here
|
||||
pub fn intake_cosign(&mut self, signed_cosign: &SignedCosign) -> Result<bool, String> {
|
||||
let cosign = &signed_cosign.cosign;
|
||||
let network = cosign.cosigner;
|
||||
|
||||
// Check our indexed blockchain includes a block with this block number
|
||||
let Some(our_block_hash) = SubstrateBlocks::get(&self.db, cosign.block_number) else {
|
||||
return Ok(true);
|
||||
};
|
||||
let faulty = cosign.block_hash != our_block_hash;
|
||||
|
||||
// Check this isn't a dated cosign within its global session (as it would be if rebroadcasted)
|
||||
if !faulty {
|
||||
if let Some(existing) =
|
||||
NetworksLatestCosignedBlock::get(&self.db, cosign.global_session, network)
|
||||
{
|
||||
if existing.cosign.block_number >= cosign.block_number {
|
||||
return Ok(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let Some(global_session) = GlobalSessions::get(&self.db, cosign.global_session) else {
|
||||
// Unrecognized global session
|
||||
return Ok(true);
|
||||
};
|
||||
|
||||
// Check the cosigned block number is in range to the global session
|
||||
if cosign.block_number < global_session.start_block_number {
|
||||
// Cosign is for a block predating the global session
|
||||
return Ok(false);
|
||||
}
|
||||
if !faulty {
|
||||
// This prevents a malicious validator set, on the same chain, from producing a cosign after
|
||||
// their final block, replacing their notable cosign
|
||||
if let Some(last_block) = GlobalSessionsLastBlock::get(&self.db, cosign.global_session) {
|
||||
if cosign.block_number > last_block {
|
||||
// Cosign is for a block after the last block this global session should have signed
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check the cosign's signature
|
||||
{
|
||||
let key = Public::from({
|
||||
let Some(key) = global_session.keys.get(&network) else {
|
||||
return Ok(false);
|
||||
};
|
||||
*key
|
||||
});
|
||||
|
||||
if !signed_cosign.verify_signature(key) {
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
|
||||
// Since we verified this cosign's signature, and have a chain sufficiently long, handle the
|
||||
// cosign
|
||||
|
||||
let mut txn = self.db.txn();
|
||||
|
||||
if !faulty {
|
||||
// If this is for a future global session, we don't acknowledge this cosign at this time
|
||||
let latest_cosigned_block_number = LatestCosignedBlockNumber::get(&txn).unwrap_or(0);
|
||||
// This global session starts the block *after* its declaration, so we want to check if the
|
||||
// block declaring it was cosigned
|
||||
if (global_session.start_block_number - 1) > latest_cosigned_block_number {
|
||||
drop(txn);
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
// This is safe as it's in-range and newer, as prior checked since it isn't faulty
|
||||
NetworksLatestCosignedBlock::set(&mut txn, cosign.global_session, network, signed_cosign);
|
||||
} else {
|
||||
let mut faults = Faults::get(&txn, cosign.global_session).unwrap_or(vec![]);
|
||||
// Only handle this as a fault if this set wasn't prior faulty
|
||||
if !faults.iter().any(|cosign| cosign.cosign.cosigner == network) {
|
||||
faults.push(signed_cosign.clone());
|
||||
Faults::set(&mut txn, cosign.global_session, &faults);
|
||||
|
||||
let mut weight_cosigned = 0;
|
||||
for fault in &faults {
|
||||
let Some(stake) = global_session.stakes.get(&fault.cosign.cosigner) else {
|
||||
Err("cosigner with recognized key didn't have a stake entry saved".to_string())?
|
||||
};
|
||||
weight_cosigned += stake;
|
||||
}
|
||||
|
||||
// Check if the sum weight means a fault has occurred
|
||||
if weight_cosigned >= ((global_session.total_stake * 17) / 100) {
|
||||
FaultedSession::set(&mut txn, &cosign.global_session);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
txn.commit();
|
||||
Ok(true)
|
||||
}
|
||||
}
|
||||
333
coordinator/src/cosign_evaluator.rs
Normal file
333
coordinator/src/cosign_evaluator.rs
Normal file
@@ -0,0 +1,333 @@
|
||||
use core::time::Duration;
|
||||
use std::{
|
||||
sync::Arc,
|
||||
collections::{HashSet, HashMap},
|
||||
};
|
||||
|
||||
use tokio::{
|
||||
sync::{mpsc, Mutex, RwLock},
|
||||
time::sleep,
|
||||
};
|
||||
|
||||
use borsh::BorshSerialize;
|
||||
use sp_application_crypto::RuntimePublic;
|
||||
use serai_client::{
|
||||
primitives::{ExternalNetworkId, EXTERNAL_NETWORKS},
|
||||
validator_sets::primitives::{ExternalValidatorSet, Session},
|
||||
Serai, SeraiError, TemporalSerai,
|
||||
};
|
||||
|
||||
use serai_db::{Get, DbTxn, Db, create_db};
|
||||
|
||||
use processor_messages::coordinator::cosign_block_msg;
|
||||
|
||||
use crate::{
|
||||
p2p::{CosignedBlock, GossipMessageKind, P2p},
|
||||
substrate::LatestCosignedBlock,
|
||||
};
|
||||
|
||||
create_db! {
|
||||
CosignDb {
|
||||
ReceivedCosign: (set: ExternalValidatorSet, block: [u8; 32]) -> CosignedBlock,
|
||||
LatestCosign: (network: ExternalNetworkId) -> CosignedBlock,
|
||||
DistinctChain: (set: ExternalValidatorSet) -> (),
|
||||
}
|
||||
}
|
||||
|
||||
pub struct CosignEvaluator<D: Db> {
|
||||
db: Mutex<D>,
|
||||
serai: Arc<Serai>,
|
||||
stakes: RwLock<Option<HashMap<ExternalNetworkId, u64>>>,
|
||||
latest_cosigns: RwLock<HashMap<ExternalNetworkId, CosignedBlock>>,
|
||||
}
|
||||
|
||||
impl<D: Db> CosignEvaluator<D> {
|
||||
async fn update_latest_cosign(&self) {
|
||||
let stakes_lock = self.stakes.read().await;
|
||||
// If we haven't gotten the stake data yet, return
|
||||
let Some(stakes) = stakes_lock.as_ref() else { return };
|
||||
|
||||
let total_stake = stakes.values().copied().sum::<u64>();
|
||||
|
||||
let latest_cosigns = self.latest_cosigns.read().await;
|
||||
let mut highest_block = 0;
|
||||
for cosign in latest_cosigns.values() {
|
||||
let mut networks = HashSet::new();
|
||||
for (network, sub_cosign) in &*latest_cosigns {
|
||||
if sub_cosign.block_number >= cosign.block_number {
|
||||
networks.insert(network);
|
||||
}
|
||||
}
|
||||
let sum_stake =
|
||||
networks.into_iter().map(|network| stakes.get(network).unwrap_or(&0)).sum::<u64>();
|
||||
let needed_stake = ((total_stake * 2) / 3) + 1;
|
||||
if (total_stake == 0) || (sum_stake > needed_stake) {
|
||||
highest_block = highest_block.max(cosign.block_number);
|
||||
}
|
||||
}
|
||||
|
||||
let mut db_lock = self.db.lock().await;
|
||||
let mut txn = db_lock.txn();
|
||||
if highest_block > LatestCosignedBlock::latest_cosigned_block(&txn) {
|
||||
log::info!("setting latest cosigned block to {}", highest_block);
|
||||
LatestCosignedBlock::set(&mut txn, &highest_block);
|
||||
}
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
async fn update_stakes(&self) -> Result<(), SeraiError> {
|
||||
let serai = self.serai.as_of_latest_finalized_block().await?;
|
||||
|
||||
let mut stakes = HashMap::new();
|
||||
for network in EXTERNAL_NETWORKS {
|
||||
// Use if this network has published a Batch for a short-circuit of if they've ever set a key
|
||||
let set_key = serai.in_instructions().last_batch_for_network(network).await?.is_some();
|
||||
if set_key {
|
||||
stakes.insert(
|
||||
network,
|
||||
serai
|
||||
.validator_sets()
|
||||
.total_allocated_stake(network.into())
|
||||
.await?
|
||||
.expect("network which published a batch didn't have a stake set")
|
||||
.0,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Since we've successfully built stakes, set it
|
||||
*self.stakes.write().await = Some(stakes);
|
||||
|
||||
self.update_latest_cosign().await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Uses Err to signify a message should be retried
|
||||
async fn handle_new_cosign(&self, cosign: CosignedBlock) -> Result<(), SeraiError> {
|
||||
// If we already have this cosign or a newer cosign, return
|
||||
if let Some(latest) = self.latest_cosigns.read().await.get(&cosign.network) {
|
||||
if latest.block_number >= cosign.block_number {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
// If this an old cosign (older than a day), drop it
|
||||
let latest_block = self.serai.latest_finalized_block().await?;
|
||||
if (cosign.block_number + (24 * 60 * 60 / 6)) < latest_block.number() {
|
||||
log::debug!("received old cosign supposedly signed by {:?}", cosign.network);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let Some(block) = self.serai.finalized_block_by_number(cosign.block_number).await? else {
|
||||
log::warn!("received cosign with a block number which doesn't map to a block");
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
async fn set_with_keys_fn(
|
||||
serai: &TemporalSerai<'_>,
|
||||
network: ExternalNetworkId,
|
||||
) -> Result<Option<ExternalValidatorSet>, SeraiError> {
|
||||
let Some(latest_session) = serai.validator_sets().session(network.into()).await? else {
|
||||
log::warn!("received cosign from {:?}, which doesn't yet have a session", network);
|
||||
return Ok(None);
|
||||
};
|
||||
let prior_session = Session(latest_session.0.saturating_sub(1));
|
||||
Ok(Some(
|
||||
if serai
|
||||
.validator_sets()
|
||||
.keys(ExternalValidatorSet { network, session: prior_session })
|
||||
.await?
|
||||
.is_some()
|
||||
{
|
||||
ExternalValidatorSet { network, session: prior_session }
|
||||
} else {
|
||||
ExternalValidatorSet { network, session: latest_session }
|
||||
},
|
||||
))
|
||||
}
|
||||
|
||||
// Get the key for this network as of the prior block
|
||||
// If we have two chains, this value may be different across chains depending on if one chain
|
||||
// included the set_keys and one didn't
|
||||
// Because set_keys will force a cosign, it will force detection of distinct blocks
|
||||
// re: set_keys using keys prior to set_keys (assumed amenable to all)
|
||||
let serai = self.serai.as_of(block.header.parent_hash.into());
|
||||
|
||||
let Some(set_with_keys) = set_with_keys_fn(&serai, cosign.network).await? else {
|
||||
return Ok(());
|
||||
};
|
||||
let Some(keys) = serai.validator_sets().keys(set_with_keys).await? else {
|
||||
log::warn!("received cosign for a block we didn't have keys for");
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
if !keys
|
||||
.0
|
||||
.verify(&cosign_block_msg(cosign.block_number, cosign.block), &cosign.signature.into())
|
||||
{
|
||||
log::warn!("received cosigned block with an invalid signature");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
log::info!(
|
||||
"received cosign for block {} ({}) by {:?}",
|
||||
block.number(),
|
||||
hex::encode(cosign.block),
|
||||
cosign.network
|
||||
);
|
||||
|
||||
// Save this cosign to the DB
|
||||
{
|
||||
let mut db = self.db.lock().await;
|
||||
let mut txn = db.txn();
|
||||
ReceivedCosign::set(&mut txn, set_with_keys, cosign.block, &cosign);
|
||||
LatestCosign::set(&mut txn, set_with_keys.network, &(cosign));
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
if cosign.block != block.hash() {
|
||||
log::error!(
|
||||
"received cosign for a distinct block at {}. we have {}. cosign had {}",
|
||||
cosign.block_number,
|
||||
hex::encode(block.hash()),
|
||||
hex::encode(cosign.block)
|
||||
);
|
||||
|
||||
let serai = self.serai.as_of(latest_block.hash());
|
||||
|
||||
let mut db = self.db.lock().await;
|
||||
// Save this set as being on a different chain
|
||||
let mut txn = db.txn();
|
||||
DistinctChain::set(&mut txn, set_with_keys, &());
|
||||
txn.commit();
|
||||
|
||||
let mut total_stake = 0;
|
||||
let mut total_on_distinct_chain = 0;
|
||||
for network in EXTERNAL_NETWORKS {
|
||||
// Get the current set for this network
|
||||
let set_with_keys = {
|
||||
let mut res;
|
||||
while {
|
||||
res = set_with_keys_fn(&serai, network).await;
|
||||
res.is_err()
|
||||
} {
|
||||
log::error!(
|
||||
"couldn't get the set with keys when checking for a distinct chain: {:?}",
|
||||
res
|
||||
);
|
||||
tokio::time::sleep(core::time::Duration::from_secs(3)).await;
|
||||
}
|
||||
res.unwrap()
|
||||
};
|
||||
|
||||
// Get its stake
|
||||
// Doesn't use the stakes inside self to prevent deadlocks re: multi-lock acquisition
|
||||
if let Some(set_with_keys) = set_with_keys {
|
||||
let stake = {
|
||||
let mut res;
|
||||
while {
|
||||
res =
|
||||
serai.validator_sets().total_allocated_stake(set_with_keys.network.into()).await;
|
||||
res.is_err()
|
||||
} {
|
||||
log::error!(
|
||||
"couldn't get total allocated stake when checking for a distinct chain: {:?}",
|
||||
res
|
||||
);
|
||||
tokio::time::sleep(core::time::Duration::from_secs(3)).await;
|
||||
}
|
||||
res.unwrap()
|
||||
};
|
||||
|
||||
if let Some(stake) = stake {
|
||||
total_stake += stake.0;
|
||||
|
||||
if DistinctChain::get(&*db, set_with_keys).is_some() {
|
||||
total_on_distinct_chain += stake.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// See https://github.com/serai-dex/serai/issues/339 for the reasoning on 17%
|
||||
if (total_stake * 17 / 100) <= total_on_distinct_chain {
|
||||
panic!("17% of validator sets (by stake) have co-signed a distinct chain");
|
||||
}
|
||||
} else {
|
||||
{
|
||||
let mut latest_cosigns = self.latest_cosigns.write().await;
|
||||
latest_cosigns.insert(cosign.network, cosign);
|
||||
}
|
||||
self.update_latest_cosign().await;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
pub fn new<P: P2p>(db: D, p2p: P, serai: Arc<Serai>) -> mpsc::UnboundedSender<CosignedBlock> {
|
||||
let mut latest_cosigns = HashMap::new();
|
||||
for network in EXTERNAL_NETWORKS {
|
||||
if let Some(cosign) = LatestCosign::get(&db, network) {
|
||||
latest_cosigns.insert(network, cosign);
|
||||
}
|
||||
}
|
||||
|
||||
let evaluator = Arc::new(Self {
|
||||
db: Mutex::new(db),
|
||||
serai,
|
||||
stakes: RwLock::new(None),
|
||||
latest_cosigns: RwLock::new(latest_cosigns),
|
||||
});
|
||||
|
||||
// Spawn a task to update stakes regularly
|
||||
tokio::spawn({
|
||||
let evaluator = evaluator.clone();
|
||||
async move {
|
||||
loop {
|
||||
// Run this until it passes
|
||||
while evaluator.update_stakes().await.is_err() {
|
||||
log::warn!("couldn't update stakes in the cosign evaluator");
|
||||
// Try again in 10 seconds
|
||||
sleep(Duration::from_secs(10)).await;
|
||||
}
|
||||
// Run it every 10 minutes as we don't need the exact stake data for this to be valid
|
||||
sleep(Duration::from_secs(10 * 60)).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Spawn a task to receive cosigns and handle them
|
||||
let (send, mut recv) = mpsc::unbounded_channel();
|
||||
tokio::spawn({
|
||||
let evaluator = evaluator.clone();
|
||||
async move {
|
||||
while let Some(msg) = recv.recv().await {
|
||||
while evaluator.handle_new_cosign(msg).await.is_err() {
|
||||
// Try again in 10 seconds
|
||||
sleep(Duration::from_secs(10)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Spawn a task to rebroadcast the most recent cosigns
|
||||
tokio::spawn({
|
||||
async move {
|
||||
loop {
|
||||
let cosigns = evaluator.latest_cosigns.read().await.values().copied().collect::<Vec<_>>();
|
||||
for cosign in cosigns {
|
||||
let mut buf = vec![];
|
||||
cosign.serialize(&mut buf).unwrap();
|
||||
P2p::broadcast(&p2p, GossipMessageKind::CosignedBlock, buf).await;
|
||||
}
|
||||
sleep(Duration::from_secs(60)).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Return the channel to send cosigns
|
||||
send
|
||||
}
|
||||
}
|
||||
@@ -6,9 +6,9 @@ use blake2::{
|
||||
use scale::Encode;
|
||||
use borsh::{BorshSerialize, BorshDeserialize};
|
||||
use serai_client::{
|
||||
primitives::NetworkId,
|
||||
validator_sets::primitives::{Session, ValidatorSet},
|
||||
in_instructions::primitives::{Batch, SignedBatch},
|
||||
primitives::ExternalNetworkId,
|
||||
validator_sets::primitives::{ExternalValidatorSet, Session},
|
||||
};
|
||||
|
||||
pub use serai_db::*;
|
||||
@@ -18,21 +18,21 @@ use crate::tributary::{TributarySpec, Transaction, scanner::RecognizedIdType};
|
||||
|
||||
create_db!(
|
||||
MainDb {
|
||||
HandledMessageDb: (network: NetworkId) -> u64,
|
||||
HandledMessageDb: (network: ExternalNetworkId) -> u64,
|
||||
ActiveTributaryDb: () -> Vec<u8>,
|
||||
RetiredTributaryDb: (set: ValidatorSet) -> (),
|
||||
RetiredTributaryDb: (set: ExternalValidatorSet) -> (),
|
||||
FirstPreprocessDb: (
|
||||
network: NetworkId,
|
||||
network: ExternalNetworkId,
|
||||
id_type: RecognizedIdType,
|
||||
id: &[u8]
|
||||
) -> Vec<Vec<u8>>,
|
||||
LastReceivedBatchDb: (network: NetworkId) -> u32,
|
||||
ExpectedBatchDb: (network: NetworkId, id: u32) -> [u8; 32],
|
||||
BatchDb: (network: NetworkId, id: u32) -> SignedBatch,
|
||||
LastVerifiedBatchDb: (network: NetworkId) -> u32,
|
||||
HandoverBatchDb: (set: ValidatorSet) -> u32,
|
||||
LookupHandoverBatchDb: (network: NetworkId, batch: u32) -> Session,
|
||||
QueuedBatchesDb: (set: ValidatorSet) -> Vec<u8>
|
||||
LastReceivedBatchDb: (network: ExternalNetworkId) -> u32,
|
||||
ExpectedBatchDb: (network: ExternalNetworkId, id: u32) -> [u8; 32],
|
||||
BatchDb: (network: ExternalNetworkId, id: u32) -> SignedBatch,
|
||||
LastVerifiedBatchDb: (network: ExternalNetworkId) -> u32,
|
||||
HandoverBatchDb: (set: ExternalValidatorSet) -> u32,
|
||||
LookupHandoverBatchDb: (network: ExternalNetworkId, batch: u32) -> Session,
|
||||
QueuedBatchesDb: (set: ExternalValidatorSet) -> Vec<u8>
|
||||
}
|
||||
);
|
||||
|
||||
@@ -61,7 +61,7 @@ impl ActiveTributaryDb {
|
||||
ActiveTributaryDb::set(txn, &existing_bytes);
|
||||
}
|
||||
|
||||
pub fn retire_tributary(txn: &mut impl DbTxn, set: ValidatorSet) {
|
||||
pub fn retire_tributary(txn: &mut impl DbTxn, set: ExternalValidatorSet) {
|
||||
let mut active = Self::active_tributaries(txn).1;
|
||||
for i in 0 .. active.len() {
|
||||
if active[i].set() == set {
|
||||
@@ -82,7 +82,7 @@ impl ActiveTributaryDb {
|
||||
impl FirstPreprocessDb {
|
||||
pub fn save_first_preprocess(
|
||||
txn: &mut impl DbTxn,
|
||||
network: NetworkId,
|
||||
network: ExternalNetworkId,
|
||||
id_type: RecognizedIdType,
|
||||
id: &[u8],
|
||||
preprocess: &Vec<Vec<u8>>,
|
||||
@@ -108,19 +108,19 @@ impl ExpectedBatchDb {
|
||||
}
|
||||
|
||||
impl HandoverBatchDb {
|
||||
pub fn set_handover_batch(txn: &mut impl DbTxn, set: ValidatorSet, batch: u32) {
|
||||
pub fn set_handover_batch(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: u32) {
|
||||
Self::set(txn, set, &batch);
|
||||
LookupHandoverBatchDb::set(txn, set.network, batch, &set.session);
|
||||
}
|
||||
}
|
||||
impl QueuedBatchesDb {
|
||||
pub fn queue(txn: &mut impl DbTxn, set: ValidatorSet, batch: &Transaction) {
|
||||
pub fn queue(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: &Transaction) {
|
||||
let mut batches = Self::get(txn, set).unwrap_or_default();
|
||||
batch.write(&mut batches).unwrap();
|
||||
Self::set(txn, set, &batches);
|
||||
}
|
||||
|
||||
pub fn take(txn: &mut impl DbTxn, set: ValidatorSet) -> Vec<Transaction> {
|
||||
pub fn take(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec<Transaction> {
|
||||
let batches_vec = Self::get(txn, set).unwrap_or_default();
|
||||
txn.del(Self::key(set));
|
||||
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
#![expect(clippy::cast_possible_truncation)]
|
||||
|
||||
use core::ops::Deref;
|
||||
use std::{
|
||||
sync::{OnceLock, Arc},
|
||||
@@ -8,22 +10,24 @@ use std::{
|
||||
use zeroize::{Zeroize, Zeroizing};
|
||||
use rand_core::OsRng;
|
||||
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{
|
||||
group::{
|
||||
ff::{Field, PrimeField},
|
||||
GroupEncoding,
|
||||
},
|
||||
Ciphersuite, Ristretto,
|
||||
Ciphersuite,
|
||||
};
|
||||
use schnorr::SchnorrSignature;
|
||||
use frost::Participant;
|
||||
|
||||
use serai_db::{DbTxn, Db};
|
||||
|
||||
use scale::Encode;
|
||||
use borsh::BorshSerialize;
|
||||
use serai_client::{
|
||||
primitives::NetworkId,
|
||||
validator_sets::primitives::{Session, ValidatorSet, KeyPair},
|
||||
primitives::ExternalNetworkId,
|
||||
validator_sets::primitives::{ExternalValidatorSet, KeyPair, Session},
|
||||
Public, Serai, SeraiInInstructions,
|
||||
};
|
||||
|
||||
@@ -78,7 +82,7 @@ pub struct ActiveTributary<D: Db, P: P2p> {
|
||||
#[derive(Clone)]
|
||||
pub enum TributaryEvent<D: Db, P: P2p> {
|
||||
NewTributary(ActiveTributary<D, P>),
|
||||
TributaryRetired(ValidatorSet),
|
||||
TributaryRetired(ExternalValidatorSet),
|
||||
}
|
||||
|
||||
// Creates a new tributary and sends it to all listeners.
|
||||
@@ -113,17 +117,16 @@ async fn add_tributary<D: Db, Pro: Processors, P: P2p>(
|
||||
// If we're rebooting, we'll re-fire this message
|
||||
// This is safe due to the message-queue deduplicating based off the intent system
|
||||
let set = spec.set();
|
||||
|
||||
let our_i = spec
|
||||
.i(&[], Ristretto::generator() * key.deref())
|
||||
.expect("adding a tributary for a set we aren't in set for");
|
||||
processors
|
||||
.send(
|
||||
set.network,
|
||||
processor_messages::key_gen::CoordinatorMessage::GenerateKey {
|
||||
session: set.session,
|
||||
threshold: spec.t(),
|
||||
evrf_public_keys: spec.evrf_public_keys(),
|
||||
// TODO
|
||||
// params: frost::ThresholdParams::new(spec.t(), spec.n(&[]), our_i.start).unwrap(),
|
||||
// shares: u16::from(our_i.end) - u16::from(our_i.start),
|
||||
id: processor_messages::key_gen::KeyGenId { session: set.session, attempt: 0 },
|
||||
params: frost::ThresholdParams::new(spec.t(), spec.n(&[]), our_i.start).unwrap(),
|
||||
shares: u16::from(our_i.end) - u16::from(our_i.start),
|
||||
},
|
||||
)
|
||||
.await;
|
||||
@@ -145,7 +148,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
|
||||
p2p: &P,
|
||||
cosign_channel: &mpsc::UnboundedSender<CosignedBlock>,
|
||||
tributaries: &HashMap<Session, ActiveTributary<D, P>>,
|
||||
network: NetworkId,
|
||||
network: ExternalNetworkId,
|
||||
msg: &processors::Message,
|
||||
) -> bool {
|
||||
#[allow(clippy::nonminimal_bool)]
|
||||
@@ -166,9 +169,12 @@ async fn handle_processor_message<D: Db, P: P2p>(
|
||||
// We'll only receive these if we fired GenerateKey, which we'll only do if if we're
|
||||
// in-set, making the Tributary relevant
|
||||
ProcessorMessage::KeyGen(inner_msg) => match inner_msg {
|
||||
key_gen::ProcessorMessage::Participation { session, .. } |
|
||||
key_gen::ProcessorMessage::GeneratedKeyPair { session, .. } |
|
||||
key_gen::ProcessorMessage::Blame { session, .. } => Some(*session),
|
||||
key_gen::ProcessorMessage::Commitments { id, .. } |
|
||||
key_gen::ProcessorMessage::InvalidCommitments { id, .. } |
|
||||
key_gen::ProcessorMessage::Shares { id, .. } |
|
||||
key_gen::ProcessorMessage::InvalidShare { id, .. } |
|
||||
key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } |
|
||||
key_gen::ProcessorMessage::Blame { id, .. } => Some(id.session),
|
||||
},
|
||||
ProcessorMessage::Sign(inner_msg) => match inner_msg {
|
||||
// We'll only receive InvalidParticipant/Preprocess/Share if we're actively signing
|
||||
@@ -190,7 +196,8 @@ async fn handle_processor_message<D: Db, P: P2p>(
|
||||
.iter()
|
||||
.map(|plan| plan.session)
|
||||
.filter(|session| {
|
||||
RetiredTributaryDb::get(&txn, ValidatorSet { network, session: *session }).is_none()
|
||||
RetiredTributaryDb::get(&txn, ExternalValidatorSet { network, session: *session })
|
||||
.is_none()
|
||||
})
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
@@ -262,14 +269,17 @@ async fn handle_processor_message<D: Db, P: P2p>(
|
||||
}
|
||||
// This causes an action on Substrate yet not on any Tributary
|
||||
coordinator::ProcessorMessage::SignedSlashReport { session, signature } => {
|
||||
let set = ValidatorSet { network, session: *session };
|
||||
let set = ExternalValidatorSet { network, session: *session };
|
||||
let signature: &[u8] = signature.as_ref();
|
||||
let signature = serai_client::Signature(signature.try_into().unwrap());
|
||||
let signature = <[u8; 64]>::try_from(signature).unwrap();
|
||||
let signature: serai_client::Signature = signature.into();
|
||||
|
||||
let slashes = crate::tributary::SlashReport::get(&txn, set)
|
||||
.expect("signed slash report despite not having slash report locally");
|
||||
let slashes_pubs =
|
||||
slashes.iter().map(|(address, points)| (Public(*address), *points)).collect::<Vec<_>>();
|
||||
let slashes_pubs = slashes
|
||||
.iter()
|
||||
.map(|(address, points)| (Public::from(*address), *points))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let tx = serai_client::SeraiValidatorSets::report_slashes(
|
||||
network,
|
||||
@@ -279,7 +289,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
|
||||
.collect::<Vec<_>>()
|
||||
.try_into()
|
||||
.unwrap(),
|
||||
signature.clone(),
|
||||
signature,
|
||||
);
|
||||
|
||||
loop {
|
||||
@@ -390,7 +400,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
|
||||
if let Some(relevant_tributary_value) = relevant_tributary {
|
||||
if RetiredTributaryDb::get(
|
||||
&txn,
|
||||
ValidatorSet { network: msg.network, session: relevant_tributary_value },
|
||||
ExternalValidatorSet { network: msg.network, session: relevant_tributary_value },
|
||||
)
|
||||
.is_some()
|
||||
{
|
||||
@@ -418,32 +428,124 @@ async fn handle_processor_message<D: Db, P: P2p>(
|
||||
|
||||
let txs = match msg.msg.clone() {
|
||||
ProcessorMessage::KeyGen(inner_msg) => match inner_msg {
|
||||
key_gen::ProcessorMessage::Participation { session, participation } => {
|
||||
assert_eq!(session, spec.set().session);
|
||||
vec![Transaction::DkgParticipation { participation, signed: Transaction::empty_signed() }]
|
||||
}
|
||||
key_gen::ProcessorMessage::GeneratedKeyPair { session, substrate_key, network_key } => {
|
||||
assert_eq!(session, spec.set().session);
|
||||
crate::tributary::generated_key_pair::<D>(
|
||||
&mut txn,
|
||||
genesis,
|
||||
&KeyPair(Public(substrate_key), network_key.try_into().unwrap()),
|
||||
);
|
||||
|
||||
// Create a MuSig-based machine to inform Substrate of this key generation
|
||||
let confirmation_nonces =
|
||||
crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, 0);
|
||||
|
||||
vec![Transaction::DkgConfirmationNonces {
|
||||
attempt: 0,
|
||||
confirmation_nonces,
|
||||
key_gen::ProcessorMessage::Commitments { id, commitments } => {
|
||||
vec![Transaction::DkgCommitments {
|
||||
attempt: id.attempt,
|
||||
commitments,
|
||||
signed: Transaction::empty_signed(),
|
||||
}]
|
||||
}
|
||||
key_gen::ProcessorMessage::Blame { session, participant } => {
|
||||
assert_eq!(session, spec.set().session);
|
||||
let participant = spec.reverse_lookup_i(participant).unwrap();
|
||||
vec![Transaction::RemoveParticipant { participant, signed: Transaction::empty_signed() }]
|
||||
key_gen::ProcessorMessage::InvalidCommitments { id, faulty } => {
|
||||
// This doesn't have guaranteed timing
|
||||
//
|
||||
// While the party *should* be fatally slashed and not included in future attempts,
|
||||
// they'll actually be fatally slashed (assuming liveness before the Tributary retires)
|
||||
// and not included in future attempts *which begin after the latency window completes*
|
||||
let participant = spec
|
||||
.reverse_lookup_i(
|
||||
&crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
|
||||
.expect("participating in DKG attempt yet we didn't save who was removed"),
|
||||
faulty,
|
||||
)
|
||||
.unwrap();
|
||||
vec![Transaction::RemoveParticipantDueToDkg {
|
||||
participant,
|
||||
signed: Transaction::empty_signed(),
|
||||
}]
|
||||
}
|
||||
key_gen::ProcessorMessage::Shares { id, mut shares } => {
|
||||
// Create a MuSig-based machine to inform Substrate of this key generation
|
||||
let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, id.attempt);
|
||||
|
||||
let removed = crate::tributary::removed_as_of_dkg_attempt(&txn, genesis, id.attempt)
|
||||
.expect("participating in a DKG attempt yet we didn't track who was removed yet?");
|
||||
let our_i = spec
|
||||
.i(&removed, pub_key)
|
||||
.expect("processor message to DKG for an attempt we aren't a validator in");
|
||||
|
||||
// `tx_shares` needs to be done here as while it can be serialized from the HashMap
|
||||
// without further context, it can't be deserialized without context
|
||||
let mut tx_shares = Vec::with_capacity(shares.len());
|
||||
for shares in &mut shares {
|
||||
tx_shares.push(vec![]);
|
||||
for i in 1 ..= spec.n(&removed) {
|
||||
let i = Participant::new(i).unwrap();
|
||||
if our_i.contains(&i) {
|
||||
if shares.contains_key(&i) {
|
||||
panic!("processor sent us our own shares");
|
||||
}
|
||||
continue;
|
||||
}
|
||||
tx_shares.last_mut().unwrap().push(
|
||||
shares.remove(&i).expect("processor didn't send share for another validator"),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
vec![Transaction::DkgShares {
|
||||
attempt: id.attempt,
|
||||
shares: tx_shares,
|
||||
confirmation_nonces: nonces,
|
||||
signed: Transaction::empty_signed(),
|
||||
}]
|
||||
}
|
||||
key_gen::ProcessorMessage::InvalidShare { id, accuser, faulty, blame } => {
|
||||
vec![Transaction::InvalidDkgShare {
|
||||
attempt: id.attempt,
|
||||
accuser,
|
||||
faulty,
|
||||
blame,
|
||||
signed: Transaction::empty_signed(),
|
||||
}]
|
||||
}
|
||||
key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => {
|
||||
// TODO2: Check the KeyGenId fields
|
||||
|
||||
// Tell the Tributary the key pair, get back the share for the MuSig signature
|
||||
let share = crate::tributary::generated_key_pair::<D>(
|
||||
&mut txn,
|
||||
key,
|
||||
spec,
|
||||
&KeyPair(Public::from(substrate_key), network_key.try_into().unwrap()),
|
||||
id.attempt,
|
||||
);
|
||||
|
||||
// TODO: Move this into generated_key_pair?
|
||||
match share {
|
||||
Ok(share) => {
|
||||
vec![Transaction::DkgConfirmed {
|
||||
attempt: id.attempt,
|
||||
confirmation_share: share,
|
||||
signed: Transaction::empty_signed(),
|
||||
}]
|
||||
}
|
||||
Err(p) => {
|
||||
let participant = spec
|
||||
.reverse_lookup_i(
|
||||
&crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
|
||||
.expect("participating in DKG attempt yet we didn't save who was removed"),
|
||||
p,
|
||||
)
|
||||
.unwrap();
|
||||
vec![Transaction::RemoveParticipantDueToDkg {
|
||||
participant,
|
||||
signed: Transaction::empty_signed(),
|
||||
}]
|
||||
}
|
||||
}
|
||||
}
|
||||
key_gen::ProcessorMessage::Blame { id, participant } => {
|
||||
let participant = spec
|
||||
.reverse_lookup_i(
|
||||
&crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
|
||||
.expect("participating in DKG attempt yet we didn't save who was removed"),
|
||||
participant,
|
||||
)
|
||||
.unwrap();
|
||||
vec![Transaction::RemoveParticipantDueToDkg {
|
||||
participant,
|
||||
signed: Transaction::empty_signed(),
|
||||
}]
|
||||
}
|
||||
},
|
||||
ProcessorMessage::Sign(msg) => match msg {
|
||||
@@ -687,7 +789,7 @@ async fn handle_processor_messages<D: Db, Pro: Processors, P: P2p>(
|
||||
processors: Pro,
|
||||
p2p: P,
|
||||
cosign_channel: mpsc::UnboundedSender<CosignedBlock>,
|
||||
network: NetworkId,
|
||||
network: ExternalNetworkId,
|
||||
mut tributary_event: mpsc::UnboundedReceiver<TributaryEvent<D, P>>,
|
||||
) {
|
||||
let mut tributaries = HashMap::new();
|
||||
@@ -736,7 +838,7 @@ async fn handle_processor_messages<D: Db, Pro: Processors, P: P2p>(
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
|
||||
mut db: D,
|
||||
network: NetworkId,
|
||||
network: ExternalNetworkId,
|
||||
mut tributary_event: mpsc::UnboundedReceiver<TributaryEvent<D, P>>,
|
||||
) {
|
||||
let mut tributaries = HashMap::new();
|
||||
@@ -810,7 +912,7 @@ async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
|
||||
for batch in start_id ..= last_id {
|
||||
let is_pre_handover = LookupHandoverBatchDb::get(&txn, network, batch + 1);
|
||||
if let Some(session) = is_pre_handover {
|
||||
let set = ValidatorSet { network, session };
|
||||
let set = ExternalValidatorSet { network, session };
|
||||
let mut queued = QueuedBatchesDb::take(&mut txn, set);
|
||||
// is_handover_batch is only set for handover `Batch`s we're participating in, making
|
||||
// this safe
|
||||
@@ -828,7 +930,8 @@ async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
|
||||
|
||||
let is_handover = LookupHandoverBatchDb::get(&txn, network, batch);
|
||||
if let Some(session) = is_handover {
|
||||
for queued in QueuedBatchesDb::take(&mut txn, ValidatorSet { network, session }) {
|
||||
for queued in QueuedBatchesDb::take(&mut txn, ExternalValidatorSet { network, session })
|
||||
{
|
||||
to_publish.push((session, queued));
|
||||
}
|
||||
}
|
||||
@@ -875,10 +978,7 @@ pub async fn handle_processors<D: Db, Pro: Processors, P: P2p>(
|
||||
mut tributary_event: broadcast::Receiver<TributaryEvent<D, P>>,
|
||||
) {
|
||||
let mut channels = HashMap::new();
|
||||
for network in serai_client::primitives::NETWORKS {
|
||||
if network == NetworkId::Serai {
|
||||
continue;
|
||||
}
|
||||
for network in serai_client::primitives::EXTERNAL_NETWORKS {
|
||||
let (processor_send, processor_recv) = mpsc::unbounded_channel();
|
||||
tokio::spawn(handle_processor_messages(
|
||||
db.clone(),
|
||||
@@ -1100,7 +1200,7 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
|
||||
}
|
||||
});
|
||||
|
||||
move |set: ValidatorSet, genesis, id_type, id: Vec<u8>| {
|
||||
move |set: ExternalValidatorSet, genesis, id_type, id: Vec<u8>| {
|
||||
log::debug!("recognized ID {:?} {}", id_type, hex::encode(&id));
|
||||
let mut raw_db = raw_db.clone();
|
||||
let key = key.clone();
|
||||
|
||||
@@ -11,7 +11,9 @@ use rand_core::{RngCore, OsRng};
|
||||
|
||||
use scale::{Decode, Encode};
|
||||
use borsh::{BorshSerialize, BorshDeserialize};
|
||||
use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet, Serai};
|
||||
use serai_client::{
|
||||
primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet, Serai,
|
||||
};
|
||||
|
||||
use serai_db::Db;
|
||||
|
||||
@@ -69,7 +71,7 @@ const BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;
|
||||
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub struct CosignedBlock {
|
||||
pub network: NetworkId,
|
||||
pub network: ExternalNetworkId,
|
||||
pub block_number: u64,
|
||||
pub block: [u8; 32],
|
||||
pub signature: [u8; 64],
|
||||
@@ -208,8 +210,8 @@ pub struct HeartbeatBatch {
|
||||
pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p {
|
||||
type Id: Send + Sync + Clone + Copy + fmt::Debug;
|
||||
|
||||
async fn subscribe(&self, set: ValidatorSet, genesis: [u8; 32]);
|
||||
async fn unsubscribe(&self, set: ValidatorSet, genesis: [u8; 32]);
|
||||
async fn subscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]);
|
||||
async fn unsubscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]);
|
||||
|
||||
async fn send_raw(&self, to: Self::Id, msg: Vec<u8>);
|
||||
async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>);
|
||||
@@ -309,7 +311,7 @@ struct Behavior {
|
||||
#[allow(clippy::type_complexity)]
|
||||
#[derive(Clone)]
|
||||
pub struct LibP2p {
|
||||
subscribe: Arc<Mutex<mpsc::UnboundedSender<(bool, ValidatorSet, [u8; 32])>>>,
|
||||
subscribe: Arc<Mutex<mpsc::UnboundedSender<(bool, ExternalValidatorSet, [u8; 32])>>>,
|
||||
send: Arc<Mutex<mpsc::UnboundedSender<(PeerId, Vec<u8>)>>>,
|
||||
broadcast: Arc<Mutex<mpsc::UnboundedSender<(P2pMessageKind, Vec<u8>)>>>,
|
||||
receive: Arc<Mutex<mpsc::UnboundedReceiver<Message<Self>>>>,
|
||||
@@ -397,7 +399,7 @@ impl LibP2p {
|
||||
let (receive_send, receive_recv) = mpsc::unbounded_channel();
|
||||
let (subscribe_send, mut subscribe_recv) = mpsc::unbounded_channel();
|
||||
|
||||
fn topic_for_set(set: ValidatorSet) -> IdentTopic {
|
||||
fn topic_for_set(set: ExternalValidatorSet) -> IdentTopic {
|
||||
IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode())))
|
||||
}
|
||||
|
||||
@@ -407,7 +409,8 @@ impl LibP2p {
|
||||
// The addrs we're currently dialing, and the networks associated with them
|
||||
let dialing_peers = Arc::new(RwLock::new(HashMap::new()));
|
||||
// The peers we're currently connected to, and the networks associated with them
|
||||
let connected_peers = Arc::new(RwLock::new(HashMap::<Multiaddr, HashSet<NetworkId>>::new()));
|
||||
let connected_peers =
|
||||
Arc::new(RwLock::new(HashMap::<Multiaddr, HashSet<ExternalNetworkId>>::new()));
|
||||
|
||||
// Find and connect to peers
|
||||
let (connect_to_network_send, mut connect_to_network_recv) =
|
||||
@@ -420,7 +423,7 @@ impl LibP2p {
|
||||
let connect_to_network_send = connect_to_network_send.clone();
|
||||
async move {
|
||||
loop {
|
||||
let connect = |network: NetworkId, addr: Multiaddr| {
|
||||
let connect = |network: ExternalNetworkId, addr: Multiaddr| {
|
||||
let dialing_peers = dialing_peers.clone();
|
||||
let connected_peers = connected_peers.clone();
|
||||
let to_dial_send = to_dial_send.clone();
|
||||
@@ -507,7 +510,7 @@ impl LibP2p {
|
||||
connect_to_network_networks.insert(network);
|
||||
}
|
||||
for network in connect_to_network_networks {
|
||||
if let Ok(mut nodes) = serai.p2p_validators(network).await {
|
||||
if let Ok(mut nodes) = serai.p2p_validators(network.into()).await {
|
||||
// If there's an insufficient amount of nodes known, connect to all yet add it
|
||||
// back and break
|
||||
if nodes.len() < TARGET_PEERS {
|
||||
@@ -557,7 +560,7 @@ impl LibP2p {
|
||||
|
||||
// Subscribe to any new topics
|
||||
set = subscribe_recv.recv() => {
|
||||
let (subscribe, set, genesis): (_, ValidatorSet, [u8; 32]) =
|
||||
let (subscribe, set, genesis): (_, ExternalValidatorSet, [u8; 32]) =
|
||||
set.expect("subscribe_recv closed. are we shutting down?");
|
||||
let topic = topic_for_set(set);
|
||||
if subscribe {
|
||||
@@ -776,7 +779,7 @@ impl LibP2p {
|
||||
impl P2p for LibP2p {
|
||||
type Id = PeerId;
|
||||
|
||||
async fn subscribe(&self, set: ValidatorSet, genesis: [u8; 32]) {
|
||||
async fn subscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]) {
|
||||
self
|
||||
.subscribe
|
||||
.lock()
|
||||
@@ -785,7 +788,7 @@ impl P2p for LibP2p {
|
||||
.expect("subscribe_send closed. are we shutting down?");
|
||||
}
|
||||
|
||||
async fn unsubscribe(&self, set: ValidatorSet, genesis: [u8; 32]) {
|
||||
async fn unsubscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]) {
|
||||
self
|
||||
.subscribe
|
||||
.lock()
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use serai_client::primitives::NetworkId;
|
||||
use serai_client::primitives::ExternalNetworkId;
|
||||
use processor_messages::{ProcessorMessage, CoordinatorMessage};
|
||||
|
||||
use message_queue::{Service, Metadata, client::MessageQueue};
|
||||
@@ -8,27 +8,27 @@ use message_queue::{Service, Metadata, client::MessageQueue};
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub struct Message {
|
||||
pub id: u64,
|
||||
pub network: NetworkId,
|
||||
pub network: ExternalNetworkId,
|
||||
pub msg: ProcessorMessage,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait Processors: 'static + Send + Sync + Clone {
|
||||
async fn send(&self, network: NetworkId, msg: impl Send + Into<CoordinatorMessage>);
|
||||
async fn recv(&self, network: NetworkId) -> Message;
|
||||
async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>);
|
||||
async fn recv(&self, network: ExternalNetworkId) -> Message;
|
||||
async fn ack(&self, msg: Message);
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Processors for Arc<MessageQueue> {
|
||||
async fn send(&self, network: NetworkId, msg: impl Send + Into<CoordinatorMessage>) {
|
||||
async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>) {
|
||||
let msg: CoordinatorMessage = msg.into();
|
||||
let metadata =
|
||||
Metadata { from: self.service, to: Service::Processor(network), intent: msg.intent() };
|
||||
let msg = borsh::to_vec(&msg).unwrap();
|
||||
self.queue(metadata, msg).await;
|
||||
}
|
||||
async fn recv(&self, network: NetworkId) -> Message {
|
||||
async fn recv(&self, network: ExternalNetworkId) -> Message {
|
||||
let msg = self.next(Service::Processor(network)).await;
|
||||
assert_eq!(msg.from, Service::Processor(network));
|
||||
|
||||
|
||||
338
coordinator/src/substrate/cosign.rs
Normal file
338
coordinator/src/substrate/cosign.rs
Normal file
@@ -0,0 +1,338 @@
|
||||
/*
|
||||
If:
|
||||
A) This block has events and it's been at least X blocks since the last cosign or
|
||||
B) This block doesn't have events but it's been X blocks since a skipped block which did
|
||||
have events or
|
||||
C) This block key gens (which changes who the cosigners are)
|
||||
cosign this block.
|
||||
|
||||
This creates both a minimum and maximum delay of X blocks before a block's cosigning begins,
|
||||
barring key gens which are exceptional. The minimum delay is there to ensure we don't constantly
|
||||
spawn new protocols every 6 seconds, overwriting the old ones. The maximum delay is there to
|
||||
ensure any block needing cosigned is consigned within a reasonable amount of time.
|
||||
*/
|
||||
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::Ciphersuite;
|
||||
|
||||
use borsh::{BorshSerialize, BorshDeserialize};
|
||||
|
||||
use serai_client::{
|
||||
primitives::ExternalNetworkId,
|
||||
validator_sets::primitives::{ExternalValidatorSet, Session},
|
||||
Serai, SeraiError,
|
||||
};
|
||||
|
||||
use serai_db::*;
|
||||
|
||||
use crate::{Db, substrate::in_set, tributary::SeraiBlockNumber};
|
||||
|
||||
// 5 minutes, expressed in blocks
|
||||
// TODO: Pull a constant for block time
|
||||
const COSIGN_DISTANCE: u64 = 5 * 60 / 6;
|
||||
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||
enum HasEvents {
|
||||
KeyGen,
|
||||
Yes,
|
||||
No,
|
||||
}
|
||||
|
||||
create_db!(
|
||||
SubstrateCosignDb {
|
||||
ScanCosignFrom: () -> u64,
|
||||
IntendedCosign: () -> (u64, Option<u64>),
|
||||
BlockHasEventsCache: (block: u64) -> HasEvents,
|
||||
LatestCosignedBlock: () -> u64,
|
||||
}
|
||||
);
|
||||
|
||||
impl IntendedCosign {
|
||||
// Sets the intended to cosign block, clearing the prior value entirely.
|
||||
pub fn set_intended_cosign(txn: &mut impl DbTxn, intended: u64) {
|
||||
Self::set(txn, &(intended, None::<u64>));
|
||||
}
|
||||
|
||||
// Sets the cosign skipped since the last intended to cosign block.
|
||||
pub fn set_skipped_cosign(txn: &mut impl DbTxn, skipped: u64) {
|
||||
let (intended, prior_skipped) = Self::get(txn).unwrap();
|
||||
assert!(prior_skipped.is_none());
|
||||
Self::set(txn, &(intended, Some(skipped)));
|
||||
}
|
||||
}
|
||||
|
||||
impl LatestCosignedBlock {
|
||||
pub fn latest_cosigned_block(getter: &impl Get) -> u64 {
|
||||
Self::get(getter).unwrap_or_default().max(1)
|
||||
}
|
||||
}
|
||||
|
||||
db_channel! {
|
||||
SubstrateDbChannels {
|
||||
CosignTransactions: (network: ExternalNetworkId) -> (Session, u64, [u8; 32]),
|
||||
}
|
||||
}
|
||||
|
||||
impl CosignTransactions {
|
||||
// Append a cosign transaction.
|
||||
pub fn append_cosign(
|
||||
txn: &mut impl DbTxn,
|
||||
set: ExternalValidatorSet,
|
||||
number: u64,
|
||||
hash: [u8; 32],
|
||||
) {
|
||||
CosignTransactions::send(txn, set.network, &(set.session, number, hash))
|
||||
}
|
||||
}
|
||||
|
||||
async fn block_has_events(
|
||||
txn: &mut impl DbTxn,
|
||||
serai: &Serai,
|
||||
block: u64,
|
||||
) -> Result<HasEvents, SeraiError> {
|
||||
let cached = BlockHasEventsCache::get(txn, block);
|
||||
match cached {
|
||||
None => {
|
||||
let serai = serai.as_of(
|
||||
serai
|
||||
.finalized_block_by_number(block)
|
||||
.await?
|
||||
.expect("couldn't get block which should've been finalized")
|
||||
.hash(),
|
||||
);
|
||||
|
||||
if !serai.validator_sets().key_gen_events().await?.is_empty() {
|
||||
return Ok(HasEvents::KeyGen);
|
||||
}
|
||||
|
||||
let has_no_events = serai.coins().burn_with_instruction_events().await?.is_empty() &&
|
||||
serai.in_instructions().batch_events().await?.is_empty() &&
|
||||
serai.validator_sets().new_set_events().await?.is_empty() &&
|
||||
serai.validator_sets().set_retired_events().await?.is_empty();
|
||||
|
||||
let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes };
|
||||
|
||||
BlockHasEventsCache::set(txn, block, &has_events);
|
||||
Ok(has_events)
|
||||
}
|
||||
Some(code) => Ok(code),
|
||||
}
|
||||
}
|
||||
|
||||
async fn potentially_cosign_block(
|
||||
txn: &mut impl DbTxn,
|
||||
serai: &Serai,
|
||||
block: u64,
|
||||
skipped_block: Option<u64>,
|
||||
window_end_exclusive: u64,
|
||||
) -> Result<bool, SeraiError> {
|
||||
// The following code regarding marking cosigned if prior block is cosigned expects this block to
|
||||
// not be zero
|
||||
// While we could perform this check there, there's no reason not to optimize the entire function
|
||||
// as such
|
||||
if block == 0 {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
let block_has_events = block_has_events(txn, serai, block).await?;
|
||||
|
||||
// If this block had no events and immediately follows a cosigned block, mark it as cosigned
|
||||
if (block_has_events == HasEvents::No) &&
|
||||
(LatestCosignedBlock::latest_cosigned_block(txn) == (block - 1))
|
||||
{
|
||||
log::debug!("automatically co-signing next block ({block}) since it has no events");
|
||||
LatestCosignedBlock::set(txn, &block);
|
||||
}
|
||||
|
||||
// If we skipped a block, we're supposed to sign it plus the COSIGN_DISTANCE if no other blocks
|
||||
// trigger a cosigning protocol covering it
|
||||
// This means there will be the maximum delay allowed from a block needing cosigning occurring
|
||||
// and a cosign for it triggering
|
||||
let maximally_latent_cosign_block =
|
||||
skipped_block.map(|skipped_block| skipped_block + COSIGN_DISTANCE);
|
||||
|
||||
// If this block is within the window,
|
||||
if block < window_end_exclusive {
|
||||
// and set a key, cosign it
|
||||
if block_has_events == HasEvents::KeyGen {
|
||||
IntendedCosign::set_intended_cosign(txn, block);
|
||||
// Carry skipped if it isn't included by cosigning this block
|
||||
if let Some(skipped) = skipped_block {
|
||||
if skipped > block {
|
||||
IntendedCosign::set_skipped_cosign(txn, block);
|
||||
}
|
||||
}
|
||||
return Ok(true);
|
||||
}
|
||||
} else if (Some(block) == maximally_latent_cosign_block) || (block_has_events != HasEvents::No) {
|
||||
// Since this block was outside the window and had events/was maximally latent, cosign it
|
||||
IntendedCosign::set_intended_cosign(txn, block);
|
||||
return Ok(true);
|
||||
}
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
/*
|
||||
Advances the cosign protocol as should be done per the latest block.
|
||||
|
||||
A block is considered cosigned if:
|
||||
A) It was cosigned
|
||||
B) It's the parent of a cosigned block
|
||||
C) It immediately follows a cosigned block and has no events requiring cosigning
|
||||
|
||||
This only actually performs advancement within a limited bound (generally until it finds a block
|
||||
which should be cosigned). Accordingly, it is necessary to call multiple times even if
|
||||
`latest_number` doesn't change.
|
||||
*/
|
||||
/// Advance the cosign protocol by scanning finalized Serai blocks up to `latest_number`.
///
/// Scans from the last scanned/intended-cosign position, finds the next block which should be
/// cosigned (if any), records which validator sets will cosign it (and whether `key` is in each
/// set), and persists all progress in a single DB transaction committed at the end.
///
/// - `db`: database holding the cosign-protocol state (intended cosign, scan position, caches).
/// - `key`: this node's Ristretto key, used to check membership in each cosigning set.
/// - `serai`: client for querying finalized blocks and validator-set state.
/// - `latest_number`: the highest finalized block number to scan up to (inclusive).
///
/// Returns `Ok(())` on success, or a `SeraiError` from any of the node queries. All DB writes
/// are atomic: nothing is persisted unless the whole pass succeeds.
async fn advance_cosign_protocol_inner(
  db: &mut impl Db,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  serai: &Serai,
  latest_number: u64,
) -> Result<(), SeraiError> {
  // Single transaction for the whole pass; committed only at the end so a failure mid-scan
  // leaves the persisted state untouched.
  let mut txn = db.txn();

  const INITIAL_INTENDED_COSIGN: u64 = 1;
  let (last_intended_to_cosign_block, mut skipped_block) = {
    let intended_cosign = IntendedCosign::get(&txn);
    // If we haven't prior intended to cosign a block, set the intended cosign to 1
    if let Some(intended_cosign) = intended_cosign {
      intended_cosign
    } else {
      IntendedCosign::set_intended_cosign(&mut txn, INITIAL_INTENDED_COSIGN);
      // Re-read so we get the exact (value, skipped) tuple shape the getter returns
      IntendedCosign::get(&txn).unwrap()
    }
  };

  // "windows" refers to the window of blocks where even if there's a block which should be
  // cosigned, it won't be due to proximity due to the prior cosign
  let mut window_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE;
  // If we've never triggered a cosign, don't skip any cosigns based on proximity
  if last_intended_to_cosign_block == INITIAL_INTENDED_COSIGN {
    window_end_exclusive = 1;
  }

  // The consensus rules for this are `last_intended_to_cosign_block + 1`
  let scan_start_block = last_intended_to_cosign_block + 1;
  // As a practical optimization, we don't re-scan old blocks since old blocks are independent to
  // new state
  let scan_start_block = scan_start_block.max(ScanCosignFrom::get(&txn).unwrap_or(1));

  // Check all blocks within the window to see if they should be cosigned
  // If so, we're skipping them and need to flag them as skipped so that once the window closes, we
  // do cosign them
  // We only perform this check if we haven't already marked a block as skipped since the cosign
  // the skipped block will cause will cosign all other blocks within this window
  if skipped_block.is_none() {
    let window_end_inclusive = window_end_exclusive - 1;
    for b in scan_start_block ..= window_end_inclusive.min(latest_number) {
      if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes {
        skipped_block = Some(b);
        log::debug!("skipping cosigning {b} due to proximity to prior cosign");
        IntendedCosign::set_skipped_cosign(&mut txn, b);
        // Only the first such block needs flagging; its eventual cosign covers the window
        break;
      }
    }
  }

  // A block which should be cosigned
  let mut to_cosign = None;
  // A list of sets which are cosigning, along with a boolean of if we're in the set
  let mut cosigning = vec![];

  for block in scan_start_block ..= latest_number {
    let actual_block = serai
      .finalized_block_by_number(block)
      .await?
      .expect("couldn't get block which should've been finalized");

    // Save the block number for this block, as needed by the cosigner to perform cosigning
    SeraiBlockNumber::set(&mut txn, actual_block.hash(), &block);

    if potentially_cosign_block(&mut txn, serai, block, skipped_block, window_end_exclusive).await?
    {
      to_cosign = Some((block, actual_block.hash()));

      // Get the keys as of the prior block
      // If this key sets new keys, the coordinator won't acknowledge so until we process this
      // block
      // We won't process this block until its co-signed
      // Using the keys of the prior block ensures this deadlock isn't reached
      let serai = serai.as_of(actual_block.header.parent_hash.into());

      for network in serai_client::primitives::EXTERNAL_NETWORKS {
        // Get the latest session to have set keys
        let set_with_keys = {
          let Some(latest_session) = serai.validator_sets().session(network.into()).await? else {
            // Network has no session at all yet; it can't cosign
            continue;
          };
          // Prefer the prior session if it still has keys set (handover not yet completed)
          let prior_session = Session(latest_session.0.saturating_sub(1));
          if serai
            .validator_sets()
            .keys(ExternalValidatorSet { network, session: prior_session })
            .await?
            .is_some()
          {
            ExternalValidatorSet { network, session: prior_session }
          } else {
            let set = ExternalValidatorSet { network, session: latest_session };
            if serai.validator_sets().keys(set).await?.is_none() {
              // Neither session has keys; this network can't cosign this block
              continue;
            }
            set
          }
        };

        log::debug!("{:?} will be cosigning {block}", set_with_keys.network);
        cosigning.push((set_with_keys, in_set(key, &serai, set_with_keys.into()).await?.unwrap()));
      }

      // Stop at the first block needing a cosign; later blocks wait for the next pass
      break;
    }

    // If this TX is committed, always start future scanning from the next block
    ScanCosignFrom::set(&mut txn, &(block + 1));
    // Since we're scanning *from* the next block, tidy the cache
    BlockHasEventsCache::del(&mut txn, block);
  }

  if let Some((number, hash)) = to_cosign {
    // If this block doesn't have cosigners, yet does have events, automatically mark it as
    // cosigned
    if cosigning.is_empty() {
      log::debug!("{} had no cosigners available, marking as cosigned", number);
      LatestCosignedBlock::set(&mut txn, &number);
    } else {
      for (set, in_set) in cosigning {
        if in_set {
          log::debug!("cosigning {number} with {:?} {:?}", set.network, set.session);
          CosignTransactions::append_cosign(&mut txn, set, number, hash);
        }
      }
    }
  }
  txn.commit();

  Ok(())
}
|
||||
|
||||
pub async fn advance_cosign_protocol(
|
||||
db: &mut impl Db,
|
||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
serai: &Serai,
|
||||
latest_number: u64,
|
||||
) -> Result<(), SeraiError> {
|
||||
loop {
|
||||
let scan_from = ScanCosignFrom::get(db).unwrap_or(1);
|
||||
// Only scan 1000 blocks at a time to limit a massive txn from forming
|
||||
let scan_to = latest_number.min(scan_from + 1000);
|
||||
advance_cosign_protocol_inner(db, key, serai, scan_to).await?;
|
||||
// If we didn't limit the scan_to, break
|
||||
if scan_to == latest_number {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
use serai_client::primitives::NetworkId;
|
||||
use serai_client::primitives::ExternalNetworkId;
|
||||
|
||||
pub use serai_db::*;
|
||||
|
||||
@@ -9,7 +9,7 @@ mod inner_db {
|
||||
SubstrateDb {
|
||||
NextBlock: () -> u64,
|
||||
HandledEvent: (block: [u8; 32]) -> u32,
|
||||
BatchInstructionsHashDb: (network: NetworkId, id: u32) -> [u8; 32]
|
||||
BatchInstructionsHashDb: (network: ExternalNetworkId, id: u32) -> [u8; 32]
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
@@ -6,14 +6,18 @@ use std::{
|
||||
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite};
|
||||
|
||||
use serai_client::{
|
||||
SeraiError, Block, Serai, TemporalSerai,
|
||||
primitives::{BlockHash, EmbeddedEllipticCurve, NetworkId},
|
||||
validator_sets::{primitives::ValidatorSet, ValidatorSetsEvent},
|
||||
in_instructions::InInstructionsEvent,
|
||||
coins::CoinsEvent,
|
||||
in_instructions::InInstructionsEvent,
|
||||
primitives::{BlockHash, ExternalNetworkId},
|
||||
validator_sets::{
|
||||
primitives::{ExternalValidatorSet, ValidatorSet},
|
||||
ValidatorSetsEvent,
|
||||
},
|
||||
Block, Serai, SeraiError, TemporalSerai,
|
||||
};
|
||||
|
||||
use serai_db::DbTxn;
|
||||
@@ -52,54 +56,21 @@ async fn handle_new_set<D: Db>(
|
||||
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
|
||||
serai: &Serai,
|
||||
block: &Block,
|
||||
set: ValidatorSet,
|
||||
set: ExternalValidatorSet,
|
||||
) -> Result<(), SeraiError> {
|
||||
if in_set(key, &serai.as_of(block.hash()), set)
|
||||
if in_set(key, &serai.as_of(block.hash()), set.into())
|
||||
.await?
|
||||
.expect("NewSet for set which doesn't exist")
|
||||
{
|
||||
log::info!("present in set {:?}", set);
|
||||
|
||||
let validators;
|
||||
let mut evrf_public_keys = vec![];
|
||||
{
|
||||
let set_data = {
|
||||
let serai = serai.as_of(block.hash());
|
||||
let serai = serai.validator_sets();
|
||||
let set_participants =
|
||||
serai.participants(set.network).await?.expect("NewSet for set which doesn't exist");
|
||||
serai.participants(set.network.into()).await?.expect("NewSet for set which doesn't exist");
|
||||
|
||||
validators = set_participants
|
||||
.iter()
|
||||
.map(|(k, w)| {
|
||||
(
|
||||
<Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut k.0.as_ref())
|
||||
.expect("invalid key registered as participant"),
|
||||
u16::try_from(*w).unwrap(),
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
for (validator, _) in set_participants {
|
||||
// This is only run for external networks which always do a DKG for Serai
|
||||
let substrate = serai
|
||||
.embedded_elliptic_curve_key(validator, EmbeddedEllipticCurve::Embedwards25519)
|
||||
.await?
|
||||
.expect("Serai called NewSet on a validator without an Embedwards25519 key");
|
||||
// `embedded_elliptic_curves` is documented to have the second entry be the
|
||||
// network-specific curve (if it exists and is distinct from Embedwards25519)
|
||||
let network =
|
||||
if let Some(embedded_elliptic_curve) = set.network.embedded_elliptic_curves().get(1) {
|
||||
serai.embedded_elliptic_curve_key(validator, *embedded_elliptic_curve).await?.expect(
|
||||
"Serai called NewSet on a validator without the embedded key required for the network",
|
||||
)
|
||||
} else {
|
||||
substrate.clone()
|
||||
};
|
||||
evrf_public_keys.push((
|
||||
<[u8; 32]>::try_from(substrate)
|
||||
.expect("validator-sets pallet accepted a key of an invalid length"),
|
||||
network,
|
||||
));
|
||||
}
|
||||
set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::<Vec<_>>()
|
||||
};
|
||||
|
||||
let time = if let Ok(time) = block.time() {
|
||||
@@ -123,7 +94,7 @@ async fn handle_new_set<D: Db>(
|
||||
const SUBSTRATE_TO_TRIBUTARY_TIME_DELAY: u64 = 120;
|
||||
let time = time + SUBSTRATE_TO_TRIBUTARY_TIME_DELAY;
|
||||
|
||||
let spec = TributarySpec::new(block.hash(), time, set, validators, evrf_public_keys);
|
||||
let spec = TributarySpec::new(block.hash(), time, set, set_data);
|
||||
|
||||
log::info!("creating new tributary for {:?}", spec.set());
|
||||
|
||||
@@ -164,7 +135,7 @@ async fn handle_batch_and_burns<Pro: Processors>(
|
||||
};
|
||||
|
||||
let mut batch_block = HashMap::new();
|
||||
let mut batches = HashMap::<NetworkId, Vec<u32>>::new();
|
||||
let mut batches = HashMap::<ExternalNetworkId, Vec<u32>>::new();
|
||||
let mut burns = HashMap::new();
|
||||
|
||||
let serai = serai.as_of(block.hash());
|
||||
@@ -238,8 +209,8 @@ async fn handle_block<D: Db, Pro: Processors>(
|
||||
db: &mut D,
|
||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
|
||||
perform_slash_report: &mpsc::UnboundedSender<ValidatorSet>,
|
||||
tributary_retired: &mpsc::UnboundedSender<ValidatorSet>,
|
||||
perform_slash_report: &mpsc::UnboundedSender<ExternalValidatorSet>,
|
||||
tributary_retired: &mpsc::UnboundedSender<ExternalValidatorSet>,
|
||||
processors: &Pro,
|
||||
serai: &Serai,
|
||||
block: Block,
|
||||
@@ -259,12 +230,8 @@ async fn handle_block<D: Db, Pro: Processors>(
|
||||
panic!("NewSet event wasn't NewSet: {new_set:?}");
|
||||
};
|
||||
|
||||
// If this is Serai, do nothing
|
||||
// We only coordinate/process external networks
|
||||
if set.network == NetworkId::Serai {
|
||||
continue;
|
||||
}
|
||||
|
||||
let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
|
||||
if HandledEvent::is_unhandled(db, hash, event_id) {
|
||||
log::info!("found fresh new set event {:?}", new_set);
|
||||
let mut txn = db.txn();
|
||||
@@ -319,10 +286,7 @@ async fn handle_block<D: Db, Pro: Processors>(
|
||||
panic!("AcceptedHandover event wasn't AcceptedHandover: {accepted_handover:?}");
|
||||
};
|
||||
|
||||
if set.network == NetworkId::Serai {
|
||||
continue;
|
||||
}
|
||||
|
||||
let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
|
||||
if HandledEvent::is_unhandled(db, hash, event_id) {
|
||||
log::info!("found fresh accepted handover event {:?}", accepted_handover);
|
||||
// TODO: This isn't atomic with the event handling
|
||||
@@ -340,10 +304,7 @@ async fn handle_block<D: Db, Pro: Processors>(
|
||||
panic!("SetRetired event wasn't SetRetired: {retired_set:?}");
|
||||
};
|
||||
|
||||
if set.network == NetworkId::Serai {
|
||||
continue;
|
||||
}
|
||||
|
||||
let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
|
||||
if HandledEvent::is_unhandled(db, hash, event_id) {
|
||||
log::info!("found fresh set retired event {:?}", retired_set);
|
||||
let mut txn = db.txn();
|
||||
@@ -373,8 +334,8 @@ async fn handle_new_blocks<D: Db, Pro: Processors>(
|
||||
db: &mut D,
|
||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
|
||||
perform_slash_report: &mpsc::UnboundedSender<ValidatorSet>,
|
||||
tributary_retired: &mpsc::UnboundedSender<ValidatorSet>,
|
||||
perform_slash_report: &mpsc::UnboundedSender<ExternalValidatorSet>,
|
||||
tributary_retired: &mpsc::UnboundedSender<ExternalValidatorSet>,
|
||||
processors: &Pro,
|
||||
serai: &Serai,
|
||||
next_block: &mut u64,
|
||||
@@ -428,8 +389,8 @@ pub async fn scan_task<D: Db, Pro: Processors>(
|
||||
processors: Pro,
|
||||
serai: Arc<Serai>,
|
||||
new_tributary_spec: mpsc::UnboundedSender<TributarySpec>,
|
||||
perform_slash_report: mpsc::UnboundedSender<ValidatorSet>,
|
||||
tributary_retired: mpsc::UnboundedSender<ValidatorSet>,
|
||||
perform_slash_report: mpsc::UnboundedSender<ExternalValidatorSet>,
|
||||
tributary_retired: mpsc::UnboundedSender<ExternalValidatorSet>,
|
||||
) {
|
||||
log::info!("scanning substrate");
|
||||
let mut next_substrate_block = NextBlock::get(&db).unwrap_or_default();
|
||||
@@ -527,9 +488,12 @@ pub async fn scan_task<D: Db, Pro: Processors>(
|
||||
/// retry.
|
||||
pub(crate) async fn expected_next_batch(
|
||||
serai: &Serai,
|
||||
network: NetworkId,
|
||||
network: ExternalNetworkId,
|
||||
) -> Result<u32, SeraiError> {
|
||||
async fn expected_next_batch_inner(serai: &Serai, network: NetworkId) -> Result<u32, SeraiError> {
|
||||
async fn expected_next_batch_inner(
|
||||
serai: &Serai,
|
||||
network: ExternalNetworkId,
|
||||
) -> Result<u32, SeraiError> {
|
||||
let serai = serai.as_of_latest_finalized_block().await?;
|
||||
let last = serai.in_instructions().last_batch_for_network(network).await?;
|
||||
Ok(if let Some(last) = last { last + 1 } else { 0 })
|
||||
@@ -552,7 +516,7 @@ pub(crate) async fn expected_next_batch(
|
||||
/// This is deemed fine.
|
||||
pub(crate) async fn verify_published_batches<D: Db>(
|
||||
txn: &mut D::Transaction<'_>,
|
||||
network: NetworkId,
|
||||
network: ExternalNetworkId,
|
||||
optimistic_up_to: u32,
|
||||
) -> Option<u32> {
|
||||
// TODO: Localize from MainDb to SubstrateDb
|
||||
|
||||
@@ -4,7 +4,7 @@ use std::{
|
||||
collections::{VecDeque, HashSet, HashMap},
|
||||
};
|
||||
|
||||
use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};
|
||||
use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet};
|
||||
|
||||
use processor_messages::CoordinatorMessage;
|
||||
|
||||
@@ -20,7 +20,7 @@ use crate::{
|
||||
pub mod tributary;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct MemProcessors(pub Arc<RwLock<HashMap<NetworkId, VecDeque<CoordinatorMessage>>>>);
|
||||
pub struct MemProcessors(pub Arc<RwLock<HashMap<ExternalNetworkId, VecDeque<CoordinatorMessage>>>>);
|
||||
impl MemProcessors {
|
||||
#[allow(clippy::new_without_default)]
|
||||
pub fn new() -> MemProcessors {
|
||||
@@ -30,12 +30,12 @@ impl MemProcessors {
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Processors for MemProcessors {
|
||||
async fn send(&self, network: NetworkId, msg: impl Send + Into<CoordinatorMessage>) {
|
||||
async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>) {
|
||||
let mut processors = self.0.write().await;
|
||||
let processor = processors.entry(network).or_insert_with(VecDeque::new);
|
||||
processor.push_back(msg.into());
|
||||
}
|
||||
async fn recv(&self, _: NetworkId) -> Message {
|
||||
async fn recv(&self, _: ExternalNetworkId) -> Message {
|
||||
todo!()
|
||||
}
|
||||
async fn ack(&self, _: Message) {
|
||||
@@ -65,8 +65,8 @@ impl LocalP2p {
|
||||
impl P2p for LocalP2p {
|
||||
type Id = usize;
|
||||
|
||||
async fn subscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
|
||||
async fn unsubscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
|
||||
async fn subscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {}
|
||||
async fn unsubscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {}
|
||||
|
||||
async fn send_raw(&self, to: Self::Id, msg: Vec<u8>) {
|
||||
let mut msg_ref = msg.as_slice();
|
||||
|
||||
@@ -7,12 +7,17 @@ use zeroize::Zeroizing;
|
||||
use rand_core::{RngCore, CryptoRng, OsRng};
|
||||
use futures_util::{task::Poll, poll};
|
||||
|
||||
use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{
|
||||
group::{ff::Field, GroupEncoding},
|
||||
Ciphersuite,
|
||||
};
|
||||
|
||||
use sp_application_crypto::sr25519;
|
||||
use borsh::BorshDeserialize;
|
||||
use serai_client::{
|
||||
primitives::NetworkId,
|
||||
validator_sets::primitives::{Session, ValidatorSet},
|
||||
primitives::ExternalNetworkId,
|
||||
validator_sets::primitives::{ExternalValidatorSet, Session},
|
||||
};
|
||||
|
||||
use tokio::time::sleep;
|
||||
@@ -46,24 +51,16 @@ pub fn new_spec<R: RngCore + CryptoRng>(
|
||||
|
||||
let start_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();
|
||||
|
||||
let set = ValidatorSet { session: Session(0), network: NetworkId::Bitcoin };
|
||||
let set = ExternalValidatorSet { session: Session(0), network: ExternalNetworkId::Bitcoin };
|
||||
|
||||
let validators = keys
|
||||
let set_participants = keys
|
||||
.iter()
|
||||
.map(|key| ((<Ristretto as Ciphersuite>::generator() * **key), 1))
|
||||
.map(|key| {
|
||||
(sr25519::Public::from((<Ristretto as Ciphersuite>::generator() * **key).to_bytes()), 1)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// Generate random eVRF keys as none of these test rely on them to have any structure
|
||||
let mut evrf_keys = vec![];
|
||||
for _ in 0 .. keys.len() {
|
||||
let mut substrate = [0; 32];
|
||||
OsRng.fill_bytes(&mut substrate);
|
||||
let mut network = vec![0; 64];
|
||||
OsRng.fill_bytes(&mut network);
|
||||
evrf_keys.push((substrate, network));
|
||||
}
|
||||
|
||||
let res = TributarySpec::new(serai_block, start_time, set, validators, evrf_keys);
|
||||
let res = TributarySpec::new(serai_block, start_time, set, set_participants);
|
||||
assert_eq!(
|
||||
TributarySpec::deserialize_reader(&mut borsh::to_vec(&res).unwrap().as_slice()).unwrap(),
|
||||
res,
|
||||
|
||||
@@ -1,22 +1,27 @@
|
||||
use core::time::Duration;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use zeroize::Zeroizing;
|
||||
use rand_core::{RngCore, OsRng};
|
||||
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite};
|
||||
use frost::Participant;
|
||||
|
||||
use sp_runtime::traits::Verify;
|
||||
use serai_client::{
|
||||
primitives::Signature,
|
||||
validator_sets::primitives::{ValidatorSet, KeyPair},
|
||||
primitives::{SeraiAddress, Signature},
|
||||
validator_sets::primitives::{ExternalValidatorSet, KeyPair},
|
||||
};
|
||||
|
||||
use tokio::time::sleep;
|
||||
|
||||
use serai_db::{Get, DbTxn, Db, MemDb};
|
||||
|
||||
use processor_messages::{key_gen, CoordinatorMessage};
|
||||
use processor_messages::{
|
||||
key_gen::{self, KeyGenId},
|
||||
CoordinatorMessage,
|
||||
};
|
||||
|
||||
use tributary::{TransactionTrait, Tributary};
|
||||
|
||||
@@ -50,41 +55,44 @@ async fn dkg_test() {
|
||||
tokio::spawn(run_tributaries(tributaries.clone()));
|
||||
|
||||
let mut txs = vec![];
|
||||
// Create DKG participation for each key
|
||||
// Create DKG commitments for each key
|
||||
for key in &keys {
|
||||
let mut participation = vec![0; 4096];
|
||||
OsRng.fill_bytes(&mut participation);
|
||||
let attempt = 0;
|
||||
let mut commitments = vec![0; 256];
|
||||
OsRng.fill_bytes(&mut commitments);
|
||||
|
||||
let mut tx =
|
||||
Transaction::DkgParticipation { participation, signed: Transaction::empty_signed() };
|
||||
let mut tx = Transaction::DkgCommitments {
|
||||
attempt,
|
||||
commitments: vec![commitments],
|
||||
signed: Transaction::empty_signed(),
|
||||
};
|
||||
tx.sign(&mut OsRng, spec.genesis(), key);
|
||||
txs.push(tx);
|
||||
}
|
||||
|
||||
let block_before_tx = tributaries[0].1.tip().await;
|
||||
|
||||
// Publish t-1 participations
|
||||
let t = ((keys.len() * 2) / 3) + 1;
|
||||
for (i, tx) in txs.iter().take(t - 1).enumerate() {
|
||||
// Publish all commitments but one
|
||||
for (i, tx) in txs.iter().enumerate().skip(1) {
|
||||
assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
|
||||
}
|
||||
|
||||
// Wait until these are included
|
||||
for tx in txs.iter().skip(1) {
|
||||
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
|
||||
}
|
||||
|
||||
let expected_participations = txs
|
||||
let expected_commitments: HashMap<_, _> = txs
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, tx)| {
|
||||
if let Transaction::DkgParticipation { participation, .. } = tx {
|
||||
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Participation {
|
||||
session: spec.set().session,
|
||||
participant: Participant::new((i + 1).try_into().unwrap()).unwrap(),
|
||||
participation: participation.clone(),
|
||||
})
|
||||
if let Transaction::DkgCommitments { commitments, .. } = tx {
|
||||
(Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments[0].clone())
|
||||
} else {
|
||||
panic!("txs wasn't a DkgParticipation");
|
||||
panic!("txs had non-commitments");
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
.collect();
|
||||
|
||||
async fn new_processors(
|
||||
db: &mut MemDb,
|
||||
@@ -113,30 +121,28 @@ async fn dkg_test() {
|
||||
processors
|
||||
}
|
||||
|
||||
// Instantiate a scanner and verify it has the first two participations to report (and isn't
|
||||
// waiting for `t`)
|
||||
// Instantiate a scanner and verify it has nothing to report
|
||||
let processors = new_processors(&mut dbs[0], &keys[0], &spec, &tributaries[0].1).await;
|
||||
assert_eq!(processors.0.read().await.get(&spec.set().network).unwrap().len(), t - 1);
|
||||
assert!(processors.0.read().await.is_empty());
|
||||
|
||||
// Publish the rest of the participations
|
||||
// Publish the last commitment
|
||||
let block_before_tx = tributaries[0].1.tip().await;
|
||||
for tx in txs.iter().skip(t - 1) {
|
||||
assert_eq!(tributaries[0].1.add_transaction(tx.clone()).await, Ok(true));
|
||||
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
|
||||
}
|
||||
assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));
|
||||
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
|
||||
sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;
|
||||
|
||||
// Verify the scanner emits all KeyGen::Participations messages
|
||||
// Verify the scanner emits a KeyGen::Commitments message
|
||||
handle_new_blocks::<_, _, _, _, _, LocalP2p>(
|
||||
&mut dbs[0],
|
||||
&keys[0],
|
||||
&|_, _, _, _| async {
|
||||
panic!("provided TX caused recognized_id to be called after DkgParticipation")
|
||||
panic!("provided TX caused recognized_id to be called after Commitments")
|
||||
},
|
||||
&processors,
|
||||
&(),
|
||||
&|_| async {
|
||||
panic!(
|
||||
"test tried to publish a new Tributary TX from handle_application_tx after DkgParticipation"
|
||||
"test tried to publish a new Tributary TX from handle_application_tx after Commitments"
|
||||
)
|
||||
},
|
||||
&spec,
|
||||
@@ -145,11 +151,17 @@ async fn dkg_test() {
|
||||
.await;
|
||||
{
|
||||
let mut msgs = processors.0.write().await;
|
||||
assert_eq!(msgs.len(), 1);
|
||||
let msgs = msgs.get_mut(&spec.set().network).unwrap();
|
||||
assert_eq!(msgs.len(), keys.len());
|
||||
for expected in &expected_participations {
|
||||
assert_eq!(&msgs.pop_front().unwrap(), expected);
|
||||
}
|
||||
let mut expected_commitments = expected_commitments.clone();
|
||||
expected_commitments.remove(&Participant::new((1).try_into().unwrap()).unwrap());
|
||||
assert_eq!(
|
||||
msgs.pop_front().unwrap(),
|
||||
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
|
||||
id: KeyGenId { session: spec.set().session, attempt: 0 },
|
||||
commitments: expected_commitments
|
||||
})
|
||||
);
|
||||
assert!(msgs.is_empty());
|
||||
}
|
||||
|
||||
@@ -157,31 +169,38 @@ async fn dkg_test() {
|
||||
for (i, key) in keys.iter().enumerate().skip(1) {
|
||||
let processors = new_processors(&mut dbs[i], key, &spec, &tributaries[i].1).await;
|
||||
let mut msgs = processors.0.write().await;
|
||||
assert_eq!(msgs.len(), 1);
|
||||
let msgs = msgs.get_mut(&spec.set().network).unwrap();
|
||||
assert_eq!(msgs.len(), keys.len());
|
||||
for expected in &expected_participations {
|
||||
assert_eq!(&msgs.pop_front().unwrap(), expected);
|
||||
}
|
||||
let mut expected_commitments = expected_commitments.clone();
|
||||
expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
|
||||
assert_eq!(
|
||||
msgs.pop_front().unwrap(),
|
||||
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
|
||||
id: KeyGenId { session: spec.set().session, attempt: 0 },
|
||||
commitments: expected_commitments
|
||||
})
|
||||
);
|
||||
assert!(msgs.is_empty());
|
||||
}
|
||||
|
||||
let mut substrate_key = [0; 32];
|
||||
OsRng.fill_bytes(&mut substrate_key);
|
||||
let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()];
|
||||
OsRng.fill_bytes(&mut network_key);
|
||||
let key_pair = KeyPair(serai_client::Public(substrate_key), network_key.try_into().unwrap());
|
||||
|
||||
// Now do shares
|
||||
let mut txs = vec![];
|
||||
for (i, key) in keys.iter().enumerate() {
|
||||
let mut txn = dbs[i].txn();
|
||||
|
||||
// Claim we've generated the key pair
|
||||
crate::tributary::generated_key_pair::<MemDb>(&mut txn, spec.genesis(), &key_pair);
|
||||
|
||||
// Publish the nonces
|
||||
for (k, key) in keys.iter().enumerate() {
|
||||
let attempt = 0;
|
||||
let mut tx = Transaction::DkgConfirmationNonces {
|
||||
|
||||
let mut shares = vec![vec![]];
|
||||
for i in 0 .. keys.len() {
|
||||
if i != k {
|
||||
let mut share = vec![0; 256];
|
||||
OsRng.fill_bytes(&mut share);
|
||||
shares.last_mut().unwrap().push(share);
|
||||
}
|
||||
}
|
||||
|
||||
let mut txn = dbs[k].txn();
|
||||
let mut tx = Transaction::DkgShares {
|
||||
attempt,
|
||||
shares,
|
||||
confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0),
|
||||
signed: Transaction::empty_signed(),
|
||||
};
|
||||
@@ -189,6 +208,133 @@ async fn dkg_test() {
|
||||
tx.sign(&mut OsRng, spec.genesis(), key);
|
||||
txs.push(tx);
|
||||
}
|
||||
|
||||
let block_before_tx = tributaries[0].1.tip().await;
|
||||
for (i, tx) in txs.iter().enumerate().skip(1) {
|
||||
assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
|
||||
}
|
||||
for tx in txs.iter().skip(1) {
|
||||
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
|
||||
}
|
||||
|
||||
// With just 4 sets of shares, nothing should happen yet
|
||||
handle_new_blocks::<_, _, _, _, _, LocalP2p>(
|
||||
&mut dbs[0],
|
||||
&keys[0],
|
||||
&|_, _, _, _| async {
|
||||
panic!("provided TX caused recognized_id to be called after some shares")
|
||||
},
|
||||
&processors,
|
||||
&(),
|
||||
&|_| async {
|
||||
panic!(
|
||||
"test tried to publish a new Tributary TX from handle_application_tx after some shares"
|
||||
)
|
||||
},
|
||||
&spec,
|
||||
&tributaries[0].1.reader(),
|
||||
)
|
||||
.await;
|
||||
assert_eq!(processors.0.read().await.len(), 1);
|
||||
assert!(processors.0.read().await[&spec.set().network].is_empty());
|
||||
|
||||
// Publish the final set of shares
|
||||
let block_before_tx = tributaries[0].1.tip().await;
|
||||
assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));
|
||||
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
|
||||
sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;
|
||||
|
||||
// Each scanner should emit a distinct shares message
|
||||
let shares_for = |i: usize| {
|
||||
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares {
|
||||
id: KeyGenId { session: spec.set().session, attempt: 0 },
|
||||
shares: vec![txs
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter_map(|(l, tx)| {
|
||||
if let Transaction::DkgShares { shares, .. } = tx {
|
||||
if i == l {
|
||||
None
|
||||
} else {
|
||||
let relative_i = i - (if i > l { 1 } else { 0 });
|
||||
Some((
|
||||
Participant::new((l + 1).try_into().unwrap()).unwrap(),
|
||||
shares[0][relative_i].clone(),
|
||||
))
|
||||
}
|
||||
} else {
|
||||
panic!("txs had non-shares");
|
||||
}
|
||||
})
|
||||
.collect::<HashMap<_, _>>()],
|
||||
})
|
||||
};
|
||||
|
||||
// Any scanner which has handled the prior blocks should only emit the new event
|
||||
for (i, key) in keys.iter().enumerate() {
|
||||
handle_new_blocks::<_, _, _, _, _, LocalP2p>(
|
||||
&mut dbs[i],
|
||||
key,
|
||||
&|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") },
|
||||
&processors,
|
||||
&(),
|
||||
&|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") },
|
||||
&spec,
|
||||
&tributaries[i].1.reader(),
|
||||
)
|
||||
.await;
|
||||
{
|
||||
let mut msgs = processors.0.write().await;
|
||||
assert_eq!(msgs.len(), 1);
|
||||
let msgs = msgs.get_mut(&spec.set().network).unwrap();
|
||||
assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
|
||||
assert!(msgs.is_empty());
|
||||
}
|
||||
}
|
||||
|
||||
// Yet new scanners should emit all events
|
||||
for (i, key) in keys.iter().enumerate() {
|
||||
let processors = new_processors(&mut MemDb::new(), key, &spec, &tributaries[i].1).await;
|
||||
let mut msgs = processors.0.write().await;
|
||||
assert_eq!(msgs.len(), 1);
|
||||
let msgs = msgs.get_mut(&spec.set().network).unwrap();
|
||||
let mut expected_commitments = expected_commitments.clone();
|
||||
expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
|
||||
assert_eq!(
|
||||
msgs.pop_front().unwrap(),
|
||||
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
|
||||
id: KeyGenId { session: spec.set().session, attempt: 0 },
|
||||
commitments: expected_commitments
|
||||
})
|
||||
);
|
||||
assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
|
||||
assert!(msgs.is_empty());
|
||||
}
|
||||
|
||||
// Send DkgConfirmed
|
||||
let mut substrate_key = [0; 32];
|
||||
OsRng.fill_bytes(&mut substrate_key);
|
||||
let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()];
|
||||
OsRng.fill_bytes(&mut network_key);
|
||||
let key_pair =
|
||||
KeyPair(serai_client::Public::from(substrate_key), network_key.try_into().unwrap());
|
||||
|
||||
let mut txs = vec![];
|
||||
for (i, key) in keys.iter().enumerate() {
|
||||
let attempt = 0;
|
||||
let mut txn = dbs[i].txn();
|
||||
let share =
|
||||
crate::tributary::generated_key_pair::<MemDb>(&mut txn, key, &spec, &key_pair, 0).unwrap();
|
||||
txn.commit();
|
||||
|
||||
let mut tx = Transaction::DkgConfirmed {
|
||||
attempt,
|
||||
confirmation_share: share,
|
||||
signed: Transaction::empty_signed(),
|
||||
};
|
||||
tx.sign(&mut OsRng, spec.genesis(), key);
|
||||
txs.push(tx);
|
||||
}
|
||||
let block_before_tx = tributaries[0].1.tip().await;
|
||||
for (i, tx) in txs.iter().enumerate() {
|
||||
assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
|
||||
@@ -197,35 +343,6 @@ async fn dkg_test() {
|
||||
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
|
||||
}
|
||||
|
||||
// This should not cause any new processor event as the processor doesn't handle DKG confirming
|
||||
for (i, key) in keys.iter().enumerate() {
|
||||
handle_new_blocks::<_, _, _, _, _, LocalP2p>(
|
||||
&mut dbs[i],
|
||||
key,
|
||||
&|_, _, _, _| async {
|
||||
panic!("provided TX caused recognized_id to be called after DkgConfirmationNonces")
|
||||
},
|
||||
&processors,
|
||||
&(),
|
||||
// The Tributary handler should publish ConfirmationShare itself after ConfirmationNonces
|
||||
&|tx| async { assert_eq!(tributaries[i].1.add_transaction(tx).await, Ok(true)) },
|
||||
&spec,
|
||||
&tributaries[i].1.reader(),
|
||||
)
|
||||
.await;
|
||||
{
|
||||
assert!(processors.0.read().await.get(&spec.set().network).unwrap().is_empty());
|
||||
}
|
||||
}
|
||||
|
||||
// Yet once these TXs are on-chain, the tributary should itself publish the confirmation shares
|
||||
// This means in the block after the next block, the keys should be set onto Serai
|
||||
// Sleep twice as long as two blocks, in case there's some stability issue
|
||||
sleep(Duration::from_secs(
|
||||
2 * 2 * u64::from(Tributary::<MemDb, Transaction, LocalP2p>::block_time()),
|
||||
))
|
||||
.await;
|
||||
|
||||
struct CheckPublishSetKeys {
|
||||
spec: TributarySpec,
|
||||
key_pair: KeyPair,
|
||||
@@ -235,25 +352,20 @@ async fn dkg_test() {
|
||||
async fn publish_set_keys(
|
||||
&self,
|
||||
_db: &(impl Sync + Get),
|
||||
set: ValidatorSet,
|
||||
set: ExternalValidatorSet,
|
||||
removed: Vec<SeraiAddress>,
|
||||
key_pair: KeyPair,
|
||||
signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
|
||||
signature: Signature,
|
||||
) {
|
||||
assert_eq!(set, self.spec.set());
|
||||
assert!(removed.is_empty());
|
||||
assert_eq!(self.key_pair, key_pair);
|
||||
assert!(signature.verify(
|
||||
&*serai_client::validator_sets::primitives::set_keys_message(&set, &key_pair),
|
||||
&serai_client::Public(
|
||||
frost::dkg::musig::musig_key::<Ristretto>(
|
||||
&serai_client::validator_sets::primitives::musig_context(set),
|
||||
&self
|
||||
.spec
|
||||
.validators()
|
||||
.into_iter()
|
||||
.zip(signature_participants)
|
||||
.filter_map(|((validator, _), included)| included.then_some(validator))
|
||||
.collect::<Vec<_>>()
|
||||
&*serai_client::validator_sets::primitives::set_keys_message(&set, &[], &key_pair),
|
||||
&serai_client::Public::from(
|
||||
dkg_musig::musig_key_vartime::<Ristretto>(
|
||||
serai_client::validator_sets::primitives::musig_context(set.into()),
|
||||
&self.spec.validators().into_iter().map(|(validator, _)| validator).collect::<Vec<_>>()
|
||||
)
|
||||
.unwrap()
|
||||
.to_bytes()
|
||||
|
||||
@@ -2,12 +2,13 @@ use core::fmt::Debug;
|
||||
|
||||
use rand_core::{RngCore, OsRng};
|
||||
|
||||
use ciphersuite::{group::Group, Ciphersuite, Ristretto};
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::Group, Ciphersuite};
|
||||
|
||||
use scale::{Encode, Decode};
|
||||
use serai_client::{
|
||||
primitives::Signature,
|
||||
validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet, KeyPair},
|
||||
primitives::{SeraiAddress, Signature},
|
||||
validator_sets::primitives::{ExternalValidatorSet, KeyPair, MAX_KEY_SHARES_PER_SET},
|
||||
};
|
||||
use processor_messages::coordinator::SubstrateSignableId;
|
||||
|
||||
@@ -31,9 +32,9 @@ impl PublishSeraiTransaction for () {
|
||||
async fn publish_set_keys(
|
||||
&self,
|
||||
_db: &(impl Sync + serai_db::Get),
|
||||
_set: ValidatorSet,
|
||||
_set: ExternalValidatorSet,
|
||||
_removed: Vec<SeraiAddress>,
|
||||
_key_pair: KeyPair,
|
||||
_signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
|
||||
_signature: Signature,
|
||||
) {
|
||||
panic!("publish_set_keys was called in test")
|
||||
@@ -84,25 +85,23 @@ fn tx_size_limit() {
|
||||
use tributary::TRANSACTION_SIZE_LIMIT;
|
||||
|
||||
let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1;
|
||||
// n coefficients
|
||||
// 2 ECDH values per recipient, and the encrypted share
|
||||
let elements_outside_of_proof = max_dkg_coefficients + ((2 + 1) * MAX_KEY_SHARES_PER_SET);
|
||||
// Then Pedersen Vector Commitments for each DH done, and the associated overhead in the proof
|
||||
// It's handwaved as one commitment per DH, where we do 2 per coefficient and 1 for the explicit
|
||||
// ECDHs
|
||||
let vector_commitments = (2 * max_dkg_coefficients) + (2 * MAX_KEY_SHARES_PER_SET);
|
||||
// Then we have commitments to the `t` polynomial of length 2 + 2 nc, where nc is the amount of
|
||||
// commitments
|
||||
let t_commitments = 2 + (2 * vector_commitments);
|
||||
// The remainder of the proof should be ~30 elements
|
||||
let proof_elements = 30;
|
||||
let max_key_shares_per_individual = MAX_KEY_SHARES_PER_SET - max_dkg_coefficients;
|
||||
// Handwave the DKG Commitments size as the size of the commitments to the coefficients and
|
||||
// 1024 bytes for all overhead
|
||||
let handwaved_dkg_commitments_size = (max_dkg_coefficients * MAX_KEY_LEN) + 1024;
|
||||
assert!(
|
||||
u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=
|
||||
(handwaved_dkg_commitments_size * max_key_shares_per_individual)
|
||||
);
|
||||
|
||||
let handwaved_dkg_size =
|
||||
((elements_outside_of_proof + vector_commitments + t_commitments + proof_elements) *
|
||||
MAX_KEY_LEN) +
|
||||
1024;
|
||||
// Further scale by two in case of any errors in the above
|
||||
assert!(u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >= (2 * handwaved_dkg_size));
|
||||
// Encryption key, PoP (2 elements), message
|
||||
let elements_per_share = 4;
|
||||
let handwaved_dkg_shares_size =
|
||||
(elements_per_share * MAX_KEY_LEN * MAX_KEY_SHARES_PER_SET) + 1024;
|
||||
assert!(
|
||||
u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=
|
||||
(handwaved_dkg_shares_size * max_key_shares_per_individual)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -145,34 +144,84 @@ fn serialize_sign_data() {
|
||||
|
||||
#[test]
|
||||
fn serialize_transaction() {
|
||||
test_read_write(&Transaction::RemoveParticipant {
|
||||
test_read_write(&Transaction::RemoveParticipantDueToDkg {
|
||||
participant: <Ristretto as Ciphersuite>::G::random(&mut OsRng),
|
||||
signed: random_signed_with_nonce(&mut OsRng, 0),
|
||||
});
|
||||
|
||||
test_read_write(&Transaction::DkgParticipation {
|
||||
participation: random_vec(&mut OsRng, 4096),
|
||||
signed: random_signed_with_nonce(&mut OsRng, 0),
|
||||
});
|
||||
{
|
||||
let mut commitments = vec![random_vec(&mut OsRng, 512)];
|
||||
for _ in 0 .. (OsRng.next_u64() % 100) {
|
||||
let mut temp = commitments[0].clone();
|
||||
OsRng.fill_bytes(&mut temp);
|
||||
commitments.push(temp);
|
||||
}
|
||||
test_read_write(&Transaction::DkgCommitments {
|
||||
attempt: random_u32(&mut OsRng),
|
||||
commitments,
|
||||
signed: random_signed_with_nonce(&mut OsRng, 0),
|
||||
});
|
||||
}
|
||||
|
||||
test_read_write(&Transaction::DkgConfirmationNonces {
|
||||
attempt: random_u32(&mut OsRng),
|
||||
confirmation_nonces: {
|
||||
let mut nonces = [0; 64];
|
||||
OsRng.fill_bytes(&mut nonces);
|
||||
nonces
|
||||
},
|
||||
signed: random_signed_with_nonce(&mut OsRng, 0),
|
||||
});
|
||||
{
|
||||
// This supports a variable share length, and variable amount of sent shares, yet share length
|
||||
// and sent shares is expected to be constant among recipients
|
||||
let share_len = usize::try_from((OsRng.next_u64() % 512) + 1).unwrap();
|
||||
let amount_of_shares = usize::try_from((OsRng.next_u64() % 3) + 1).unwrap();
|
||||
// Create a valid vec of shares
|
||||
let mut shares = vec![];
|
||||
// Create up to 150 participants
|
||||
for _ in 0 ..= (OsRng.next_u64() % 150) {
|
||||
// Give each sender multiple shares
|
||||
let mut sender_shares = vec![];
|
||||
for _ in 0 .. amount_of_shares {
|
||||
let mut share = vec![0; share_len];
|
||||
OsRng.fill_bytes(&mut share);
|
||||
sender_shares.push(share);
|
||||
}
|
||||
shares.push(sender_shares);
|
||||
}
|
||||
|
||||
test_read_write(&Transaction::DkgConfirmationShare {
|
||||
test_read_write(&Transaction::DkgShares {
|
||||
attempt: random_u32(&mut OsRng),
|
||||
shares,
|
||||
confirmation_nonces: {
|
||||
let mut nonces = [0; 64];
|
||||
OsRng.fill_bytes(&mut nonces);
|
||||
nonces
|
||||
},
|
||||
signed: random_signed_with_nonce(&mut OsRng, 1),
|
||||
});
|
||||
}
|
||||
|
||||
for i in 0 .. 2 {
|
||||
test_read_write(&Transaction::InvalidDkgShare {
|
||||
attempt: random_u32(&mut OsRng),
|
||||
accuser: frost::Participant::new(
|
||||
u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
|
||||
)
|
||||
.unwrap(),
|
||||
faulty: frost::Participant::new(
|
||||
u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
|
||||
)
|
||||
.unwrap(),
|
||||
blame: if i == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(random_vec(&mut OsRng, 500)).filter(|blame| !blame.is_empty())
|
||||
},
|
||||
signed: random_signed_with_nonce(&mut OsRng, 2),
|
||||
});
|
||||
}
|
||||
|
||||
test_read_write(&Transaction::DkgConfirmed {
|
||||
attempt: random_u32(&mut OsRng),
|
||||
confirmation_share: {
|
||||
let mut share = [0; 32];
|
||||
OsRng.fill_bytes(&mut share);
|
||||
share
|
||||
},
|
||||
signed: random_signed_with_nonce(&mut OsRng, 1),
|
||||
signed: random_signed_with_nonce(&mut OsRng, 2),
|
||||
});
|
||||
|
||||
{
|
||||
|
||||
@@ -3,7 +3,8 @@ use std::{sync::Arc, collections::HashSet};
|
||||
|
||||
use rand_core::OsRng;
|
||||
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite};
|
||||
|
||||
use tokio::{
|
||||
sync::{mpsc, broadcast},
|
||||
@@ -29,7 +30,7 @@ async fn sync_test() {
|
||||
let mut keys = new_keys(&mut OsRng);
|
||||
let spec = new_spec(&mut OsRng, &keys);
|
||||
// Ensure this can have a node fail
|
||||
assert!(spec.n() > spec.t());
|
||||
assert!(spec.n(&[]) > spec.t());
|
||||
|
||||
let mut tributaries = new_tributaries(&keys, &spec)
|
||||
.await
|
||||
@@ -142,7 +143,7 @@ async fn sync_test() {
|
||||
// Because only `t` validators are used in a commit, take n - t nodes offline
|
||||
// leaving only `t` nodes. Which should force it to participate in the consensus
|
||||
// of next blocks.
|
||||
let spares = usize::from(spec.n() - spec.t());
|
||||
let spares = usize::from(spec.n(&[]) - spec.t());
|
||||
for thread in p2p_threads.iter().take(spares) {
|
||||
thread.abort();
|
||||
}
|
||||
|
||||
@@ -37,14 +37,15 @@ async fn tx_test() {
|
||||
usize::try_from(OsRng.next_u64() % u64::try_from(tributaries.len()).unwrap()).unwrap();
|
||||
let key = keys[sender].clone();
|
||||
|
||||
let block_before_tx = tributaries[sender].1.tip().await;
|
||||
let attempt = 0;
|
||||
let mut commitments = vec![0; 256];
|
||||
OsRng.fill_bytes(&mut commitments);
|
||||
|
||||
// Create the TX with a null signature so we can get its sig hash
|
||||
let mut tx = Transaction::DkgParticipation {
|
||||
participation: {
|
||||
let mut participation = vec![0; 4096];
|
||||
OsRng.fill_bytes(&mut participation);
|
||||
participation
|
||||
},
|
||||
let block_before_tx = tributaries[sender].1.tip().await;
|
||||
let mut tx = Transaction::DkgCommitments {
|
||||
attempt,
|
||||
commitments: vec![commitments.clone()],
|
||||
signed: Transaction::empty_signed(),
|
||||
};
|
||||
tx.sign(&mut OsRng, spec.genesis(), &key);
|
||||
|
||||
@@ -3,10 +3,11 @@ use std::collections::HashMap;
|
||||
use scale::Encode;
|
||||
use borsh::{BorshSerialize, BorshDeserialize};
|
||||
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite};
|
||||
use frost::Participant;
|
||||
|
||||
use serai_client::validator_sets::primitives::{KeyPair, ValidatorSet};
|
||||
use serai_client::validator_sets::primitives::{KeyPair, ExternalValidatorSet};
|
||||
|
||||
use processor_messages::coordinator::SubstrateSignableId;
|
||||
|
||||
@@ -18,6 +19,7 @@ use crate::tributary::{Label, Transaction};
|
||||
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
|
||||
pub enum Topic {
|
||||
Dkg,
|
||||
DkgConfirmation,
|
||||
SubstrateSign(SubstrateSignableId),
|
||||
Sign([u8; 32]),
|
||||
@@ -45,13 +47,15 @@ pub enum Accumulation {
|
||||
create_db!(
|
||||
Tributary {
|
||||
SeraiBlockNumber: (hash: [u8; 32]) -> u64,
|
||||
SeraiDkgCompleted: (set: ValidatorSet) -> [u8; 32],
|
||||
SeraiDkgCompleted: (spec: ExternalValidatorSet) -> [u8; 32],
|
||||
|
||||
TributaryBlockNumber: (block: [u8; 32]) -> u32,
|
||||
LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32],
|
||||
|
||||
// TODO: Revisit the point of this
|
||||
FatalSlashes: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
|
||||
RemovedAsOfDkgAttempt: (genesis: [u8; 32], attempt: u32) -> Vec<[u8; 32]>,
|
||||
OfflineDuringDkg: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
|
||||
// TODO: Combine these two
|
||||
FatallySlashed: (genesis: [u8; 32], account: [u8; 32]) -> (),
|
||||
SlashPoints: (genesis: [u8; 32], account: [u8; 32]) -> u32,
|
||||
@@ -64,9 +68,11 @@ create_db!(
|
||||
DataReceived: (genesis: [u8; 32], data_spec: &DataSpecification) -> u16,
|
||||
DataDb: (genesis: [u8; 32], data_spec: &DataSpecification, signer_bytes: &[u8; 32]) -> Vec<u8>,
|
||||
|
||||
DkgParticipation: (genesis: [u8; 32], from: u16) -> Vec<u8>,
|
||||
DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec<u8>,
|
||||
ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
|
||||
DkgKeyPair: (genesis: [u8; 32]) -> KeyPair,
|
||||
DkgKeyPair: (genesis: [u8; 32], attempt: u32) -> KeyPair,
|
||||
KeyToDkgAttempt: (key: [u8; 32]) -> u32,
|
||||
DkgLocallyCompleted: (genesis: [u8; 32]) -> (),
|
||||
|
||||
PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>,
|
||||
|
||||
@@ -75,7 +81,7 @@ create_db!(
|
||||
SlashReports: (genesis: [u8; 32], signer: [u8; 32]) -> Vec<u32>,
|
||||
SlashReported: (genesis: [u8; 32]) -> u16,
|
||||
SlashReportCutOff: (genesis: [u8; 32]) -> u64,
|
||||
SlashReport: (set: ValidatorSet) -> Vec<([u8; 32], u32)>,
|
||||
SlashReport: (set: ExternalValidatorSet) -> Vec<([u8; 32], u32)>,
|
||||
}
|
||||
);
|
||||
|
||||
@@ -118,12 +124,12 @@ impl AttemptDb {
|
||||
|
||||
pub fn attempt(getter: &impl Get, genesis: [u8; 32], topic: Topic) -> Option<u32> {
|
||||
let attempt = Self::get(getter, genesis, &topic);
|
||||
// Don't require explicit recognition of the DkgConfirmation topic as it starts when the chain
|
||||
// does
|
||||
// Don't require explicit recognition of the Dkg topic as it starts when the chain does
|
||||
// Don't require explicit recognition of the SlashReport topic as it isn't a DoS risk and it
|
||||
// should always happen (eventually)
|
||||
if attempt.is_none() &&
|
||||
((topic == Topic::DkgConfirmation) ||
|
||||
((topic == Topic::Dkg) ||
|
||||
(topic == Topic::DkgConfirmation) ||
|
||||
(topic == Topic::SubstrateSign(SubstrateSignableId::SlashReport)))
|
||||
{
|
||||
return Some(0);
|
||||
@@ -150,12 +156,16 @@ impl ReattemptDb {
|
||||
// 5 minutes for attempts 0 ..= 2, 10 minutes for attempts 3 ..= 5, 15 minutes for attempts > 5
|
||||
// Assumes no event will take longer than 15 minutes, yet grows the time in case there are
|
||||
// network bandwidth issues
|
||||
let reattempt_delay = BASE_REATTEMPT_DELAY *
|
||||
let mut reattempt_delay = BASE_REATTEMPT_DELAY *
|
||||
((AttemptDb::attempt(txn, genesis, topic)
|
||||
.expect("scheduling re-attempt for unknown topic") /
|
||||
3) +
|
||||
1)
|
||||
.min(3);
|
||||
// Allow more time for DKGs since they have an extra round and much more data
|
||||
if matches!(topic, Topic::Dkg) {
|
||||
reattempt_delay *= 4;
|
||||
}
|
||||
let upon_block = current_block_number + reattempt_delay;
|
||||
|
||||
let mut reattempts = Self::get(txn, genesis, upon_block).unwrap_or(vec![]);
|
||||
|
||||
@@ -4,16 +4,17 @@ use std::collections::HashMap;
|
||||
use zeroize::Zeroizing;
|
||||
use rand_core::OsRng;
|
||||
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite};
|
||||
use frost::dkg::Participant;
|
||||
|
||||
use scale::{Encode, Decode};
|
||||
use serai_client::{Signature, validator_sets::primitives::KeyPair};
|
||||
use serai_client::validator_sets::primitives::KeyPair;
|
||||
|
||||
use tributary::{Signed, TransactionKind, TransactionTrait};
|
||||
|
||||
use processor_messages::{
|
||||
key_gen::self,
|
||||
key_gen::{self, KeyGenId},
|
||||
coordinator::{self, SubstrateSignableId, SubstrateSignId},
|
||||
sign::{self, SignId},
|
||||
};
|
||||
@@ -38,20 +39,33 @@ pub fn dkg_confirmation_nonces(
|
||||
txn: &mut impl DbTxn,
|
||||
attempt: u32,
|
||||
) -> [u8; 64] {
|
||||
DkgConfirmer::new(key, spec, txn, attempt).preprocess()
|
||||
DkgConfirmer::new(key, spec, txn, attempt)
|
||||
.expect("getting DKG confirmation nonces for unknown attempt")
|
||||
.preprocess()
|
||||
}
|
||||
|
||||
pub fn generated_key_pair<D: Db>(
|
||||
txn: &mut D::Transaction<'_>,
|
||||
genesis: [u8; 32],
|
||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
spec: &TributarySpec,
|
||||
key_pair: &KeyPair,
|
||||
) {
|
||||
DkgKeyPair::set(txn, genesis, key_pair);
|
||||
attempt: u32,
|
||||
) -> Result<[u8; 32], Participant> {
|
||||
DkgKeyPair::set(txn, spec.genesis(), attempt, key_pair);
|
||||
KeyToDkgAttempt::set(txn, key_pair.0 .0, &attempt);
|
||||
let preprocesses = ConfirmationNonces::get(txn, spec.genesis(), attempt).unwrap();
|
||||
DkgConfirmer::new(key, spec, txn, attempt)
|
||||
.expect("claiming to have generated a key pair for an unrecognized attempt")
|
||||
.share(preprocesses, key_pair)
|
||||
}
|
||||
|
||||
fn unflatten(spec: &TributarySpec, data: &mut HashMap<Participant, Vec<u8>>) {
|
||||
fn unflatten(
|
||||
spec: &TributarySpec,
|
||||
removed: &[<Ristretto as Ciphersuite>::G],
|
||||
data: &mut HashMap<Participant, Vec<u8>>,
|
||||
) {
|
||||
for (validator, _) in spec.validators() {
|
||||
let Some(range) = spec.i(validator) else { continue };
|
||||
let Some(range) = spec.i(removed, validator) else { continue };
|
||||
let Some(all_segments) = data.remove(&range.start) else {
|
||||
continue;
|
||||
};
|
||||
@@ -75,6 +89,7 @@ impl<
|
||||
{
|
||||
fn accumulate(
|
||||
&mut self,
|
||||
removed: &[<Ristretto as Ciphersuite>::G],
|
||||
data_spec: &DataSpecification,
|
||||
signer: <Ristretto as Ciphersuite>::G,
|
||||
data: &Vec<u8>,
|
||||
@@ -85,7 +100,10 @@ impl<
|
||||
panic!("accumulating data for a participant multiple times");
|
||||
}
|
||||
let signer_shares = {
|
||||
let signer_i = self.spec.i(signer).expect("transaction signer wasn't a member of the set");
|
||||
let Some(signer_i) = self.spec.i(removed, signer) else {
|
||||
log::warn!("accumulating data from {} who was removed", hex::encode(signer.to_bytes()));
|
||||
return Accumulation::NotReady;
|
||||
};
|
||||
u16::from(signer_i.end) - u16::from(signer_i.start)
|
||||
};
|
||||
|
||||
@@ -98,7 +116,11 @@ impl<
|
||||
|
||||
// If 2/3rds of the network participated in this preprocess, queue it for an automatic
|
||||
// re-attempt
|
||||
if (data_spec.label == Label::Preprocess) && received_range.contains(&self.spec.t()) {
|
||||
// DkgConfirmation doesn't have a re-attempt as it's just an extension for Dkg
|
||||
if (data_spec.label == Label::Preprocess) &&
|
||||
received_range.contains(&self.spec.t()) &&
|
||||
(data_spec.topic != Topic::DkgConfirmation)
|
||||
{
|
||||
// Double check the attempt on this entry, as we don't want to schedule a re-attempt if this
|
||||
// is an old entry
|
||||
// This is an assert, not part of the if check, as old data shouldn't be here in the first
|
||||
@@ -108,7 +130,10 @@ impl<
|
||||
}
|
||||
|
||||
// If we have all the needed commitments/preprocesses/shares, tell the processor
|
||||
if received_range.contains(&self.spec.t()) {
|
||||
let needs_everyone =
|
||||
(data_spec.topic == Topic::Dkg) || (data_spec.topic == Topic::DkgConfirmation);
|
||||
let needed = if needs_everyone { self.spec.n(removed) } else { self.spec.t() };
|
||||
if received_range.contains(&needed) {
|
||||
log::debug!(
|
||||
"accumulation for entry {:?} attempt #{} is ready",
|
||||
&data_spec.topic,
|
||||
@@ -117,7 +142,7 @@ impl<
|
||||
|
||||
let mut data = HashMap::new();
|
||||
for validator in self.spec.validators().iter().map(|validator| validator.0) {
|
||||
let Some(i) = self.spec.i(validator) else { continue };
|
||||
let Some(i) = self.spec.i(removed, validator) else { continue };
|
||||
data.insert(
|
||||
i.start,
|
||||
if let Some(data) = DataDb::get(self.txn, genesis, data_spec, &validator.to_bytes()) {
|
||||
@@ -128,10 +153,10 @@ impl<
|
||||
);
|
||||
}
|
||||
|
||||
assert_eq!(data.len(), usize::from(self.spec.t()));
|
||||
assert_eq!(data.len(), usize::from(needed));
|
||||
|
||||
// Remove our own piece of data, if we were involved
|
||||
if let Some(i) = self.spec.i(Ristretto::generator() * self.our_key.deref()) {
|
||||
if let Some(i) = self.spec.i(removed, Ristretto::generator() * self.our_key.deref()) {
|
||||
if data.remove(&i.start).is_some() {
|
||||
return Accumulation::Ready(DataSet::Participating(data));
|
||||
}
|
||||
@@ -143,6 +168,7 @@ impl<
|
||||
|
||||
fn handle_data(
|
||||
&mut self,
|
||||
removed: &[<Ristretto as Ciphersuite>::G],
|
||||
data_spec: &DataSpecification,
|
||||
bytes: &Vec<u8>,
|
||||
signed: &Signed,
|
||||
@@ -188,15 +214,21 @@ impl<
|
||||
// TODO: If this is shares, we need to check they are part of the selected signing set
|
||||
|
||||
// Accumulate this data
|
||||
self.accumulate(data_spec, signed.signer, bytes)
|
||||
self.accumulate(removed, data_spec, signed.signer, bytes)
|
||||
}
|
||||
|
||||
fn check_sign_data_len(
|
||||
&mut self,
|
||||
removed: &[<Ristretto as Ciphersuite>::G],
|
||||
signer: <Ristretto as Ciphersuite>::G,
|
||||
len: usize,
|
||||
) -> Result<(), ()> {
|
||||
let signer_i = self.spec.i(signer).expect("signer wasn't a member of the set");
|
||||
let Some(signer_i) = self.spec.i(removed, signer) else {
|
||||
// TODO: Ensure processor doesn't so participate/check how it handles removals for being
|
||||
// offline
|
||||
self.fatal_slash(signer.to_bytes(), "signer participated despite being removed");
|
||||
Err(())?
|
||||
};
|
||||
if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) {
|
||||
self.fatal_slash(
|
||||
signer.to_bytes(),
|
||||
@@ -223,9 +255,12 @@ impl<
|
||||
}
|
||||
|
||||
match tx {
|
||||
Transaction::RemoveParticipant { participant, signed } => {
|
||||
if self.spec.i(participant).is_none() {
|
||||
self.fatal_slash(participant.to_bytes(), "RemoveParticipant vote for non-validator");
|
||||
Transaction::RemoveParticipantDueToDkg { participant, signed } => {
|
||||
if self.spec.i(&[], participant).is_none() {
|
||||
self.fatal_slash(
|
||||
participant.to_bytes(),
|
||||
"RemoveParticipantDueToDkg vote for non-validator",
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -240,106 +275,268 @@ impl<
|
||||
|
||||
let prior_votes = VotesToRemove::get(self.txn, genesis, participant).unwrap_or(0);
|
||||
let signer_votes =
|
||||
self.spec.i(signed.signer).expect("signer wasn't a validator for this network?");
|
||||
self.spec.i(&[], signed.signer).expect("signer wasn't a validator for this network?");
|
||||
let new_votes = prior_votes + u16::from(signer_votes.end) - u16::from(signer_votes.start);
|
||||
VotesToRemove::set(self.txn, genesis, participant, &new_votes);
|
||||
if ((prior_votes + 1) ..= new_votes).contains(&self.spec.t()) {
|
||||
self.fatal_slash(participant, "RemoveParticipant vote")
|
||||
self.fatal_slash(participant, "RemoveParticipantDueToDkg vote")
|
||||
}
|
||||
}
|
||||
|
||||
Transaction::DkgParticipation { participation, signed } => {
|
||||
// Send the participation to the processor
|
||||
Transaction::DkgCommitments { attempt, commitments, signed } => {
|
||||
let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
|
||||
self.fatal_slash(signed.signer.to_bytes(), "DkgCommitments with an unrecognized attempt");
|
||||
return;
|
||||
};
|
||||
let Ok(()) = self.check_sign_data_len(&removed, signed.signer, commitments.len()) else {
|
||||
return;
|
||||
};
|
||||
let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Preprocess, attempt };
|
||||
match self.handle_data(&removed, &data_spec, &commitments.encode(), &signed) {
|
||||
Accumulation::Ready(DataSet::Participating(mut commitments)) => {
|
||||
log::info!("got all DkgCommitments for {}", hex::encode(genesis));
|
||||
unflatten(self.spec, &removed, &mut commitments);
|
||||
self
|
||||
.processors
|
||||
.send(
|
||||
self.spec.set().network,
|
||||
key_gen::CoordinatorMessage::Commitments {
|
||||
id: KeyGenId { session: self.spec.set().session, attempt },
|
||||
commitments,
|
||||
},
|
||||
)
|
||||
.await;
|
||||
}
|
||||
Accumulation::Ready(DataSet::NotParticipating) => {
|
||||
assert!(
|
||||
removed.contains(&(Ristretto::generator() * self.our_key.deref())),
|
||||
"NotParticipating in a DkgCommitments we weren't removed for"
|
||||
);
|
||||
}
|
||||
Accumulation::NotReady => {}
|
||||
}
|
||||
}
|
||||
|
||||
Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => {
|
||||
let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
|
||||
self.fatal_slash(signed.signer.to_bytes(), "DkgShares with an unrecognized attempt");
|
||||
return;
|
||||
};
|
||||
let not_participating = removed.contains(&(Ristretto::generator() * self.our_key.deref()));
|
||||
|
||||
let Ok(()) = self.check_sign_data_len(&removed, signed.signer, shares.len()) else {
|
||||
return;
|
||||
};
|
||||
|
||||
let Some(sender_i) = self.spec.i(&removed, signed.signer) else {
|
||||
self.fatal_slash(
|
||||
signed.signer.to_bytes(),
|
||||
"DkgShares for a DKG they aren't participating in",
|
||||
);
|
||||
return;
|
||||
};
|
||||
let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start);
|
||||
for shares in &shares {
|
||||
if shares.len() != (usize::from(self.spec.n(&removed) - sender_is_len)) {
|
||||
self.fatal_slash(signed.signer.to_bytes(), "invalid amount of DKG shares");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Save each share as needed for blame
|
||||
for (from_offset, shares) in shares.iter().enumerate() {
|
||||
let from =
|
||||
Participant::new(u16::from(sender_i.start) + u16::try_from(from_offset).unwrap())
|
||||
.unwrap();
|
||||
|
||||
for (to_offset, share) in shares.iter().enumerate() {
|
||||
// 0-indexed (the enumeration) to 1-indexed (Participant)
|
||||
let mut to = u16::try_from(to_offset).unwrap() + 1;
|
||||
// Adjust for the omission of the sender's own shares
|
||||
if to >= u16::from(sender_i.start) {
|
||||
to += u16::from(sender_i.end) - u16::from(sender_i.start);
|
||||
}
|
||||
let to = Participant::new(to).unwrap();
|
||||
|
||||
DkgShare::set(self.txn, genesis, from.into(), to.into(), share);
|
||||
}
|
||||
}
|
||||
|
||||
// Filter down to only our share's bytes for handle
|
||||
let our_shares = if let Some(our_i) =
|
||||
self.spec.i(&removed, Ristretto::generator() * self.our_key.deref())
|
||||
{
|
||||
if sender_i == our_i {
|
||||
vec![]
|
||||
} else {
|
||||
// 1-indexed to 0-indexed
|
||||
let mut our_i_pos = u16::from(our_i.start) - 1;
|
||||
// Handle the omission of the sender's own data
|
||||
if u16::from(our_i.start) > u16::from(sender_i.start) {
|
||||
our_i_pos -= sender_is_len;
|
||||
}
|
||||
let our_i_pos = usize::from(our_i_pos);
|
||||
shares
|
||||
.iter_mut()
|
||||
.map(|shares| {
|
||||
shares
|
||||
.drain(
|
||||
our_i_pos ..
|
||||
(our_i_pos + usize::from(u16::from(our_i.end) - u16::from(our_i.start))),
|
||||
)
|
||||
.collect::<Vec<_>>()
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
} else {
|
||||
assert!(
|
||||
not_participating,
|
||||
"we didn't have an i while handling DkgShares we weren't removed for"
|
||||
);
|
||||
// Since we're not participating, simply save vec![] for our shares
|
||||
vec![]
|
||||
};
|
||||
// Drop shares as it's presumably been mutated into invalidity
|
||||
drop(shares);
|
||||
|
||||
let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt };
|
||||
let encoded_data = (confirmation_nonces.to_vec(), our_shares.encode()).encode();
|
||||
match self.handle_data(&removed, &data_spec, &encoded_data, &signed) {
|
||||
Accumulation::Ready(DataSet::Participating(confirmation_nonces_and_shares)) => {
|
||||
log::info!("got all DkgShares for {}", hex::encode(genesis));
|
||||
|
||||
let mut confirmation_nonces = HashMap::new();
|
||||
let mut shares = HashMap::new();
|
||||
for (participant, confirmation_nonces_and_shares) in confirmation_nonces_and_shares {
|
||||
let (these_confirmation_nonces, these_shares) =
|
||||
<(Vec<u8>, Vec<u8>)>::decode(&mut confirmation_nonces_and_shares.as_slice())
|
||||
.unwrap();
|
||||
confirmation_nonces.insert(participant, these_confirmation_nonces);
|
||||
shares.insert(participant, these_shares);
|
||||
}
|
||||
ConfirmationNonces::set(self.txn, genesis, attempt, &confirmation_nonces);
|
||||
|
||||
// shares is a HashMap<Participant, Vec<Vec<Vec<u8>>>>, with the values representing:
|
||||
// - Each of the sender's shares
|
||||
// - Each of the our shares
|
||||
// - Each share
|
||||
// We need a Vec<HashMap<Participant, Vec<u8>>>, with the outer being each of ours
|
||||
let mut expanded_shares = vec![];
|
||||
for (sender_start_i, shares) in shares {
|
||||
let shares: Vec<Vec<Vec<u8>>> = Vec::<_>::decode(&mut shares.as_slice()).unwrap();
|
||||
for (sender_i_offset, our_shares) in shares.into_iter().enumerate() {
|
||||
for (our_share_i, our_share) in our_shares.into_iter().enumerate() {
|
||||
if expanded_shares.len() <= our_share_i {
|
||||
expanded_shares.push(HashMap::new());
|
||||
}
|
||||
expanded_shares[our_share_i].insert(
|
||||
Participant::new(
|
||||
u16::from(sender_start_i) + u16::try_from(sender_i_offset).unwrap(),
|
||||
)
|
||||
.unwrap(),
|
||||
our_share,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self
|
||||
.processors
|
||||
.send(
|
||||
self.spec.set().network,
|
||||
key_gen::CoordinatorMessage::Shares {
|
||||
id: KeyGenId { session: self.spec.set().session, attempt },
|
||||
shares: expanded_shares,
|
||||
},
|
||||
)
|
||||
.await;
|
||||
}
|
||||
Accumulation::Ready(DataSet::NotParticipating) => {
|
||||
assert!(not_participating, "NotParticipating in a DkgShares we weren't removed for");
|
||||
}
|
||||
Accumulation::NotReady => {}
|
||||
}
|
||||
}
|
||||
|
||||
Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => {
|
||||
let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
|
||||
self
|
||||
.fatal_slash(signed.signer.to_bytes(), "InvalidDkgShare with an unrecognized attempt");
|
||||
return;
|
||||
};
|
||||
let Some(range) = self.spec.i(&removed, signed.signer) else {
|
||||
self.fatal_slash(
|
||||
signed.signer.to_bytes(),
|
||||
"InvalidDkgShare for a DKG they aren't participating in",
|
||||
);
|
||||
return;
|
||||
};
|
||||
if !range.contains(&accuser) {
|
||||
self.fatal_slash(
|
||||
signed.signer.to_bytes(),
|
||||
"accused with a Participant index which wasn't theirs",
|
||||
);
|
||||
return;
|
||||
}
|
||||
if range.contains(&faulty) {
|
||||
self.fatal_slash(signed.signer.to_bytes(), "accused self of having an InvalidDkgShare");
|
||||
return;
|
||||
}
|
||||
|
||||
let Some(share) = DkgShare::get(self.txn, genesis, accuser.into(), faulty.into()) else {
|
||||
self.fatal_slash(
|
||||
signed.signer.to_bytes(),
|
||||
"InvalidDkgShare had a non-existent faulty participant",
|
||||
);
|
||||
return;
|
||||
};
|
||||
self
|
||||
.processors
|
||||
.send(
|
||||
self.spec.set().network,
|
||||
key_gen::CoordinatorMessage::Participation {
|
||||
session: self.spec.set().session,
|
||||
participant: self
|
||||
.spec
|
||||
.i(signed.signer)
|
||||
.expect("signer wasn't a validator for this network?")
|
||||
.start,
|
||||
participation,
|
||||
key_gen::CoordinatorMessage::VerifyBlame {
|
||||
id: KeyGenId { session: self.spec.set().session, attempt },
|
||||
accuser,
|
||||
accused: faulty,
|
||||
share,
|
||||
blame,
|
||||
},
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
Transaction::DkgConfirmationNonces { attempt, confirmation_nonces, signed } => {
|
||||
let data_spec =
|
||||
DataSpecification { topic: Topic::DkgConfirmation, label: Label::Preprocess, attempt };
|
||||
match self.handle_data(&data_spec, &confirmation_nonces.to_vec(), &signed) {
|
||||
Accumulation::Ready(DataSet::Participating(confirmation_nonces)) => {
|
||||
log::info!(
|
||||
"got all DkgConfirmationNonces for {}, attempt {attempt}",
|
||||
hex::encode(genesis)
|
||||
);
|
||||
Transaction::DkgConfirmed { attempt, confirmation_share, signed } => {
|
||||
let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
|
||||
self.fatal_slash(signed.signer.to_bytes(), "DkgConfirmed with an unrecognized attempt");
|
||||
return;
|
||||
};
|
||||
|
||||
ConfirmationNonces::set(self.txn, genesis, attempt, &confirmation_nonces);
|
||||
|
||||
// Send the expected DkgConfirmationShare
|
||||
// TODO: Slight race condition here due to set, publish tx, then commit txn
|
||||
let key_pair = DkgKeyPair::get(self.txn, genesis)
|
||||
.expect("participating in confirming key we don't have");
|
||||
let mut tx = match DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt)
|
||||
.share(confirmation_nonces, &key_pair)
|
||||
{
|
||||
Ok(confirmation_share) => Transaction::DkgConfirmationShare {
|
||||
attempt,
|
||||
confirmation_share,
|
||||
signed: Transaction::empty_signed(),
|
||||
},
|
||||
Err(participant) => Transaction::RemoveParticipant {
|
||||
participant: self.spec.reverse_lookup_i(participant).unwrap(),
|
||||
signed: Transaction::empty_signed(),
|
||||
},
|
||||
};
|
||||
tx.sign(&mut OsRng, genesis, self.our_key);
|
||||
self.publish_tributary_tx.publish_tributary_tx(tx).await;
|
||||
}
|
||||
Accumulation::Ready(DataSet::NotParticipating) | Accumulation::NotReady => {}
|
||||
}
|
||||
}
|
||||
|
||||
Transaction::DkgConfirmationShare { attempt, confirmation_share, signed } => {
|
||||
let data_spec =
|
||||
DataSpecification { topic: Topic::DkgConfirmation, label: Label::Share, attempt };
|
||||
match self.handle_data(&data_spec, &confirmation_share.to_vec(), &signed) {
|
||||
match self.handle_data(&removed, &data_spec, &confirmation_share.to_vec(), &signed) {
|
||||
Accumulation::Ready(DataSet::Participating(shares)) => {
|
||||
log::info!(
|
||||
"got all DkgConfirmationShare for {}, attempt {attempt}",
|
||||
hex::encode(genesis)
|
||||
);
|
||||
log::info!("got all DkgConfirmed for {}", hex::encode(genesis));
|
||||
|
||||
let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
|
||||
panic!(
|
||||
"DkgConfirmed for everyone yet didn't have the removed parties for this attempt",
|
||||
);
|
||||
};
|
||||
|
||||
let preprocesses = ConfirmationNonces::get(self.txn, genesis, attempt).unwrap();
|
||||
|
||||
// TODO: This can technically happen under very very very specific timing as the txn
|
||||
// put happens before DkgConfirmationShare, yet the txn isn't guaranteed to be
|
||||
// committed
|
||||
let key_pair = DkgKeyPair::get(self.txn, genesis).expect(
|
||||
"in DkgConfirmationShare handling, which happens after everyone \
|
||||
(including us) fires DkgConfirmationShare, yet no confirming key pair",
|
||||
// put happens before DkgConfirmed, yet the txn commit isn't guaranteed to
|
||||
let key_pair = DkgKeyPair::get(self.txn, genesis, attempt).expect(
|
||||
"in DkgConfirmed handling, which happens after everyone \
|
||||
(including us) fires DkgConfirmed, yet no confirming key pair",
|
||||
);
|
||||
|
||||
// Determine the bitstring representing who participated before we move `shares`
|
||||
let validators = self.spec.validators();
|
||||
let mut signature_participants = bitvec::vec::BitVec::with_capacity(validators.len());
|
||||
for (participant, _) in validators {
|
||||
signature_participants.push(
|
||||
(participant == (<Ristretto as Ciphersuite>::generator() * self.our_key.deref())) ||
|
||||
shares.contains_key(&self.spec.i(participant).unwrap().start),
|
||||
);
|
||||
}
|
||||
|
||||
// Produce the final signature
|
||||
let mut confirmer = DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt);
|
||||
let mut confirmer = DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt)
|
||||
.expect("confirming DKG for unrecognized attempt");
|
||||
let sig = match confirmer.complete(preprocesses, &key_pair, shares) {
|
||||
Ok(sig) => sig,
|
||||
Err(p) => {
|
||||
let mut tx = Transaction::RemoveParticipant {
|
||||
participant: self.spec.reverse_lookup_i(p).unwrap(),
|
||||
let mut tx = Transaction::RemoveParticipantDueToDkg {
|
||||
participant: self.spec.reverse_lookup_i(&removed, p).unwrap(),
|
||||
signed: Transaction::empty_signed(),
|
||||
};
|
||||
tx.sign(&mut OsRng, genesis, self.our_key);
|
||||
@@ -348,18 +545,23 @@ impl<
|
||||
}
|
||||
};
|
||||
|
||||
DkgLocallyCompleted::set(self.txn, genesis, &());
|
||||
|
||||
self
|
||||
.publish_serai_tx
|
||||
.publish_set_keys(
|
||||
self.db,
|
||||
self.spec.set(),
|
||||
removed.into_iter().map(|key| key.to_bytes().into()).collect(),
|
||||
key_pair,
|
||||
signature_participants,
|
||||
Signature(sig),
|
||||
sig.into(),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
Accumulation::Ready(DataSet::NotParticipating) | Accumulation::NotReady => {}
|
||||
Accumulation::Ready(DataSet::NotParticipating) => {
|
||||
panic!("wasn't a participant in DKG confirmination shares")
|
||||
}
|
||||
Accumulation::NotReady => {}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -417,8 +619,19 @@ impl<
|
||||
}
|
||||
|
||||
Transaction::SubstrateSign(data) => {
|
||||
// Provided transactions ensure synchrony on any signing protocol, and we won't start
|
||||
// signing with threshold keys before we've confirmed them on-chain
|
||||
let Some(removed) =
|
||||
crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
|
||||
else {
|
||||
self.fatal_slash(
|
||||
data.signed.signer.to_bytes(),
|
||||
"signing despite not having set keys on substrate",
|
||||
);
|
||||
return;
|
||||
};
|
||||
let signer = data.signed.signer;
|
||||
let Ok(()) = self.check_sign_data_len(signer, data.data.len()) else {
|
||||
let Ok(()) = self.check_sign_data_len(&removed, signer, data.data.len()) else {
|
||||
return;
|
||||
};
|
||||
let expected_len = match data.label {
|
||||
@@ -441,11 +654,11 @@ impl<
|
||||
attempt: data.attempt,
|
||||
};
|
||||
let Accumulation::Ready(DataSet::Participating(mut results)) =
|
||||
self.handle_data(&data_spec, &data.data.encode(), &data.signed)
|
||||
self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed)
|
||||
else {
|
||||
return;
|
||||
};
|
||||
unflatten(self.spec, &mut results);
|
||||
unflatten(self.spec, &removed, &mut results);
|
||||
|
||||
let id = SubstrateSignId {
|
||||
session: self.spec.set().session,
|
||||
@@ -466,7 +679,16 @@ impl<
|
||||
}
|
||||
|
||||
Transaction::Sign(data) => {
|
||||
let Ok(()) = self.check_sign_data_len(data.signed.signer, data.data.len()) else {
|
||||
let Some(removed) =
|
||||
crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
|
||||
else {
|
||||
self.fatal_slash(
|
||||
data.signed.signer.to_bytes(),
|
||||
"signing despite not having set keys on substrate",
|
||||
);
|
||||
return;
|
||||
};
|
||||
let Ok(()) = self.check_sign_data_len(&removed, data.signed.signer, data.data.len()) else {
|
||||
return;
|
||||
};
|
||||
|
||||
@@ -476,9 +698,9 @@ impl<
|
||||
attempt: data.attempt,
|
||||
};
|
||||
if let Accumulation::Ready(DataSet::Participating(mut results)) =
|
||||
self.handle_data(&data_spec, &data.data.encode(), &data.signed)
|
||||
self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed)
|
||||
{
|
||||
unflatten(self.spec, &mut results);
|
||||
unflatten(self.spec, &removed, &mut results);
|
||||
let id =
|
||||
SignId { session: self.spec.set().session, id: data.plan, attempt: data.attempt };
|
||||
self
|
||||
@@ -519,7 +741,8 @@ impl<
|
||||
}
|
||||
|
||||
Transaction::SlashReport(points, signed) => {
|
||||
let signer_range = self.spec.i(signed.signer).unwrap();
|
||||
// Uses &[] as we only need the length which is independent to who else was removed
|
||||
let signer_range = self.spec.i(&[], signed.signer).unwrap();
|
||||
let signer_len = u16::from(signer_range.end) - u16::from(signer_range.start);
|
||||
if points.len() != (self.spec.validators().len() - 1) {
|
||||
self.fatal_slash(
|
||||
|
||||
@@ -1,3 +1,8 @@
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite};
|
||||
|
||||
use serai_client::validator_sets::primitives::ExternalValidatorSet;
|
||||
|
||||
use tributary::{
|
||||
ReadWrite,
|
||||
transaction::{TransactionError, TransactionKind, Transaction as TransactionTrait},
|
||||
@@ -20,6 +25,39 @@ pub use handle::*;
|
||||
|
||||
pub mod scanner;
|
||||
|
||||
pub fn removed_as_of_dkg_attempt(
|
||||
getter: &impl Get,
|
||||
genesis: [u8; 32],
|
||||
attempt: u32,
|
||||
) -> Option<Vec<<Ristretto as Ciphersuite>::G>> {
|
||||
if attempt == 0 {
|
||||
Some(vec![])
|
||||
} else {
|
||||
RemovedAsOfDkgAttempt::get(getter, genesis, attempt).map(|keys| {
|
||||
keys.iter().map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap()).collect()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub fn removed_as_of_set_keys(
|
||||
getter: &impl Get,
|
||||
set: ExternalValidatorSet,
|
||||
genesis: [u8; 32],
|
||||
) -> Option<Vec<<Ristretto as Ciphersuite>::G>> {
|
||||
// SeraiDkgCompleted has the key placed on-chain.
|
||||
// This key can be uniquely mapped to an attempt so long as one participant was honest, which we
|
||||
// assume as a presumably honest participant.
|
||||
// Resolve from generated key to attempt to fatally slashed as of attempt.
|
||||
|
||||
// This expect will trigger if this is prematurely called and Substrate has tracked the keys yet
|
||||
// we haven't locally synced and handled the Tributary
|
||||
// All callers of this, at the time of writing, ensure the Tributary has sufficiently synced
|
||||
// making the panic with context more desirable than the None
|
||||
let attempt = KeyToDkgAttempt::get(getter, SeraiDkgCompleted::get(getter, set)?)
|
||||
.expect("key completed on-chain didn't have an attempt related");
|
||||
removed_as_of_dkg_attempt(getter, genesis, attempt)
|
||||
}
|
||||
|
||||
pub async fn publish_signed_transaction<D: Db, P: crate::P2p>(
|
||||
txn: &mut D::Transaction<'_>,
|
||||
tributary: &Tributary<D, Transaction, P>,
|
||||
|
||||
@@ -1,18 +1,17 @@
|
||||
use core::{marker::PhantomData, future::Future, time::Duration};
|
||||
use std::sync::Arc;
|
||||
use core::{marker::PhantomData, ops::Deref, future::Future, time::Duration};
|
||||
use std::{sync::Arc, collections::HashSet};
|
||||
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use rand_core::OsRng;
|
||||
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite};
|
||||
|
||||
use tokio::sync::broadcast;
|
||||
|
||||
use scale::{Encode, Decode};
|
||||
use serai_client::{
|
||||
primitives::Signature,
|
||||
validator_sets::primitives::{KeyPair, ValidatorSet},
|
||||
primitives::{SeraiAddress, Signature},
|
||||
validator_sets::primitives::{ExternalValidatorSet, KeyPair},
|
||||
Serai,
|
||||
};
|
||||
|
||||
@@ -40,7 +39,7 @@ pub enum RecognizedIdType {
|
||||
pub trait RIDTrait {
|
||||
async fn recognized_id(
|
||||
&self,
|
||||
set: ValidatorSet,
|
||||
set: ExternalValidatorSet,
|
||||
genesis: [u8; 32],
|
||||
kind: RecognizedIdType,
|
||||
id: Vec<u8>,
|
||||
@@ -49,12 +48,12 @@ pub trait RIDTrait {
|
||||
#[async_trait::async_trait]
|
||||
impl<
|
||||
FRid: Send + Future<Output = ()>,
|
||||
F: Sync + Fn(ValidatorSet, [u8; 32], RecognizedIdType, Vec<u8>) -> FRid,
|
||||
F: Sync + Fn(ExternalValidatorSet, [u8; 32], RecognizedIdType, Vec<u8>) -> FRid,
|
||||
> RIDTrait for F
|
||||
{
|
||||
async fn recognized_id(
|
||||
&self,
|
||||
set: ValidatorSet,
|
||||
set: ExternalValidatorSet,
|
||||
genesis: [u8; 32],
|
||||
kind: RecognizedIdType,
|
||||
id: Vec<u8>,
|
||||
@@ -68,9 +67,9 @@ pub trait PublishSeraiTransaction {
|
||||
async fn publish_set_keys(
|
||||
&self,
|
||||
db: &(impl Sync + Get),
|
||||
set: ValidatorSet,
|
||||
set: ExternalValidatorSet,
|
||||
removed: Vec<SeraiAddress>,
|
||||
key_pair: KeyPair,
|
||||
signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
|
||||
signature: Signature,
|
||||
);
|
||||
}
|
||||
@@ -88,7 +87,7 @@ mod impl_pst_for_serai {
|
||||
async fn publish(
|
||||
serai: &Serai,
|
||||
db: &impl Get,
|
||||
set: ValidatorSet,
|
||||
set: ExternalValidatorSet,
|
||||
tx: serai_client::Transaction,
|
||||
meta: $Meta,
|
||||
) -> bool {
|
||||
@@ -130,14 +129,19 @@ mod impl_pst_for_serai {
|
||||
async fn publish_set_keys(
|
||||
&self,
|
||||
db: &(impl Sync + Get),
|
||||
set: ValidatorSet,
|
||||
set: ExternalValidatorSet,
|
||||
removed: Vec<SeraiAddress>,
|
||||
key_pair: KeyPair,
|
||||
signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
|
||||
signature: Signature,
|
||||
) {
|
||||
let tx =
|
||||
SeraiValidatorSets::set_keys(set.network, key_pair, signature_participants, signature);
|
||||
async fn check(serai: SeraiValidatorSets<'_>, set: ValidatorSet, (): ()) -> bool {
|
||||
// TODO: BoundedVec as an arg to avoid this expect
|
||||
let tx = SeraiValidatorSets::set_keys(
|
||||
set.network,
|
||||
removed.try_into().expect("removing more than allowed"),
|
||||
key_pair,
|
||||
signature,
|
||||
);
|
||||
async fn check(serai: SeraiValidatorSets<'_>, set: ExternalValidatorSet, (): ()) -> bool {
|
||||
if matches!(serai.keys(set).await, Ok(Some(_))) {
|
||||
log::info!("another coordinator set key pair for {:?}", set);
|
||||
return true;
|
||||
@@ -246,15 +250,18 @@ impl<
|
||||
|
||||
let genesis = self.spec.genesis();
|
||||
|
||||
let current_fatal_slashes = FatalSlashes::get_as_keys(self.txn, genesis);
|
||||
|
||||
// Calculate the shares still present, spinning if not enough are
|
||||
{
|
||||
// still_present_shares is used by a below branch, yet it's a natural byproduct of checking if
|
||||
// we should spin, hence storing it in a variable here
|
||||
let still_present_shares = {
|
||||
// Start with the original n value
|
||||
let mut present_shares = self.spec.n();
|
||||
let mut present_shares = self.spec.n(&[]);
|
||||
// Remove everyone fatally slashed
|
||||
let current_fatal_slashes = FatalSlashes::get_as_keys(self.txn, genesis);
|
||||
for removed in ¤t_fatal_slashes {
|
||||
let original_i_for_removed =
|
||||
self.spec.i(*removed).expect("removed party was never present");
|
||||
self.spec.i(&[], *removed).expect("removed party was never present");
|
||||
let removed_shares =
|
||||
u16::from(original_i_for_removed.end) - u16::from(original_i_for_removed.start);
|
||||
present_shares -= removed_shares;
|
||||
@@ -270,17 +277,79 @@ impl<
|
||||
tokio::time::sleep(core::time::Duration::from_secs(60)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
present_shares
|
||||
};
|
||||
|
||||
for topic in ReattemptDb::take(self.txn, genesis, self.block_number) {
|
||||
let attempt = AttemptDb::start_next_attempt(self.txn, genesis, topic);
|
||||
log::info!("potentially re-attempting {topic:?} with attempt {attempt}");
|
||||
log::info!("re-attempting {topic:?} with attempt {attempt}");
|
||||
|
||||
// Slash people who failed to participate as expected in the prior attempt
|
||||
{
|
||||
let prior_attempt = attempt - 1;
|
||||
// TODO: If 67% sent preprocesses, this should be them. Else, this should be vec![]
|
||||
let expected_participants: Vec<<Ristretto as Ciphersuite>::G> = vec![];
|
||||
let (removed, expected_participants) = match topic {
|
||||
Topic::Dkg => {
|
||||
// Every validator who wasn't removed is expected to have participated
|
||||
let removed =
|
||||
crate::tributary::removed_as_of_dkg_attempt(self.txn, genesis, prior_attempt)
|
||||
.expect("prior attempt didn't have its removed saved to disk");
|
||||
let removed_set = removed.iter().copied().collect::<HashSet<_>>();
|
||||
(
|
||||
removed,
|
||||
self
|
||||
.spec
|
||||
.validators()
|
||||
.into_iter()
|
||||
.filter_map(|(validator, _)| {
|
||||
Some(validator).filter(|validator| !removed_set.contains(validator))
|
||||
})
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
Topic::DkgConfirmation => {
|
||||
panic!("TODO: re-attempting DkgConfirmation when we should be re-attempting the Dkg")
|
||||
}
|
||||
Topic::SubstrateSign(_) | Topic::Sign(_) => {
|
||||
let removed =
|
||||
crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
|
||||
.expect("SubstrateSign/Sign yet have yet to set keys");
|
||||
// TODO: If 67% sent preprocesses, this should be them. Else, this should be vec![]
|
||||
let expected_participants = vec![];
|
||||
(removed, expected_participants)
|
||||
}
|
||||
};
|
||||
|
||||
let (expected_topic, expected_label) = match topic {
|
||||
Topic::Dkg => {
|
||||
let n = self.spec.n(&removed);
|
||||
// If we got all the DKG shares, we should be on DKG confirmation
|
||||
let share_spec =
|
||||
DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt: prior_attempt };
|
||||
if DataReceived::get(self.txn, genesis, &share_spec).unwrap_or(0) == n {
|
||||
// Label::Share since there is no Label::Preprocess for DkgConfirmation since the
|
||||
// preprocess is part of Topic::Dkg Label::Share
|
||||
(Topic::DkgConfirmation, Label::Share)
|
||||
} else {
|
||||
let preprocess_spec = DataSpecification {
|
||||
topic: Topic::Dkg,
|
||||
label: Label::Preprocess,
|
||||
attempt: prior_attempt,
|
||||
};
|
||||
// If we got all the DKG preprocesses, DKG shares
|
||||
if DataReceived::get(self.txn, genesis, &preprocess_spec).unwrap_or(0) == n {
|
||||
// Label::Share since there is no Label::Preprocess for DkgConfirmation since the
|
||||
// preprocess is part of Topic::Dkg Label::Share
|
||||
(Topic::Dkg, Label::Share)
|
||||
} else {
|
||||
(Topic::Dkg, Label::Preprocess)
|
||||
}
|
||||
}
|
||||
}
|
||||
Topic::DkgConfirmation => unreachable!(),
|
||||
// If we got enough participants to move forward, then we expect shares from them all
|
||||
Topic::SubstrateSign(_) | Topic::Sign(_) => (topic, Label::Share),
|
||||
};
|
||||
|
||||
let mut did_not_participate = vec![];
|
||||
for expected_participant in expected_participants {
|
||||
@@ -288,9 +357,8 @@ impl<
|
||||
self.txn,
|
||||
genesis,
|
||||
&DataSpecification {
|
||||
topic,
|
||||
// Since we got the preprocesses, we were supposed to get the shares
|
||||
label: Label::Share,
|
||||
topic: expected_topic,
|
||||
label: expected_label,
|
||||
attempt: prior_attempt,
|
||||
},
|
||||
&expected_participant.to_bytes(),
|
||||
@@ -306,8 +374,15 @@ impl<
|
||||
// Accordingly, clear did_not_participate
|
||||
// TODO
|
||||
|
||||
// TODO: Increment the slash points of people who didn't preprocess in some expected window
|
||||
// of time
|
||||
// If during the DKG, explicitly mark these people as having been offline
|
||||
// TODO: If they were offline sufficiently long ago, don't strike them off
|
||||
if topic == Topic::Dkg {
|
||||
let mut existing = OfflineDuringDkg::get(self.txn, genesis).unwrap_or(vec![]);
|
||||
for did_not_participate in did_not_participate {
|
||||
existing.push(did_not_participate.to_bytes());
|
||||
}
|
||||
OfflineDuringDkg::set(self.txn, genesis, &existing);
|
||||
}
|
||||
|
||||
// Slash everyone who didn't participate as expected
|
||||
// This may be overzealous as if a minority detects a completion, they'll abort yet the
|
||||
@@ -337,22 +412,75 @@ impl<
|
||||
then preprocesses. This only sends preprocesses).
|
||||
*/
|
||||
match topic {
|
||||
Topic::DkgConfirmation => {
|
||||
if SeraiDkgCompleted::get(self.txn, self.spec.set()).is_none() {
|
||||
log::info!("re-attempting DKG confirmation with attempt {attempt}");
|
||||
Topic::Dkg => {
|
||||
let mut removed = current_fatal_slashes.clone();
|
||||
|
||||
// Since it wasn't completed, publish our nonces for the next attempt
|
||||
let confirmation_nonces =
|
||||
crate::tributary::dkg_confirmation_nonces(self.our_key, self.spec, self.txn, attempt);
|
||||
let mut tx = Transaction::DkgConfirmationNonces {
|
||||
attempt,
|
||||
confirmation_nonces,
|
||||
signed: Transaction::empty_signed(),
|
||||
let t = self.spec.t();
|
||||
{
|
||||
let mut present_shares = still_present_shares;
|
||||
|
||||
// Load the parties marked as offline across the various attempts
|
||||
let mut offline = OfflineDuringDkg::get(self.txn, genesis)
|
||||
.unwrap_or(vec![])
|
||||
.iter()
|
||||
.map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap())
|
||||
.collect::<Vec<_>>();
|
||||
// Pop from the list to prioritize the removal of those recently offline
|
||||
while let Some(offline) = offline.pop() {
|
||||
// Make sure they weren't removed already (such as due to being fatally slashed)
|
||||
// This also may trigger if they were offline across multiple attempts
|
||||
if removed.contains(&offline) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// If we can remove them and still meet the threshold, do so
|
||||
let original_i_for_offline =
|
||||
self.spec.i(&[], offline).expect("offline was never present?");
|
||||
let offline_shares =
|
||||
u16::from(original_i_for_offline.end) - u16::from(original_i_for_offline.start);
|
||||
if (present_shares - offline_shares) >= t {
|
||||
present_shares -= offline_shares;
|
||||
removed.push(offline);
|
||||
}
|
||||
|
||||
// If we've removed as many people as we can, break
|
||||
if present_shares == t {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
RemovedAsOfDkgAttempt::set(
|
||||
self.txn,
|
||||
genesis,
|
||||
attempt,
|
||||
&removed.iter().map(<Ristretto as Ciphersuite>::G::to_bytes).collect(),
|
||||
);
|
||||
|
||||
if DkgLocallyCompleted::get(self.txn, genesis).is_none() {
|
||||
let Some(our_i) = self.spec.i(&removed, Ristretto::generator() * self.our_key.deref())
|
||||
else {
|
||||
continue;
|
||||
};
|
||||
tx.sign(&mut OsRng, genesis, self.our_key);
|
||||
self.publish_tributary_tx.publish_tributary_tx(tx).await;
|
||||
|
||||
// Since it wasn't completed, instruct the processor to start the next attempt
|
||||
let id =
|
||||
processor_messages::key_gen::KeyGenId { session: self.spec.set().session, attempt };
|
||||
|
||||
let params =
|
||||
frost::ThresholdParams::new(t, self.spec.n(&removed), our_i.start).unwrap();
|
||||
let shares = u16::from(our_i.end) - u16::from(our_i.start);
|
||||
|
||||
self
|
||||
.processors
|
||||
.send(
|
||||
self.spec.set().network,
|
||||
processor_messages::key_gen::CoordinatorMessage::GenerateKey { id, params, shares },
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
Topic::DkgConfirmation => unreachable!(),
|
||||
Topic::SubstrateSign(inner_id) => {
|
||||
let id = processor_messages::coordinator::SubstrateSignId {
|
||||
session: self.spec.set().session,
|
||||
@@ -369,8 +497,6 @@ impl<
|
||||
crate::cosign_evaluator::LatestCosign::get(self.txn, self.spec.set().network)
|
||||
.map_or(0, |cosign| cosign.block_number);
|
||||
if latest_cosign < block_number {
|
||||
log::info!("re-attempting cosigning {block_number:?} with attempt {attempt}");
|
||||
|
||||
// Instruct the processor to start the next attempt
|
||||
self
|
||||
.processors
|
||||
@@ -387,8 +513,6 @@ impl<
|
||||
SubstrateSignableId::Batch(batch) => {
|
||||
// If the Batch hasn't appeared on-chain...
|
||||
if BatchInstructionsHashDb::get(self.txn, self.spec.set().network, batch).is_none() {
|
||||
log::info!("re-attempting signing batch {batch:?} with attempt {attempt}");
|
||||
|
||||
// Instruct the processor to start the next attempt
|
||||
// The processor won't continue if it's already signed a Batch
|
||||
// Prior checking if the Batch is on-chain just may reduce the non-participating
|
||||
@@ -406,11 +530,6 @@ impl<
|
||||
// If this Tributary hasn't been retired...
|
||||
// (published SlashReport/took too long to do so)
|
||||
if crate::RetiredTributaryDb::get(self.txn, self.spec.set()).is_none() {
|
||||
log::info!(
|
||||
"re-attempting signing slash report for {:?} with attempt {attempt}",
|
||||
self.spec.set()
|
||||
);
|
||||
|
||||
let report = SlashReport::get(self.txn, self.spec.set())
|
||||
.expect("re-attempting signing a SlashReport we don't have?");
|
||||
self
|
||||
@@ -457,7 +576,8 @@ impl<
|
||||
};
|
||||
// Assign them 0 points for themselves
|
||||
report.insert(i, 0);
|
||||
let signer_i = self.spec.i(validator).unwrap();
|
||||
// Uses &[] as we only need the length which is independent to who else was removed
|
||||
let signer_i = self.spec.i(&[], validator).unwrap();
|
||||
let signer_len = u16::from(signer_i.end) - u16::from(signer_i.start);
|
||||
// Push `n` copies, one for each of their shares
|
||||
for _ in 0 .. signer_len {
|
||||
|
||||
@@ -55,7 +55,7 @@
|
||||
*/
|
||||
|
||||
use core::ops::Deref;
|
||||
use std::collections::{HashSet, HashMap};
|
||||
use std::collections::HashMap;
|
||||
|
||||
use zeroize::{Zeroize, Zeroizing};
|
||||
|
||||
@@ -63,19 +63,21 @@ use rand_core::OsRng;
|
||||
|
||||
use blake2::{Digest, Blake2s256};
|
||||
|
||||
use ciphersuite::{group::ff::PrimeField, Ciphersuite, Ristretto};
|
||||
use frost::{
|
||||
FrostError,
|
||||
dkg::{Participant, musig::musig},
|
||||
ThresholdKeys,
|
||||
sign::*,
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{
|
||||
group::{ff::PrimeField, GroupEncoding},
|
||||
Ciphersuite,
|
||||
};
|
||||
use dkg_musig::musig;
|
||||
use frost::{FrostError, dkg::Participant, ThresholdKeys, sign::*};
|
||||
use frost_schnorrkel::Schnorrkel;
|
||||
|
||||
use scale::Encode;
|
||||
|
||||
#[rustfmt::skip]
|
||||
use serai_client::validator_sets::primitives::{ValidatorSet, KeyPair, musig_context, set_keys_message};
|
||||
use serai_client::{
|
||||
Public,
|
||||
validator_sets::primitives::{KeyPair, musig_context, set_keys_message},
|
||||
};
|
||||
|
||||
use serai_db::*;
|
||||
|
||||
@@ -84,7 +86,6 @@ use crate::tributary::TributarySpec;
|
||||
create_db!(
|
||||
SigningProtocolDb {
|
||||
CachedPreprocesses: (context: &impl Encode) -> [u8; 32]
|
||||
DataSignedWith: (context: &impl Encode) -> (Vec<u8>, HashMap<Participant, Vec<u8>>),
|
||||
}
|
||||
);
|
||||
|
||||
@@ -113,22 +114,16 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
|
||||
};
|
||||
let encryption_key_slice: &mut [u8] = encryption_key.as_mut();
|
||||
|
||||
// Create the MuSig keys
|
||||
let algorithm = Schnorrkel::new(b"substrate");
|
||||
let keys: ThresholdKeys<Ristretto> =
|
||||
musig(&musig_context(self.spec.set()), self.key, participants)
|
||||
musig(musig_context(self.spec.set().into()), self.key.clone(), participants)
|
||||
.expect("signing for a set we aren't in/validator present multiple times")
|
||||
.into();
|
||||
|
||||
// Define the algorithm
|
||||
let algorithm = Schnorrkel::new(b"substrate");
|
||||
|
||||
// Check if we've prior preprocessed
|
||||
if CachedPreprocesses::get(self.txn, &self.context).is_none() {
|
||||
// If we haven't, we create a machine solely to obtain the preprocess with
|
||||
let (machine, _) =
|
||||
AlgorithmMachine::new(algorithm.clone(), keys.clone()).preprocess(&mut OsRng);
|
||||
|
||||
// Cache and save the preprocess to disk
|
||||
let mut cache = machine.cache();
|
||||
assert_eq!(cache.0.len(), 32);
|
||||
#[allow(clippy::needless_range_loop)]
|
||||
@@ -139,15 +134,13 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
|
||||
CachedPreprocesses::set(self.txn, &self.context, &cache.0);
|
||||
}
|
||||
|
||||
// We're now guaranteed to have the preprocess, hence why this `unwrap` is safe
|
||||
let cached = CachedPreprocesses::get(self.txn, &self.context).unwrap();
|
||||
let mut cached = Zeroizing::new(cached);
|
||||
let mut cached: Zeroizing<[u8; 32]> = Zeroizing::new(cached);
|
||||
#[allow(clippy::needless_range_loop)]
|
||||
for b in 0 .. 32 {
|
||||
cached[b] ^= encryption_key_slice[b];
|
||||
}
|
||||
encryption_key_slice.zeroize();
|
||||
// Create the machine from the cached preprocess
|
||||
let (machine, preprocess) =
|
||||
AlgorithmSignMachine::from_cache(algorithm, keys, CachedPreprocess(cached));
|
||||
|
||||
@@ -160,29 +153,8 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
|
||||
mut serialized_preprocesses: HashMap<Participant, Vec<u8>>,
|
||||
msg: &[u8],
|
||||
) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
|
||||
// We can't clear the preprocess as we sitll need it to accumulate all of the shares
|
||||
// We do save the message we signed so any future calls with distinct messages panic
|
||||
// This assumes the txn deciding this data is committed before the share is broaadcast
|
||||
if let Some((existing_msg, existing_preprocesses)) =
|
||||
DataSignedWith::get(self.txn, &self.context)
|
||||
{
|
||||
assert_eq!(msg, &existing_msg, "obtaining a signature share for a distinct message");
|
||||
assert_eq!(
|
||||
&serialized_preprocesses, &existing_preprocesses,
|
||||
"obtaining a signature share with a distinct set of preprocesses"
|
||||
);
|
||||
} else {
|
||||
DataSignedWith::set(
|
||||
self.txn,
|
||||
&self.context,
|
||||
&(msg.to_vec(), serialized_preprocesses.clone()),
|
||||
);
|
||||
}
|
||||
let machine = self.preprocess_internal(participants).0;
|
||||
|
||||
// Get the preprocessed machine
|
||||
let (machine, _) = self.preprocess_internal(participants);
|
||||
|
||||
// Deserialize all the preprocesses
|
||||
let mut participants = serialized_preprocesses.keys().copied().collect::<Vec<_>>();
|
||||
participants.sort();
|
||||
let mut preprocesses = HashMap::new();
|
||||
@@ -195,14 +167,13 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
|
||||
);
|
||||
}
|
||||
|
||||
// Sign the share
|
||||
let (machine, share) = machine.sign(preprocesses, msg).map_err(|e| match e {
|
||||
FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"),
|
||||
FrostError::InvalidParticipant(_, _) |
|
||||
FrostError::InvalidSigningSet(_) |
|
||||
FrostError::InvalidParticipantQuantity(_, _) |
|
||||
FrostError::DuplicatedParticipant(_) |
|
||||
FrostError::MissingParticipant(_) => panic!("unexpected error during sign: {e:?}"),
|
||||
FrostError::MissingParticipant(_) => unreachable!("{e:?}"),
|
||||
FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,
|
||||
})?;
|
||||
|
||||
@@ -233,24 +204,24 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
|
||||
}
|
||||
|
||||
// Get the keys of the participants, noted by their threshold is, and return a new map indexed by
|
||||
// their MuSig is.
|
||||
// the MuSig is.
|
||||
fn threshold_i_map_to_keys_and_musig_i_map(
|
||||
spec: &TributarySpec,
|
||||
removed: &[<Ristretto as Ciphersuite>::G],
|
||||
our_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
mut map: HashMap<Participant, Vec<u8>>,
|
||||
) -> (Vec<<Ristretto as Ciphersuite>::G>, HashMap<Participant, Vec<u8>>) {
|
||||
// Insert our own index so calculations aren't offset
|
||||
let our_threshold_i = spec
|
||||
.i(<Ristretto as Ciphersuite>::generator() * our_key.deref())
|
||||
.expect("not in a set we're signing for")
|
||||
.i(removed, <Ristretto as Ciphersuite>::generator() * our_key.deref())
|
||||
.expect("MuSig t-of-n signing a for a protocol we were removed from")
|
||||
.start;
|
||||
// Asserts we weren't unexpectedly already present
|
||||
assert!(map.insert(our_threshold_i, vec![]).is_none());
|
||||
|
||||
let spec_validators = spec.validators();
|
||||
let key_from_threshold_i = |threshold_i| {
|
||||
for (key, _) in &spec_validators {
|
||||
if threshold_i == spec.i(*key).expect("validator wasn't in a set they're in").start {
|
||||
if threshold_i == spec.i(removed, *key).expect("MuSig t-of-n participant was removed").start {
|
||||
return *key;
|
||||
}
|
||||
}
|
||||
@@ -261,37 +232,29 @@ fn threshold_i_map_to_keys_and_musig_i_map(
|
||||
let mut threshold_is = map.keys().copied().collect::<Vec<_>>();
|
||||
threshold_is.sort();
|
||||
for threshold_i in threshold_is {
|
||||
sorted.push((
|
||||
threshold_i,
|
||||
key_from_threshold_i(threshold_i),
|
||||
map.remove(&threshold_i).unwrap(),
|
||||
));
|
||||
sorted.push((key_from_threshold_i(threshold_i), map.remove(&threshold_i).unwrap()));
|
||||
}
|
||||
|
||||
// Now that signers are sorted, with their shares, create a map with the is needed for MuSig
|
||||
let mut participants = vec![];
|
||||
let mut map = HashMap::new();
|
||||
let mut our_musig_i = None;
|
||||
for (raw_i, (threshold_i, key, share)) in sorted.into_iter().enumerate() {
|
||||
let musig_i = Participant::new(u16::try_from(raw_i).unwrap() + 1).unwrap();
|
||||
if threshold_i == our_threshold_i {
|
||||
our_musig_i = Some(musig_i);
|
||||
}
|
||||
for (raw_i, (key, share)) in sorted.into_iter().enumerate() {
|
||||
let musig_i = u16::try_from(raw_i).unwrap() + 1;
|
||||
participants.push(key);
|
||||
map.insert(musig_i, share);
|
||||
map.insert(Participant::new(musig_i).unwrap(), share);
|
||||
}
|
||||
|
||||
map.remove(&our_musig_i.unwrap()).unwrap();
|
||||
map.remove(&our_threshold_i).unwrap();
|
||||
|
||||
(participants, map)
|
||||
}
|
||||
|
||||
type DkgConfirmerSigningProtocol<'a, T> =
|
||||
SigningProtocol<'a, T, (&'static [u8; 12], ValidatorSet, u32)>;
|
||||
type DkgConfirmerSigningProtocol<'a, T> = SigningProtocol<'a, T, (&'static [u8; 12], u32)>;
|
||||
|
||||
pub(crate) struct DkgConfirmer<'a, T: DbTxn> {
|
||||
key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
spec: &'a TributarySpec,
|
||||
removed: Vec<<Ristretto as Ciphersuite>::G>,
|
||||
txn: &'a mut T,
|
||||
attempt: u32,
|
||||
}
|
||||
@@ -302,19 +265,19 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
|
||||
spec: &'a TributarySpec,
|
||||
txn: &'a mut T,
|
||||
attempt: u32,
|
||||
) -> DkgConfirmer<'a, T> {
|
||||
DkgConfirmer { key, spec, txn, attempt }
|
||||
) -> Option<DkgConfirmer<'a, T>> {
|
||||
// This relies on how confirmations are inlined into the DKG protocol and they accordingly
|
||||
// share attempts
|
||||
let removed = crate::tributary::removed_as_of_dkg_attempt(txn, spec.genesis(), attempt)?;
|
||||
Some(DkgConfirmer { key, spec, removed, txn, attempt })
|
||||
}
|
||||
|
||||
fn signing_protocol(&mut self) -> DkgConfirmerSigningProtocol<'_, T> {
|
||||
let context = (b"DkgConfirmer", self.spec.set(), self.attempt);
|
||||
let context = (b"DkgConfirmer", self.attempt);
|
||||
SigningProtocol { key: self.key, spec: self.spec, txn: self.txn, context }
|
||||
}
|
||||
|
||||
fn preprocess_internal(&mut self) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {
|
||||
// This preprocesses with just us as we only decide the participants after obtaining
|
||||
// preprocesses
|
||||
let participants = vec![<Ristretto as Ciphersuite>::generator() * self.key.deref()];
|
||||
let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();
|
||||
self.signing_protocol().preprocess_internal(&participants)
|
||||
}
|
||||
// Get the preprocess for this confirmation.
|
||||
@@ -327,9 +290,14 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
|
||||
preprocesses: HashMap<Participant, Vec<u8>>,
|
||||
key_pair: &KeyPair,
|
||||
) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
|
||||
let (participants, preprocesses) =
|
||||
threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, preprocesses);
|
||||
let msg = set_keys_message(&self.spec.set(), key_pair);
|
||||
let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();
|
||||
let preprocesses =
|
||||
threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, preprocesses).1;
|
||||
let msg = set_keys_message(
|
||||
&self.spec.set(),
|
||||
&self.removed.iter().map(|key| Public::from(key.to_bytes())).collect::<Vec<_>>(),
|
||||
key_pair,
|
||||
);
|
||||
self.signing_protocol().share_internal(&participants, preprocesses, &msg)
|
||||
}
|
||||
// Get the share for this confirmation, if the preprocesses are valid.
|
||||
@@ -347,9 +315,8 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
|
||||
key_pair: &KeyPair,
|
||||
shares: HashMap<Participant, Vec<u8>>,
|
||||
) -> Result<[u8; 64], Participant> {
|
||||
assert_eq!(preprocesses.keys().collect::<HashSet<_>>(), shares.keys().collect::<HashSet<_>>());
|
||||
|
||||
let shares = threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, shares).1;
|
||||
let shares =
|
||||
threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, shares).1;
|
||||
|
||||
let machine = self
|
||||
.share_internal(preprocesses, key_pair)
|
||||
|
||||
@@ -3,13 +3,14 @@ use std::{io, collections::HashMap};
|
||||
|
||||
use transcript::{Transcript, RecommendedTranscript};
|
||||
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite};
|
||||
use frost::Participant;
|
||||
|
||||
use scale::Encode;
|
||||
use borsh::{BorshSerialize, BorshDeserialize};
|
||||
|
||||
use serai_client::validator_sets::primitives::ValidatorSet;
|
||||
use serai_client::{primitives::PublicKey, validator_sets::primitives::ExternalValidatorSet};
|
||||
|
||||
fn borsh_serialize_validators<W: io::Write>(
|
||||
validators: &Vec<(<Ristretto as Ciphersuite>::G, u16)>,
|
||||
@@ -43,27 +44,32 @@ fn borsh_deserialize_validators<R: io::Read>(
|
||||
pub struct TributarySpec {
|
||||
serai_block: [u8; 32],
|
||||
start_time: u64,
|
||||
set: ValidatorSet,
|
||||
set: ExternalValidatorSet,
|
||||
#[borsh(
|
||||
serialize_with = "borsh_serialize_validators",
|
||||
deserialize_with = "borsh_deserialize_validators"
|
||||
)]
|
||||
validators: Vec<(<Ristretto as Ciphersuite>::G, u16)>,
|
||||
evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
|
||||
}
|
||||
|
||||
impl TributarySpec {
|
||||
pub fn new(
|
||||
serai_block: [u8; 32],
|
||||
start_time: u64,
|
||||
set: ValidatorSet,
|
||||
validators: Vec<(<Ristretto as Ciphersuite>::G, u16)>,
|
||||
evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
|
||||
set: ExternalValidatorSet,
|
||||
set_participants: Vec<(PublicKey, u16)>,
|
||||
) -> TributarySpec {
|
||||
Self { serai_block, start_time, set, validators, evrf_public_keys }
|
||||
let mut validators = vec![];
|
||||
for (participant, shares) in set_participants {
|
||||
let participant = <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut participant.0.as_ref())
|
||||
.expect("invalid key registered as participant");
|
||||
validators.push((participant, shares));
|
||||
}
|
||||
|
||||
Self { serai_block, start_time, set, validators }
|
||||
}
|
||||
|
||||
pub fn set(&self) -> ValidatorSet {
|
||||
pub fn set(&self) -> ExternalValidatorSet {
|
||||
self.set
|
||||
}
|
||||
|
||||
@@ -83,15 +89,24 @@ impl TributarySpec {
|
||||
self.start_time
|
||||
}
|
||||
|
||||
pub fn n(&self) -> u16 {
|
||||
self.validators.iter().map(|(_, weight)| *weight).sum()
|
||||
pub fn n(&self, removed_validators: &[<Ristretto as Ciphersuite>::G]) -> u16 {
|
||||
self
|
||||
.validators
|
||||
.iter()
|
||||
.map(|(validator, weight)| if removed_validators.contains(validator) { 0 } else { *weight })
|
||||
.sum()
|
||||
}
|
||||
|
||||
pub fn t(&self) -> u16 {
|
||||
((2 * self.n()) / 3) + 1
|
||||
// t doesn't change with regards to the amount of removed validators
|
||||
((2 * self.n(&[])) / 3) + 1
|
||||
}
|
||||
|
||||
pub fn i(&self, key: <Ristretto as Ciphersuite>::G) -> Option<Range<Participant>> {
|
||||
pub fn i(
|
||||
&self,
|
||||
removed_validators: &[<Ristretto as Ciphersuite>::G],
|
||||
key: <Ristretto as Ciphersuite>::G,
|
||||
) -> Option<Range<Participant>> {
|
||||
let mut all_is = HashMap::new();
|
||||
let mut i = 1;
|
||||
for (validator, weight) in &self.validators {
|
||||
@@ -102,12 +117,34 @@ impl TributarySpec {
|
||||
i += weight;
|
||||
}
|
||||
|
||||
Some(all_is.get(&key)?.clone())
|
||||
let original_i = all_is.get(&key)?.clone();
|
||||
let mut result_i = original_i.clone();
|
||||
for removed_validator in removed_validators {
|
||||
let removed_i = all_is
|
||||
.get(removed_validator)
|
||||
.expect("removed validator wasn't present in set to begin with");
|
||||
// If the queried key was removed, return None
|
||||
if &original_i == removed_i {
|
||||
return None;
|
||||
}
|
||||
|
||||
// If the removed was before the queried, shift the queried down accordingly
|
||||
if removed_i.start < original_i.start {
|
||||
let removed_shares = u16::from(removed_i.end) - u16::from(removed_i.start);
|
||||
result_i.start = Participant::new(u16::from(original_i.start) - removed_shares).unwrap();
|
||||
result_i.end = Participant::new(u16::from(original_i.end) - removed_shares).unwrap();
|
||||
}
|
||||
}
|
||||
Some(result_i)
|
||||
}
|
||||
|
||||
pub fn reverse_lookup_i(&self, i: Participant) -> Option<<Ristretto as Ciphersuite>::G> {
|
||||
pub fn reverse_lookup_i(
|
||||
&self,
|
||||
removed_validators: &[<Ristretto as Ciphersuite>::G],
|
||||
i: Participant,
|
||||
) -> Option<<Ristretto as Ciphersuite>::G> {
|
||||
for (validator, _) in &self.validators {
|
||||
if self.i(*validator).map_or(false, |range| range.contains(&i)) {
|
||||
if self.i(removed_validators, *validator).map_or(false, |range| range.contains(&i)) {
|
||||
return Some(*validator);
|
||||
}
|
||||
}
|
||||
@@ -117,8 +154,4 @@ impl TributarySpec {
|
||||
pub fn validators(&self) -> Vec<(<Ristretto as Ciphersuite>::G, u64)> {
|
||||
self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect()
|
||||
}
|
||||
|
||||
pub fn evrf_public_keys(&self) -> Vec<([u8; 32], Vec<u8>)> {
|
||||
self.evrf_public_keys.clone()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,11 +7,13 @@ use rand_core::{RngCore, CryptoRng};
|
||||
use blake2::{Digest, Blake2s256};
|
||||
use transcript::{Transcript, RecommendedTranscript};
|
||||
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{
|
||||
group::{ff::Field, GroupEncoding},
|
||||
Ciphersuite, Ristretto,
|
||||
Ciphersuite,
|
||||
};
|
||||
use schnorr::SchnorrSignature;
|
||||
use frost::Participant;
|
||||
|
||||
use scale::{Encode, Decode};
|
||||
use processor_messages::coordinator::SubstrateSignableId;
|
||||
@@ -129,26 +131,32 @@ impl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> SignData<Id> {
|
||||
|
||||
#[derive(Clone, PartialEq, Eq)]
|
||||
pub enum Transaction {
|
||||
RemoveParticipant {
|
||||
RemoveParticipantDueToDkg {
|
||||
participant: <Ristretto as Ciphersuite>::G,
|
||||
signed: Signed,
|
||||
},
|
||||
|
||||
DkgParticipation {
|
||||
participation: Vec<u8>,
|
||||
DkgCommitments {
|
||||
attempt: u32,
|
||||
commitments: Vec<Vec<u8>>,
|
||||
signed: Signed,
|
||||
},
|
||||
DkgConfirmationNonces {
|
||||
// The confirmation attempt
|
||||
DkgShares {
|
||||
attempt: u32,
|
||||
// The nonces for DKG confirmation attempt #attempt
|
||||
// Sending Participant, Receiving Participant, Share
|
||||
shares: Vec<Vec<Vec<u8>>>,
|
||||
confirmation_nonces: [u8; 64],
|
||||
signed: Signed,
|
||||
},
|
||||
DkgConfirmationShare {
|
||||
// The confirmation attempt
|
||||
InvalidDkgShare {
|
||||
attempt: u32,
|
||||
accuser: Participant,
|
||||
faulty: Participant,
|
||||
blame: Option<Vec<u8>>,
|
||||
signed: Signed,
|
||||
},
|
||||
DkgConfirmed {
|
||||
attempt: u32,
|
||||
// The share for DKG confirmation attempt #attempt
|
||||
confirmation_share: [u8; 32],
|
||||
signed: Signed,
|
||||
},
|
||||
@@ -190,22 +198,29 @@ pub enum Transaction {
|
||||
impl Debug for Transaction {
|
||||
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
|
||||
match self {
|
||||
Transaction::RemoveParticipant { participant, signed } => fmt
|
||||
.debug_struct("Transaction::RemoveParticipant")
|
||||
Transaction::RemoveParticipantDueToDkg { participant, signed } => fmt
|
||||
.debug_struct("Transaction::RemoveParticipantDueToDkg")
|
||||
.field("participant", &hex::encode(participant.to_bytes()))
|
||||
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
||||
.finish_non_exhaustive(),
|
||||
Transaction::DkgParticipation { signed, .. } => fmt
|
||||
.debug_struct("Transaction::DkgParticipation")
|
||||
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
||||
.finish_non_exhaustive(),
|
||||
Transaction::DkgConfirmationNonces { attempt, signed, .. } => fmt
|
||||
.debug_struct("Transaction::DkgConfirmationNonces")
|
||||
Transaction::DkgCommitments { attempt, commitments: _, signed } => fmt
|
||||
.debug_struct("Transaction::DkgCommitments")
|
||||
.field("attempt", attempt)
|
||||
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
||||
.finish_non_exhaustive(),
|
||||
Transaction::DkgConfirmationShare { attempt, signed, .. } => fmt
|
||||
.debug_struct("Transaction::DkgConfirmationShare")
|
||||
Transaction::DkgShares { attempt, signed, .. } => fmt
|
||||
.debug_struct("Transaction::DkgShares")
|
||||
.field("attempt", attempt)
|
||||
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
||||
.finish_non_exhaustive(),
|
||||
Transaction::InvalidDkgShare { attempt, accuser, faulty, .. } => fmt
|
||||
.debug_struct("Transaction::InvalidDkgShare")
|
||||
.field("attempt", attempt)
|
||||
.field("accuser", accuser)
|
||||
.field("faulty", faulty)
|
||||
.finish_non_exhaustive(),
|
||||
Transaction::DkgConfirmed { attempt, confirmation_share: _, signed } => fmt
|
||||
.debug_struct("Transaction::DkgConfirmed")
|
||||
.field("attempt", attempt)
|
||||
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
||||
.finish_non_exhaustive(),
|
||||
@@ -247,32 +262,43 @@ impl ReadWrite for Transaction {
|
||||
reader.read_exact(&mut kind)?;
|
||||
|
||||
match kind[0] {
|
||||
0 => Ok(Transaction::RemoveParticipant {
|
||||
0 => Ok(Transaction::RemoveParticipantDueToDkg {
|
||||
participant: Ristretto::read_G(reader)?,
|
||||
signed: Signed::read_without_nonce(reader, 0)?,
|
||||
}),
|
||||
|
||||
1 => {
|
||||
let participation = {
|
||||
let mut participation_len = [0; 4];
|
||||
reader.read_exact(&mut participation_len)?;
|
||||
let participation_len = u32::from_le_bytes(participation_len);
|
||||
let mut attempt = [0; 4];
|
||||
reader.read_exact(&mut attempt)?;
|
||||
let attempt = u32::from_le_bytes(attempt);
|
||||
|
||||
if participation_len > u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() {
|
||||
let commitments = {
|
||||
let mut commitments_len = [0; 1];
|
||||
reader.read_exact(&mut commitments_len)?;
|
||||
let commitments_len = usize::from(commitments_len[0]);
|
||||
if commitments_len == 0 {
|
||||
Err(io::Error::other("zero commitments in DkgCommitments"))?;
|
||||
}
|
||||
|
||||
let mut each_commitments_len = [0; 2];
|
||||
reader.read_exact(&mut each_commitments_len)?;
|
||||
let each_commitments_len = usize::from(u16::from_le_bytes(each_commitments_len));
|
||||
if (commitments_len * each_commitments_len) > TRANSACTION_SIZE_LIMIT {
|
||||
Err(io::Error::other(
|
||||
"participation present in transaction exceeded transaction size limit",
|
||||
"commitments present in transaction exceeded transaction size limit",
|
||||
))?;
|
||||
}
|
||||
let participation_len = usize::try_from(participation_len).unwrap();
|
||||
|
||||
let mut participation = vec![0; participation_len];
|
||||
reader.read_exact(&mut participation)?;
|
||||
participation
|
||||
let mut commitments = vec![vec![]; commitments_len];
|
||||
for commitments in &mut commitments {
|
||||
*commitments = vec![0; each_commitments_len];
|
||||
reader.read_exact(commitments)?;
|
||||
}
|
||||
commitments
|
||||
};
|
||||
|
||||
let signed = Signed::read_without_nonce(reader, 0)?;
|
||||
|
||||
Ok(Transaction::DkgParticipation { participation, signed })
|
||||
Ok(Transaction::DkgCommitments { attempt, commitments, signed })
|
||||
}
|
||||
|
||||
2 => {
|
||||
@@ -280,12 +306,36 @@ impl ReadWrite for Transaction {
|
||||
reader.read_exact(&mut attempt)?;
|
||||
let attempt = u32::from_le_bytes(attempt);
|
||||
|
||||
let shares = {
|
||||
let mut share_quantity = [0; 1];
|
||||
reader.read_exact(&mut share_quantity)?;
|
||||
|
||||
let mut key_share_quantity = [0; 1];
|
||||
reader.read_exact(&mut key_share_quantity)?;
|
||||
|
||||
let mut share_len = [0; 2];
|
||||
reader.read_exact(&mut share_len)?;
|
||||
let share_len = usize::from(u16::from_le_bytes(share_len));
|
||||
|
||||
let mut all_shares = vec![];
|
||||
for _ in 0 .. share_quantity[0] {
|
||||
let mut shares = vec![];
|
||||
for _ in 0 .. key_share_quantity[0] {
|
||||
let mut share = vec![0; share_len];
|
||||
reader.read_exact(&mut share)?;
|
||||
shares.push(share);
|
||||
}
|
||||
all_shares.push(shares);
|
||||
}
|
||||
all_shares
|
||||
};
|
||||
|
||||
let mut confirmation_nonces = [0; 64];
|
||||
reader.read_exact(&mut confirmation_nonces)?;
|
||||
|
||||
let signed = Signed::read_without_nonce(reader, 0)?;
|
||||
let signed = Signed::read_without_nonce(reader, 1)?;
|
||||
|
||||
Ok(Transaction::DkgConfirmationNonces { attempt, confirmation_nonces, signed })
|
||||
Ok(Transaction::DkgShares { attempt, shares, confirmation_nonces, signed })
|
||||
}
|
||||
|
||||
3 => {
|
||||
@@ -293,21 +343,53 @@ impl ReadWrite for Transaction {
|
||||
reader.read_exact(&mut attempt)?;
|
||||
let attempt = u32::from_le_bytes(attempt);
|
||||
|
||||
let mut confirmation_share = [0; 32];
|
||||
reader.read_exact(&mut confirmation_share)?;
|
||||
let mut accuser = [0; 2];
|
||||
reader.read_exact(&mut accuser)?;
|
||||
let accuser = Participant::new(u16::from_le_bytes(accuser))
|
||||
.ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?;
|
||||
|
||||
let signed = Signed::read_without_nonce(reader, 1)?;
|
||||
let mut faulty = [0; 2];
|
||||
reader.read_exact(&mut faulty)?;
|
||||
let faulty = Participant::new(u16::from_le_bytes(faulty))
|
||||
.ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?;
|
||||
|
||||
Ok(Transaction::DkgConfirmationShare { attempt, confirmation_share, signed })
|
||||
let mut blame_len = [0; 2];
|
||||
reader.read_exact(&mut blame_len)?;
|
||||
let mut blame = vec![0; u16::from_le_bytes(blame_len).into()];
|
||||
reader.read_exact(&mut blame)?;
|
||||
|
||||
// This shares a nonce with DkgConfirmed as only one is expected
|
||||
let signed = Signed::read_without_nonce(reader, 2)?;
|
||||
|
||||
Ok(Transaction::InvalidDkgShare {
|
||||
attempt,
|
||||
accuser,
|
||||
faulty,
|
||||
blame: Some(blame).filter(|blame| !blame.is_empty()),
|
||||
signed,
|
||||
})
|
||||
}
|
||||
|
||||
4 => {
|
||||
let mut attempt = [0; 4];
|
||||
reader.read_exact(&mut attempt)?;
|
||||
let attempt = u32::from_le_bytes(attempt);
|
||||
|
||||
let mut confirmation_share = [0; 32];
|
||||
reader.read_exact(&mut confirmation_share)?;
|
||||
|
||||
let signed = Signed::read_without_nonce(reader, 2)?;
|
||||
|
||||
Ok(Transaction::DkgConfirmed { attempt, confirmation_share, signed })
|
||||
}
|
||||
|
||||
5 => {
|
||||
let mut block = [0; 32];
|
||||
reader.read_exact(&mut block)?;
|
||||
Ok(Transaction::CosignSubstrateBlock(block))
|
||||
}
|
||||
|
||||
5 => {
|
||||
6 => {
|
||||
let mut block = [0; 32];
|
||||
reader.read_exact(&mut block)?;
|
||||
let mut batch = [0; 4];
|
||||
@@ -315,16 +397,16 @@ impl ReadWrite for Transaction {
|
||||
Ok(Transaction::Batch { block, batch: u32::from_le_bytes(batch) })
|
||||
}
|
||||
|
||||
6 => {
|
||||
7 => {
|
||||
let mut block = [0; 8];
|
||||
reader.read_exact(&mut block)?;
|
||||
Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block)))
|
||||
}
|
||||
|
||||
7 => SignData::read(reader).map(Transaction::SubstrateSign),
|
||||
8 => SignData::read(reader).map(Transaction::Sign),
|
||||
8 => SignData::read(reader).map(Transaction::SubstrateSign),
|
||||
9 => SignData::read(reader).map(Transaction::Sign),
|
||||
|
||||
9 => {
|
||||
10 => {
|
||||
let mut plan = [0; 32];
|
||||
reader.read_exact(&mut plan)?;
|
||||
|
||||
@@ -339,7 +421,7 @@ impl ReadWrite for Transaction {
|
||||
Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature })
|
||||
}
|
||||
|
||||
10 => {
|
||||
11 => {
|
||||
let mut len = [0];
|
||||
reader.read_exact(&mut len)?;
|
||||
let len = len[0];
|
||||
@@ -364,59 +446,109 @@ impl ReadWrite for Transaction {
|
||||
|
||||
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
match self {
|
||||
Transaction::RemoveParticipant { participant, signed } => {
|
||||
Transaction::RemoveParticipantDueToDkg { participant, signed } => {
|
||||
writer.write_all(&[0])?;
|
||||
writer.write_all(&participant.to_bytes())?;
|
||||
signed.write_without_nonce(writer)
|
||||
}
|
||||
|
||||
Transaction::DkgParticipation { participation, signed } => {
|
||||
Transaction::DkgCommitments { attempt, commitments, signed } => {
|
||||
writer.write_all(&[1])?;
|
||||
writer.write_all(&u32::try_from(participation.len()).unwrap().to_le_bytes())?;
|
||||
writer.write_all(participation)?;
|
||||
writer.write_all(&attempt.to_le_bytes())?;
|
||||
if commitments.is_empty() {
|
||||
Err(io::Error::other("zero commitments in DkgCommitments"))?
|
||||
}
|
||||
writer.write_all(&[u8::try_from(commitments.len()).unwrap()])?;
|
||||
for commitments_i in commitments {
|
||||
if commitments_i.len() != commitments[0].len() {
|
||||
Err(io::Error::other("commitments of differing sizes in DkgCommitments"))?
|
||||
}
|
||||
}
|
||||
writer.write_all(&u16::try_from(commitments[0].len()).unwrap().to_le_bytes())?;
|
||||
for commitments in commitments {
|
||||
writer.write_all(commitments)?;
|
||||
}
|
||||
signed.write_without_nonce(writer)
|
||||
}
|
||||
|
||||
Transaction::DkgConfirmationNonces { attempt, confirmation_nonces, signed } => {
|
||||
Transaction::DkgShares { attempt, shares, confirmation_nonces, signed } => {
|
||||
writer.write_all(&[2])?;
|
||||
writer.write_all(&attempt.to_le_bytes())?;
|
||||
|
||||
// `shares` is a Vec which is supposed to map to a HashMap<Participant, Vec<u8>>. Since we
|
||||
// bound participants to 150, this conversion is safe if a valid in-memory transaction.
|
||||
writer.write_all(&[u8::try_from(shares.len()).unwrap()])?;
|
||||
// This assumes at least one share is being sent to another party
|
||||
writer.write_all(&[u8::try_from(shares[0].len()).unwrap()])?;
|
||||
let share_len = shares[0][0].len();
|
||||
// For BLS12-381 G2, this would be:
|
||||
// - A 32-byte share
|
||||
// - A 96-byte ephemeral key
|
||||
// - A 128-byte signature
|
||||
// Hence why this has to be u16
|
||||
writer.write_all(&u16::try_from(share_len).unwrap().to_le_bytes())?;
|
||||
|
||||
for these_shares in shares {
|
||||
assert_eq!(these_shares.len(), shares[0].len(), "amount of sent shares was variable");
|
||||
for share in these_shares {
|
||||
assert_eq!(share.len(), share_len, "sent shares were of variable length");
|
||||
writer.write_all(share)?;
|
||||
}
|
||||
}
|
||||
|
||||
writer.write_all(confirmation_nonces)?;
|
||||
signed.write_without_nonce(writer)
|
||||
}
|
||||
|
||||
Transaction::DkgConfirmationShare { attempt, confirmation_share, signed } => {
|
||||
Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => {
|
||||
writer.write_all(&[3])?;
|
||||
writer.write_all(&attempt.to_le_bytes())?;
|
||||
writer.write_all(&u16::from(*accuser).to_le_bytes())?;
|
||||
writer.write_all(&u16::from(*faulty).to_le_bytes())?;
|
||||
|
||||
// Flattens Some(vec![]) to None on the expectation no actual blame will be 0-length
|
||||
assert!(blame.as_ref().map_or(1, Vec::len) != 0);
|
||||
let blame_len =
|
||||
u16::try_from(blame.as_ref().unwrap_or(&vec![]).len()).expect("blame exceeded 64 KB");
|
||||
writer.write_all(&blame_len.to_le_bytes())?;
|
||||
writer.write_all(blame.as_ref().unwrap_or(&vec![]))?;
|
||||
|
||||
signed.write_without_nonce(writer)
|
||||
}
|
||||
|
||||
Transaction::DkgConfirmed { attempt, confirmation_share, signed } => {
|
||||
writer.write_all(&[4])?;
|
||||
writer.write_all(&attempt.to_le_bytes())?;
|
||||
writer.write_all(confirmation_share)?;
|
||||
signed.write_without_nonce(writer)
|
||||
}
|
||||
|
||||
Transaction::CosignSubstrateBlock(block) => {
|
||||
writer.write_all(&[4])?;
|
||||
writer.write_all(&[5])?;
|
||||
writer.write_all(block)
|
||||
}
|
||||
|
||||
Transaction::Batch { block, batch } => {
|
||||
writer.write_all(&[5])?;
|
||||
writer.write_all(&[6])?;
|
||||
writer.write_all(block)?;
|
||||
writer.write_all(&batch.to_le_bytes())
|
||||
}
|
||||
|
||||
Transaction::SubstrateBlock(block) => {
|
||||
writer.write_all(&[6])?;
|
||||
writer.write_all(&[7])?;
|
||||
writer.write_all(&block.to_le_bytes())
|
||||
}
|
||||
|
||||
Transaction::SubstrateSign(data) => {
|
||||
writer.write_all(&[7])?;
|
||||
data.write(writer)
|
||||
}
|
||||
Transaction::Sign(data) => {
|
||||
writer.write_all(&[8])?;
|
||||
data.write(writer)
|
||||
}
|
||||
Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => {
|
||||
Transaction::Sign(data) => {
|
||||
writer.write_all(&[9])?;
|
||||
data.write(writer)
|
||||
}
|
||||
Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => {
|
||||
writer.write_all(&[10])?;
|
||||
writer.write_all(plan)?;
|
||||
writer
|
||||
.write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceed 255 bytes")])?;
|
||||
@@ -425,7 +557,7 @@ impl ReadWrite for Transaction {
|
||||
signature.write(writer)
|
||||
}
|
||||
Transaction::SlashReport(points, signed) => {
|
||||
writer.write_all(&[10])?;
|
||||
writer.write_all(&[11])?;
|
||||
writer.write_all(&[u8::try_from(points.len()).unwrap()])?;
|
||||
for points in points {
|
||||
writer.write_all(&points.to_le_bytes())?;
|
||||
@@ -439,16 +571,15 @@ impl ReadWrite for Transaction {
|
||||
impl TransactionTrait for Transaction {
|
||||
fn kind(&self) -> TransactionKind<'_> {
|
||||
match self {
|
||||
Transaction::RemoveParticipant { participant, signed } => {
|
||||
Transaction::RemoveParticipantDueToDkg { participant, signed } => {
|
||||
TransactionKind::Signed((b"remove", participant.to_bytes()).encode(), signed)
|
||||
}
|
||||
|
||||
Transaction::DkgParticipation { signed, .. } => {
|
||||
TransactionKind::Signed(b"dkg".to_vec(), signed)
|
||||
}
|
||||
Transaction::DkgConfirmationNonces { attempt, signed, .. } |
|
||||
Transaction::DkgConfirmationShare { attempt, signed, .. } => {
|
||||
TransactionKind::Signed((b"dkg_confirmation", attempt).encode(), signed)
|
||||
Transaction::DkgCommitments { attempt, commitments: _, signed } |
|
||||
Transaction::DkgShares { attempt, signed, .. } |
|
||||
Transaction::InvalidDkgShare { attempt, signed, .. } |
|
||||
Transaction::DkgConfirmed { attempt, signed, .. } => {
|
||||
TransactionKind::Signed((b"dkg", attempt).encode(), signed)
|
||||
}
|
||||
|
||||
Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"),
|
||||
@@ -515,14 +646,11 @@ impl Transaction {
|
||||
fn signed(tx: &mut Transaction) -> (u32, &mut Signed) {
|
||||
#[allow(clippy::match_same_arms)] // Doesn't make semantic sense here
|
||||
let nonce = match tx {
|
||||
Transaction::RemoveParticipant { .. } => 0,
|
||||
Transaction::RemoveParticipantDueToDkg { .. } => 0,
|
||||
|
||||
Transaction::DkgParticipation { .. } => 0,
|
||||
// Uses a nonce of 0 as it has an internal attempt counter we distinguish by
|
||||
Transaction::DkgConfirmationNonces { .. } => 0,
|
||||
// Uses a nonce of 1 due to internal attempt counter and due to following
|
||||
// DkgConfirmationNonces
|
||||
Transaction::DkgConfirmationShare { .. } => 1,
|
||||
Transaction::DkgCommitments { .. } => 0,
|
||||
Transaction::DkgShares { .. } => 1,
|
||||
Transaction::InvalidDkgShare { .. } | Transaction::DkgConfirmed { .. } => 2,
|
||||
|
||||
Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
|
||||
|
||||
@@ -541,10 +669,11 @@ impl Transaction {
|
||||
nonce,
|
||||
#[allow(clippy::match_same_arms)]
|
||||
match tx {
|
||||
Transaction::RemoveParticipant { ref mut signed, .. } |
|
||||
Transaction::DkgParticipation { ref mut signed, .. } |
|
||||
Transaction::DkgConfirmationNonces { ref mut signed, .. } => signed,
|
||||
Transaction::DkgConfirmationShare { ref mut signed, .. } => signed,
|
||||
Transaction::RemoveParticipantDueToDkg { ref mut signed, .. } |
|
||||
Transaction::DkgCommitments { ref mut signed, .. } |
|
||||
Transaction::DkgShares { ref mut signed, .. } |
|
||||
Transaction::InvalidDkgShare { ref mut signed, .. } |
|
||||
Transaction::DkgConfirmed { ref mut signed, .. } => signed,
|
||||
|
||||
Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
|
||||
|
||||
|
||||
@@ -6,7 +6,6 @@ license = "AGPL-3.0-only"
|
||||
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tributary"
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
edition = "2021"
|
||||
rust-version = "1.81"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
@@ -17,7 +16,7 @@ workspace = true
|
||||
|
||||
[dependencies]
|
||||
async-trait = { version = "0.1", default-features = false }
|
||||
thiserror = { version = "2", default-features = false, features = ["std"] }
|
||||
thiserror = { version = "1", default-features = false }
|
||||
|
||||
subtle = { version = "^2", default-features = false, features = ["std"] }
|
||||
zeroize = { version = "^1.5", default-features = false, features = ["std"] }
|
||||
@@ -28,7 +27,8 @@ rand_chacha = { version = "0.3", default-features = false, features = ["std"] }
|
||||
blake2 = { version = "0.10", default-features = false, features = ["std"] }
|
||||
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] }
|
||||
|
||||
ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] }
|
||||
dalek-ff-group = { path = "../../crypto/dalek-ff-group" }
|
||||
ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", default-features = false, features = ["std"] }
|
||||
schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", default-features = false, features = ["std"] }
|
||||
|
||||
hex = { version = "0.4", default-features = false, features = ["std"] }
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use std::collections::{VecDeque, HashSet};
|
||||
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite};
|
||||
|
||||
use serai_db::{Get, DbTxn, Db};
|
||||
|
||||
|
||||
@@ -5,7 +5,8 @@ use async_trait::async_trait;
|
||||
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use ciphersuite::{Ciphersuite, Ristretto};
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::Ciphersuite;
|
||||
|
||||
use scale::Decode;
|
||||
use futures_channel::mpsc::UnboundedReceiver;
|
||||
@@ -50,17 +51,13 @@ pub(crate) use crate::tendermint::*;
|
||||
pub mod tests;
|
||||
|
||||
/// Size limit for an individual transaction.
|
||||
// This needs to be big enough to participate in a 101-of-150 eVRF DKG with each element taking
|
||||
// `MAX_KEY_LEN`. This also needs to be big enough to pariticpate in signing 520 Bitcoin inputs
|
||||
// with 49 key shares, and signing 120 Monero inputs with 49 key shares.
|
||||
// TODO: Add a test for these properties
|
||||
pub const TRANSACTION_SIZE_LIMIT: usize = 2_000_000;
|
||||
pub const TRANSACTION_SIZE_LIMIT: usize = 3_000_000;
|
||||
/// Amount of transactions a single account may have in the mempool.
|
||||
pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
|
||||
/// Block size limit.
|
||||
// This targets a growth limit of roughly 30 GB a day, under load, in order to prevent a malicious
|
||||
// This targets a growth limit of roughly 45 GB a day, under load, in order to prevent a malicious
|
||||
// participant from flooding disks and causing out of space errors in order processes.
|
||||
pub const BLOCK_SIZE_LIMIT: usize = 2_001_000;
|
||||
pub const BLOCK_SIZE_LIMIT: usize = 3_001_000;
|
||||
|
||||
pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
|
||||
pub(crate) const TRANSACTION_MESSAGE: u8 = 1;
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use ciphersuite::{Ciphersuite, Ristretto};
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::Ciphersuite;
|
||||
|
||||
use serai_db::{DbTxn, Db};
|
||||
|
||||
|
||||
@@ -11,12 +11,13 @@ use rand_chacha::ChaCha12Rng;
|
||||
|
||||
use transcript::{Transcript, RecommendedTranscript};
|
||||
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{
|
||||
group::{
|
||||
GroupEncoding,
|
||||
ff::{Field, PrimeField},
|
||||
},
|
||||
Ciphersuite, Ristretto,
|
||||
Ciphersuite,
|
||||
};
|
||||
use schnorr::{
|
||||
SchnorrSignature,
|
||||
|
||||
@@ -4,7 +4,8 @@ use scale::{Encode, Decode, IoReader};
|
||||
|
||||
use blake2::{Digest, Blake2s256};
|
||||
|
||||
use ciphersuite::{Ciphersuite, Ristretto};
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::Ciphersuite;
|
||||
|
||||
use crate::{
|
||||
transaction::{Transaction, TransactionKind, TransactionError},
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
use std::{sync::Arc, io, collections::HashMap, fmt::Debug};
|
||||
|
||||
use blake2::{Digest, Blake2s256};
|
||||
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{
|
||||
group::{ff::Field, Group},
|
||||
Ciphersuite, Ristretto,
|
||||
Ciphersuite,
|
||||
};
|
||||
use schnorr::SchnorrSignature;
|
||||
|
||||
|
||||
@@ -10,7 +10,8 @@ use rand::rngs::OsRng;
|
||||
|
||||
use blake2::{Digest, Blake2s256};
|
||||
|
||||
use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::ff::Field, Ciphersuite};
|
||||
|
||||
use serai_db::{DbTxn, Db, MemDb};
|
||||
|
||||
|
||||
@@ -3,7 +3,8 @@ use std::{sync::Arc, collections::HashMap};
|
||||
use zeroize::Zeroizing;
|
||||
use rand::{RngCore, rngs::OsRng};
|
||||
|
||||
use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::ff::Field, Ciphersuite};
|
||||
|
||||
use tendermint::ext::Commit;
|
||||
|
||||
|
||||
@@ -6,9 +6,10 @@ use rand::{RngCore, CryptoRng, rngs::OsRng};
|
||||
|
||||
use blake2::{Digest, Blake2s256};
|
||||
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{
|
||||
group::{ff::Field, Group},
|
||||
Ciphersuite, Ristretto,
|
||||
Ciphersuite,
|
||||
};
|
||||
use schnorr::SchnorrSignature;
|
||||
|
||||
|
||||
@@ -2,7 +2,8 @@ use rand::rngs::OsRng;
|
||||
|
||||
use blake2::{Digest, Blake2s256};
|
||||
|
||||
use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::ff::Field, Ciphersuite};
|
||||
|
||||
use crate::{
|
||||
ReadWrite,
|
||||
|
||||
@@ -3,7 +3,8 @@ use std::sync::Arc;
|
||||
use zeroize::Zeroizing;
|
||||
use rand::{RngCore, rngs::OsRng};
|
||||
|
||||
use ciphersuite::{Ristretto, Ciphersuite, group::ff::Field};
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{Ciphersuite, group::ff::Field};
|
||||
|
||||
use scale::Encode;
|
||||
|
||||
|
||||
@@ -6,9 +6,10 @@ use thiserror::Error;
|
||||
|
||||
use blake2::{Digest, Blake2b512};
|
||||
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{
|
||||
group::{Group, GroupEncoding},
|
||||
Ciphersuite, Ristretto,
|
||||
Ciphersuite,
|
||||
};
|
||||
use schnorr::SchnorrSignature;
|
||||
|
||||
|
||||
@@ -6,7 +6,6 @@ license = "MIT"
|
||||
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tendermint"
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
edition = "2021"
|
||||
rust-version = "1.81"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
@@ -17,7 +16,7 @@ workspace = true
|
||||
|
||||
[dependencies]
|
||||
async-trait = { version = "0.1", default-features = false }
|
||||
thiserror = { version = "2", default-features = false, features = ["std"] }
|
||||
thiserror = { version = "1", default-features = false }
|
||||
|
||||
hex = { version = "0.4", default-features = false, features = ["std"] }
|
||||
log = { version = "0.4", default-features = false, features = ["std"] }
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
#![expect(clippy::cast_possible_truncation)]
|
||||
|
||||
use core::fmt::Debug;
|
||||
|
||||
use std::{
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
[package]
|
||||
name = "ciphersuite"
|
||||
version = "0.4.1"
|
||||
version = "0.4.2"
|
||||
description = "Ciphersuites built around ff/group"
|
||||
license = "MIT"
|
||||
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ciphersuite"
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
keywords = ["ciphersuite", "ff", "group"]
|
||||
edition = "2021"
|
||||
rust-version = "1.80"
|
||||
rust-version = "1.66"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
@@ -24,22 +24,12 @@ rand_core = { version = "0.6", default-features = false }
|
||||
zeroize = { version = "^1.5", default-features = false, features = ["derive"] }
|
||||
subtle = { version = "^2.4", default-features = false }
|
||||
|
||||
digest = { version = "0.10", default-features = false }
|
||||
digest = { version = "0.10", default-features = false, features = ["core-api"] }
|
||||
transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false }
|
||||
sha2 = { version = "0.10", default-features = false, optional = true }
|
||||
sha3 = { version = "0.10", default-features = false, optional = true }
|
||||
|
||||
ff = { version = "0.13", default-features = false, features = ["bits"] }
|
||||
group = { version = "0.13", default-features = false }
|
||||
|
||||
dalek-ff-group = { path = "../dalek-ff-group", version = "0.4", default-features = false, optional = true }
|
||||
|
||||
elliptic-curve = { version = "0.13", default-features = false, features = ["hash2curve"], optional = true }
|
||||
p256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"], optional = true }
|
||||
k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"], optional = true }
|
||||
|
||||
minimal-ed448 = { path = "../ed448", version = "0.4", default-features = false, optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
hex = { version = "0.4", default-features = false, features = ["std"] }
|
||||
|
||||
@@ -48,7 +38,7 @@ rand_core = { version = "0.6", default-features = false, features = ["std"] }
|
||||
ff-group-tests = { version = "0.13", path = "../ff-group-tests" }
|
||||
|
||||
[features]
|
||||
alloc = ["std-shims"]
|
||||
alloc = ["std-shims", "ff/alloc"]
|
||||
std = [
|
||||
"std-shims/std",
|
||||
|
||||
@@ -59,27 +49,8 @@ std = [
|
||||
|
||||
"digest/std",
|
||||
"transcript/std",
|
||||
"sha2?/std",
|
||||
"sha3?/std",
|
||||
|
||||
"ff/std",
|
||||
|
||||
"dalek-ff-group?/std",
|
||||
|
||||
"elliptic-curve?/std",
|
||||
"p256?/std",
|
||||
"k256?/std",
|
||||
"minimal-ed448?/std",
|
||||
]
|
||||
|
||||
dalek = ["sha2", "dalek-ff-group"]
|
||||
ed25519 = ["dalek"]
|
||||
ristretto = ["dalek"]
|
||||
|
||||
kp256 = ["sha2", "elliptic-curve"]
|
||||
p256 = ["kp256", "dep:p256"]
|
||||
secp256k1 = ["kp256", "k256"]
|
||||
|
||||
ed448 = ["sha3", "minimal-ed448"]
|
||||
|
||||
default = ["std"]
|
||||
|
||||
@@ -21,6 +21,8 @@ Their `hash_to_F` is the
|
||||
[IETF's hash to curve](https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html),
|
||||
yet applied to their scalar field.
|
||||
|
||||
Please see the [`ciphersuite-kp256`](https://docs.rs/ciphersuite-kp256) crate for more info.
|
||||
|
||||
### Ed25519/Ristretto
|
||||
|
||||
Ed25519/Ristretto are offered via
|
||||
@@ -33,6 +35,8 @@ the draft
|
||||
[RFC-RISTRETTO](https://www.ietf.org/archive/id/draft-irtf-cfrg-ristretto255-decaf448-05.html).
|
||||
The domain-separation tag is naively prefixed to the message.
|
||||
|
||||
Please see the [`dalek-ff-group`](https://docs.rs/dalek-ff-group) crate for more info.
|
||||
|
||||
### Ed448
|
||||
|
||||
Ed448 is offered via [minimal-ed448](https://crates.io/crates/minimal-ed448), an
|
||||
@@ -42,3 +46,5 @@ to its prime-order subgroup.
|
||||
Its `hash_to_F` is the wide reduction of SHAKE256, with a 114-byte output, as
|
||||
used in [RFC-8032](https://www.rfc-editor.org/rfc/rfc8032). The
|
||||
domain-separation tag is naively prefixed to the message.
|
||||
|
||||
Please see the [`minimal-ed448`](https://docs.rs/minimal-ed448) crate for more info.
|
||||
|
||||
55
crypto/ciphersuite/kp256/Cargo.toml
Normal file
55
crypto/ciphersuite/kp256/Cargo.toml
Normal file
@@ -0,0 +1,55 @@
|
||||
[package]
|
||||
name = "ciphersuite-kp256"
|
||||
version = "0.4.0"
|
||||
description = "Ciphersuites built around ff/group"
|
||||
license = "MIT"
|
||||
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ciphersuite/kp256"
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
keywords = ["ciphersuite", "ff", "group"]
|
||||
edition = "2021"
|
||||
rust-version = "1.66"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
rustdoc-args = ["--cfg", "docsrs"]
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
rand_core = { version = "0.6", default-features = false }
|
||||
|
||||
zeroize = { version = "^1.5", default-features = false, features = ["derive"] }
|
||||
|
||||
sha2 = { version = "0.10", default-features = false }
|
||||
|
||||
elliptic-curve = { version = "0.13", default-features = false, features = ["hash2curve"] }
|
||||
p256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"] }
|
||||
k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"] }
|
||||
|
||||
ciphersuite = { path = "../", version = "0.4", default-features = false }
|
||||
|
||||
[dev-dependencies]
|
||||
hex = { version = "0.4", default-features = false, features = ["std"] }
|
||||
|
||||
rand_core = { version = "0.6", default-features = false, features = ["std"] }
|
||||
|
||||
ff-group-tests = { version = "0.13", path = "../../ff-group-tests" }
|
||||
|
||||
[features]
|
||||
alloc = ["ciphersuite/alloc"]
|
||||
std = [
|
||||
"rand_core/std",
|
||||
|
||||
"zeroize/std",
|
||||
|
||||
"sha2/std",
|
||||
|
||||
"elliptic-curve/std",
|
||||
"p256/std",
|
||||
"k256/std",
|
||||
|
||||
"ciphersuite/std",
|
||||
]
|
||||
|
||||
default = ["std"]
|
||||
@@ -1,6 +1,6 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2024 Luke Parker
|
||||
Copyright (c) 2021-2023 Luke Parker
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
3
crypto/ciphersuite/kp256/README.md
Normal file
3
crypto/ciphersuite/kp256/README.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# Ciphersuite {k, p}256
|
||||
|
||||
SECP256k1 and P-256 Ciphersuites around k256 and p256.
|
||||
@@ -1,16 +1,17 @@
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
#![cfg_attr(not(feature = "std"), no_std)]
|
||||
|
||||
use zeroize::Zeroize;
|
||||
|
||||
use sha2::Sha256;
|
||||
|
||||
use group::ff::PrimeField;
|
||||
|
||||
use elliptic_curve::{
|
||||
generic_array::GenericArray,
|
||||
bigint::{NonZero, CheckedAdd, Encoding, U384},
|
||||
hash2curve::{Expander, ExpandMsg, ExpandMsgXmd},
|
||||
};
|
||||
|
||||
use crate::Ciphersuite;
|
||||
use ciphersuite::{group::ff::PrimeField, Ciphersuite};
|
||||
|
||||
macro_rules! kp_curve {
|
||||
(
|
||||
@@ -107,12 +108,9 @@ fn test_oversize_dst<C: Ciphersuite>() {
|
||||
/// Ciphersuite for Secp256k1.
|
||||
///
|
||||
/// hash_to_F is implemented via the IETF draft for hash to curve's hash_to_field (v16).
|
||||
#[cfg(feature = "secp256k1")]
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
|
||||
pub struct Secp256k1;
|
||||
#[cfg(feature = "secp256k1")]
|
||||
kp_curve!("secp256k1", k256, Secp256k1, b"secp256k1");
|
||||
#[cfg(feature = "secp256k1")]
|
||||
#[test]
|
||||
fn test_secp256k1() {
|
||||
ff_group_tests::group::test_prime_group_bits::<_, k256::ProjectivePoint>(&mut rand_core::OsRng);
|
||||
@@ -145,12 +143,9 @@ fn test_secp256k1() {
|
||||
/// Ciphersuite for P-256.
|
||||
///
|
||||
/// hash_to_F is implemented via the IETF draft for hash to curve's hash_to_field (v16).
|
||||
#[cfg(feature = "p256")]
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
|
||||
pub struct P256;
|
||||
#[cfg(feature = "p256")]
|
||||
kp_curve!("p256", p256, P256, b"P-256");
|
||||
#[cfg(feature = "p256")]
|
||||
#[test]
|
||||
fn test_p256() {
|
||||
ff_group_tests::group::test_prime_group_bits::<_, p256::ProjectivePoint>(&mut rand_core::OsRng);
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
Ciphersuites for elliptic curves premised on ff/group.
|
||||
|
||||
This library, except for the not recommended Ed448 ciphersuite, was
|
||||
This library was
|
||||
[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),
|
||||
culminating in commit
|
||||
[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
#![doc = include_str!("lib.md")]
|
||||
#![cfg_attr(not(feature = "std"), no_std)]
|
||||
|
||||
use core::fmt::Debug;
|
||||
#[cfg(any(feature = "alloc", feature = "std"))]
|
||||
#[allow(unused_imports)]
|
||||
use std_shims::prelude::*;
|
||||
#[cfg(any(feature = "alloc", feature = "std"))]
|
||||
use std_shims::io::{self, Read};
|
||||
|
||||
use rand_core::{RngCore, CryptoRng};
|
||||
@@ -23,25 +26,6 @@ use group::{
|
||||
#[cfg(any(feature = "alloc", feature = "std"))]
|
||||
use group::GroupEncoding;
|
||||
|
||||
#[cfg(feature = "dalek")]
|
||||
mod dalek;
|
||||
#[cfg(feature = "ristretto")]
|
||||
pub use dalek::Ristretto;
|
||||
#[cfg(feature = "ed25519")]
|
||||
pub use dalek::Ed25519;
|
||||
|
||||
#[cfg(feature = "kp256")]
|
||||
mod kp256;
|
||||
#[cfg(feature = "secp256k1")]
|
||||
pub use kp256::Secp256k1;
|
||||
#[cfg(feature = "p256")]
|
||||
pub use kp256::P256;
|
||||
|
||||
#[cfg(feature = "ed448")]
|
||||
mod ed448;
|
||||
#[cfg(feature = "ed448")]
|
||||
pub use ed448::*;
|
||||
|
||||
/// Unified trait defining a ciphersuite around an elliptic curve.
|
||||
pub trait Ciphersuite:
|
||||
'static + Send + Sync + Clone + Copy + PartialEq + Eq + Debug + Zeroize
|
||||
@@ -99,6 +83,9 @@ pub trait Ciphersuite:
|
||||
}
|
||||
|
||||
/// Read a canonical point from something implementing std::io::Read.
|
||||
///
|
||||
/// The provided implementation is safe so long as `GroupEncoding::to_bytes` always returns a
|
||||
/// canonical serialization.
|
||||
#[cfg(any(feature = "alloc", feature = "std"))]
|
||||
#[allow(non_snake_case)]
|
||||
fn read_G<R: Read>(reader: &mut R) -> io::Result<Self::G> {
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
[package]
|
||||
name = "dalek-ff-group"
|
||||
version = "0.4.1"
|
||||
version = "0.4.4"
|
||||
description = "ff/group bindings around curve25519-dalek"
|
||||
license = "MIT"
|
||||
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dalek-ff-group"
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
keywords = ["curve25519", "ed25519", "ristretto", "dalek", "group"]
|
||||
edition = "2021"
|
||||
rust-version = "1.71"
|
||||
rust-version = "1.65"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
@@ -25,18 +25,22 @@ subtle = { version = "^2.4", default-features = false }
|
||||
rand_core = { version = "0.6", default-features = false }
|
||||
|
||||
digest = { version = "0.10", default-features = false }
|
||||
sha2 = { version = "0.10", default-features = false }
|
||||
|
||||
ff = { version = "0.13", default-features = false, features = ["bits"] }
|
||||
group = { version = "0.13", default-features = false }
|
||||
ciphersuite = { path = "../ciphersuite", default-features = false }
|
||||
|
||||
crypto-bigint = { version = "0.5", default-features = false, features = ["zeroize"] }
|
||||
|
||||
curve25519-dalek = { version = ">= 4.0, < 4.2", default-features = false, features = ["alloc", "zeroize", "digest", "group", "precomputed-tables"] }
|
||||
|
||||
[dev-dependencies]
|
||||
hex = "0.4"
|
||||
rand_core = { version = "0.6", default-features = false, features = ["std"] }
|
||||
ff-group-tests = { path = "../ff-group-tests" }
|
||||
|
||||
[features]
|
||||
std = ["zeroize/std", "subtle/std", "rand_core/std", "digest/std"]
|
||||
alloc = ["zeroize/alloc", "ciphersuite/alloc"]
|
||||
std = ["alloc", "zeroize/std", "subtle/std", "rand_core/std", "digest/std", "sha2/std", "ciphersuite/std"]
|
||||
default = ["std"]
|
||||
|
||||
@@ -3,9 +3,9 @@ use zeroize::Zeroize;
|
||||
use sha2::{Digest, Sha512};
|
||||
|
||||
use group::Group;
|
||||
use dalek_ff_group::Scalar;
|
||||
use crate::Scalar;
|
||||
|
||||
use crate::Ciphersuite;
|
||||
use ciphersuite::Ciphersuite;
|
||||
|
||||
macro_rules! dalek_curve {
|
||||
(
|
||||
@@ -15,7 +15,7 @@ macro_rules! dalek_curve {
|
||||
$Point: ident,
|
||||
$ID: literal
|
||||
) => {
|
||||
use dalek_ff_group::$Point;
|
||||
use crate::$Point;
|
||||
|
||||
impl Ciphersuite for $Ciphersuite {
|
||||
type F = Scalar;
|
||||
@@ -40,12 +40,9 @@ macro_rules! dalek_curve {
|
||||
/// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition
|
||||
/// between the two. This means `dst: b"abc", data: b"def"`, will produce the same scalar as
|
||||
/// `dst: "abcdef", data: b""`. Please use carefully, not letting dsts be substrings of each other.
|
||||
#[cfg(any(test, feature = "ristretto"))]
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
|
||||
pub struct Ristretto;
|
||||
#[cfg(any(test, feature = "ristretto"))]
|
||||
dalek_curve!("ristretto", Ristretto, RistrettoPoint, b"ristretto");
|
||||
#[cfg(any(test, feature = "ristretto"))]
|
||||
#[test]
|
||||
fn test_ristretto() {
|
||||
ff_group_tests::group::test_prime_group_bits::<_, RistrettoPoint>(&mut rand_core::OsRng);
|
||||
@@ -71,12 +68,9 @@ fn test_ristretto() {
|
||||
/// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition
|
||||
/// between the two. This means `dst: b"abc", data: b"def"`, will produce the same scalar as
|
||||
/// `dst: "abcdef", data: b""`. Please use carefully, not letting dsts be substrings of each other.
|
||||
#[cfg(feature = "ed25519")]
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
|
||||
pub struct Ed25519;
|
||||
#[cfg(feature = "ed25519")]
|
||||
dalek_curve!("ed25519", Ed25519, EdwardsPoint, b"edwards25519");
|
||||
#[cfg(feature = "ed25519")]
|
||||
#[test]
|
||||
fn test_ed25519() {
|
||||
ff_group_tests::group::test_prime_group_bits::<_, EdwardsPoint>(&mut rand_core::OsRng);
|
||||
@@ -17,7 +17,7 @@ use crypto_bigint::{
|
||||
impl_modulus,
|
||||
};
|
||||
|
||||
use group::ff::{Field, PrimeField, FieldBits, PrimeFieldBits};
|
||||
use group::ff::{Field, PrimeField, FieldBits, PrimeFieldBits, FromUniformBytes};
|
||||
|
||||
use crate::{u8_from_bool, constant_time, math_op, math};
|
||||
|
||||
@@ -36,6 +36,7 @@ type ResidueType = Residue<FieldModulus, { FieldModulus::LIMBS }>;
|
||||
|
||||
/// A constant-time implementation of the Ed25519 field.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Default, Debug, Zeroize)]
|
||||
#[repr(transparent)]
|
||||
pub struct FieldElement(ResidueType);
|
||||
|
||||
// Square root of -1.
|
||||
@@ -92,7 +93,7 @@ impl Neg for FieldElement {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Neg for &'a FieldElement {
|
||||
impl Neg for &FieldElement {
|
||||
type Output = FieldElement;
|
||||
fn neg(self) -> Self::Output {
|
||||
(*self).neg()
|
||||
@@ -216,10 +217,18 @@ impl PrimeFieldBits for FieldElement {
|
||||
}
|
||||
|
||||
impl FieldElement {
|
||||
/// Interpret the value as a little-endian integer, square it, and reduce it into a FieldElement.
|
||||
pub fn from_square(value: [u8; 32]) -> FieldElement {
|
||||
let value = U256::from_le_bytes(value);
|
||||
FieldElement(reduce(U512::from(value.mul_wide(&value))))
|
||||
/// Create a FieldElement from a `crypto_bigint::U256`.
|
||||
///
|
||||
/// This will reduce the `U256` by the modulus, into a member of the field.
|
||||
pub const fn from_u256(u256: &U256) -> Self {
|
||||
FieldElement(Residue::new(u256))
|
||||
}
|
||||
|
||||
/// Create a `FieldElement` from the reduction of a 512-bit number.
|
||||
///
|
||||
/// The bytes are interpreted in little-endian format.
|
||||
pub fn wide_reduce(value: [u8; 64]) -> Self {
|
||||
FieldElement(reduce(U512::from_le_bytes(value)))
|
||||
}
|
||||
|
||||
/// Perform an exponentiation.
|
||||
@@ -244,7 +253,16 @@ impl FieldElement {
|
||||
res *= res;
|
||||
}
|
||||
}
|
||||
res *= table[usize::from(bits)];
|
||||
|
||||
let mut scale_by = FieldElement::ONE;
|
||||
#[allow(clippy::needless_range_loop)]
|
||||
for i in 0 .. 16 {
|
||||
#[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 16
|
||||
{
|
||||
scale_by = <_>::conditional_select(&scale_by, &table[i], bits.ct_eq(&(i as u8)));
|
||||
}
|
||||
}
|
||||
res *= scale_by;
|
||||
bits = 0;
|
||||
}
|
||||
}
|
||||
@@ -288,6 +306,12 @@ impl FieldElement {
|
||||
}
|
||||
}
|
||||
|
||||
impl FromUniformBytes<64> for FieldElement {
|
||||
fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {
|
||||
Self::wide_reduce(*bytes)
|
||||
}
|
||||
}
|
||||
|
||||
impl Sum<FieldElement> for FieldElement {
|
||||
fn sum<I: Iterator<Item = FieldElement>>(iter: I) -> FieldElement {
|
||||
let mut res = FieldElement::ZERO;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#![allow(deprecated)]
|
||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
#![no_std] // Prevents writing new code, in what should be a simple wrapper, which requires std
|
||||
#![doc = include_str!("../README.md")]
|
||||
#![allow(clippy::redundant_closure_call)]
|
||||
@@ -30,7 +30,7 @@ use dalek::{
|
||||
pub use constants::{ED25519_BASEPOINT_TABLE, RISTRETTO_BASEPOINT_TABLE};
|
||||
|
||||
use group::{
|
||||
ff::{Field, PrimeField, FieldBits, PrimeFieldBits},
|
||||
ff::{Field, PrimeField, FieldBits, PrimeFieldBits, FromUniformBytes},
|
||||
Group, GroupEncoding,
|
||||
prime::PrimeGroup,
|
||||
};
|
||||
@@ -38,13 +38,24 @@ use group::{
|
||||
mod field;
|
||||
pub use field::FieldElement;
|
||||
|
||||
mod ciphersuite;
|
||||
pub use crate::ciphersuite::{Ed25519, Ristretto};
|
||||
|
||||
// Use black_box when possible
|
||||
#[rustversion::since(1.66)]
|
||||
use core::hint::black_box;
|
||||
#[rustversion::before(1.66)]
|
||||
fn black_box<T>(val: T) -> T {
|
||||
val
|
||||
mod black_box {
|
||||
pub(crate) fn black_box<T>(val: T) -> T {
|
||||
#[allow(clippy::incompatible_msrv)]
|
||||
core::hint::black_box(val)
|
||||
}
|
||||
}
|
||||
#[rustversion::before(1.66)]
|
||||
mod black_box {
|
||||
pub(crate) fn black_box<T>(val: T) -> T {
|
||||
val
|
||||
}
|
||||
}
|
||||
use black_box::black_box;
|
||||
|
||||
fn u8_from_bool(bit_ref: &mut bool) -> u8 {
|
||||
let bit_ref = black_box(bit_ref);
|
||||
@@ -208,7 +219,16 @@ impl Scalar {
|
||||
res *= res;
|
||||
}
|
||||
}
|
||||
res *= table[usize::from(bits)];
|
||||
|
||||
let mut scale_by = Scalar::ONE;
|
||||
#[allow(clippy::needless_range_loop)]
|
||||
for i in 0 .. 16 {
|
||||
#[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 16
|
||||
{
|
||||
scale_by = <_>::conditional_select(&scale_by, &table[i], bits.ct_eq(&(i as u8)));
|
||||
}
|
||||
}
|
||||
res *= scale_by;
|
||||
bits = 0;
|
||||
}
|
||||
}
|
||||
@@ -305,6 +325,12 @@ impl PrimeFieldBits for Scalar {
|
||||
}
|
||||
}
|
||||
|
||||
impl FromUniformBytes<64> for Scalar {
|
||||
fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {
|
||||
Self::from_bytes_mod_order_wide(bytes)
|
||||
}
|
||||
}
|
||||
|
||||
impl Sum<Scalar> for Scalar {
|
||||
fn sum<I: Iterator<Item = Scalar>>(iter: I) -> Scalar {
|
||||
Self(DScalar::sum(iter))
|
||||
@@ -342,7 +368,12 @@ macro_rules! dalek_group {
|
||||
$BASEPOINT_POINT: ident,
|
||||
$BASEPOINT_TABLE: ident
|
||||
) => {
|
||||
/// Wrapper around the dalek Point type. For Ed25519, this is restricted to the prime subgroup.
|
||||
/// Wrapper around the dalek Point type.
|
||||
///
|
||||
/// All operations will be restricted to a prime-order subgroup (equivalent to the group itself
|
||||
/// in the case of Ristretto). The exposure of the internal element does allow bypassing this
|
||||
/// however, which may lead to undefined/computationally-unsafe behavior, and is entirely at
|
||||
/// the user's risk.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
|
||||
pub struct $Point(pub $DPoint);
|
||||
deref_borrow!($Point, $DPoint);
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
[package]
|
||||
name = "dkg"
|
||||
version = "0.5.1"
|
||||
version = "0.6.1"
|
||||
description = "Distributed key generation over ff/group"
|
||||
license = "MIT"
|
||||
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg"
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
|
||||
edition = "2021"
|
||||
rust-version = "1.81"
|
||||
rust-version = "1.66"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
@@ -17,82 +17,25 @@ rustdoc-args = ["--cfg", "docsrs"]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive", "alloc"] }
|
||||
|
||||
thiserror = { version = "2", default-features = false }
|
||||
|
||||
rand_core = { version = "0.6", default-features = false }
|
||||
|
||||
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
|
||||
|
||||
std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false }
|
||||
|
||||
borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true }
|
||||
|
||||
transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false, features = ["recommended"] }
|
||||
chacha20 = { version = "0.9", default-features = false, features = ["zeroize"] }
|
||||
|
||||
ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false }
|
||||
multiexp = { path = "../multiexp", version = "0.4", default-features = false }
|
||||
|
||||
schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false }
|
||||
dleq = { path = "../dleq", version = "^0.4.1", default-features = false }
|
||||
|
||||
# eVRF DKG dependencies
|
||||
generic-array = { version = "1", default-features = false, features = ["alloc"], optional = true }
|
||||
blake2 = { version = "0.10", default-features = false, features = ["std"], optional = true }
|
||||
rand_chacha = { version = "0.3", default-features = false, features = ["std"], optional = true }
|
||||
generalized-bulletproofs = { path = "../evrf/generalized-bulletproofs", default-features = false, optional = true }
|
||||
ec-divisors = { path = "../evrf/divisors", default-features = false, optional = true }
|
||||
generalized-bulletproofs-circuit-abstraction = { path = "../evrf/circuit-abstraction", optional = true }
|
||||
generalized-bulletproofs-ec-gadgets = { path = "../evrf/ec-gadgets", optional = true }
|
||||
|
||||
secq256k1 = { path = "../evrf/secq256k1", optional = true }
|
||||
embedwards25519 = { path = "../evrf/embedwards25519", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
|
||||
rand = { version = "0.8", default-features = false, features = ["std"] }
|
||||
ciphersuite = { path = "../ciphersuite", default-features = false, features = ["ristretto"] }
|
||||
generalized-bulletproofs = { path = "../evrf/generalized-bulletproofs", features = ["tests"] }
|
||||
ec-divisors = { path = "../evrf/divisors", features = ["pasta"] }
|
||||
pasta_curves = "0.5"
|
||||
ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false, features = ["alloc"] }
|
||||
|
||||
[features]
|
||||
std = [
|
||||
"thiserror/std",
|
||||
|
||||
"rand_core/std",
|
||||
|
||||
"std-shims/std",
|
||||
|
||||
"borsh?/std",
|
||||
|
||||
"transcript/std",
|
||||
"chacha20/std",
|
||||
|
||||
"ciphersuite/std",
|
||||
"multiexp/std",
|
||||
"multiexp/batch",
|
||||
|
||||
"schnorr/std",
|
||||
"dleq/std",
|
||||
"dleq/serialize"
|
||||
]
|
||||
borsh = ["dep:borsh"]
|
||||
evrf = [
|
||||
"std",
|
||||
|
||||
"dep:generic-array",
|
||||
|
||||
"dep:blake2",
|
||||
"dep:rand_chacha",
|
||||
|
||||
"dep:generalized-bulletproofs",
|
||||
"dep:ec-divisors",
|
||||
"dep:generalized-bulletproofs-circuit-abstraction",
|
||||
"dep:generalized-bulletproofs-ec-gadgets",
|
||||
]
|
||||
evrf-secp256k1 = ["evrf", "ciphersuite/secp256k1", "secq256k1"]
|
||||
evrf-ed25519 = ["evrf", "ciphersuite/ed25519", "embedwards25519"]
|
||||
evrf-ristretto = ["evrf", "ciphersuite/ristretto", "embedwards25519"]
|
||||
tests = ["rand_core/getrandom"]
|
||||
default = ["std"]
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2021-2023 Luke Parker
|
||||
Copyright (c) 2021-2025 Luke Parker
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
||||
@@ -1,16 +1,15 @@
|
||||
# Distributed Key Generation
|
||||
|
||||
A collection of implementations of various distributed key generation protocols.
|
||||
A crate implementing a type for keys, presumably the result of a distributed
|
||||
key generation protocol, and utilities from there.
|
||||
|
||||
All included protocols resolve into the provided `Threshold` types, intended to
|
||||
enable their modularity. Additional utilities around these types, such as
|
||||
promotion from one generator to another, are also provided.
|
||||
This crate used to host implementations of distributed key generation protocols
|
||||
as well (hence the name). Those have been smashed into their own crates, such
|
||||
as [`dkg-musig`](https://docs.rs/dkg-musig) and
|
||||
[`dkg-pedpop`](https://docs.rs/dkg-pedpop).
|
||||
|
||||
Currently, the only included protocol is the two-round protocol from the
|
||||
[FROST paper](https://eprint.iacr.org/2020/852).
|
||||
|
||||
This library was
|
||||
[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),
|
||||
culminating in commit
|
||||
[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).
|
||||
Any subsequent changes have not undergone auditing.
|
||||
Before being smashed, this crate was [audited by Cypher Stack in March 2023](
|
||||
https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf
|
||||
), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](
|
||||
https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06
|
||||
). Any subsequent changes have not undergone auditing.
|
||||
|
||||
36
crypto/dkg/dealer/Cargo.toml
Normal file
36
crypto/dkg/dealer/Cargo.toml
Normal file
@@ -0,0 +1,36 @@
|
||||
[package]
|
||||
name = "dkg-dealer"
|
||||
version = "0.6.0"
|
||||
description = "Produce dkg::ThresholdKeys with a dealer key generation"
|
||||
license = "MIT"
|
||||
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/dealer"
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
|
||||
edition = "2021"
|
||||
rust-version = "1.66"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
rustdoc-args = ["--cfg", "docsrs"]
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
zeroize = { version = "^1.5", default-features = false }
|
||||
rand_core = { version = "0.6", default-features = false }
|
||||
|
||||
std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false }
|
||||
|
||||
ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false }
|
||||
dkg = { path = "../", version = "0.6", default-features = false }
|
||||
|
||||
[features]
|
||||
std = [
|
||||
"zeroize/std",
|
||||
"rand_core/std",
|
||||
"std-shims/std",
|
||||
"ciphersuite/std",
|
||||
"dkg/std",
|
||||
]
|
||||
default = ["std"]
|
||||
@@ -1,6 +1,6 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2024 Luke Parker
|
||||
Copyright (c) 2021-2025 Luke Parker
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user