Mirror of https://github.com/serai-dex/serai.git, synced 2025-12-11 21:49:26 +00:00
Compare commits
156 Commits
b2bd5d3a44...develop
The 156 commits, by abbreviated SHA1:

c24768f922, 5818f1a41c, 1b781b4b57, 63f7e220c0, 7d49366373, 55ed33d2d1, 0066b94d38, 7d54c02ec6,
568324f631, eaa9a0e5a6, 251996c1b0, 98b9cc82a7, f8adfb56ad, 7a790f3a20, a7c77f8b5f, da3095ed15,
758d422595, 9841061b49, 4122a0135f, b63ef32864, 8be03a8fc2, 677a2e5749, 38bda1d586, 2bc2ca6906,
900a6612d7, 17c1d5cd6b, 8a1b56a928, 75964cf6da, d407e35cee, c8ef044acb, ddbc32de4d, e5ccfac19e,
432daae1d1, da3a85efe5, 1e0240123d, f6d4d1b084, 1b37dd2951, f32e0609f1, ca85f9ba0c, cfd1cb3a37,
f2c13a0040, 961f46bc04, 2c4de3bab4, 95c30720d2, ceede14f5c, 5e60ea9718, 153f6f2f2f, 104c0d4492,
7c8f13ab28, cb0deadf9a, cb489f9cef, cc662cb591, a8b8844e3f, 82b543ef75, 72e80c1a3d, b6edc94bcd,
cfce2b26e2, e87bbcda64, 9f84adf8b3, 3919cf55ae, 38dd8cb191, f2563d39cb, 15a9cbef40, 078d6e51e5,
6c33e18745, b743c9a43e, 0c2f2979a9, 971951a1a6, 92d9e908cb, a32b97be88, e3809b2ff1, fd2d8b4f0a,
bc81614894, 8df5aa2e2d, b000740470, b9f554111d, 354c408e3e, df3b60376a, 8d209c652e, 9ddad794b4,
b934e484cc, f8aee9b3c8, f51d77d26a, 0780deb643, 75c38560f4, 9f1c5268a5, 35b113768b, f2595c4939,
8fcfa6d3d5, 54c9d19726, 25324c3cd5, ecb7df85b0, 68c7acdbef, 8b60feed92, 5c895efcd0, 60e55656aa,
9536282418, 8297d0679d, d9f854b08a, 8aaf7f7dc6, ce447558ac, fc850da30e, d6f6cf1965, 4438b51881,
6ae0d9fad7, ad08b410a8, ec3cfd3ab7, 01eb2daa0b, 885000f970, 4be506414b, 1143d84e1d, 336922101f,
ffa033d978, 23f986f57a, bb726b58af, 387615705c, c7f825a192, d363b1c173, d5077ae966, 188fcc3cb4,
cbab9486c6, a5f4c450c6, 4f65a0b147, feb18d64a7, cb1e6535cb, 6b8cf6653a, b426bfcfe8, 21ce50ecf7,
a4ceb2e756, eab5d9e64f, e9c1235b76, dc1b8dfccd, d0201cf2e5, f3d20e60b3, dafba81b40, 91f8ec53d9,
fc9a4a08b8, 45fadb21ac, 28619fbee1, bbe014c3a7, fb3fadb3d3, f481d20773, 599b2dec8f, 435f1d9ae1,
d7ecab605e, 805fea52ec, 48db06f901, e9d0a5e0ed, 44d05518aa, 23b433fe6c, 2e57168a97, 5c6160c398,
9eee1d971e, e6300847d6, e0a3e7bea6, cbebaa1349
.github/actions/bitcoin/action.yml (vendored, 4 changed lines)

@@ -12,7 +12,7 @@ runs:
steps:
- name: Bitcoin Daemon Cache
id: cache-bitcoind
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: bitcoin.tar.gz
key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

@@ -37,4 +37,4 @@ runs:

- name: Bitcoin Regtest Daemon
shell: bash
run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -txindex -daemon
run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -daemon
.github/actions/build-dependencies/action.yml (vendored, 50 changed lines)

@@ -7,13 +7,20 @@ runs:
- name: Remove unused packages
shell: bash
run: |
sudo apt remove -y "*msbuild*" "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
# Ensure the repositories are synced
sudo apt update -y

# Actually perform the removals
sudo apt remove -y "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
sudo apt remove -y "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*"
sudo apt remove -y "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*"

sudo apt remove -y --allow-remove-essential -f shim-signed *python3*
# This removal command requires the prior removals due to unmet dependencies otherwise
sudo apt remove -y "*qemu*" "*sql*" "*texinfo*" "*imagemagick*"
sudo apt autoremove -y
sudo apt clean
docker system prune -a --volumes

# Reinstall python3 as a general dependency of a functional operating system
sudo apt install -y python3 --fix-missing
if: runner.os == 'Linux'

- name: Remove unused packages
@@ -31,19 +38,48 @@ runs:
shell: bash
run: |
if [ "$RUNNER_OS" == "Linux" ]; then
sudo apt install -y ca-certificates protobuf-compiler
sudo apt install -y ca-certificates protobuf-compiler libclang-dev
elif [ "$RUNNER_OS" == "Windows" ]; then
choco install protoc
elif [ "$RUNNER_OS" == "macOS" ]; then
brew install protobuf
brew install protobuf llvm
HOMEBREW_ROOT_PATH=/opt/homebrew # Apple Silicon
if [ $(uname -m) = "x86_64" ]; then HOMEBREW_ROOT_PATH=/usr/local; fi # Intel
ls $HOMEBREW_ROOT_PATH/opt/llvm/lib | grep "libclang.dylib" # Make sure this installed `libclang`
echo "DYLD_LIBRARY_PATH=$HOMEBREW_ROOT_PATH/opt/llvm/lib:$DYLD_LIBRARY_PATH" >> "$GITHUB_ENV"
fi

- name: Install solc
shell: bash
run: |
cargo install svm-rs
cargo +1.89 install svm-rs --version =0.5.18
svm install 0.8.26
svm use 0.8.26

- name: Remove preinstalled Docker
shell: bash
run: |
docker system prune -a --volumes
sudo apt remove -y *docker*
# Install uidmap which will be required for the explicitly installed Docker
sudo apt install uidmap
if: runner.os == 'Linux'

- name: Update system dependencies
shell: bash
run: |
sudo apt update -y
sudo apt upgrade -y
sudo apt autoremove -y
sudo apt clean
if: runner.os == 'Linux'

- name: Install rootless Docker
uses: docker/setup-docker-action@b60f85385d03ac8acfca6d9996982511d8620a19
with:
rootless: true
set-host: true
if: runner.os == 'Linux'

# - name: Cache Rust
# uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43
.github/actions/monero-wallet-rpc/action.yml (vendored, 2 changed lines)

@@ -12,7 +12,7 @@ runs:
steps:
- name: Monero Wallet RPC Cache
id: cache-monero-wallet-rpc
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: monero-wallet-rpc
key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
.github/actions/monero/action.yml (vendored, 2 changed lines)

@@ -12,7 +12,7 @@ runs:
steps:
- name: Monero Daemon Cache
id: cache-monerod
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: /usr/bin/monerod
key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
.github/nightly-version (vendored, 2 changed lines)

@@ -1 +1 @@
nightly-2024-07-01
nightly-2025-11-01
.github/workflows/common-tests.yml (vendored, 1 changed line)

@@ -30,5 +30,4 @@ jobs:
-p patchable-async-sleep \
-p serai-db \
-p serai-env \
-p serai-task \
-p simple-request
.github/workflows/crypto-tests.yml (vendored, 10 changed lines)

@@ -32,13 +32,15 @@ jobs:
-p dalek-ff-group \
-p minimal-ed448 \
-p ciphersuite \
-p ciphersuite-kp256 \
-p multiexp \
-p schnorr-signatures \
-p dleq \
-p generalized-bulletproofs \
-p generalized-bulletproofs-circuit-abstraction \
-p ec-divisors \
-p generalized-bulletproofs-ec-gadgets \
-p dkg \
-p dkg-recovery \
-p dkg-dealer \
-p dkg-promote \
-p dkg-musig \
-p dkg-pedpop \
-p modular-frost \
-p frost-schnorrkel
.github/workflows/daily-deny.yml (vendored, 6 changed lines)

@@ -12,13 +12,13 @@ jobs:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

- name: Advisory Cache
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: ~/.cargo/advisory-db
key: rust-advisory-db

- name: Install cargo deny
run: cargo install --locked cargo-deny
run: cargo +1.89 install cargo-deny --version =0.18.3

- name: Run cargo deny
run: cargo deny -L error --all-features check
run: cargo deny -L error --all-features check --hide-inclusion-graph
.github/workflows/lint.yml (vendored, 45 changed lines)

@@ -11,7 +11,7 @@ jobs:
clippy:
strategy:
matrix:
os: [ubuntu-latest, macos-13, macos-14, windows-latest]
os: [ubuntu-latest, macos-15-intel, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}

steps:
@@ -26,7 +26,7 @@ jobs:
uses: ./.github/actions/build-dependencies

- name: Install nightly rust
run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32-unknown-unknown -c clippy
run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-src -c clippy

- name: Run Clippy
run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module
@@ -46,16 +46,16 @@ jobs:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

- name: Advisory Cache
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: ~/.cargo/advisory-db
key: rust-advisory-db

- name: Install cargo deny
run: cargo install --locked cargo-deny
run: cargo +1.89 install cargo-deny --version =0.18.4

- name: Run cargo deny
run: cargo deny -L error --all-features check
run: cargo deny -L error --all-features check --hide-inclusion-graph

fmt:
runs-on: ubuntu-latest
@@ -73,42 +73,11 @@ jobs:
- name: Run rustfmt
run: cargo +${{ steps.nightly.outputs.version }} fmt -- --check

- name: Install foundry
uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
with:
version: nightly-41d4e5437107f6f42c7711123890147bc736a609
cache: false

- name: Run forge fmt
run: FOUNDRY_FMT_SORT_INPUTS=false FOUNDRY_FMT_LINE_LENGTH=100 FOUNDRY_FMT_TAB_WIDTH=2 FOUNDRY_FMT_BRACKET_SPACING=true FOUNDRY_FMT_INT_TYPES=preserve forge fmt --check $(find . -iname "*.sol")

machete:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Verify all dependencies are in use
run: |
cargo install cargo-machete
cargo machete

slither:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Slither
run: |
python3 -m pip install solc-select
solc-select install 0.8.26
solc-select use 0.8.26

python3 -m pip install slither-analyzer

slither --include-paths ./networks/ethereum/schnorr/contracts/Schnorr.sol
slither --include-paths ./networks/ethereum/schnorr/contracts ./networks/ethereum/schnorr/contracts/tests/Schnorr.sol
slither processor/ethereum/deployer/contracts/Deployer.sol
slither processor/ethereum/erc20/contracts/IERC20.sol

cp networks/ethereum/schnorr/contracts/Schnorr.sol processor/ethereum/router/contracts/
cp processor/ethereum/erc20/contracts/IERC20.sol processor/ethereum/router/contracts/
cd processor/ethereum/router/contracts
slither Router.sol
cargo +1.89 install cargo-machete --version =0.8.0
cargo +1.89 machete
.github/workflows/monero-tests.yaml (vendored, 77 changed lines)

@@ -1,77 +0,0 @@
name: Monero Tests

on:
push:
branches:
- develop
paths:
- "networks/monero/**"
- "processor/**"

pull_request:
paths:
- "networks/monero/**"
- "processor/**"

workflow_dispatch:

jobs:
# Only run these once since they will be consistent regardless of any node
unit-tests:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

- name: Test Dependencies
uses: ./.github/actions/test-dependencies

- name: Run Unit Tests Without Features
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-io --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-generators --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-primitives --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-mlsag --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-clsag --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-borromean --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-bulletproofs --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-rpc --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-address --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-seed --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package polyseed --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --lib

# Doesn't run unit tests with features as the tests workflow will

integration-tests:
runs-on: ubuntu-latest
# Test against all supported protocol versions
strategy:
matrix:
version: [v0.17.3.2, v0.18.3.4]

steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

- name: Test Dependencies
uses: ./.github/actions/test-dependencies
with:
monero-version: ${{ matrix.version }}

- name: Run Integration Tests Without Features
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --test '*'

- name: Run Integration Tests
# Don't run if the the tests workflow also will
if: ${{ matrix.version != 'v0.18.3.4' }}
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --all-features --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --all-features --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --all-features --test '*'
.github/workflows/msrv.yml (vendored, 256 changed lines)

@@ -1,256 +0,0 @@
name: Weekly MSRV Check

on:
schedule:
- cron: "0 0 * * 0"
workflow_dispatch:

jobs:
msrv-common:
name: Run cargo msrv on common
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies

- name: Install cargo msrv
run: cargo install --locked cargo-msrv

- name: Run cargo msrv on common
run: |
cargo msrv verify --manifest-path common/zalloc/Cargo.toml
cargo msrv verify --manifest-path common/std-shims/Cargo.toml
cargo msrv verify --manifest-path common/env/Cargo.toml
cargo msrv verify --manifest-path common/db/Cargo.toml
cargo msrv verify --manifest-path common/task/Cargo.toml
cargo msrv verify --manifest-path common/request/Cargo.toml
cargo msrv verify --manifest-path common/patchable-async-sleep/Cargo.toml

msrv-crypto:
name: Run cargo msrv on crypto
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies

- name: Install cargo msrv
run: cargo install --locked cargo-msrv

- name: Run cargo msrv on crypto
run: |
cargo msrv verify --manifest-path crypto/transcript/Cargo.toml

cargo msrv verify --manifest-path crypto/ff-group-tests/Cargo.toml
cargo msrv verify --manifest-path crypto/dalek-ff-group/Cargo.toml
cargo msrv verify --manifest-path crypto/ed448/Cargo.toml

cargo msrv verify --manifest-path crypto/multiexp/Cargo.toml

cargo msrv verify --manifest-path crypto/dleq/Cargo.toml
cargo msrv verify --manifest-path crypto/ciphersuite/Cargo.toml
cargo msrv verify --manifest-path crypto/schnorr/Cargo.toml

cargo msrv verify --manifest-path crypto/evrf/generalized-bulletproofs/Cargo.toml
cargo msrv verify --manifest-path crypto/evrf/circuit-abstraction/Cargo.toml
cargo msrv verify --manifest-path crypto/evrf/divisors/Cargo.toml
cargo msrv verify --manifest-path crypto/evrf/ec-gadgets/Cargo.toml
cargo msrv verify --manifest-path crypto/evrf/embedwards25519/Cargo.toml
cargo msrv verify --manifest-path crypto/evrf/secq256k1/Cargo.toml

cargo msrv verify --manifest-path crypto/dkg/Cargo.toml
cargo msrv verify --manifest-path crypto/frost/Cargo.toml
cargo msrv verify --manifest-path crypto/schnorrkel/Cargo.toml

msrv-networks:
name: Run cargo msrv on networks
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies

- name: Install cargo msrv
run: cargo install --locked cargo-msrv

- name: Run cargo msrv on networks
run: |
cargo msrv verify --manifest-path networks/bitcoin/Cargo.toml

cargo msrv verify --manifest-path networks/ethereum/build-contracts/Cargo.toml
cargo msrv verify --manifest-path networks/ethereum/schnorr/Cargo.toml
cargo msrv verify --manifest-path networks/ethereum/alloy-simple-request-transport/Cargo.toml
cargo msrv verify --manifest-path networks/ethereum/relayer/Cargo.toml --features parity-db

cargo msrv verify --manifest-path networks/monero/io/Cargo.toml
cargo msrv verify --manifest-path networks/monero/generators/Cargo.toml
cargo msrv verify --manifest-path networks/monero/primitives/Cargo.toml
cargo msrv verify --manifest-path networks/monero/ringct/mlsag/Cargo.toml
cargo msrv verify --manifest-path networks/monero/ringct/clsag/Cargo.toml
cargo msrv verify --manifest-path networks/monero/ringct/borromean/Cargo.toml
cargo msrv verify --manifest-path networks/monero/ringct/bulletproofs/Cargo.toml
cargo msrv verify --manifest-path networks/monero/Cargo.toml
cargo msrv verify --manifest-path networks/monero/rpc/Cargo.toml
cargo msrv verify --manifest-path networks/monero/rpc/simple-request/Cargo.toml
cargo msrv verify --manifest-path networks/monero/wallet/address/Cargo.toml
cargo msrv verify --manifest-path networks/monero/wallet/Cargo.toml
cargo msrv verify --manifest-path networks/monero/verify-chain/Cargo.toml

msrv-message-queue:
name: Run cargo msrv on message-queue
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies

- name: Install cargo msrv
run: cargo install --locked cargo-msrv

- name: Run cargo msrv on message-queue
run: |
cargo msrv verify --manifest-path message-queue/Cargo.toml --features parity-db

msrv-processor:
name: Run cargo msrv on processor
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies

- name: Install cargo msrv
run: cargo install --locked cargo-msrv

- name: Run cargo msrv on processor
run: |
cargo msrv verify --manifest-path processor/view-keys/Cargo.toml

cargo msrv verify --manifest-path processor/primitives/Cargo.toml
cargo msrv verify --manifest-path processor/messages/Cargo.toml

cargo msrv verify --manifest-path processor/scanner/Cargo.toml

cargo msrv verify --manifest-path processor/scheduler/primitives/Cargo.toml
cargo msrv verify --manifest-path processor/scheduler/smart-contract/Cargo.toml
cargo msrv verify --manifest-path processor/scheduler/utxo/primitives/Cargo.toml
cargo msrv verify --manifest-path processor/scheduler/utxo/standard/Cargo.toml
cargo msrv verify --manifest-path processor/scheduler/utxo/transaction-chaining/Cargo.toml

cargo msrv verify --manifest-path processor/key-gen/Cargo.toml
cargo msrv verify --manifest-path processor/frost-attempt-manager/Cargo.toml
cargo msrv verify --manifest-path processor/signers/Cargo.toml
cargo msrv verify --manifest-path processor/bin/Cargo.toml --features parity-db

cargo msrv verify --manifest-path processor/bitcoin/Cargo.toml

cargo msrv verify --manifest-path processor/ethereum/primitives/Cargo.toml
cargo msrv verify --manifest-path processor/ethereum/test-primitives/Cargo.toml
cargo msrv verify --manifest-path processor/ethereum/erc20/Cargo.toml
cargo msrv verify --manifest-path processor/ethereum/deployer/Cargo.toml
cargo msrv verify --manifest-path processor/ethereum/router/Cargo.toml
cargo msrv verify --manifest-path processor/ethereum/Cargo.toml

cargo msrv verify --manifest-path processor/monero/Cargo.toml

msrv-coordinator:
name: Run cargo msrv on coordinator
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies

- name: Install cargo msrv
run: cargo install --locked cargo-msrv

- name: Run cargo msrv on coordinator
run: |
cargo msrv verify --manifest-path coordinator/tributary/tendermint/Cargo.toml
cargo msrv verify --manifest-path coordinator/tributary/Cargo.toml
cargo msrv verify --manifest-path coordinator/cosign/Cargo.toml
cargo msrv verify --manifest-path coordinator/substrate/Cargo.toml
cargo msrv verify --manifest-path coordinator/Cargo.toml

msrv-substrate:
name: Run cargo msrv on substrate
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies

- name: Install cargo msrv
run: cargo install --locked cargo-msrv

- name: Run cargo msrv on substrate
run: |
cargo msrv verify --manifest-path substrate/primitives/Cargo.toml

cargo msrv verify --manifest-path substrate/coins/primitives/Cargo.toml
cargo msrv verify --manifest-path substrate/coins/pallet/Cargo.toml

cargo msrv verify --manifest-path substrate/dex/pallet/Cargo.toml

cargo msrv verify --manifest-path substrate/economic-security/pallet/Cargo.toml

cargo msrv verify --manifest-path substrate/genesis-liquidity/primitives/Cargo.toml
cargo msrv verify --manifest-path substrate/genesis-liquidity/pallet/Cargo.toml

cargo msrv verify --manifest-path substrate/in-instructions/primitives/Cargo.toml
cargo msrv verify --manifest-path substrate/in-instructions/pallet/Cargo.toml

cargo msrv verify --manifest-path substrate/validator-sets/pallet/Cargo.toml
cargo msrv verify --manifest-path substrate/validator-sets/primitives/Cargo.toml

cargo msrv verify --manifest-path substrate/emissions/primitives/Cargo.toml
cargo msrv verify --manifest-path substrate/emissions/pallet/Cargo.toml

cargo msrv verify --manifest-path substrate/signals/primitives/Cargo.toml
cargo msrv verify --manifest-path substrate/signals/pallet/Cargo.toml

cargo msrv verify --manifest-path substrate/abi/Cargo.toml
cargo msrv verify --manifest-path substrate/client/Cargo.toml

cargo msrv verify --manifest-path substrate/runtime/Cargo.toml
cargo msrv verify --manifest-path substrate/node/Cargo.toml

msrv-orchestration:
name: Run cargo msrv on orchestration
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies

- name: Install cargo msrv
run: cargo install --locked cargo-msrv

- name: Run cargo msrv on message-queue
run: |
cargo msrv verify --manifest-path orchestration/Cargo.toml

msrv-mini:
name: Run cargo msrv on mini
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies

- name: Install cargo msrv
run: cargo install --locked cargo-msrv

- name: Run cargo msrv on mini
run: |
cargo msrv verify --manifest-path mini/Cargo.toml
.github/workflows/networks-tests.yml (vendored, 19 changed lines)

@@ -30,23 +30,6 @@ jobs:
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
-p bitcoin-serai \
-p build-solidity-contracts \
-p ethereum-schnorr-contract \
-p alloy-simple-request-transport \
-p ethereum-serai \
-p serai-ethereum-relayer \
-p monero-io \
-p monero-generators \
-p monero-primitives \
-p monero-mlsag \
-p monero-clsag \
-p monero-borromean \
-p monero-bulletproofs \
-p monero-serai \
-p monero-rpc \
-p monero-simple-request-rpc \
-p monero-address \
-p monero-wallet \
-p monero-seed \
-p polyseed \
-p monero-wallet-util \
-p monero-serai-verify-chain
.github/workflows/pages.yml (vendored, 43 changed lines)

@@ -1,6 +1,7 @@
# MIT License
#
# Copyright (c) 2022 just-the-docs
# Copyright (c) 2022-2024 Luke Parker
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -20,31 +21,21 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

# Sample workflow for building and deploying a Jekyll site to GitHub Pages
name: Deploy Jekyll site to Pages
name: Deploy Rust docs and Jekyll site to Pages

on:
push:
branches:
- "develop"
paths:
- "docs/**"

# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:

# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
contents: read
pages: write
id-token: write

# Allow one concurrent deployment
# Only allow one concurrent deployment
concurrency:
group: "pages"
cancel-in-progress: true
@@ -53,27 +44,37 @@ jobs:
# Build job
build:
runs-on: ubuntu-latest
defaults:
run:
working-directory: docs
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Setup Ruby
uses: ruby/setup-ruby@v1
uses: ruby/setup-ruby@44511735964dcb71245e7e55f72539531f7bc0eb
with:
bundler-cache: true
cache-version: 0
working-directory: "${{ github.workspace }}/docs"
- name: Setup Pages
id: pages
uses: actions/configure-pages@v3
uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b
- name: Build with Jekyll
run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
run: cd ${{ github.workspace }}/docs && bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
env:
JEKYLL_ENV: production

- name: Get nightly version to use
id: nightly
shell: bash
run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT
- name: Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Buld Rust docs
run: |
rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-docs
RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --no-deps --all-features
mv target/doc docs/_site/rust

- name: Upload artifact
uses: actions/upload-pages-artifact@v1
uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b
with:
path: "docs/_site/"

@@ -87,4 +88,4 @@ jobs:
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v2
uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e
.github/workflows/tests.yml (vendored, 23 changed lines)

@@ -39,30 +39,9 @@ jobs:
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
-p serai-message-queue \
-p serai-processor-messages \
-p serai-processor-key-gen \
-p serai-processor-view-keys \
-p serai-processor-frost-attempt-manager \
-p serai-processor-primitives \
-p serai-processor-scanner \
-p serai-processor-scheduler-primitives \
-p serai-processor-utxo-scheduler-primitives \
-p serai-processor-utxo-scheduler \
-p serai-processor-transaction-chaining-scheduler \
-p serai-processor-smart-contract-scheduler \
-p serai-processor-signers \
-p serai-processor-bin \
-p serai-bitcoin-processor \
-p serai-processor-ethereum-primitives \
-p serai-processor-ethereum-test-primitives \
-p serai-processor-ethereum-deployer \
-p serai-processor-ethereum-router \
-p serai-processor-ethereum-erc20 \
-p serai-ethereum-processor \
-p serai-monero-processor \
-p serai-processor \
-p tendermint-machine \
-p tributary-chain \
-p serai-cosign \
-p serai-coordinator-substrate \
-p serai-coordinator \
-p serai-orchestrator \
-p serai-docker-tests
.gitignore (vendored, 7 changed lines)

@@ -1,7 +1,14 @@
target

# Don't commit any `Cargo.lock` which aren't the workspace's
Cargo.lock
!./Cargo.lock

# Don't commit any `Dockerfile`, as they're auto-generated, except the only one which isn't
Dockerfile
Dockerfile.fast-epoch
!orchestration/runtime/Dockerfile

.test-logs

.vscode
Cargo.lock (generated, 6898 changed lines): file diff suppressed because it is too large.
Cargo.toml (141 changed lines)

@@ -1,15 +1,8 @@
[workspace]
resolver = "2"
members = [
# Version patches
"patches/parking_lot_core",
"patches/parking_lot",
"patches/zstd",
"patches/rocksdb",

# std patches
"patches/matches",
"patches/is-terminal",

# Rewrites/redirects
"patches/option-ext",
@@ -20,7 +13,6 @@ members = [
"common/patchable-async-sleep",
"common/db",
"common/env",
"common/task",
"common/request",

"crypto/transcript",
@@ -29,77 +21,34 @@ members = [
"crypto/dalek-ff-group",
"crypto/ed448",
"crypto/ciphersuite",
"crypto/ciphersuite/kp256",

"crypto/multiexp",

"crypto/schnorr",
"crypto/dleq",

"crypto/evrf/secq256k1",
"crypto/evrf/embedwards25519",
"crypto/evrf/generalized-bulletproofs",
"crypto/evrf/circuit-abstraction",
"crypto/evrf/divisors",
"crypto/evrf/ec-gadgets",

"crypto/dkg",
"crypto/dkg/recovery",
"crypto/dkg/dealer",
"crypto/dkg/promote",
"crypto/dkg/musig",
"crypto/dkg/pedpop",
"crypto/frost",
"crypto/schnorrkel",

"networks/bitcoin",

"networks/ethereum/build-contracts",
"networks/ethereum/schnorr",
"networks/ethereum/alloy-simple-request-transport",
"networks/ethereum",
"networks/ethereum/relayer",

"networks/monero/io",
"networks/monero/generators",
"networks/monero/primitives",
"networks/monero/ringct/mlsag",
"networks/monero/ringct/clsag",
"networks/monero/ringct/borromean",
"networks/monero/ringct/bulletproofs",
"networks/monero",
"networks/monero/rpc",
"networks/monero/rpc/simple-request",
"networks/monero/wallet/address",
"networks/monero/wallet",
"networks/monero/wallet/seed",
"networks/monero/wallet/polyseed",
"networks/monero/wallet/util",
"networks/monero/verify-chain",

"message-queue",

"processor/messages",

"processor/key-gen",
"processor/view-keys",
"processor/frost-attempt-manager",

"processor/primitives",
"processor/scanner",
"processor/scheduler/primitives",
"processor/scheduler/utxo/primitives",
"processor/scheduler/utxo/standard",
"processor/scheduler/utxo/transaction-chaining",
"processor/scheduler/smart-contract",
"processor/signers",

"processor/bin",
"processor/bitcoin",
"processor/ethereum/primitives",
"processor/ethereum/test-primitives",
"processor/ethereum/deployer",
"processor/ethereum/router",
"processor/ethereum/erc20",
"processor/ethereum",
"processor/monero",
"processor",

"coordinator/tributary/tendermint",
"coordinator/tributary",
"coordinator/cosign",
"coordinator/substrate",
"coordinator",

"substrate/primitives",
@@ -151,51 +100,37 @@ members = [
# to the extensive operations required for Bulletproofs
[profile.dev.package]
subtle = { opt-level = 3 }
curve25519-dalek = { opt-level = 3 }

ff = { opt-level = 3 }
group = { opt-level = 3 }

crypto-bigint = { opt-level = 3 }
secp256k1 = { opt-level = 3 }
curve25519-dalek = { opt-level = 3 }
dalek-ff-group = { opt-level = 3 }
minimal-ed448 = { opt-level = 3 }

multiexp = { opt-level = 3 }

secq256k1 = { opt-level = 3 }
embedwards25519 = { opt-level = 3 }
generalized-bulletproofs = { opt-level = 3 }
generalized-bulletproofs-circuit-abstraction = { opt-level = 3 }
ec-divisors = { opt-level = 3 }
generalized-bulletproofs-ec-gadgets = { opt-level = 3 }

dkg = { opt-level = 3 }

monero-generators = { opt-level = 3 }
monero-borromean = { opt-level = 3 }
monero-bulletproofs = { opt-level = 3 }
monero-mlsag = { opt-level = 3 }
monero-clsag = { opt-level = 3 }
monero-oxide = { opt-level = 3 }

[profile.release]
panic = "unwind"
overflow-checks = true

[patch.crates-io]
# Dependencies from monero-oxide which originate from within our own tree
std-shims = { path = "common/std-shims" }
simple-request = { path = "common/request" }
dalek-ff-group = { path = "crypto/dalek-ff-group" }
flexible-transcript = { path = "crypto/transcript" }
modular-frost = { path = "crypto/frost" }

# https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }

parking_lot_core = { path = "patches/parking_lot_core" }
parking_lot = { path = "patches/parking_lot" }
# wasmtime pulls in an old version for this
zstd = { path = "patches/zstd" }
# Needed for WAL compression
rocksdb = { path = "patches/rocksdb" }

# is-terminal now has an std-based solution with an equivalent API
is-terminal = { path = "patches/is-terminal" }
# So does matches
# These have `std` alternatives
matches = { path = "patches/matches" }
home = { path = "patches/home" }

# directories-next was created because directories was unmaintained
# directories-next is now unmaintained while directories is maintained
@@ -205,12 +140,11 @@ matches = { path = "patches/matches" }
option-ext = { path = "patches/option-ext" }
directories-next = { path = "patches/directories-next" }

# The official pasta_curves repo doesn't support Zeroize
pasta_curves = { git = "https://github.com/kayabaNerve/pasta_curves", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616" }

[workspace.lints.clippy]
uninlined_format_args = "allow" # TODO
unwrap_or_default = "allow"
map_unwrap_or = "allow"
manual_is_multiple_of = "allow"
incompatible_msrv = "allow" # Manually verified with a GitHub workflow
borrow_as_ptr = "deny"
cast_lossless = "deny"
cast_possible_truncation = "deny"
@@ -235,13 +169,14 @@ large_stack_arrays = "deny"
linkedlist = "deny"
macro_use_imports = "deny"
manual_instant_elapsed = "deny"
manual_let_else = "deny"
# TODO manual_let_else = "deny"
manual_ok_or = "deny"
manual_string_new = "deny"
map_unwrap_or = "deny"
match_bool = "deny"
match_same_arms = "deny"
missing_fields_in_debug = "deny"
needless_continue = "deny"
# TODO needless_continue = "deny"
needless_pass_by_value = "deny"
ptr_cast_constness = "deny"
range_minus_one = "deny"
@@ -249,9 +184,7 @@ range_plus_one = "deny"
redundant_closure_for_method_calls = "deny"
redundant_else = "deny"
string_add_assign = "deny"
string_slice = "deny"
unchecked_duration_subtraction = "deny"
uninlined_format_args = "deny"
unchecked_time_subtraction = "deny"
unnecessary_box_returns = "deny"
unnecessary_join = "deny"
unnecessary_wraps = "deny"
@@ -259,3 +192,21 @@ unnested_or_patterns = "deny"
unused_async = "deny"
unused_self = "deny"
zero_sized_map_values = "deny"

# TODO: These were incurred when updating Rust as necessary for compilation, yet aren't being fixed
# at this time due to the impacts it'd have throughout the repository (when this isn't actively the
# primary branch, `next` is)
needless_continue = "allow"
needless_lifetimes = "allow"
useless_conversion = "allow"
empty_line_after_doc_comments = "allow"
manual_div_ceil = "allow"
manual_let_else = "allow"
unnecessary_map_or = "allow"
result_large_err = "allow"
unneeded_struct_pattern = "allow"
[workspace.lints.rust]
unused = "allow" # TODO: https://github.com/rust-lang/rust/issues/147648
mismatched_lifetime_syntaxes = "allow"
unused_attributes = "allow"
unused_parens = "allow"
LICENSE (2 changed lines)

@@ -5,4 +5,4 @@ a full copy of the AGPL-3.0 License is included in the root of this repository
as a reference text. This copy should be provided with any distribution of a
crate licensed under the AGPL-3.0, as per its terms.

The GitHub actions (`.github/actions`) are licensed under the MIT license.
The GitHub actions/workflows (`.github`) are licensed under the MIT license.
@@ -59,7 +59,6 @@ issued at the discretion of the Immunefi program managers.
- [Website](https://serai.exchange/): https://serai.exchange/
- [Immunefi](https://immunefi.com/bounty/serai/): https://immunefi.com/bounty/serai/
- [Twitter](https://twitter.com/SeraiDEX): https://twitter.com/SeraiDEX
- [Mastodon](https://cryptodon.lol/@serai): https://cryptodon.lol/@serai
- [Discord](https://discord.gg/mpEUtJR3vz): https://discord.gg/mpEUtJR3vz
- [Matrix](https://matrix.to/#/#serai:matrix.org): https://matrix.to/#/#serai:matrix.org
- [Reddit](https://www.reddit.com/r/SeraiDEX/): https://www.reddit.com/r/SeraiDEX/
@@ -1,13 +1,13 @@
[package]
name = "serai-db"
version = "0.1.1"
version = "0.1.0"
description = "A simple database trait and backends for it"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/db"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
rust-version = "1.71"
rust-version = "1.65"

[package.metadata.docs.rs]
all-features = true
@@ -18,7 +18,7 @@ workspace = true

[dependencies]
parity-db = { version = "0.4", default-features = false, optional = true }
rocksdb = { version = "0.23", default-features = false, features = ["zstd"], optional = true }
rocksdb = { version = "0.24", default-features = false, features = ["zstd"], optional = true }

[features]
parity-db = ["dep:parity-db"]

@@ -1,8 +0,0 @@
# Serai DB

An inefficient, minimal abstraction around databases.

The abstraction offers `get`, `put`, and `del` with helper functions and macros
built on top. Database iteration is not offered, forcing the caller to manually
implement indexing schemes. This ensures wide compatibility across abstracted
databases.
@@ -38,21 +38,12 @@ pub fn serai_db_key(
#[macro_export]
macro_rules! create_db {
($db_name: ident {
$(
$field_name: ident:
$(<$($generic_name: tt: $generic_type: tt),+>)?(
$($arg: ident: $arg_type: ty),*
) -> $field_type: ty$(,)?
)*
$($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*
}) => {
$(
#[derive(Clone, Debug)]
pub(crate) struct $field_name$(
<$($generic_name: $generic_type),+>
)?$(
(core::marker::PhantomData<($($generic_name),+)>)
)?;
impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? {
pub(crate) struct $field_name;
impl $field_name {
pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> {
use scale::Encode;
$crate::serai_db_key(
@@ -61,43 +52,18 @@ macro_rules! create_db {
($($arg),*).encode()
)
}
pub(crate) fn set(
txn: &mut impl DbTxn
$(, $arg: $arg_type)*,
data: &$field_type
) {
let key = Self::key($($arg),*);
pub(crate) fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type) {
let key = $field_name::key($($arg),*);
txn.put(&key, borsh::to_vec(data).unwrap());
}
pub(crate) fn get(
getter: &impl Get,
$($arg: $arg_type),*
) -> Option<$field_type> {
getter.get(Self::key($($arg),*)).map(|data| {
pub(crate) fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> {
getter.get($field_name::key($($arg),*)).map(|data| {
borsh::from_slice(data.as_ref()).unwrap()
})
}
// Returns a PhantomData of all generic types so if the generic was only used in the value,
// not the keys, this doesn't have unused generic types
#[allow(dead_code)]
pub(crate) fn del(
txn: &mut impl DbTxn
$(, $arg: $arg_type)*
) -> core::marker::PhantomData<($($($generic_name),+)?)> {
txn.del(&Self::key($($arg),*));
core::marker::PhantomData
}

pub(crate) fn take(
txn: &mut impl DbTxn
$(, $arg: $arg_type)*
) -> Option<$field_type> {
let key = Self::key($($arg),*);
let res = txn.get(&key).map(|data| borsh::from_slice(data.as_ref()).unwrap());
if res.is_some() {
txn.del(key);
}
res
pub(crate) fn del(txn: &mut impl DbTxn $(, $arg: $arg_type)*) {
txn.del(&$field_name::key($($arg),*))
}
}
)*
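The hunk above changes how tables declared via `create_db!` expose `set`, `get`, and `del`. As a rough illustration of how such a table is used (the database and table names below are hypothetical, `MemDb` is assumed to be exported by the crate, and the macro's expansion also assumes `borsh` and `scale` are available to the calling crate):

```rust
use serai_db::{Get, DbTxn, Db, MemDb, create_db};

create_db!(
  ExampleDb {
    // Key is `(session: u32)`, value is a borsh-(de)serializable `Vec<u8>`
    KeyShares: (session: u32) -> Vec<u8>,
  }
);

fn example(db: &mut MemDb) {
  let mut txn = db.txn();
  KeyShares::set(&mut txn, 0, &vec![1, 2, 3]);
  assert_eq!(KeyShares::get(&txn, 0), Some(vec![1, 2, 3]));
  KeyShares::del(&mut txn, 0);
  txn.commit();
}
```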
@@ -107,30 +73,19 @@ macro_rules! create_db {
#[macro_export]
macro_rules! db_channel {
($db_name: ident {
$($field_name: ident:
$(<$($generic_name: tt: $generic_type: tt),+>)?(
$($arg: ident: $arg_type: ty),*
) -> $field_type: ty$(,)?
)*
$($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*
}) => {
$(
create_db! {
$db_name {
$field_name: $(<$($generic_name: $generic_type),+>)?(
$($arg: $arg_type,)*
index: u32
) -> $field_type
$field_name: ($($arg: $arg_type,)* index: u32) -> $field_type,
}
}

impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? {
pub(crate) fn send(
txn: &mut impl DbTxn
$(, $arg: $arg_type)*
, value: &$field_type
) {
impl $field_name {
pub(crate) fn send(txn: &mut impl DbTxn $(, $arg: $arg_type)*, value: &$field_type) {
// Use index 0 to store the amount of messages
let messages_sent_key = Self::key($($arg,)* 0);
let messages_sent_key = $field_name::key($($arg),*, 0);
let messages_sent = txn.get(&messages_sent_key).map(|counter| {
u32::from_le_bytes(counter.try_into().unwrap())
}).unwrap_or(0);
@@ -141,35 +96,19 @@ macro_rules! db_channel {
// at the same time
let index_to_use = messages_sent + 2;

Self::set(txn, $($arg,)* index_to_use, value);
$field_name::set(txn, $($arg),*, index_to_use, value);
}
pub(crate) fn peek(
getter: &impl Get
$(, $arg: $arg_type)*
) -> Option<$field_type> {
let messages_recvd_key = Self::key($($arg,)* 1);
let messages_recvd = getter.get(&messages_recvd_key).map(|counter| {
u32::from_le_bytes(counter.try_into().unwrap())
}).unwrap_or(0);

let index_to_read = messages_recvd + 2;

Self::get(getter, $($arg,)* index_to_read)
}
pub(crate) fn try_recv(
txn: &mut impl DbTxn
$(, $arg: $arg_type)*
) -> Option<$field_type> {
let messages_recvd_key = Self::key($($arg,)* 1);
pub(crate) fn try_recv(txn: &mut impl DbTxn $(, $arg: $arg_type)*) -> Option<$field_type> {
let messages_recvd_key = $field_name::key($($arg),*, 1);
let messages_recvd = txn.get(&messages_recvd_key).map(|counter| {
u32::from_le_bytes(counter.try_into().unwrap())
}).unwrap_or(0);

let index_to_read = messages_recvd + 2;

let res = Self::get(txn, $($arg,)* index_to_read);
let res = $field_name::get(txn, $($arg),*, index_to_read);
if res.is_some() {
Self::del(txn, $($arg,)* index_to_read);
$field_name::del(txn, $($arg),*, index_to_read);
txn.put(&messages_recvd_key, (messages_recvd + 1).to_le_bytes());
}
res
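Both versions of `db_channel!` reserve index 0 for the count of sent messages and index 1 for the count of received messages, with the first payload stored at index 2. A rough usage sketch, under the same assumptions as the `create_db!` example above (hypothetical channel and database names, `MemDb` assumed exported):

```rust
use serai_db::{Get, DbTxn, Db, MemDb, create_db, db_channel};

db_channel!(
  ExampleChannels {
    // Each `id` value gets its own independent FIFO channel of u64 payloads
    Outbox: (id: u8) -> u64,
  }
);

fn example(db: &mut MemDb) {
  let mut txn = db.txn();
  // Payloads land at successive indices starting at 2
  Outbox::send(&mut txn, 0, &100);
  Outbox::send(&mut txn, 0, &200);
  // try_recv reads index (received counter + 2), then bumps the counter stored at index 1
  assert_eq!(Outbox::try_recv(&mut txn, 0), Some(100));
  assert_eq!(Outbox::try_recv(&mut txn, 0), Some(200));
  assert_eq!(Outbox::try_recv(&mut txn, 0), None);
  txn.commit();
}
```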
@@ -14,43 +14,26 @@ mod parity_db;
#[cfg(feature = "parity-db")]
pub use parity_db::{ParityDb, new_parity_db};

/// An object implementing `get`.
/// An object implementing get.
pub trait Get {
/// Get a value from the database.
fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>>;
}

/// An atomic database transaction.
///
/// A transaction is only required to atomically commit. It is not required that two `Get` calls
/// made with the same transaction return the same result, if another transaction wrote to that
/// key.
///
/// If two transactions are created, and both write (including deletions) to the same key, behavior
/// is undefined. The transaction may block, deadlock, panic, overwrite one of the two values
/// randomly, or any other action, at time of write or at time of commit.
/// An atomic database operation.
#[must_use]
pub trait DbTxn: Send + Get {
/// Write a value to this key.
fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>);
/// Delete the value from this key.
fn del(&mut self, key: impl AsRef<[u8]>);
/// Commit this transaction.
fn commit(self);
}

/// A database supporting atomic transaction.
/// A database supporting atomic operations.
pub trait Db: 'static + Send + Sync + Clone + Get {
/// The type representing a database transaction.
type Transaction<'a>: DbTxn;
/// Calculate a key for a database entry.
///
/// Keys are separated by the database, the item within the database, and the item's key itself.
fn key(db_dst: &'static [u8], item_dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
let db_len = u8::try_from(db_dst.len()).unwrap();
let dst_len = u8::try_from(item_dst.len()).unwrap();
[[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()
}
/// Open a new transaction.
fn txn(&mut self) -> Self::Transaction<'_>;
}
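The default `Db::key` shown above length-prefixes the database and item destinations before appending the raw key, which keeps distinct `(db, item)` pairs from colliding. A self-contained sketch of that layout, with a free function mirroring the trait's default body for illustration:

```rust
// Mirrors the default body of Db::key: [db_len, db_dst..., item_len, item_dst..., key...]
fn db_key(db_dst: &'static [u8], item_dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
  let db_len = u8::try_from(db_dst.len()).unwrap();
  let dst_len = u8::try_from(item_dst.len()).unwrap();
  [[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()
}

fn main() {
  // b"Db" has length 2 and b"Item" has length 4, so the composed key is:
  assert_eq!(
    db_key(b"Db", b"Item", b"k"),
    vec![2, b'D', b'b', 4, b'I', b't', b'e', b'm', b'k'],
  );
}
```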
@@ -11,7 +11,7 @@ use crate::*;
#[derive(PartialEq, Eq, Debug)]
pub struct MemDbTxn<'a>(&'a MemDb, HashMap<Vec<u8>, Vec<u8>>, HashSet<Vec<u8>>);

impl Get for MemDbTxn<'_> {
impl<'a> Get for MemDbTxn<'a> {
fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
if self.2.contains(key.as_ref()) {
return None;
@@ -23,7 +23,7 @@ impl Get for MemDbTxn<'_> {
.or_else(|| self.0 .0.read().unwrap().get(key.as_ref()).cloned())
}
}
impl DbTxn for MemDbTxn<'_> {
impl<'a> DbTxn for MemDbTxn<'a> {
fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
self.2.remove(key.as_ref());
self.1.insert(key.as_ref().to_vec(), value.as_ref().to_vec());
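`MemDbTxn` keeps pending writes in an in-memory map and pending deletions in a set, falling back to the underlying `MemDb` for everything else, so changes only become visible through the database once the transaction commits. A rough usage sketch (the `MemDb::new` constructor and re-export path are assumptions):

```rust
use serai_db::{Get, DbTxn, Db, MemDb};

fn main() {
  let mut db = MemDb::new();
  {
    let mut txn = db.txn();
    txn.put(b"key", b"value");
    // The write is visible through the transaction's overlay before commit...
    assert_eq!(txn.get(b"key"), Some(b"value".to_vec()));
    txn.commit();
  }
  // ...and through the database itself afterwards
  assert_eq!(db.get(b"key"), Some(b"value".to_vec()));
}
```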
common/env/Cargo.toml (vendored, 2 changed lines)

@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/env"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
rust-version = "1.71"
rust-version = "1.60"

[package.metadata.docs.rs]
all-features = true
common/env/src/lib.rs (vendored, 2 changed lines)

@@ -1,5 +1,5 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]

// Obtain a variable from the Serai environment/secret store.
pub fn var(variable: &str) -> Option<String> {
@@ -7,7 +7,6 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/patchable-a
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["async", "sleep", "tokio", "smol", "async-std"]
edition = "2021"
rust-version = "1.71"

[package.metadata.docs.rs]
all-features = true
@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/simple-requ
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["http", "https", "async", "request", "ssl"]
edition = "2021"
rust-version = "1.71"
rust-version = "1.70"

[package.metadata.docs.rs]
all-features = true
@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]

use std::sync::Arc;
@@ -1,13 +1,13 @@
[package]
name = "std-shims"
version = "0.1.1"
version = "0.1.4"
description = "A series of std shims to make alloc more feasible"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["nostd", "no_std", "alloc", "io"]
edition = "2021"
rust-version = "1.80"
rust-version = "1.64"

[package.metadata.docs.rs]
all-features = true
@@ -17,8 +17,9 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true

[dependencies]
spin = { version = "0.9", default-features = false, features = ["use_ticket_mutex", "lazy"] }
hashbrown = { version = "0.15", default-features = false, features = ["default-hasher", "inline-more"] }
rustversion = { version = "1", default-features = false }
spin = { version = "0.10", default-features = false, features = ["use_ticket_mutex", "once", "lazy"] }
hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] }

[features]
std = []
@@ -3,4 +3,9 @@
A crate which passes through to std when the default `std` feature is enabled,
yet provides a series of shims when it isn't.

`HashSet` and `HashMap` are provided via `hashbrown`.
No guarantee of one-to-one parity is provided. The shims provided aim to be sufficient for the
average case.

`HashSet` and `HashMap` are provided via `hashbrown`. Synchronization primitives are provided via
`spin` (avoiding a requirement on `critical-section`).
Types are not guaranteed to be one-to-one with their `std` counterparts.
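For orientation, here is a minimal usage sketch consistent with the README above. It is an illustration only: the `collections` and `sync` module paths, and importing the crate as `std_shims`, are assumptions not confirmed by this diff.

```rust
// With the `std` feature these resolve to `std` types; without it, to the
// hashbrown/spin-backed shims (module paths assumed).
use std_shims::{collections::HashMap, sync::LazyLock};

static DEFAULTS: LazyLock<HashMap<&'static str, u8>> = LazyLock::new(|| {
  let mut map = HashMap::new();
  map.insert("retries", 3);
  map
});

fn main() {
  assert_eq!(DEFAULTS.get("retries"), Some(&3));
}
```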
@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]
@@ -11,3 +11,64 @@ pub mod io;
|
||||
pub use alloc::vec;
|
||||
pub use alloc::str;
|
||||
pub use alloc::string;
|
||||
|
||||
pub mod prelude {
|
||||
#[rustversion::before(1.73)]
|
||||
#[doc(hidden)]
|
||||
pub trait StdShimsDivCeil {
|
||||
fn div_ceil(self, rhs: Self) -> Self;
|
||||
}
|
||||
#[rustversion::before(1.73)]
|
||||
mod impl_divceil {
|
||||
use super::StdShimsDivCeil;
|
||||
impl StdShimsDivCeil for u8 {
|
||||
fn div_ceil(self, rhs: Self) -> Self {
|
||||
(self + (rhs - 1)) / rhs
|
||||
}
|
||||
}
|
||||
impl StdShimsDivCeil for u16 {
|
||||
fn div_ceil(self, rhs: Self) -> Self {
|
||||
(self + (rhs - 1)) / rhs
|
||||
}
|
||||
}
|
||||
impl StdShimsDivCeil for u32 {
|
||||
fn div_ceil(self, rhs: Self) -> Self {
|
||||
(self + (rhs - 1)) / rhs
|
||||
}
|
||||
}
|
||||
impl StdShimsDivCeil for u64 {
|
||||
fn div_ceil(self, rhs: Self) -> Self {
|
||||
(self + (rhs - 1)) / rhs
|
||||
}
|
||||
}
|
||||
impl StdShimsDivCeil for u128 {
|
||||
fn div_ceil(self, rhs: Self) -> Self {
|
||||
(self + (rhs - 1)) / rhs
|
||||
}
|
||||
}
|
||||
impl StdShimsDivCeil for usize {
|
||||
fn div_ceil(self, rhs: Self) -> Self {
|
||||
(self + (rhs - 1)) / rhs
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
#[rustversion::before(1.74)]
|
||||
#[doc(hidden)]
|
||||
pub trait StdShimsIoErrorOther {
|
||||
fn other<E>(error: E) -> Self
|
||||
where
|
||||
E: Into<Box<dyn std::error::Error + Send + Sync>>;
|
||||
}
|
||||
#[cfg(feature = "std")]
|
||||
#[rustversion::before(1.74)]
|
||||
impl StdShimsIoErrorOther for std::io::Error {
|
||||
fn other<E>(error: E) -> Self
|
||||
where
|
||||
E: Into<Box<dyn std::error::Error + Send + Sync>>,
|
||||
{
|
||||
std::io::Error::new(std::io::ErrorKind::Other, error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,7 +25,11 @@ mod mutex_shim
}
pub use mutex_shim::{ShimMutex as Mutex, MutexGuard};

#[cfg(feature = "std")]
pub use std::sync::LazyLock;
#[cfg(not(feature = "std"))]
pub use spin::Lazy as LazyLock;
#[rustversion::before(1.80)]
#[cfg(feature = "std")]
pub use spin::Lazy as LazyLock;
#[rustversion::since(1.80)]
#[cfg(feature = "std")]
pub use std::sync::LazyLock;
@@ -1,22 +0,0 @@
|
||||
[package]
|
||||
name = "serai-task"
|
||||
version = "0.1.0"
|
||||
description = "A task schema for Serai services"
|
||||
license = "AGPL-3.0-only"
|
||||
repository = "https://github.com/serai-dex/serai/tree/develop/common/task"
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
keywords = []
|
||||
edition = "2021"
|
||||
publish = false
|
||||
rust-version = "1.75"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
rustdoc-args = ["--cfg", "docsrs"]
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
log = { version = "0.4", default-features = false, features = ["std"] }
|
||||
tokio = { version = "1", default-features = false, features = ["macros", "sync", "time"] }
|
||||
@@ -1,3 +0,0 @@
# Task

A schema to define tasks to be run ad infinitum.
@@ -1,136 +0,0 @@
|
||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
||||
#![doc = include_str!("../README.md")]
|
||||
#![deny(missing_docs)]
|
||||
|
||||
use core::{future::Future, time::Duration};
|
||||
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
/// A handle for a task.
|
||||
///
|
||||
/// The task will only stop running once all handles for it are dropped.
|
||||
//
|
||||
// `run_now` isn't infallible if the task may have been closed. `run_now` on a closed task would
|
||||
// either need to panic (historic behavior), silently drop the fact the task can't be run, or
|
||||
// return an error. Instead of having a potential panic, and instead of modeling the error
|
||||
// behavior, this task can't be closed unless all handles are dropped, ensuring calls to `run_now`
|
||||
// are infallible.
|
||||
#[derive(Clone)]
|
||||
pub struct TaskHandle {
|
||||
run_now: mpsc::Sender<()>,
|
||||
#[allow(dead_code)] // This is used to track if all handles have been dropped
|
||||
close: mpsc::Sender<()>,
|
||||
}
|
||||
|
||||
/// A task's internal structures.
|
||||
pub struct Task {
|
||||
run_now: mpsc::Receiver<()>,
|
||||
close: mpsc::Receiver<()>,
|
||||
}
|
||||
|
||||
impl Task {
|
||||
/// Create a new task definition.
|
||||
pub fn new() -> (Self, TaskHandle) {
|
||||
// Uses a capacity of 1 as any call to run as soon as possible satisfies all calls to run as
|
||||
// soon as possible
|
||||
let (run_now_send, run_now_recv) = mpsc::channel(1);
|
||||
// And any call to close satisfies all calls to close
|
||||
let (close_send, close_recv) = mpsc::channel(1);
|
||||
(
|
||||
Self { run_now: run_now_recv, close: close_recv },
|
||||
TaskHandle { run_now: run_now_send, close: close_send },
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl TaskHandle {
|
||||
/// Tell the task to run now (and not whenever its next iteration on a timer is).
|
||||
///
|
||||
/// Panics if the task has been dropped.
|
||||
pub fn run_now(&self) {
|
||||
#[allow(clippy::match_same_arms)]
|
||||
match self.run_now.try_send(()) {
|
||||
Ok(()) => {}
|
||||
// NOP on full, as this task will already be run as soon as possible
|
||||
Err(mpsc::error::TrySendError::Full(())) => {}
|
||||
Err(mpsc::error::TrySendError::Closed(())) => {
|
||||
panic!("task was unexpectedly closed when calling run_now")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A task to be continually run.
|
||||
pub trait ContinuallyRan: Sized + Send {
|
||||
/// The amount of seconds before this task should be polled again.
|
||||
const DELAY_BETWEEN_ITERATIONS: u64 = 5;
|
||||
/// The maximum amount of seconds before this task should be run again.
|
||||
///
|
||||
/// Upon error, the amount of time waited will be linearly increased until this limit.
|
||||
const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 120;
|
||||
|
||||
/// Run an iteration of the task.
|
||||
///
|
||||
/// If this returns `true`, all dependents of the task will immediately have a new iteration run
|
||||
/// (without waiting for whatever timer they were already on).
|
||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>>;
|
||||
|
||||
/// Continually run the task.
|
||||
fn continually_run(
|
||||
mut self,
|
||||
mut task: Task,
|
||||
dependents: Vec<TaskHandle>,
|
||||
) -> impl Send + Future<Output = ()> {
|
||||
async move {
|
||||
// The default number of seconds to sleep before running the task again
|
||||
let default_sleep_before_next_task = Self::DELAY_BETWEEN_ITERATIONS;
|
||||
// The current number of seconds to sleep before running the task again
|
||||
// We increment this upon errors in order to not flood the logs with errors
|
||||
let mut current_sleep_before_next_task = default_sleep_before_next_task;
|
||||
let increase_sleep_before_next_task = |current_sleep_before_next_task: &mut u64| {
|
||||
let new_sleep = *current_sleep_before_next_task + default_sleep_before_next_task;
|
||||
// Set a limit of sleeping for two minutes
*current_sleep_before_next_task = new_sleep.min(Self::MAX_DELAY_BETWEEN_ITERATIONS);
|
||||
};
|
||||
|
||||
loop {
|
||||
// If we were told to close/all handles were dropped, drop it
|
||||
{
|
||||
let should_close = task.close.try_recv();
|
||||
match should_close {
|
||||
Ok(()) | Err(mpsc::error::TryRecvError::Disconnected) => break,
|
||||
Err(mpsc::error::TryRecvError::Empty) => {}
|
||||
}
|
||||
}
|
||||
|
||||
match self.run_iteration().await {
|
||||
Ok(run_dependents) => {
|
||||
// Upon a successful (error-free) loop iteration, reset the amount of time we sleep
|
||||
current_sleep_before_next_task = default_sleep_before_next_task;
|
||||
|
||||
if run_dependents {
|
||||
for dependent in &dependents {
|
||||
dependent.run_now();
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
log::warn!("{}", e);
|
||||
increase_sleep_before_next_task(&mut current_sleep_before_next_task);
|
||||
}
|
||||
}
|
||||
|
||||
// Don't run the task again for another few seconds UNLESS told to run now
|
||||
tokio::select! {
|
||||
() = tokio::time::sleep(Duration::from_secs(current_sleep_before_next_task)) => {},
|
||||
msg = task.run_now.recv() => {
|
||||
// Check if this is firing because the handle was dropped
|
||||
if msg.is_none() {
|
||||
break;
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
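For orientation, a minimal sketch of how the `serai-task` API shown above might be driven. The `Heartbeat` type, its behavior, and the runtime setup are hypothetical; only `Task`, `TaskHandle`, and `ContinuallyRan` come from the code above.

```rust
use core::{future::Future, time::Duration};

use serai_task::{Task, ContinuallyRan};

// A hypothetical task which only logs that it ran an iteration.
struct Heartbeat;
impl ContinuallyRan for Heartbeat {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
    async move {
      log::info!("heartbeat iteration");
      // Returning true causes all dependents to immediately run an iteration
      Ok::<_, String>(true)
    }
  }
}

#[tokio::main]
async fn main() {
  let (task, handle) = Task::new();
  // No dependents; the task otherwise iterates every DELAY_BETWEEN_ITERATIONS seconds
  tokio::spawn(Heartbeat.continually_run(task, vec![]));
  // Ask for an immediate iteration instead of waiting on the timer
  handle.run_now();
  tokio::time::sleep(Duration::from_secs(1)).await;
  // Dropping `handle`, the only handle, lets the task close
}
```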
|
||||
@@ -1,5 +1,5 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))]

//! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation.
@@ -8,7 +8,6 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
keywords = []
|
||||
edition = "2021"
|
||||
publish = false
|
||||
rust-version = "1.81"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
@@ -21,15 +20,15 @@ workspace = true
|
||||
async-trait = { version = "0.1", default-features = false }
|
||||
|
||||
zeroize = { version = "^1.5", default-features = false, features = ["std"] }
|
||||
bitvec = { version = "1", default-features = false, features = ["std"] }
|
||||
rand_core = { version = "0.6", default-features = false, features = ["std"] }
|
||||
|
||||
blake2 = { version = "0.10", default-features = false, features = ["std"] }
|
||||
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
|
||||
|
||||
transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] }
|
||||
dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"] }
|
||||
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
|
||||
schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std"] }
|
||||
schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std", "aggregate"] }
|
||||
dkg-musig = { path = "../crypto/dkg/musig", default-features = false, features = ["std"] }
|
||||
frost = { package = "modular-frost", path = "../crypto/frost" }
|
||||
frost-schnorrkel = { path = "../crypto/schnorrkel" }
|
||||
|
||||
@@ -38,13 +37,12 @@ scale = { package = "parity-scale-codec", version = "3", default-features = fals
|
||||
zalloc = { path = "../common/zalloc" }
|
||||
serai-db = { path = "../common/db" }
|
||||
serai-env = { path = "../common/env" }
|
||||
serai-task = { path = "../common/task", version = "0.1" }
|
||||
|
||||
messages = { package = "serai-processor-messages", path = "../processor/messages" }
|
||||
processor-messages = { package = "serai-processor-messages", path = "../processor/messages" }
|
||||
message-queue = { package = "serai-message-queue", path = "../message-queue" }
|
||||
tributary = { package = "tributary-chain", path = "./tributary" }
|
||||
|
||||
sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
|
||||
sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] }
|
||||
serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }
|
||||
|
||||
hex = { version = "0.4", default-features = false, features = ["std"] }
|
||||
@@ -57,12 +55,10 @@ futures-util = { version = "0.3", default-features = false, features = ["std"] }
|
||||
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }
|
||||
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "request-response", "gossipsub", "macros"] }
|
||||
|
||||
serai-cosign = { path = "./cosign" }
|
||||
|
||||
[dev-dependencies]
|
||||
tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }
|
||||
sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
|
||||
sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
|
||||
sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] }
|
||||
sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] }
|
||||
|
||||
[features]
|
||||
longer-reattempts = []
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
AGPL-3.0-only license
|
||||
|
||||
Copyright (c) 2023-2024 Luke Parker
|
||||
Copyright (c) 2023 Luke Parker
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License Version 3 as
|
||||
|
||||
@@ -1,19 +1,7 @@
# Coordinator

- [`tendermint`](/tributary/tendermint) is an implementation of the Tendermint BFT algorithm.
The Serai coordinator communicates with other coordinators to prepare batches
for Serai and sign transactions.

- [`tributary`](./tributary) is a micro-blockchain framework. Instead of producing a blockchain
daemon like the Polkadot SDK or Cosmos SDK intend to, `tributary` is solely intended to be an
embedded asynchronous task within an application.

The Serai coordinator spawns a tributary for each validator set it's coordinating. This allows
the participating validators to communicate in a byzantine-fault-tolerant manner (relying on
Tendermint for consensus).

- [`cosign`](./cosign) contains a library to decide which Substrate blocks should be cosigned and
to evaluate cosigns.

- [`substrate`](./substrate) contains a library to index the Substrate blockchain and handle its
events.

- [`src`](./src) contains the source code for the Coordinator binary itself.
In order to achieve consensus over gossip, and order certain events, a
micro-blockchain is instantiated.
@@ -1,33 +0,0 @@
|
||||
[package]
|
||||
name = "serai-cosign"
|
||||
version = "0.1.0"
|
||||
description = "Evaluator of cosigns for the Serai network"
|
||||
license = "AGPL-3.0-only"
|
||||
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/cosign"
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
keywords = []
|
||||
edition = "2021"
|
||||
publish = false
|
||||
rust-version = "1.81"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
rustdoc-args = ["--cfg", "docsrs"]
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
blake2 = { version = "0.10", default-features = false, features = ["std"] }
|
||||
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
|
||||
|
||||
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
|
||||
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
|
||||
serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
|
||||
|
||||
log = { version = "0.4", default-features = false, features = ["std"] }
|
||||
|
||||
tokio = { version = "1", default-features = false }
|
||||
|
||||
serai-db = { path = "../../common/db", version = "0.1.1" }
|
||||
serai-task = { path = "../../common/task", version = "0.1" }
|
||||
@@ -1,15 +0,0 @@
|
||||
AGPL-3.0-only license
|
||||
|
||||
Copyright (c) 2023-2024 Luke Parker
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License Version 3 as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
@@ -1,121 +0,0 @@
|
||||
# Serai Cosign
|
||||
|
||||
The Serai blockchain is controlled by a set of validators referred to as the
|
||||
Serai validators. These validators could attempt to double-spend, even if every
|
||||
node on the network is a full node, via equivocating.
|
||||
|
||||
Posit:
|
||||
- The Serai validators control X SRI
|
||||
- The Serai validators produce block A swapping X SRI to Y XYZ
|
||||
- The Serai validators produce block B swapping X SRI to Z ABC
|
||||
- The Serai validators finalize block A and send to the validators for XYZ
|
||||
- The Serai validators finalize block B and send to the validators for ABC
|
||||
|
||||
This is solved via the cosigning protocol. The validators for XYZ and the
|
||||
validators for ABC each sign their view of the Serai blockchain, communicating
|
||||
amongst each other to ensure consistency.
|
||||
|
||||
The security of the cosigning protocol is not formally proven, and there are no
|
||||
claims it achieves Byzantine Fault Tolerance. This protocol is meant to be
|
||||
practical and make such attacks infeasible, when they could already be argued
|
||||
difficult to perform.
|
||||
|
||||
### Definitions
|
||||
|
||||
- Cosign: A signature from a non-Serai validator set for a Serai block
|
||||
- Cosign Commit: A collection of cosigns which achieve the necessary weight
|
||||
|
||||
### Methodology

Finalized blocks from the Serai network are intended to be cosigned if they
contain burn events. Only once cosigned should non-Serai validators process
them.

Cosigning occurs by a non-Serai validator set, using their threshold keys
declared on the Serai blockchain. Once 83% of non-Serai validator sets, by
weight, cosign a block, a cosign commit is formed. A cosign commit for a block
is considered to also cosign for all blocks preceding it.
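As a minimal sketch of the rule above (function and parameter names are hypothetical; stake is measured in any consistent unit), a cosign commit forms once the cosigned weight strictly exceeds 83% of the total stake:

```rust
/// Whether the gathered cosigns form a cosign commit under the 83%-by-weight rule.
fn forms_cosign_commit(weight_cosigned: u64, total_stake: u64) -> bool {
  weight_cosigned >= ((total_stake * 83) / 100) + 1
}
```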
### Bounds Under Asynchrony

Assuming an asynchronous environment fully controlled by the adversary, 34% of
a validator set may cause an equivocation. Control of 67% of non-Serai
validator sets, by weight, is sufficient to produce two distinct cosign commits
at the same position. This is due to the honest stake, 33%, being split across
the two candidates (67% + 16.5% = 83.5%, just over the threshold). This means
the cosigning protocol may produce multiple cosign commits if 34% of 67%, just
22.78%, of the non-Serai validator sets is malicious. This would be in
conjunction with 34% of the Serai validator set (assumed 20% of total stake),
for a total stake requirement of 34% of 20% + 22.78% of 80% (25.024%). This is
an increase from the 6.8% required without the cosigning protocol.
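As a worked check of the figures above: 34% × 67% = 22.78% of non-Serai stake, and 34% × 20% + 22.78% × 80% = 6.8% + 18.224% = 25.024% of total stake.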
### Bounds Under Synchrony

Assuming the honest stake within the non-Serai validator sets detects the
malicious stake within their set prior to assisting in producing a cosign for
their set, for which there is a multi-second window, 67% of 67% of non-Serai
validator sets is required to produce cosigns for those sets. This raises the
total stake requirement to 42.712% (past the usual 34% threshold).
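As a worked check: 67% × 67% = 44.89% of non-Serai stake, and 34% × 20% + 44.89% × 80% = 6.8% + 35.912% = 42.712% of total stake.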
### Behavior Reliant on Synchrony

If the Serai blockchain node detects an equivocation, it will stop responding
to all RPC requests and stop participating in finalizing further blocks. This
lets the node communicate the equivocating commits to other nodes (causing them
to exhibit the same behavior), yet prevents interaction with it.

If cosigns representing 17% of the non-Serai validator sets by weight are
detected for distinct blocks at the same position, the protocol halts. An
explicit latency period of seventy seconds is enacted after receiving a cosign
commit for the detection of such an equivocation. This is largely redundant
given how the Serai blockchain node will presumably have halted itself by this
time.
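As a rough sketch mirroring the rule above (hypothetical names; stake in any consistent unit), the halt condition amounts to:

```rust
/// Whether cosigns for distinct blocks at the same position, weighted by stake,
/// have reached the 17% halt threshold.
fn protocol_halts(faulty_weight: u64, total_stake: u64) -> bool {
  faulty_weight >= (total_stake * 17) / 100
}
```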
|
||||
|
||||
### Equivocation-Detection Avoidance
|
||||
|
||||
Malicious Serai validators could avoid detection of their equivocating if they
|
||||
produced two distinct blockchains, A and B, with different keys declared for
|
||||
the same non-Serai validator set. While the validators following A may detect
|
||||
the cosigns for distinct blocks by validators following B, the cosigns would be
|
||||
assumed invalid due to their signatures being verified against distinct keys.
|
||||
|
||||
This is prevented by requiring cosigns on the blocks which declare new keys,
|
||||
ensuring all validators have a consistent view of the keys used within the
|
||||
cosigning protocol (per the bounds of the cosigning protocol). These blocks are
|
||||
exempt from the general policy of cosign commits cosigning all prior blocks,
|
||||
preventing the newly declared keys (which aren't yet cosigned) from being used
|
||||
to cosign themselves. These cosigns are flagged as "notable", are permanently
|
||||
archived, and must be synced before a validator will move forward.
|
||||
|
||||
Cosigning the block which declares new keys also ensures agreement on the
|
||||
preceding block which declared the new set, with an exact specification of the
|
||||
participants and their weight, before it impacts the cosigning protocol.
|
||||
|
||||
### Denial of Service Concerns

Any historical Serai validator set may trigger a chain halt by producing an
equivocation after their retirement. This requires 67% to be malicious. 34% of
the active Serai validator set may also trigger a chain halt.

Since 17% of non-Serai validator sets equivocating causes a halt, 5.67% of
non-Serai validator sets' stake may cause a halt (in an asynchronous
environment fully controlled by the adversary). In a synchronous environment
where the honest stake cannot be split across two candidates, 11.33% of
non-Serai validator sets' stake is required.

The more practical attack is for one to obtain 5.67% of non-Serai validator
sets' stake, under any network conditions, and simply go offline. This will
take 17% of validator sets offline with it, preventing any cosign commits
from being performed. A fallback protocol where validators individually produce
cosigns, removing the network's horizontal scalability but ensuring liveness,
prevents this, restoring the additional requirements for control of an
asynchronous network or 11.33% of non-Serai validator sets' stake.
|
||||
|
||||
### TODO
|
||||
|
||||
The Serai node no longer responding to RPC requests upon detecting any
|
||||
equivocation, and the fallback protocol where validators individually produce
|
||||
signatures, are not implemented at this time. The former means the detection of
|
||||
equivocating cosigns is not redundant and the latter makes 5.67% of non-Serai
|
||||
validator sets' stake the DoS threshold, even without control of an
|
||||
asynchronous network.
|
||||
@@ -1,55 +0,0 @@
|
||||
use core::future::Future;
|
||||
use std::time::{Duration, SystemTime};
|
||||
|
||||
use serai_db::*;
|
||||
use serai_task::ContinuallyRan;
|
||||
|
||||
use crate::evaluator::CosignedBlocks;
|
||||
|
||||
/// How often callers should broadcast the cosigns flagged for rebroadcasting.
|
||||
pub const BROADCAST_FREQUENCY: Duration = Duration::from_secs(60);
|
||||
const SYNCHRONY_EXPECTATION: Duration = Duration::from_secs(10);
|
||||
const ACKNOWLEDGEMENT_DELAY: Duration =
|
||||
Duration::from_secs(BROADCAST_FREQUENCY.as_secs() + SYNCHRONY_EXPECTATION.as_secs());
|
||||
|
||||
create_db!(
|
||||
SubstrateCosignDelay {
|
||||
// The latest cosigned block number.
|
||||
LatestCosignedBlockNumber: () -> u64,
|
||||
}
|
||||
);
|
||||
|
||||
/// A task to delay acknowledgement of cosigns.
|
||||
pub(crate) struct CosignDelayTask<D: Db> {
|
||||
pub(crate) db: D,
|
||||
}
|
||||
|
||||
impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
|
||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
||||
async move {
|
||||
let mut made_progress = false;
|
||||
loop {
|
||||
let mut txn = self.db.txn();
|
||||
|
||||
// Receive the next block to mark as cosigned
|
||||
let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else {
|
||||
break;
|
||||
};
|
||||
// Calculate when we should mark it as valid
|
||||
let time_valid =
|
||||
SystemTime::UNIX_EPOCH + Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY;
|
||||
// Sleep until then (sleeping for zero time if that moment has already passed)
tokio::time::sleep(time_valid.duration_since(SystemTime::now()).unwrap_or(Duration::ZERO))
  .await;
|
||||
|
||||
// Set the cosigned block
|
||||
LatestCosignedBlockNumber::set(&mut txn, &block_number);
|
||||
txn.commit();
|
||||
|
||||
made_progress = true;
|
||||
}
|
||||
|
||||
Ok(made_progress)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,231 +0,0 @@
|
||||
use core::future::Future;
|
||||
use std::time::{Duration, SystemTime};
|
||||
|
||||
use serai_db::*;
|
||||
use serai_task::ContinuallyRan;
|
||||
|
||||
use crate::{
|
||||
HasEvents, GlobalSession, NetworksLatestCosignedBlock, RequestNotableCosigns,
|
||||
intend::{GlobalSessionsChannel, BlockEventData, BlockEvents},
|
||||
};
|
||||
|
||||
create_db!(
|
||||
SubstrateCosignEvaluator {
|
||||
// The global session currently being evaluated.
|
||||
CurrentlyEvaluatedGlobalSession: () -> ([u8; 32], GlobalSession),
|
||||
}
|
||||
);
|
||||
|
||||
db_channel!(
|
||||
SubstrateCosignEvaluatorChannels {
|
||||
// (cosigned block, time cosign was evaluated)
|
||||
CosignedBlocks: () -> (u64, u64),
|
||||
}
|
||||
);
|
||||
|
||||
// This is a strict function which won't panic, even with a malicious Serai node, so long as:
|
||||
// - It's called incrementally (with an increment of 1)
|
||||
// - It's only called for block numbers we've completed indexing on within the intend task
|
||||
// - It's only called for block numbers after a global session has started
|
||||
// - The global sessions channel is populated as the block declaring the session is indexed
|
||||
// Which all hold true within the context of this task and the intend task.
|
||||
//
|
||||
// This function will also ensure the currently evaluated global session is incremented once we
|
||||
// finish evaluation of the prior session.
|
||||
fn currently_evaluated_global_session_strict(
|
||||
txn: &mut impl DbTxn,
|
||||
block_number: u64,
|
||||
) -> ([u8; 32], GlobalSession) {
|
||||
let mut res = {
|
||||
let existing = match CurrentlyEvaluatedGlobalSession::get(txn) {
|
||||
Some(existing) => existing,
|
||||
None => {
|
||||
let first = GlobalSessionsChannel::try_recv(txn)
|
||||
.expect("fetching latest global session yet none declared");
|
||||
CurrentlyEvaluatedGlobalSession::set(txn, &first);
|
||||
first
|
||||
}
|
||||
};
|
||||
assert!(
|
||||
existing.1.start_block_number <= block_number,
|
||||
"candidate's start block number exceeds our block number"
|
||||
);
|
||||
existing
|
||||
};
|
||||
|
||||
if let Some(next) = GlobalSessionsChannel::peek(txn) {
|
||||
assert!(
|
||||
block_number <= next.1.start_block_number,
|
||||
"currently_evaluated_global_session_strict wasn't called incrementally"
|
||||
);
|
||||
// If it's time for this session to activate, take it from the channel and set it
|
||||
if block_number == next.1.start_block_number {
|
||||
GlobalSessionsChannel::try_recv(txn).unwrap();
|
||||
CurrentlyEvaluatedGlobalSession::set(txn, &next);
|
||||
res = next;
|
||||
}
|
||||
}
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
pub(crate) fn currently_evaluated_global_session(getter: &impl Get) -> Option<[u8; 32]> {
|
||||
CurrentlyEvaluatedGlobalSession::get(getter).map(|(id, _info)| id)
|
||||
}
|
||||
|
||||
/// A task to determine if a block has been cosigned and we should handle it.
|
||||
pub(crate) struct CosignEvaluatorTask<D: Db, R: RequestNotableCosigns> {
|
||||
pub(crate) db: D,
|
||||
pub(crate) request: R,
|
||||
}
|
||||
|
||||
impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D, R> {
|
||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
||||
async move {
|
||||
let mut known_cosign = None;
|
||||
let mut made_progress = false;
|
||||
loop {
|
||||
let mut txn = self.db.txn();
|
||||
let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn)
|
||||
else {
|
||||
break;
|
||||
};
|
||||
|
||||
// Fetch the global session information
|
||||
let (global_session, global_session_info) =
|
||||
currently_evaluated_global_session_strict(&mut txn, block_number);
|
||||
|
||||
match has_events {
|
||||
// Because this had notable events, we require an explicit cosign for this block by a
|
||||
// supermajority of the prior block's validator sets
|
||||
HasEvents::Notable => {
|
||||
let mut weight_cosigned = 0;
|
||||
for set in global_session_info.sets {
|
||||
// Check if we have the cosign from this set
|
||||
if NetworksLatestCosignedBlock::get(&txn, global_session, set.network)
|
||||
.map(|signed_cosign| signed_cosign.cosign.block_number) ==
|
||||
Some(block_number)
|
||||
{
|
||||
// Since we have this cosign, add the set's weight to the weight which has cosigned
|
||||
weight_cosigned +=
|
||||
global_session_info.stakes.get(&set.network).ok_or_else(|| {
|
||||
"ValidatorSet in global session yet didn't have its stake".to_string()
|
||||
})?;
|
||||
}
|
||||
}
|
||||
// Check if the sum weight doesn't cross the required threshold
|
||||
if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) {
|
||||
// Request the necessary cosigns over the network
|
||||
// TODO: Add a timer to ensure this isn't called too often
|
||||
self
|
||||
.request
|
||||
.request_notable_cosigns(global_session)
|
||||
.await
|
||||
.map_err(|e| format!("{e:?}"))?;
|
||||
// We return an error so the delay before this task is run again increases
|
||||
return Err(format!(
|
||||
"notable block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
|
||||
));
|
||||
}
|
||||
|
||||
log::info!("marking notable block #{block_number} as cosigned");
|
||||
}
|
||||
// Since this block didn't have any notable events, we simply require a cosign for this
|
||||
// block or a greater block by the current validator sets
|
||||
HasEvents::NonNotable => {
|
||||
// Check if this was satisfied by a cached result which wasn't calculated incrementally
|
||||
let known_cosigned = if let Some(known_cosign) = known_cosign {
|
||||
known_cosign >= block_number
|
||||
} else {
|
||||
// Clear `known_cosign` which is no longer helpful
|
||||
known_cosign = None;
|
||||
false
|
||||
};
|
||||
|
||||
// If it isn't already known to be cosigned, evaluate the latest cosigns
|
||||
if !known_cosigned {
|
||||
/*
|
||||
LatestCosign is populated with the latest cosigns for each network which don't
|
||||
exceed the latest global session we've evaluated the start of. This current block
|
||||
is during the latest global session we've evaluated the start of.
|
||||
*/
|
||||
|
||||
let mut weight_cosigned = 0;
|
||||
let mut lowest_common_block: Option<u64> = None;
|
||||
for set in global_session_info.sets {
|
||||
// Check if this set cosigned this block or not
|
||||
let Some(cosign) =
|
||||
NetworksLatestCosignedBlock::get(&txn, global_session, set.network)
|
||||
else {
|
||||
continue;
|
||||
};
|
||||
if cosign.cosign.block_number >= block_number {
|
||||
weight_cosigned +=
|
||||
global_session_info.stakes.get(&set.network).ok_or_else(|| {
|
||||
"ValidatorSet in global session yet didn't have its stake".to_string()
|
||||
})?;
|
||||
}
|
||||
|
||||
// Update the lowest block common to all of these cosigns
|
||||
lowest_common_block = lowest_common_block
|
||||
.map(|existing| existing.min(cosign.cosign.block_number))
|
||||
.or(Some(cosign.cosign.block_number));
|
||||
}
|
||||
|
||||
// Check if the sum weight doesn't cross the required threshold
|
||||
if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) {
|
||||
// Request the superseding notable cosigns over the network
|
||||
// If this session hasn't yet produced notable cosigns, then we presume we'll see
|
||||
// the desired non-notable cosigns as part of normal operations, without needing to
|
||||
// explicitly request them
|
||||
self
|
||||
.request
|
||||
.request_notable_cosigns(global_session)
|
||||
.await
|
||||
.map_err(|e| format!("{e:?}"))?;
|
||||
// We return an error so the delay before this task is run again increases
|
||||
return Err(format!(
|
||||
"block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
|
||||
));
|
||||
}
|
||||
|
||||
// Update the cached result for the block we know is cosigned
|
||||
/*
|
||||
There may be a higher block which was cosigned, but once we get to this block,
|
||||
we'll re-evaluate and find it then. The alternative would be an optimistic
|
||||
re-evaluation now. Both are fine, so the lower-complexity option is preferred.
|
||||
*/
|
||||
known_cosign = lowest_common_block;
|
||||
}
|
||||
|
||||
log::debug!("marking non-notable block #{block_number} as cosigned");
|
||||
}
|
||||
// If this block has no events necessitating cosigning, we can immediately consider the
|
||||
// block cosigned (making this block a NOP)
|
||||
HasEvents::No => {}
|
||||
}
|
||||
|
||||
// Since we checked we had the necessary cosigns, send it for delay before acknowledgement
|
||||
CosignedBlocks::send(
|
||||
&mut txn,
|
||||
&(
|
||||
block_number,
|
||||
SystemTime::now()
|
||||
.duration_since(SystemTime::UNIX_EPOCH)
|
||||
.unwrap_or(Duration::ZERO)
|
||||
.as_secs(),
|
||||
),
|
||||
);
|
||||
txn.commit();
|
||||
|
||||
if (block_number % 500) == 0 {
|
||||
log::info!("marking block #{block_number} as cosigned");
|
||||
}
|
||||
|
||||
made_progress = true;
|
||||
}
|
||||
|
||||
Ok(made_progress)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,181 +0,0 @@
|
||||
use core::future::Future;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use serai_client::{
|
||||
primitives::{SeraiAddress, Amount},
|
||||
validator_sets::primitives::ValidatorSet,
|
||||
Serai,
|
||||
};
|
||||
|
||||
use serai_db::*;
|
||||
use serai_task::ContinuallyRan;
|
||||
|
||||
use crate::*;
|
||||
|
||||
create_db!(
|
||||
CosignIntend {
|
||||
ScanCosignFrom: () -> u64,
|
||||
}
|
||||
);
|
||||
|
||||
#[derive(Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub(crate) struct BlockEventData {
|
||||
pub(crate) block_number: u64,
|
||||
pub(crate) has_events: HasEvents,
|
||||
}
|
||||
|
||||
db_channel! {
|
||||
CosignIntendChannels {
|
||||
GlobalSessionsChannel: () -> ([u8; 32], GlobalSession),
|
||||
BlockEvents: () -> BlockEventData,
|
||||
IntendedCosigns: (set: ValidatorSet) -> CosignIntent,
|
||||
}
|
||||
}
|
||||
|
||||
async fn block_has_events_justifying_a_cosign(
|
||||
serai: &Serai,
|
||||
block_number: u64,
|
||||
) -> Result<(Block, HasEvents), String> {
|
||||
let block = serai
|
||||
.finalized_block_by_number(block_number)
|
||||
.await
|
||||
.map_err(|e| format!("{e:?}"))?
|
||||
.ok_or_else(|| "couldn't get block which should've been finalized".to_string())?;
|
||||
let serai = serai.as_of(block.hash());
|
||||
|
||||
if !serai.validator_sets().key_gen_events().await.map_err(|e| format!("{e:?}"))?.is_empty() {
|
||||
return Ok((block, HasEvents::Notable));
|
||||
}
|
||||
|
||||
if !serai.coins().burn_with_instruction_events().await.map_err(|e| format!("{e:?}"))?.is_empty() {
|
||||
return Ok((block, HasEvents::NonNotable));
|
||||
}
|
||||
|
||||
Ok((block, HasEvents::No))
|
||||
}
|
||||
|
||||
/// A task to determine which blocks we should intend to cosign.
|
||||
pub(crate) struct CosignIntendTask<D: Db> {
|
||||
pub(crate) db: D,
|
||||
pub(crate) serai: Serai,
|
||||
}
|
||||
|
||||
impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
|
||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
||||
async move {
|
||||
let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1);
|
||||
let latest_block_number =
|
||||
self.serai.latest_finalized_block().await.map_err(|e| format!("{e:?}"))?.number();
|
||||
|
||||
for block_number in start_block_number ..= latest_block_number {
|
||||
let mut txn = self.db.txn();
|
||||
|
||||
let (block, mut has_events) =
|
||||
block_has_events_justifying_a_cosign(&self.serai, block_number)
|
||||
.await
|
||||
.map_err(|e| format!("{e:?}"))?;
|
||||
|
||||
// Check we are indexing a linear chain
|
||||
if (block_number > 1) &&
|
||||
(<[u8; 32]>::from(block.header.parent_hash) !=
|
||||
SubstrateBlocks::get(&txn, block_number - 1)
|
||||
.expect("indexing a block but haven't indexed its parent"))
|
||||
{
|
||||
Err(format!(
|
||||
"node's block #{block_number} doesn't build upon the block #{} prior indexed",
|
||||
block_number - 1
|
||||
))?;
|
||||
}
|
||||
SubstrateBlocks::set(&mut txn, block_number, &block.hash());
|
||||
|
||||
let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn);
|
||||
|
||||
// If this is notable, it creates a new global session, which we index into the database
|
||||
// now
|
||||
if has_events == HasEvents::Notable {
|
||||
let serai = self.serai.as_of(block.hash());
|
||||
let sets_and_keys = cosigning_sets(&serai).await?;
|
||||
let global_session =
|
||||
GlobalSession::id(sets_and_keys.iter().map(|(set, _key)| *set).collect());
|
||||
|
||||
let mut sets = Vec::with_capacity(sets_and_keys.len());
|
||||
let mut keys = HashMap::with_capacity(sets_and_keys.len());
|
||||
let mut stakes = HashMap::with_capacity(sets_and_keys.len());
|
||||
let mut total_stake = 0;
|
||||
for (set, key) in &sets_and_keys {
|
||||
sets.push(*set);
|
||||
keys.insert(set.network, SeraiAddress::from(*key));
|
||||
let stake = serai
|
||||
.validator_sets()
|
||||
.total_allocated_stake(set.network)
|
||||
.await
|
||||
.map_err(|e| format!("{e:?}"))?
|
||||
.unwrap_or(Amount(0))
|
||||
.0;
|
||||
stakes.insert(set.network, stake);
|
||||
total_stake += stake;
|
||||
}
|
||||
if total_stake == 0 {
|
||||
Err(format!("cosigning sets for block #{block_number} had 0 stake in total"))?;
|
||||
}
|
||||
|
||||
let global_session_info = GlobalSession {
|
||||
// This session starts cosigning after this block, as this block must be cosigned by
|
||||
// the existing validators
|
||||
start_block_number: block_number + 1,
|
||||
sets,
|
||||
keys,
|
||||
stakes,
|
||||
total_stake,
|
||||
};
|
||||
GlobalSessions::set(&mut txn, global_session, &global_session_info);
|
||||
if let Some(ending_global_session) = global_session_for_this_block {
|
||||
GlobalSessionsLastBlock::set(&mut txn, ending_global_session, &block_number);
|
||||
}
|
||||
LatestGlobalSessionIntended::set(&mut txn, &global_session);
|
||||
GlobalSessionsChannel::send(&mut txn, &(global_session, global_session_info));
|
||||
}
|
||||
|
||||
// If there isn't anyone available to cosign this block, meaning it'll never be cosigned,
|
||||
// we flag it as not having any events requiring cosigning so we don't attempt to
|
||||
// sign/require a cosign for it
|
||||
if global_session_for_this_block.is_none() {
|
||||
has_events = HasEvents::No;
|
||||
}
|
||||
|
||||
match has_events {
|
||||
HasEvents::Notable | HasEvents::NonNotable => {
|
||||
let global_session_for_this_block = global_session_for_this_block
|
||||
.expect("global session for this block was None but still attempting to cosign it");
|
||||
let global_session_info = GlobalSessions::get(&txn, global_session_for_this_block)
|
||||
.expect("last global session intended wasn't saved to the database");
|
||||
|
||||
// Tell each set of their expectation to cosign this block
|
||||
for set in global_session_info.sets {
|
||||
log::debug!("{:?} will be cosigning block #{block_number}", set);
|
||||
IntendedCosigns::send(
|
||||
&mut txn,
|
||||
set,
|
||||
&CosignIntent {
|
||||
global_session: global_session_for_this_block,
|
||||
block_number,
|
||||
block_hash: block.hash(),
|
||||
notable: has_events == HasEvents::Notable,
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
HasEvents::No => {}
|
||||
}
|
||||
|
||||
// Populate a singular feed with every block's status for the evaluator to work off of
|
||||
BlockEvents::send(&mut txn, &(BlockEventData { block_number, has_events }));
|
||||
// Mark this block as handled, meaning we should scan from the next block moving on
|
||||
ScanCosignFrom::set(&mut txn, &(block_number + 1));
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
Ok(start_block_number <= latest_block_number)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,456 +0,0 @@
|
||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
||||
#![doc = include_str!("../README.md")]
|
||||
#![deny(missing_docs)]
|
||||
|
||||
use core::{fmt::Debug, future::Future};
|
||||
use std::collections::HashMap;
|
||||
|
||||
use blake2::{Digest, Blake2s256};
|
||||
|
||||
use scale::{Encode, Decode};
|
||||
use borsh::{BorshSerialize, BorshDeserialize};
|
||||
|
||||
use serai_client::{
|
||||
primitives::{NetworkId, SeraiAddress},
|
||||
validator_sets::primitives::{Session, ValidatorSet, KeyPair},
|
||||
Public, Block, Serai, TemporalSerai,
|
||||
};
|
||||
|
||||
use serai_db::*;
|
||||
use serai_task::*;
|
||||
|
||||
/// The cosigns which are intended to be performed.
|
||||
mod intend;
|
||||
/// The evaluator of the cosigns.
|
||||
mod evaluator;
|
||||
/// The task to delay acknowledgement of the cosigns.
|
||||
mod delay;
|
||||
pub use delay::BROADCAST_FREQUENCY;
|
||||
use delay::LatestCosignedBlockNumber;
|
||||
|
||||
/// The schnorrkel context to use when signing a cosign.
|
||||
pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";
|
||||
|
||||
/// A 'global session', defined as all validator sets used for cosigning at a given moment.
|
||||
///
|
||||
/// We evaluate cosign faults within a global session. This ensures even if cosigners cosign
|
||||
/// distinct blocks at distinct positions within a global session, we still identify the faults.
|
||||
/*
|
||||
There is the attack where a validator set is given an alternate blockchain with a key generation
|
||||
event at block #n, while most validator sets are given a blockchain with a key generation event
|
||||
at block number #(n+1). This prevents whoever has the alternate blockchain from verifying the
|
||||
cosigns on the primary blockchain, and detecting the faults, if they use the keys as of the block
|
||||
prior to the block being cosigned.
|
||||
|
||||
We solve this by binding cosigns to a global session ID, which has a specific start block, and
|
||||
reading the keys from the start block. This means that so long as all validator sets agree on the
|
||||
start of a global session, they can verify all cosigns produced by that session, regardless of
|
||||
how it advances. Since agreeing on the start of a global session is mandated, there's no way to
|
||||
have validator sets follow two distinct global sessions without breaking the bounds of the
|
||||
cosigning protocol.
|
||||
*/
|
||||
#[derive(Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub(crate) struct GlobalSession {
|
||||
pub(crate) start_block_number: u64,
|
||||
pub(crate) sets: Vec<ValidatorSet>,
|
||||
pub(crate) keys: HashMap<NetworkId, SeraiAddress>,
|
||||
pub(crate) stakes: HashMap<NetworkId, u64>,
|
||||
pub(crate) total_stake: u64,
|
||||
}
|
||||
impl GlobalSession {
|
||||
fn id(mut cosigners: Vec<ValidatorSet>) -> [u8; 32] {
|
||||
cosigners.sort_by_key(|a| borsh::to_vec(a).unwrap());
|
||||
Blake2s256::digest(borsh::to_vec(&cosigners).unwrap()).into()
|
||||
}
|
||||
}
|
||||
|
||||
/// If the block has events.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||
enum HasEvents {
|
||||
/// The block had a notable event.
|
||||
///
|
||||
/// This is a special case as blocks with key gen events change the keys used for cosigning, and
|
||||
/// accordingly must be cosigned before we advance past them.
|
||||
Notable,
|
||||
/// The block had a non-notable event justifying a cosign.
|
||||
NonNotable,
|
||||
/// The block didn't have an event justifying a cosign.
|
||||
No,
|
||||
}
|
||||
|
||||
/// An intended cosign.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub struct CosignIntent {
|
||||
/// The global session this cosign is being performed under.
|
||||
global_session: [u8; 32],
|
||||
/// The number of the block to cosign.
|
||||
block_number: u64,
|
||||
/// The hash of the block to cosign.
|
||||
block_hash: [u8; 32],
|
||||
/// If this cosign must be handled before further cosigns are.
|
||||
notable: bool,
|
||||
}
|
||||
|
||||
/// A cosign.
|
||||
#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)]
|
||||
pub struct Cosign {
|
||||
/// The global session this cosign is being performed under.
|
||||
pub global_session: [u8; 32],
|
||||
/// The number of the block to cosign.
|
||||
pub block_number: u64,
|
||||
/// The hash of the block to cosign.
|
||||
pub block_hash: [u8; 32],
|
||||
/// The actual cosigner.
|
||||
pub cosigner: NetworkId,
|
||||
}
|
||||
|
||||
/// A signed cosign.
|
||||
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub struct SignedCosign {
|
||||
/// The cosign.
|
||||
pub cosign: Cosign,
|
||||
/// The signature for the cosign.
|
||||
pub signature: [u8; 64],
|
||||
}
|
||||
|
||||
impl SignedCosign {
|
||||
fn verify_signature(&self, signer: serai_client::Public) -> bool {
|
||||
let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
|
||||
let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };
|
||||
|
||||
signer.verify_simple(COSIGN_CONTEXT, &self.cosign.encode(), &signature).is_ok()
|
||||
}
|
||||
}
|
||||
|
||||
create_db! {
|
||||
Cosign {
|
||||
// The following are populated by the intend task and used throughout the library
|
||||
|
||||
// An index of Substrate blocks
|
||||
SubstrateBlocks: (block_number: u64) -> [u8; 32],
|
||||
// A mapping from a global session's ID to its relevant information.
|
||||
GlobalSessions: (global_session: [u8; 32]) -> GlobalSession,
|
||||
// The last block to be cosigned by a global session.
|
||||
GlobalSessionsLastBlock: (global_session: [u8; 32]) -> u64,
|
||||
// The latest global session intended.
|
||||
//
|
||||
// This is distinct from the latest global session for which we've evaluated the cosigns for.
|
||||
LatestGlobalSessionIntended: () -> [u8; 32],
|
||||
|
||||
// The following are managed by the `intake_cosign` function present in this file
|
||||
|
||||
// The latest cosigned block for each network.
|
||||
//
|
||||
// This will only be populated with cosigns predating or during the most recent global session
|
||||
// to have its start cosigned.
|
||||
//
|
||||
// The global session changes upon a notable block, causing each global session to have exactly
|
||||
// one notable block. All validator sets will explicitly produce a cosign for their notable
|
||||
// block, causing the latest cosigned block for a global session to either be the global
|
||||
// session's notable cosigns or the network's latest cosigns.
|
||||
NetworksLatestCosignedBlock: (global_session: [u8; 32], network: NetworkId) -> SignedCosign,
|
||||
// Cosigns received for blocks not locally recognized as finalized.
|
||||
Faults: (global_session: [u8; 32]) -> Vec<SignedCosign>,
|
||||
// The global session which faulted.
|
||||
FaultedSession: () -> [u8; 32],
|
||||
}
|
||||
}
|
||||
|
||||
/// Fetch the keys used for cosigning by a specific network.
|
||||
async fn keys_for_network(
|
||||
serai: &TemporalSerai<'_>,
|
||||
network: NetworkId,
|
||||
) -> Result<Option<(Session, KeyPair)>, String> {
|
||||
// The Serai network never cosigns so it has no keys for cosigning
|
||||
if network == NetworkId::Serai {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let Some(latest_session) =
|
||||
serai.validator_sets().session(network).await.map_err(|e| format!("{e:?}"))?
|
||||
else {
|
||||
// If this network hasn't had a session declared, move on
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
// Get the keys for the latest session
|
||||
if let Some(keys) = serai
|
||||
.validator_sets()
|
||||
.keys(ValidatorSet { network, session: latest_session })
|
||||
.await
|
||||
.map_err(|e| format!("{e:?}"))?
|
||||
{
|
||||
return Ok(Some((latest_session, keys)));
|
||||
}
|
||||
|
||||
// If the latest session has yet to set keys, use the prior session
|
||||
if let Some(prior_session) = latest_session.0.checked_sub(1).map(Session) {
|
||||
if let Some(keys) = serai
|
||||
.validator_sets()
|
||||
.keys(ValidatorSet { network, session: prior_session })
|
||||
.await
|
||||
.map_err(|e| format!("{e:?}"))?
|
||||
{
|
||||
return Ok(Some((prior_session, keys)));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
/// Fetch the `ValidatorSet`s, and their associated keys, used for cosigning as of this block.
|
||||
async fn cosigning_sets(serai: &TemporalSerai<'_>) -> Result<Vec<(ValidatorSet, Public)>, String> {
|
||||
let mut sets = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
|
||||
for network in serai_client::primitives::NETWORKS {
|
||||
let Some((session, keys)) = keys_for_network(serai, network).await? else {
|
||||
// If this network doesn't have usable keys, move on
|
||||
continue;
|
||||
};
|
||||
|
||||
sets.push((ValidatorSet { network, session }, keys.0));
|
||||
}
|
||||
Ok(sets)
|
||||
}
|
||||
|
||||
/// An object usable to request notable cosigns for a block.
|
||||
pub trait RequestNotableCosigns: 'static + Send {
|
||||
/// The error type which may be encountered when requesting notable cosigns.
|
||||
type Error: Debug;
|
||||
|
||||
/// Request the notable cosigns for this global session.
|
||||
fn request_notable_cosigns(
|
||||
&self,
|
||||
global_session: [u8; 32],
|
||||
) -> impl Send + Future<Output = Result<(), Self::Error>>;
|
||||
}
|
||||
|
||||
/// An error used to indicate the cosigning protocol has faulted.
|
||||
#[derive(Debug)]
|
||||
pub struct Faulted;
|
||||
|
||||
/// The interface to manage cosigning with.
|
||||
pub struct Cosigning<D: Db> {
|
||||
db: D,
|
||||
}
|
||||
impl<D: Db> Cosigning<D> {
|
||||
/// Spawn the tasks to intend and evaluate cosigns.
|
||||
///
|
||||
/// The database specified must only be used with a singular instance of the Serai network, and
|
||||
/// only used once at any given time.
|
||||
pub fn spawn<R: RequestNotableCosigns>(
|
||||
db: D,
|
||||
serai: Serai,
|
||||
request: R,
|
||||
tasks_to_run_upon_cosigning: Vec<TaskHandle>,
|
||||
) -> Self {
|
||||
let (intend_task, _intend_task_handle) = Task::new();
|
||||
let (evaluator_task, evaluator_task_handle) = Task::new();
|
||||
let (delay_task, delay_task_handle) = Task::new();
|
||||
tokio::spawn(
|
||||
(intend::CosignIntendTask { db: db.clone(), serai })
|
||||
.continually_run(intend_task, vec![evaluator_task_handle]),
|
||||
);
|
||||
tokio::spawn(
|
||||
(evaluator::CosignEvaluatorTask { db: db.clone(), request })
|
||||
.continually_run(evaluator_task, vec![delay_task_handle]),
|
||||
);
|
||||
tokio::spawn(
|
||||
(delay::CosignDelayTask { db: db.clone() })
|
||||
.continually_run(delay_task, tasks_to_run_upon_cosigning),
|
||||
);
|
||||
Self { db }
|
||||
}
|
||||
|
||||
/// The latest cosigned block number.
|
||||
pub fn latest_cosigned_block_number(getter: &impl Get) -> Result<u64, Faulted> {
|
||||
if FaultedSession::get(getter).is_some() {
|
||||
Err(Faulted)?;
|
||||
}
|
||||
|
||||
Ok(LatestCosignedBlockNumber::get(getter).unwrap_or(0))
|
||||
}
|
||||
|
||||
/// Fetch a cosigned Substrate block by its block number.
|
||||
pub fn cosigned_block(getter: &impl Get, block_number: u64) -> Result<Option<[u8; 32]>, Faulted> {
|
||||
if block_number > Self::latest_cosigned_block_number(getter)? {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
Ok(Some(
|
||||
SubstrateBlocks::get(getter, block_number).expect("cosigned block but didn't index it"),
|
||||
))
|
||||
}
|
||||
|
||||
/// Fetch the notable cosigns for a global session in order to respond to requests.
|
||||
///
|
||||
/// If this global session hasn't produced any notable cosigns, this will return the latest
|
||||
/// cosigns for this session.
|
||||
pub fn notable_cosigns(getter: &impl Get, global_session: [u8; 32]) -> Vec<SignedCosign> {
|
||||
let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
|
||||
for network in serai_client::primitives::NETWORKS {
|
||||
if let Some(cosign) = NetworksLatestCosignedBlock::get(getter, global_session, network) {
|
||||
cosigns.push(cosign);
|
||||
}
|
||||
}
|
||||
cosigns
|
||||
}
|
||||
|
||||
/// The cosigns to rebroadcast every `BROADCAST_FREQUENCY` seconds.
|
||||
///
|
||||
/// This will be the most recent cosigns, in case the initial broadcast failed, or the faulty
|
||||
/// cosigns, in case of a fault, to induce identification of the fault by others.
|
||||
pub fn cosigns_to_rebroadcast(&self) -> Vec<SignedCosign> {
|
||||
if let Some(faulted) = FaultedSession::get(&self.db) {
|
||||
let mut cosigns = Faults::get(&self.db, faulted).expect("faulted with no faults");
|
||||
// Also include all of our recognized-as-honest cosigns in an attempt to induce fault
|
||||
// identification in those who see the faulty cosigns as honest
|
||||
for network in serai_client::primitives::NETWORKS {
|
||||
if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, faulted, network) {
|
||||
if cosign.cosign.global_session == faulted {
|
||||
cosigns.push(cosign);
|
||||
}
|
||||
}
|
||||
}
|
||||
cosigns
|
||||
} else {
|
||||
let Some(global_session) = evaluator::currently_evaluated_global_session(&self.db) else {
|
||||
return vec![];
|
||||
};
|
||||
let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
|
||||
for network in serai_client::primitives::NETWORKS {
|
||||
if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, global_session, network) {
|
||||
cosigns.push(cosign);
|
||||
}
|
||||
}
|
||||
cosigns
|
||||
}
|
||||
}
|
||||
|
||||
/// Intake a cosign from the Serai network.
|
||||
///
|
||||
/// - Returns Err(_) if there was an error trying to validate the cosign and it should be retried
|
||||
/// later.
|
||||
/// - Returns Ok(true) if the cosign was successfully handled or could not be handled at this
|
||||
/// time.
|
||||
/// - Returns Ok(false) if the cosign was invalid.
|
||||
//
|
||||
// We collapse a cosign which shouldn't be handled yet into a valid cosign (`Ok(true)`) as we
|
||||
// assume we'll either explicitly request it if we need it or we'll naturally see it (or a later,
|
||||
// more relevant, cosign) again.
|
||||
//
|
||||
// Takes `&mut self` as this should only be called once at any given moment.
|
||||
// TODO: Don't overload bool here
|
||||
pub fn intake_cosign(&mut self, signed_cosign: &SignedCosign) -> Result<bool, String> {
|
||||
let cosign = &signed_cosign.cosign;
|
||||
let network = cosign.cosigner;
|
||||
|
||||
// Check our indexed blockchain includes a block with this block number
|
||||
let Some(our_block_hash) = SubstrateBlocks::get(&self.db, cosign.block_number) else {
|
||||
return Ok(true);
|
||||
};
|
||||
let faulty = cosign.block_hash != our_block_hash;
|
||||
|
||||
// Check this isn't a dated cosign within its global session (as it would be if rebroadcasted)
|
||||
if !faulty {
|
||||
if let Some(existing) =
|
||||
NetworksLatestCosignedBlock::get(&self.db, cosign.global_session, network)
|
||||
{
|
||||
if existing.cosign.block_number >= cosign.block_number {
|
||||
return Ok(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let Some(global_session) = GlobalSessions::get(&self.db, cosign.global_session) else {
|
||||
// Unrecognized global session
|
||||
return Ok(true);
|
||||
};
|
||||
|
||||
// Check the cosigned block number is in range to the global session
|
||||
if cosign.block_number < global_session.start_block_number {
|
||||
// Cosign is for a block predating the global session
|
||||
return Ok(false);
|
||||
}
|
||||
if !faulty {
|
||||
// This prevents a malicious validator set, on the same chain, from producing a cosign after
|
||||
// their final block, replacing their notable cosign
|
||||
if let Some(last_block) = GlobalSessionsLastBlock::get(&self.db, cosign.global_session) {
|
||||
if cosign.block_number > last_block {
|
||||
// Cosign is for a block after the last block this global session should have signed
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check the cosign's signature
|
||||
{
|
||||
let key = Public::from({
|
||||
let Some(key) = global_session.keys.get(&network) else {
|
||||
return Ok(false);
|
||||
};
|
||||
*key
|
||||
});
|
||||
|
||||
if !signed_cosign.verify_signature(key) {
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
|
||||
// Since we verified this cosign's signature, and have a chain sufficiently long, handle the
|
||||
// cosign
|
||||
|
||||
let mut txn = self.db.txn();
|
||||
|
||||
if !faulty {
|
||||
// If this is for a future global session, we don't acknowledge this cosign at this time
|
||||
let latest_cosigned_block_number = LatestCosignedBlockNumber::get(&txn).unwrap_or(0);
|
||||
// This global session starts the block *after* its declaration, so we want to check if the
|
||||
// block declaring it was cosigned
|
||||
if (global_session.start_block_number - 1) > latest_cosigned_block_number {
|
||||
drop(txn);
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
// This is safe: it's in-range and newer than the existing entry, as checked above, since it
// isn't faulty
|
||||
NetworksLatestCosignedBlock::set(&mut txn, cosign.global_session, network, signed_cosign);
|
||||
} else {
|
||||
let mut faults = Faults::get(&txn, cosign.global_session).unwrap_or(vec![]);
|
||||
// Only handle this as a fault if this set wasn't prior faulty
|
||||
if !faults.iter().any(|cosign| cosign.cosign.cosigner == network) {
|
||||
faults.push(signed_cosign.clone());
|
||||
Faults::set(&mut txn, cosign.global_session, &faults);
|
||||
|
||||
let mut weight_cosigned = 0;
|
||||
for fault in &faults {
|
||||
let Some(stake) = global_session.stakes.get(&fault.cosign.cosigner) else {
|
||||
Err("cosigner with recognized key didn't have a stake entry saved".to_string())?
|
||||
};
|
||||
weight_cosigned += stake;
|
||||
}
|
||||
|
||||
// Check if the sum weight means a fault has occurred
|
||||
if weight_cosigned >= ((global_session.total_stake * 17) / 100) {
|
||||
FaultedSession::set(&mut txn, &cosign.global_session);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
txn.commit();
|
||||
Ok(true)
|
||||
}
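// Worked example of the fault threshold above, with purely illustrative numbers: with a total
// stake of 1_000_000, FaultedSession is only set once the faulty cosigners' summed stake
// reaches (1_000_000 * 17) / 100 = 170_000.
fn fault_threshold_example() {
  let total_stake: u64 = 1_000_000;
  assert_eq!((total_stake * 17) / 100, 170_000);
}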
|
||||
|
||||
/// Receive intended cosigns to produce for this ValidatorSet.
|
||||
///
|
||||
/// All cosigns intended, up to and including the next notable cosign, are returned.
|
||||
///
|
||||
/// This will drain the internal channel and not re-yield these intentions again.
|
||||
pub fn intended_cosigns(txn: &mut impl DbTxn, set: ValidatorSet) -> Vec<CosignIntent> {
|
||||
let mut res: Vec<CosignIntent> = vec![];
|
||||
// While we have yet to find a notable cosign...
|
||||
while !res.last().map(|cosign| cosign.notable).unwrap_or(false) {
|
||||
let Some(intent) = intend::IntendedCosigns::try_recv(txn, set) else { break };
|
||||
res.push(intent);
|
||||
}
|
||||
res
|
||||
}
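// Hedged sketch of the drain-until-notable pattern above, over a plain iterator instead of the
// DB-backed channel: intents are taken in order, stopping after (and including) the first
// notable one.
fn take_through_notable(intents: impl Iterator<Item = (u64, bool)>) -> Vec<(u64, bool)> {
  let mut taken = vec![];
  for (block_number, notable) in intents {
    taken.push((block_number, notable));
    if notable {
      break;
    }
  }
  taken
}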
|
||||
}
|
||||
coordinator/src/cosign_evaluator.rs (new file, 333 lines)
@@ -0,0 +1,333 @@
|
||||
use core::time::Duration;
|
||||
use std::{
|
||||
sync::Arc,
|
||||
collections::{HashSet, HashMap},
|
||||
};
|
||||
|
||||
use tokio::{
|
||||
sync::{mpsc, Mutex, RwLock},
|
||||
time::sleep,
|
||||
};
|
||||
|
||||
use borsh::BorshSerialize;
|
||||
use sp_application_crypto::RuntimePublic;
|
||||
use serai_client::{
|
||||
primitives::{ExternalNetworkId, EXTERNAL_NETWORKS},
|
||||
validator_sets::primitives::{ExternalValidatorSet, Session},
|
||||
Serai, SeraiError, TemporalSerai,
|
||||
};
|
||||
|
||||
use serai_db::{Get, DbTxn, Db, create_db};
|
||||
|
||||
use processor_messages::coordinator::cosign_block_msg;
|
||||
|
||||
use crate::{
|
||||
p2p::{CosignedBlock, GossipMessageKind, P2p},
|
||||
substrate::LatestCosignedBlock,
|
||||
};
|
||||
|
||||
create_db! {
|
||||
CosignDb {
|
||||
ReceivedCosign: (set: ExternalValidatorSet, block: [u8; 32]) -> CosignedBlock,
|
||||
LatestCosign: (network: ExternalNetworkId) -> CosignedBlock,
|
||||
DistinctChain: (set: ExternalValidatorSet) -> (),
|
||||
}
|
||||
}
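// Hedged sketch, not part of the actual module: how the entries declared above are used.
// create_db! (from serai_db) generates one struct per entry with typed set/get helpers keyed
// by the declared parameters, as called later in this file.
fn latest_cosign_roundtrip_sketch(
  db: &mut impl Db,
  network: ExternalNetworkId,
  cosign: &CosignedBlock,
) {
  let mut txn = db.txn();
  LatestCosign::set(&mut txn, network, cosign);
  txn.commit();
  assert!(LatestCosign::get(&*db, network).is_some());
}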
|
||||
|
||||
pub struct CosignEvaluator<D: Db> {
|
||||
db: Mutex<D>,
|
||||
serai: Arc<Serai>,
|
||||
stakes: RwLock<Option<HashMap<ExternalNetworkId, u64>>>,
|
||||
latest_cosigns: RwLock<HashMap<ExternalNetworkId, CosignedBlock>>,
|
||||
}
|
||||
|
||||
impl<D: Db> CosignEvaluator<D> {
|
||||
async fn update_latest_cosign(&self) {
|
||||
let stakes_lock = self.stakes.read().await;
|
||||
// If we haven't gotten the stake data yet, return
|
||||
let Some(stakes) = stakes_lock.as_ref() else { return };
|
||||
|
||||
let total_stake = stakes.values().copied().sum::<u64>();
|
||||
|
||||
let latest_cosigns = self.latest_cosigns.read().await;
|
||||
let mut highest_block = 0;
|
||||
for cosign in latest_cosigns.values() {
|
||||
let mut networks = HashSet::new();
|
||||
for (network, sub_cosign) in &*latest_cosigns {
|
||||
if sub_cosign.block_number >= cosign.block_number {
|
||||
networks.insert(network);
|
||||
}
|
||||
}
|
||||
let sum_stake =
|
||||
networks.into_iter().map(|network| stakes.get(network).unwrap_or(&0)).sum::<u64>();
|
||||
let needed_stake = ((total_stake * 2) / 3) + 1;
|
||||
if (total_stake == 0) || (sum_stake > needed_stake) {
|
||||
highest_block = highest_block.max(cosign.block_number);
|
||||
}
|
||||
}
|
||||
|
||||
let mut db_lock = self.db.lock().await;
|
||||
let mut txn = db_lock.txn();
|
||||
if highest_block > LatestCosignedBlock::latest_cosigned_block(&txn) {
|
||||
log::info!("setting latest cosigned block to {}", highest_block);
|
||||
LatestCosignedBlock::set(&mut txn, &highest_block);
|
||||
}
|
||||
txn.commit();
|
||||
}
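// Worked example of the stake math above, with an illustrative total stake of 900:
// needed_stake = ((900 * 2) / 3) + 1 = 601, and a cosign's block only advances the latest
// cosigned block number once the cosigning networks' summed stake exceeds that value.
fn needed_stake_example() {
  let total_stake: u64 = 900;
  let needed_stake = ((total_stake * 2) / 3) + 1;
  assert_eq!(needed_stake, 601);
}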
|
||||
|
||||
async fn update_stakes(&self) -> Result<(), SeraiError> {
|
||||
let serai = self.serai.as_of_latest_finalized_block().await?;
|
||||
|
||||
let mut stakes = HashMap::new();
|
||||
for network in EXTERNAL_NETWORKS {
|
||||
// Use whether this network has published a Batch as a short-circuit for whether they've ever set a key
|
||||
let set_key = serai.in_instructions().last_batch_for_network(network).await?.is_some();
|
||||
if set_key {
|
||||
stakes.insert(
|
||||
network,
|
||||
serai
|
||||
.validator_sets()
|
||||
.total_allocated_stake(network.into())
|
||||
.await?
|
||||
.expect("network which published a batch didn't have a stake set")
|
||||
.0,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Since we've successfully built stakes, set it
|
||||
*self.stakes.write().await = Some(stakes);
|
||||
|
||||
self.update_latest_cosign().await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Uses Err to signify a message should be retried
|
||||
async fn handle_new_cosign(&self, cosign: CosignedBlock) -> Result<(), SeraiError> {
|
||||
// If we already have this cosign or a newer cosign, return
|
||||
if let Some(latest) = self.latest_cosigns.read().await.get(&cosign.network) {
|
||||
if latest.block_number >= cosign.block_number {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
// If this is an old cosign (older than a day), drop it
|
||||
let latest_block = self.serai.latest_finalized_block().await?;
|
||||
if (cosign.block_number + (24 * 60 * 60 / 6)) < latest_block.number() {
|
||||
log::debug!("received old cosign supposedly signed by {:?}", cosign.network);
|
||||
return Ok(());
|
||||
}
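// The cutoff above assumes ~6-second Substrate blocks: 24 * 60 * 60 / 6 = 14_400 blocks per
// day, so any cosign more than 14_400 blocks behind the latest finalized block is dropped.
const _: () = assert!((24 * 60 * 60 / 6) == 14_400);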
|
||||
|
||||
let Some(block) = self.serai.finalized_block_by_number(cosign.block_number).await? else {
|
||||
log::warn!("received cosign with a block number which doesn't map to a block");
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
async fn set_with_keys_fn(
|
||||
serai: &TemporalSerai<'_>,
|
||||
network: ExternalNetworkId,
|
||||
) -> Result<Option<ExternalValidatorSet>, SeraiError> {
|
||||
let Some(latest_session) = serai.validator_sets().session(network.into()).await? else {
|
||||
log::warn!("received cosign from {:?}, which doesn't yet have a session", network);
|
||||
return Ok(None);
|
||||
};
|
||||
let prior_session = Session(latest_session.0.saturating_sub(1));
|
||||
Ok(Some(
|
||||
if serai
|
||||
.validator_sets()
|
||||
.keys(ExternalValidatorSet { network, session: prior_session })
|
||||
.await?
|
||||
.is_some()
|
||||
{
|
||||
ExternalValidatorSet { network, session: prior_session }
|
||||
} else {
|
||||
ExternalValidatorSet { network, session: latest_session }
|
||||
},
|
||||
))
|
||||
}
|
||||
|
||||
// Get the key for this network as of the prior block
|
||||
// If we have two chains, this value may be different across chains depending on if one chain
|
||||
// included the set_keys and one didn't
|
||||
// Because set_keys will force a cosign, it will force detection of distinct blocks
|
||||
// re: set_keys using keys prior to set_keys (assumed amenable to all)
|
||||
let serai = self.serai.as_of(block.header.parent_hash.into());
|
||||
|
||||
let Some(set_with_keys) = set_with_keys_fn(&serai, cosign.network).await? else {
|
||||
return Ok(());
|
||||
};
|
||||
let Some(keys) = serai.validator_sets().keys(set_with_keys).await? else {
|
||||
log::warn!("received cosign for a block we didn't have keys for");
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
if !keys
|
||||
.0
|
||||
.verify(&cosign_block_msg(cosign.block_number, cosign.block), &cosign.signature.into())
|
||||
{
|
||||
log::warn!("received cosigned block with an invalid signature");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
log::info!(
|
||||
"received cosign for block {} ({}) by {:?}",
|
||||
block.number(),
|
||||
hex::encode(cosign.block),
|
||||
cosign.network
|
||||
);
|
||||
|
||||
// Save this cosign to the DB
|
||||
{
|
||||
let mut db = self.db.lock().await;
|
||||
let mut txn = db.txn();
|
||||
ReceivedCosign::set(&mut txn, set_with_keys, cosign.block, &cosign);
|
||||
LatestCosign::set(&mut txn, set_with_keys.network, &(cosign));
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
if cosign.block != block.hash() {
|
||||
log::error!(
|
||||
"received cosign for a distinct block at {}. we have {}. cosign had {}",
|
||||
cosign.block_number,
|
||||
hex::encode(block.hash()),
|
||||
hex::encode(cosign.block)
|
||||
);
|
||||
|
||||
let serai = self.serai.as_of(latest_block.hash());
|
||||
|
||||
let mut db = self.db.lock().await;
|
||||
// Save this set as being on a different chain
|
||||
let mut txn = db.txn();
|
||||
DistinctChain::set(&mut txn, set_with_keys, &());
|
||||
txn.commit();
|
||||
|
||||
let mut total_stake = 0;
|
||||
let mut total_on_distinct_chain = 0;
|
||||
for network in EXTERNAL_NETWORKS {
|
||||
// Get the current set for this network
|
||||
let set_with_keys = {
|
||||
let mut res;
|
||||
while {
|
||||
res = set_with_keys_fn(&serai, network).await;
|
||||
res.is_err()
|
||||
} {
|
||||
log::error!(
|
||||
"couldn't get the set with keys when checking for a distinct chain: {:?}",
|
||||
res
|
||||
);
|
||||
tokio::time::sleep(core::time::Duration::from_secs(3)).await;
|
||||
}
|
||||
res.unwrap()
|
||||
};
|
||||
|
||||
// Get its stake
|
||||
// Doesn't use the stakes inside self to prevent deadlocks re: multi-lock acquisition
|
||||
if let Some(set_with_keys) = set_with_keys {
|
||||
let stake = {
|
||||
let mut res;
|
||||
while {
|
||||
res =
|
||||
serai.validator_sets().total_allocated_stake(set_with_keys.network.into()).await;
|
||||
res.is_err()
|
||||
} {
|
||||
log::error!(
|
||||
"couldn't get total allocated stake when checking for a distinct chain: {:?}",
|
||||
res
|
||||
);
|
||||
tokio::time::sleep(core::time::Duration::from_secs(3)).await;
|
||||
}
|
||||
res.unwrap()
|
||||
};
|
||||
|
||||
if let Some(stake) = stake {
|
||||
total_stake += stake.0;
|
||||
|
||||
if DistinctChain::get(&*db, set_with_keys).is_some() {
|
||||
total_on_distinct_chain += stake.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// See https://github.com/serai-dex/serai/issues/339 for the reasoning on 17%
|
||||
if (total_stake * 17 / 100) <= total_on_distinct_chain {
|
||||
panic!("17% of validator sets (by stake) have co-signed a distinct chain");
|
||||
}
|
||||
} else {
|
||||
{
|
||||
let mut latest_cosigns = self.latest_cosigns.write().await;
|
||||
latest_cosigns.insert(cosign.network, cosign);
|
||||
}
|
||||
self.update_latest_cosign().await;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
pub fn new<P: P2p>(db: D, p2p: P, serai: Arc<Serai>) -> mpsc::UnboundedSender<CosignedBlock> {
|
||||
let mut latest_cosigns = HashMap::new();
|
||||
for network in EXTERNAL_NETWORKS {
|
||||
if let Some(cosign) = LatestCosign::get(&db, network) {
|
||||
latest_cosigns.insert(network, cosign);
|
||||
}
|
||||
}
|
||||
|
||||
let evaluator = Arc::new(Self {
|
||||
db: Mutex::new(db),
|
||||
serai,
|
||||
stakes: RwLock::new(None),
|
||||
latest_cosigns: RwLock::new(latest_cosigns),
|
||||
});
|
||||
|
||||
// Spawn a task to update stakes regularly
|
||||
tokio::spawn({
|
||||
let evaluator = evaluator.clone();
|
||||
async move {
|
||||
loop {
|
||||
// Run this until it passes
|
||||
while evaluator.update_stakes().await.is_err() {
|
||||
log::warn!("couldn't update stakes in the cosign evaluator");
|
||||
// Try again in 10 seconds
|
||||
sleep(Duration::from_secs(10)).await;
|
||||
}
|
||||
// Run it every 10 minutes as we don't need the exact stake data for this to be valid
|
||||
sleep(Duration::from_secs(10 * 60)).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Spawn a task to receive cosigns and handle them
|
||||
let (send, mut recv) = mpsc::unbounded_channel();
|
||||
tokio::spawn({
|
||||
let evaluator = evaluator.clone();
|
||||
async move {
|
||||
while let Some(msg) = recv.recv().await {
|
||||
while evaluator.handle_new_cosign(msg).await.is_err() {
|
||||
// Try again in 10 seconds
|
||||
sleep(Duration::from_secs(10)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Spawn a task to rebroadcast the most recent cosigns
|
||||
tokio::spawn({
|
||||
async move {
|
||||
loop {
|
||||
let cosigns = evaluator.latest_cosigns.read().await.values().copied().collect::<Vec<_>>();
|
||||
for cosign in cosigns {
|
||||
let mut buf = vec![];
|
||||
cosign.serialize(&mut buf).unwrap();
|
||||
P2p::broadcast(&p2p, GossipMessageKind::CosignedBlock, buf).await;
|
||||
}
|
||||
sleep(Duration::from_secs(60)).await;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Return the channel to send cosigns
|
||||
send
|
||||
}
|
||||
}
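// Hedged usage sketch (illustrative only): cosigns received from the P2P layer are fed into
// the evaluator through the channel returned by `new`.
fn forward_cosign_sketch(
  cosign_channel: &mpsc::UnboundedSender<CosignedBlock>,
  cosign: CosignedBlock,
) {
  cosign_channel.send(cosign).expect("cosign evaluator task was dropped?");
}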
|
||||
coordinator/src/db.rs (new file, 134 lines)
@@ -0,0 +1,134 @@
|
||||
use blake2::{
|
||||
digest::{consts::U32, Digest},
|
||||
Blake2b,
|
||||
};
|
||||
|
||||
use scale::Encode;
|
||||
use borsh::{BorshSerialize, BorshDeserialize};
|
||||
use serai_client::{
|
||||
in_instructions::primitives::{Batch, SignedBatch},
|
||||
primitives::ExternalNetworkId,
|
||||
validator_sets::primitives::{ExternalValidatorSet, Session},
|
||||
};
|
||||
|
||||
pub use serai_db::*;
|
||||
|
||||
use ::tributary::ReadWrite;
|
||||
use crate::tributary::{TributarySpec, Transaction, scanner::RecognizedIdType};
|
||||
|
||||
create_db!(
|
||||
MainDb {
|
||||
HandledMessageDb: (network: ExternalNetworkId) -> u64,
|
||||
ActiveTributaryDb: () -> Vec<u8>,
|
||||
RetiredTributaryDb: (set: ExternalValidatorSet) -> (),
|
||||
FirstPreprocessDb: (
|
||||
network: ExternalNetworkId,
|
||||
id_type: RecognizedIdType,
|
||||
id: &[u8]
|
||||
) -> Vec<Vec<u8>>,
|
||||
LastReceivedBatchDb: (network: ExternalNetworkId) -> u32,
|
||||
ExpectedBatchDb: (network: ExternalNetworkId, id: u32) -> [u8; 32],
|
||||
BatchDb: (network: ExternalNetworkId, id: u32) -> SignedBatch,
|
||||
LastVerifiedBatchDb: (network: ExternalNetworkId) -> u32,
|
||||
HandoverBatchDb: (set: ExternalValidatorSet) -> u32,
|
||||
LookupHandoverBatchDb: (network: ExternalNetworkId, batch: u32) -> Session,
|
||||
QueuedBatchesDb: (set: ExternalValidatorSet) -> Vec<u8>
|
||||
}
|
||||
);
|
||||
|
||||
impl ActiveTributaryDb {
|
||||
pub fn active_tributaries<G: Get>(getter: &G) -> (Vec<u8>, Vec<TributarySpec>) {
|
||||
let bytes = Self::get(getter).unwrap_or_default();
|
||||
let mut bytes_ref: &[u8] = bytes.as_ref();
|
||||
|
||||
let mut tributaries = vec![];
|
||||
while !bytes_ref.is_empty() {
|
||||
tributaries.push(TributarySpec::deserialize_reader(&mut bytes_ref).unwrap());
|
||||
}
|
||||
|
||||
(bytes, tributaries)
|
||||
}
|
||||
|
||||
pub fn add_participating_in_tributary(txn: &mut impl DbTxn, spec: &TributarySpec) {
|
||||
let (mut existing_bytes, existing) = ActiveTributaryDb::active_tributaries(txn);
|
||||
for tributary in &existing {
|
||||
if tributary == spec {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
spec.serialize(&mut existing_bytes).unwrap();
|
||||
ActiveTributaryDb::set(txn, &existing_bytes);
|
||||
}
|
||||
|
||||
pub fn retire_tributary(txn: &mut impl DbTxn, set: ExternalValidatorSet) {
|
||||
let mut active = Self::active_tributaries(txn).1;
|
||||
for i in 0 .. active.len() {
|
||||
if active[i].set() == set {
|
||||
active.remove(i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let mut bytes = vec![];
|
||||
for active in active {
|
||||
active.serialize(&mut bytes).unwrap();
|
||||
}
|
||||
Self::set(txn, &bytes);
|
||||
RetiredTributaryDb::set(txn, set, &());
|
||||
}
|
||||
}
|
||||
|
||||
impl FirstPreprocessDb {
|
||||
pub fn save_first_preprocess(
|
||||
txn: &mut impl DbTxn,
|
||||
network: ExternalNetworkId,
|
||||
id_type: RecognizedIdType,
|
||||
id: &[u8],
|
||||
preprocess: &Vec<Vec<u8>>,
|
||||
) {
|
||||
if let Some(existing) = FirstPreprocessDb::get(txn, network, id_type, id) {
|
||||
assert_eq!(&existing, preprocess, "saved a distinct first preprocess");
|
||||
return;
|
||||
}
|
||||
FirstPreprocessDb::set(txn, network, id_type, id, preprocess);
|
||||
}
|
||||
}
|
||||
|
||||
impl ExpectedBatchDb {
|
||||
pub fn save_expected_batch(txn: &mut impl DbTxn, batch: &Batch) {
|
||||
LastReceivedBatchDb::set(txn, batch.network, &batch.id);
|
||||
Self::set(
|
||||
txn,
|
||||
batch.network,
|
||||
batch.id,
|
||||
&Blake2b::<U32>::digest(batch.instructions.encode()).into(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
impl HandoverBatchDb {
|
||||
pub fn set_handover_batch(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: u32) {
|
||||
Self::set(txn, set, &batch);
|
||||
LookupHandoverBatchDb::set(txn, set.network, batch, &set.session);
|
||||
}
|
||||
}
|
||||
impl QueuedBatchesDb {
|
||||
pub fn queue(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: &Transaction) {
|
||||
let mut batches = Self::get(txn, set).unwrap_or_default();
|
||||
batch.write(&mut batches).unwrap();
|
||||
Self::set(txn, set, &batches);
|
||||
}
|
||||
|
||||
pub fn take(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec<Transaction> {
|
||||
let batches_vec = Self::get(txn, set).unwrap_or_default();
|
||||
txn.del(Self::key(set));
|
||||
|
||||
let mut batches: &[u8] = &batches_vec;
|
||||
let mut res = vec![];
|
||||
while !batches.is_empty() {
|
||||
res.push(Transaction::read(&mut batches).unwrap());
|
||||
}
|
||||
res
|
||||
}
|
||||
}
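// Hedged sketch of the append-then-drain encoding used by QueuedBatchesDb above: records are
// serialized back-to-back into a single Vec<u8> and read back until the buffer is empty, shown
// here with fixed-size u32 records instead of `Transaction`s.
fn append_record(buf: &mut Vec<u8>, record: u32) {
  buf.extend_from_slice(&record.to_le_bytes());
}
fn drain_records(mut buf: &[u8]) -> Vec<u32> {
  let mut records = vec![];
  while !buf.is_empty() {
    let (head, rest) = buf.split_at(4);
    records.push(u32::from_le_bytes(head.try_into().unwrap()));
    buf = rest;
  }
  records
}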
|
||||
File diff suppressed because it is too large
coordinator/src/p2p.rs (new file, 1045 lines)
File diff suppressed because it is too large
@@ -1,127 +0,0 @@
|
||||
use core::future::Future;
|
||||
use std::time::{Duration, SystemTime};
|
||||
|
||||
use serai_client::validator_sets::primitives::ValidatorSet;
|
||||
|
||||
use futures_util::FutureExt;
|
||||
|
||||
use tributary::{ReadWrite, Block, Tributary, TributaryReader};
|
||||
|
||||
use serai_db::*;
|
||||
use serai_task::ContinuallyRan;
|
||||
|
||||
use crate::{
|
||||
tributary::Transaction,
|
||||
p2p::{Peer, P2p},
|
||||
};
|
||||
|
||||
// Number of blocks in a minute
|
||||
const BLOCKS_PER_MINUTE: usize = (60 / (tributary::tendermint::TARGET_BLOCK_TIME / 1000)) as usize;
|
||||
|
||||
// Maximum amount of blocks to send in a batch of blocks
|
||||
pub const BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;
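// Purely illustrative, assuming a hypothetical 2_000 ms TARGET_BLOCK_TIME (the real constant
// may differ): BLOCKS_PER_MINUTE would be 60 / 2 = 30 and BLOCKS_PER_BATCH would be 31, so one
// heartbeat response covers slightly more than a minute of blocks.
const fn blocks_per_batch_for(block_time_ms: usize) -> usize {
  (60 / (block_time_ms / 1000)) + 1
}
const _: () = assert!(blocks_per_batch_for(2_000) == 31);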
|
||||
|
||||
/// Sends a heartbeat to other validators on regular intervals informing them of our Tributary's
|
||||
/// tip.
|
||||
///
|
||||
/// If the other validator has more blocks than we do, they're expected to inform us. This forms
|
||||
/// the sync protocol for our Tributaries.
|
||||
struct HeartbeatTask<TD: Db, P: P2p> {
|
||||
set: ValidatorSet,
|
||||
tributary: Tributary<TD, Transaction, P>,
|
||||
reader: TributaryReader<TD, Transaction>,
|
||||
p2p: P,
|
||||
}
|
||||
|
||||
impl<TD: Db, P: P2p> ContinuallyRan for HeartbeatTask<TD, P> {
|
||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
||||
async move {
|
||||
// If our blockchain hasn't had a block in the past minute, trigger the heartbeat protocol
|
||||
const TIME_TO_TRIGGER_SYNCING: Duration = Duration::from_secs(60);
|
||||
|
||||
let mut tip = self.reader.tip();
|
||||
let time_since = {
|
||||
let block_time = if let Some(time_of_block) = self.reader.time_of_block(&tip) {
|
||||
SystemTime::UNIX_EPOCH + Duration::from_secs(time_of_block)
|
||||
} else {
|
||||
// If we couldn't fetch this block's time, assume it's old
|
||||
// We don't want to declare its unix time as 0 and claim it's 50+ years old though
|
||||
log::warn!(
|
||||
"heartbeat task couldn't fetch the time of a block, flagging it as a minute old"
|
||||
);
|
||||
SystemTime::now() - TIME_TO_TRIGGER_SYNCING
|
||||
};
|
||||
SystemTime::now().duration_since(block_time).unwrap_or(Duration::ZERO)
|
||||
};
|
||||
let mut tip_is_stale = false;
|
||||
|
||||
let mut synced_block = false;
|
||||
if TIME_TO_TRIGGER_SYNCING <= time_since {
|
||||
log::warn!(
|
||||
"last known tributary block for {:?} was {} seconds ago",
|
||||
self.set,
|
||||
time_since.as_secs()
|
||||
);
|
||||
|
||||
// This requests all peers for this network, without differentiating by session
|
||||
// This should be fine as most validators should overlap across sessions
|
||||
'peer: for peer in self.p2p.peers(self.set.network).await {
|
||||
loop {
|
||||
// Create the request for blocks
|
||||
if tip_is_stale {
|
||||
tip = self.reader.tip();
|
||||
tip_is_stale = false;
|
||||
}
|
||||
// Necessary due to https://github.com/rust-lang/rust/issues/100013
|
||||
let Some(blocks) = peer.send_heartbeat(self.set, tip).boxed().await else {
|
||||
continue 'peer;
|
||||
};
|
||||
|
||||
// This is the final batch if it has less than the maximum amount of blocks
|
||||
// (signifying there weren't more blocks after this to fill the batch with)
|
||||
let final_batch = blocks.len() < BLOCKS_PER_BATCH;
|
||||
|
||||
// Sync each block
|
||||
for block_with_commit in blocks {
|
||||
let Ok(block) = Block::read(&mut block_with_commit.block.as_slice()) else {
|
||||
// TODO: Disconnect/slash this peer
|
||||
log::warn!("received invalid Block inside response to heartbeat");
|
||||
continue 'peer;
|
||||
};
|
||||
|
||||
// Attempt to sync the block
|
||||
if !self.tributary.sync_block(block, block_with_commit.commit).await {
|
||||
// The block may be invalid or may simply be stale
|
||||
continue 'peer;
|
||||
}
|
||||
|
||||
// Because we synced a block, flag the tip as stale
|
||||
tip_is_stale = true;
|
||||
// And that we did sync a block
|
||||
synced_block = true;
|
||||
}
|
||||
|
||||
// If this was the final batch, move on from this peer
|
||||
// We could assume they were honest and we are done syncing the chain, but this is a
|
||||
// bit more robust
|
||||
if final_batch {
|
||||
continue 'peer;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This will cause the task to be run less and less often, ensuring we aren't spamming the
|
||||
// net if we legitimately aren't making progress
|
||||
if !synced_block {
|
||||
Err(format!(
|
||||
"tried to sync blocks for {:?} since we haven't seen one in {} seconds but didn't",
|
||||
self.set,
|
||||
time_since.as_secs(),
|
||||
))?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(synced_block)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,184 +0,0 @@
|
||||
use core::{pin::Pin, future::Future};
|
||||
use std::{sync::Arc, io};
|
||||
|
||||
use zeroize::Zeroizing;
|
||||
use rand_core::{RngCore, OsRng};
|
||||
|
||||
use blake2::{Digest, Blake2s256};
|
||||
use schnorrkel::{Keypair, PublicKey, Signature};
|
||||
|
||||
use serai_client::primitives::PublicKey as Public;
|
||||
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
|
||||
use libp2p::{
|
||||
core::UpgradeInfo,
|
||||
InboundUpgrade, OutboundUpgrade,
|
||||
identity::{self, PeerId},
|
||||
noise,
|
||||
};
|
||||
|
||||
use crate::p2p::libp2p::{validators::Validators, peer_id_from_public};
|
||||
|
||||
const PROTOCOL: &str = "/serai/coordinator/validators";
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct OnlyValidators {
|
||||
pub(crate) validators: Arc<RwLock<Validators>>,
|
||||
pub(crate) serai_key: Zeroizing<Keypair>,
|
||||
pub(crate) noise_keypair: identity::Keypair,
|
||||
}
|
||||
|
||||
impl OnlyValidators {
|
||||
/// The ephemeral challenge protocol for authentication.
|
||||
///
|
||||
/// We use ephemeral challenges to prevent replaying signatures from historic sessions.
|
||||
///
|
||||
/// We don't immediately send the challenge. We only send a commitment to it. This prevents our
|
||||
/// remote peer from choosing their challenge in response to our challenge, in case there was any
|
||||
/// benefit to doing so.
|
||||
async fn challenges<S: 'static + Send + Unpin + AsyncRead + AsyncWrite>(
|
||||
socket: &mut noise::Output<S>,
|
||||
) -> io::Result<([u8; 32], [u8; 32])> {
|
||||
let mut our_challenge = [0; 32];
|
||||
OsRng.fill_bytes(&mut our_challenge);
|
||||
|
||||
// Write the hash of our challenge
|
||||
socket.write_all(&Blake2s256::digest(our_challenge)).await?;
|
||||
|
||||
// Read the hash of their challenge
|
||||
let mut their_challenge_commitment = [0; 32];
|
||||
socket.read_exact(&mut their_challenge_commitment).await?;
|
||||
|
||||
// Reveal our challenge
|
||||
socket.write_all(&our_challenge).await?;
|
||||
|
||||
// Read their challenge
|
||||
let mut their_challenge = [0; 32];
|
||||
socket.read_exact(&mut their_challenge).await?;
|
||||
|
||||
// Verify their challenge
|
||||
if <[u8; 32]>::from(Blake2s256::digest(their_challenge)) != their_challenge_commitment {
|
||||
Err(io::Error::other("challenge didn't match challenge commitment"))?;
|
||||
}
|
||||
|
||||
Ok((our_challenge, their_challenge))
|
||||
}
|
||||
|
||||
// We sign the two noise peer IDs and the ephemeral challenges.
|
||||
//
|
||||
// Signing the noise peer IDs ensures we're authenticating this noise connection. The only
|
||||
// expectations placed on noise are for it to prevent a MITM from impersonating the other end or
|
||||
// modifying any messages sent.
|
||||
//
|
||||
// Signing the ephemeral challenges prevents any replays. While that should be unnecessary, as
|
||||
// noise MAY prevent replays across sessions (even when the same key is used), and noise IDs
|
||||
// shouldn't be reused (so it should be fine to reuse an existing signature for these noise IDs),
|
||||
// it doesn't hurt.
|
||||
async fn authenticate<S: 'static + Send + Unpin + AsyncRead + AsyncWrite>(
|
||||
&self,
|
||||
socket: &mut noise::Output<S>,
|
||||
dialer_peer_id: PeerId,
|
||||
dialer_challenge: [u8; 32],
|
||||
listener_peer_id: PeerId,
|
||||
listener_challenge: [u8; 32],
|
||||
) -> io::Result<PeerId> {
|
||||
// Write our public key
|
||||
socket.write_all(&self.serai_key.public.to_bytes()).await?;
|
||||
|
||||
let msg = borsh::to_vec(&(
|
||||
dialer_peer_id.to_bytes(),
|
||||
dialer_challenge,
|
||||
listener_peer_id.to_bytes(),
|
||||
listener_challenge,
|
||||
))
|
||||
.unwrap();
|
||||
let signature = self.serai_key.sign_simple(PROTOCOL.as_bytes(), &msg);
|
||||
socket.write_all(&signature.to_bytes()).await?;
|
||||
|
||||
let mut public_key_and_sig = [0; 96];
|
||||
socket.read_exact(&mut public_key_and_sig).await?;
|
||||
let public_key = PublicKey::from_bytes(&public_key_and_sig[.. 32])
|
||||
.map_err(|_| io::Error::other("invalid public key"))?;
|
||||
let sig = Signature::from_bytes(&public_key_and_sig[32 ..])
|
||||
.map_err(|_| io::Error::other("invalid signature serialization"))?;
|
||||
|
||||
public_key
|
||||
.verify_simple(PROTOCOL.as_bytes(), &msg, &sig)
|
||||
.map_err(|_| io::Error::other("invalid signature"))?;
|
||||
|
||||
let peer_id = peer_id_from_public(Public::from_raw(public_key.to_bytes()));
|
||||
if !self.validators.read().await.contains(&peer_id) {
|
||||
Err(io::Error::other("peer which tried to connect isn't a known active validator"))?;
|
||||
}
|
||||
|
||||
Ok(peer_id)
|
||||
}
|
||||
}
|
||||
|
||||
impl UpgradeInfo for OnlyValidators {
|
||||
type Info = <noise::Config as UpgradeInfo>::Info;
|
||||
type InfoIter = <noise::Config as UpgradeInfo>::InfoIter;
|
||||
fn protocol_info(&self) -> Self::InfoIter {
|
||||
// A keypair only causes an error if its sign operation fails, which is only possible with RSA,
|
||||
// which isn't used within this codebase
|
||||
noise::Config::new(&self.noise_keypair).unwrap().protocol_info()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> InboundUpgrade<S> for OnlyValidators {
|
||||
type Output = (PeerId, noise::Output<S>);
|
||||
type Error = io::Error;
|
||||
type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;
|
||||
|
||||
fn upgrade_inbound(self, socket: S, info: Self::Info) -> Self::Future {
|
||||
Box::pin(async move {
|
||||
let (dialer_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
|
||||
.unwrap()
|
||||
.upgrade_inbound(socket, info)
|
||||
.await
|
||||
.map_err(io::Error::other)?;
|
||||
|
||||
let (our_challenge, dialer_challenge) = OnlyValidators::challenges(&mut socket).await?;
|
||||
let dialer_serai_validator = self
|
||||
.authenticate(
|
||||
&mut socket,
|
||||
dialer_noise_peer_id,
|
||||
dialer_challenge,
|
||||
PeerId::from_public_key(&self.noise_keypair.public()),
|
||||
our_challenge,
|
||||
)
|
||||
.await?;
|
||||
Ok((dialer_serai_validator, socket))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> OutboundUpgrade<S> for OnlyValidators {
|
||||
type Output = (PeerId, noise::Output<S>);
|
||||
type Error = io::Error;
|
||||
type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;
|
||||
|
||||
fn upgrade_outbound(self, socket: S, info: Self::Info) -> Self::Future {
|
||||
Box::pin(async move {
|
||||
let (listener_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
|
||||
.unwrap()
|
||||
.upgrade_outbound(socket, info)
|
||||
.await
|
||||
.map_err(io::Error::other)?;
|
||||
|
||||
let (our_challenge, listener_challenge) = OnlyValidators::challenges(&mut socket).await?;
|
||||
let listener_serai_validator = self
|
||||
.authenticate(
|
||||
&mut socket,
|
||||
PeerId::from_public_key(&self.noise_keypair.public()),
|
||||
our_challenge,
|
||||
listener_noise_peer_id,
|
||||
listener_challenge,
|
||||
)
|
||||
.await?;
|
||||
Ok((listener_serai_validator, socket))
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,122 +0,0 @@
|
||||
use core::future::Future;
|
||||
use std::collections::HashSet;
|
||||
|
||||
use rand_core::{RngCore, OsRng};
|
||||
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use serai_client::Serai;
|
||||
|
||||
use libp2p::{
|
||||
core::multiaddr::{Protocol, Multiaddr},
|
||||
swarm::dial_opts::DialOpts,
|
||||
};
|
||||
|
||||
use serai_task::ContinuallyRan;
|
||||
|
||||
use crate::p2p::libp2p::{PORT, Peers, validators::Validators};
|
||||
|
||||
const TARGET_PEERS_PER_NETWORK: usize = 5;
|
||||
/*
|
||||
If we only tracked the target amount of peers per network, we'd risk being eclipsed by an
|
||||
adversary who immediately connects to us with their array of validators upon our boot. Their
|
||||
array would satisfy our target amount of peers, so we'd never seek more, enabling the adversary
|
||||
to be the only entity we peered with.
|
||||
|
||||
We solve this by additionally requiring an explicit amount of peers we dialed. That means we
|
||||
randomly chose to connect to these peers.
|
||||
*/
|
||||
// TODO const TARGET_DIALED_PEERS_PER_NETWORK: usize = 3;
|
||||
|
||||
pub(crate) struct DialTask {
|
||||
serai: Serai,
|
||||
validators: Validators,
|
||||
peers: Peers,
|
||||
to_dial: mpsc::UnboundedSender<DialOpts>,
|
||||
}
|
||||
|
||||
impl DialTask {
|
||||
pub(crate) fn new(serai: Serai, peers: Peers, to_dial: mpsc::UnboundedSender<DialOpts>) -> Self {
|
||||
DialTask { serai: serai.clone(), validators: Validators::new(serai), peers, to_dial }
|
||||
}
|
||||
}
|
||||
|
||||
impl ContinuallyRan for DialTask {
|
||||
// Only run every five minutes, not the default of every five seconds
|
||||
const DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
|
||||
const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 10 * 60;
|
||||
|
||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
||||
async move {
|
||||
self.validators.update().await?;
|
||||
|
||||
// If any of our peers is lacking, try to connect to more
|
||||
let mut dialed = false;
|
||||
let peer_counts = self
|
||||
.peers
|
||||
.peers
|
||||
.read()
|
||||
.await
|
||||
.iter()
|
||||
.map(|(network, peers)| (*network, peers.len()))
|
||||
.collect::<Vec<_>>();
|
||||
for (network, peer_count) in peer_counts {
|
||||
/*
|
||||
If we don't have the target amount of peers, and we don't have all the validators in the
|
||||
set but one, attempt to connect to more validators within this set.
|
||||
|
||||
The latter clause is so if there's a set with only 3 validators, we don't infinitely try
|
||||
to connect to the target amount of peers for this network as we never will. Instead, we
|
||||
only try to connect to most of the validators actually present.
|
||||
*/
|
||||
if (peer_count < TARGET_PEERS_PER_NETWORK) &&
|
||||
(peer_count <
|
||||
self
|
||||
.validators
|
||||
.by_network()
|
||||
.get(&network)
|
||||
.map(HashSet::len)
|
||||
.unwrap_or(0)
|
||||
.saturating_sub(1))
|
||||
{
|
||||
let mut potential_peers =
|
||||
self.serai.p2p_validators(network).await.map_err(|e| format!("{e:?}"))?;
|
||||
for _ in 0 .. (TARGET_PEERS_PER_NETWORK - peer_count) {
|
||||
if potential_peers.is_empty() {
|
||||
break;
|
||||
}
|
||||
let index_to_dial =
|
||||
usize::try_from(OsRng.next_u64() % u64::try_from(potential_peers.len()).unwrap())
|
||||
.unwrap();
|
||||
let randomly_selected_peer = potential_peers.swap_remove(index_to_dial);
|
||||
|
||||
log::info!("found peer from substrate: {randomly_selected_peer}");
|
||||
|
||||
// Map the peer from a Substrate P2P network peer to a Coordinator P2P network peer
|
||||
let mapped_peer = randomly_selected_peer
|
||||
.into_iter()
|
||||
.filter_map(|protocol| match protocol {
|
||||
// Drop PeerIds from the Substrate P2p network
|
||||
Protocol::P2p(_) => None,
|
||||
// Use our own TCP port
|
||||
Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)),
|
||||
// Pass-through any other specifications (IPv4, IPv6, etc)
|
||||
other => Some(other),
|
||||
})
|
||||
.collect::<Multiaddr>();
|
||||
|
||||
log::debug!("mapped found peer: {mapped_peer}");
|
||||
|
||||
self
|
||||
.to_dial
|
||||
.send(DialOpts::unknown_peer_id().address(mapped_peer).build())
|
||||
.expect("dial receiver closed?");
|
||||
dialed = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(dialed)
|
||||
}
|
||||
}
|
||||
}
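// Hedged sketch of the random selection above: peers are picked uniformly at random, without
// replacement, by swap-removing a randomly chosen index each round.
fn pick_random_peer<T>(pool: &mut Vec<T>) -> Option<T> {
  if pool.is_empty() {
    return None;
  }
  let index = usize::try_from(OsRng.next_u64() % u64::try_from(pool.len()).unwrap()).unwrap();
  Some(pool.swap_remove(index))
}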
|
||||
@@ -1,79 +0,0 @@
|
||||
use core::time::Duration;
|
||||
|
||||
use blake2::{Digest, Blake2s256};
|
||||
|
||||
use scale::Encode;
|
||||
use borsh::{BorshSerialize, BorshDeserialize};
|
||||
use serai_client::validator_sets::primitives::ValidatorSet;
|
||||
|
||||
use libp2p::gossipsub::{
|
||||
TopicHash, IdentTopic, MessageId, MessageAuthenticity, ValidationMode, ConfigBuilder,
|
||||
IdentityTransform, AllowAllSubscriptionFilter, Behaviour,
|
||||
};
|
||||
pub use libp2p::gossipsub::Event;
|
||||
|
||||
use serai_cosign::SignedCosign;
|
||||
|
||||
// Block size limit + 16 KB of space for signatures/metadata
|
||||
pub(crate) const MAX_LIBP2P_GOSSIP_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMIT + 16384;
|
||||
|
||||
const KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(80);
|
||||
|
||||
const LIBP2P_PROTOCOL: &str = "/serai/coordinator/gossip/1.0.0";
|
||||
const BASE_TOPIC: &str = "/";
|
||||
|
||||
fn topic_for_set(set: ValidatorSet) -> IdentTopic {
|
||||
IdentTopic::new(format!("/set/{}", hex::encode(set.encode())))
|
||||
}
|
||||
|
||||
#[derive(Clone, BorshSerialize, BorshDeserialize)]
|
||||
pub(crate) enum Message {
|
||||
Tributary { set: ValidatorSet, message: Vec<u8> },
|
||||
Cosign(SignedCosign),
|
||||
}
|
||||
|
||||
impl Message {
|
||||
pub(crate) fn topic(&self) -> TopicHash {
|
||||
match self {
|
||||
Message::Tributary { set, .. } => topic_for_set(*set).hash(),
|
||||
Message::Cosign(_) => IdentTopic::new(BASE_TOPIC).hash(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) type Behavior = Behaviour<IdentityTransform, AllowAllSubscriptionFilter>;
|
||||
|
||||
pub(crate) fn new_behavior() -> Behavior {
|
||||
// The latency used by the Tendermint protocol, used here as the gossip epoch duration
|
||||
// libp2p-rs defaults to 1 second, whereas ours will be ~2
|
||||
let heartbeat_interval = tributary::tendermint::LATENCY_TIME;
|
||||
// The amount of heartbeats which will occur within a single Tributary block
|
||||
let heartbeats_per_block = tributary::tendermint::TARGET_BLOCK_TIME.div_ceil(heartbeat_interval);
|
||||
// libp2p-rs defaults to 5, whereas ours will be ~8
|
||||
let heartbeats_to_keep = 2 * heartbeats_per_block;
|
||||
// libp2p-rs defaults to 3 whereas ours will be ~4
|
||||
let heartbeats_to_gossip = heartbeats_per_block;
|
||||
|
||||
let config = ConfigBuilder::default()
|
||||
.protocol_id_prefix(LIBP2P_PROTOCOL)
|
||||
.history_length(usize::try_from(heartbeats_to_keep).unwrap())
|
||||
.history_gossip(usize::try_from(heartbeats_to_gossip).unwrap())
|
||||
.heartbeat_interval(Duration::from_millis(heartbeat_interval.into()))
|
||||
.max_transmit_size(MAX_LIBP2P_GOSSIP_MESSAGE_SIZE)
|
||||
.idle_timeout(KEEP_ALIVE_INTERVAL + Duration::from_secs(5))
|
||||
.duplicate_cache_time(Duration::from_millis((heartbeats_to_keep * heartbeat_interval).into()))
|
||||
.validation_mode(ValidationMode::Anonymous)
|
||||
// Uses a content based message ID to avoid duplicates as much as possible
|
||||
.message_id_fn(|msg| {
|
||||
MessageId::new(&Blake2s256::digest([msg.topic.as_str().as_bytes(), &msg.data].concat()))
|
||||
})
|
||||
.build();
|
||||
|
||||
let mut gossip = Behavior::new(MessageAuthenticity::Anonymous, config.unwrap()).unwrap();
|
||||
|
||||
// Subscribe to the base topic
|
||||
let topic = IdentTopic::new(BASE_TOPIC);
|
||||
let _ = gossip.subscribe(&topic);
|
||||
|
||||
gossip
|
||||
}
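// Hedged sketch of the content-based message ID configured above: two publishes of the same
// topic and payload hash to the same ID, letting the duplicate cache drop the second copy.
fn content_message_id_sketch(topic: &str, data: &[u8]) -> [u8; 32] {
  Blake2s256::digest([topic.as_bytes(), data].concat()).into()
}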
|
||||
@@ -1,214 +0,0 @@
|
||||
use core::{future::Future, time::Duration};
|
||||
use std::{
|
||||
sync::Arc,
|
||||
collections::{HashSet, HashMap},
|
||||
};
|
||||
|
||||
use zeroize::Zeroizing;
|
||||
use schnorrkel::Keypair;
|
||||
|
||||
use serai_client::{
|
||||
primitives::{NetworkId, PublicKey},
|
||||
validator_sets::primitives::ValidatorSet,
|
||||
Serai,
|
||||
};
|
||||
|
||||
use tokio::sync::{mpsc, oneshot, RwLock};
|
||||
|
||||
use serai_task::{Task, ContinuallyRan};
|
||||
|
||||
use libp2p::{
|
||||
multihash::Multihash,
|
||||
identity::{self, PeerId},
|
||||
tcp::Config as TcpConfig,
|
||||
yamux,
|
||||
swarm::NetworkBehaviour,
|
||||
SwarmBuilder,
|
||||
};
|
||||
|
||||
use crate::p2p::TributaryBlockWithCommit;
|
||||
|
||||
/// A struct to sync the validators from the Serai node in order to keep track of them.
|
||||
mod validators;
|
||||
use validators::UpdateValidatorsTask;
|
||||
|
||||
/// The authentication protocol upgrade to limit the P2P network to active validators.
|
||||
mod authenticate;
|
||||
use authenticate::OnlyValidators;
|
||||
|
||||
/// The dial task, to find new peers to connect to
|
||||
mod dial;
|
||||
use dial::DialTask;
|
||||
|
||||
/// The request-response messages and behavior
|
||||
mod reqres;
|
||||
use reqres::{Request, Response};
|
||||
|
||||
/// The gossip messages and behavior
|
||||
mod gossip;
|
||||
|
||||
/// The swarm task, running it and dispatching to/from it
|
||||
mod swarm;
|
||||
use swarm::SwarmTask;
|
||||
|
||||
const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o')
|
||||
|
||||
// usize::max, manually implemented, as max isn't a const fn
|
||||
const MAX_LIBP2P_MESSAGE_SIZE: usize =
|
||||
if gossip::MAX_LIBP2P_GOSSIP_MESSAGE_SIZE > reqres::MAX_LIBP2P_REQRES_MESSAGE_SIZE {
|
||||
gossip::MAX_LIBP2P_GOSSIP_MESSAGE_SIZE
|
||||
} else {
|
||||
reqres::MAX_LIBP2P_REQRES_MESSAGE_SIZE
|
||||
};
|
||||
|
||||
fn peer_id_from_public(public: PublicKey) -> PeerId {
|
||||
// 0 represents the identity Multihash, meaning no hash was performed
|
||||
// It's an internal constant so we can't refer to the constant inside libp2p
|
||||
PeerId::from_multihash(Multihash::wrap(0, &public.0).unwrap()).unwrap()
|
||||
}
|
||||
|
||||
struct Peer<'a> {
|
||||
outbound_requests: &'a mpsc::UnboundedSender<(PeerId, Request, oneshot::Sender<Response>)>,
|
||||
id: PeerId,
|
||||
}
|
||||
impl crate::p2p::Peer<'_> for Peer<'_> {
|
||||
fn send_heartbeat(
|
||||
&self,
|
||||
set: ValidatorSet,
|
||||
latest_block_hash: [u8; 32],
|
||||
) -> impl Send + Future<Output = Option<Vec<TributaryBlockWithCommit>>> {
|
||||
const HEARTBEAT_TIMEOUT: Duration = Duration::from_secs(5);
|
||||
async move {
|
||||
let request = Request::Heartbeat { set, latest_block_hash };
|
||||
let (sender, receiver) = oneshot::channel();
|
||||
self
|
||||
.outbound_requests
|
||||
.send((self.id, request, sender))
|
||||
.expect("outbound requests recv channel was dropped?");
|
||||
match tokio::time::timeout(HEARTBEAT_TIMEOUT, receiver).await.ok()?.ok()? {
|
||||
Response::None => Some(vec![]),
|
||||
Response::Blocks(blocks) => Some(blocks),
|
||||
// TODO: Disconnect this peer
|
||||
Response::NotableCosigns(_) => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct Peers {
|
||||
peers: Arc<RwLock<HashMap<NetworkId, HashSet<PeerId>>>>,
|
||||
}
|
||||
|
||||
#[derive(NetworkBehaviour)]
|
||||
struct Behavior {
|
||||
reqres: reqres::Behavior,
|
||||
gossip: gossip::Behavior,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct Libp2p {
|
||||
peers: Peers,
|
||||
outbound_requests: mpsc::UnboundedSender<(PeerId, Request, oneshot::Sender<Response>)>,
|
||||
}
|
||||
|
||||
impl Libp2p {
|
||||
pub(crate) fn new(serai_key: &Zeroizing<Keypair>, serai: Serai) -> Libp2p {
|
||||
// Define the object we track peers with
|
||||
let peers = Peers { peers: Arc::new(RwLock::new(HashMap::new())) };
|
||||
|
||||
// Define the dial task
|
||||
let (dial_task_def, dial_task) = Task::new();
|
||||
let (to_dial_send, to_dial_recv) = mpsc::unbounded_channel();
|
||||
tokio::spawn(
|
||||
DialTask::new(serai.clone(), peers.clone(), to_dial_send)
|
||||
.continually_run(dial_task_def, vec![]),
|
||||
);
|
||||
|
||||
// Define the Validators object used for validating new connections
|
||||
let connection_validators = UpdateValidatorsTask::spawn(serai.clone());
|
||||
let new_only_validators = |noise_keypair: &identity::Keypair| -> Result<_, ()> {
|
||||
Ok(OnlyValidators {
|
||||
serai_key: serai_key.clone(),
|
||||
validators: connection_validators.clone(),
|
||||
noise_keypair: noise_keypair.clone(),
|
||||
})
|
||||
};
|
||||
|
||||
let new_yamux = || {
|
||||
let mut config = yamux::Config::default();
|
||||
// 1 MiB default + max message size
|
||||
config.set_max_buffer_size((1024 * 1024) + MAX_LIBP2P_MESSAGE_SIZE);
|
||||
// 256 KiB default + max message size
|
||||
config.set_receive_window_size(((256 * 1024) + MAX_LIBP2P_MESSAGE_SIZE).try_into().unwrap());
|
||||
config
|
||||
};
|
||||
|
||||
let behavior = Behavior { reqres: reqres::new_behavior(), gossip: gossip::new_behavior() };
|
||||
|
||||
let mut swarm = SwarmBuilder::with_existing_identity(identity::Keypair::generate_ed25519())
|
||||
.with_tokio()
|
||||
.with_tcp(TcpConfig::default().nodelay(false), new_only_validators, new_yamux)
|
||||
.unwrap()
|
||||
.with_behaviour(|_| behavior)
|
||||
.unwrap()
|
||||
.build();
|
||||
swarm.listen_on(format!("/ip4/0.0.0.0/tcp/{PORT}").parse().unwrap()).unwrap();
|
||||
swarm.listen_on(format!("/ip6/::/tcp/{PORT}").parse().unwrap()).unwrap();
|
||||
|
||||
let swarm_validators = UpdateValidatorsTask::spawn(serai);
|
||||
|
||||
let (gossip_send, gossip_recv) = mpsc::unbounded_channel();
|
||||
let (signed_cosigns_send, signed_cosigns_recv) = mpsc::unbounded_channel();
|
||||
let (tributary_gossip_send, tributary_gossip_recv) = mpsc::unbounded_channel();
|
||||
|
||||
let (outbound_requests_send, outbound_requests_recv) = mpsc::unbounded_channel();
|
||||
|
||||
let (heartbeat_requests_send, heartbeat_requests_recv) = mpsc::unbounded_channel();
|
||||
let (notable_cosign_requests_send, notable_cosign_requests_recv) = mpsc::unbounded_channel();
|
||||
let (inbound_request_responses_send, inbound_request_responses_recv) =
|
||||
mpsc::unbounded_channel();
|
||||
|
||||
// Create the swarm task
|
||||
SwarmTask::spawn(
|
||||
dial_task,
|
||||
to_dial_recv,
|
||||
swarm_validators,
|
||||
peers,
|
||||
swarm,
|
||||
gossip_recv,
|
||||
signed_cosigns_send,
|
||||
tributary_gossip_send,
|
||||
outbound_requests_recv,
|
||||
heartbeat_requests_send,
|
||||
notable_cosign_requests_send,
|
||||
inbound_request_responses_recv,
|
||||
);
|
||||
|
||||
// gossip_send, signed_cosigns_recv, tributary_gossip_recv, outbound_requests_send,
|
||||
// heartbeat_requests_recv, notable_cosign_requests_recv, inbound_request_responses_send
|
||||
todo!("TODO");
|
||||
}
|
||||
}
|
||||
|
||||
impl tributary::P2p for Libp2p {
|
||||
fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) -> impl Send + Future<Output = ()> {
|
||||
async move { todo!("TODO") }
|
||||
}
|
||||
}
|
||||
|
||||
impl crate::p2p::P2p for Libp2p {
|
||||
type Peer<'a> = Peer<'a>;
|
||||
fn peers(&self, network: NetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>> {
|
||||
async move {
|
||||
let Some(peer_ids) = self.peers.peers.read().await.get(&network).cloned() else {
|
||||
return vec![];
|
||||
};
|
||||
let mut res = vec![];
|
||||
for id in peer_ids {
|
||||
res.push(Peer { outbound_requests: &self.outbound_requests, id });
|
||||
}
|
||||
res
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,138 +0,0 @@
|
||||
use core::{fmt, time::Duration};
|
||||
use std::io;
|
||||
|
||||
use async_trait::async_trait;
|
||||
|
||||
use borsh::{BorshSerialize, BorshDeserialize};
|
||||
use serai_client::validator_sets::primitives::ValidatorSet;
|
||||
|
||||
use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
|
||||
|
||||
use libp2p::request_response::{
|
||||
self, Codec as CodecTrait, Event as GenericEvent, Config, Behaviour, ProtocolSupport,
|
||||
};
|
||||
pub use request_response::Message;
|
||||
|
||||
use serai_cosign::SignedCosign;
|
||||
|
||||
use crate::p2p::TributaryBlockWithCommit;
|
||||
|
||||
/// The maximum message size for the request-response protocol
|
||||
// This is derived from the heartbeat message size as it's our largest message
|
||||
pub(crate) const MAX_LIBP2P_REQRES_MESSAGE_SIZE: usize =
|
||||
(tributary::BLOCK_SIZE_LIMIT * crate::p2p::heartbeat::BLOCKS_PER_BATCH) + 1024;
|
||||
|
||||
const PROTOCOL: &str = "/serai/coordinator/reqres/1.0.0";
|
||||
|
||||
/// Requests which can be made via the request-response protocol.
|
||||
#[derive(Clone, Copy, Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub(crate) enum Request {
|
||||
/// A keep-alive to prevent our connections from being dropped.
|
||||
KeepAlive,
|
||||
/// A heartbeat informing our peers of our latest block, for the specified blockchain, on regular
|
||||
/// intervals.
|
||||
///
|
||||
/// If our peers have more blocks than us, they're expected to respond with those blocks.
|
||||
Heartbeat { set: ValidatorSet, latest_block_hash: [u8; 32] },
|
||||
/// A request for the notable cosigns for a global session.
|
||||
NotableCosigns { global_session: [u8; 32] },
|
||||
}
|
||||
|
||||
/// Responses which can be received via the request-response protocol.
|
||||
#[derive(Clone, BorshSerialize, BorshDeserialize)]
|
||||
pub(crate) enum Response {
|
||||
None,
|
||||
Blocks(Vec<TributaryBlockWithCommit>),
|
||||
NotableCosigns(Vec<SignedCosign>),
|
||||
}
|
||||
impl fmt::Debug for Response {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
Response::None => fmt.debug_struct("Response::None").finish(),
|
||||
Response::Blocks(_) => fmt.debug_struct("Response::Blocks").finish_non_exhaustive(),
|
||||
Response::NotableCosigns(_) => {
|
||||
fmt.debug_struct("Response::NotableCosigns").finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The codec used for the request-response protocol.
|
||||
///
|
||||
/// We don't use CBOR or JSON, but use borsh to create `Vec<u8>`s we then length-prefix. While
|
||||
/// ideally, we'd use borsh directly with the `io` traits defined here, they're async and there
|
||||
/// isn't an amenable API within borsh for incremental deserialization.
|
||||
#[derive(Default, Clone, Copy, Debug)]
|
||||
pub(crate) struct Codec;
|
||||
impl Codec {
|
||||
async fn read<M: BorshDeserialize>(io: &mut (impl Unpin + AsyncRead)) -> io::Result<M> {
|
||||
let mut len = [0; 4];
|
||||
io.read_exact(&mut len).await?;
|
||||
let len = usize::try_from(u32::from_le_bytes(len)).expect("not at least a 32-bit platform?");
|
||||
if len > MAX_LIBP2P_REQRES_MESSAGE_SIZE {
|
||||
Err(io::Error::other("request length exceeded MAX_LIBP2P_REQRES_MESSAGE_SIZE"))?;
|
||||
}
|
||||
// This may be a non-trivial allocation which a peer can easily cause
|
||||
// While we could chunk the read, meaning we only perform the allocation as bandwidth is used,
|
||||
// the max message size should be sufficiently sane
|
||||
let mut buf = vec![0; len];
|
||||
io.read_exact(&mut buf).await?;
|
||||
let mut buf = buf.as_slice();
|
||||
let res = M::deserialize(&mut buf)?;
|
||||
if !buf.is_empty() {
|
||||
Err(io::Error::other("p2p message had extra data appended to it"))?;
|
||||
}
|
||||
Ok(res)
|
||||
}
|
||||
async fn write(io: &mut (impl Unpin + AsyncWrite), msg: &impl BorshSerialize) -> io::Result<()> {
|
||||
let msg = borsh::to_vec(msg).unwrap();
|
||||
io.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await?;
|
||||
io.write_all(&msg).await
|
||||
}
|
||||
}
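// Hedged sketch of the framing used above (a u32 little-endian length prefix followed by the
// borsh payload), shown over plain byte slices instead of the async I/O traits.
fn frame(payload: &[u8]) -> Vec<u8> {
  let mut framed = Vec::with_capacity(4 + payload.len());
  framed.extend_from_slice(&u32::try_from(payload.len()).unwrap().to_le_bytes());
  framed.extend_from_slice(payload);
  framed
}
fn unframe(framed: &[u8]) -> Option<&[u8]> {
  let len = usize::try_from(u32::from_le_bytes(framed.get(.. 4)?.try_into().ok()?)).ok()?;
  framed.get(4 .. 4 + len)
}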
|
||||
#[async_trait]
|
||||
impl CodecTrait for Codec {
|
||||
type Protocol = &'static str;
|
||||
type Request = Request;
|
||||
type Response = Response;
|
||||
|
||||
async fn read_request<R: Send + Unpin + AsyncRead>(
|
||||
&mut self,
|
||||
_: &Self::Protocol,
|
||||
io: &mut R,
|
||||
) -> io::Result<Request> {
|
||||
Self::read(io).await
|
||||
}
|
||||
async fn read_response<R: Send + Unpin + AsyncRead>(
|
||||
&mut self,
|
||||
proto: &Self::Protocol,
|
||||
io: &mut R,
|
||||
) -> io::Result<Response> {
|
||||
Self::read(io).await
|
||||
}
|
||||
async fn write_request<W: Send + Unpin + AsyncWrite>(
|
||||
&mut self,
|
||||
_: &Self::Protocol,
|
||||
io: &mut W,
|
||||
req: Request,
|
||||
) -> io::Result<()> {
|
||||
Self::write(io, &req).await
|
||||
}
|
||||
async fn write_response<W: Send + Unpin + AsyncWrite>(
|
||||
&mut self,
|
||||
proto: &Self::Protocol,
|
||||
io: &mut W,
|
||||
res: Response,
|
||||
) -> io::Result<()> {
|
||||
Self::write(io, &res).await
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) type Event = GenericEvent<Request, Response>;
|
||||
|
||||
pub(crate) type Behavior = Behaviour<Codec>;
|
||||
pub(crate) fn new_behavior() -> Behavior {
|
||||
let mut config = Config::default();
|
||||
config.set_request_timeout(Duration::from_secs(5));
|
||||
Behavior::new([(PROTOCOL, ProtocolSupport::Full)], config)
|
||||
}
|
||||
@@ -1,336 +0,0 @@
|
||||
use std::{
|
||||
sync::Arc,
|
||||
collections::{HashSet, HashMap},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use borsh::BorshDeserialize;
|
||||
|
||||
use serai_client::validator_sets::primitives::ValidatorSet;
|
||||
|
||||
use tokio::sync::{mpsc, oneshot, RwLock};
|
||||
|
||||
use serai_task::TaskHandle;
|
||||
|
||||
use serai_cosign::SignedCosign;
|
||||
|
||||
use futures_util::StreamExt;
|
||||
use libp2p::{
|
||||
identity::PeerId,
|
||||
request_response::{RequestId, ResponseChannel},
|
||||
swarm::{dial_opts::DialOpts, SwarmEvent, Swarm},
|
||||
};
|
||||
|
||||
use crate::p2p::libp2p::{
|
||||
Peers, BehaviorEvent, Behavior,
|
||||
validators::Validators,
|
||||
reqres::{self, Request, Response},
|
||||
gossip,
|
||||
};
|
||||
|
||||
const KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(80);
|
||||
const TIME_BETWEEN_REBUILD_PEERS: Duration = Duration::from_secs(10 * 60);
|
||||
|
||||
/*
|
||||
`SwarmTask` handles everything we need the `Swarm` object for. The goal is to minimize the
|
||||
contention on this task. Unfortunately, the `Swarm` object itself is needed for a variety of
|
||||
purposes making this a rather large task.
|
||||
|
||||
Responsibilities include:
|
||||
- Actually dialing new peers (the selection process occurs in another task)
|
||||
- Maintaining the peers structure (as we need the Swarm object to see who our peers are)
|
||||
- Gossiping messages
|
||||
- Dispatching gossiped messages
|
||||
- Sending requests
|
||||
- Dispatching responses to requests
|
||||
- Dispatching received requests
|
||||
- Sending responses
|
||||
*/
|
||||
pub(crate) struct SwarmTask {
|
||||
dial_task: TaskHandle,
|
||||
to_dial: mpsc::UnboundedReceiver<DialOpts>,
|
||||
last_dial_task_run: Instant,
|
||||
|
||||
validators: Arc<RwLock<Validators>>,
|
||||
peers: Peers,
|
||||
rebuild_peers_at: Instant,
|
||||
|
||||
swarm: Swarm<Behavior>,
|
||||
|
||||
last_message: Instant,
|
||||
|
||||
gossip: mpsc::UnboundedReceiver<gossip::Message>,
|
||||
signed_cosigns: mpsc::UnboundedSender<SignedCosign>,
|
||||
tributary_gossip: mpsc::UnboundedSender<(ValidatorSet, Vec<u8>)>,
|
||||
|
||||
outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,
|
||||
outbound_request_responses: HashMap<RequestId, oneshot::Sender<Response>>,
|
||||
|
||||
inbound_request_response_channels: HashMap<RequestId, ResponseChannel<Response>>,
|
||||
heartbeat_requests: mpsc::UnboundedSender<(RequestId, ValidatorSet, [u8; 32])>,
|
||||
/* TODO
|
||||
let cosigns = Cosigning::<D>::notable_cosigns(&self.db, global_session);
|
||||
let res = reqres::Response::NotableCosigns(cosigns);
|
||||
let _: Result<_, _> = self.swarm.behaviour_mut().reqres.send_response(channel, res);
|
||||
*/
|
||||
notable_cosign_requests: mpsc::UnboundedSender<(RequestId, [u8; 32])>,
|
||||
inbound_request_responses: mpsc::UnboundedReceiver<(RequestId, Response)>,
|
||||
}
|
||||
|
||||
impl SwarmTask {
|
||||
fn handle_gossip(&mut self, event: gossip::Event) {
|
||||
match event {
|
||||
gossip::Event::Message { message, .. } => {
|
||||
let Ok(message) = gossip::Message::deserialize(&mut message.data.as_slice()) else {
|
||||
// TODO: Penalize the PeerId which sent this message
|
||||
return;
|
||||
};
|
||||
match message {
|
||||
gossip::Message::Tributary { set, message } => {
|
||||
let _: Result<_, _> = self.tributary_gossip.send((set, message));
|
||||
}
|
||||
gossip::Message::Cosign(signed_cosign) => {
|
||||
let _: Result<_, _> = self.signed_cosigns.send(signed_cosign);
|
||||
}
|
||||
}
|
||||
}
|
||||
gossip::Event::Subscribed { .. } | gossip::Event::Unsubscribed { .. } => {}
|
||||
gossip::Event::GossipsubNotSupported { peer_id } => {
|
||||
let _: Result<_, _> = self.swarm.disconnect_peer_id(peer_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_reqres(&mut self, event: reqres::Event) {
|
||||
match event {
|
||||
reqres::Event::Message { message, .. } => match message {
|
||||
reqres::Message::Request { request_id, request, channel } => match request {
|
||||
reqres::Request::KeepAlive => {
|
||||
let _: Result<_, _> =
|
||||
self.swarm.behaviour_mut().reqres.send_response(channel, Response::None);
|
||||
}
|
||||
reqres::Request::Heartbeat { set, latest_block_hash } => {
|
||||
self.inbound_request_response_channels.insert(request_id, channel);
|
||||
let _: Result<_, _> =
|
||||
self.heartbeat_requests.send((request_id, set, latest_block_hash));
|
||||
}
|
||||
reqres::Request::NotableCosigns { global_session } => {
|
||||
self.inbound_request_response_channels.insert(request_id, channel);
|
||||
let _: Result<_, _> = self.notable_cosign_requests.send((request_id, global_session));
|
||||
}
|
||||
},
|
||||
reqres::Message::Response { request_id, response } => {
|
||||
if let Some(channel) = self.outbound_request_responses.remove(&request_id) {
|
||||
let _: Result<_, _> = channel.send(response);
|
||||
}
|
||||
}
|
||||
},
|
||||
reqres::Event::OutboundFailure { request_id, .. } => {
|
||||
// Send None as the response for the request
|
||||
if let Some(channel) = self.outbound_request_responses.remove(&request_id) {
|
||||
let _: Result<_, _> = channel.send(Response::None);
|
||||
}
|
||||
}
|
||||
reqres::Event::InboundFailure { .. } | reqres::Event::ResponseSent { .. } => {}
|
||||
}
|
||||
}
|
||||
|
||||
async fn run(mut self) {
|
||||
loop {
|
||||
let time_till_keep_alive =
  (self.last_message + KEEP_ALIVE_INTERVAL).saturating_duration_since(Instant::now());
|
||||
let time_till_rebuild_peers = self.rebuild_peers_at.saturating_duration_since(Instant::now());
|
||||
|
||||
tokio::select! {
|
||||
() = tokio::time::sleep(time_till_keep_alive) => {
|
||||
let peers = self.swarm.connected_peers().copied().collect::<Vec<_>>();
|
||||
let behavior = self.swarm.behaviour_mut();
|
||||
for peer in peers {
|
||||
behavior.reqres.send_request(&peer, Request::KeepAlive);
|
||||
}
|
||||
self.last_message = Instant::now();
|
||||
}
|
||||
|
||||
// Dial peers we're instructed to
|
||||
dial_opts = self.to_dial.recv() => {
|
||||
let dial_opts = dial_opts.expect("DialTask was closed?");
|
||||
let _: Result<_, _> = self.swarm.dial(dial_opts);
|
||||
}
|
||||
|
||||
/*
|
||||
Rebuild the peers every 10 minutes.
|
||||
|
||||
This protects against any race conditions/edge cases we have in our logic to track peers,
|
||||
along with unrepresented behavior such as when a peer changes the networks they're active
|
||||
in. This lets the peer tracking logic simply be 'good enough' to not become horribly
|
||||
corrupt over the span of `TIME_BETWEEN_REBUILD_PEERS`.
|
||||
|
||||
We also use this to disconnect all peers who are no longer active in any network.
|
||||
*/
|
||||
() = tokio::time::sleep(time_till_rebuild_peers) => {
|
||||
let validators_by_network = self.validators.read().await.by_network().clone();
|
||||
let connected_peers = self.swarm.connected_peers().copied().collect::<HashSet<_>>();
|
||||
|
||||
// We initially populate the list of peers to disconnect with all peers
|
||||
let mut to_disconnect = connected_peers.clone();
|
||||
|
||||
// Build the new peers object
|
||||
let mut peers = HashMap::new();
|
||||
for (network, validators) in validators_by_network {
|
||||
peers.insert(network, validators.intersection(&connected_peers).copied().collect());
|
||||
|
||||
// If this peer is in this validator set, don't keep it flagged for disconnection
|
||||
to_disconnect.retain(|peer| !validators.contains(peer));
|
||||
}
|
||||
|
||||
// Write the new peers object
|
||||
*self.peers.peers.write().await = peers;
|
||||
self.rebuild_peers_at = Instant::now() + TIME_BETWEEN_REBUILD_PEERS;
|
||||
|
||||
// Disconnect all peers marked for disconnection
|
||||
for peer in to_disconnect {
|
||||
let _: Result<_, _> = self.swarm.disconnect_peer_id(peer);
|
||||
}
|
||||
}
|
||||
|
||||
// Handle swarm events
|
||||
event = self.swarm.next() => {
|
||||
// `Swarm::next` will never return `Poll::Ready(None)`
|
||||
// https://docs.rs/
|
||||
// libp2p/0.54.1/libp2p/struct.Swarm.html#impl-Stream-for-Swarm%3CTBehaviour%3E
|
||||
let event = event.unwrap();
|
||||
match event {
|
||||
// New connection, so update peers
|
||||
SwarmEvent::ConnectionEstablished { peer_id, .. } => {
|
||||
let Some(networks) =
|
||||
self.validators.read().await.networks(&peer_id).cloned() else { continue };
|
||||
let mut peers = self.peers.peers.write().await;
|
||||
for network in networks {
|
||||
peers.entry(network).or_insert_with(HashSet::new).insert(peer_id);
|
||||
}
|
||||
}
|
||||
|
||||
// Connection closed, so update peers
|
||||
SwarmEvent::ConnectionClosed { peer_id, .. } => {
|
||||
let Some(networks) =
|
||||
self.validators.read().await.networks(&peer_id).cloned() else { continue };
|
||||
let mut peers = self.peers.peers.write().await;
|
||||
for network in networks {
|
||||
peers.entry(network).or_insert_with(HashSet::new).remove(&peer_id);
|
||||
}
|
||||
|
||||
/*
|
||||
We want to re-run the dial task, since we lost a peer, in case we should find new
|
||||
peers. This opens a DoS where a validator repeatedly opens/closes connections to
|
||||
force iterations of the dial task. We prevent this by requiring a minimum amount of time
|
||||
since the last explicit iteration.
|
||||
|
||||
This is suboptimal. If we have several disconnects in immediate proximity, we'll
|
||||
trigger the dial task upon the first (where we may still have enough peers we
|
||||
shouldn't dial more) but not the last (where we may have so few peers left we
|
||||
should dial more). This is accepted as the dial task will eventually run on its
|
||||
natural timer.
|
||||
*/
|
||||
const MINIMUM_TIME_SINCE_LAST_EXPLICIT_DIAL: Duration = Duration::from_secs(60);
|
||||
let now = Instant::now();
|
||||
if (self.last_dial_task_run + MINIMUM_TIME_SINCE_LAST_EXPLICIT_DIAL) < now {
|
||||
self.dial_task.run_now();
|
||||
self.last_dial_task_run = now;
|
||||
}
|
||||
}
|
||||
|
||||
SwarmEvent::Behaviour(BehaviorEvent::Reqres(event)) => {
|
||||
self.handle_reqres(event)
|
||||
}
|
||||
SwarmEvent::Behaviour(BehaviorEvent::Gossip(event)) => {
|
||||
self.handle_gossip(event)
|
||||
}
|
||||
|
||||
// We don't handle any of these
|
||||
SwarmEvent::IncomingConnection { .. } |
|
||||
SwarmEvent::IncomingConnectionError { .. } |
|
||||
SwarmEvent::OutgoingConnectionError { .. } |
|
||||
SwarmEvent::NewListenAddr { .. } |
|
||||
SwarmEvent::ExpiredListenAddr { .. } |
|
||||
SwarmEvent::ListenerClosed { .. } |
|
||||
SwarmEvent::ListenerError { .. } |
|
||||
SwarmEvent::Dialing { .. } => {}
|
||||
}
|
||||
}
|
||||
|
||||
message = self.gossip.recv() => {
|
||||
let message = message.expect("channel for messages to gossip was closed?");
|
||||
let topic = message.topic();
|
||||
let message = borsh::to_vec(&message).unwrap();
|
||||
let _: Result<_, _> = self.swarm.behaviour_mut().gossip.publish(topic, message);
|
||||
self.last_message = Instant::now();
|
||||
}
|
||||
|
||||
request = self.outbound_requests.recv() => {
|
||||
let (peer, request, response_channel) =
|
||||
request.expect("channel for requests was closed?");
|
||||
let request_id = self.swarm.behaviour_mut().reqres.send_request(&peer, request);
|
||||
self.outbound_request_responses.insert(request_id, response_channel);
|
||||
}
|
||||
|
||||
response = self.inbound_request_responses.recv() => {
|
||||
let (request_id, response) =
|
||||
response.expect("channel for inbound request responses was closed?");
|
||||
if let Some(channel) = self.inbound_request_response_channels.remove(&request_id) {
|
||||
let _: Result<_, _> =
|
||||
self.swarm.behaviour_mut().reqres.send_response(channel, response);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub(crate) fn spawn(
|
||||
dial_task: TaskHandle,
|
||||
to_dial: mpsc::UnboundedReceiver<DialOpts>,
|
||||
|
||||
validators: Arc<RwLock<Validators>>,
|
||||
peers: Peers,
|
||||
|
||||
swarm: Swarm<Behavior>,
|
||||
|
||||
gossip: mpsc::UnboundedReceiver<gossip::Message>,
|
||||
signed_cosigns: mpsc::UnboundedSender<SignedCosign>,
|
||||
tributary_gossip: mpsc::UnboundedSender<(ValidatorSet, Vec<u8>)>,
|
||||
|
||||
outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,
|
||||
|
||||
heartbeat_requests: mpsc::UnboundedSender<(RequestId, ValidatorSet, [u8; 32])>,
|
||||
notable_cosign_requests: mpsc::UnboundedSender<(RequestId, [u8; 32])>,
|
||||
inbound_request_responses: mpsc::UnboundedReceiver<(RequestId, Response)>,
|
||||
) {
|
||||
tokio::spawn(
|
||||
SwarmTask {
|
||||
dial_task,
|
||||
to_dial,
|
||||
last_dial_task_run: Instant::now(),
|
||||
|
||||
validators,
|
||||
peers,
|
||||
rebuild_peers_at: Instant::now() + TIME_BETWEEN_REBUILD_PEERS,
|
||||
|
||||
swarm,
|
||||
|
||||
last_message: Instant::now(),
|
||||
|
||||
gossip,
|
||||
signed_cosigns,
|
||||
tributary_gossip,
|
||||
|
||||
outbound_requests,
|
||||
outbound_request_responses: HashMap::new(),
|
||||
|
||||
inbound_request_response_channels: HashMap::new(),
|
||||
heartbeat_requests,
|
||||
notable_cosign_requests,
|
||||
inbound_request_responses,
|
||||
}
|
||||
.run(),
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1,183 +0,0 @@
|
||||
use core::{borrow::Borrow, future::Future};
|
||||
use std::{
|
||||
sync::Arc,
|
||||
collections::{HashSet, HashMap},
|
||||
};
|
||||
|
||||
use serai_client::{primitives::NetworkId, validator_sets::primitives::Session, Serai};
|
||||
|
||||
use serai_task::{Task, ContinuallyRan};
|
||||
|
||||
use libp2p::PeerId;
|
||||
|
||||
use futures_util::stream::{StreamExt, FuturesUnordered};
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::p2p::libp2p::peer_id_from_public;
|
||||
|
||||
pub(crate) struct Validators {
|
||||
serai: Serai,
|
||||
|
||||
// A cache for which session we're populated with the validators of
|
||||
sessions: HashMap<NetworkId, Session>,
|
||||
// The validators by network
|
||||
by_network: HashMap<NetworkId, HashSet<PeerId>>,
|
||||
// The validators and their networks
|
||||
validators: HashMap<PeerId, HashSet<NetworkId>>,
|
||||
}
|
||||
|
||||
impl Validators {
|
||||
pub(crate) fn new(serai: Serai) -> Self {
|
||||
Validators {
|
||||
serai,
|
||||
sessions: HashMap::new(),
|
||||
by_network: HashMap::new(),
|
||||
validators: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
async fn session_changes(
|
||||
serai: impl Borrow<Serai>,
|
||||
sessions: impl Borrow<HashMap<NetworkId, Session>>,
|
||||
) -> Result<Vec<(NetworkId, Session, HashSet<PeerId>)>, String> {
|
||||
let temporal_serai =
|
||||
serai.borrow().as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
|
||||
let temporal_serai = temporal_serai.validator_sets();
|
||||
|
||||
let mut session_changes = vec![];
|
||||
{
|
||||
// FuturesUnordered can be bad practice as it'll cause timeouts if infrequently polled, but
|
||||
// we poll it till it yields all futures with the most minimal processing possible
|
||||
let mut futures = FuturesUnordered::new();
|
||||
for network in serai_client::primitives::NETWORKS {
|
||||
if network == NetworkId::Serai {
|
||||
continue;
|
||||
}
|
||||
let sessions = sessions.borrow();
|
||||
futures.push(async move {
|
||||
let session = match temporal_serai.session(network).await {
|
||||
Ok(Some(session)) => session,
|
||||
Ok(None) => return Ok(None),
|
||||
Err(e) => return Err(format!("{e:?}")),
|
||||
};
|
||||
|
||||
if sessions.get(&network) == Some(&session) {
|
||||
Ok(None)
|
||||
} else {
|
||||
match temporal_serai.active_network_validators(network).await {
|
||||
Ok(validators) => Ok(Some((
|
||||
network,
|
||||
session,
|
||||
validators.into_iter().map(peer_id_from_public).collect(),
|
||||
))),
|
||||
Err(e) => Err(format!("{e:?}")),
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
while let Some(session_change) = futures.next().await {
|
||||
if let Some(session_change) = session_change? {
|
||||
session_changes.push(session_change);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(session_changes)
|
||||
}
|
||||
|
||||
fn incorporate_session_changes(
|
||||
&mut self,
|
||||
session_changes: Vec<(NetworkId, Session, HashSet<PeerId>)>,
|
||||
) {
|
||||
for (network, session, validators) in session_changes {
|
||||
// Remove the existing validators
|
||||
for validator in self.by_network.remove(&network).unwrap_or_else(HashSet::new) {
|
||||
// Get all networks this validator is in
|
||||
let mut networks = self.validators.remove(&validator).unwrap();
|
||||
// Remove this one
|
||||
networks.remove(&network);
|
||||
// Insert the networks back if the validator was present in other networks
|
||||
if !networks.is_empty() {
|
||||
self.validators.insert(validator, networks);
|
||||
}
|
||||
}
|
||||
|
||||
// Add the new validators
|
||||
for validator in validators.iter().copied() {
|
||||
self.validators.entry(validator).or_insert_with(HashSet::new).insert(network);
|
||||
}
|
||||
self.by_network.insert(network, validators);
|
||||
|
||||
// Update the session we have populated
|
||||
self.sessions.insert(network, session);
|
||||
}
|
||||
}
|
||||
|
||||
/// Update the view of the validators.
|
||||
pub(crate) async fn update(&mut self) -> Result<(), String> {
|
||||
let session_changes = Self::session_changes(&self.serai, &self.sessions).await?;
|
||||
self.incorporate_session_changes(session_changes);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn by_network(&self) -> &HashMap<NetworkId, HashSet<PeerId>> {
|
||||
&self.by_network
|
||||
}
|
||||
|
||||
pub(crate) fn contains(&self, peer_id: &PeerId) -> bool {
|
||||
self.validators.contains_key(peer_id)
|
||||
}
|
||||
|
||||
pub(crate) fn networks(&self, peer_id: &PeerId) -> Option<&HashSet<NetworkId>> {
|
||||
self.validators.get(peer_id)
|
||||
}
|
||||
}
|
||||
|
||||
/// A task which updates a set of validators.
|
||||
///
|
||||
/// The validators managed by this task will have their exclusive lock held for a minimal amount of
|
||||
/// time while the update occurs to minimize the disruption to the services relying on it.
|
||||
pub(crate) struct UpdateValidatorsTask {
|
||||
validators: Arc<RwLock<Validators>>,
|
||||
}
|
||||
|
||||
impl UpdateValidatorsTask {
|
||||
/// Spawn a new instance of the UpdateValidatorsTask.
|
||||
///
|
||||
/// This returns a reference to the Validators it updates after spawning itself.
|
||||
pub(crate) fn spawn(serai: Serai) -> Arc<RwLock<Validators>> {
|
||||
// The validators which will be updated
|
||||
let validators = Arc::new(RwLock::new(Validators::new(serai)));
|
||||
|
||||
// Define the task
|
||||
let (update_validators_task, update_validators_task_handle) = Task::new();
|
||||
// Forget the handle, as dropping the handle would stop the task
|
||||
core::mem::forget(update_validators_task_handle);
|
||||
// Spawn the task
|
||||
tokio::spawn(
|
||||
(Self { validators: validators.clone() }).continually_run(update_validators_task, vec![]),
|
||||
);
|
||||
|
||||
// Return the validators
|
||||
validators
|
||||
}
|
||||
}
|
||||
|
||||
impl ContinuallyRan for UpdateValidatorsTask {
|
||||
// Only run every minute, not the default of every five seconds
|
||||
const DELAY_BETWEEN_ITERATIONS: u64 = 60;
|
||||
const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
|
||||
|
||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
||||
async move {
|
||||
let session_changes = {
|
||||
let validators = self.validators.read().await;
|
||||
Validators::session_changes(validators.serai.clone(), validators.sessions.clone())
|
||||
.await
|
||||
.map_err(|e| format!("{e:?}"))?
|
||||
};
|
||||
self.validators.write().await.incorporate_session_changes(session_changes);
|
||||
Ok(true)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
use core::future::Future;
|
||||
|
||||
use borsh::{BorshSerialize, BorshDeserialize};
|
||||
|
||||
use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};
|
||||
|
||||
/// The libp2p-backed P2p network
|
||||
mod libp2p;
|
||||
|
||||
/// The heartbeat task, effecting sync of Tributaries
|
||||
mod heartbeat;
|
||||
|
||||
/// A tributary block and its commit.
|
||||
#[derive(Clone, BorshSerialize, BorshDeserialize)]
|
||||
pub(crate) struct TributaryBlockWithCommit {
|
||||
pub(crate) block: Vec<u8>,
|
||||
pub(crate) commit: Vec<u8>,
|
||||
}
|
||||
|
||||
trait Peer<'a>: Send {
|
||||
fn send_heartbeat(
|
||||
&self,
|
||||
set: ValidatorSet,
|
||||
latest_block_hash: [u8; 32],
|
||||
) -> impl Send + Future<Output = Option<Vec<TributaryBlockWithCommit>>>;
|
||||
}
|
||||
|
||||
trait P2p: Send + Sync + tributary::P2p {
|
||||
type Peer<'a>: Peer<'a>;
|
||||
fn peers(&self, network: NetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>>;
|
||||
}
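// Illustrative sketch (not part of this file): how a heartbeat-driven sync could use the `Peer`
// and `P2p` traits above. `sync_tributary_from_peers` is a hypothetical helper named purely for
// illustration; the real heartbeat task lives in the `heartbeat` module and is more involved.
#[allow(dead_code)]
async fn sync_tributary_from_peers<P: P2p>(
  p2p: &P,
  network: NetworkId,
  set: ValidatorSet,
  latest_block_hash: [u8; 32],
) -> Vec<TributaryBlockWithCommit> {
  // Ask peers active on this network for blocks past our tip, taking the first useful response
  for peer in p2p.peers(network).await {
    if let Some(blocks) = peer.send_heartbeat(set, latest_block_hash).await {
      if !blocks.is_empty() {
        return blocks;
      }
    }
  }
  vec![]
}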
|
||||
coordinator/src/processors.rs
@@ -0,0 +1,46 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use serai_client::primitives::ExternalNetworkId;
|
||||
use processor_messages::{ProcessorMessage, CoordinatorMessage};
|
||||
|
||||
use message_queue::{Service, Metadata, client::MessageQueue};
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub struct Message {
|
||||
pub id: u64,
|
||||
pub network: ExternalNetworkId,
|
||||
pub msg: ProcessorMessage,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait Processors: 'static + Send + Sync + Clone {
|
||||
async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>);
|
||||
async fn recv(&self, network: ExternalNetworkId) -> Message;
|
||||
async fn ack(&self, msg: Message);
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Processors for Arc<MessageQueue> {
|
||||
async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>) {
|
||||
let msg: CoordinatorMessage = msg.into();
|
||||
let metadata =
|
||||
Metadata { from: self.service, to: Service::Processor(network), intent: msg.intent() };
|
||||
let msg = borsh::to_vec(&msg).unwrap();
|
||||
self.queue(metadata, msg).await;
|
||||
}
|
||||
async fn recv(&self, network: ExternalNetworkId) -> Message {
|
||||
let msg = self.next(Service::Processor(network)).await;
|
||||
assert_eq!(msg.from, Service::Processor(network));
|
||||
|
||||
let id = msg.id;
|
||||
|
||||
// Deserialize it into a ProcessorMessage
|
||||
let msg: ProcessorMessage =
|
||||
borsh::from_slice(&msg.msg).expect("message wasn't a borsh-encoded ProcessorMessage");
|
||||
|
||||
return Message { id, network, msg };
|
||||
}
|
||||
async fn ack(&self, msg: Message) {
|
||||
MessageQueue::ack(self, Service::Processor(msg.network), msg.id).await
|
||||
}
|
||||
}
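// Illustrative sketch (not part of this file): the intended recv -> handle -> ack flow for any
// `Processors` implementation. `drain_processor_messages` is a hypothetical helper named purely
// for illustration; the point is that a message is only acked once handled, so anything left
// unhandled at the time of a reboot is redelivered.
#[allow(dead_code)]
async fn drain_processor_messages<P: Processors>(processors: P, network: ExternalNetworkId) {
  loop {
    // Waits until the processor for this network sends us a message
    let msg = processors.recv(network).await;
    // ... handle `msg.msg` (a `ProcessorMessage`) here ...
    // Only acknowledge after handling, so an unhandled message survives a reboot
    processors.ack(msg).await;
  }
}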
|
||||
coordinator/src/substrate/cosign.rs
@@ -0,0 +1,338 @@
|
||||
/*
|
||||
If:
|
||||
A) This block has events and it's been at least X blocks since the last cosign or
|
||||
B) This block doesn't have events but it's been X blocks since a skipped block which did
|
||||
have events or
|
||||
C) This block key gens (which changes who the cosigners are)
|
||||
cosign this block.
|
||||
|
||||
This creates both a minimum and maximum delay of X blocks before a block's cosigning begins,
|
||||
barring key gens which are exceptional. The minimum delay is there to ensure we don't constantly
|
||||
spawn new protocols every 6 seconds, overwriting the old ones. The maximum delay is there to
|
||||
ensure any block needing to be cosigned is cosigned within a reasonable amount of time.
|
||||
*/
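// Illustrative sketch (not part of this file's logic): the conditions above, condensed into a
// single predicate. The `has_events`/`has_key_gen`/`blocks_since_*` inputs are hypothetical names
// for illustration; the implementation below tracks this state via `IntendedCosign` and
// `COSIGN_DISTANCE` rather than passing it around directly.
#[allow(dead_code)]
fn should_trigger_cosign(
  has_events: bool,
  has_key_gen: bool,
  blocks_since_last_cosign: u64,
  blocks_since_skipped_block_with_events: Option<u64>,
) -> bool {
  // C) Key gens change who the cosigners are, so they always trigger a cosign
  has_key_gen ||
    // A) This block has events and it's been at least X blocks since the last cosign
    (has_events && (blocks_since_last_cosign >= COSIGN_DISTANCE)) ||
    // B) It's been X blocks since a skipped block which did have events
    (blocks_since_skipped_block_with_events >= Some(COSIGN_DISTANCE))
}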
|
||||
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::Ciphersuite;
|
||||
|
||||
use borsh::{BorshSerialize, BorshDeserialize};
|
||||
|
||||
use serai_client::{
|
||||
primitives::ExternalNetworkId,
|
||||
validator_sets::primitives::{ExternalValidatorSet, Session},
|
||||
Serai, SeraiError,
|
||||
};
|
||||
|
||||
use serai_db::*;
|
||||
|
||||
use crate::{Db, substrate::in_set, tributary::SeraiBlockNumber};
|
||||
|
||||
// 5 minutes, expressed in blocks
|
||||
// TODO: Pull a constant for block time
|
||||
const COSIGN_DISTANCE: u64 = 5 * 60 / 6;
|
||||
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||
enum HasEvents {
|
||||
KeyGen,
|
||||
Yes,
|
||||
No,
|
||||
}
|
||||
|
||||
create_db!(
|
||||
SubstrateCosignDb {
|
||||
ScanCosignFrom: () -> u64,
|
||||
IntendedCosign: () -> (u64, Option<u64>),
|
||||
BlockHasEventsCache: (block: u64) -> HasEvents,
|
||||
LatestCosignedBlock: () -> u64,
|
||||
}
|
||||
);
|
||||
|
||||
impl IntendedCosign {
|
||||
// Sets the intended to cosign block, clearing the prior value entirely.
|
||||
pub fn set_intended_cosign(txn: &mut impl DbTxn, intended: u64) {
|
||||
Self::set(txn, &(intended, None::<u64>));
|
||||
}
|
||||
|
||||
// Sets the block skipped for cosigning since the last intended-to-cosign block.
|
||||
pub fn set_skipped_cosign(txn: &mut impl DbTxn, skipped: u64) {
|
||||
let (intended, prior_skipped) = Self::get(txn).unwrap();
|
||||
assert!(prior_skipped.is_none());
|
||||
Self::set(txn, &(intended, Some(skipped)));
|
||||
}
|
||||
}
|
||||
|
||||
impl LatestCosignedBlock {
|
||||
pub fn latest_cosigned_block(getter: &impl Get) -> u64 {
|
||||
Self::get(getter).unwrap_or_default().max(1)
|
||||
}
|
||||
}
|
||||
|
||||
db_channel! {
|
||||
SubstrateDbChannels {
|
||||
CosignTransactions: (network: ExternalNetworkId) -> (Session, u64, [u8; 32]),
|
||||
}
|
||||
}
|
||||
|
||||
impl CosignTransactions {
|
||||
// Append a cosign transaction.
|
||||
pub fn append_cosign(
|
||||
txn: &mut impl DbTxn,
|
||||
set: ExternalValidatorSet,
|
||||
number: u64,
|
||||
hash: [u8; 32],
|
||||
) {
|
||||
CosignTransactions::send(txn, set.network, &(set.session, number, hash))
|
||||
}
|
||||
}
|
||||
|
||||
async fn block_has_events(
|
||||
txn: &mut impl DbTxn,
|
||||
serai: &Serai,
|
||||
block: u64,
|
||||
) -> Result<HasEvents, SeraiError> {
|
||||
let cached = BlockHasEventsCache::get(txn, block);
|
||||
match cached {
|
||||
None => {
|
||||
let serai = serai.as_of(
|
||||
serai
|
||||
.finalized_block_by_number(block)
|
||||
.await?
|
||||
.expect("couldn't get block which should've been finalized")
|
||||
.hash(),
|
||||
);
|
||||
|
||||
if !serai.validator_sets().key_gen_events().await?.is_empty() {
|
||||
return Ok(HasEvents::KeyGen);
|
||||
}
|
||||
|
||||
let has_no_events = serai.coins().burn_with_instruction_events().await?.is_empty() &&
|
||||
serai.in_instructions().batch_events().await?.is_empty() &&
|
||||
serai.validator_sets().new_set_events().await?.is_empty() &&
|
||||
serai.validator_sets().set_retired_events().await?.is_empty();
|
||||
|
||||
let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes };
|
||||
|
||||
BlockHasEventsCache::set(txn, block, &has_events);
|
||||
Ok(has_events)
|
||||
}
|
||||
Some(code) => Ok(code),
|
||||
}
|
||||
}
|
||||
|
||||
async fn potentially_cosign_block(
|
||||
txn: &mut impl DbTxn,
|
||||
serai: &Serai,
|
||||
block: u64,
|
||||
skipped_block: Option<u64>,
|
||||
window_end_exclusive: u64,
|
||||
) -> Result<bool, SeraiError> {
|
||||
// The following code, which marks this block as cosigned if the prior block is cosigned, expects
// this block to not be block zero
// While we could perform this check there, there's no reason not to short-circuit the entire
// function here
|
||||
if block == 0 {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
let block_has_events = block_has_events(txn, serai, block).await?;
|
||||
|
||||
// If this block had no events and immediately follows a cosigned block, mark it as cosigned
|
||||
if (block_has_events == HasEvents::No) &&
|
||||
(LatestCosignedBlock::latest_cosigned_block(txn) == (block - 1))
|
||||
{
|
||||
log::debug!("automatically co-signing next block ({block}) since it has no events");
|
||||
LatestCosignedBlock::set(txn, &block);
|
||||
}
|
||||
|
||||
// If we skipped a block, we're supposed to cosign it at `skipped + COSIGN_DISTANCE` if no other
// blocks trigger a cosigning protocol covering it
// This bounds the maximum delay between a block needing cosigning and a cosign for it triggering
|
||||
let maximally_latent_cosign_block =
|
||||
skipped_block.map(|skipped_block| skipped_block + COSIGN_DISTANCE);
|
||||
|
||||
// If this block is within the window,
|
||||
if block < window_end_exclusive {
|
||||
// and set a key, cosign it
|
||||
if block_has_events == HasEvents::KeyGen {
|
||||
IntendedCosign::set_intended_cosign(txn, block);
|
||||
// Carry skipped if it isn't included by cosigning this block
|
||||
if let Some(skipped) = skipped_block {
|
||||
if skipped > block {
|
||||
IntendedCosign::set_skipped_cosign(txn, block);
|
||||
}
|
||||
}
|
||||
return Ok(true);
|
||||
}
|
||||
} else if (Some(block) == maximally_latent_cosign_block) || (block_has_events != HasEvents::No) {
|
||||
// Since this block was outside the window and had events/was maximally latent, cosign it
|
||||
IntendedCosign::set_intended_cosign(txn, block);
|
||||
return Ok(true);
|
||||
}
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
/*
|
||||
Advances the cosign protocol as should be done per the latest block.
|
||||
|
||||
A block is considered cosigned if:
|
||||
A) It was cosigned
|
||||
B) It's the parent of a cosigned block
|
||||
C) It immediately follows a cosigned block and has no events requiring cosigning
|
||||
|
||||
This only actually performs advancement within a limited bound (generally until it finds a block
|
||||
which should be cosigned). Accordingly, it is necessary to call multiple times even if
|
||||
`latest_number` doesn't change.
|
||||
*/
|
||||
async fn advance_cosign_protocol_inner(
|
||||
db: &mut impl Db,
|
||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
serai: &Serai,
|
||||
latest_number: u64,
|
||||
) -> Result<(), SeraiError> {
|
||||
let mut txn = db.txn();
|
||||
|
||||
const INITIAL_INTENDED_COSIGN: u64 = 1;
|
||||
let (last_intended_to_cosign_block, mut skipped_block) = {
|
||||
let intended_cosign = IntendedCosign::get(&txn);
|
||||
// If we haven't prior intended to cosign a block, set the intended cosign to 1
|
||||
if let Some(intended_cosign) = intended_cosign {
|
||||
intended_cosign
|
||||
} else {
|
||||
IntendedCosign::set_intended_cosign(&mut txn, INITIAL_INTENDED_COSIGN);
|
||||
IntendedCosign::get(&txn).unwrap()
|
||||
}
|
||||
};
|
||||
|
||||
// "windows" refers to the window of blocks where even if there's a block which should be
|
||||
// cosigned, it won't be due to its proximity to the prior cosign
|
||||
let mut window_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE;
|
||||
// If we've never triggered a cosign, don't skip any cosigns based on proximity
|
||||
if last_intended_to_cosign_block == INITIAL_INTENDED_COSIGN {
|
||||
window_end_exclusive = 1;
|
||||
}
|
||||
|
||||
// The consensus rules for this are `last_intended_to_cosign_block + 1`
|
||||
let scan_start_block = last_intended_to_cosign_block + 1;
|
||||
// As a practical optimization, we don't re-scan old blocks since old blocks are independent of
|
||||
// new state
|
||||
let scan_start_block = scan_start_block.max(ScanCosignFrom::get(&txn).unwrap_or(1));
|
||||
|
||||
// Check all blocks within the window to see if they should be cosigned
|
||||
// If so, we're skipping them and need to flag them as skipped so that once the window closes, we
|
||||
// do cosign them
|
||||
// We only perform this check if we haven't already marked a block as skipped, since the cosign
// caused by the skipped block will cover all other blocks within this window
|
||||
if skipped_block.is_none() {
|
||||
let window_end_inclusive = window_end_exclusive - 1;
|
||||
for b in scan_start_block ..= window_end_inclusive.min(latest_number) {
|
||||
if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes {
|
||||
skipped_block = Some(b);
|
||||
log::debug!("skipping cosigning {b} due to proximity to prior cosign");
|
||||
IntendedCosign::set_skipped_cosign(&mut txn, b);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// A block which should be cosigned
|
||||
let mut to_cosign = None;
|
||||
// A list of sets which are cosigning, along with a boolean of if we're in the set
|
||||
let mut cosigning = vec![];
|
||||
|
||||
for block in scan_start_block ..= latest_number {
|
||||
let actual_block = serai
|
||||
.finalized_block_by_number(block)
|
||||
.await?
|
||||
.expect("couldn't get block which should've been finalized");
|
||||
|
||||
// Save the block number for this block, as needed by the cosigner to perform cosigning
|
||||
SeraiBlockNumber::set(&mut txn, actual_block.hash(), &block);
|
||||
|
||||
if potentially_cosign_block(&mut txn, serai, block, skipped_block, window_end_exclusive).await?
|
||||
{
|
||||
to_cosign = Some((block, actual_block.hash()));
|
||||
|
||||
// Get the keys as of the prior block
|
||||
// If this block sets new keys, the coordinator won't acknowledge them until we process this
// block
// We won't process this block until it's cosigned
|
||||
// Using the keys of the prior block ensures this deadlock isn't reached
|
||||
let serai = serai.as_of(actual_block.header.parent_hash.into());
|
||||
|
||||
for network in serai_client::primitives::EXTERNAL_NETWORKS {
|
||||
// Get the latest session to have set keys
|
||||
let set_with_keys = {
|
||||
let Some(latest_session) = serai.validator_sets().session(network.into()).await? else {
|
||||
continue;
|
||||
};
|
||||
let prior_session = Session(latest_session.0.saturating_sub(1));
|
||||
if serai
|
||||
.validator_sets()
|
||||
.keys(ExternalValidatorSet { network, session: prior_session })
|
||||
.await?
|
||||
.is_some()
|
||||
{
|
||||
ExternalValidatorSet { network, session: prior_session }
|
||||
} else {
|
||||
let set = ExternalValidatorSet { network, session: latest_session };
|
||||
if serai.validator_sets().keys(set).await?.is_none() {
|
||||
continue;
|
||||
}
|
||||
set
|
||||
}
|
||||
};
|
||||
|
||||
log::debug!("{:?} will be cosigning {block}", set_with_keys.network);
|
||||
cosigning.push((set_with_keys, in_set(key, &serai, set_with_keys.into()).await?.unwrap()));
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
// If this TX is committed, always start future scanning from the next block
|
||||
ScanCosignFrom::set(&mut txn, &(block + 1));
|
||||
// Since we're scanning *from* the next block, tidy the cache
|
||||
BlockHasEventsCache::del(&mut txn, block);
|
||||
}
|
||||
|
||||
if let Some((number, hash)) = to_cosign {
|
||||
// If this block doesn't have cosigners, yet does have events, automatically mark it as
|
||||
// cosigned
|
||||
if cosigning.is_empty() {
|
||||
log::debug!("{} had no cosigners available, marking as cosigned", number);
|
||||
LatestCosignedBlock::set(&mut txn, &number);
|
||||
} else {
|
||||
for (set, in_set) in cosigning {
|
||||
if in_set {
|
||||
log::debug!("cosigning {number} with {:?} {:?}", set.network, set.session);
|
||||
CosignTransactions::append_cosign(&mut txn, set, number, hash);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
txn.commit();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn advance_cosign_protocol(
|
||||
db: &mut impl Db,
|
||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
serai: &Serai,
|
||||
latest_number: u64,
|
||||
) -> Result<(), SeraiError> {
|
||||
loop {
|
||||
let scan_from = ScanCosignFrom::get(db).unwrap_or(1);
|
||||
// Only scan 1000 blocks at a time to limit a massive txn from forming
|
||||
let scan_to = latest_number.min(scan_from + 1000);
|
||||
advance_cosign_protocol_inner(db, key, serai, scan_to).await?;
|
||||
// If we didn't limit the scan_to, break
|
||||
if scan_to == latest_number {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
coordinator/src/substrate/db.rs
@@ -0,0 +1,32 @@
|
||||
use serai_client::primitives::ExternalNetworkId;
|
||||
|
||||
pub use serai_db::*;
|
||||
|
||||
mod inner_db {
|
||||
use super::*;
|
||||
|
||||
create_db!(
|
||||
SubstrateDb {
|
||||
NextBlock: () -> u64,
|
||||
HandledEvent: (block: [u8; 32]) -> u32,
|
||||
BatchInstructionsHashDb: (network: ExternalNetworkId, id: u32) -> [u8; 32]
|
||||
}
|
||||
);
|
||||
}
|
||||
pub(crate) use inner_db::{NextBlock, BatchInstructionsHashDb};
|
||||
|
||||
pub struct HandledEvent;
|
||||
impl HandledEvent {
|
||||
fn next_to_handle_event(getter: &impl Get, block: [u8; 32]) -> u32 {
|
||||
inner_db::HandledEvent::get(getter, block).map_or(0, |last| last + 1)
|
||||
}
|
||||
pub fn is_unhandled(getter: &impl Get, block: [u8; 32], event_id: u32) -> bool {
|
||||
let next = Self::next_to_handle_event(getter, block);
|
||||
assert!(next >= event_id);
|
||||
next == event_id
|
||||
}
|
||||
pub fn handle_event(txn: &mut impl DbTxn, block: [u8; 32], index: u32) {
|
||||
assert!(Self::next_to_handle_event(txn, block) == index);
|
||||
inner_db::HandledEvent::set(txn, block, &index);
|
||||
}
|
||||
}
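// Illustrative sketch (not part of this file): the intended usage pattern for `HandledEvent`.
// Events in a block are indexed in order, checked with `is_unhandled`, handled inside a txn, and
// marked handled in that same txn, so a reboot mid-block resumes without re-executing anything.
// `handle_events_idempotently` is a hypothetical helper named purely for illustration, assuming
// the `Db` trait re-exported above provides `txn()` as used elsewhere in this crate.
#[allow(dead_code)]
fn handle_events_idempotently<D: Db>(db: &mut D, block: [u8; 32], event_count: u32) {
  for event_id in 0 .. event_count {
    // Already handled prior to a reboot
    if !HandledEvent::is_unhandled(db, block, event_id) {
      continue;
    }
    let mut txn = db.txn();
    // ... handle this event within `txn` ...
    // Mark it handled in the same txn so the handling and the marker commit atomically
    HandledEvent::handle_event(&mut txn, block, event_id);
    txn.commit();
  }
}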
|
||||
coordinator/src/substrate/mod.rs
@@ -0,0 +1,547 @@
|
||||
use core::{ops::Deref, time::Duration};
|
||||
use std::{
|
||||
sync::Arc,
|
||||
collections::{HashSet, HashMap},
|
||||
};
|
||||
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite};
|
||||
|
||||
use serai_client::{
|
||||
coins::CoinsEvent,
|
||||
in_instructions::InInstructionsEvent,
|
||||
primitives::{BlockHash, ExternalNetworkId},
|
||||
validator_sets::{
|
||||
primitives::{ExternalValidatorSet, ValidatorSet},
|
||||
ValidatorSetsEvent,
|
||||
},
|
||||
Block, Serai, SeraiError, TemporalSerai,
|
||||
};
|
||||
|
||||
use serai_db::DbTxn;
|
||||
|
||||
use processor_messages::SubstrateContext;
|
||||
|
||||
use tokio::{sync::mpsc, time::sleep};
|
||||
|
||||
use crate::{
|
||||
Db,
|
||||
processors::Processors,
|
||||
tributary::{TributarySpec, SeraiDkgCompleted},
|
||||
};
|
||||
|
||||
mod db;
|
||||
pub use db::*;
|
||||
|
||||
mod cosign;
|
||||
pub use cosign::*;
|
||||
|
||||
async fn in_set(
|
||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
serai: &TemporalSerai<'_>,
|
||||
set: ValidatorSet,
|
||||
) -> Result<Option<bool>, SeraiError> {
|
||||
let Some(participants) = serai.validator_sets().participants(set.network).await? else {
|
||||
return Ok(None);
|
||||
};
|
||||
let key = (Ristretto::generator() * key.deref()).to_bytes();
|
||||
Ok(Some(participants.iter().any(|(participant, _)| participant.0 == key)))
|
||||
}
|
||||
|
||||
async fn handle_new_set<D: Db>(
|
||||
txn: &mut D::Transaction<'_>,
|
||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
|
||||
serai: &Serai,
|
||||
block: &Block,
|
||||
set: ExternalValidatorSet,
|
||||
) -> Result<(), SeraiError> {
|
||||
if in_set(key, &serai.as_of(block.hash()), set.into())
|
||||
.await?
|
||||
.expect("NewSet for set which doesn't exist")
|
||||
{
|
||||
log::info!("present in set {:?}", set);
|
||||
|
||||
let set_data = {
|
||||
let serai = serai.as_of(block.hash());
|
||||
let serai = serai.validator_sets();
|
||||
let set_participants =
|
||||
serai.participants(set.network.into()).await?.expect("NewSet for set which doesn't exist");
|
||||
|
||||
set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::<Vec<_>>()
|
||||
};
|
||||
|
||||
let time = if let Ok(time) = block.time() {
|
||||
time
|
||||
} else {
|
||||
assert_eq!(block.number(), 0);
|
||||
// Use the next block's time
|
||||
loop {
|
||||
let Ok(Some(res)) = serai.finalized_block_by_number(1).await else {
|
||||
sleep(Duration::from_secs(5)).await;
|
||||
continue;
|
||||
};
|
||||
break res.time().unwrap();
|
||||
}
|
||||
};
|
||||
// The block time is in milliseconds yet the Tributary is in seconds
|
||||
let time = time / 1000;
|
||||
// Since this block is in the past, and Tendermint doesn't play nice with starting chains after
|
||||
// their start time (though it does eventually work), delay the start time by 120 seconds
|
||||
// This is meant to handle ~20 blocks of lack of finalization for this first block
|
||||
const SUBSTRATE_TO_TRIBUTARY_TIME_DELAY: u64 = 120;
|
||||
let time = time + SUBSTRATE_TO_TRIBUTARY_TIME_DELAY;
|
||||
|
||||
let spec = TributarySpec::new(block.hash(), time, set, set_data);
|
||||
|
||||
log::info!("creating new tributary for {:?}", spec.set());
|
||||
|
||||
// Save it to the database now, not on the channel receiver's side, so this is safe against
|
||||
// reboots
|
||||
// If this txn finishes, and we reboot, then this'll be reloaded from active Tributaries
|
||||
// If this txn doesn't finish, this will be re-fired
|
||||
// If we waited to save to the DB, this txn may be finished, preventing re-firing, yet the
|
||||
// prior fired event may have not been received yet
|
||||
crate::ActiveTributaryDb::add_participating_in_tributary(txn, &spec);
|
||||
|
||||
new_tributary_spec.send(spec).unwrap();
|
||||
} else {
|
||||
log::info!("not present in new set {:?}", set);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn handle_batch_and_burns<Pro: Processors>(
|
||||
txn: &mut impl DbTxn,
|
||||
processors: &Pro,
|
||||
serai: &Serai,
|
||||
block: &Block,
|
||||
) -> Result<(), SeraiError> {
|
||||
// Track which networks had events with a Vec in order to preserve the insertion order
|
||||
// While that shouldn't be needed, ensuring order never hurts, and may enable design choices
|
||||
// with regards to Processor <-> Coordinator message passing
|
||||
let mut networks_with_event = vec![];
|
||||
let mut network_had_event = |burns: &mut HashMap<_, _>, batches: &mut HashMap<_, _>, network| {
|
||||
// Don't insert this network multiple times
|
||||
// A Vec is still used in order to maintain the insertion order
|
||||
if !networks_with_event.contains(&network) {
|
||||
networks_with_event.push(network);
|
||||
burns.insert(network, vec![]);
|
||||
batches.insert(network, vec![]);
|
||||
}
|
||||
};
|
||||
|
||||
let mut batch_block = HashMap::new();
|
||||
let mut batches = HashMap::<ExternalNetworkId, Vec<u32>>::new();
|
||||
let mut burns = HashMap::new();
|
||||
|
||||
let serai = serai.as_of(block.hash());
|
||||
for batch in serai.in_instructions().batch_events().await? {
|
||||
if let InInstructionsEvent::Batch { network, id, block: network_block, instructions_hash } =
|
||||
batch
|
||||
{
|
||||
network_had_event(&mut burns, &mut batches, network);
|
||||
|
||||
BatchInstructionsHashDb::set(txn, network, id, &instructions_hash);
|
||||
|
||||
// Make sure this is the only Batch event for this network in this Block
|
||||
assert!(batch_block.insert(network, network_block).is_none());
|
||||
|
||||
// Add the batch included by this block
|
||||
batches.get_mut(&network).unwrap().push(id);
|
||||
} else {
|
||||
panic!("Batch event wasn't Batch: {batch:?}");
|
||||
}
|
||||
}
|
||||
|
||||
for burn in serai.coins().burn_with_instruction_events().await? {
|
||||
if let CoinsEvent::BurnWithInstruction { from: _, instruction } = burn {
|
||||
let network = instruction.balance.coin.network();
|
||||
network_had_event(&mut burns, &mut batches, network);
|
||||
|
||||
// network_had_event should register an entry in burns
|
||||
burns.get_mut(&network).unwrap().push(instruction);
|
||||
} else {
|
||||
panic!("Burn event wasn't Burn: {burn:?}");
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(HashSet::<&_>::from_iter(networks_with_event.iter()).len(), networks_with_event.len());
|
||||
|
||||
for network in networks_with_event {
|
||||
let network_latest_finalized_block = if let Some(block) = batch_block.remove(&network) {
|
||||
block
|
||||
} else {
|
||||
// If it's had a batch or a burn, it must have had a block acknowledged
|
||||
serai
|
||||
.in_instructions()
|
||||
.latest_block_for_network(network)
|
||||
.await?
|
||||
.expect("network had a batch/burn yet never set a latest block")
|
||||
};
|
||||
|
||||
processors
|
||||
.send(
|
||||
network,
|
||||
processor_messages::substrate::CoordinatorMessage::SubstrateBlock {
|
||||
context: SubstrateContext {
|
||||
serai_time: block.time().unwrap() / 1000,
|
||||
network_latest_finalized_block,
|
||||
},
|
||||
block: block.number(),
|
||||
burns: burns.remove(&network).unwrap(),
|
||||
batches: batches.remove(&network).unwrap(),
|
||||
},
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Handle a specific Substrate block, returning an error when it fails to get data
|
||||
// (not blocking / holding)
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn handle_block<D: Db, Pro: Processors>(
|
||||
db: &mut D,
|
||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
|
||||
perform_slash_report: &mpsc::UnboundedSender<ExternalValidatorSet>,
|
||||
tributary_retired: &mpsc::UnboundedSender<ExternalValidatorSet>,
|
||||
processors: &Pro,
|
||||
serai: &Serai,
|
||||
block: Block,
|
||||
) -> Result<(), SeraiError> {
|
||||
let hash = block.hash();
|
||||
|
||||
// Define an indexed event ID.
|
||||
let mut event_id = 0;
|
||||
|
||||
// If a new validator set was activated, create tributary/inform processor to do a DKG
|
||||
for new_set in serai.as_of(hash).validator_sets().new_set_events().await? {
|
||||
// Individually mark each event as handled so on reboot, we minimize duplicates
|
||||
// Additionally, if the Serai connection also fails 1/100 times, this means a block with 1000
|
||||
// events will successfully be incrementally handled
|
||||
// (though the Serai connection should be stable, making this unnecessary)
|
||||
let ValidatorSetsEvent::NewSet { set } = new_set else {
|
||||
panic!("NewSet event wasn't NewSet: {new_set:?}");
|
||||
};
|
||||
|
||||
// We only coordinate/process external networks
|
||||
let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
|
||||
if HandledEvent::is_unhandled(db, hash, event_id) {
|
||||
log::info!("found fresh new set event {:?}", new_set);
|
||||
let mut txn = db.txn();
|
||||
handle_new_set::<D>(&mut txn, key, new_tributary_spec, serai, &block, set).await?;
|
||||
HandledEvent::handle_event(&mut txn, hash, event_id);
|
||||
txn.commit();
|
||||
}
|
||||
event_id += 1;
|
||||
}
|
||||
|
||||
// If a key pair was confirmed, inform the processor
|
||||
for key_gen in serai.as_of(hash).validator_sets().key_gen_events().await? {
|
||||
if HandledEvent::is_unhandled(db, hash, event_id) {
|
||||
log::info!("found fresh key gen event {:?}", key_gen);
|
||||
let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen else {
|
||||
panic!("KeyGen event wasn't KeyGen: {key_gen:?}");
|
||||
};
|
||||
let substrate_key = key_pair.0 .0;
|
||||
processors
|
||||
.send(
|
||||
set.network,
|
||||
processor_messages::substrate::CoordinatorMessage::ConfirmKeyPair {
|
||||
context: SubstrateContext {
|
||||
serai_time: block.time().unwrap() / 1000,
|
||||
network_latest_finalized_block: serai
|
||||
.as_of(block.hash())
|
||||
.in_instructions()
|
||||
.latest_block_for_network(set.network)
|
||||
.await?
|
||||
// The processor treats this as a magic value which will cause it to find a network
|
||||
// block which has a time greater than or equal to the Serai time
|
||||
.unwrap_or(BlockHash([0; 32])),
|
||||
},
|
||||
session: set.session,
|
||||
key_pair,
|
||||
},
|
||||
)
|
||||
.await;
|
||||
|
||||
// TODO: If we were in the set, yet were removed, drop the tributary
|
||||
|
||||
let mut txn = db.txn();
|
||||
SeraiDkgCompleted::set(&mut txn, set, &substrate_key);
|
||||
HandledEvent::handle_event(&mut txn, hash, event_id);
|
||||
txn.commit();
|
||||
}
|
||||
event_id += 1;
|
||||
}
|
||||
|
||||
for accepted_handover in serai.as_of(hash).validator_sets().accepted_handover_events().await? {
|
||||
let ValidatorSetsEvent::AcceptedHandover { set } = accepted_handover else {
|
||||
panic!("AcceptedHandover event wasn't AcceptedHandover: {accepted_handover:?}");
|
||||
};
|
||||
|
||||
let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
|
||||
if HandledEvent::is_unhandled(db, hash, event_id) {
|
||||
log::info!("found fresh accepted handover event {:?}", accepted_handover);
|
||||
// TODO: This isn't atomic with the event handling
|
||||
// Send a oneshot receiver so we can await the response?
|
||||
perform_slash_report.send(set).unwrap();
|
||||
let mut txn = db.txn();
|
||||
HandledEvent::handle_event(&mut txn, hash, event_id);
|
||||
txn.commit();
|
||||
}
|
||||
event_id += 1;
|
||||
}
|
||||
|
||||
for retired_set in serai.as_of(hash).validator_sets().set_retired_events().await? {
|
||||
let ValidatorSetsEvent::SetRetired { set } = retired_set else {
|
||||
panic!("SetRetired event wasn't SetRetired: {retired_set:?}");
|
||||
};
|
||||
|
||||
let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
|
||||
if HandledEvent::is_unhandled(db, hash, event_id) {
|
||||
log::info!("found fresh set retired event {:?}", retired_set);
|
||||
let mut txn = db.txn();
|
||||
crate::ActiveTributaryDb::retire_tributary(&mut txn, set);
|
||||
tributary_retired.send(set).unwrap();
|
||||
HandledEvent::handle_event(&mut txn, hash, event_id);
|
||||
txn.commit();
|
||||
}
|
||||
event_id += 1;
|
||||
}
|
||||
|
||||
// Finally, tell the processor of acknowledged blocks/burns
|
||||
// This uses a single event ID as, unlike the prior events which each executed their own code,
// all following events share data collection
|
||||
if HandledEvent::is_unhandled(db, hash, event_id) {
|
||||
let mut txn = db.txn();
|
||||
handle_batch_and_burns(&mut txn, processors, serai, &block).await?;
|
||||
HandledEvent::handle_event(&mut txn, hash, event_id);
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn handle_new_blocks<D: Db, Pro: Processors>(
|
||||
db: &mut D,
|
||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
|
||||
perform_slash_report: &mpsc::UnboundedSender<ExternalValidatorSet>,
|
||||
tributary_retired: &mpsc::UnboundedSender<ExternalValidatorSet>,
|
||||
processors: &Pro,
|
||||
serai: &Serai,
|
||||
next_block: &mut u64,
|
||||
) -> Result<(), SeraiError> {
|
||||
// Check if there's been a new Substrate block
|
||||
let latest_number = serai.latest_finalized_block().await?.number();
|
||||
|
||||
// Advance the cosigning protocol
|
||||
advance_cosign_protocol(db, key, serai, latest_number).await?;
|
||||
|
||||
// Reduce to the latest cosigned block
|
||||
let latest_number = latest_number.min(LatestCosignedBlock::latest_cosigned_block(db));
|
||||
|
||||
if latest_number < *next_block {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
for b in *next_block ..= latest_number {
|
||||
let block = serai
|
||||
.finalized_block_by_number(b)
|
||||
.await?
|
||||
.expect("couldn't get block before the latest finalized block");
|
||||
|
||||
log::info!("handling substrate block {b}");
|
||||
handle_block(
|
||||
db,
|
||||
key,
|
||||
new_tributary_spec,
|
||||
perform_slash_report,
|
||||
tributary_retired,
|
||||
processors,
|
||||
serai,
|
||||
block,
|
||||
)
|
||||
.await?;
|
||||
*next_block += 1;
|
||||
|
||||
let mut txn = db.txn();
|
||||
NextBlock::set(&mut txn, next_block);
|
||||
txn.commit();
|
||||
|
||||
log::info!("handled substrate block {b}");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn scan_task<D: Db, Pro: Processors>(
|
||||
mut db: D,
|
||||
key: Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
processors: Pro,
|
||||
serai: Arc<Serai>,
|
||||
new_tributary_spec: mpsc::UnboundedSender<TributarySpec>,
|
||||
perform_slash_report: mpsc::UnboundedSender<ExternalValidatorSet>,
|
||||
tributary_retired: mpsc::UnboundedSender<ExternalValidatorSet>,
|
||||
) {
|
||||
log::info!("scanning substrate");
|
||||
let mut next_substrate_block = NextBlock::get(&db).unwrap_or_default();
|
||||
|
||||
/*
|
||||
let new_substrate_block_notifier = {
|
||||
let serai = &serai;
|
||||
move || async move {
|
||||
loop {
|
||||
match serai.newly_finalized_block().await {
|
||||
Ok(sub) => return sub,
|
||||
Err(e) => {
|
||||
log::error!("couldn't communicate with serai node: {e}");
|
||||
sleep(Duration::from_secs(5)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
*/
|
||||
// TODO: Restore the above subscription-based system
|
||||
// That would require moving serai-client from HTTP to websockets
|
||||
let new_substrate_block_notifier = {
|
||||
let serai = &serai;
|
||||
move |next_substrate_block| async move {
|
||||
loop {
|
||||
match serai.latest_finalized_block().await {
|
||||
Ok(latest) => {
|
||||
if latest.header.number >= next_substrate_block {
|
||||
return latest;
|
||||
}
|
||||
sleep(Duration::from_secs(3)).await;
|
||||
}
|
||||
Err(e) => {
|
||||
log::error!("couldn't communicate with serai node: {e}");
|
||||
sleep(Duration::from_secs(5)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
loop {
|
||||
// await the next block, yet if our notifier had an error, re-create it
|
||||
{
|
||||
let Ok(_) = tokio::time::timeout(
|
||||
Duration::from_secs(60),
|
||||
new_substrate_block_notifier(next_substrate_block),
|
||||
)
|
||||
.await
|
||||
else {
|
||||
// Timed out, which may be because Serai isn't finalizing or may be some issue with the
|
||||
// notifier
|
||||
if serai.latest_finalized_block().await.map(|block| block.number()).ok() ==
|
||||
Some(next_substrate_block.saturating_sub(1))
|
||||
{
|
||||
log::info!("serai hasn't finalized a block in the last 60s...");
|
||||
}
|
||||
continue;
|
||||
};
|
||||
|
||||
/*
|
||||
// next_block is an Option<Result>
|
||||
if next_block.and_then(Result::ok).is_none() {
|
||||
substrate_block_notifier = new_substrate_block_notifier(next_substrate_block);
|
||||
continue;
|
||||
}
|
||||
*/
|
||||
}
|
||||
|
||||
match handle_new_blocks(
|
||||
&mut db,
|
||||
&key,
|
||||
&new_tributary_spec,
|
||||
&perform_slash_report,
|
||||
&tributary_retired,
|
||||
&processors,
|
||||
&serai,
|
||||
&mut next_substrate_block,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(()) => {}
|
||||
Err(e) => {
|
||||
log::error!("couldn't communicate with serai node: {e}");
|
||||
sleep(Duration::from_secs(5)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the expected ID for the next Batch.
|
||||
///
|
||||
/// Will log an error and apply a slight sleep on error, letting the caller simply immediately
|
||||
/// retry.
|
||||
pub(crate) async fn expected_next_batch(
|
||||
serai: &Serai,
|
||||
network: ExternalNetworkId,
|
||||
) -> Result<u32, SeraiError> {
|
||||
async fn expected_next_batch_inner(
|
||||
serai: &Serai,
|
||||
network: ExternalNetworkId,
|
||||
) -> Result<u32, SeraiError> {
|
||||
let serai = serai.as_of_latest_finalized_block().await?;
|
||||
let last = serai.in_instructions().last_batch_for_network(network).await?;
|
||||
Ok(if let Some(last) = last { last + 1 } else { 0 })
|
||||
}
|
||||
match expected_next_batch_inner(serai, network).await {
|
||||
Ok(next) => Ok(next),
|
||||
Err(e) => {
|
||||
log::error!("couldn't get the expected next batch from substrate: {e:?}");
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Verifies `Batch`s which have already been indexed from Substrate.
|
||||
///
|
||||
/// Spins if a distinct `Batch` is detected on-chain.
|
||||
///
|
||||
/// This has a slight malleability in that it doesn't verify *who* published a `Batch` is as expected.
|
||||
/// This is deemed fine.
|
||||
pub(crate) async fn verify_published_batches<D: Db>(
|
||||
txn: &mut D::Transaction<'_>,
|
||||
network: ExternalNetworkId,
|
||||
optimistic_up_to: u32,
|
||||
) -> Option<u32> {
|
||||
// TODO: Localize from MainDb to SubstrateDb
|
||||
let last = crate::LastVerifiedBatchDb::get(txn, network);
|
||||
for id in last.map_or(0, |last| last + 1) ..= optimistic_up_to {
|
||||
let Some(on_chain) = BatchInstructionsHashDb::get(txn, network, id) else {
|
||||
break;
|
||||
};
|
||||
let off_chain = crate::ExpectedBatchDb::get(txn, network, id).unwrap();
|
||||
if on_chain != off_chain {
|
||||
// Halt operations on this network and spin, as this is a critical fault
|
||||
loop {
|
||||
log::error!(
|
||||
"{}! network: {:?} id: {} off-chain: {} on-chain: {}",
|
||||
"on-chain batch doesn't match off-chain",
|
||||
network,
|
||||
id,
|
||||
hex::encode(off_chain),
|
||||
hex::encode(on_chain),
|
||||
);
|
||||
sleep(Duration::from_secs(60)).await;
|
||||
}
|
||||
}
|
||||
crate::LastVerifiedBatchDb::set(txn, network, &id);
|
||||
}
|
||||
|
||||
crate::LastVerifiedBatchDb::get(txn, network)
|
||||
}
|
||||
coordinator/src/tests/mod.rs
@@ -0,0 +1,125 @@
|
||||
use core::fmt::Debug;
|
||||
use std::{
|
||||
sync::Arc,
|
||||
collections::{VecDeque, HashSet, HashMap},
|
||||
};
|
||||
|
||||
use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet};
|
||||
|
||||
use processor_messages::CoordinatorMessage;
|
||||
|
||||
use async_trait::async_trait;
|
||||
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::{
|
||||
processors::{Message, Processors},
|
||||
TributaryP2p, ReqResMessageKind, GossipMessageKind, P2pMessageKind, Message as P2pMessage, P2p,
|
||||
};
|
||||
|
||||
pub mod tributary;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct MemProcessors(pub Arc<RwLock<HashMap<ExternalNetworkId, VecDeque<CoordinatorMessage>>>>);
|
||||
impl MemProcessors {
|
||||
#[allow(clippy::new_without_default)]
|
||||
pub fn new() -> MemProcessors {
|
||||
MemProcessors(Arc::new(RwLock::new(HashMap::new())))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Processors for MemProcessors {
|
||||
async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>) {
|
||||
let mut processors = self.0.write().await;
|
||||
let processor = processors.entry(network).or_insert_with(VecDeque::new);
|
||||
processor.push_back(msg.into());
|
||||
}
|
||||
async fn recv(&self, _: ExternalNetworkId) -> Message {
|
||||
todo!()
|
||||
}
|
||||
async fn ack(&self, _: Message) {
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct LocalP2p(
|
||||
usize,
|
||||
pub Arc<RwLock<(HashSet<Vec<u8>>, Vec<VecDeque<(usize, P2pMessageKind, Vec<u8>)>>)>>,
|
||||
);
|
||||
|
||||
impl LocalP2p {
|
||||
pub fn new(validators: usize) -> Vec<LocalP2p> {
|
||||
let shared = Arc::new(RwLock::new((HashSet::new(), vec![VecDeque::new(); validators])));
|
||||
let mut res = vec![];
|
||||
for i in 0 .. validators {
|
||||
res.push(LocalP2p(i, shared.clone()));
|
||||
}
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl P2p for LocalP2p {
|
||||
type Id = usize;
|
||||
|
||||
async fn subscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {}
|
||||
async fn unsubscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {}
|
||||
|
||||
async fn send_raw(&self, to: Self::Id, msg: Vec<u8>) {
|
||||
let mut msg_ref = msg.as_slice();
|
||||
let kind = ReqResMessageKind::read(&mut msg_ref).unwrap();
|
||||
self.1.write().await.1[to].push_back((self.0, P2pMessageKind::ReqRes(kind), msg_ref.to_vec()));
|
||||
}
|
||||
|
||||
async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>) {
|
||||
// Content-based deduplication
|
||||
let mut lock = self.1.write().await;
|
||||
{
|
||||
let already_sent = &mut lock.0;
|
||||
if already_sent.contains(&msg) {
|
||||
return;
|
||||
}
|
||||
already_sent.insert(msg.clone());
|
||||
}
|
||||
let queues = &mut lock.1;
|
||||
|
||||
let kind_len = (match kind {
|
||||
P2pMessageKind::ReqRes(kind) => kind.serialize(),
|
||||
P2pMessageKind::Gossip(kind) => kind.serialize(),
|
||||
})
|
||||
.len();
|
||||
let msg = msg[kind_len ..].to_vec();
|
||||
|
||||
for (i, msg_queue) in queues.iter_mut().enumerate() {
|
||||
if i == self.0 {
|
||||
continue;
|
||||
}
|
||||
msg_queue.push_back((self.0, kind, msg.clone()));
|
||||
}
|
||||
}
|
||||
|
||||
async fn receive(&self) -> P2pMessage<Self> {
|
||||
// This is a cursed way to implement an async read from a Vec
|
||||
loop {
|
||||
if let Some((sender, kind, msg)) = self.1.write().await.1[self.0].pop_front() {
|
||||
return P2pMessage { sender, kind, msg };
|
||||
}
|
||||
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl TributaryP2p for LocalP2p {
|
||||
async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) {
|
||||
<Self as P2p>::broadcast(
|
||||
self,
|
||||
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)),
|
||||
msg,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
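An aside on the broadcast path above: LocalP2p deduplicates broadcasts by message content before fanning them out to the other peers' queues. A minimal standalone sketch of that pattern, assuming only std (the names here are illustrative, not part of the crate):

use std::collections::HashSet;

// Returns true when the message is new and should be fanned out to peers,
// false when these exact bytes were already broadcast (content-based dedup).
fn should_broadcast(seen: &mut HashSet<Vec<u8>>, msg: &[u8]) -> bool {
  if seen.contains(msg) {
    return false;
  }
  seen.insert(msg.to_vec());
  true
}

fn main() {
  let mut seen = HashSet::new();
  assert!(should_broadcast(&mut seen, b"hello"));
  // A second broadcast of identical bytes is dropped
  assert!(!should_broadcast(&mut seen, b"hello"));
}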
coordinator/src/tests/tributary/chain.rs (new file, 240 lines)
@@ -0,0 +1,240 @@
|
||||
use std::{
|
||||
time::{Duration, SystemTime},
|
||||
collections::HashSet,
|
||||
};
|
||||
|
||||
use zeroize::Zeroizing;
|
||||
use rand_core::{RngCore, CryptoRng, OsRng};
|
||||
use futures_util::{task::Poll, poll};
|
||||
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{
|
||||
group::{ff::Field, GroupEncoding},
|
||||
Ciphersuite,
|
||||
};
|
||||
|
||||
use sp_application_crypto::sr25519;
|
||||
use borsh::BorshDeserialize;
|
||||
use serai_client::{
|
||||
primitives::ExternalNetworkId,
|
||||
validator_sets::primitives::{ExternalValidatorSet, Session},
|
||||
};
|
||||
|
||||
use tokio::time::sleep;
|
||||
|
||||
use serai_db::MemDb;
|
||||
|
||||
use tributary::Tributary;
|
||||
|
||||
use crate::{
|
||||
GossipMessageKind, P2pMessageKind, P2p,
|
||||
tributary::{Transaction, TributarySpec},
|
||||
tests::LocalP2p,
|
||||
};
|
||||
|
||||
pub fn new_keys<R: RngCore + CryptoRng>(
|
||||
rng: &mut R,
|
||||
) -> Vec<Zeroizing<<Ristretto as Ciphersuite>::F>> {
|
||||
let mut keys = vec![];
|
||||
for _ in 0 .. 5 {
|
||||
keys.push(Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut *rng)));
|
||||
}
|
||||
keys
|
||||
}
|
||||
|
||||
pub fn new_spec<R: RngCore + CryptoRng>(
|
||||
rng: &mut R,
|
||||
keys: &[Zeroizing<<Ristretto as Ciphersuite>::F>],
|
||||
) -> TributarySpec {
|
||||
let mut serai_block = [0; 32];
|
||||
rng.fill_bytes(&mut serai_block);
|
||||
|
||||
let start_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();
|
||||
|
||||
let set = ExternalValidatorSet { session: Session(0), network: ExternalNetworkId::Bitcoin };
|
||||
|
||||
let set_participants = keys
|
||||
.iter()
|
||||
.map(|key| {
|
||||
(sr25519::Public::from((<Ristretto as Ciphersuite>::generator() * **key).to_bytes()), 1)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let res = TributarySpec::new(serai_block, start_time, set, set_participants);
|
||||
assert_eq!(
|
||||
TributarySpec::deserialize_reader(&mut borsh::to_vec(&res).unwrap().as_slice()).unwrap(),
|
||||
res,
|
||||
);
|
||||
res
|
||||
}
|
||||
|
||||
pub async fn new_tributaries(
|
||||
keys: &[Zeroizing<<Ristretto as Ciphersuite>::F>],
|
||||
spec: &TributarySpec,
|
||||
) -> Vec<(MemDb, LocalP2p, Tributary<MemDb, Transaction, LocalP2p>)> {
|
||||
let p2p = LocalP2p::new(keys.len());
|
||||
let mut res = vec![];
|
||||
for (i, key) in keys.iter().enumerate() {
|
||||
let db = MemDb::new();
|
||||
res.push((
|
||||
db.clone(),
|
||||
p2p[i].clone(),
|
||||
Tributary::<_, Transaction, _>::new(
|
||||
db,
|
||||
spec.genesis(),
|
||||
spec.start_time(),
|
||||
key.clone(),
|
||||
spec.validators(),
|
||||
p2p[i].clone(),
|
||||
)
|
||||
.await
|
||||
.unwrap(),
|
||||
));
|
||||
}
|
||||
res
|
||||
}
|
||||
|
||||
pub async fn run_tributaries(
|
||||
mut tributaries: Vec<(LocalP2p, Tributary<MemDb, Transaction, LocalP2p>)>,
|
||||
) {
|
||||
loop {
|
||||
for (p2p, tributary) in &mut tributaries {
|
||||
while let Poll::Ready(msg) = poll!(p2p.receive()) {
|
||||
match msg.kind {
|
||||
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
|
||||
assert_eq!(genesis, tributary.genesis());
|
||||
if tributary.handle_message(&msg.msg).await {
|
||||
p2p.broadcast(msg.kind, msg.msg).await;
|
||||
}
|
||||
}
|
||||
_ => panic!("unexpected p2p message found"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn wait_for_tx_inclusion(
|
||||
tributary: &Tributary<MemDb, Transaction, LocalP2p>,
|
||||
mut last_checked: [u8; 32],
|
||||
hash: [u8; 32],
|
||||
) -> [u8; 32] {
|
||||
let reader = tributary.reader();
|
||||
loop {
|
||||
let tip = tributary.tip().await;
|
||||
if tip == last_checked {
|
||||
sleep(Duration::from_secs(1)).await;
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut queue = vec![reader.block(&tip).unwrap()];
|
||||
let mut block = None;
|
||||
while {
|
||||
let parent = queue.last().unwrap().parent();
|
||||
if parent == tributary.genesis() {
|
||||
false
|
||||
} else {
|
||||
block = Some(reader.block(&parent).unwrap());
|
||||
block.as_ref().unwrap().hash() != last_checked
|
||||
}
|
||||
} {
|
||||
queue.push(block.take().unwrap());
|
||||
}
|
||||
|
||||
while let Some(block) = queue.pop() {
|
||||
for tx in &block.transactions {
|
||||
if tx.hash() == hash {
|
||||
return block.hash();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
last_checked = tip;
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn tributary_test() {
|
||||
let keys = new_keys(&mut OsRng);
|
||||
let spec = new_spec(&mut OsRng, &keys);
|
||||
|
||||
let mut tributaries = new_tributaries(&keys, &spec)
|
||||
.await
|
||||
.into_iter()
|
||||
.map(|(_, p2p, tributary)| (p2p, tributary))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let mut blocks = 0;
|
||||
let mut last_block = spec.genesis();
|
||||
|
||||
// Doesn't use run_tributaries as we want to wind these down at a certain point
|
||||
// run_tributaries will run them ad infinitum
|
||||
let timeout = SystemTime::now() + Duration::from_secs(65);
|
||||
while (blocks < 10) && (SystemTime::now().duration_since(timeout).is_err()) {
|
||||
for (p2p, tributary) in &mut tributaries {
|
||||
while let Poll::Ready(msg) = poll!(p2p.receive()) {
|
||||
match msg.kind {
|
||||
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
|
||||
assert_eq!(genesis, tributary.genesis());
|
||||
tributary.handle_message(&msg.msg).await;
|
||||
}
|
||||
_ => panic!("unexpected p2p message found"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let tip = tributaries[0].1.tip().await;
|
||||
if tip != last_block {
|
||||
last_block = tip;
|
||||
blocks += 1;
|
||||
}
|
||||
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
}
|
||||
|
||||
if blocks != 10 {
|
||||
panic!("tributary chain test hit timeout");
|
||||
}
|
||||
|
||||
// Handle all existing messages
|
||||
for (p2p, tributary) in &mut tributaries {
|
||||
while let Poll::Ready(msg) = poll!(p2p.receive()) {
|
||||
match msg.kind {
|
||||
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
|
||||
assert_eq!(genesis, tributary.genesis());
|
||||
tributary.handle_message(&msg.msg).await;
|
||||
}
|
||||
_ => panic!("unexpected p2p message found"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handle_message informed the Tendermint machine, yet it still has to process it
|
||||
// Sleep for a second accordingly
|
||||
// TODO: Is there a better way to handle this?
|
||||
sleep(Duration::from_secs(1)).await;
|
||||
|
||||
// All tributaries should agree on the tip, within a block
|
||||
let mut tips = HashSet::new();
|
||||
for (_, tributary) in &tributaries {
|
||||
tips.insert(tributary.tip().await);
|
||||
}
|
||||
assert!(tips.len() <= 2);
|
||||
if tips.len() == 2 {
|
||||
for tip in &tips {
|
||||
// Find a Tributary where this isn't the tip
|
||||
for (_, tributary) in &tributaries {
|
||||
let Some(after) = tributary.reader().block_after(tip) else { continue };
|
||||
// Make sure the block after is the other tip
|
||||
assert!(tips.contains(&after));
|
||||
return;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
assert_eq!(tips.len(), 1);
|
||||
return;
|
||||
}
|
||||
panic!("tributary had different tip with a variance exceeding one block");
|
||||
}
|
||||
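wait_for_tx_inclusion above walks parent pointers from the current tip back to the last block it checked, then replays that range oldest-first looking for the transaction. A toy sketch of the traversal under an assumed parent map (not part of the crate):

use std::collections::HashMap;

// Collect the hashes from `tip` back to (but excluding) `stop`, ordered oldest-first.
fn blocks_since(parents: &HashMap<u8, u8>, tip: u8, stop: u8) -> Vec<u8> {
  let mut chain = vec![];
  let mut current = tip;
  while current != stop {
    chain.push(current);
    current = parents[&current];
  }
  chain.reverse();
  chain
}

fn main() {
  // 0 <- 1 <- 2 <- 3 (each block's parent is the prior one)
  let parents = HashMap::from([(1u8, 0u8), (2, 1), (3, 2)]);
  assert_eq!(blocks_since(&parents, 3, 1), vec![2, 3]);
}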
coordinator/src/tests/tributary/dkg.rs (new file, 394 lines)
@@ -0,0 +1,394 @@
|
||||
use core::time::Duration;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use zeroize::Zeroizing;
|
||||
use rand_core::{RngCore, OsRng};
|
||||
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite};
|
||||
use frost::Participant;
|
||||
|
||||
use sp_runtime::traits::Verify;
|
||||
use serai_client::{
|
||||
primitives::{SeraiAddress, Signature},
|
||||
validator_sets::primitives::{ExternalValidatorSet, KeyPair},
|
||||
};
|
||||
|
||||
use tokio::time::sleep;
|
||||
|
||||
use serai_db::{Get, DbTxn, Db, MemDb};
|
||||
|
||||
use processor_messages::{
|
||||
key_gen::{self, KeyGenId},
|
||||
CoordinatorMessage,
|
||||
};
|
||||
|
||||
use tributary::{TransactionTrait, Tributary};
|
||||
|
||||
use crate::{
|
||||
tributary::{
|
||||
Transaction, TributarySpec,
|
||||
scanner::{PublishSeraiTransaction, handle_new_blocks},
|
||||
},
|
||||
tests::{
|
||||
MemProcessors, LocalP2p,
|
||||
tributary::{new_keys, new_spec, new_tributaries, run_tributaries, wait_for_tx_inclusion},
|
||||
},
|
||||
};
|
||||
|
||||
#[tokio::test]
|
||||
async fn dkg_test() {
|
||||
env_logger::init();
|
||||
|
||||
let keys = new_keys(&mut OsRng);
|
||||
let spec = new_spec(&mut OsRng, &keys);
|
||||
|
||||
let full_tributaries = new_tributaries(&keys, &spec).await;
|
||||
let mut dbs = vec![];
|
||||
let mut tributaries = vec![];
|
||||
for (db, p2p, tributary) in full_tributaries {
|
||||
dbs.push(db);
|
||||
tributaries.push((p2p, tributary));
|
||||
}
|
||||
|
||||
// Run the tributaries in the background
|
||||
tokio::spawn(run_tributaries(tributaries.clone()));
|
||||
|
||||
let mut txs = vec![];
|
||||
// Create DKG commitments for each key
|
||||
for key in &keys {
|
||||
let attempt = 0;
|
||||
let mut commitments = vec![0; 256];
|
||||
OsRng.fill_bytes(&mut commitments);
|
||||
|
||||
let mut tx = Transaction::DkgCommitments {
|
||||
attempt,
|
||||
commitments: vec![commitments],
|
||||
signed: Transaction::empty_signed(),
|
||||
};
|
||||
tx.sign(&mut OsRng, spec.genesis(), key);
|
||||
txs.push(tx);
|
||||
}
|
||||
|
||||
let block_before_tx = tributaries[0].1.tip().await;
|
||||
|
||||
// Publish all commitments but one
|
||||
for (i, tx) in txs.iter().enumerate().skip(1) {
|
||||
assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
|
||||
}
|
||||
|
||||
// Wait until these are included
|
||||
for tx in txs.iter().skip(1) {
|
||||
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
|
||||
}
|
||||
|
||||
let expected_commitments: HashMap<_, _> = txs
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, tx)| {
|
||||
if let Transaction::DkgCommitments { commitments, .. } = tx {
|
||||
(Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments[0].clone())
|
||||
} else {
|
||||
panic!("txs had non-commitments");
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
async fn new_processors(
|
||||
db: &mut MemDb,
|
||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
spec: &TributarySpec,
|
||||
tributary: &Tributary<MemDb, Transaction, LocalP2p>,
|
||||
) -> MemProcessors {
|
||||
let processors = MemProcessors::new();
|
||||
handle_new_blocks::<_, _, _, _, _, LocalP2p>(
|
||||
db,
|
||||
key,
|
||||
&|_, _, _, _| async {
|
||||
panic!("provided TX caused recognized_id to be called in new_processors")
|
||||
},
|
||||
&processors,
|
||||
&(),
|
||||
&|_| async {
|
||||
panic!(
|
||||
"test tried to publish a new Tributary TX from handle_application_tx in new_processors"
|
||||
)
|
||||
},
|
||||
spec,
|
||||
&tributary.reader(),
|
||||
)
|
||||
.await;
|
||||
processors
|
||||
}
|
||||
|
||||
// Instantiate a scanner and verify it has nothing to report
|
||||
let processors = new_processors(&mut dbs[0], &keys[0], &spec, &tributaries[0].1).await;
|
||||
assert!(processors.0.read().await.is_empty());
|
||||
|
||||
// Publish the last commitment
|
||||
let block_before_tx = tributaries[0].1.tip().await;
|
||||
assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));
|
||||
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
|
||||
sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;
|
||||
|
||||
// Verify the scanner emits a KeyGen::Commitments message
|
||||
handle_new_blocks::<_, _, _, _, _, LocalP2p>(
|
||||
&mut dbs[0],
|
||||
&keys[0],
|
||||
&|_, _, _, _| async {
|
||||
panic!("provided TX caused recognized_id to be called after Commitments")
|
||||
},
|
||||
&processors,
|
||||
&(),
|
||||
&|_| async {
|
||||
panic!(
|
||||
"test tried to publish a new Tributary TX from handle_application_tx after Commitments"
|
||||
)
|
||||
},
|
||||
&spec,
|
||||
&tributaries[0].1.reader(),
|
||||
)
|
||||
.await;
|
||||
{
|
||||
let mut msgs = processors.0.write().await;
|
||||
assert_eq!(msgs.len(), 1);
|
||||
let msgs = msgs.get_mut(&spec.set().network).unwrap();
|
||||
let mut expected_commitments = expected_commitments.clone();
|
||||
expected_commitments.remove(&Participant::new((1).try_into().unwrap()).unwrap());
|
||||
assert_eq!(
|
||||
msgs.pop_front().unwrap(),
|
||||
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
|
||||
id: KeyGenId { session: spec.set().session, attempt: 0 },
|
||||
commitments: expected_commitments
|
||||
})
|
||||
);
|
||||
assert!(msgs.is_empty());
|
||||
}
|
||||
|
||||
// Verify all keys exhibit this scanner behavior
|
||||
for (i, key) in keys.iter().enumerate().skip(1) {
|
||||
let processors = new_processors(&mut dbs[i], key, &spec, &tributaries[i].1).await;
|
||||
let mut msgs = processors.0.write().await;
|
||||
assert_eq!(msgs.len(), 1);
|
||||
let msgs = msgs.get_mut(&spec.set().network).unwrap();
|
||||
let mut expected_commitments = expected_commitments.clone();
|
||||
expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
|
||||
assert_eq!(
|
||||
msgs.pop_front().unwrap(),
|
||||
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
|
||||
id: KeyGenId { session: spec.set().session, attempt: 0 },
|
||||
commitments: expected_commitments
|
||||
})
|
||||
);
|
||||
assert!(msgs.is_empty());
|
||||
}
|
||||
|
||||
// Now do shares
|
||||
let mut txs = vec![];
|
||||
for (k, key) in keys.iter().enumerate() {
|
||||
let attempt = 0;
|
||||
|
||||
let mut shares = vec![vec![]];
|
||||
for i in 0 .. keys.len() {
|
||||
if i != k {
|
||||
let mut share = vec![0; 256];
|
||||
OsRng.fill_bytes(&mut share);
|
||||
shares.last_mut().unwrap().push(share);
|
||||
}
|
||||
}
|
||||
|
||||
let mut txn = dbs[k].txn();
|
||||
let mut tx = Transaction::DkgShares {
|
||||
attempt,
|
||||
shares,
|
||||
confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0),
|
||||
signed: Transaction::empty_signed(),
|
||||
};
|
||||
txn.commit();
|
||||
tx.sign(&mut OsRng, spec.genesis(), key);
|
||||
txs.push(tx);
|
||||
}
|
||||
|
||||
let block_before_tx = tributaries[0].1.tip().await;
|
||||
for (i, tx) in txs.iter().enumerate().skip(1) {
|
||||
assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
|
||||
}
|
||||
for tx in txs.iter().skip(1) {
|
||||
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
|
||||
}
|
||||
|
||||
// With just 4 sets of shares, nothing should happen yet
|
||||
handle_new_blocks::<_, _, _, _, _, LocalP2p>(
|
||||
&mut dbs[0],
|
||||
&keys[0],
|
||||
&|_, _, _, _| async {
|
||||
panic!("provided TX caused recognized_id to be called after some shares")
|
||||
},
|
||||
&processors,
|
||||
&(),
|
||||
&|_| async {
|
||||
panic!(
|
||||
"test tried to publish a new Tributary TX from handle_application_tx after some shares"
|
||||
)
|
||||
},
|
||||
&spec,
|
||||
&tributaries[0].1.reader(),
|
||||
)
|
||||
.await;
|
||||
assert_eq!(processors.0.read().await.len(), 1);
|
||||
assert!(processors.0.read().await[&spec.set().network].is_empty());
|
||||
|
||||
// Publish the final set of shares
|
||||
let block_before_tx = tributaries[0].1.tip().await;
|
||||
assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));
|
||||
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
|
||||
sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;
|
||||
|
||||
// Each scanner should emit a distinct shares message
|
||||
let shares_for = |i: usize| {
|
||||
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares {
|
||||
id: KeyGenId { session: spec.set().session, attempt: 0 },
|
||||
shares: vec![txs
|
||||
.iter()
|
||||
.enumerate()
|
||||
.filter_map(|(l, tx)| {
|
||||
if let Transaction::DkgShares { shares, .. } = tx {
|
||||
if i == l {
|
||||
None
|
||||
} else {
|
||||
let relative_i = i - (if i > l { 1 } else { 0 });
|
||||
Some((
|
||||
Participant::new((l + 1).try_into().unwrap()).unwrap(),
|
||||
shares[0][relative_i].clone(),
|
||||
))
|
||||
}
|
||||
} else {
|
||||
panic!("txs had non-shares");
|
||||
}
|
||||
})
|
||||
.collect::<HashMap<_, _>>()],
|
||||
})
|
||||
};
|
||||
|
||||
// Any scanner which has handled the prior blocks should only emit the new event
|
||||
for (i, key) in keys.iter().enumerate() {
|
||||
handle_new_blocks::<_, _, _, _, _, LocalP2p>(
|
||||
&mut dbs[i],
|
||||
key,
|
||||
&|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") },
|
||||
&processors,
|
||||
&(),
|
||||
&|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") },
|
||||
&spec,
|
||||
&tributaries[i].1.reader(),
|
||||
)
|
||||
.await;
|
||||
{
|
||||
let mut msgs = processors.0.write().await;
|
||||
assert_eq!(msgs.len(), 1);
|
||||
let msgs = msgs.get_mut(&spec.set().network).unwrap();
|
||||
assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
|
||||
assert!(msgs.is_empty());
|
||||
}
|
||||
}
|
||||
|
||||
// Yet new scanners should emit all events
|
||||
for (i, key) in keys.iter().enumerate() {
|
||||
let processors = new_processors(&mut MemDb::new(), key, &spec, &tributaries[i].1).await;
|
||||
let mut msgs = processors.0.write().await;
|
||||
assert_eq!(msgs.len(), 1);
|
||||
let msgs = msgs.get_mut(&spec.set().network).unwrap();
|
||||
let mut expected_commitments = expected_commitments.clone();
|
||||
expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
|
||||
assert_eq!(
|
||||
msgs.pop_front().unwrap(),
|
||||
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
|
||||
id: KeyGenId { session: spec.set().session, attempt: 0 },
|
||||
commitments: expected_commitments
|
||||
})
|
||||
);
|
||||
assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
|
||||
assert!(msgs.is_empty());
|
||||
}
|
||||
|
||||
// Send DkgConfirmed
|
||||
let mut substrate_key = [0; 32];
|
||||
OsRng.fill_bytes(&mut substrate_key);
|
||||
let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()];
|
||||
OsRng.fill_bytes(&mut network_key);
|
||||
let key_pair =
|
||||
KeyPair(serai_client::Public::from(substrate_key), network_key.try_into().unwrap());
|
||||
|
||||
let mut txs = vec![];
|
||||
for (i, key) in keys.iter().enumerate() {
|
||||
let attempt = 0;
|
||||
let mut txn = dbs[i].txn();
|
||||
let share =
|
||||
crate::tributary::generated_key_pair::<MemDb>(&mut txn, key, &spec, &key_pair, 0).unwrap();
|
||||
txn.commit();
|
||||
|
||||
let mut tx = Transaction::DkgConfirmed {
|
||||
attempt,
|
||||
confirmation_share: share,
|
||||
signed: Transaction::empty_signed(),
|
||||
};
|
||||
tx.sign(&mut OsRng, spec.genesis(), key);
|
||||
txs.push(tx);
|
||||
}
|
||||
let block_before_tx = tributaries[0].1.tip().await;
|
||||
for (i, tx) in txs.iter().enumerate() {
|
||||
assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
|
||||
}
|
||||
for tx in &txs {
|
||||
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
|
||||
}
|
||||
|
||||
struct CheckPublishSetKeys {
|
||||
spec: TributarySpec,
|
||||
key_pair: KeyPair,
|
||||
}
|
||||
#[async_trait::async_trait]
|
||||
impl PublishSeraiTransaction for CheckPublishSetKeys {
|
||||
async fn publish_set_keys(
|
||||
&self,
|
||||
_db: &(impl Sync + Get),
|
||||
set: ExternalValidatorSet,
|
||||
removed: Vec<SeraiAddress>,
|
||||
key_pair: KeyPair,
|
||||
signature: Signature,
|
||||
) {
|
||||
assert_eq!(set, self.spec.set());
|
||||
assert!(removed.is_empty());
|
||||
assert_eq!(self.key_pair, key_pair);
|
||||
assert!(signature.verify(
|
||||
&*serai_client::validator_sets::primitives::set_keys_message(&set, &[], &key_pair),
|
||||
&serai_client::Public::from(
|
||||
dkg_musig::musig_key_vartime::<Ristretto>(
|
||||
serai_client::validator_sets::primitives::musig_context(set.into()),
|
||||
&self.spec.validators().into_iter().map(|(validator, _)| validator).collect::<Vec<_>>()
|
||||
)
|
||||
.unwrap()
|
||||
.to_bytes()
|
||||
),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
// The scanner should successfully try to publish a transaction with a validly signed signature
|
||||
handle_new_blocks::<_, _, _, _, _, LocalP2p>(
|
||||
&mut dbs[0],
|
||||
&keys[0],
|
||||
&|_, _, _, _| async {
|
||||
panic!("provided TX caused recognized_id to be called after DKG confirmation")
|
||||
},
|
||||
&processors,
|
||||
&CheckPublishSetKeys { spec: spec.clone(), key_pair: key_pair.clone() },
|
||||
&|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") },
|
||||
&spec,
|
||||
&tributaries[0].1.reader(),
|
||||
)
|
||||
.await;
|
||||
{
|
||||
assert!(processors.0.read().await.get(&spec.set().network).unwrap().is_empty());
|
||||
}
|
||||
}
|
||||
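The shares_for closure in dkg_test maps a recipient's global index into a sender's shares vector, which omits the sender's own slot. A standalone sketch of that index arithmetic (a hypothetical helper, shown only to illustrate the shift):

// For a sender `l`, the DkgShares vec omits the sender's own position, so a
// recipient `i` reads their share at an index shifted down by one when i > l.
fn relative_share_index(recipient: usize, sender: usize) -> usize {
  assert_ne!(recipient, sender);
  recipient - usize::from(recipient > sender)
}

fn main() {
  // With 5 participants, sender 2's shares are ordered for recipients 0, 1, 3, 4
  assert_eq!(relative_share_index(1, 2), 1);
  assert_eq!(relative_share_index(3, 2), 2);
  assert_eq!(relative_share_index(4, 2), 3);
}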
coordinator/src/tests/tributary/handle_p2p.rs (new file, 74 lines)
@@ -0,0 +1,74 @@
|
||||
use core::time::Duration;
|
||||
use std::sync::Arc;
|
||||
|
||||
use rand_core::OsRng;
|
||||
|
||||
use tokio::{
|
||||
sync::{mpsc, broadcast},
|
||||
time::sleep,
|
||||
};
|
||||
|
||||
use serai_db::MemDb;
|
||||
|
||||
use tributary::Tributary;
|
||||
|
||||
use crate::{
|
||||
tributary::Transaction,
|
||||
ActiveTributary, TributaryEvent,
|
||||
p2p::handle_p2p_task,
|
||||
tests::{
|
||||
LocalP2p,
|
||||
tributary::{new_keys, new_spec, new_tributaries},
|
||||
},
|
||||
};
|
||||
|
||||
#[tokio::test]
|
||||
async fn handle_p2p_test() {
|
||||
let keys = new_keys(&mut OsRng);
|
||||
let spec = new_spec(&mut OsRng, &keys);
|
||||
|
||||
let mut tributaries = new_tributaries(&keys, &spec)
|
||||
.await
|
||||
.into_iter()
|
||||
.map(|(_, p2p, tributary)| (p2p, tributary))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let mut tributary_senders = vec![];
|
||||
let mut tributary_arcs = vec![];
|
||||
for (p2p, tributary) in tributaries.drain(..) {
|
||||
let tributary = Arc::new(tributary);
|
||||
tributary_arcs.push(tributary.clone());
|
||||
let (new_tributary_send, new_tributary_recv) = broadcast::channel(5);
|
||||
let (cosign_send, _) = mpsc::unbounded_channel();
|
||||
tokio::spawn(handle_p2p_task(p2p, cosign_send, new_tributary_recv));
|
||||
new_tributary_send
|
||||
.send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary }))
|
||||
.map_err(|_| "failed to send ActiveTributary")
|
||||
.unwrap();
|
||||
tributary_senders.push(new_tributary_send);
|
||||
}
|
||||
let tributaries = tributary_arcs;
|
||||
|
||||
// After two blocks of time, we should have a new block
|
||||
// We don't wait one block of time as we may have missed the chance for this block
|
||||
sleep(Duration::from_secs((2 * Tributary::<MemDb, Transaction, LocalP2p>::block_time()).into()))
|
||||
.await;
|
||||
let tip = tributaries[0].tip().await;
|
||||
assert!(tip != spec.genesis());
|
||||
|
||||
// Sleep one second to make sure this block propagates
|
||||
sleep(Duration::from_secs(1)).await;
|
||||
// Make sure every tributary has it
|
||||
for tributary in &tributaries {
|
||||
assert!(tributary.reader().block(&tip).is_some());
|
||||
}
|
||||
|
||||
// Then after another block of time, we should have yet another new block
|
||||
sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;
|
||||
let new_tip = tributaries[0].tip().await;
|
||||
assert!(new_tip != tip);
|
||||
sleep(Duration::from_secs(1)).await;
|
||||
for tributary in tributaries {
|
||||
assert!(tributary.reader().block(&new_tip).is_some());
|
||||
}
|
||||
}
|
||||
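handle_p2p_test wires each tributary into the P2P task over a tokio broadcast channel before publishing the ActiveTributary event, so the event cannot be missed by the consumer. A minimal sketch of that wiring order, assuming only tokio (the event type is a stand-in):

use tokio::sync::broadcast;

#[derive(Clone, Debug)]
struct NewTributary(u64);

#[tokio::main]
async fn main() {
  // The channel is created, the consumer task is spawned with the receiver,
  // and only then is the event published, so it cannot be missed.
  let (send, mut recv) = broadcast::channel::<NewTributary>(5);
  let handle = tokio::spawn(async move {
    let event = recv.recv().await.unwrap();
    assert_eq!(event.0, 42);
  });
  send.send(NewTributary(42)).unwrap();
  handle.await.unwrap();
}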
coordinator/src/tests/tributary/mod.rs (new file, 294 lines)
@@ -0,0 +1,294 @@
|
||||
use core::fmt::Debug;
|
||||
|
||||
use rand_core::{RngCore, OsRng};
|
||||
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::Group, Ciphersuite};
|
||||
|
||||
use scale::{Encode, Decode};
|
||||
use serai_client::{
|
||||
primitives::{SeraiAddress, Signature},
|
||||
validator_sets::primitives::{ExternalValidatorSet, KeyPair, MAX_KEY_SHARES_PER_SET},
|
||||
};
|
||||
use processor_messages::coordinator::SubstrateSignableId;
|
||||
|
||||
use tributary::{ReadWrite, tests::random_signed_with_nonce};
|
||||
|
||||
use crate::tributary::{Label, SignData, Transaction, scanner::PublishSeraiTransaction};
|
||||
|
||||
mod chain;
|
||||
pub use chain::*;
|
||||
|
||||
mod tx;
|
||||
|
||||
mod dkg;
|
||||
// TODO: Test the other transactions
|
||||
|
||||
mod handle_p2p;
|
||||
mod sync;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl PublishSeraiTransaction for () {
|
||||
async fn publish_set_keys(
|
||||
&self,
|
||||
_db: &(impl Sync + serai_db::Get),
|
||||
_set: ExternalValidatorSet,
|
||||
_removed: Vec<SeraiAddress>,
|
||||
_key_pair: KeyPair,
|
||||
_signature: Signature,
|
||||
) {
|
||||
panic!("publish_set_keys was called in test")
|
||||
}
|
||||
}
|
||||
|
||||
fn random_u32<R: RngCore>(rng: &mut R) -> u32 {
|
||||
u32::try_from(rng.next_u64() >> 32).unwrap()
|
||||
}
|
||||
|
||||
fn random_vec<R: RngCore>(rng: &mut R, limit: usize) -> Vec<u8> {
|
||||
let len = usize::try_from(rng.next_u64() % u64::try_from(limit).unwrap()).unwrap();
|
||||
let mut res = vec![0; len];
|
||||
rng.fill_bytes(&mut res);
|
||||
res
|
||||
}
|
||||
|
||||
fn random_sign_data<R: RngCore, Id: Clone + PartialEq + Eq + Debug + Encode + Decode>(
|
||||
rng: &mut R,
|
||||
plan: Id,
|
||||
label: Label,
|
||||
) -> SignData<Id> {
|
||||
SignData {
|
||||
plan,
|
||||
attempt: random_u32(&mut OsRng),
|
||||
label,
|
||||
|
||||
data: {
|
||||
let mut res = vec![];
|
||||
for _ in 0 ..= (rng.next_u64() % 255) {
|
||||
res.push(random_vec(&mut OsRng, 512));
|
||||
}
|
||||
res
|
||||
},
|
||||
|
||||
signed: random_signed_with_nonce(&mut OsRng, label.nonce()),
|
||||
}
|
||||
}
|
||||
|
||||
fn test_read_write<RW: Eq + Debug + ReadWrite>(value: &RW) {
|
||||
assert_eq!(value, &RW::read::<&[u8]>(&mut value.serialize().as_ref()).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tx_size_limit() {
|
||||
use serai_client::validator_sets::primitives::MAX_KEY_LEN;
|
||||
|
||||
use tributary::TRANSACTION_SIZE_LIMIT;
|
||||
|
||||
let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1;
|
||||
let max_key_shares_per_individual = MAX_KEY_SHARES_PER_SET - max_dkg_coefficients;
|
||||
// Handwave the DKG Commitments size as the size of the commitments to the coefficients and
|
||||
// 1024 bytes for all overhead
|
||||
let handwaved_dkg_commitments_size = (max_dkg_coefficients * MAX_KEY_LEN) + 1024;
|
||||
assert!(
|
||||
u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=
|
||||
(handwaved_dkg_commitments_size * max_key_shares_per_individual)
|
||||
);
|
||||
|
||||
// Encryption key, PoP (2 elements), message
|
||||
let elements_per_share = 4;
|
||||
let handwaved_dkg_shares_size =
|
||||
(elements_per_share * MAX_KEY_LEN * MAX_KEY_SHARES_PER_SET) + 1024;
|
||||
assert!(
|
||||
u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=
|
||||
(handwaved_dkg_shares_size * max_key_shares_per_individual)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn serialize_sign_data() {
|
||||
fn test_read_write<Id: Clone + PartialEq + Eq + Debug + Encode + Decode>(value: &SignData<Id>) {
|
||||
let mut buf = vec![];
|
||||
value.write(&mut buf).unwrap();
|
||||
assert_eq!(value, &SignData::read(&mut buf.as_slice()).unwrap())
|
||||
}
|
||||
|
||||
let mut plan = [0; 3];
|
||||
OsRng.fill_bytes(&mut plan);
|
||||
test_read_write(&random_sign_data::<_, _>(
|
||||
&mut OsRng,
|
||||
plan,
|
||||
if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },
|
||||
));
|
||||
let mut plan = [0; 5];
|
||||
OsRng.fill_bytes(&mut plan);
|
||||
test_read_write(&random_sign_data::<_, _>(
|
||||
&mut OsRng,
|
||||
plan,
|
||||
if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },
|
||||
));
|
||||
let mut plan = [0; 8];
|
||||
OsRng.fill_bytes(&mut plan);
|
||||
test_read_write(&random_sign_data::<_, _>(
|
||||
&mut OsRng,
|
||||
plan,
|
||||
if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },
|
||||
));
|
||||
let mut plan = [0; 24];
|
||||
OsRng.fill_bytes(&mut plan);
|
||||
test_read_write(&random_sign_data::<_, _>(
|
||||
&mut OsRng,
|
||||
plan,
|
||||
if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn serialize_transaction() {
|
||||
test_read_write(&Transaction::RemoveParticipantDueToDkg {
|
||||
participant: <Ristretto as Ciphersuite>::G::random(&mut OsRng),
|
||||
signed: random_signed_with_nonce(&mut OsRng, 0),
|
||||
});
|
||||
|
||||
{
|
||||
let mut commitments = vec![random_vec(&mut OsRng, 512)];
|
||||
for _ in 0 .. (OsRng.next_u64() % 100) {
|
||||
let mut temp = commitments[0].clone();
|
||||
OsRng.fill_bytes(&mut temp);
|
||||
commitments.push(temp);
|
||||
}
|
||||
test_read_write(&Transaction::DkgCommitments {
|
||||
attempt: random_u32(&mut OsRng),
|
||||
commitments,
|
||||
signed: random_signed_with_nonce(&mut OsRng, 0),
|
||||
});
|
||||
}
|
||||
|
||||
{
|
||||
// This supports a variable share length, and variable amount of sent shares, yet share length
|
||||
// and sent shares are expected to be constant among recipients
|
||||
let share_len = usize::try_from((OsRng.next_u64() % 512) + 1).unwrap();
|
||||
let amount_of_shares = usize::try_from((OsRng.next_u64() % 3) + 1).unwrap();
|
||||
// Create a valid vec of shares
|
||||
let mut shares = vec![];
|
||||
// Create up to 150 participants
|
||||
for _ in 0 ..= (OsRng.next_u64() % 150) {
|
||||
// Give each sender multiple shares
|
||||
let mut sender_shares = vec![];
|
||||
for _ in 0 .. amount_of_shares {
|
||||
let mut share = vec![0; share_len];
|
||||
OsRng.fill_bytes(&mut share);
|
||||
sender_shares.push(share);
|
||||
}
|
||||
shares.push(sender_shares);
|
||||
}
|
||||
|
||||
test_read_write(&Transaction::DkgShares {
|
||||
attempt: random_u32(&mut OsRng),
|
||||
shares,
|
||||
confirmation_nonces: {
|
||||
let mut nonces = [0; 64];
|
||||
OsRng.fill_bytes(&mut nonces);
|
||||
nonces
|
||||
},
|
||||
signed: random_signed_with_nonce(&mut OsRng, 1),
|
||||
});
|
||||
}
|
||||
|
||||
for i in 0 .. 2 {
|
||||
test_read_write(&Transaction::InvalidDkgShare {
|
||||
attempt: random_u32(&mut OsRng),
|
||||
accuser: frost::Participant::new(
|
||||
u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
|
||||
)
|
||||
.unwrap(),
|
||||
faulty: frost::Participant::new(
|
||||
u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
|
||||
)
|
||||
.unwrap(),
|
||||
blame: if i == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(random_vec(&mut OsRng, 500)).filter(|blame| !blame.is_empty())
|
||||
},
|
||||
signed: random_signed_with_nonce(&mut OsRng, 2),
|
||||
});
|
||||
}
|
||||
|
||||
test_read_write(&Transaction::DkgConfirmed {
|
||||
attempt: random_u32(&mut OsRng),
|
||||
confirmation_share: {
|
||||
let mut share = [0; 32];
|
||||
OsRng.fill_bytes(&mut share);
|
||||
share
|
||||
},
|
||||
signed: random_signed_with_nonce(&mut OsRng, 2),
|
||||
});
|
||||
|
||||
{
|
||||
let mut block = [0; 32];
|
||||
OsRng.fill_bytes(&mut block);
|
||||
test_read_write(&Transaction::CosignSubstrateBlock(block));
|
||||
}
|
||||
|
||||
{
|
||||
let mut block = [0; 32];
|
||||
OsRng.fill_bytes(&mut block);
|
||||
let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap();
|
||||
test_read_write(&Transaction::Batch { block, batch });
|
||||
}
|
||||
test_read_write(&Transaction::SubstrateBlock(OsRng.next_u64()));
|
||||
|
||||
{
|
||||
let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap();
|
||||
test_read_write(&Transaction::SubstrateSign(random_sign_data(
|
||||
&mut OsRng,
|
||||
SubstrateSignableId::Batch(batch),
|
||||
Label::Preprocess,
|
||||
)));
|
||||
}
|
||||
{
|
||||
let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap();
|
||||
test_read_write(&Transaction::SubstrateSign(random_sign_data(
|
||||
&mut OsRng,
|
||||
SubstrateSignableId::Batch(batch),
|
||||
Label::Share,
|
||||
)));
|
||||
}
|
||||
|
||||
{
|
||||
let mut plan = [0; 32];
|
||||
OsRng.fill_bytes(&mut plan);
|
||||
test_read_write(&Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Preprocess)));
|
||||
}
|
||||
{
|
||||
let mut plan = [0; 32];
|
||||
OsRng.fill_bytes(&mut plan);
|
||||
test_read_write(&Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Share)));
|
||||
}
|
||||
|
||||
{
|
||||
let mut plan = [0; 32];
|
||||
OsRng.fill_bytes(&mut plan);
|
||||
let mut tx_hash = vec![0; (OsRng.next_u64() % 64).try_into().unwrap()];
|
||||
OsRng.fill_bytes(&mut tx_hash);
|
||||
test_read_write(&Transaction::SignCompleted {
|
||||
plan,
|
||||
tx_hash,
|
||||
first_signer: random_signed_with_nonce(&mut OsRng, 2).signer,
|
||||
signature: random_signed_with_nonce(&mut OsRng, 2).signature,
|
||||
});
|
||||
}
|
||||
|
||||
test_read_write(&Transaction::SlashReport(
|
||||
{
|
||||
let amount =
|
||||
usize::try_from(OsRng.next_u64() % u64::from(MAX_KEY_SHARES_PER_SET - 1)).unwrap();
|
||||
let mut points = vec![];
|
||||
for _ in 0 .. amount {
|
||||
points.push((OsRng.next_u64() >> 32).try_into().unwrap());
|
||||
}
|
||||
points
|
||||
},
|
||||
random_signed_with_nonce(&mut OsRng, 0),
|
||||
));
|
||||
}
|
||||
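tx_size_limit above bounds the worst-case DkgCommitments transaction as coefficients-times-key-length plus slack, multiplied by the most key shares a single participant may hold. A sketch of that arithmetic with placeholder constants (the real values live in serai_client and tributary; these numbers are assumptions for illustration):

// Placeholder values; the real constants are MAX_KEY_SHARES_PER_SET and MAX_KEY_LEN.
const MAX_KEY_SHARES_PER_SET: u32 = 600;
const MAX_KEY_LEN: u32 = 96;

fn main() {
  // A 2/3 + 1 threshold DKG has this many polynomial coefficients
  let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1;
  // One participant may hold every key share not needed for the threshold
  let max_key_shares_per_individual = MAX_KEY_SHARES_PER_SET - max_dkg_coefficients;
  // Commitments are one group element per coefficient, plus 1024 bytes of slack
  let handwaved_dkg_commitments_size = (max_dkg_coefficients * MAX_KEY_LEN) + 1024;
  println!(
    "worst-case commitments TX: {} bytes",
    handwaved_dkg_commitments_size * max_key_shares_per_individual
  );
}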
coordinator/src/tests/tributary/sync.rs (new file, 166 lines)
@@ -0,0 +1,166 @@
|
||||
use core::time::Duration;
|
||||
use std::{sync::Arc, collections::HashSet};
|
||||
|
||||
use rand_core::OsRng;
|
||||
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite};
|
||||
|
||||
use tokio::{
|
||||
sync::{mpsc, broadcast},
|
||||
time::sleep,
|
||||
};
|
||||
|
||||
use serai_db::MemDb;
|
||||
|
||||
use tributary::Tributary;
|
||||
|
||||
use crate::{
|
||||
tributary::Transaction,
|
||||
ActiveTributary, TributaryEvent,
|
||||
p2p::{heartbeat_tributaries_task, handle_p2p_task},
|
||||
tests::{
|
||||
LocalP2p,
|
||||
tributary::{new_keys, new_spec, new_tributaries},
|
||||
},
|
||||
};
|
||||
|
||||
#[tokio::test]
|
||||
async fn sync_test() {
|
||||
let mut keys = new_keys(&mut OsRng);
|
||||
let spec = new_spec(&mut OsRng, &keys);
|
||||
// Ensure this can have a node fail
|
||||
assert!(spec.n(&[]) > spec.t());
|
||||
|
||||
let mut tributaries = new_tributaries(&keys, &spec)
|
||||
.await
|
||||
.into_iter()
|
||||
.map(|(_, p2p, tributary)| (p2p, tributary))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// Keep a Tributary back, effectively having it offline
|
||||
let syncer_key = keys.pop().unwrap();
|
||||
let (syncer_p2p, syncer_tributary) = tributaries.pop().unwrap();
|
||||
|
||||
// Have the rest form a P2P net
|
||||
let mut tributary_senders = vec![];
|
||||
let mut tributary_arcs = vec![];
|
||||
let mut p2p_threads = vec![];
|
||||
for (p2p, tributary) in tributaries.drain(..) {
|
||||
let tributary = Arc::new(tributary);
|
||||
tributary_arcs.push(tributary.clone());
|
||||
let (new_tributary_send, new_tributary_recv) = broadcast::channel(5);
|
||||
let (cosign_send, _) = mpsc::unbounded_channel();
|
||||
let thread = tokio::spawn(handle_p2p_task(p2p, cosign_send, new_tributary_recv));
|
||||
new_tributary_send
|
||||
.send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary }))
|
||||
.map_err(|_| "failed to send ActiveTributary")
|
||||
.unwrap();
|
||||
tributary_senders.push(new_tributary_send);
|
||||
p2p_threads.push(thread);
|
||||
}
|
||||
let tributaries = tributary_arcs;
|
||||
|
||||
// After four blocks of time, we should have a new block
|
||||
// We don't wait one block of time as we may have missed the chance for the first block
|
||||
// We don't wait two blocks because we may have missed the chance, and then had a failure to
|
||||
// propose by our 'offline' validator, which would cause the Tendermint round time to increase,
|
||||
// requiring a longer delay
|
||||
let block_time = u64::from(Tributary::<MemDb, Transaction, LocalP2p>::block_time());
|
||||
sleep(Duration::from_secs(4 * block_time)).await;
|
||||
let tip = tributaries[0].tip().await;
|
||||
assert!(tip != spec.genesis());
|
||||
|
||||
// Sleep one second to make sure this block propagates
|
||||
sleep(Duration::from_secs(1)).await;
|
||||
// Make sure every tributary has it
|
||||
for tributary in &tributaries {
|
||||
assert!(tributary.reader().block(&tip).is_some());
|
||||
}
|
||||
|
||||
// Now that we've confirmed the other tributaries formed a net without issue, drop the syncer's
|
||||
// pending P2P messages
|
||||
syncer_p2p.1.write().await.1.last_mut().unwrap().clear();
|
||||
|
||||
// Have it join the net
|
||||
let syncer_key = Ristretto::generator() * *syncer_key;
|
||||
let syncer_tributary = Arc::new(syncer_tributary);
|
||||
let (syncer_tributary_send, syncer_tributary_recv) = broadcast::channel(5);
|
||||
let (cosign_send, _) = mpsc::unbounded_channel();
|
||||
tokio::spawn(handle_p2p_task(syncer_p2p.clone(), cosign_send, syncer_tributary_recv));
|
||||
syncer_tributary_send
|
||||
.send(TributaryEvent::NewTributary(ActiveTributary {
|
||||
spec: spec.clone(),
|
||||
tributary: syncer_tributary.clone(),
|
||||
}))
|
||||
.map_err(|_| "failed to send ActiveTributary to syncer")
|
||||
.unwrap();
|
||||
|
||||
// It shouldn't automatically catch up. If it somehow was, our test would be broken
|
||||
// Sanity check this
|
||||
let tip = tributaries[0].tip().await;
|
||||
// Wait until a new block occurs
|
||||
sleep(Duration::from_secs(3 * block_time)).await;
|
||||
// Make sure a new block actually occurred
|
||||
assert!(tributaries[0].tip().await != tip);
|
||||
// Make sure the new block alone didn't trigger catching up
|
||||
assert_eq!(syncer_tributary.tip().await, spec.genesis());
|
||||
|
||||
// Start the heartbeat protocol
|
||||
let (syncer_heartbeat_tributary_send, syncer_heartbeat_tributary_recv) = broadcast::channel(5);
|
||||
tokio::spawn(heartbeat_tributaries_task(syncer_p2p, syncer_heartbeat_tributary_recv));
|
||||
syncer_heartbeat_tributary_send
|
||||
.send(TributaryEvent::NewTributary(ActiveTributary {
|
||||
spec: spec.clone(),
|
||||
tributary: syncer_tributary.clone(),
|
||||
}))
|
||||
.map_err(|_| "failed to send ActiveTributary to heartbeat")
|
||||
.unwrap();
|
||||
|
||||
// The heartbeat is once every 10 blocks, with some limitations
|
||||
sleep(Duration::from_secs(20 * block_time)).await;
|
||||
assert!(syncer_tributary.tip().await != spec.genesis());
|
||||
|
||||
// Verify it synced to the tip
|
||||
let syncer_tip = {
|
||||
let tributary = &tributaries[0];
|
||||
|
||||
let tip = tributary.tip().await;
|
||||
let syncer_tip = syncer_tributary.tip().await;
|
||||
// Allow a one block tolerance in case of race conditions
|
||||
assert!(
|
||||
HashSet::from([tip, tributary.reader().block(&tip).unwrap().parent()]).contains(&syncer_tip)
|
||||
);
|
||||
syncer_tip
|
||||
};
|
||||
|
||||
sleep(Duration::from_secs(block_time)).await;
|
||||
|
||||
// Verify it's now keeping up
|
||||
assert!(syncer_tributary.tip().await != syncer_tip);
|
||||
|
||||
// Verify it's now participating in consensus
|
||||
// Because only `t` validators are used in a commit, take n - t nodes offline
|
||||
// leaving only `t` nodes, which should force it to participate in the consensus
|
||||
// of next blocks.
|
||||
let spares = usize::from(spec.n(&[]) - spec.t());
|
||||
for thread in p2p_threads.iter().take(spares) {
|
||||
thread.abort();
|
||||
}
|
||||
|
||||
// wait for a block
|
||||
sleep(Duration::from_secs(block_time)).await;
|
||||
|
||||
if syncer_tributary
|
||||
.reader()
|
||||
.parsed_commit(&syncer_tributary.tip().await)
|
||||
.unwrap()
|
||||
.validators
|
||||
.iter()
|
||||
.any(|signer| signer == &syncer_key.to_bytes())
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
panic!("synced tributary didn't start participating in consensus");
|
||||
}
|
||||
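sync_test knocks n - t peers offline so only a threshold of validators remains online and the freshly synced node must sign commits. A small sketch of that count, assuming the threshold is the usual 2/3 + 1 of n key shares (mirroring required_participation elsewhere in this diff):

// Illustrative only: assumes t is computed as 2/3 of n, plus one.
fn threshold(n: u16) -> u16 {
  ((2 * n) / 3) + 1
}

fn main() {
  let n = 5u16;
  let t = threshold(n);
  // The sync test aborts `n - t` P2P tasks, leaving exactly `t` validators online,
  // so the synced validator has to take part for new commits to form.
  let spares = n - t;
  assert_eq!(t, 4);
  assert_eq!(spares, 1);
}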
coordinator/src/tests/tributary/tx.rs (new file, 63 lines)
@@ -0,0 +1,63 @@
use core::time::Duration;

use rand_core::{RngCore, OsRng};

use tokio::time::sleep;

use serai_db::MemDb;

use tributary::{
  transaction::Transaction as TransactionTrait, Transaction as TributaryTransaction, Tributary,
};

use crate::{
  tributary::Transaction,
  tests::{
    LocalP2p,
    tributary::{new_keys, new_spec, new_tributaries, run_tributaries, wait_for_tx_inclusion},
  },
};

#[tokio::test]
async fn tx_test() {
  let keys = new_keys(&mut OsRng);
  let spec = new_spec(&mut OsRng, &keys);

  let tributaries = new_tributaries(&keys, &spec)
    .await
    .into_iter()
    .map(|(_, p2p, tributary)| (p2p, tributary))
    .collect::<Vec<_>>();

  // Run the tributaries in the background
  tokio::spawn(run_tributaries(tributaries.clone()));

  // Send a TX from a random Tributary
  let sender =
    usize::try_from(OsRng.next_u64() % u64::try_from(tributaries.len()).unwrap()).unwrap();
  let key = keys[sender].clone();

  let attempt = 0;
  let mut commitments = vec![0; 256];
  OsRng.fill_bytes(&mut commitments);

  // Create the TX with a null signature so we can get its sig hash
  let block_before_tx = tributaries[sender].1.tip().await;
  let mut tx = Transaction::DkgCommitments {
    attempt,
    commitments: vec![commitments.clone()],
    signed: Transaction::empty_signed(),
  };
  tx.sign(&mut OsRng, spec.genesis(), &key);

  assert_eq!(tributaries[sender].1.add_transaction(tx.clone()).await, Ok(true));
  let included_in = wait_for_tx_inclusion(&tributaries[sender].1, block_before_tx, tx.hash()).await;
  // Also sleep for the block time to ensure the block is synced around before we run checks on it
  sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;

  // All tributaries should have acknowledged this transaction in a block
  for (_, tributary) in tributaries {
    let block = tributary.reader().block(&included_in).unwrap();
    assert_eq!(block.transactions, vec![TributaryTransaction::Application(tx.clone())]);
  }
}
@@ -3,418 +3,196 @@ use std::collections::HashMap;
|
||||
use scale::Encode;
|
||||
use borsh::{BorshSerialize, BorshDeserialize};
|
||||
|
||||
use serai_client::{primitives::SeraiAddress, validator_sets::primitives::ValidatorSet};
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite};
|
||||
use frost::Participant;
|
||||
|
||||
use messages::sign::{VariantSignId, SignId};
|
||||
use serai_client::validator_sets::primitives::{KeyPair, ExternalValidatorSet};
|
||||
|
||||
use serai_db::*;
|
||||
use processor_messages::coordinator::SubstrateSignableId;
|
||||
|
||||
use crate::tributary::transaction::SigningProtocolRound;
|
||||
pub use serai_db::*;
|
||||
|
||||
use tributary::ReadWrite;
|
||||
|
||||
use crate::tributary::{Label, Transaction};
|
||||
|
||||
/// A topic within the database which the group participates in
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
|
||||
pub(crate) enum Topic {
|
||||
/// Vote to remove a participant
|
||||
RemoveParticipant { participant: SeraiAddress },
|
||||
|
||||
// DkgParticipation isn't represented here as participations are immediately sent to the
|
||||
// processor, not accumulated within this database
|
||||
/// Participation in the signing protocol to confirm the DKG results on Substrate
|
||||
DkgConfirmation { attempt: u32, round: SigningProtocolRound },
|
||||
|
||||
/// The local view of the SlashReport, to be aggregated into the final SlashReport
|
||||
SlashReport,
|
||||
|
||||
/// Participation in a signing protocol
|
||||
Sign { id: VariantSignId, attempt: u32, round: SigningProtocolRound },
|
||||
pub enum Topic {
|
||||
Dkg,
|
||||
DkgConfirmation,
|
||||
SubstrateSign(SubstrateSignableId),
|
||||
Sign([u8; 32]),
|
||||
}
|
||||
|
||||
enum Participating {
|
||||
Participated,
|
||||
Everyone,
|
||||
// A struct to refer to a piece of data all validators will presumably provide a value for.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode)]
|
||||
pub struct DataSpecification {
|
||||
pub topic: Topic,
|
||||
pub label: Label,
|
||||
pub attempt: u32,
|
||||
}
|
||||
|
||||
impl Topic {
|
||||
// The topic used by the next attempt of this protocol
|
||||
fn next_attempt_topic(self) -> Option<Topic> {
|
||||
#[allow(clippy::match_same_arms)]
|
||||
match self {
|
||||
Topic::RemoveParticipant { .. } => None,
|
||||
Topic::DkgConfirmation { attempt, round: _ } => Some(Topic::DkgConfirmation {
|
||||
attempt: attempt + 1,
|
||||
round: SigningProtocolRound::Preprocess,
|
||||
}),
|
||||
Topic::SlashReport { .. } => None,
|
||||
Topic::Sign { id, attempt, round: _ } => {
|
||||
Some(Topic::Sign { id, attempt: attempt + 1, round: SigningProtocolRound::Preprocess })
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// The topic for the re-attempt to schedule
|
||||
fn reattempt_topic(self) -> Option<(u32, Topic)> {
|
||||
#[allow(clippy::match_same_arms)]
|
||||
match self {
|
||||
Topic::RemoveParticipant { .. } => None,
|
||||
Topic::DkgConfirmation { attempt, round } => match round {
|
||||
SigningProtocolRound::Preprocess => {
|
||||
let attempt = attempt + 1;
|
||||
Some((
|
||||
attempt,
|
||||
Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Preprocess },
|
||||
))
|
||||
}
|
||||
SigningProtocolRound::Share => None,
|
||||
},
|
||||
Topic::SlashReport { .. } => None,
|
||||
Topic::Sign { id, attempt, round } => match round {
|
||||
SigningProtocolRound::Preprocess => {
|
||||
let attempt = attempt + 1;
|
||||
Some((attempt, Topic::Sign { id, attempt, round: SigningProtocolRound::Preprocess }))
|
||||
}
|
||||
SigningProtocolRound::Share => None,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// The SignId for this topic
|
||||
//
|
||||
// Returns None if Topic isn't Topic::Sign
|
||||
pub(crate) fn sign_id(self, set: ValidatorSet) -> Option<messages::sign::SignId> {
|
||||
#[allow(clippy::match_same_arms)]
|
||||
match self {
|
||||
Topic::RemoveParticipant { .. } => None,
|
||||
Topic::DkgConfirmation { .. } => None,
|
||||
Topic::SlashReport { .. } => None,
|
||||
Topic::Sign { id, attempt, round: _ } => Some(SignId { session: set.session, id, attempt }),
|
||||
}
|
||||
}
|
||||
|
||||
/// The topic which precedes this topic as a prerequisite
|
||||
///
|
||||
/// The preceding topic must define this topic as succeeding
|
||||
fn preceding_topic(self) -> Option<Topic> {
|
||||
#[allow(clippy::match_same_arms)]
|
||||
match self {
|
||||
Topic::RemoveParticipant { .. } => None,
|
||||
Topic::DkgConfirmation { attempt, round } => match round {
|
||||
SigningProtocolRound::Preprocess => None,
|
||||
SigningProtocolRound::Share => {
|
||||
Some(Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Preprocess })
|
||||
}
|
||||
},
|
||||
Topic::SlashReport { .. } => None,
|
||||
Topic::Sign { id, attempt, round } => match round {
|
||||
SigningProtocolRound::Preprocess => None,
|
||||
SigningProtocolRound::Share => {
|
||||
Some(Topic::Sign { id, attempt, round: SigningProtocolRound::Preprocess })
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// The topic which succeeds this topic, with this topic as a prerequisite
|
||||
///
|
||||
/// The succeeding topic must define this topic as preceding
|
||||
fn succeeding_topic(self) -> Option<Topic> {
|
||||
#[allow(clippy::match_same_arms)]
|
||||
match self {
|
||||
Topic::RemoveParticipant { .. } => None,
|
||||
Topic::DkgConfirmation { attempt, round } => match round {
|
||||
SigningProtocolRound::Preprocess => {
|
||||
Some(Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Share })
|
||||
}
|
||||
SigningProtocolRound::Share => None,
|
||||
},
|
||||
Topic::SlashReport { .. } => None,
|
||||
Topic::Sign { id, attempt, round } => match round {
|
||||
SigningProtocolRound::Preprocess => {
|
||||
Some(Topic::Sign { id, attempt, round: SigningProtocolRound::Share })
|
||||
}
|
||||
SigningProtocolRound::Share => None,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn requires_whitelisting(&self) -> bool {
|
||||
#[allow(clippy::match_same_arms)]
|
||||
match self {
|
||||
// We don't require whitelisting to remove a participant
|
||||
Topic::RemoveParticipant { .. } => false,
|
||||
// We don't require whitelisting for the first attempt, solely the re-attempts
|
||||
Topic::DkgConfirmation { attempt, .. } => *attempt != 0,
|
||||
// We don't require whitelisting for the slash report
|
||||
Topic::SlashReport { .. } => false,
|
||||
// We do require whitelisting for every sign protocol
|
||||
Topic::Sign { .. } => true,
|
||||
}
|
||||
}
|
||||
|
||||
fn required_participation(&self, n: u64) -> u64 {
|
||||
let _ = self;
|
||||
// All of our topics require 2/3rds participation
|
||||
((2 * n) / 3) + 1
|
||||
}
|
||||
|
||||
fn participating(&self) -> Participating {
|
||||
#[allow(clippy::match_same_arms)]
|
||||
match self {
|
||||
Topic::RemoveParticipant { .. } => Participating::Everyone,
|
||||
Topic::DkgConfirmation { .. } => Participating::Participated,
|
||||
Topic::SlashReport { .. } => Participating::Everyone,
|
||||
Topic::Sign { .. } => Participating::Participated,
|
||||
}
|
||||
}
|
||||
pub enum DataSet {
|
||||
Participating(HashMap<Participant, Vec<u8>>),
|
||||
NotParticipating,
|
||||
}
|
||||
|
||||
/// The resulting data set from an accumulation
|
||||
pub(crate) enum DataSet<D: Borshy> {
|
||||
/// Accumulating this did not produce a data set to act on
|
||||
/// (non-existent, not ready, prior handled, not participating, etc.)
|
||||
None,
|
||||
/// The data set was ready and we are participating in this event
|
||||
Participating(HashMap<SeraiAddress, D>),
|
||||
pub enum Accumulation {
|
||||
Ready(DataSet),
|
||||
NotReady,
|
||||
}
|
||||
|
||||
trait Borshy: BorshSerialize + BorshDeserialize {}
|
||||
impl<T: BorshSerialize + BorshDeserialize> Borshy for T {}
|
||||
|
||||
// TODO: Move from genesis to set for indexing
|
||||
create_db!(
|
||||
CoordinatorTributary {
|
||||
// The last handled tributary block's (number, hash)
|
||||
LastHandledTributaryBlock: (set: ValidatorSet) -> (u64, [u8; 32]),
|
||||
Tributary {
|
||||
SeraiBlockNumber: (hash: [u8; 32]) -> u64,
|
||||
SeraiDkgCompleted: (spec: ExternalValidatorSet) -> [u8; 32],
|
||||
|
||||
// The slash points a validator has accrued, with u64::MAX representing a fatal slash.
|
||||
SlashPoints: (set: ValidatorSet, validator: SeraiAddress) -> u64,
|
||||
TributaryBlockNumber: (block: [u8; 32]) -> u32,
|
||||
LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32],
|
||||
|
||||
// The latest Substrate block to cosign.
|
||||
LatestSubstrateBlockToCosign: (set: ValidatorSet) -> [u8; 32],
|
||||
// If we're actively cosigning or not.
|
||||
ActivelyCosigning: (set: ValidatorSet) -> (),
|
||||
// TODO: Revisit the point of this
|
||||
FatalSlashes: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
|
||||
RemovedAsOfDkgAttempt: (genesis: [u8; 32], attempt: u32) -> Vec<[u8; 32]>,
|
||||
OfflineDuringDkg: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
|
||||
// TODO: Combine these two
|
||||
FatallySlashed: (genesis: [u8; 32], account: [u8; 32]) -> (),
|
||||
SlashPoints: (genesis: [u8; 32], account: [u8; 32]) -> u32,
|
||||
|
||||
// The weight accumulated for a topic.
|
||||
AccumulatedWeight: (set: ValidatorSet, topic: Topic) -> u64,
|
||||
// The entries accumulated for a topic, by validator.
|
||||
Accumulated: <D: Borshy>(set: ValidatorSet, topic: Topic, validator: SeraiAddress) -> D,
|
||||
VotedToRemove: (genesis: [u8; 32], voter: [u8; 32], to_remove: [u8; 32]) -> (),
|
||||
VotesToRemove: (genesis: [u8; 32], to_remove: [u8; 32]) -> u16,
|
||||
|
||||
// Topics to be recognized as of a certain block number due to the reattempt protocol.
|
||||
Reattempt: (set: ValidatorSet, block_number: u64) -> Vec<Topic>,
|
||||
AttemptDb: (genesis: [u8; 32], topic: &Topic) -> u32,
|
||||
ReattemptDb: (genesis: [u8; 32], block: u32) -> Vec<Topic>,
|
||||
DataReceived: (genesis: [u8; 32], data_spec: &DataSpecification) -> u16,
|
||||
DataDb: (genesis: [u8; 32], data_spec: &DataSpecification, signer_bytes: &[u8; 32]) -> Vec<u8>,
|
||||
|
||||
DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec<u8>,
|
||||
ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
|
||||
DkgKeyPair: (genesis: [u8; 32], attempt: u32) -> KeyPair,
|
||||
KeyToDkgAttempt: (key: [u8; 32]) -> u32,
|
||||
DkgLocallyCompleted: (genesis: [u8; 32]) -> (),
|
||||
|
||||
PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>,
|
||||
|
||||
SignedTransactionDb: (order: &[u8], nonce: u32) -> Vec<u8>,
|
||||
|
||||
SlashReports: (genesis: [u8; 32], signer: [u8; 32]) -> Vec<u32>,
|
||||
SlashReported: (genesis: [u8; 32]) -> u16,
|
||||
SlashReportCutOff: (genesis: [u8; 32]) -> u64,
|
||||
SlashReport: (set: ExternalValidatorSet) -> Vec<([u8; 32], u32)>,
|
||||
}
|
||||
);
|
||||
|
||||
db_channel!(
|
||||
CoordinatorTributary {
|
||||
ProcessorMessages: (set: ValidatorSet) -> messages::CoordinatorMessage,
|
||||
}
|
||||
);
|
||||
|
||||
pub(crate) struct TributaryDb;
|
||||
impl TributaryDb {
|
||||
pub(crate) fn last_handled_tributary_block(
|
||||
getter: &impl Get,
|
||||
set: ValidatorSet,
|
||||
) -> Option<(u64, [u8; 32])> {
|
||||
LastHandledTributaryBlock::get(getter, set)
|
||||
}
|
||||
pub(crate) fn set_last_handled_tributary_block(
|
||||
txn: &mut impl DbTxn,
|
||||
set: ValidatorSet,
|
||||
block_number: u64,
|
||||
block_hash: [u8; 32],
|
||||
) {
|
||||
LastHandledTributaryBlock::set(txn, set, &(block_number, block_hash));
|
||||
}
|
||||
|
||||
pub(crate) fn latest_substrate_block_to_cosign(
|
||||
getter: &impl Get,
|
||||
set: ValidatorSet,
|
||||
) -> Option<[u8; 32]> {
|
||||
LatestSubstrateBlockToCosign::get(getter, set)
|
||||
}
|
||||
pub(crate) fn set_latest_substrate_block_to_cosign(
|
||||
txn: &mut impl DbTxn,
|
||||
set: ValidatorSet,
|
||||
substrate_block_hash: [u8; 32],
|
||||
) {
|
||||
LatestSubstrateBlockToCosign::set(txn, set, &substrate_block_hash);
|
||||
}
|
||||
pub(crate) fn actively_cosigning(txn: &mut impl DbTxn, set: ValidatorSet) -> bool {
|
||||
ActivelyCosigning::get(txn, set).is_some()
|
||||
}
|
||||
pub(crate) fn start_cosigning(
|
||||
txn: &mut impl DbTxn,
|
||||
set: ValidatorSet,
|
||||
substrate_block_number: u64,
|
||||
) {
|
||||
assert!(
|
||||
ActivelyCosigning::get(txn, set).is_none(),
|
||||
"starting cosigning while already cosigning"
|
||||
);
|
||||
ActivelyCosigning::set(txn, set, &());
|
||||
|
||||
TributaryDb::recognize_topic(
|
||||
txn,
|
||||
set,
|
||||
Topic::Sign {
|
||||
id: VariantSignId::Cosign(substrate_block_number),
|
||||
attempt: 0,
|
||||
round: SigningProtocolRound::Preprocess,
|
||||
},
|
||||
);
|
||||
}
|
||||
pub(crate) fn finish_cosigning(txn: &mut impl DbTxn, set: ValidatorSet) {
|
||||
assert!(ActivelyCosigning::take(txn, set).is_some(), "finished cosigning but not cosigning");
|
||||
}
|
||||
|
||||
pub(crate) fn recognize_topic(txn: &mut impl DbTxn, set: ValidatorSet, topic: Topic) {
|
||||
AccumulatedWeight::set(txn, set, topic, &0);
|
||||
}
|
||||
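recognize_topic whitelists a topic by writing an accumulated weight of zero, which lets accumulate distinguish "never recognized" (None) from "recognized, nothing received yet" (Some(0)). A standalone sketch of that sentinel pattern (the map and names are illustrative, not part of this file):

use std::collections::HashMap;

fn recognize(weights: &mut HashMap<String, u64>, topic: &str) {
  // Writing 0 marks the topic as recognized without crediting any participation
  weights.entry(topic.to_string()).or_insert(0);
}

fn main() {
  let mut weights = HashMap::new();
  assert!(weights.get("sign").is_none()); // never whitelisted
  recognize(&mut weights, "sign");
  assert_eq!(weights.get("sign"), Some(&0)); // recognized, no weight accumulated
}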
|
||||
pub(crate) fn start_of_block(txn: &mut impl DbTxn, set: ValidatorSet, block_number: u64) {
|
||||
for topic in Reattempt::take(txn, set, block_number).unwrap_or(vec![]) {
|
||||
// TODO: Slash all people who preprocessed but didn't share
|
||||
Self::recognize_topic(txn, set, topic);
|
||||
if let Some(id) = topic.sign_id(set) {
|
||||
Self::send_message(txn, set, messages::sign::CoordinatorMessage::Reattempt { id });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn fatal_slash(
|
||||
txn: &mut impl DbTxn,
|
||||
set: ValidatorSet,
|
||||
validator: SeraiAddress,
|
||||
reason: &str,
|
||||
) {
|
||||
log::warn!("{validator} fatally slashed: {reason}");
|
||||
SlashPoints::set(txn, set, validator, &u64::MAX);
|
||||
}
|
||||
|
||||
pub(crate) fn is_fatally_slashed(
|
||||
getter: &impl Get,
|
||||
set: ValidatorSet,
|
||||
validator: SeraiAddress,
|
||||
) -> bool {
|
||||
SlashPoints::get(getter, set, validator).unwrap_or(0) == u64::MAX
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub(crate) fn accumulate<D: Borshy>(
|
||||
txn: &mut impl DbTxn,
|
||||
set: ValidatorSet,
|
||||
validators: &[SeraiAddress],
|
||||
total_weight: u64,
|
||||
block_number: u64,
|
||||
topic: Topic,
|
||||
validator: SeraiAddress,
|
||||
validator_weight: u64,
|
||||
data: &D,
|
||||
) -> DataSet<D> {
|
||||
// This function will only be called once per (validator, topic) pair due to how we handle
// nonces on transactions (they're assigned deterministically from the topic)
|
||||
|
||||
let accumulated_weight = AccumulatedWeight::get(txn, set, topic);
|
||||
if topic.requires_whitelisting() && accumulated_weight.is_none() {
|
||||
Self::fatal_slash(txn, set, validator, "participated in unrecognized topic");
|
||||
return DataSet::None;
|
||||
}
|
||||
let mut accumulated_weight = accumulated_weight.unwrap_or(0);
|
||||
|
||||
// If there's a preceding topic, check this validator participated in it
|
||||
let preceding_topic = topic.preceding_topic();
|
||||
if let Some(preceding_topic) = preceding_topic {
|
||||
if Accumulated::<D>::get(txn, set, preceding_topic, validator).is_none() {
|
||||
Self::fatal_slash(
|
||||
txn,
|
||||
set,
|
||||
validator,
|
||||
"participated in topic without participating in prior",
|
||||
);
|
||||
return DataSet::None;
|
||||
}
|
||||
}
|
||||
|
||||
// The complete lack of validation on the data by these NOPs opens the potential for spam here
|
||||
|
||||
// If we've already accumulated past the threshold, NOP
|
||||
if accumulated_weight >= topic.required_participation(total_weight) {
|
||||
return DataSet::None;
|
||||
}
|
||||
// If this is for an old attempt, NOP
|
||||
if let Some(next_attempt_topic) = topic.next_attempt_topic() {
|
||||
if AccumulatedWeight::get(txn, set, next_attempt_topic).is_some() {
|
||||
return DataSet::None;
|
||||
}
|
||||
}
|
||||
|
||||
// Accumulate the data
|
||||
accumulated_weight += validator_weight;
|
||||
AccumulatedWeight::set(txn, set, topic, &accumulated_weight);
|
||||
Accumulated::set(txn, set, topic, validator, data);
|
||||
|
||||
// Check if we now cross the weight threshold
|
||||
if accumulated_weight >= topic.required_participation(total_weight) {
|
||||
// Queue this for re-attempt after enough time passes
|
||||
let reattempt_topic = topic.reattempt_topic();
|
||||
if let Some((attempt, reattempt_topic)) = reattempt_topic {
|
||||
// 5 minutes
|
||||
#[cfg(not(feature = "longer-reattempts"))]
|
||||
const BASE_REATTEMPT_DELAY: u32 =
|
||||
(5u32 * 60 * 1000).div_ceil(tributary::tendermint::TARGET_BLOCK_TIME);
|
||||
|
||||
// 10 minutes, intended for high-latency environments like the GitHub CI
|
||||
#[cfg(feature = "longer-reattempts")]
|
||||
const BASE_REATTEMPT_DELAY: u32 =
|
||||
(10u32 * 60 * 1000).div_ceil(tributary::tendermint::TARGET_BLOCK_TIME);
|
||||
|
||||
// Linearly scale the time for the protocol with the attempt number
|
||||
let blocks_till_reattempt = u64::from(attempt * BASE_REATTEMPT_DELAY);
|
||||
|
||||
let recognize_at = block_number + blocks_till_reattempt;
|
||||
let mut queued = Reattempt::get(txn, set, recognize_at).unwrap_or(Vec::with_capacity(1));
|
||||
queued.push(reattempt_topic);
|
||||
Reattempt::set(txn, set, recognize_at, &queued);
|
||||
}
|
||||
|
||||
// Register the succeeding topic
|
||||
let succeeding_topic = topic.succeeding_topic();
|
||||
if let Some(succeeding_topic) = succeeding_topic {
|
||||
Self::recognize_topic(txn, set, succeeding_topic);
|
||||
}
|
||||
|
||||
// Fetch and return all participations
|
||||
let mut data_set = HashMap::with_capacity(validators.len());
|
||||
for validator in validators {
|
||||
if let Some(data) = Accumulated::<D>::get(txn, set, topic, *validator) {
|
||||
// Clean this data up if there's not a re-attempt topic
|
||||
// If there is a re-attempt topic, we clean it up upon re-attempt
|
||||
if reattempt_topic.is_none() {
|
||||
Accumulated::<D>::del(txn, set, topic, *validator);
|
||||
}
|
||||
data_set.insert(*validator, data);
|
||||
}
|
||||
}
|
||||
let participated = data_set.contains_key(&validator);
|
||||
match topic.participating() {
|
||||
Participating::Participated => {
|
||||
if participated {
|
||||
DataSet::Participating(data_set)
|
||||
} else {
|
||||
DataSet::None
|
||||
}
|
||||
}
|
||||
Participating::Everyone => DataSet::Participating(data_set),
|
||||
}
|
||||
} else {
|
||||
DataSet::None
|
||||
}
|
||||
}
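A minimal standalone sketch of the accumulation rule above (illustrative only; the helper and constants below are assumptions, not part of this codebase). The data set is emitted exactly once: a contribution arriving after the threshold was crossed hits the NOP on the pre-add check, while earlier contributions fail the post-add check. The two-thirds figure stands in for whatever `topic.required_participation(total_weight)` returns.

// Mirrors the NOP-before / emit-after pattern of `accumulate`
fn accumulate_weight(prior: u64, added: u64, required: u64) -> (u64, bool) {
  // Already past the threshold before this contribution: NOP
  if prior >= required {
    return (prior, false);
  }
  let now = prior + added;
  // Emit the accumulated data set only on the contribution which crosses the threshold
  (now, now >= required)
}

fn main() {
  let required = 67; // assumed ~2/3 of a total weight of 100
  assert_eq!(accumulate_weight(60, 5, required), (65, false)); // still short
  assert_eq!(accumulate_weight(65, 5, required), (70, true)); // crosses: emit once
  assert_eq!(accumulate_weight(70, 5, required), (70, false)); // later contributions NOP
}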
|
||||
|
||||
pub(crate) fn send_message(
|
||||
txn: &mut impl DbTxn,
|
||||
set: ValidatorSet,
|
||||
message: impl Into<messages::CoordinatorMessage>,
|
||||
) {
|
||||
ProcessorMessages::send(txn, set, &message.into());
}
}
|
||||
impl FatalSlashes {
|
||||
pub fn get_as_keys(getter: &impl Get, genesis: [u8; 32]) -> Vec<<Ristretto as Ciphersuite>::G> {
|
||||
FatalSlashes::get(getter, genesis)
|
||||
.unwrap_or(vec![])
|
||||
.iter()
|
||||
.map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap())
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
}
|
||||
|
||||
impl FatallySlashed {
|
||||
pub fn set_fatally_slashed(txn: &mut impl DbTxn, genesis: [u8; 32], account: [u8; 32]) {
|
||||
Self::set(txn, genesis, account, &());
|
||||
let mut existing = FatalSlashes::get(txn, genesis).unwrap_or_default();
|
||||
|
||||
// Don't append if we already have it, which can occur upon multiple faults
|
||||
if existing.iter().any(|existing| existing == &account) {
|
||||
return;
|
||||
}
|
||||
|
||||
existing.push(account);
|
||||
FatalSlashes::set(txn, genesis, &existing);
|
||||
}
|
||||
}
|
||||
|
||||
impl AttemptDb {
|
||||
pub fn recognize_topic(txn: &mut impl DbTxn, genesis: [u8; 32], topic: Topic) {
|
||||
Self::set(txn, genesis, &topic, &0u32);
|
||||
}
|
||||
|
||||
pub fn start_next_attempt(txn: &mut impl DbTxn, genesis: [u8; 32], topic: Topic) -> u32 {
|
||||
let next =
|
||||
Self::attempt(txn, genesis, topic).expect("starting next attempt for unknown topic") + 1;
|
||||
Self::set(txn, genesis, &topic, &next);
|
||||
next
|
||||
}
|
||||
|
||||
pub fn attempt(getter: &impl Get, genesis: [u8; 32], topic: Topic) -> Option<u32> {
|
||||
let attempt = Self::get(getter, genesis, &topic);
|
||||
// Don't require explicit recognition of the Dkg topic as it starts when the chain does
|
||||
// Don't require explicit recognition of the SlashReport topic as it isn't a DoS risk and it
|
||||
// should always happen (eventually)
|
||||
if attempt.is_none() &&
|
||||
((topic == Topic::Dkg) ||
|
||||
(topic == Topic::DkgConfirmation) ||
|
||||
(topic == Topic::SubstrateSign(SubstrateSignableId::SlashReport)))
|
||||
{
|
||||
return Some(0);
|
||||
}
|
||||
attempt
|
||||
}
|
||||
}
|
||||
|
||||
impl ReattemptDb {
|
||||
pub fn schedule_reattempt(
|
||||
txn: &mut impl DbTxn,
|
||||
genesis: [u8; 32],
|
||||
current_block_number: u32,
|
||||
topic: Topic,
|
||||
) {
|
||||
// 5 minutes
|
||||
#[cfg(not(feature = "longer-reattempts"))]
|
||||
const BASE_REATTEMPT_DELAY: u32 = (5 * 60 * 1000) / tributary::tendermint::TARGET_BLOCK_TIME;
|
||||
|
||||
// 10 minutes, intended for high-latency environments like the GitHub CI
|
||||
#[cfg(feature = "longer-reattempts")]
|
||||
const BASE_REATTEMPT_DELAY: u32 = (10 * 60 * 1000) / tributary::tendermint::TARGET_BLOCK_TIME;
|
||||
|
||||
// 5 minutes for attempts 0 ..= 2, 10 minutes for attempts 3 ..= 5, 15 minutes for attempts > 5
|
||||
// Assumes no event will take longer than 15 minutes, yet grows the time in case there are
|
||||
// network bandwidth issues
|
||||
let mut reattempt_delay = BASE_REATTEMPT_DELAY *
|
||||
((AttemptDb::attempt(txn, genesis, topic)
|
||||
.expect("scheduling re-attempt for unknown topic") /
|
||||
3) +
|
||||
1)
|
||||
.min(3);
|
||||
// Allow more time for DKGs since they have an extra round and much more data
|
||||
if matches!(topic, Topic::Dkg) {
|
||||
reattempt_delay *= 4;
|
||||
}
|
||||
let upon_block = current_block_number + reattempt_delay;
|
||||
|
||||
let mut reattempts = Self::get(txn, genesis, upon_block).unwrap_or(vec![]);
|
||||
reattempts.push(topic);
|
||||
Self::set(txn, genesis, upon_block, &reattempts);
|
||||
}
|
||||
|
||||
pub fn take(txn: &mut impl DbTxn, genesis: [u8; 32], block_number: u32) -> Vec<Topic> {
|
||||
let res = Self::get(txn, genesis, block_number).unwrap_or(vec![]);
|
||||
if !res.is_empty() {
|
||||
Self::del(txn, genesis, block_number);
|
||||
}
|
||||
res
|
||||
}
|
||||
}
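A standalone sketch of the delay schedule `schedule_reattempt` implements (illustrative only; the helper and the 50-block base are assumptions). With six-second blocks, `(5 * 60 * 1000) / TARGET_BLOCK_TIME` would be 50, so attempts 0 ..= 2 wait roughly five minutes, 3 ..= 5 roughly ten, everything later caps at roughly fifteen, and DKG topics wait four times as long.

fn reattempt_delay(base_delay: u32, attempt: u32, is_dkg: bool) -> u32 {
  // Linearly scale with the attempt number, capped at three times the base delay
  let mut delay = base_delay * ((attempt / 3) + 1).min(3);
  // DKGs have an extra round and much more data, so they get four times as long
  if is_dkg {
    delay *= 4;
  }
  delay
}

fn main() {
  let base_delay = 50; // blocks; assumes a 6-second TARGET_BLOCK_TIME
  assert_eq!(reattempt_delay(base_delay, 0, false), 50);
  assert_eq!(reattempt_delay(base_delay, 4, false), 100);
  assert_eq!(reattempt_delay(base_delay, 9, false), 150);
  assert_eq!(reattempt_delay(base_delay, 0, true), 200);
}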
|
||||
|
||||
impl SignedTransactionDb {
|
||||
pub fn take_signed_transaction(
|
||||
txn: &mut impl DbTxn,
|
||||
order: &[u8],
|
||||
nonce: u32,
|
||||
) -> Option<Transaction> {
|
||||
let res = SignedTransactionDb::get(txn, order, nonce)
|
||||
.map(|bytes| Transaction::read(&mut bytes.as_slice()).unwrap());
|
||||
if res.is_some() {
|
||||
Self::del(txn, order, nonce);
|
||||
}
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
777
coordinator/src/tributary/handle.rs
Normal file
@@ -0,0 +1,777 @@
|
||||
use core::ops::Deref;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use zeroize::Zeroizing;
|
||||
use rand_core::OsRng;
|
||||
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite};
|
||||
use frost::dkg::Participant;
|
||||
|
||||
use scale::{Encode, Decode};
|
||||
use serai_client::validator_sets::primitives::KeyPair;
|
||||
|
||||
use tributary::{Signed, TransactionKind, TransactionTrait};
|
||||
|
||||
use processor_messages::{
|
||||
key_gen::{self, KeyGenId},
|
||||
coordinator::{self, SubstrateSignableId, SubstrateSignId},
|
||||
sign::{self, SignId},
|
||||
};
|
||||
|
||||
use serai_db::*;
|
||||
|
||||
use crate::{
|
||||
processors::Processors,
|
||||
tributary::{
|
||||
*,
|
||||
signing_protocol::DkgConfirmer,
|
||||
scanner::{
|
||||
RecognizedIdType, RIDTrait, PublishSeraiTransaction, PTTTrait, TributaryBlockHandler,
|
||||
},
|
||||
},
|
||||
P2p,
|
||||
};
|
||||
|
||||
pub fn dkg_confirmation_nonces(
|
||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
spec: &TributarySpec,
|
||||
txn: &mut impl DbTxn,
|
||||
attempt: u32,
|
||||
) -> [u8; 64] {
|
||||
DkgConfirmer::new(key, spec, txn, attempt)
|
||||
.expect("getting DKG confirmation nonces for unknown attempt")
|
||||
.preprocess()
|
||||
}
|
||||
|
||||
pub fn generated_key_pair<D: Db>(
|
||||
txn: &mut D::Transaction<'_>,
|
||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
spec: &TributarySpec,
|
||||
key_pair: &KeyPair,
|
||||
attempt: u32,
|
||||
) -> Result<[u8; 32], Participant> {
|
||||
DkgKeyPair::set(txn, spec.genesis(), attempt, key_pair);
|
||||
KeyToDkgAttempt::set(txn, key_pair.0 .0, &attempt);
|
||||
let preprocesses = ConfirmationNonces::get(txn, spec.genesis(), attempt).unwrap();
|
||||
DkgConfirmer::new(key, spec, txn, attempt)
|
||||
.expect("claiming to have generated a key pair for an unrecognized attempt")
|
||||
.share(preprocesses, key_pair)
|
||||
}
|
||||
|
||||
fn unflatten(
|
||||
spec: &TributarySpec,
|
||||
removed: &[<Ristretto as Ciphersuite>::G],
|
||||
data: &mut HashMap<Participant, Vec<u8>>,
|
||||
) {
|
||||
for (validator, _) in spec.validators() {
|
||||
let Some(range) = spec.i(removed, validator) else { continue };
|
||||
let Some(all_segments) = data.remove(&range.start) else {
|
||||
continue;
|
||||
};
|
||||
let mut data_vec = Vec::<_>::decode(&mut all_segments.as_slice()).unwrap();
|
||||
for i in u16::from(range.start) .. u16::from(range.end) {
|
||||
let i = Participant::new(i).unwrap();
|
||||
data.insert(i, data_vec.remove(0));
|
||||
}
|
||||
}
|
||||
}
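A standalone sketch of what `unflatten` does (illustrative only; the helper below is an assumption and SCALE decoding is elided): a validator holding key shares 2 and 3 publishes one blob keyed by the start of its range, and unflattening spreads the decoded entries back out so each Participant index maps to exactly one entry.

use std::collections::HashMap;

fn unflatten_range(
  data: &mut HashMap<u16, Vec<u8>>,
  range: std::ops::Range<u16>,
  decoded: Vec<Vec<u8>>,
) {
  assert_eq!(usize::from(range.end - range.start), decoded.len());
  // The flattened blob was keyed by the start of the validator's range
  data.remove(&range.start);
  // Spread the decoded entries across the validator's Participant indexes
  for (i, entry) in range.zip(decoded) {
    data.insert(i, entry);
  }
}

fn main() {
  let mut data = HashMap::from([(2u16, b"flattened blob".to_vec())]);
  unflatten_range(&mut data, 2 .. 4, vec![b"entry for 2".to_vec(), b"entry for 3".to_vec()]);
  assert_eq!(data[&2], b"entry for 2".to_vec());
  assert_eq!(data[&3], b"entry for 3".to_vec());
}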
|
||||
|
||||
impl<
|
||||
D: Db,
|
||||
T: DbTxn,
|
||||
Pro: Processors,
|
||||
PST: PublishSeraiTransaction,
|
||||
PTT: PTTTrait,
|
||||
RID: RIDTrait,
|
||||
P: P2p,
|
||||
> TributaryBlockHandler<'_, D, T, Pro, PST, PTT, RID, P>
|
||||
{
|
||||
fn accumulate(
|
||||
&mut self,
|
||||
removed: &[<Ristretto as Ciphersuite>::G],
|
||||
data_spec: &DataSpecification,
|
||||
signer: <Ristretto as Ciphersuite>::G,
|
||||
data: &Vec<u8>,
|
||||
) -> Accumulation {
|
||||
log::debug!("accumulating entry for {:?} attempt #{}", &data_spec.topic, &data_spec.attempt);
|
||||
let genesis = self.spec.genesis();
|
||||
if DataDb::get(self.txn, genesis, data_spec, &signer.to_bytes()).is_some() {
|
||||
panic!("accumulating data for a participant multiple times");
|
||||
}
|
||||
let signer_shares = {
|
||||
let Some(signer_i) = self.spec.i(removed, signer) else {
|
||||
log::warn!("accumulating data from {} who was removed", hex::encode(signer.to_bytes()));
|
||||
return Accumulation::NotReady;
|
||||
};
|
||||
u16::from(signer_i.end) - u16::from(signer_i.start)
|
||||
};
|
||||
|
||||
let prior_received = DataReceived::get(self.txn, genesis, data_spec).unwrap_or_default();
|
||||
let now_received = prior_received + signer_shares;
|
||||
DataReceived::set(self.txn, genesis, data_spec, &now_received);
|
||||
DataDb::set(self.txn, genesis, data_spec, &signer.to_bytes(), data);
|
||||
|
||||
let received_range = (prior_received + 1) ..= now_received;
|
||||
|
||||
// If 2/3rds of the network participated in this preprocess, queue it for an automatic
|
||||
// re-attempt
|
||||
// DkgConfirmation doesn't have a re-attempt as it's just an extension for Dkg
|
||||
if (data_spec.label == Label::Preprocess) &&
|
||||
received_range.contains(&self.spec.t()) &&
|
||||
(data_spec.topic != Topic::DkgConfirmation)
|
||||
{
|
||||
// Double check the attempt on this entry, as we don't want to schedule a re-attempt if this
|
||||
// is an old entry
|
||||
// This is an assert, not part of the if check, as old data shouldn't be here in the first
|
||||
// place
|
||||
assert_eq!(AttemptDb::attempt(self.txn, genesis, data_spec.topic), Some(data_spec.attempt));
|
||||
ReattemptDb::schedule_reattempt(self.txn, genesis, self.block_number, data_spec.topic);
|
||||
}
|
||||
|
||||
// If we have all the needed commitments/preprocesses/shares, tell the processor
|
||||
let needs_everyone =
|
||||
(data_spec.topic == Topic::Dkg) || (data_spec.topic == Topic::DkgConfirmation);
|
||||
let needed = if needs_everyone { self.spec.n(removed) } else { self.spec.t() };
|
||||
if received_range.contains(&needed) {
|
||||
log::debug!(
|
||||
"accumulation for entry {:?} attempt #{} is ready",
|
||||
&data_spec.topic,
|
||||
&data_spec.attempt
|
||||
);
|
||||
|
||||
let mut data = HashMap::new();
|
||||
for validator in self.spec.validators().iter().map(|validator| validator.0) {
|
||||
let Some(i) = self.spec.i(removed, validator) else { continue };
|
||||
data.insert(
|
||||
i.start,
|
||||
if let Some(data) = DataDb::get(self.txn, genesis, data_spec, &validator.to_bytes()) {
|
||||
data
|
||||
} else {
|
||||
continue;
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
assert_eq!(data.len(), usize::from(needed));
|
||||
|
||||
// Remove our own piece of data, if we were involved
|
||||
if let Some(i) = self.spec.i(removed, Ristretto::generator() * self.our_key.deref()) {
|
||||
if data.remove(&i.start).is_some() {
|
||||
return Accumulation::Ready(DataSet::Participating(data));
|
||||
}
|
||||
}
|
||||
return Accumulation::Ready(DataSet::NotParticipating);
|
||||
}
|
||||
Accumulation::NotReady
|
||||
}
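A standalone sketch of the `received_range` check used above (illustrative only; the helper is an assumption): the threshold is contained in `(prior + 1) ..= now` for exactly one transaction, even when a single signer contributes several key shares' worth of entries at once, so the "ready" branch fires once and only once.

fn crosses(prior: u16, added: u16, threshold: u16) -> bool {
  ((prior + 1) ..= (prior + added)).contains(&threshold)
}

fn main() {
  let threshold = 4;
  assert!(!crosses(2, 1, threshold)); // 3 received, still short
  assert!(crosses(3, 2, threshold)); // 3 -> 5 crosses 4, fires exactly here
  assert!(!crosses(5, 1, threshold)); // already past it, never fires again
}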
|
||||
|
||||
fn handle_data(
|
||||
&mut self,
|
||||
removed: &[<Ristretto as Ciphersuite>::G],
|
||||
data_spec: &DataSpecification,
|
||||
bytes: &Vec<u8>,
|
||||
signed: &Signed,
|
||||
) -> Accumulation {
|
||||
let genesis = self.spec.genesis();
|
||||
|
||||
let Some(curr_attempt) = AttemptDb::attempt(self.txn, genesis, data_spec.topic) else {
|
||||
// Premature publication of a valid ID/publication of an invalid ID
|
||||
self.fatal_slash(signed.signer.to_bytes(), "published data for ID without an attempt");
|
||||
return Accumulation::NotReady;
|
||||
};
|
||||
|
||||
// If they've already published a TX for this attempt, slash
|
||||
// This shouldn't be reachable since nonces are inserted by the coordinator, yet it's a
|
||||
// cheap check to leave in for safety
|
||||
if DataDb::get(self.txn, genesis, data_spec, &signed.signer.to_bytes()).is_some() {
|
||||
self.fatal_slash(signed.signer.to_bytes(), "published data multiple times");
|
||||
return Accumulation::NotReady;
|
||||
}
|
||||
|
||||
// If the attempt is lesser than the blockchain's, return
|
||||
if data_spec.attempt < curr_attempt {
|
||||
log::debug!(
|
||||
"dated attempt published onto tributary for topic {:?} (used attempt {}, current {})",
|
||||
data_spec.topic,
|
||||
data_spec.attempt,
|
||||
curr_attempt
|
||||
);
|
||||
return Accumulation::NotReady;
|
||||
}
|
||||
// If the attempt is greater, this is a premature publication, full slash
|
||||
if data_spec.attempt > curr_attempt {
|
||||
self.fatal_slash(
|
||||
signed.signer.to_bytes(),
|
||||
"published data with an attempt which hasn't started",
|
||||
);
|
||||
return Accumulation::NotReady;
|
||||
}
|
||||
|
||||
// TODO: We can also full slash if shares before all commitments, or share before the
|
||||
// necessary preprocesses
|
||||
|
||||
// TODO: If this is shares, we need to check they are part of the selected signing set
|
||||
|
||||
// Accumulate this data
|
||||
self.accumulate(removed, data_spec, signed.signer, bytes)
|
||||
}
|
||||
|
||||
fn check_sign_data_len(
|
||||
&mut self,
|
||||
removed: &[<Ristretto as Ciphersuite>::G],
|
||||
signer: <Ristretto as Ciphersuite>::G,
|
||||
len: usize,
|
||||
) -> Result<(), ()> {
|
||||
let Some(signer_i) = self.spec.i(removed, signer) else {
|
||||
// TODO: Ensure the processor doesn't participate in this case/check how it handles removals for being
|
||||
// offline
|
||||
self.fatal_slash(signer.to_bytes(), "signer participated despite being removed");
|
||||
Err(())?
|
||||
};
|
||||
if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) {
|
||||
self.fatal_slash(
|
||||
signer.to_bytes(),
|
||||
"signer published a distinct amount of sign data than they had shares",
|
||||
);
|
||||
Err(())?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// TODO: Don't call fatal_slash in here, return the party to fatal_slash to ensure no further
|
||||
// execution occurs
|
||||
pub(crate) async fn handle_application_tx(&mut self, tx: Transaction) {
|
||||
let genesis = self.spec.genesis();
|
||||
|
||||
// Don't handle transactions from fatally slashed participants
|
||||
// This prevents removed participants from sabotaging the removal signing sessions and so on
|
||||
// TODO: Because fatally slashed participants can still publish onto the blockchain, they have
|
||||
// a notable DoS ability
|
||||
if let TransactionKind::Signed(_, signed) = tx.kind() {
|
||||
if FatallySlashed::get(self.txn, genesis, signed.signer.to_bytes()).is_some() {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
match tx {
|
||||
Transaction::RemoveParticipantDueToDkg { participant, signed } => {
|
||||
if self.spec.i(&[], participant).is_none() {
|
||||
self.fatal_slash(
|
||||
participant.to_bytes(),
|
||||
"RemoveParticipantDueToDkg vote for non-validator",
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
let participant = participant.to_bytes();
|
||||
let signer = signed.signer.to_bytes();
|
||||
|
||||
assert!(
|
||||
VotedToRemove::get(self.txn, genesis, signer, participant).is_none(),
|
||||
"VotedToRemove multiple times despite a single nonce being allocated",
|
||||
);
|
||||
VotedToRemove::set(self.txn, genesis, signer, participant, &());
|
||||
|
||||
let prior_votes = VotesToRemove::get(self.txn, genesis, participant).unwrap_or(0);
|
||||
let signer_votes =
|
||||
self.spec.i(&[], signed.signer).expect("signer wasn't a validator for this network?");
|
||||
let new_votes = prior_votes + u16::from(signer_votes.end) - u16::from(signer_votes.start);
|
||||
VotesToRemove::set(self.txn, genesis, participant, &new_votes);
|
||||
if ((prior_votes + 1) ..= new_votes).contains(&self.spec.t()) {
|
||||
self.fatal_slash(participant, "RemoveParticipantDueToDkg vote")
|
||||
}
|
||||
}
|
||||
|
||||
Transaction::DkgCommitments { attempt, commitments, signed } => {
|
||||
let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
|
||||
self.fatal_slash(signed.signer.to_bytes(), "DkgCommitments with an unrecognized attempt");
|
||||
return;
|
||||
};
|
||||
let Ok(()) = self.check_sign_data_len(&removed, signed.signer, commitments.len()) else {
|
||||
return;
|
||||
};
|
||||
let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Preprocess, attempt };
|
||||
match self.handle_data(&removed, &data_spec, &commitments.encode(), &signed) {
|
||||
Accumulation::Ready(DataSet::Participating(mut commitments)) => {
|
||||
log::info!("got all DkgCommitments for {}", hex::encode(genesis));
|
||||
unflatten(self.spec, &removed, &mut commitments);
|
||||
self
|
||||
.processors
|
||||
.send(
|
||||
self.spec.set().network,
|
||||
key_gen::CoordinatorMessage::Commitments {
|
||||
id: KeyGenId { session: self.spec.set().session, attempt },
|
||||
commitments,
|
||||
},
|
||||
)
|
||||
.await;
|
||||
}
|
||||
Accumulation::Ready(DataSet::NotParticipating) => {
|
||||
assert!(
|
||||
removed.contains(&(Ristretto::generator() * self.our_key.deref())),
|
||||
"NotParticipating in a DkgCommitments we weren't removed for"
|
||||
);
|
||||
}
|
||||
Accumulation::NotReady => {}
|
||||
}
|
||||
}
|
||||
|
||||
Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => {
|
||||
let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
|
||||
self.fatal_slash(signed.signer.to_bytes(), "DkgShares with an unrecognized attempt");
|
||||
return;
|
||||
};
|
||||
let not_participating = removed.contains(&(Ristretto::generator() * self.our_key.deref()));
|
||||
|
||||
let Ok(()) = self.check_sign_data_len(&removed, signed.signer, shares.len()) else {
|
||||
return;
|
||||
};
|
||||
|
||||
let Some(sender_i) = self.spec.i(&removed, signed.signer) else {
|
||||
self.fatal_slash(
|
||||
signed.signer.to_bytes(),
|
||||
"DkgShares for a DKG they aren't participating in",
|
||||
);
|
||||
return;
|
||||
};
|
||||
let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start);
|
||||
for shares in &shares {
|
||||
if shares.len() != (usize::from(self.spec.n(&removed) - sender_is_len)) {
|
||||
self.fatal_slash(signed.signer.to_bytes(), "invalid amount of DKG shares");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Save each share as needed for blame
|
||||
for (from_offset, shares) in shares.iter().enumerate() {
|
||||
let from =
|
||||
Participant::new(u16::from(sender_i.start) + u16::try_from(from_offset).unwrap())
|
||||
.unwrap();
|
||||
|
||||
for (to_offset, share) in shares.iter().enumerate() {
|
||||
// 0-indexed (the enumeration) to 1-indexed (Participant)
|
||||
let mut to = u16::try_from(to_offset).unwrap() + 1;
|
||||
// Adjust for the omission of the sender's own shares
|
||||
if to >= u16::from(sender_i.start) {
|
||||
to += u16::from(sender_i.end) - u16::from(sender_i.start);
|
||||
}
|
||||
let to = Participant::new(to).unwrap();
|
||||
|
||||
DkgShare::set(self.txn, genesis, from.into(), to.into(), share);
|
||||
}
|
||||
}
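// Worked example (illustrative, not from the original source): with five share indexes and
// a sender occupying Participants 2 and 3 (sender_i = 2 .. 4), each inner `shares` list has
// n - sender_is_len = 3 entries. Offsets 0, 1, 2 first become to = 1, 2, 3; the latter two
// are >= sender_i.start, so they're shifted up by 2, yielding recipients 1, 4, and 5 --
// every participant except the sender itself.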
|
||||
|
||||
// Filter down to only our share's bytes for handle
|
||||
let our_shares = if let Some(our_i) =
|
||||
self.spec.i(&removed, Ristretto::generator() * self.our_key.deref())
|
||||
{
|
||||
if sender_i == our_i {
|
||||
vec![]
|
||||
} else {
|
||||
// 1-indexed to 0-indexed
|
||||
let mut our_i_pos = u16::from(our_i.start) - 1;
|
||||
// Handle the omission of the sender's own data
|
||||
if u16::from(our_i.start) > u16::from(sender_i.start) {
|
||||
our_i_pos -= sender_is_len;
|
||||
}
|
||||
let our_i_pos = usize::from(our_i_pos);
|
||||
shares
|
||||
.iter_mut()
|
||||
.map(|shares| {
|
||||
shares
|
||||
.drain(
|
||||
our_i_pos ..
|
||||
(our_i_pos + usize::from(u16::from(our_i.end) - u16::from(our_i.start))),
|
||||
)
|
||||
.collect::<Vec<_>>()
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
} else {
|
||||
assert!(
|
||||
not_participating,
|
||||
"we didn't have an i while handling DkgShares we weren't removed for"
|
||||
);
|
||||
// Since we're not participating, simply save vec![] for our shares
|
||||
vec![]
|
||||
};
|
||||
// Drop shares as it's presumably been mutated into invalidity
|
||||
drop(shares);
|
||||
|
||||
let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt };
|
||||
let encoded_data = (confirmation_nonces.to_vec(), our_shares.encode()).encode();
|
||||
match self.handle_data(&removed, &data_spec, &encoded_data, &signed) {
|
||||
Accumulation::Ready(DataSet::Participating(confirmation_nonces_and_shares)) => {
|
||||
log::info!("got all DkgShares for {}", hex::encode(genesis));
|
||||
|
||||
let mut confirmation_nonces = HashMap::new();
|
||||
let mut shares = HashMap::new();
|
||||
for (participant, confirmation_nonces_and_shares) in confirmation_nonces_and_shares {
|
||||
let (these_confirmation_nonces, these_shares) =
|
||||
<(Vec<u8>, Vec<u8>)>::decode(&mut confirmation_nonces_and_shares.as_slice())
|
||||
.unwrap();
|
||||
confirmation_nonces.insert(participant, these_confirmation_nonces);
|
||||
shares.insert(participant, these_shares);
|
||||
}
|
||||
ConfirmationNonces::set(self.txn, genesis, attempt, &confirmation_nonces);
|
||||
|
||||
// shares is a HashMap<Participant, Vec<Vec<Vec<u8>>>>, with the values representing:
|
||||
// - Each of the sender's shares
|
||||
// - Each of our shares
|
||||
// - Each share
|
||||
// We need a Vec<HashMap<Participant, Vec<u8>>>, with the outer being each of ours
|
||||
let mut expanded_shares = vec![];
|
||||
for (sender_start_i, shares) in shares {
|
||||
let shares: Vec<Vec<Vec<u8>>> = Vec::<_>::decode(&mut shares.as_slice()).unwrap();
|
||||
for (sender_i_offset, our_shares) in shares.into_iter().enumerate() {
|
||||
for (our_share_i, our_share) in our_shares.into_iter().enumerate() {
|
||||
if expanded_shares.len() <= our_share_i {
|
||||
expanded_shares.push(HashMap::new());
|
||||
}
|
||||
expanded_shares[our_share_i].insert(
|
||||
Participant::new(
|
||||
u16::from(sender_start_i) + u16::try_from(sender_i_offset).unwrap(),
|
||||
)
|
||||
.unwrap(),
|
||||
our_share,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self
|
||||
.processors
|
||||
.send(
|
||||
self.spec.set().network,
|
||||
key_gen::CoordinatorMessage::Shares {
|
||||
id: KeyGenId { session: self.spec.set().session, attempt },
|
||||
shares: expanded_shares,
|
||||
},
|
||||
)
|
||||
.await;
|
||||
}
|
||||
Accumulation::Ready(DataSet::NotParticipating) => {
|
||||
assert!(not_participating, "NotParticipating in a DkgShares we weren't removed for");
|
||||
}
|
||||
Accumulation::NotReady => {}
|
||||
}
|
||||
}
|
||||
|
||||
Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => {
|
||||
let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
|
||||
self
|
||||
.fatal_slash(signed.signer.to_bytes(), "InvalidDkgShare with an unrecognized attempt");
|
||||
return;
|
||||
};
|
||||
let Some(range) = self.spec.i(&removed, signed.signer) else {
|
||||
self.fatal_slash(
|
||||
signed.signer.to_bytes(),
|
||||
"InvalidDkgShare for a DKG they aren't participating in",
|
||||
);
|
||||
return;
|
||||
};
|
||||
if !range.contains(&accuser) {
|
||||
self.fatal_slash(
|
||||
signed.signer.to_bytes(),
|
||||
"accused with a Participant index which wasn't theirs",
|
||||
);
|
||||
return;
|
||||
}
|
||||
if range.contains(&faulty) {
|
||||
self.fatal_slash(signed.signer.to_bytes(), "accused self of having an InvalidDkgShare");
|
||||
return;
|
||||
}
|
||||
|
||||
let Some(share) = DkgShare::get(self.txn, genesis, accuser.into(), faulty.into()) else {
|
||||
self.fatal_slash(
|
||||
signed.signer.to_bytes(),
|
||||
"InvalidDkgShare had a non-existent faulty participant",
|
||||
);
|
||||
return;
|
||||
};
|
||||
self
|
||||
.processors
|
||||
.send(
|
||||
self.spec.set().network,
|
||||
key_gen::CoordinatorMessage::VerifyBlame {
|
||||
id: KeyGenId { session: self.spec.set().session, attempt },
|
||||
accuser,
|
||||
accused: faulty,
|
||||
share,
|
||||
blame,
|
||||
},
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
Transaction::DkgConfirmed { attempt, confirmation_share, signed } => {
|
||||
let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
|
||||
self.fatal_slash(signed.signer.to_bytes(), "DkgConfirmed with an unrecognized attempt");
|
||||
return;
|
||||
};
|
||||
|
||||
let data_spec =
|
||||
DataSpecification { topic: Topic::DkgConfirmation, label: Label::Share, attempt };
|
||||
match self.handle_data(&removed, &data_spec, &confirmation_share.to_vec(), &signed) {
|
||||
Accumulation::Ready(DataSet::Participating(shares)) => {
|
||||
log::info!("got all DkgConfirmed for {}", hex::encode(genesis));
|
||||
|
||||
let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
|
||||
panic!(
|
||||
"DkgConfirmed for everyone yet didn't have the removed parties for this attempt",
|
||||
);
|
||||
};
|
||||
|
||||
let preprocesses = ConfirmationNonces::get(self.txn, genesis, attempt).unwrap();
|
||||
// TODO: This can technically happen under very very very specific timing as the txn
|
||||
// put happens before DkgConfirmed, yet the txn commit isn't guaranteed to
|
||||
let key_pair = DkgKeyPair::get(self.txn, genesis, attempt).expect(
|
||||
"in DkgConfirmed handling, which happens after everyone \
|
||||
(including us) fires DkgConfirmed, yet no confirming key pair",
|
||||
);
|
||||
let mut confirmer = DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt)
|
||||
.expect("confirming DKG for unrecognized attempt");
|
||||
let sig = match confirmer.complete(preprocesses, &key_pair, shares) {
|
||||
Ok(sig) => sig,
|
||||
Err(p) => {
|
||||
let mut tx = Transaction::RemoveParticipantDueToDkg {
|
||||
participant: self.spec.reverse_lookup_i(&removed, p).unwrap(),
|
||||
signed: Transaction::empty_signed(),
|
||||
};
|
||||
tx.sign(&mut OsRng, genesis, self.our_key);
|
||||
self.publish_tributary_tx.publish_tributary_tx(tx).await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
DkgLocallyCompleted::set(self.txn, genesis, &());
|
||||
|
||||
self
|
||||
.publish_serai_tx
|
||||
.publish_set_keys(
|
||||
self.db,
|
||||
self.spec.set(),
|
||||
removed.into_iter().map(|key| key.to_bytes().into()).collect(),
|
||||
key_pair,
|
||||
sig.into(),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
Accumulation::Ready(DataSet::NotParticipating) => {
|
||||
panic!("wasn't a participant in DKG confirmination shares")
|
||||
}
|
||||
Accumulation::NotReady => {}
|
||||
}
|
||||
}
|
||||
|
||||
Transaction::CosignSubstrateBlock(hash) => {
|
||||
AttemptDb::recognize_topic(
|
||||
self.txn,
|
||||
genesis,
|
||||
Topic::SubstrateSign(SubstrateSignableId::CosigningSubstrateBlock(hash)),
|
||||
);
|
||||
|
||||
let block_number = SeraiBlockNumber::get(self.txn, hash)
|
||||
.expect("CosignSubstrateBlock yet didn't save Serai block number");
|
||||
let msg = coordinator::CoordinatorMessage::CosignSubstrateBlock {
|
||||
id: SubstrateSignId {
|
||||
session: self.spec.set().session,
|
||||
id: SubstrateSignableId::CosigningSubstrateBlock(hash),
|
||||
attempt: 0,
|
||||
},
|
||||
block_number,
|
||||
};
|
||||
self.processors.send(self.spec.set().network, msg).await;
|
||||
}
|
||||
|
||||
Transaction::Batch { block: _, batch } => {
|
||||
// Because this Batch has achieved synchrony, its batch ID should be authorized
|
||||
AttemptDb::recognize_topic(
|
||||
self.txn,
|
||||
genesis,
|
||||
Topic::SubstrateSign(SubstrateSignableId::Batch(batch)),
|
||||
);
|
||||
self
|
||||
.recognized_id
|
||||
.recognized_id(
|
||||
self.spec.set(),
|
||||
genesis,
|
||||
RecognizedIdType::Batch,
|
||||
batch.to_le_bytes().to_vec(),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
Transaction::SubstrateBlock(block) => {
|
||||
let plan_ids = PlanIds::get(self.txn, &genesis, block).expect(
|
||||
"synced a tributary block finalizing a substrate block in a provided transaction \
|
||||
despite us not providing that transaction",
|
||||
);
|
||||
|
||||
for id in plan_ids {
|
||||
AttemptDb::recognize_topic(self.txn, genesis, Topic::Sign(id));
|
||||
self
|
||||
.recognized_id
|
||||
.recognized_id(self.spec.set(), genesis, RecognizedIdType::Plan, id.to_vec())
|
||||
.await;
|
||||
}
|
||||
}
|
||||
|
||||
Transaction::SubstrateSign(data) => {
|
||||
// Provided transactions ensure synchrony on any signing protocol, and we won't start
|
||||
// signing with threshold keys before we've confirmed them on-chain
|
||||
let Some(removed) =
|
||||
crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
|
||||
else {
|
||||
self.fatal_slash(
|
||||
data.signed.signer.to_bytes(),
|
||||
"signing despite not having set keys on substrate",
|
||||
);
|
||||
return;
|
||||
};
|
||||
let signer = data.signed.signer;
|
||||
let Ok(()) = self.check_sign_data_len(&removed, signer, data.data.len()) else {
|
||||
return;
|
||||
};
|
||||
let expected_len = match data.label {
|
||||
Label::Preprocess => 64,
|
||||
Label::Share => 32,
|
||||
};
|
||||
for data in &data.data {
|
||||
if data.len() != expected_len {
|
||||
self.fatal_slash(
|
||||
signer.to_bytes(),
|
||||
"unexpected length data for substrate signing protocol",
|
||||
);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
let data_spec = DataSpecification {
|
||||
topic: Topic::SubstrateSign(data.plan),
|
||||
label: data.label,
|
||||
attempt: data.attempt,
|
||||
};
|
||||
let Accumulation::Ready(DataSet::Participating(mut results)) =
|
||||
self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed)
|
||||
else {
|
||||
return;
|
||||
};
|
||||
unflatten(self.spec, &removed, &mut results);
|
||||
|
||||
let id = SubstrateSignId {
|
||||
session: self.spec.set().session,
|
||||
id: data.plan,
|
||||
attempt: data.attempt,
|
||||
};
|
||||
let msg = match data.label {
|
||||
Label::Preprocess => coordinator::CoordinatorMessage::SubstratePreprocesses {
|
||||
id,
|
||||
preprocesses: results.into_iter().map(|(v, p)| (v, p.try_into().unwrap())).collect(),
|
||||
},
|
||||
Label::Share => coordinator::CoordinatorMessage::SubstrateShares {
|
||||
id,
|
||||
shares: results.into_iter().map(|(v, p)| (v, p.try_into().unwrap())).collect(),
|
||||
},
|
||||
};
|
||||
self.processors.send(self.spec.set().network, msg).await;
|
||||
}
|
||||
|
||||
Transaction::Sign(data) => {
|
||||
let Some(removed) =
|
||||
crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
|
||||
else {
|
||||
self.fatal_slash(
|
||||
data.signed.signer.to_bytes(),
|
||||
"signing despite not having set keys on substrate",
|
||||
);
|
||||
return;
|
||||
};
|
||||
let Ok(()) = self.check_sign_data_len(&removed, data.signed.signer, data.data.len()) else {
|
||||
return;
|
||||
};
|
||||
|
||||
let data_spec = DataSpecification {
|
||||
topic: Topic::Sign(data.plan),
|
||||
label: data.label,
|
||||
attempt: data.attempt,
|
||||
};
|
||||
if let Accumulation::Ready(DataSet::Participating(mut results)) =
|
||||
self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed)
|
||||
{
|
||||
unflatten(self.spec, &removed, &mut results);
|
||||
let id =
|
||||
SignId { session: self.spec.set().session, id: data.plan, attempt: data.attempt };
|
||||
self
|
||||
.processors
|
||||
.send(
|
||||
self.spec.set().network,
|
||||
match data.label {
|
||||
Label::Preprocess => {
|
||||
sign::CoordinatorMessage::Preprocesses { id, preprocesses: results }
|
||||
}
|
||||
Label::Share => sign::CoordinatorMessage::Shares { id, shares: results },
|
||||
},
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
|
||||
Transaction::SignCompleted { plan, tx_hash, first_signer, signature: _ } => {
|
||||
log::info!(
|
||||
"on-chain SignCompleted claims {} completes {}",
|
||||
hex::encode(&tx_hash),
|
||||
hex::encode(plan)
|
||||
);
|
||||
|
||||
if AttemptDb::attempt(self.txn, genesis, Topic::Sign(plan)).is_none() {
|
||||
self.fatal_slash(first_signer.to_bytes(), "claimed an unrecognized plan was completed");
|
||||
return;
|
||||
};
|
||||
|
||||
// TODO: Confirm this signer hasn't prior published a completion
|
||||
|
||||
let msg = sign::CoordinatorMessage::Completed {
|
||||
session: self.spec.set().session,
|
||||
id: plan,
|
||||
tx: tx_hash,
|
||||
};
|
||||
self.processors.send(self.spec.set().network, msg).await;
|
||||
}
|
||||
|
||||
Transaction::SlashReport(points, signed) => {
|
||||
// Uses &[] as we only need the length which is independent to who else was removed
|
||||
let signer_range = self.spec.i(&[], signed.signer).unwrap();
|
||||
let signer_len = u16::from(signer_range.end) - u16::from(signer_range.start);
|
||||
if points.len() != (self.spec.validators().len() - 1) {
|
||||
self.fatal_slash(
|
||||
signed.signer.to_bytes(),
|
||||
"submitted a distinct amount of slash points to participants",
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
if SlashReports::get(self.txn, genesis, signed.signer.to_bytes()).is_some() {
|
||||
self.fatal_slash(signed.signer.to_bytes(), "submitted multiple slash points");
|
||||
return;
|
||||
}
|
||||
SlashReports::set(self.txn, genesis, signed.signer.to_bytes(), &points);
|
||||
|
||||
let prior_reported = SlashReported::get(self.txn, genesis).unwrap_or(0);
|
||||
let now_reported = prior_reported + signer_len;
|
||||
SlashReported::set(self.txn, genesis, &now_reported);
|
||||
|
||||
if (prior_reported < self.spec.t()) && (now_reported >= self.spec.t()) {
|
||||
SlashReportCutOff::set(
|
||||
self.txn,
|
||||
genesis,
|
||||
// 30 minutes into the future
|
||||
&(u64::from(self.block_number) +
|
||||
((30 * 60 * 1000) / u64::from(tributary::tendermint::TARGET_BLOCK_TIME))),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,101 @@
|
||||
mod transaction;
|
||||
pub use transaction::Transaction;
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite};
|
||||
|
||||
use serai_client::validator_sets::primitives::ExternalValidatorSet;
|
||||
|
||||
use tributary::{
|
||||
ReadWrite,
|
||||
transaction::{TransactionError, TransactionKind, Transaction as TransactionTrait},
|
||||
Tributary,
|
||||
};
|
||||
|
||||
mod db;
|
||||
pub use db::*;
|
||||
|
||||
mod scan;
|
||||
mod spec;
|
||||
pub use spec::TributarySpec;
|
||||
|
||||
mod transaction;
|
||||
pub use transaction::{Label, SignData, Transaction};
|
||||
|
||||
mod signing_protocol;
|
||||
|
||||
mod handle;
|
||||
pub use handle::*;
|
||||
|
||||
pub mod scanner;
|
||||
|
||||
pub fn removed_as_of_dkg_attempt(
|
||||
getter: &impl Get,
|
||||
genesis: [u8; 32],
|
||||
attempt: u32,
|
||||
) -> Option<Vec<<Ristretto as Ciphersuite>::G>> {
|
||||
if attempt == 0 {
|
||||
Some(vec![])
|
||||
} else {
|
||||
RemovedAsOfDkgAttempt::get(getter, genesis, attempt).map(|keys| {
|
||||
keys.iter().map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap()).collect()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub fn removed_as_of_set_keys(
|
||||
getter: &impl Get,
|
||||
set: ExternalValidatorSet,
|
||||
genesis: [u8; 32],
|
||||
) -> Option<Vec<<Ristretto as Ciphersuite>::G>> {
|
||||
// SeraiDkgCompleted has the key placed on-chain.
|
||||
// This key can be uniquely mapped to an attempt so long as one participant was honest, which we
// assume since we ourselves are a presumably honest participant.
|
||||
// Resolve from generated key to attempt to fatally slashed as of attempt.
|
||||
|
||||
// This expect will trigger if this is prematurely called and Substrate has tracked the keys yet
|
||||
// we haven't locally synced and handled the Tributary
|
||||
// All callers of this, at the time of writing, ensure the Tributary has sufficiently synced
|
||||
// making the panic with context more desirable than the None
|
||||
let attempt = KeyToDkgAttempt::get(getter, SeraiDkgCompleted::get(getter, set)?)
|
||||
.expect("key completed on-chain didn't have an attempt related");
|
||||
removed_as_of_dkg_attempt(getter, genesis, attempt)
|
||||
}
|
||||
|
||||
pub async fn publish_signed_transaction<D: Db, P: crate::P2p>(
|
||||
txn: &mut D::Transaction<'_>,
|
||||
tributary: &Tributary<D, Transaction, P>,
|
||||
tx: Transaction,
|
||||
) {
|
||||
log::debug!("publishing transaction {}", hex::encode(tx.hash()));
|
||||
|
||||
let (order, signer) = if let TransactionKind::Signed(order, signed) = tx.kind() {
|
||||
let signer = signed.signer;
|
||||
|
||||
// Safe as we should deterministically create transactions, meaning if this is already on-disk,
|
||||
// it's what we're saving now
|
||||
SignedTransactionDb::set(txn, &order, signed.nonce, &tx.serialize());
|
||||
|
||||
(order, signer)
|
||||
} else {
|
||||
panic!("non-signed transaction passed to publish_signed_transaction");
|
||||
};
|
||||
|
||||
// If we're trying to publish 5, when the last transaction published was 3, this will delay
|
||||
// publication until the point in time we publish 4
|
||||
while let Some(tx) = SignedTransactionDb::take_signed_transaction(
|
||||
txn,
|
||||
&order,
|
||||
tributary
|
||||
.next_nonce(&signer, &order)
|
||||
.await
|
||||
.expect("we don't have a nonce, meaning we aren't a participant on this tributary"),
|
||||
) {
|
||||
// We need to return a proper error here to enable that, due to a race condition around
|
||||
// multiple publications
|
||||
match tributary.add_transaction(tx.clone()).await {
|
||||
Ok(_) => {}
|
||||
// Some asynchronicity if InvalidNonce, assumed safe due to deterministic nonces
|
||||
Err(TransactionError::InvalidNonce) => {
|
||||
log::warn!("publishing TX {tx:?} returned InvalidNonce. was it already added?")
|
||||
}
|
||||
Err(e) => panic!("created an invalid transaction: {e:?}"),
|
||||
}
|
||||
}
|
||||
}
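A standalone sketch of the ordering property above (illustrative only; the queue and helper are assumptions): queued transactions are keyed by nonce and publication always drains from the chain's next expected nonce upward, so a transaction saved "early" simply waits until its predecessors have been published.

use std::collections::HashMap;

fn drain_in_nonce_order(queue: &mut HashMap<u32, &'static str>, mut next_nonce: u32) -> Vec<&'static str> {
  let mut published = vec![];
  // Publish strictly in nonce order, stopping at the first gap
  while let Some(tx) = queue.remove(&next_nonce) {
    published.push(tx);
    next_nonce += 1;
  }
  published
}

fn main() {
  // Nonce 5 was queued while the chain still expects nonce 4: nothing is publishable yet
  let mut queue = HashMap::from([(5, "tx-5")]);
  assert!(drain_in_nonce_order(&mut queue, 4).is_empty());
  // Once nonce 4 is queued, both drain in order
  queue.insert(4, "tx-4");
  assert_eq!(drain_in_nonce_order(&mut queue, 4), vec!["tx-4", "tx-5"]);
}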
|
||||
|
||||
@@ -1,408 +0,0 @@
|
||||
use core::future::Future;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use ciphersuite::group::GroupEncoding;
|
||||
|
||||
use serai_client::{
|
||||
primitives::SeraiAddress,
|
||||
validator_sets::primitives::{ValidatorSet, Slash},
|
||||
};
|
||||
|
||||
use tributary::{
|
||||
Signed as TributarySigned, TransactionKind, TransactionTrait,
|
||||
Transaction as TributaryTransaction, Block, TributaryReader,
|
||||
tendermint::{
|
||||
tx::{TendermintTx, Evidence, decode_signed_message},
|
||||
TendermintNetwork,
|
||||
},
|
||||
};
|
||||
|
||||
use serai_db::*;
|
||||
use serai_task::ContinuallyRan;
|
||||
|
||||
use messages::sign::VariantSignId;
|
||||
|
||||
use crate::tributary::{
|
||||
db::*,
|
||||
transaction::{SigningProtocolRound, Signed, Transaction},
|
||||
};
|
||||
|
||||
struct ScanBlock<'a, D: DbTxn, TD: Db> {
|
||||
txn: &'a mut D,
|
||||
set: ValidatorSet,
|
||||
validators: &'a [SeraiAddress],
|
||||
total_weight: u64,
|
||||
validator_weights: &'a HashMap<SeraiAddress, u64>,
|
||||
tributary: &'a TributaryReader<TD, Transaction>,
|
||||
}
|
||||
impl<'a, D: DbTxn, TD: Db> ScanBlock<'a, D, TD> {
|
||||
fn potentially_start_cosign(&mut self) {
|
||||
// Don't start a new cosigning instance if we're actively running one
|
||||
if TributaryDb::actively_cosigning(self.txn, self.set) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Start cosigning the latest intended-to-be-cosigned block
|
||||
let Some(latest_substrate_block_to_cosign) =
|
||||
TributaryDb::latest_substrate_block_to_cosign(self.txn, self.set)
|
||||
else {
|
||||
return;
|
||||
};
|
||||
|
||||
let substrate_block_number = todo!("TODO");
|
||||
|
||||
// Mark us as actively cosigning
|
||||
TributaryDb::start_cosigning(self.txn, self.set, substrate_block_number);
|
||||
// Send the message for the processor to start signing
|
||||
TributaryDb::send_message(
|
||||
self.txn,
|
||||
self.set,
|
||||
messages::coordinator::CoordinatorMessage::CosignSubstrateBlock {
|
||||
session: self.set.session,
|
||||
block_number: substrate_block_number,
|
||||
block: latest_substrate_block_to_cosign,
|
||||
},
|
||||
);
|
||||
}
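A standalone sketch of the cosign scheduling invariant (illustrative only; the struct below is an assumption, not this codebase's types): `Cosign` transactions merely bump the latest intended block, a signing session only starts when none is active, and finishing a session immediately starts the next one if a newer block arrived in the meantime.

#[derive(Default)]
struct CosignState {
  latest_to_cosign: Option<[u8; 32]>,
  actively_cosigning: Option<[u8; 32]>,
}

impl CosignState {
  fn cosign(&mut self, block: [u8; 32]) {
    self.latest_to_cosign = Some(block);
    self.potentially_start();
  }
  fn cosigned(&mut self, block: [u8; 32]) {
    self.actively_cosigning = None;
    // Only start another session if a newer block was queued while this one was in flight
    if self.latest_to_cosign != Some(block) {
      self.potentially_start();
    }
  }
  fn potentially_start(&mut self) {
    if self.actively_cosigning.is_none() {
      self.actively_cosigning = self.latest_to_cosign;
    }
  }
}

fn main() {
  let (a, b) = ([1u8; 32], [2u8; 32]);
  let mut state = CosignState::default();
  state.cosign(a); // starts cosigning a
  state.cosign(b); // b is recorded, but a is still in flight
  assert_eq!(state.actively_cosigning, Some(a));
  state.cosigned(a); // finishing a immediately starts b
  assert_eq!(state.actively_cosigning, Some(b));
}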
|
||||
fn handle_application_tx(&mut self, block_number: u64, tx: Transaction) {
|
||||
let signer = |signed: Signed| SeraiAddress(signed.signer.to_bytes());
|
||||
|
||||
if let TransactionKind::Signed(_, TributarySigned { signer, .. }) = tx.kind() {
|
||||
// Don't handle transactions from those fatally slashed
|
||||
// TODO: The fact they can publish these TXs makes this a notable spam vector
|
||||
if TributaryDb::is_fatally_slashed(self.txn, self.set, SeraiAddress(signer.to_bytes())) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
match tx {
|
||||
// Accumulate this vote and fatally slash the participant if past the threshold
|
||||
Transaction::RemoveParticipant { participant, signed } => {
|
||||
let signer = signer(signed);
|
||||
|
||||
// Check the participant voted to be removed actually exists
|
||||
if !self.validators.iter().any(|validator| *validator == participant) {
|
||||
TributaryDb::fatal_slash(
|
||||
self.txn,
|
||||
self.set,
|
||||
signer,
|
||||
"voted to remove non-existent participant",
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
match TributaryDb::accumulate(
|
||||
self.txn,
|
||||
self.set,
|
||||
self.validators,
|
||||
self.total_weight,
|
||||
block_number,
|
||||
Topic::RemoveParticipant { participant },
|
||||
signer,
|
||||
self.validator_weights[&signer],
|
||||
&(),
|
||||
) {
|
||||
DataSet::None => {}
|
||||
DataSet::Participating(_) => {
|
||||
TributaryDb::fatal_slash(self.txn, self.set, participant, "voted to remove");
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Send the participation to the processor
|
||||
Transaction::DkgParticipation { participation, signed } => {
|
||||
TributaryDb::send_message(
|
||||
self.txn,
|
||||
self.set,
|
||||
messages::key_gen::CoordinatorMessage::Participation {
|
||||
session: self.set.session,
|
||||
participant: todo!("TODO"),
|
||||
participation,
|
||||
},
|
||||
);
|
||||
}
|
||||
Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed } => {
|
||||
// Accumulate the preprocesses into our own FROST attempt manager
|
||||
todo!("TODO")
|
||||
}
|
||||
Transaction::DkgConfirmationShare { attempt, share, signed } => {
|
||||
// Accumulate the shares into our own FROST attempt manager
|
||||
todo!("TODO")
|
||||
}
|
||||
|
||||
Transaction::Cosign { substrate_block_hash } => {
|
||||
// Update the latest intended-to-be-cosigned Substrate block
|
||||
TributaryDb::set_latest_substrate_block_to_cosign(self.txn, self.set, substrate_block_hash);
|
||||
// Start a new cosign if we weren't already working on one
|
||||
self.potentially_start_cosign();
|
||||
}
|
||||
Transaction::Cosigned { substrate_block_hash } => {
|
||||
TributaryDb::finish_cosigning(self.txn, self.set);
|
||||
|
||||
// Fetch the latest intended-to-be-cosigned block
|
||||
let Some(latest_substrate_block_to_cosign) =
|
||||
TributaryDb::latest_substrate_block_to_cosign(self.txn, self.set)
|
||||
else {
|
||||
return;
|
||||
};
|
||||
// If this is the block we just cosigned, return, preventing us from signing it again
|
||||
if latest_substrate_block_to_cosign == substrate_block_hash {
|
||||
return;
|
||||
}
|
||||
|
||||
// Since we do have a new cosign to work on, start it
|
||||
self.potentially_start_cosign();
|
||||
}
|
||||
Transaction::SubstrateBlock { hash } => {
|
||||
// Whitelist all of the IDs this Substrate block causes to be signed
|
||||
todo!("TODO")
|
||||
}
|
||||
Transaction::Batch { hash } => {
|
||||
// Whitelist the signing of this batch, publishing our own preprocess
|
||||
todo!("TODO")
|
||||
}
|
||||
|
||||
Transaction::SlashReport { slash_points, signed } => {
|
||||
let signer = signer(signed);
|
||||
|
||||
if slash_points.len() != self.validators.len() {
|
||||
TributaryDb::fatal_slash(
|
||||
self.txn,
|
||||
self.set,
|
||||
signer,
|
||||
"slash report was for a distinct amount of signers",
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
// Accumulate, and if past the threshold, calculate *the* slash report and start signing it
|
||||
match TributaryDb::accumulate(
|
||||
self.txn,
|
||||
self.set,
|
||||
self.validators,
|
||||
self.total_weight,
|
||||
block_number,
|
||||
Topic::SlashReport,
|
||||
signer,
|
||||
self.validator_weights[&signer],
|
||||
&slash_points,
|
||||
) {
|
||||
DataSet::None => {}
|
||||
DataSet::Participating(data_set) => {
|
||||
// Find the median reported slashes for this validator
|
||||
// TODO: This lets 34% perform a fatal slash. Should that be allowed?
|
||||
let mut median_slash_report = Vec::with_capacity(self.validators.len());
|
||||
for i in 0 .. self.validators.len() {
|
||||
let mut this_validator =
|
||||
data_set.values().map(|report| report[i]).collect::<Vec<_>>();
|
||||
this_validator.sort_unstable();
|
||||
// Choose the median, where if there are two median values, the lower one is chosen
|
||||
let median_index = if (this_validator.len() % 2) == 1 {
|
||||
this_validator.len() / 2
|
||||
} else {
|
||||
(this_validator.len() / 2) - 1
|
||||
};
|
||||
median_slash_report.push(this_validator[median_index]);
|
||||
}
|
||||
|
||||
// We only publish slashes for the `f` worst performers to:
|
||||
// 1) Effect amnesty if there were network disruptions which affected everyone
|
||||
// 2) Ensure the signing threshold doesn't have a disincentive to do their job
|
||||
|
||||
// Find the worst performer within the signing threshold's slash points
|
||||
let f = (self.validators.len() - 1) / 3;
|
||||
let worst_validator_in_supermajority_slash_points = {
|
||||
let mut sorted_slash_points = median_slash_report.clone();
|
||||
sorted_slash_points.sort_unstable();
|
||||
// This won't be a valid index if `f == 0`, which means we don't have any validators
|
||||
// to slash
|
||||
let index_of_first_validator_to_slash = self.validators.len() - f;
|
||||
let index_of_worst_validator_in_supermajority = index_of_first_validator_to_slash - 1;
|
||||
sorted_slash_points[index_of_worst_validator_in_supermajority]
|
||||
};
|
||||
|
||||
// Perform the amortization
|
||||
for slash_points in &mut median_slash_report {
|
||||
*slash_points =
|
||||
slash_points.saturating_sub(worst_validator_in_supermajority_slash_points)
|
||||
}
|
||||
let amortized_slash_report = median_slash_report;
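// Worked example (illustrative, not from the original source): with four validators, if one
// validator's reported points are [3, 1, 4, 2], the sorted column is [1, 2, 3, 4] and the
// even length selects index 1, i.e. the lower median of 2. If the full median report is
// [0, 2, 5, 9], then f = (4 - 1) / 3 = 1, the worst validator still inside the supermajority
// has 5 points, and amortizing leaves [0, 0, 0, 4]: only the single worst performer is
// actually slashed.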
|
||||
|
||||
// Create the resulting slash report
|
||||
let mut slash_report = vec![];
|
||||
for (validator, points) in self.validators.iter().copied().zip(amortized_slash_report) {
|
||||
if points != 0 {
|
||||
slash_report.push(Slash { key: validator.into(), points });
|
||||
}
|
||||
}
|
||||
assert!(slash_report.len() <= f);
|
||||
|
||||
// Recognize the topic for signing the slash report
|
||||
TributaryDb::recognize_topic(
|
||||
self.txn,
|
||||
self.set,
|
||||
Topic::Sign {
|
||||
id: VariantSignId::SlashReport,
|
||||
attempt: 0,
|
||||
round: SigningProtocolRound::Preprocess,
|
||||
},
|
||||
);
|
||||
// Send the message for the processor to start signing
|
||||
TributaryDb::send_message(
|
||||
self.txn,
|
||||
self.set,
|
||||
messages::coordinator::CoordinatorMessage::SignSlashReport {
|
||||
session: self.set.session,
|
||||
report: slash_report,
|
||||
},
|
||||
);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
Transaction::Sign { id, attempt, round, data, signed } => {
|
||||
let topic = Topic::Sign { id, attempt, round };
|
||||
let signer = signer(signed);
|
||||
|
||||
if u64::try_from(data.len()).unwrap() != self.validator_weights[&signer] {
|
||||
TributaryDb::fatal_slash(
|
||||
self.txn,
|
||||
self.set,
|
||||
signer,
|
||||
"signer signed with a distinct amount of key shares than they had key shares",
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
match TributaryDb::accumulate(
|
||||
self.txn,
|
||||
self.set,
|
||||
self.validators,
|
||||
self.total_weight,
|
||||
block_number,
|
||||
topic,
|
||||
signer,
|
||||
self.validator_weights[&signer],
|
||||
&data,
|
||||
) {
|
||||
DataSet::None => {}
|
||||
DataSet::Participating(data_set) => {
|
||||
let id = topic.sign_id(self.set).expect("Topic::Sign didn't have SignId");
|
||||
let flatten_data_set = |data_set| todo!("TODO");
|
||||
let data_set = flatten_data_set(data_set);
|
||||
TributaryDb::send_message(
|
||||
self.txn,
|
||||
self.set,
|
||||
match round {
|
||||
SigningProtocolRound::Preprocess => {
|
||||
messages::sign::CoordinatorMessage::Preprocesses { id, preprocesses: data_set }
|
||||
}
|
||||
SigningProtocolRound::Share => {
|
||||
messages::sign::CoordinatorMessage::Shares { id, shares: data_set }
|
||||
}
|
||||
},
|
||||
)
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_block(mut self, block_number: u64, block: Block<Transaction>) {
|
||||
TributaryDb::start_of_block(self.txn, self.set, block_number);
|
||||
|
||||
for tx in block.transactions {
|
||||
match tx {
|
||||
TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(ev)) => {
|
||||
// Since the evidence is on the chain, it will have already been validated
|
||||
// We can just punish the signer
|
||||
let data = match ev {
|
||||
Evidence::ConflictingMessages(first, second) => (first, Some(second)),
|
||||
Evidence::InvalidPrecommit(first) | Evidence::InvalidValidRound(first) => (first, None),
|
||||
};
|
||||
/* TODO
|
||||
let msgs = (
|
||||
decode_signed_message::<TendermintNetwork<D, Transaction, P>>(&data.0).unwrap(),
|
||||
if data.1.is_some() {
|
||||
Some(
|
||||
decode_signed_message::<TendermintNetwork<D, Transaction, P>>(&data.1.unwrap())
|
||||
.unwrap(),
|
||||
)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
);
|
||||
|
||||
// Since anything with evidence is fundamentally faulty behavior, not just temporal
|
||||
// errors, mark the node as fatally slashed
|
||||
TributaryDb::fatal_slash(
|
||||
self.txn, msgs.0.msg.sender, &format!("invalid tendermint messages: {msgs:?}"));
|
||||
*/
|
||||
todo!("TODO")
|
||||
}
|
||||
TributaryTransaction::Application(tx) => {
|
||||
self.handle_application_tx(block_number, tx);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct ScanTributaryTask<D: Db, TD: Db> {
|
||||
db: D,
|
||||
set: ValidatorSet,
|
||||
validators: Vec<SeraiAddress>,
|
||||
total_weight: u64,
|
||||
validator_weights: HashMap<SeraiAddress, u64>,
|
||||
tributary: TributaryReader<TD, Transaction>,
|
||||
}
|
||||
impl<D: Db, TD: Db> ContinuallyRan for ScanTributaryTask<D, TD> {
|
||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
||||
async move {
|
||||
let (mut last_block_number, mut last_block_hash) =
|
||||
TributaryDb::last_handled_tributary_block(&self.db, self.set)
|
||||
.unwrap_or((0, self.tributary.genesis()));
|
||||
|
||||
let mut made_progress = false;
|
||||
while let Some(next) = self.tributary.block_after(&last_block_hash) {
|
||||
let block = self.tributary.block(&next).unwrap();
|
||||
let block_number = last_block_number + 1;
|
||||
let block_hash = block.hash();
|
||||
|
||||
// Make sure we have all of the provided transactions for this block
|
||||
for tx in &block.transactions {
|
||||
let TransactionKind::Provided(order) = tx.kind() else {
|
||||
continue;
|
||||
};
|
||||
|
||||
// make sure we have all the provided txs in this block locally
|
||||
if !self.tributary.locally_provided_txs_in_block(&block_hash, order) {
|
||||
return Err(format!(
|
||||
"didn't have the provided Transactions on-chain for set (ephemeral error): {:?}",
|
||||
self.set
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
let mut txn = self.db.txn();
|
||||
(ScanBlock {
|
||||
txn: &mut txn,
|
||||
set: self.set,
|
||||
validators: &self.validators,
|
||||
total_weight: self.total_weight,
|
||||
validator_weights: &self.validator_weights,
|
||||
tributary: &self.tributary,
|
||||
})
|
||||
.handle_block(block_number, block);
|
||||
TributaryDb::set_last_handled_tributary_block(&mut txn, self.set, block_number, block_hash);
|
||||
last_block_number = block_number;
|
||||
last_block_hash = block_hash;
|
||||
txn.commit();
|
||||
|
||||
made_progress = true;
|
||||
}
|
||||
|
||||
Ok(made_progress)
|
||||
}
|
||||
}
|
||||
}
|
||||
805
coordinator/src/tributary/scanner.rs
Normal file
@@ -0,0 +1,805 @@
|
||||
use core::{marker::PhantomData, ops::Deref, future::Future, time::Duration};
|
||||
use std::{sync::Arc, collections::HashSet};
|
||||
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite};
|
||||
|
||||
use tokio::sync::broadcast;
|
||||
|
||||
use scale::{Encode, Decode};
|
||||
use serai_client::{
|
||||
primitives::{SeraiAddress, Signature},
|
||||
validator_sets::primitives::{ExternalValidatorSet, KeyPair},
|
||||
Serai,
|
||||
};
|
||||
|
||||
use serai_db::DbTxn;
|
||||
|
||||
use processor_messages::coordinator::{SubstrateSignId, SubstrateSignableId};
|
||||
|
||||
use tributary::{
|
||||
TransactionKind, Transaction as TributaryTransaction, TransactionError, Block, TributaryReader,
|
||||
tendermint::{
|
||||
tx::{TendermintTx, Evidence, decode_signed_message},
|
||||
TendermintNetwork,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{Db, processors::Processors, substrate::BatchInstructionsHashDb, tributary::*, P2p};
|
||||
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)]
|
||||
pub enum RecognizedIdType {
|
||||
Batch,
|
||||
Plan,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait RIDTrait {
|
||||
async fn recognized_id(
|
||||
&self,
|
||||
set: ExternalValidatorSet,
|
||||
genesis: [u8; 32],
|
||||
kind: RecognizedIdType,
|
||||
id: Vec<u8>,
|
||||
);
|
||||
}
|
||||
#[async_trait::async_trait]
|
||||
impl<
|
||||
FRid: Send + Future<Output = ()>,
|
||||
F: Sync + Fn(ExternalValidatorSet, [u8; 32], RecognizedIdType, Vec<u8>) -> FRid,
|
||||
> RIDTrait for F
|
||||
{
|
||||
async fn recognized_id(
|
||||
&self,
|
||||
set: ExternalValidatorSet,
|
||||
genesis: [u8; 32],
|
||||
kind: RecognizedIdType,
|
||||
id: Vec<u8>,
|
||||
) {
|
||||
(self)(set, genesis, kind, id).await
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait PublishSeraiTransaction {
|
||||
async fn publish_set_keys(
|
||||
&self,
|
||||
db: &(impl Sync + Get),
|
||||
set: ExternalValidatorSet,
|
||||
removed: Vec<SeraiAddress>,
|
||||
key_pair: KeyPair,
|
||||
signature: Signature,
|
||||
);
|
||||
}
|
||||
|
||||
mod impl_pst_for_serai {
|
||||
use super::*;
|
||||
|
||||
use serai_client::SeraiValidatorSets;
|
||||
|
||||
// Uses a macro because Rust can't resolve the lifetimes/generics around the check function
|
||||
// check is expected to return true if the effect has already occurred
|
||||
// The generated publish function will return true if *we* published the transaction
|
||||
macro_rules! common_pst {
|
||||
($Meta: ty, $check: ident) => {
|
||||
async fn publish(
|
||||
serai: &Serai,
|
||||
db: &impl Get,
|
||||
set: ExternalValidatorSet,
|
||||
tx: serai_client::Transaction,
|
||||
meta: $Meta,
|
||||
) -> bool {
|
||||
loop {
|
||||
match serai.publish(&tx).await {
|
||||
Ok(_) => return true,
|
||||
// This is assumed to be some ephemeral error due to the assumed fault-free
|
||||
// creation
|
||||
// TODO2: Differentiate connection errors from invariants
|
||||
Err(e) => {
|
||||
// The following block is irrelevant, and can/likely will fail, if we're publishing
|
||||
// a TX for an old session
|
||||
// If we're on a newer session, move on
|
||||
if crate::RetiredTributaryDb::get(db, set).is_some() {
|
||||
log::warn!("trying to publish a TX relevant to set {set:?} which isn't the latest");
|
||||
return false;
|
||||
}
|
||||
|
||||
if let Ok(serai) = serai.as_of_latest_finalized_block().await {
|
||||
let serai = serai.validator_sets();
|
||||
|
||||
// Check if someone else published the TX in question
|
||||
if $check(serai, set, meta).await {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
log::error!("couldn't connect to Serai node to publish TX: {e:?}");
|
||||
tokio::time::sleep(core::time::Duration::from_secs(5)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl PublishSeraiTransaction for Serai {
|
||||
async fn publish_set_keys(
|
||||
&self,
|
||||
db: &(impl Sync + Get),
|
||||
set: ExternalValidatorSet,
|
||||
removed: Vec<SeraiAddress>,
|
||||
key_pair: KeyPair,
|
||||
signature: Signature,
|
||||
) {
|
||||
// TODO: BoundedVec as an arg to avoid this expect
|
||||
let tx = SeraiValidatorSets::set_keys(
|
||||
set.network,
|
||||
removed.try_into().expect("removing more than allowed"),
|
||||
key_pair,
|
||||
signature,
|
||||
);
|
||||
async fn check(serai: SeraiValidatorSets<'_>, set: ExternalValidatorSet, (): ()) -> bool {
|
||||
if matches!(serai.keys(set).await, Ok(Some(_))) {
|
||||
log::info!("another coordinator set key pair for {:?}", set);
|
||||
return true;
|
||||
}
|
||||
false
|
||||
}
|
||||
common_pst!((), check);
|
||||
if publish(self, db, set, tx, ()).await {
|
||||
log::info!("published set keys for {set:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub trait PTTTrait {
|
||||
async fn publish_tributary_tx(&self, tx: Transaction);
|
||||
}
|
||||
#[async_trait::async_trait]
|
||||
impl<FPtt: Send + Future<Output = ()>, F: Sync + Fn(Transaction) -> FPtt> PTTTrait for F {
|
||||
async fn publish_tributary_tx(&self, tx: Transaction) {
|
||||
(self)(tx).await
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TributaryBlockHandler<
|
||||
'a,
|
||||
D: Db,
|
||||
T: DbTxn,
|
||||
Pro: Processors,
|
||||
PST: PublishSeraiTransaction,
|
||||
PTT: PTTTrait,
|
||||
RID: RIDTrait,
|
||||
P: P2p,
|
||||
> {
|
||||
pub db: &'a D,
|
||||
pub txn: &'a mut T,
|
||||
pub our_key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
pub recognized_id: &'a RID,
|
||||
pub processors: &'a Pro,
|
||||
pub publish_serai_tx: &'a PST,
|
||||
pub publish_tributary_tx: &'a PTT,
|
||||
pub spec: &'a TributarySpec,
|
||||
block: Block<Transaction>,
|
||||
pub block_number: u32,
|
||||
_p2p: PhantomData<P>,
|
||||
}
|
||||
|
||||
impl<
|
||||
D: Db,
|
||||
T: DbTxn,
|
||||
Pro: Processors,
|
||||
PST: PublishSeraiTransaction,
|
||||
PTT: PTTTrait,
|
||||
RID: RIDTrait,
|
||||
P: P2p,
|
||||
> TributaryBlockHandler<'_, D, T, Pro, PST, PTT, RID, P>
|
||||
{
|
||||
pub fn fatal_slash(&mut self, slashing: [u8; 32], reason: &str) {
|
||||
let genesis = self.spec.genesis();
|
||||
|
||||
log::warn!("fatally slashing {}. reason: {}", hex::encode(slashing), reason);
|
||||
FatallySlashed::set_fatally_slashed(self.txn, genesis, slashing);
|
||||
|
||||
// TODO: disconnect the node from network/ban from further participation in all Tributaries
|
||||
}
|
||||
|
||||
// TODO: Once Substrate confirms a key, we need to rotate our validator set OR form a second
|
||||
// Tributary post-DKG
|
||||
// https://github.com/serai-dex/serai/issues/426
|
||||
|
||||
async fn handle(mut self) {
|
||||
log::info!("found block for Tributary {:?}", self.spec.set());
|
||||
|
||||
let transactions = self.block.transactions.clone();
|
||||
for tx in transactions {
|
||||
match tx {
|
||||
TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(ev)) => {
|
||||
// Since the evidence is on the chain, it should already have been validated
|
||||
// We can just punish the signer
|
||||
let data = match ev {
|
||||
Evidence::ConflictingMessages(first, second) => (first, Some(second)),
|
||||
Evidence::InvalidPrecommit(first) | Evidence::InvalidValidRound(first) => (first, None),
|
||||
};
|
||||
let msgs = (
|
||||
decode_signed_message::<TendermintNetwork<D, Transaction, P>>(&data.0).unwrap(),
|
||||
if data.1.is_some() {
|
||||
Some(
|
||||
decode_signed_message::<TendermintNetwork<D, Transaction, P>>(&data.1.unwrap())
|
||||
.unwrap(),
|
||||
)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
);
|
||||
|
||||
// Since anything with evidence is fundamentally faulty behavior, not just temporal
|
||||
// errors, mark the node as fatally slashed
|
||||
self.fatal_slash(msgs.0.msg.sender, &format!("invalid tendermint messages: {msgs:?}"));
|
||||
}
|
||||
TributaryTransaction::Application(tx) => {
|
||||
self.handle_application_tx(tx).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let genesis = self.spec.genesis();
|
||||
|
||||
let current_fatal_slashes = FatalSlashes::get_as_keys(self.txn, genesis);
|
||||
|
||||
// Calculate the shares still present, spinning if not enough are
|
||||
// still_present_shares is used by a below branch, yet it's a natural byproduct of checking if
|
||||
// we should spin, hence storing it in a variable here
|
||||
let still_present_shares = {
|
||||
// Start with the original n value
|
||||
let mut present_shares = self.spec.n(&[]);
|
||||
// Remove everyone fatally slashed
|
||||
for removed in &current_fatal_slashes {
|
||||
let original_i_for_removed =
|
||||
self.spec.i(&[], *removed).expect("removed party was never present");
|
||||
let removed_shares =
|
||||
u16::from(original_i_for_removed.end) - u16::from(original_i_for_removed.start);
|
||||
present_shares -= removed_shares;
|
||||
}
|
||||
|
||||
// Spin if the present shares don't satisfy the required threshold
|
||||
if present_shares < self.spec.t() {
|
||||
loop {
|
||||
log::error!(
|
||||
"fatally slashed so many participants for {:?} we no longer meet the threshold",
|
||||
self.spec.set()
|
||||
);
|
||||
tokio::time::sleep(core::time::Duration::from_secs(60)).await;
|
||||
}
|
||||
}
|
||||
|
||||
present_shares
|
||||
};
|
||||
|
||||
for topic in ReattemptDb::take(self.txn, genesis, self.block_number) {
|
||||
let attempt = AttemptDb::start_next_attempt(self.txn, genesis, topic);
|
||||
log::info!("re-attempting {topic:?} with attempt {attempt}");
|
||||
|
||||
// Slash people who failed to participate as expected in the prior attempt
|
||||
{
|
||||
let prior_attempt = attempt - 1;
|
||||
let (removed, expected_participants) = match topic {
|
||||
Topic::Dkg => {
|
||||
// Every validator who wasn't removed is expected to have participated
|
||||
let removed =
|
||||
crate::tributary::removed_as_of_dkg_attempt(self.txn, genesis, prior_attempt)
|
||||
.expect("prior attempt didn't have its removed saved to disk");
|
||||
let removed_set = removed.iter().copied().collect::<HashSet<_>>();
|
||||
(
|
||||
removed,
|
||||
self
|
||||
.spec
|
||||
.validators()
|
||||
.into_iter()
|
||||
.filter_map(|(validator, _)| {
|
||||
Some(validator).filter(|validator| !removed_set.contains(validator))
|
||||
})
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
Topic::DkgConfirmation => {
|
||||
panic!("TODO: re-attempting DkgConfirmation when we should be re-attempting the Dkg")
|
||||
}
|
||||
Topic::SubstrateSign(_) | Topic::Sign(_) => {
|
||||
let removed =
|
||||
crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
|
||||
.expect("SubstrateSign/Sign yet have yet to set keys");
|
||||
// TODO: If 67% sent preprocesses, this should be them. Else, this should be vec![]
|
||||
let expected_participants = vec![];
|
||||
(removed, expected_participants)
|
||||
}
|
||||
};
|
||||
|
||||
let (expected_topic, expected_label) = match topic {
|
||||
Topic::Dkg => {
|
||||
let n = self.spec.n(&removed);
|
||||
// If we got all the DKG shares, we should be on DKG confirmation
|
||||
let share_spec =
|
||||
DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt: prior_attempt };
|
||||
if DataReceived::get(self.txn, genesis, &share_spec).unwrap_or(0) == n {
|
||||
// Label::Share since there is no Label::Preprocess for DkgConfirmation since the
|
||||
// preprocess is part of Topic::Dkg Label::Share
|
||||
(Topic::DkgConfirmation, Label::Share)
|
||||
} else {
|
||||
let preprocess_spec = DataSpecification {
|
||||
topic: Topic::Dkg,
|
||||
label: Label::Preprocess,
|
||||
attempt: prior_attempt,
|
||||
};
|
||||
// If we got all the DKG preprocesses, DKG shares
|
||||
if DataReceived::get(self.txn, genesis, &preprocess_spec).unwrap_or(0) == n {
|
||||
// Label::Share since there is no Label::Preprocess for DkgConfirmation since the
|
||||
// preprocess is part of Topic::Dkg Label::Share
|
||||
(Topic::Dkg, Label::Share)
|
||||
} else {
|
||||
(Topic::Dkg, Label::Preprocess)
|
||||
}
|
||||
}
|
||||
}
|
||||
Topic::DkgConfirmation => unreachable!(),
|
||||
// If we got enough participants to move forward, then we expect shares from them all
|
||||
Topic::SubstrateSign(_) | Topic::Sign(_) => (topic, Label::Share),
|
||||
};
|
||||
|
||||
let mut did_not_participate = vec![];
|
||||
for expected_participant in expected_participants {
|
||||
if DataDb::get(
|
||||
self.txn,
|
||||
genesis,
|
||||
&DataSpecification {
|
||||
topic: expected_topic,
|
||||
label: expected_label,
|
||||
attempt: prior_attempt,
|
||||
},
|
||||
&expected_participant.to_bytes(),
|
||||
)
|
||||
.is_none()
|
||||
{
|
||||
did_not_participate.push(expected_participant);
|
||||
}
|
||||
}
|
||||
|
||||
// If a supermajority didn't participate as expected, the protocol was likely aborted due
|
||||
// to detection of a completion or some larger networking error
|
||||
// Accordingly, clear did_not_participate
|
||||
// TODO
|
||||
|
||||
// If during the DKG, explicitly mark these people as having been offline
|
||||
// TODO: If they were offline sufficiently long ago, don't strike them off
|
||||
if topic == Topic::Dkg {
|
||||
let mut existing = OfflineDuringDkg::get(self.txn, genesis).unwrap_or(vec![]);
|
||||
for did_not_participate in did_not_participate {
|
||||
existing.push(did_not_participate.to_bytes());
|
||||
}
|
||||
OfflineDuringDkg::set(self.txn, genesis, &existing);
|
||||
}
|
||||
|
||||
// Slash everyone who didn't participate as expected
|
||||
// This may be overzealous as if a minority detects a completion, they'll abort yet the
|
||||
// supermajority will cause the above allowance to not trigger, causing an honest minority
|
||||
// to be slashed
|
||||
// At the end of the protocol, the accumulated slashes are reduced by the amount obtained
|
||||
// by the worst-performing member of the supermajority, and this is expected to
|
||||
// sufficiently compensate for slashes which occur under normal operation
|
||||
// TODO
|
||||
}
|
||||
|
||||
/*
|
||||
All of these have the same common flow:
|
||||
|
||||
1) Check if this re-attempt is actually needed
|
||||
2) If so, dispatch whatever events as needed
|
||||
|
||||
This is because we *always* re-attempt any protocol which had participation. That doesn't
|
||||
mean we *should* re-attempt this protocol.
|
||||
|
||||
The alternatives were:
|
||||
1) Note on-chain we completed a protocol, halting re-attempts upon 34%.
|
||||
2) Vote on-chain to re-attempt a protocol.
|
||||
|
||||
This schema doesn't have any additional messages upon the success case (whereas
|
||||
alternative #1 does) and doesn't have overhead (as alternative #2 does, sending votes and
|
||||
then preprocesses. This only sends preprocesses).
|
||||
*/
|
||||
match topic {
|
||||
Topic::Dkg => {
|
||||
let mut removed = current_fatal_slashes.clone();
|
||||
|
||||
let t = self.spec.t();
|
||||
{
|
||||
let mut present_shares = still_present_shares;
|
||||
|
||||
// Load the parties marked as offline across the various attempts
|
||||
let mut offline = OfflineDuringDkg::get(self.txn, genesis)
|
||||
.unwrap_or(vec![])
|
||||
.iter()
|
||||
.map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap())
|
||||
.collect::<Vec<_>>();
|
||||
// Pop from the list to prioritize the removal of those recently offline
|
||||
while let Some(offline) = offline.pop() {
|
||||
// Make sure they weren't removed already (such as due to being fatally slashed)
|
||||
// This also may trigger if they were offline across multiple attempts
|
||||
if removed.contains(&offline) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// If we can remove them and still meet the threshold, do so
|
||||
let original_i_for_offline =
|
||||
self.spec.i(&[], offline).expect("offline was never present?");
|
||||
let offline_shares =
|
||||
u16::from(original_i_for_offline.end) - u16::from(original_i_for_offline.start);
|
||||
if (present_shares - offline_shares) >= t {
|
||||
present_shares -= offline_shares;
|
||||
removed.push(offline);
|
||||
}
|
||||
|
||||
// If we've removed as many people as we can, break
|
||||
if present_shares == t {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
RemovedAsOfDkgAttempt::set(
|
||||
self.txn,
|
||||
genesis,
|
||||
attempt,
|
||||
&removed.iter().map(<Ristretto as Ciphersuite>::G::to_bytes).collect(),
|
||||
);
|
||||
|
||||
if DkgLocallyCompleted::get(self.txn, genesis).is_none() {
|
||||
let Some(our_i) = self.spec.i(&removed, Ristretto::generator() * self.our_key.deref())
|
||||
else {
|
||||
continue;
|
||||
};
|
||||
|
||||
// Since it wasn't completed, instruct the processor to start the next attempt
|
||||
let id =
|
||||
processor_messages::key_gen::KeyGenId { session: self.spec.set().session, attempt };
|
||||
|
||||
let params =
|
||||
frost::ThresholdParams::new(t, self.spec.n(&removed), our_i.start).unwrap();
|
||||
let shares = u16::from(our_i.end) - u16::from(our_i.start);
|
||||
|
||||
self
|
||||
.processors
|
||||
.send(
|
||||
self.spec.set().network,
|
||||
processor_messages::key_gen::CoordinatorMessage::GenerateKey { id, params, shares },
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
Topic::DkgConfirmation => unreachable!(),
|
||||
Topic::SubstrateSign(inner_id) => {
|
||||
let id = processor_messages::coordinator::SubstrateSignId {
|
||||
session: self.spec.set().session,
|
||||
id: inner_id,
|
||||
attempt,
|
||||
};
|
||||
match inner_id {
|
||||
SubstrateSignableId::CosigningSubstrateBlock(block) => {
|
||||
let block_number = SeraiBlockNumber::get(self.txn, block)
|
||||
.expect("couldn't get the block number for prior attempted cosign");
|
||||
|
||||
// Check if the cosigner has a signature from our set for this block/a newer one
|
||||
let latest_cosign =
|
||||
crate::cosign_evaluator::LatestCosign::get(self.txn, self.spec.set().network)
|
||||
.map_or(0, |cosign| cosign.block_number);
|
||||
if latest_cosign < block_number {
|
||||
// Instruct the processor to start the next attempt
|
||||
self
|
||||
.processors
|
||||
.send(
|
||||
self.spec.set().network,
|
||||
processor_messages::coordinator::CoordinatorMessage::CosignSubstrateBlock {
|
||||
id,
|
||||
block_number,
|
||||
},
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
SubstrateSignableId::Batch(batch) => {
|
||||
// If the Batch hasn't appeared on-chain...
|
||||
if BatchInstructionsHashDb::get(self.txn, self.spec.set().network, batch).is_none() {
|
||||
// Instruct the processor to start the next attempt
|
||||
// The processor won't continue if it's already signed a Batch
|
||||
// Prior checking if the Batch is on-chain just may reduce the non-participating
|
||||
// 33% from publishing their re-attempt messages
|
||||
self
|
||||
.processors
|
||||
.send(
|
||||
self.spec.set().network,
|
||||
processor_messages::coordinator::CoordinatorMessage::BatchReattempt { id },
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
SubstrateSignableId::SlashReport => {
|
||||
// If this Tributary hasn't been retired...
|
||||
// (published SlashReport/took too long to do so)
|
||||
if crate::RetiredTributaryDb::get(self.txn, self.spec.set()).is_none() {
|
||||
let report = SlashReport::get(self.txn, self.spec.set())
|
||||
.expect("re-attempting signing a SlashReport we don't have?");
|
||||
self
|
||||
.processors
|
||||
.send(
|
||||
self.spec.set().network,
|
||||
processor_messages::coordinator::CoordinatorMessage::SignSlashReport {
|
||||
id,
|
||||
report,
|
||||
},
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Topic::Sign(id) => {
|
||||
// Instruct the processor to start the next attempt
|
||||
// If it has already noted a completion, it won't send a preprocess and will simply drop
|
||||
// the re-attempt message
|
||||
self
|
||||
.processors
|
||||
.send(
|
||||
self.spec.set().network,
|
||||
processor_messages::sign::CoordinatorMessage::Reattempt {
|
||||
id: processor_messages::sign::SignId {
|
||||
session: self.spec.set().session,
|
||||
id,
|
||||
attempt,
|
||||
},
|
||||
},
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if Some(u64::from(self.block_number)) == SlashReportCutOff::get(self.txn, genesis) {
|
||||
// Grab every slash report
|
||||
let mut all_reports = vec![];
|
||||
for (i, (validator, _)) in self.spec.validators().into_iter().enumerate() {
|
||||
let Some(mut report) = SlashReports::get(self.txn, genesis, validator.to_bytes()) else {
|
||||
continue;
|
||||
};
|
||||
// Assign them 0 points for themselves
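// (their submitted report doesn't include an entry for themselves, so inserting at their own
// index keeps indices aligned with the validator list)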
|
||||
report.insert(i, 0);
|
||||
// Uses &[] as we only need the length which is independent to who else was removed
|
||||
let signer_i = self.spec.i(&[], validator).unwrap();
|
||||
let signer_len = u16::from(signer_i.end) - u16::from(signer_i.start);
|
||||
// Push one copy of their report per key share they hold
|
||||
for _ in 0 .. signer_len {
|
||||
all_reports.push(report.clone());
|
||||
}
|
||||
}
|
||||
|
||||
// For each participant, grab their median
|
||||
let mut medians = vec![];
|
||||
for p in 0 .. self.spec.validators().len() {
|
||||
let mut median_calc = vec![];
|
||||
for report in &all_reports {
|
||||
median_calc.push(report[p]);
|
||||
}
|
||||
median_calc.sort_unstable();
|
||||
medians.push(median_calc[median_calc.len() / 2]);
|
||||
}
|
||||
|
||||
// Grab the points of the last party within the best-performing threshold
|
||||
// This is done by first expanding the point values by the amount of shares
|
||||
let mut sorted_medians = vec![];
|
||||
for (i, (_, shares)) in self.spec.validators().into_iter().enumerate() {
|
||||
for _ in 0 .. shares {
|
||||
sorted_medians.push(medians[i]);
|
||||
}
|
||||
}
|
||||
// Then performing the sort
|
||||
sorted_medians.sort_unstable();
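// sorted_medians has one entry per key share, so index (t - 1) is the t-th lowest median,
// i.e. the worst-performing share still within the best-performing threshold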
|
||||
let worst_points_by_party_within_threshold = sorted_medians[usize::from(self.spec.t()) - 1];
|
||||
|
||||
// Reduce everyone's points by this value
|
||||
for median in &mut medians {
|
||||
*median = median.saturating_sub(worst_points_by_party_within_threshold);
|
||||
}
|
||||
|
||||
// The threshold now has the proper incentive to report this as they no longer suffer
|
||||
// negative effects
|
||||
//
|
||||
// Additionally, if all validators had degraded performance, they don't all get penalized for
|
||||
// what's likely outside their control (as it occurred universally)
|
||||
|
||||
// Mark everyone fatally slashed with u32::MAX
|
||||
for (i, (validator, _)) in self.spec.validators().into_iter().enumerate() {
|
||||
if FatallySlashed::get(self.txn, genesis, validator.to_bytes()).is_some() {
|
||||
medians[i] = u32::MAX;
|
||||
}
|
||||
}
|
||||
|
||||
let mut report = vec![];
|
||||
for (i, (validator, _)) in self.spec.validators().into_iter().enumerate() {
|
||||
if medians[i] != 0 {
|
||||
report.push((validator.to_bytes(), medians[i]));
|
||||
}
|
||||
}
|
||||
|
||||
// This does lock in the report, meaning further slash point accumulations won't be reported
|
||||
// They still have value to be locally tracked due to local decisions made based off
|
||||
// accumulated slash reports
|
||||
SlashReport::set(self.txn, self.spec.set(), &report);
|
||||
|
||||
// Start a signing protocol for this
|
||||
self
|
||||
.processors
|
||||
.send(
|
||||
self.spec.set().network,
|
||||
processor_messages::coordinator::CoordinatorMessage::SignSlashReport {
|
||||
id: SubstrateSignId {
|
||||
session: self.spec.set().session,
|
||||
id: SubstrateSignableId::SlashReport,
|
||||
attempt: 0,
|
||||
},
|
||||
report,
|
||||
},
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub(crate) async fn handle_new_blocks<
|
||||
D: Db,
|
||||
Pro: Processors,
|
||||
PST: PublishSeraiTransaction,
|
||||
PTT: PTTTrait,
|
||||
RID: RIDTrait,
|
||||
P: P2p,
|
||||
>(
|
||||
db: &mut D,
|
||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
recognized_id: &RID,
|
||||
processors: &Pro,
|
||||
publish_serai_tx: &PST,
|
||||
publish_tributary_tx: &PTT,
|
||||
spec: &TributarySpec,
|
||||
tributary: &TributaryReader<D, Transaction>,
|
||||
) {
|
||||
let genesis = tributary.genesis();
|
||||
let mut last_block = LastHandledBlock::get(db, genesis).unwrap_or(genesis);
|
||||
let mut block_number = TributaryBlockNumber::get(db, last_block).unwrap_or(0);
|
||||
while let Some(next) = tributary.block_after(&last_block) {
|
||||
let block = tributary.block(&next).unwrap();
|
||||
block_number += 1;
|
||||
|
||||
// Make sure we have all of the provided transactions for this block
|
||||
for tx in &block.transactions {
|
||||
// Provided TXs will appear first in the Block, so we can break after we hit a non-Provided
|
||||
let TransactionKind::Provided(order) = tx.kind() else {
|
||||
break;
|
||||
};
|
||||
|
||||
// make sure we have all the provided txs in this block locally
|
||||
if !tributary.locally_provided_txs_in_block(&block.hash(), order) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
let mut db_clone = db.clone();
|
||||
let mut txn = db_clone.txn();
|
||||
TributaryBlockNumber::set(&mut txn, next, &block_number);
|
||||
(TributaryBlockHandler {
|
||||
db,
|
||||
txn: &mut txn,
|
||||
spec,
|
||||
our_key: key,
|
||||
recognized_id,
|
||||
processors,
|
||||
publish_serai_tx,
|
||||
publish_tributary_tx,
|
||||
block,
|
||||
block_number,
|
||||
_p2p: PhantomData::<P>,
|
||||
})
|
||||
.handle()
|
||||
.await;
|
||||
last_block = next;
|
||||
LastHandledBlock::set(&mut txn, genesis, &next);
|
||||
txn.commit();
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn scan_tributaries_task<
|
||||
D: Db,
|
||||
Pro: Processors,
|
||||
P: P2p,
|
||||
RID: 'static + Send + Sync + Clone + RIDTrait,
|
||||
>(
|
||||
raw_db: D,
|
||||
key: Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
recognized_id: RID,
|
||||
processors: Pro,
|
||||
serai: Arc<Serai>,
|
||||
mut tributary_event: broadcast::Receiver<crate::TributaryEvent<D, P>>,
|
||||
) {
|
||||
log::info!("scanning tributaries");
|
||||
|
||||
loop {
|
||||
match tributary_event.recv().await {
|
||||
Ok(crate::TributaryEvent::NewTributary(crate::ActiveTributary { spec, tributary })) => {
|
||||
// For each Tributary, spawn a dedicated scanner task
|
||||
tokio::spawn({
|
||||
let raw_db = raw_db.clone();
|
||||
let key = key.clone();
|
||||
let recognized_id = recognized_id.clone();
|
||||
let processors = processors.clone();
|
||||
let serai = serai.clone();
|
||||
async move {
|
||||
let spec = &spec;
|
||||
let reader = tributary.reader();
|
||||
let mut tributary_db = raw_db.clone();
|
||||
loop {
|
||||
// Check if the set was retired, and if so, don't further operate
|
||||
if crate::db::RetiredTributaryDb::get(&raw_db, spec.set()).is_some() {
|
||||
break;
|
||||
}
|
||||
|
||||
// Obtain the next block notification now to prevent obtaining it immediately after
|
||||
// the next block occurs
|
||||
let next_block_notification = tributary.next_block_notification().await;
|
||||
|
||||
handle_new_blocks::<_, _, _, _, _, P>(
|
||||
&mut tributary_db,
|
||||
&key,
|
||||
&recognized_id,
|
||||
&processors,
|
||||
&*serai,
|
||||
&|tx: Transaction| {
|
||||
let tributary = tributary.clone();
|
||||
async move {
|
||||
match tributary.add_transaction(tx.clone()).await {
|
||||
Ok(_) => {}
|
||||
// Can happen as this occurs on a distinct DB TXN
|
||||
Err(TransactionError::InvalidNonce) => {
|
||||
log::warn!(
|
||||
"publishing TX {tx:?} returned InvalidNonce. was it already added?"
|
||||
)
|
||||
}
|
||||
Err(e) => panic!("created an invalid transaction: {e:?}"),
|
||||
}
|
||||
}
|
||||
},
|
||||
spec,
|
||||
&reader,
|
||||
)
|
||||
.await;
|
||||
|
||||
// Run either when the notification fires, or every interval of block_time
|
||||
let _ = tokio::time::timeout(
|
||||
Duration::from_secs(tributary::Tributary::<D, Transaction, P>::block_time().into()),
|
||||
next_block_notification,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
// The above loop simply checks the DB every few seconds, voiding the need for this event
|
||||
Ok(crate::TributaryEvent::TributaryRetired(_)) => {}
|
||||
Err(broadcast::error::RecvError::Lagged(_)) => {
|
||||
panic!("scan_tributaries lagged to handle tributary_event")
|
||||
}
|
||||
Err(broadcast::error::RecvError::Closed) => panic!("tributary_event sender closed"),
|
||||
}
|
||||
}
|
||||
}
|
||||
328
coordinator/src/tributary/signing_protocol.rs
Normal file
@@ -0,0 +1,328 @@
|
||||
/*
|
||||
A MuSig-based signing protocol executed with the validators' keys.
|
||||
|
||||
This is used for confirming the results of a DKG on-chain, an operation requiring all validators
|
||||
which aren't specified as removed while still satisfying a supermajority.
|
||||
|
||||
Since we're using the validator's keys, as needed for their being the root of trust, the
|
||||
coordinator must perform the signing. This is distinct from all other group-signing operations,
|
||||
as they're all done by the processor.
|
||||
|
||||
The MuSig-aggregation achieves on-chain efficiency and enables a more secure design pattern.
|
||||
While we could individually track votes, that'd require logic to prevent voting multiple times and
|
||||
tracking the accumulated votes. MuSig-aggregation simply requires checking the list is sorted and
|
||||
the list's weight exceeds the threshold.
|
||||
|
||||
Instead of maintaining state in memory, a combination of the DB and re-execution is used. This
|
||||
is deemed acceptable re: performance as:
|
||||
|
||||
1) This is only done prior to a DKG being confirmed on Substrate and is assumed infrequent.
|
||||
2) This is an O(n) algorithm.
|
||||
3) The size of the validator set is bounded by MAX_KEY_SHARES_PER_SET.
|
||||
|
||||
Accordingly, this should be tolerable.
|
||||
|
||||
As for safety, it is explicitly unsafe to reuse nonces across signing sessions. This raises
|
||||
concerns regarding our re-execution which is dependent on fixed nonces. Safety is derived from
|
||||
the nonces being context-bound under a BFT protocol. The flow is as follows:
|
||||
|
||||
1) Decide the nonce.
|
||||
2) Publish the nonces' commitments, receiving everyone else's *and potentially the message to be
|
||||
signed*.
|
||||
3) Sign and publish the signature share.
|
||||
|
||||
In order for nonce re-use to occur, the received nonce commitments (or the message to be signed)
|
||||
would have to be distinct and sign would have to be called again.
|
||||
|
||||
Before we act on any received messages, they're ordered and finalized by a BFT algorithm. The
|
||||
only way to operate on distinct received messages would be if:
|
||||
|
||||
1) A logical flaw exists, letting new messages overwrite prior messages
|
||||
2) A reorganization occurred from chain A to chain B, and with it, different messages
|
||||
|
||||
Reorganizations are not supported, as BFT is assumed by the presence of a BFT algorithm. While
|
||||
a significant amount of processes may be byzantine, leading to BFT being broken, that still will
|
||||
not trigger a reorganization. The only way to move to a distinct chain, with distinct messages,
|
||||
would be by rebuilding the local process (this time following chain B). Upon any complete
|
||||
rebuild, we'd re-decide nonces, achieving safety. This does set a bound preventing partial
|
||||
rebuilds which is accepted.
|
||||
|
||||
Additionally, to ensure a rebuilt service isn't flagged as malicious, we have to check the
|
||||
commitments generated from the decided nonces are in fact its commitments on-chain (TODO).
|
||||
|
||||
TODO: We also need to review how we're handling Processor preprocesses and likely implement the
|
||||
same on-chain-preprocess-matches-presumed-preprocess check before publishing shares.
|
||||
*/
|
||||
|
||||
use core::ops::Deref;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use zeroize::{Zeroize, Zeroizing};
|
||||
|
||||
use rand_core::OsRng;
|
||||
|
||||
use blake2::{Digest, Blake2s256};
|
||||
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{
|
||||
group::{ff::PrimeField, GroupEncoding},
|
||||
Ciphersuite,
|
||||
};
|
||||
use dkg_musig::musig;
|
||||
use frost::{FrostError, dkg::Participant, ThresholdKeys, sign::*};
|
||||
use frost_schnorrkel::Schnorrkel;
|
||||
|
||||
use scale::Encode;
|
||||
|
||||
use serai_client::{
|
||||
Public,
|
||||
validator_sets::primitives::{KeyPair, musig_context, set_keys_message},
|
||||
};
|
||||
|
||||
use serai_db::*;
|
||||
|
||||
use crate::tributary::TributarySpec;
|
||||
|
||||
create_db!(
|
||||
SigningProtocolDb {
|
||||
CachedPreprocesses: (context: &impl Encode) -> [u8; 32]
|
||||
}
|
||||
);
|
||||
|
||||
struct SigningProtocol<'a, T: DbTxn, C: Encode> {
|
||||
pub(crate) key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
pub(crate) spec: &'a TributarySpec,
|
||||
pub(crate) txn: &'a mut T,
|
||||
pub(crate) context: C,
|
||||
}
|
||||
|
||||
impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
|
||||
fn preprocess_internal(
|
||||
&mut self,
|
||||
participants: &[<Ristretto as Ciphersuite>::G],
|
||||
) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {
|
||||
// Encrypt the cached preprocess as recovery of it will enable recovering the private key
|
||||
// While the DB isn't expected to be arbitrarily readable, it isn't a proper secret store and
|
||||
// shouldn't be trusted as one
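// The encryption key is a hash binding this protocol's context and our private key, so only
// this validator (for this context) can recover the cached preprocess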
|
||||
let mut encryption_key = {
|
||||
let mut encryption_key_preimage =
|
||||
Zeroizing::new(b"Cached Preprocess Encryption Key".to_vec());
|
||||
encryption_key_preimage.extend(self.context.encode());
|
||||
let repr = Zeroizing::new(self.key.to_repr());
|
||||
encryption_key_preimage.extend(repr.deref());
|
||||
Blake2s256::digest(&encryption_key_preimage)
|
||||
};
|
||||
let encryption_key_slice: &mut [u8] = encryption_key.as_mut();
|
||||
|
||||
let algorithm = Schnorrkel::new(b"substrate");
|
||||
let keys: ThresholdKeys<Ristretto> =
|
||||
musig(musig_context(self.spec.set().into()), self.key.clone(), participants)
|
||||
.expect("signing for a set we aren't in/validator present multiple times")
|
||||
.into();
|
||||
|
||||
if CachedPreprocesses::get(self.txn, &self.context).is_none() {
|
||||
let (machine, _) =
|
||||
AlgorithmMachine::new(algorithm.clone(), keys.clone()).preprocess(&mut OsRng);
|
||||
|
||||
let mut cache = machine.cache();
|
||||
assert_eq!(cache.0.len(), 32);
|
||||
#[allow(clippy::needless_range_loop)]
|
||||
for b in 0 .. 32 {
|
||||
cache.0[b] ^= encryption_key_slice[b];
|
||||
}
|
||||
|
||||
CachedPreprocesses::set(self.txn, &self.context, &cache.0);
|
||||
}
|
||||
|
||||
let cached = CachedPreprocesses::get(self.txn, &self.context).unwrap();
|
||||
let mut cached: Zeroizing<[u8; 32]> = Zeroizing::new(cached);
|
||||
#[allow(clippy::needless_range_loop)]
|
||||
for b in 0 .. 32 {
|
||||
cached[b] ^= encryption_key_slice[b];
|
||||
}
|
||||
encryption_key_slice.zeroize();
|
||||
let (machine, preprocess) =
|
||||
AlgorithmSignMachine::from_cache(algorithm, keys, CachedPreprocess(cached));
|
||||
|
||||
(machine, preprocess.serialize().try_into().unwrap())
|
||||
}
|
||||
|
||||
fn share_internal(
|
||||
&mut self,
|
||||
participants: &[<Ristretto as Ciphersuite>::G],
|
||||
mut serialized_preprocesses: HashMap<Participant, Vec<u8>>,
|
||||
msg: &[u8],
|
||||
) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
|
||||
let machine = self.preprocess_internal(participants).0;
|
||||
|
||||
let mut participants = serialized_preprocesses.keys().copied().collect::<Vec<_>>();
|
||||
participants.sort();
|
||||
let mut preprocesses = HashMap::new();
|
||||
for participant in participants {
|
||||
preprocesses.insert(
|
||||
participant,
|
||||
machine
|
||||
.read_preprocess(&mut serialized_preprocesses.remove(&participant).unwrap().as_slice())
|
||||
.map_err(|_| participant)?,
|
||||
);
|
||||
}
|
||||
|
||||
let (machine, share) = machine.sign(preprocesses, msg).map_err(|e| match e {
|
||||
FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"),
|
||||
FrostError::InvalidParticipant(_, _) |
|
||||
FrostError::InvalidSigningSet(_) |
|
||||
FrostError::InvalidParticipantQuantity(_, _) |
|
||||
FrostError::DuplicatedParticipant(_) |
|
||||
FrostError::MissingParticipant(_) => unreachable!("{e:?}"),
|
||||
FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,
|
||||
})?;
|
||||
|
||||
Ok((machine, share.serialize().try_into().unwrap()))
|
||||
}
|
||||
|
||||
fn complete_internal(
|
||||
machine: AlgorithmSignatureMachine<Ristretto, Schnorrkel>,
|
||||
shares: HashMap<Participant, Vec<u8>>,
|
||||
) -> Result<[u8; 64], Participant> {
|
||||
let shares = shares
|
||||
.into_iter()
|
||||
.map(|(p, share)| {
|
||||
machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p)
|
||||
})
|
||||
.collect::<Result<HashMap<_, _>, _>>()?;
|
||||
let signature = machine.complete(shares).map_err(|e| match e {
|
||||
FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"),
|
||||
FrostError::InvalidParticipant(_, _) |
|
||||
FrostError::InvalidSigningSet(_) |
|
||||
FrostError::InvalidParticipantQuantity(_, _) |
|
||||
FrostError::DuplicatedParticipant(_) |
|
||||
FrostError::MissingParticipant(_) => unreachable!("{e:?}"),
|
||||
FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,
|
||||
})?;
|
||||
Ok(signature.to_bytes())
|
||||
}
|
||||
}
|
||||
|
||||
// Get the keys of the participants, noted by their threshold `i`s, and return a new map indexed by
|
||||
// the MuSig `i`s.
|
||||
fn threshold_i_map_to_keys_and_musig_i_map(
|
||||
spec: &TributarySpec,
|
||||
removed: &[<Ristretto as Ciphersuite>::G],
|
||||
our_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
mut map: HashMap<Participant, Vec<u8>>,
|
||||
) -> (Vec<<Ristretto as Ciphersuite>::G>, HashMap<Participant, Vec<u8>>) {
|
||||
// Insert our own index so calculations aren't offset
|
||||
let our_threshold_i = spec
|
||||
.i(removed, <Ristretto as Ciphersuite>::generator() * our_key.deref())
|
||||
.expect("MuSig t-of-n signing a for a protocol we were removed from")
|
||||
.start;
|
||||
assert!(map.insert(our_threshold_i, vec![]).is_none());
|
||||
|
||||
let spec_validators = spec.validators();
|
||||
let key_from_threshold_i = |threshold_i| {
|
||||
for (key, _) in &spec_validators {
|
||||
if threshold_i == spec.i(removed, *key).expect("MuSig t-of-n participant was removed").start {
|
||||
return *key;
|
||||
}
|
||||
}
|
||||
panic!("requested info for threshold i which doesn't exist")
|
||||
};
|
||||
|
||||
let mut sorted = vec![];
|
||||
let mut threshold_is = map.keys().copied().collect::<Vec<_>>();
|
||||
threshold_is.sort();
|
||||
for threshold_i in threshold_is {
|
||||
sorted.push((key_from_threshold_i(threshold_i), map.remove(&threshold_i).unwrap()));
|
||||
}
|
||||
|
||||
// Now that signers are sorted, with their shares, create a map with the `i`s needed for MuSig
|
||||
let mut participants = vec![];
|
||||
let mut map = HashMap::new();
|
||||
for (raw_i, (key, share)) in sorted.into_iter().enumerate() {
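// MuSig participant indices are 1-indexed, hence the raw_i + 1 below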
|
||||
let musig_i = u16::try_from(raw_i).unwrap() + 1;
|
||||
participants.push(key);
|
||||
map.insert(Participant::new(musig_i).unwrap(), share);
|
||||
}
|
||||
|
||||
map.remove(&our_threshold_i).unwrap();
|
||||
|
||||
(participants, map)
|
||||
}
|
||||
|
||||
type DkgConfirmerSigningProtocol<'a, T> = SigningProtocol<'a, T, (&'static [u8; 12], u32)>;
|
||||
|
||||
pub(crate) struct DkgConfirmer<'a, T: DbTxn> {
|
||||
key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
spec: &'a TributarySpec,
|
||||
removed: Vec<<Ristretto as Ciphersuite>::G>,
|
||||
txn: &'a mut T,
|
||||
attempt: u32,
|
||||
}
|
||||
|
||||
impl<T: DbTxn> DkgConfirmer<'_, T> {
|
||||
pub(crate) fn new<'a>(
|
||||
key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
spec: &'a TributarySpec,
|
||||
txn: &'a mut T,
|
||||
attempt: u32,
|
||||
) -> Option<DkgConfirmer<'a, T>> {
|
||||
// This relies on how confirmations are inlined into the DKG protocol and they accordingly
|
||||
// share attempts
|
||||
let removed = crate::tributary::removed_as_of_dkg_attempt(txn, spec.genesis(), attempt)?;
|
||||
Some(DkgConfirmer { key, spec, removed, txn, attempt })
|
||||
}
|
||||
fn signing_protocol(&mut self) -> DkgConfirmerSigningProtocol<'_, T> {
|
||||
let context = (b"DkgConfirmer", self.attempt);
|
||||
SigningProtocol { key: self.key, spec: self.spec, txn: self.txn, context }
|
||||
}
|
||||
|
||||
fn preprocess_internal(&mut self) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {
|
||||
let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();
|
||||
self.signing_protocol().preprocess_internal(&participants)
|
||||
}
|
||||
// Get the preprocess for this confirmation.
|
||||
pub(crate) fn preprocess(&mut self) -> [u8; 64] {
|
||||
self.preprocess_internal().1
|
||||
}
|
||||
|
||||
fn share_internal(
|
||||
&mut self,
|
||||
preprocesses: HashMap<Participant, Vec<u8>>,
|
||||
key_pair: &KeyPair,
|
||||
) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
|
||||
let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();
|
||||
let preprocesses =
|
||||
threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, preprocesses).1;
|
||||
let msg = set_keys_message(
|
||||
&self.spec.set(),
|
||||
&self.removed.iter().map(|key| Public::from(key.to_bytes())).collect::<Vec<_>>(),
|
||||
key_pair,
|
||||
);
|
||||
self.signing_protocol().share_internal(&participants, preprocesses, &msg)
|
||||
}
|
||||
// Get the share for this confirmation, if the preprocesses are valid.
|
||||
pub(crate) fn share(
|
||||
&mut self,
|
||||
preprocesses: HashMap<Participant, Vec<u8>>,
|
||||
key_pair: &KeyPair,
|
||||
) -> Result<[u8; 32], Participant> {
|
||||
self.share_internal(preprocesses, key_pair).map(|(_, share)| share)
|
||||
}
|
||||
|
||||
pub(crate) fn complete(
|
||||
&mut self,
|
||||
preprocesses: HashMap<Participant, Vec<u8>>,
|
||||
key_pair: &KeyPair,
|
||||
shares: HashMap<Participant, Vec<u8>>,
|
||||
) -> Result<[u8; 64], Participant> {
|
||||
let shares =
|
||||
threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, shares).1;
|
||||
|
||||
let machine = self
|
||||
.share_internal(preprocesses, key_pair)
|
||||
.expect("trying to complete a machine which failed to preprocess")
|
||||
.0;
|
||||
|
||||
DkgConfirmerSigningProtocol::<'_, T>::complete_internal(machine, shares)
|
||||
}
|
||||
}
|
||||
157
coordinator/src/tributary/spec.rs
Normal file
@@ -0,0 +1,157 @@
|
||||
use core::{ops::Range, fmt::Debug};
|
||||
use std::{io, collections::HashMap};
|
||||
|
||||
use transcript::{Transcript, RecommendedTranscript};
|
||||
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite};
|
||||
use frost::Participant;
|
||||
|
||||
use scale::Encode;
|
||||
use borsh::{BorshSerialize, BorshDeserialize};
|
||||
|
||||
use serai_client::{primitives::PublicKey, validator_sets::primitives::ExternalValidatorSet};
|
||||
|
||||
fn borsh_serialize_validators<W: io::Write>(
|
||||
validators: &Vec<(<Ristretto as Ciphersuite>::G, u16)>,
|
||||
writer: &mut W,
|
||||
) -> Result<(), io::Error> {
|
||||
let len = u16::try_from(validators.len()).unwrap();
|
||||
BorshSerialize::serialize(&len, writer)?;
|
||||
for validator in validators {
|
||||
BorshSerialize::serialize(&validator.0.to_bytes(), writer)?;
|
||||
BorshSerialize::serialize(&validator.1, writer)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn borsh_deserialize_validators<R: io::Read>(
|
||||
reader: &mut R,
|
||||
) -> Result<Vec<(<Ristretto as Ciphersuite>::G, u16)>, io::Error> {
|
||||
let len: u16 = BorshDeserialize::deserialize_reader(reader)?;
|
||||
let mut res = vec![];
|
||||
for _ in 0 .. len {
|
||||
let compressed: [u8; 32] = BorshDeserialize::deserialize_reader(reader)?;
|
||||
let point = Option::from(<Ristretto as Ciphersuite>::G::from_bytes(&compressed))
|
||||
.ok_or_else(|| io::Error::other("invalid point for validator"))?;
|
||||
let weight: u16 = BorshDeserialize::deserialize_reader(reader)?;
|
||||
res.push((point, weight));
|
||||
}
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub struct TributarySpec {
|
||||
serai_block: [u8; 32],
|
||||
start_time: u64,
|
||||
set: ExternalValidatorSet,
|
||||
#[borsh(
|
||||
serialize_with = "borsh_serialize_validators",
|
||||
deserialize_with = "borsh_deserialize_validators"
|
||||
)]
|
||||
validators: Vec<(<Ristretto as Ciphersuite>::G, u16)>,
|
||||
}
|
||||
|
||||
impl TributarySpec {
|
||||
pub fn new(
|
||||
serai_block: [u8; 32],
|
||||
start_time: u64,
|
||||
set: ExternalValidatorSet,
|
||||
set_participants: Vec<(PublicKey, u16)>,
|
||||
) -> TributarySpec {
|
||||
let mut validators = vec![];
|
||||
for (participant, shares) in set_participants {
|
||||
let participant = <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut participant.0.as_ref())
|
||||
.expect("invalid key registered as participant");
|
||||
validators.push((participant, shares));
|
||||
}
|
||||
|
||||
Self { serai_block, start_time, set, validators }
|
||||
}
|
||||
|
||||
pub fn set(&self) -> ExternalValidatorSet {
|
||||
self.set
|
||||
}
|
||||
|
||||
pub fn genesis(&self) -> [u8; 32] {
|
||||
// Calculate the genesis for this Tributary
|
||||
let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis");
|
||||
// This locks it to a specific Serai chain
|
||||
genesis.append_message(b"serai_block", self.serai_block);
|
||||
genesis.append_message(b"session", self.set.session.0.to_le_bytes());
|
||||
genesis.append_message(b"network", self.set.network.encode());
|
||||
let genesis = genesis.challenge(b"genesis");
|
||||
let genesis_ref: &[u8] = genesis.as_ref();
|
||||
genesis_ref[.. 32].try_into().unwrap()
|
||||
}
|
||||
|
||||
pub fn start_time(&self) -> u64 {
|
||||
self.start_time
|
||||
}
|
||||
|
||||
pub fn n(&self, removed_validators: &[<Ristretto as Ciphersuite>::G]) -> u16 {
|
||||
self
|
||||
.validators
|
||||
.iter()
|
||||
.map(|(validator, weight)| if removed_validators.contains(validator) { 0 } else { *weight })
|
||||
.sum()
|
||||
}
|
||||
|
||||
pub fn t(&self) -> u16 {
|
||||
// t doesn't change with regards to the amount of removed validators
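// e.g. for n(&[]) == 100 key shares, t == ((2 * 100) / 3) + 1 == 67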
|
||||
((2 * self.n(&[])) / 3) + 1
|
||||
}
|
||||
|
||||
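// Returns the range of threshold `i`s for the given key's shares, shifted down to account for
// any removed validators, or None if the key was removed or was never present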
pub fn i(
|
||||
&self,
|
||||
removed_validators: &[<Ristretto as Ciphersuite>::G],
|
||||
key: <Ristretto as Ciphersuite>::G,
|
||||
) -> Option<Range<Participant>> {
|
||||
let mut all_is = HashMap::new();
|
||||
let mut i = 1;
|
||||
for (validator, weight) in &self.validators {
|
||||
all_is.insert(
|
||||
*validator,
|
||||
Range { start: Participant::new(i).unwrap(), end: Participant::new(i + weight).unwrap() },
|
||||
);
|
||||
i += weight;
|
||||
}
|
||||
|
||||
let original_i = all_is.get(&key)?.clone();
|
||||
let mut result_i = original_i.clone();
|
||||
for removed_validator in removed_validators {
|
||||
let removed_i = all_is
|
||||
.get(removed_validator)
|
||||
.expect("removed validator wasn't present in set to begin with");
|
||||
// If the queried key was removed, return None
|
||||
if &original_i == removed_i {
|
||||
return None;
|
||||
}
|
||||
|
||||
// If the removed was before the queried, shift the queried down accordingly
|
||||
if removed_i.start < original_i.start {
|
||||
let removed_shares = u16::from(removed_i.end) - u16::from(removed_i.start);
|
||||
result_i.start = Participant::new(u16::from(original_i.start) - removed_shares).unwrap();
|
||||
result_i.end = Participant::new(u16::from(original_i.end) - removed_shares).unwrap();
|
||||
}
|
||||
}
|
||||
Some(result_i)
|
||||
}
|
||||
|
||||
pub fn reverse_lookup_i(
|
||||
&self,
|
||||
removed_validators: &[<Ristretto as Ciphersuite>::G],
|
||||
i: Participant,
|
||||
) -> Option<<Ristretto as Ciphersuite>::G> {
|
||||
for (validator, _) in &self.validators {
|
||||
if self.i(removed_validators, *validator).map_or(false, |range| range.contains(&i)) {
|
||||
return Some(*validator);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn validators(&self) -> Vec<(<Ristretto as Ciphersuite>::G, u64)> {
|
||||
self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect()
|
||||
}
|
||||
}
|
||||
@@ -4,335 +4,713 @@ use std::io;
|
||||
use zeroize::Zeroizing;
|
||||
use rand_core::{RngCore, CryptoRng};
|
||||
|
||||
use blake2::{digest::typenum::U32, Digest, Blake2b};
|
||||
use blake2::{Digest, Blake2s256};
|
||||
use transcript::{Transcript, RecommendedTranscript};
|
||||
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{
|
||||
group::{ff::Field, GroupEncoding},
|
||||
Ciphersuite, Ristretto,
|
||||
Ciphersuite,
|
||||
};
|
||||
use schnorr::SchnorrSignature;
|
||||
use frost::Participant;
|
||||
|
||||
use scale::Encode;
|
||||
use borsh::{BorshSerialize, BorshDeserialize};
|
||||
|
||||
use serai_client::{primitives::SeraiAddress, validator_sets::primitives::MAX_KEY_SHARES_PER_SET};
|
||||
|
||||
use messages::sign::VariantSignId;
|
||||
use scale::{Encode, Decode};
|
||||
use processor_messages::coordinator::SubstrateSignableId;
|
||||
|
||||
use tributary::{
|
||||
ReadWrite,
|
||||
transaction::{
|
||||
Signed as TributarySigned, TransactionError, TransactionKind, Transaction as TransactionTrait,
|
||||
},
|
||||
TRANSACTION_SIZE_LIMIT, ReadWrite,
|
||||
transaction::{Signed, TransactionError, TransactionKind, Transaction as TransactionTrait},
|
||||
};
|
||||
|
||||
/// The round this data is for, within a signing protocol.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
|
||||
pub enum SigningProtocolRound {
|
||||
/// A preprocess.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode)]
|
||||
pub enum Label {
|
||||
Preprocess,
|
||||
/// A signature share.
|
||||
Share,
|
||||
}
|
||||
|
||||
impl SigningProtocolRound {
|
||||
fn nonce(&self) -> u32 {
|
||||
impl Label {
|
||||
// TODO: Should nonces be u8 thanks to our use of topics?
|
||||
pub fn nonce(&self) -> u32 {
|
||||
match self {
|
||||
SigningProtocolRound::Preprocess => 0,
|
||||
SigningProtocolRound::Share => 1,
|
||||
Label::Preprocess => 0,
|
||||
Label::Share => 1,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// `tributary::Signed` but without the nonce.
|
||||
///
|
||||
/// All of our nonces are deterministic given the type of transaction and the fields within.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
|
||||
pub struct Signed {
|
||||
/// The signer.
|
||||
pub signer: <Ristretto as Ciphersuite>::G,
|
||||
/// The signature.
|
||||
pub signature: SchnorrSignature<Ristretto>,
|
||||
#[derive(Clone, PartialEq, Eq)]
|
||||
pub struct SignData<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> {
|
||||
pub plan: Id,
|
||||
pub attempt: u32,
|
||||
pub label: Label,
|
||||
|
||||
pub data: Vec<Vec<u8>>,
|
||||
|
||||
pub signed: Signed,
|
||||
}
|
||||
|
||||
impl BorshSerialize for Signed {
|
||||
fn serialize<W: io::Write>(&self, writer: &mut W) -> Result<(), io::Error> {
|
||||
writer.write_all(self.signer.to_bytes().as_ref())?;
|
||||
self.signature.write(writer)
|
||||
}
|
||||
}
|
||||
impl BorshDeserialize for Signed {
|
||||
fn deserialize_reader<R: io::Read>(reader: &mut R) -> Result<Self, io::Error> {
|
||||
let signer = Ristretto::read_G(reader)?;
|
||||
let signature = SchnorrSignature::read(reader)?;
|
||||
Ok(Self { signer, signature })
|
||||
impl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> Debug for SignData<Id> {
|
||||
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
|
||||
fmt
|
||||
.debug_struct("SignData")
|
||||
.field("id", &hex::encode(self.plan.encode()))
|
||||
.field("attempt", &self.attempt)
|
||||
.field("label", &self.label)
|
||||
.field("signer", &hex::encode(self.signed.signer.to_bytes()))
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
impl Signed {
|
||||
/// Provide a nonce to convert a `Signed` into a `tributary::Signed`.
|
||||
fn nonce(&self, nonce: u32) -> TributarySigned {
|
||||
TributarySigned { signer: self.signer, nonce, signature: self.signature }
|
||||
impl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> SignData<Id> {
|
||||
pub(crate) fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let plan = Id::decode(&mut scale::IoReader(&mut *reader))
|
||||
.map_err(|_| io::Error::other("invalid plan in SignData"))?;
|
||||
|
||||
let mut attempt = [0; 4];
|
||||
reader.read_exact(&mut attempt)?;
|
||||
let attempt = u32::from_le_bytes(attempt);
|
||||
|
||||
let mut label = [0; 1];
|
||||
reader.read_exact(&mut label)?;
|
||||
let label = match label[0] {
|
||||
0 => Label::Preprocess,
|
||||
1 => Label::Share,
|
||||
_ => Err(io::Error::other("invalid label in SignData"))?,
|
||||
};
|
||||
|
||||
let data = {
|
||||
let mut data_pieces = [0];
|
||||
reader.read_exact(&mut data_pieces)?;
|
||||
if data_pieces[0] == 0 {
|
||||
Err(io::Error::other("zero pieces of data in SignData"))?;
|
||||
}
|
||||
let mut all_data = vec![];
|
||||
for _ in 0 .. data_pieces[0] {
|
||||
let mut data_len = [0; 2];
|
||||
reader.read_exact(&mut data_len)?;
|
||||
let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))];
|
||||
reader.read_exact(&mut data)?;
|
||||
all_data.push(data);
|
||||
}
|
||||
all_data
|
||||
};
|
||||
|
||||
let signed = Signed::read_without_nonce(reader, label.nonce())?;
|
||||
|
||||
Ok(SignData { plan, attempt, label, data, signed })
|
||||
}
|
||||
|
||||
pub(crate) fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_all(&self.plan.encode())?;
|
||||
writer.write_all(&self.attempt.to_le_bytes())?;
|
||||
writer.write_all(&[match self.label {
|
||||
Label::Preprocess => 0,
|
||||
Label::Share => 1,
|
||||
}])?;
|
||||
|
||||
writer.write_all(&[u8::try_from(self.data.len()).unwrap()])?;
|
||||
for data in &self.data {
|
||||
if data.len() > u16::MAX.into() {
|
||||
// Currently, the largest individual preprocess is a Monero transaction
|
||||
// It provides 4 commitments per input (128 bytes), a 64-byte proof for them, along with a
|
||||
// key image and proof (96 bytes)
|
||||
// Even with all of that, we could support 227 inputs in a single TX
|
||||
// Monero is limited to ~120 inputs per TX
|
||||
//
|
||||
// Bitcoin has a much higher input count of 520, yet it only uses 64 bytes per preprocess
|
||||
Err(io::Error::other("signing data exceeded 65535 bytes"))?;
|
||||
}
|
||||
writer.write_all(&u16::try_from(data.len()).unwrap().to_le_bytes())?;
|
||||
writer.write_all(data)?;
|
||||
}
|
||||
|
||||
self.signed.write_without_nonce(writer)
|
||||
}
|
||||
}
|
||||
|
||||
/// The Tributary transaction definition used by Serai
|
||||
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||
#[derive(Clone, PartialEq, Eq)]
|
||||
pub enum Transaction {
|
||||
/// A vote to remove a participant for invalid behavior
|
||||
RemoveParticipant {
|
||||
/// The participant to remove
|
||||
participant: SeraiAddress,
|
||||
/// The transaction's signer and signature
|
||||
RemoveParticipantDueToDkg {
|
||||
participant: <Ristretto as Ciphersuite>::G,
|
||||
signed: Signed,
|
||||
},
|
||||
|
||||
/// A participation in the DKG
|
||||
DkgParticipation {
|
||||
participation: Vec<u8>,
|
||||
/// The transaction's signer and signature
|
||||
signed: Signed,
|
||||
},
|
||||
/// The preprocess to confirm the DKG results on-chain
|
||||
DkgConfirmationPreprocess {
|
||||
/// The attempt number of this signing protocol
|
||||
DkgCommitments {
|
||||
attempt: u32,
|
||||
// The preprocess
|
||||
preprocess: [u8; 64],
|
||||
/// The transaction's signer and signature
|
||||
commitments: Vec<Vec<u8>>,
|
||||
signed: Signed,
|
||||
},
|
||||
/// The signature share to confirm the DKG results on-chain
|
||||
DkgConfirmationShare {
|
||||
/// The attempt number of this signing protocol
|
||||
DkgShares {
|
||||
attempt: u32,
|
||||
// The signature share
|
||||
share: [u8; 32],
|
||||
/// The transaction's signer and signature
|
||||
// Sending Participant, Receiving Participant, Share
|
||||
shares: Vec<Vec<Vec<u8>>>,
|
||||
confirmation_nonces: [u8; 64],
|
||||
signed: Signed,
|
||||
},
|
||||
InvalidDkgShare {
|
||||
attempt: u32,
|
||||
accuser: Participant,
|
||||
faulty: Participant,
|
||||
blame: Option<Vec<u8>>,
|
||||
signed: Signed,
|
||||
},
|
||||
DkgConfirmed {
|
||||
attempt: u32,
|
||||
confirmation_share: [u8; 32],
|
||||
signed: Signed,
|
||||
},
|
||||
|
||||
/// Intend to co-sign a finalized Substrate block
|
||||
///
|
||||
/// When the time comes to start a new co-signing protocol, the most recent Substrate block will
|
||||
/// be the one selected to be cosigned.
|
||||
Cosign {
|
||||
/// The hash of the Substrate block to sign
|
||||
substrate_block_hash: [u8; 32],
|
||||
},
|
||||
// Co-sign a Substrate block.
|
||||
CosignSubstrateBlock([u8; 32]),
|
||||
|
||||
/// The cosign for a Substrate block
|
||||
///
|
||||
/// After producing this cosign, we need to start work on the latest intended-to-be cosigned
|
||||
/// block. That requires agreement on when this cosign was produced, which we solve by embedding
|
||||
/// this cosign on chain.
|
||||
///
|
||||
/// Ideally, we wouldn't have this transaction at all. The coordinator, without access to any of the
|
||||
/// key shares, could observe the FROST signing session and determine a successful completion.
|
||||
/// Unfortunately, that functionality is not present in modular-frost, so we do need to support
|
||||
/// *some* asynchronous flow (where the processor or P2P network informs us of the successful
|
||||
/// completion).
|
||||
///
|
||||
/// If we use a `Provided` transaction, that requires everyone observe this cosign.
|
||||
///
|
||||
/// If we use an `Unsigned` transaction, we can't verify the cosign signature inside
|
||||
/// `Transaction::verify` unless we embedded the full `SignedCosign` on-chain. The issue is since
|
||||
/// a Tributary is stateless with regards to the on-chain logic, including `Transaction::verify`,
|
||||
/// we can't verify the signature against the group's public key unless we also include that (but
|
||||
/// then we open a DoS where arbitrary group keys are specified to cause inclusion of arbitrary
|
||||
/// blobs on chain).
|
||||
///
|
||||
/// If we use a `Signed` transaction, we mitigate the DoS risk by having someone to fatally
|
||||
/// slash. The performance is horrible though, as with 100 validators, all 100 will publish this
|
||||
/// transaction.
|
||||
///
|
||||
/// We could use a signed `Unsigned` transaction, where it includes a signer and signature but
|
||||
/// isn't technically a Signed transaction. This lets us de-duplicate the transaction premised on
|
||||
/// its contents.
|
||||
///
|
||||
/// The optimal choice is likely to use a `Provided` transaction. We don't actually need to
|
||||
/// observe the produced cosign (which is ephemeral). As long as it's agreed the cosign in
|
||||
/// question no longer needs to be produced, which would mean the cosigning protocol at-large
|
||||
/// cosigning the block in question, it'd be safe to provide this and move on to the next cosign.
|
||||
Cosigned { substrate_block_hash: [u8; 32] },
|
||||
|
||||
/// Acknowledge a Substrate block
|
||||
///
|
||||
/// This is provided after the block has been cosigned.
|
||||
///
|
||||
/// With the acknowledgement of a Substrate block, we can whitelist all the `VariantSignId`s
|
||||
/// resulting from its handling.
|
||||
SubstrateBlock {
|
||||
/// The hash of the Substrate block
|
||||
hash: [u8; 32],
|
||||
},
|
||||
|
||||
/// Acknowledge a Batch
|
||||
///
|
||||
/// Once everyone has acknowledged the Batch, we can begin signing it.
|
||||
// When we have synchrony on a batch, we can allow signing it
|
||||
// TODO (never?): This is less efficient compared to an ExternalBlock provided transaction,
|
||||
// which would be binding over the block hash and automatically achieve synchrony on all
|
||||
// relevant batches. ExternalBlock was removed for this due to complexity around the pipeline
|
||||
// with the current processor, yet it would still be an improvement.
|
||||
Batch {
|
||||
/// The hash of the Batch's serialization.
|
||||
///
|
||||
/// Generally, we refer to a Batch by its ID/the hash of its instructions. Here, we want to
|
||||
/// ensure consensus on the Batch, and achieving consensus on its hash is the most effective
|
||||
/// way to do that.
|
||||
hash: [u8; 32],
|
||||
block: [u8; 32],
|
||||
batch: u32,
|
||||
},
|
||||
// When a Serai block is finalized, with the contained batches, we can allow the associated plan
|
||||
// IDs
|
||||
SubstrateBlock(u64),
|
||||
|
||||
SubstrateSign(SignData<SubstrateSignableId>),
|
||||
Sign(SignData<[u8; 32]>),
|
||||
// This is defined as an Unsigned transaction in order to de-duplicate SignCompleted amongst
|
||||
// reporters (who should all report the same thing)
|
||||
// We do still track the signer in order to prevent a single signer from publishing arbitrarily
|
||||
// many TXs without penalty
|
||||
// Here, they're denoted as the first_signer, as only the signer of the first TX to be included
|
||||
// with this pairing will be remembered on-chain
|
||||
SignCompleted {
|
||||
plan: [u8; 32],
|
||||
tx_hash: Vec<u8>,
|
||||
first_signer: <Ristretto as Ciphersuite>::G,
|
||||
signature: SchnorrSignature<Ristretto>,
|
||||
},
|
||||
|
||||
/// Data from a signing protocol.
|
||||
Sign {
|
||||
/// The ID of the object being signed
|
||||
id: VariantSignId,
|
||||
/// The attempt number of this signing protocol
|
||||
attempt: u32,
|
||||
/// The round this data is for, within the signing protocol
|
||||
round: SigningProtocolRound,
|
||||
/// The data itself
|
||||
///
|
||||
/// There will be `n` blobs of data where `n` is the amount of key shares the validator sending
|
||||
/// this transaction has.
|
||||
data: Vec<Vec<u8>>,
|
||||
/// The transaction's signer and signature
|
||||
signed: Signed,
|
||||
},
|
||||
SlashReport(Vec<u32>, Signed),
|
||||
}
|
||||
|
||||
/// The local view of slashes observed by the transaction's sender
|
||||
SlashReport {
|
||||
/// The slash points accrued by each validator
|
||||
slash_points: Vec<u32>,
|
||||
/// The transaction's signer and signature
|
||||
signed: Signed,
|
||||
},
|
||||
impl Debug for Transaction {
|
||||
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
|
||||
match self {
|
||||
Transaction::RemoveParticipantDueToDkg { participant, signed } => fmt
|
||||
.debug_struct("Transaction::RemoveParticipantDueToDkg")
|
||||
.field("participant", &hex::encode(participant.to_bytes()))
|
||||
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
||||
.finish_non_exhaustive(),
|
||||
Transaction::DkgCommitments { attempt, commitments: _, signed } => fmt
|
||||
.debug_struct("Transaction::DkgCommitments")
|
||||
.field("attempt", attempt)
|
||||
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
||||
.finish_non_exhaustive(),
|
||||
Transaction::DkgShares { attempt, signed, .. } => fmt
|
||||
.debug_struct("Transaction::DkgShares")
|
||||
.field("attempt", attempt)
|
||||
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
||||
.finish_non_exhaustive(),
|
||||
Transaction::InvalidDkgShare { attempt, accuser, faulty, .. } => fmt
|
||||
.debug_struct("Transaction::InvalidDkgShare")
|
||||
.field("attempt", attempt)
|
||||
.field("accuser", accuser)
|
||||
.field("faulty", faulty)
|
||||
.finish_non_exhaustive(),
|
||||
Transaction::DkgConfirmed { attempt, confirmation_share: _, signed } => fmt
|
||||
.debug_struct("Transaction::DkgConfirmed")
|
||||
.field("attempt", attempt)
|
||||
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
||||
.finish_non_exhaustive(),
|
||||
Transaction::CosignSubstrateBlock(block) => fmt
|
||||
.debug_struct("Transaction::CosignSubstrateBlock")
|
||||
.field("block", &hex::encode(block))
|
||||
.finish(),
|
||||
Transaction::Batch { block, batch } => fmt
|
||||
.debug_struct("Transaction::Batch")
|
||||
.field("block", &hex::encode(block))
|
||||
.field("batch", &batch)
|
||||
.finish(),
|
||||
Transaction::SubstrateBlock(block) => {
|
||||
fmt.debug_struct("Transaction::SubstrateBlock").field("block", block).finish()
|
||||
}
|
||||
Transaction::SubstrateSign(sign_data) => {
|
||||
fmt.debug_struct("Transaction::SubstrateSign").field("sign_data", sign_data).finish()
|
||||
}
|
||||
Transaction::Sign(sign_data) => {
|
||||
fmt.debug_struct("Transaction::Sign").field("sign_data", sign_data).finish()
|
||||
}
|
||||
Transaction::SignCompleted { plan, tx_hash, .. } => fmt
|
||||
.debug_struct("Transaction::SignCompleted")
|
||||
.field("plan", &hex::encode(plan))
|
||||
.field("tx_hash", &hex::encode(tx_hash))
|
||||
.finish_non_exhaustive(),
|
||||
Transaction::SlashReport(points, signed) => fmt
|
||||
.debug_struct("Transaction::SignCompleted")
|
||||
.field("points", points)
|
||||
.field("signed", signed)
|
||||
.finish(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ReadWrite for Transaction {
|
||||
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
borsh::from_reader(reader)
|
||||
let mut kind = [0];
|
||||
reader.read_exact(&mut kind)?;
|
||||
|
||||
match kind[0] {
|
||||
0 => Ok(Transaction::RemoveParticipantDueToDkg {
|
||||
participant: Ristretto::read_G(reader)?,
|
||||
signed: Signed::read_without_nonce(reader, 0)?,
|
||||
}),
|
||||
|
||||
1 => {
|
||||
let mut attempt = [0; 4];
|
||||
reader.read_exact(&mut attempt)?;
|
||||
let attempt = u32::from_le_bytes(attempt);
|
||||
|
||||
let commitments = {
|
||||
let mut commitments_len = [0; 1];
|
||||
reader.read_exact(&mut commitments_len)?;
|
||||
let commitments_len = usize::from(commitments_len[0]);
|
||||
if commitments_len == 0 {
|
||||
Err(io::Error::other("zero commitments in DkgCommitments"))?;
|
||||
}
|
||||
|
||||
let mut each_commitments_len = [0; 2];
|
||||
reader.read_exact(&mut each_commitments_len)?;
|
||||
let each_commitments_len = usize::from(u16::from_le_bytes(each_commitments_len));
|
||||
if (commitments_len * each_commitments_len) > TRANSACTION_SIZE_LIMIT {
|
||||
Err(io::Error::other(
|
||||
"commitments present in transaction exceeded transaction size limit",
|
||||
))?;
|
||||
}
|
||||
let mut commitments = vec![vec![]; commitments_len];
|
||||
for commitments in &mut commitments {
|
||||
*commitments = vec![0; each_commitments_len];
|
||||
reader.read_exact(commitments)?;
|
||||
}
|
||||
commitments
|
||||
};
|
||||
|
||||
let signed = Signed::read_without_nonce(reader, 0)?;
|
||||
|
||||
Ok(Transaction::DkgCommitments { attempt, commitments, signed })
|
||||
}
|
||||
|
||||
2 => {
|
||||
let mut attempt = [0; 4];
|
||||
reader.read_exact(&mut attempt)?;
|
||||
let attempt = u32::from_le_bytes(attempt);
|
||||
|
||||
let shares = {
|
||||
let mut share_quantity = [0; 1];
|
||||
reader.read_exact(&mut share_quantity)?;
|
||||
|
||||
let mut key_share_quantity = [0; 1];
|
||||
reader.read_exact(&mut key_share_quantity)?;
|
||||
|
||||
let mut share_len = [0; 2];
|
||||
reader.read_exact(&mut share_len)?;
|
||||
let share_len = usize::from(u16::from_le_bytes(share_len));
|
||||
|
||||
let mut all_shares = vec![];
|
||||
for _ in 0 .. share_quantity[0] {
|
||||
let mut shares = vec![];
|
||||
for _ in 0 .. key_share_quantity[0] {
|
||||
let mut share = vec![0; share_len];
|
||||
reader.read_exact(&mut share)?;
|
||||
shares.push(share);
|
||||
}
|
||||
all_shares.push(shares);
|
||||
}
|
||||
all_shares
|
||||
};
|
||||
|
||||
let mut confirmation_nonces = [0; 64];
|
||||
reader.read_exact(&mut confirmation_nonces)?;
|
||||
|
||||
let signed = Signed::read_without_nonce(reader, 1)?;
|
||||
|
||||
Ok(Transaction::DkgShares { attempt, shares, confirmation_nonces, signed })
|
||||
}
|
||||
|
||||
3 => {
|
||||
let mut attempt = [0; 4];
|
||||
reader.read_exact(&mut attempt)?;
|
||||
let attempt = u32::from_le_bytes(attempt);
|
||||
|
||||
let mut accuser = [0; 2];
|
||||
reader.read_exact(&mut accuser)?;
|
||||
let accuser = Participant::new(u16::from_le_bytes(accuser))
|
||||
.ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?;
|
||||
|
||||
let mut faulty = [0; 2];
|
||||
reader.read_exact(&mut faulty)?;
|
||||
let faulty = Participant::new(u16::from_le_bytes(faulty))
|
||||
.ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?;
|
||||
|
||||
let mut blame_len = [0; 2];
|
||||
reader.read_exact(&mut blame_len)?;
|
||||
let mut blame = vec![0; u16::from_le_bytes(blame_len).into()];
|
||||
reader.read_exact(&mut blame)?;
|
||||
|
||||
// This shares a nonce with DkgConfirmed as only one is expected
|
||||
let signed = Signed::read_without_nonce(reader, 2)?;
|
||||
|
||||
Ok(Transaction::InvalidDkgShare {
|
||||
attempt,
|
||||
accuser,
|
||||
faulty,
|
||||
blame: Some(blame).filter(|blame| !blame.is_empty()),
|
||||
signed,
|
||||
})
|
||||
}
|
||||
|
||||
4 => {
|
||||
let mut attempt = [0; 4];
|
||||
reader.read_exact(&mut attempt)?;
|
||||
let attempt = u32::from_le_bytes(attempt);
|
||||
|
||||
let mut confirmation_share = [0; 32];
|
||||
reader.read_exact(&mut confirmation_share)?;
|
||||
|
||||
let signed = Signed::read_without_nonce(reader, 2)?;
|
||||
|
||||
Ok(Transaction::DkgConfirmed { attempt, confirmation_share, signed })
|
||||
}
|
||||
|
||||
5 => {
|
||||
let mut block = [0; 32];
|
||||
reader.read_exact(&mut block)?;
|
||||
Ok(Transaction::CosignSubstrateBlock(block))
|
||||
}
|
||||
|
||||
6 => {
|
||||
let mut block = [0; 32];
|
||||
reader.read_exact(&mut block)?;
|
||||
let mut batch = [0; 4];
|
||||
reader.read_exact(&mut batch)?;
|
||||
Ok(Transaction::Batch { block, batch: u32::from_le_bytes(batch) })
|
||||
}
|
||||
|
||||
7 => {
|
||||
let mut block = [0; 8];
|
||||
reader.read_exact(&mut block)?;
|
||||
Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block)))
|
||||
}
|
||||
|
||||
8 => SignData::read(reader).map(Transaction::SubstrateSign),
|
||||
9 => SignData::read(reader).map(Transaction::Sign),
|
||||
|
||||
10 => {
|
||||
let mut plan = [0; 32];
|
||||
reader.read_exact(&mut plan)?;
|
||||
|
||||
let mut tx_hash_len = [0];
|
||||
reader.read_exact(&mut tx_hash_len)?;
|
||||
let mut tx_hash = vec![0; usize::from(tx_hash_len[0])];
|
||||
reader.read_exact(&mut tx_hash)?;
|
||||
|
||||
let first_signer = Ristretto::read_G(reader)?;
|
||||
let signature = SchnorrSignature::<Ristretto>::read(reader)?;
|
||||
|
||||
Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature })
|
||||
}
|
||||
|
||||
11 => {
|
||||
let mut len = [0];
|
||||
reader.read_exact(&mut len)?;
|
||||
let len = len[0];
|
||||
// If the set has as many validators as MAX_KEY_SHARES_PER_SET, then the amount of distinct
|
||||
// validators (the amount of validators reported on) will be at most
|
||||
// `MAX_KEY_SHARES_PER_SET - 1`
|
||||
if u32::from(len) > (serai_client::validator_sets::primitives::MAX_KEY_SHARES_PER_SET - 1) {
|
||||
Err(io::Error::other("more points reported than allowed validator"))?;
|
||||
}
|
||||
let mut points = vec![0u32; len.into()];
|
||||
for points in &mut points {
|
||||
let mut these_points = [0; 4];
|
||||
reader.read_exact(&mut these_points)?;
|
||||
*points = u32::from_le_bytes(these_points);
|
||||
}
|
||||
Ok(Transaction::SlashReport(points, Signed::read_without_nonce(reader, 0)?))
|
||||
}
|
||||
|
||||
_ => Err(io::Error::other("invalid transaction type")),
|
||||
}
|
||||
}
|
||||
|
||||
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
borsh::to_writer(writer, self)
|
||||
match self {
|
||||
Transaction::RemoveParticipantDueToDkg { participant, signed } => {
|
||||
writer.write_all(&[0])?;
|
||||
writer.write_all(&participant.to_bytes())?;
|
||||
signed.write_without_nonce(writer)
|
||||
}
|
||||
|
||||
Transaction::DkgCommitments { attempt, commitments, signed } => {
|
||||
writer.write_all(&[1])?;
|
||||
writer.write_all(&attempt.to_le_bytes())?;
|
||||
if commitments.is_empty() {
|
||||
Err(io::Error::other("zero commitments in DkgCommitments"))?
|
||||
}
|
||||
writer.write_all(&[u8::try_from(commitments.len()).unwrap()])?;
|
||||
for commitments_i in commitments {
|
||||
if commitments_i.len() != commitments[0].len() {
|
||||
Err(io::Error::other("commitments of differing sizes in DkgCommitments"))?
|
||||
}
|
||||
}
|
||||
writer.write_all(&u16::try_from(commitments[0].len()).unwrap().to_le_bytes())?;
|
||||
for commitments in commitments {
|
||||
writer.write_all(commitments)?;
|
||||
}
|
||||
signed.write_without_nonce(writer)
|
||||
}
|
||||
|
||||
Transaction::DkgShares { attempt, shares, confirmation_nonces, signed } => {
|
||||
writer.write_all(&[2])?;
|
||||
writer.write_all(&attempt.to_le_bytes())?;
|
||||
|
||||
// `shares` is a Vec which is supposed to map to a HashMap<Participant, Vec<u8>>. Since we
|
||||
// bound participants to 150, this conversion is safe for any valid in-memory transaction.
|
||||
writer.write_all(&[u8::try_from(shares.len()).unwrap()])?;
|
||||
// This assumes at least one share is being sent to another party
|
||||
writer.write_all(&[u8::try_from(shares[0].len()).unwrap()])?;
|
||||
let share_len = shares[0][0].len();
|
||||
// For BLS12-381 G2, this would be:
|
||||
// - A 32-byte share
|
||||
// - A 96-byte ephemeral key
|
||||
// - A 128-byte signature
|
||||
// Hence why this has to be u16
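// (32 + 96 + 128 = 256 bytes per share, which no longer fits in a u8 length prefix)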
|
||||
writer.write_all(&u16::try_from(share_len).unwrap().to_le_bytes())?;
|
||||
|
||||
for these_shares in shares {
|
||||
assert_eq!(these_shares.len(), shares[0].len(), "amount of sent shares was variable");
|
||||
for share in these_shares {
|
||||
assert_eq!(share.len(), share_len, "sent shares were of variable length");
|
||||
writer.write_all(share)?;
|
||||
}
|
||||
}
|
||||
|
||||
writer.write_all(confirmation_nonces)?;
|
||||
signed.write_without_nonce(writer)
|
||||
}
|
||||
|
||||
Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => {
|
||||
writer.write_all(&[3])?;
|
||||
writer.write_all(&attempt.to_le_bytes())?;
|
||||
writer.write_all(&u16::from(*accuser).to_le_bytes())?;
|
||||
writer.write_all(&u16::from(*faulty).to_le_bytes())?;
|
||||
|
||||
// Flattens Some(vec![]) to None on the expectation no actual blame will be 0-length
|
||||
assert!(blame.as_ref().map_or(1, Vec::len) != 0);
|
||||
let blame_len =
|
||||
u16::try_from(blame.as_ref().unwrap_or(&vec![]).len()).expect("blame exceeded 64 KB");
|
||||
writer.write_all(&blame_len.to_le_bytes())?;
|
||||
writer.write_all(blame.as_ref().unwrap_or(&vec![]))?;
|
||||
|
||||
signed.write_without_nonce(writer)
|
||||
}
|
||||
|
||||
Transaction::DkgConfirmed { attempt, confirmation_share, signed } => {
|
||||
writer.write_all(&[4])?;
|
||||
writer.write_all(&attempt.to_le_bytes())?;
|
||||
writer.write_all(confirmation_share)?;
|
||||
signed.write_without_nonce(writer)
|
||||
}
|
||||
|
||||
Transaction::CosignSubstrateBlock(block) => {
|
||||
writer.write_all(&[5])?;
|
||||
writer.write_all(block)
|
||||
}
|
||||
|
||||
Transaction::Batch { block, batch } => {
|
||||
writer.write_all(&[6])?;
|
||||
writer.write_all(block)?;
|
||||
writer.write_all(&batch.to_le_bytes())
|
||||
}
|
||||
|
||||
Transaction::SubstrateBlock(block) => {
|
||||
writer.write_all(&[7])?;
|
||||
writer.write_all(&block.to_le_bytes())
|
||||
}
|
||||
|
||||
Transaction::SubstrateSign(data) => {
|
||||
writer.write_all(&[8])?;
|
||||
data.write(writer)
|
||||
}
|
||||
Transaction::Sign(data) => {
|
||||
writer.write_all(&[9])?;
|
||||
data.write(writer)
|
||||
}
|
||||
Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => {
|
||||
writer.write_all(&[10])?;
|
||||
writer.write_all(plan)?;
|
||||
writer
|
||||
.write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceed 255 bytes")])?;
|
||||
writer.write_all(tx_hash)?;
|
||||
writer.write_all(&first_signer.to_bytes())?;
|
||||
signature.write(writer)
|
||||
}
|
||||
Transaction::SlashReport(points, signed) => {
|
||||
writer.write_all(&[11])?;
|
||||
writer.write_all(&[u8::try_from(points.len()).unwrap()])?;
|
||||
for points in points {
|
||||
writer.write_all(&points.to_le_bytes())?;
|
||||
}
|
||||
signed.write_without_nonce(writer)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TransactionTrait for Transaction {
|
||||
fn kind(&self) -> TransactionKind {
|
||||
fn kind(&self) -> TransactionKind<'_> {
|
||||
match self {
|
||||
Transaction::RemoveParticipant { participant, signed } => {
|
||||
TransactionKind::Signed((b"RemoveParticipant", participant).encode(), signed.nonce(0))
|
||||
Transaction::RemoveParticipantDueToDkg { participant, signed } => {
|
||||
TransactionKind::Signed((b"remove", participant.to_bytes()).encode(), signed)
|
||||
}
|
||||
|
||||
Transaction::DkgParticipation { signed, .. } => {
|
||||
TransactionKind::Signed(b"DkgParticipation".encode(), signed.nonce(0))
|
||||
}
|
||||
Transaction::DkgConfirmationPreprocess { attempt, signed, .. } => {
|
||||
TransactionKind::Signed((b"DkgConfirmation", attempt).encode(), signed.nonce(0))
|
||||
}
|
||||
Transaction::DkgConfirmationShare { attempt, signed, .. } => {
|
||||
TransactionKind::Signed((b"DkgConfirmation", attempt).encode(), signed.nonce(1))
|
||||
Transaction::DkgCommitments { attempt, commitments: _, signed } |
|
||||
Transaction::DkgShares { attempt, signed, .. } |
|
||||
Transaction::InvalidDkgShare { attempt, signed, .. } |
|
||||
Transaction::DkgConfirmed { attempt, signed, .. } => {
|
||||
TransactionKind::Signed((b"dkg", attempt).encode(), signed)
|
||||
}
|
||||
|
||||
Transaction::Cosign { .. } => TransactionKind::Provided("CosignSubstrateBlock"),
|
||||
Transaction::Cosigned { .. } => TransactionKind::Provided("Cosigned"),
|
||||
Transaction::SubstrateBlock { .. } => TransactionKind::Provided("SubstrateBlock"),
|
||||
Transaction::Batch { .. } => TransactionKind::Provided("Batch"),
|
||||
Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"),
|
||||
|
||||
Transaction::Sign { id, attempt, round, signed, .. } => {
|
||||
TransactionKind::Signed((b"Sign", id, attempt).encode(), signed.nonce(round.nonce()))
|
||||
Transaction::Batch { .. } => TransactionKind::Provided("batch"),
|
||||
Transaction::SubstrateBlock(_) => TransactionKind::Provided("serai"),
|
||||
|
||||
Transaction::SubstrateSign(data) => {
|
||||
TransactionKind::Signed((b"substrate", data.plan, data.attempt).encode(), &data.signed)
|
||||
}
|
||||
Transaction::Sign(data) => {
|
||||
TransactionKind::Signed((b"sign", data.plan, data.attempt).encode(), &data.signed)
|
||||
}
|
||||
Transaction::SignCompleted { .. } => TransactionKind::Unsigned,
|
||||
|
||||
Transaction::SlashReport { signed, .. } => {
|
||||
TransactionKind::Signed(b"SlashReport".encode(), signed.nonce(0))
|
||||
Transaction::SlashReport(_, signed) => {
|
||||
TransactionKind::Signed(b"slash_report".to_vec(), signed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn hash(&self) -> [u8; 32] {
|
||||
let mut tx = ReadWrite::serialize(self);
|
||||
let mut tx = self.serialize();
|
||||
if let TransactionKind::Signed(_, signed) = self.kind() {
|
||||
// Make sure the part we're cutting off is the signature
|
||||
assert_eq!(tx.drain((tx.len() - 64) ..).collect::<Vec<_>>(), signed.signature.serialize());
|
||||
}
|
||||
Blake2b::<U32>::digest(&tx).into()
|
||||
Blake2s256::digest([b"Coordinator Tributary Transaction".as_slice(), &tx].concat()).into()
|
||||
}
|
||||
|
||||
// This is a stateless verification which we use to enforce some size limits.
|
||||
fn verify(&self) -> Result<(), TransactionError> {
|
||||
#[allow(clippy::match_same_arms)]
|
||||
match self {
|
||||
// Fixed-length TX
|
||||
Transaction::RemoveParticipant { .. } => {}
|
||||
// TODO: Check SubstrateSign's lengths here
|
||||
|
||||
// TODO: MAX_DKG_PARTICIPATION_LEN
|
||||
Transaction::DkgParticipation { .. } => {}
|
||||
// These are fixed-length TXs
|
||||
Transaction::DkgConfirmationPreprocess { .. } | Transaction::DkgConfirmationShare { .. } => {}
|
||||
|
||||
// Provided TXs
|
||||
Transaction::Cosign { .. } |
|
||||
Transaction::Cosigned { .. } |
|
||||
Transaction::SubstrateBlock { .. } |
|
||||
Transaction::Batch { .. } => {}
|
||||
|
||||
Transaction::Sign { data, .. } => {
|
||||
if data.len() > usize::try_from(MAX_KEY_SHARES_PER_SET).unwrap() {
|
||||
Err(TransactionError::InvalidContent)?
|
||||
}
|
||||
// TODO: MAX_SIGN_LEN
|
||||
if let Transaction::SignCompleted { first_signer, signature, .. } = self {
|
||||
if !signature.verify(*first_signer, self.sign_completed_challenge()) {
|
||||
Err(TransactionError::InvalidContent)?;
|
||||
}
|
||||
}
|
||||
|
||||
Transaction::SlashReport { slash_points, .. } => {
|
||||
if slash_points.len() > usize::try_from(MAX_KEY_SHARES_PER_SET).unwrap() {
|
||||
Err(TransactionError::InvalidContent)?
|
||||
}
|
||||
}
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Transaction {
|
||||
// Used to initially construct transactions so we can then get sig hashes and perform signing
|
||||
pub fn empty_signed() -> Signed {
|
||||
Signed {
|
||||
signer: Ristretto::generator(),
|
||||
nonce: 0,
|
||||
signature: SchnorrSignature::<Ristretto> {
|
||||
R: Ristretto::generator(),
|
||||
s: <Ristretto as Ciphersuite>::F::ZERO,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Sign a transaction
|
||||
//
|
||||
// Panics if signing a transaction type which isn't `TransactionKind::Signed`
|
||||
pub fn sign<R: RngCore + CryptoRng>(
|
||||
&mut self,
|
||||
rng: &mut R,
|
||||
genesis: [u8; 32],
|
||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
) {
|
||||
fn signed(tx: &mut Transaction) -> &mut Signed {
|
||||
#[allow(clippy::match_same_arms)] // This doesn't make semantic sense here
|
||||
match tx {
|
||||
Transaction::RemoveParticipant { ref mut signed, .. } |
|
||||
Transaction::DkgParticipation { ref mut signed, .. } |
|
||||
Transaction::DkgConfirmationPreprocess { ref mut signed, .. } => signed,
|
||||
Transaction::DkgConfirmationShare { ref mut signed, .. } => signed,
|
||||
fn signed(tx: &mut Transaction) -> (u32, &mut Signed) {
|
||||
#[allow(clippy::match_same_arms)] // Doesn't make semantic sense here
|
||||
let nonce = match tx {
|
||||
Transaction::RemoveParticipantDueToDkg { .. } => 0,
|
||||
|
||||
Transaction::DkgCommitments { .. } => 0,
|
||||
Transaction::DkgShares { .. } => 1,
|
||||
Transaction::InvalidDkgShare { .. } | Transaction::DkgConfirmed { .. } => 2,
|
||||
|
||||
Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
|
||||
|
||||
Transaction::Cosign { .. } => panic!("signing CosignSubstrateBlock"),
|
||||
Transaction::Cosigned { .. } => panic!("signing Cosigned"),
|
||||
Transaction::SubstrateBlock { .. } => panic!("signing SubstrateBlock"),
|
||||
Transaction::Batch { .. } => panic!("signing Batch"),
|
||||
Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"),
|
||||
|
||||
Transaction::Sign { ref mut signed, .. } => signed,
|
||||
Transaction::SubstrateSign(data) => data.label.nonce(),
|
||||
Transaction::Sign(data) => data.label.nonce(),
|
||||
|
||||
Transaction::SlashReport { ref mut signed, .. } => signed,
|
||||
}
|
||||
Transaction::SignCompleted { .. } => panic!("signing SignCompleted"),
|
||||
|
||||
Transaction::SlashReport(_, _) => 0,
|
||||
};
|
||||
|
||||
(
|
||||
nonce,
|
||||
#[allow(clippy::match_same_arms)]
|
||||
match tx {
|
||||
Transaction::RemoveParticipantDueToDkg { ref mut signed, .. } |
|
||||
Transaction::DkgCommitments { ref mut signed, .. } |
|
||||
Transaction::DkgShares { ref mut signed, .. } |
|
||||
Transaction::InvalidDkgShare { ref mut signed, .. } |
|
||||
Transaction::DkgConfirmed { ref mut signed, .. } => signed,
|
||||
|
||||
Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
|
||||
|
||||
Transaction::Batch { .. } => panic!("signing Batch"),
|
||||
Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"),
|
||||
|
||||
Transaction::SubstrateSign(ref mut data) => &mut data.signed,
|
||||
Transaction::Sign(ref mut data) => &mut data.signed,
|
||||
|
||||
Transaction::SignCompleted { .. } => panic!("signing SignCompleted"),
|
||||
|
||||
Transaction::SlashReport(_, ref mut signed) => signed,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Decide the nonce to sign with
|
||||
let (nonce, signed_ref) = signed(self);
|
||||
signed_ref.signer = Ristretto::generator() * key.deref();
|
||||
signed_ref.nonce = nonce;
|
||||
|
||||
let sig_nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(rng));
|
||||
|
||||
{
|
||||
// Set the signer and the nonce
|
||||
let signed = signed(self);
|
||||
signed.signer = Ristretto::generator() * key.deref();
|
||||
signed.signature.R = <Ristretto as Ciphersuite>::generator() * sig_nonce.deref();
|
||||
}
|
||||
|
||||
// Get the signature hash (which now includes `R || A` making it valid as the challenge)
|
||||
signed(self).1.signature.R = <Ristretto as Ciphersuite>::generator() * sig_nonce.deref();
|
||||
let sig_hash = self.sig_hash(genesis);
|
||||
signed(self).1.signature = SchnorrSignature::<Ristretto>::sign(key, sig_nonce, sig_hash);
|
||||
}
|
||||
|
||||
// Sign the signature
|
||||
signed(self).signature = SchnorrSignature::<Ristretto>::sign(key, sig_nonce, sig_hash);
|
||||
pub fn sign_completed_challenge(&self) -> <Ristretto as Ciphersuite>::F {
|
||||
if let Transaction::SignCompleted { plan, tx_hash, first_signer, signature } = self {
|
||||
let mut transcript =
|
||||
RecommendedTranscript::new(b"Coordinator Tributary Transaction SignCompleted");
|
||||
transcript.append_message(b"plan", plan);
|
||||
transcript.append_message(b"tx_hash", tx_hash);
|
||||
transcript.append_message(b"signer", first_signer.to_bytes());
|
||||
transcript.append_message(b"nonce", signature.R.to_bytes());
|
||||
Ristretto::hash_to_F(b"SignCompleted signature", &transcript.challenge(b"challenge"))
|
||||
} else {
|
||||
panic!("sign_completed_challenge called on transaction which wasn't SignCompleted")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
[package]
|
||||
name = "serai-coordinator-substrate"
|
||||
version = "0.1.0"
|
||||
description = "Serai Coordinator's Substrate Scanner"
|
||||
license = "AGPL-3.0-only"
|
||||
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/substrate"
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
keywords = []
|
||||
edition = "2021"
|
||||
publish = false
|
||||
rust-version = "1.81"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
rustdoc-args = ["--cfg", "docsrs"]
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
|
||||
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
|
||||
serai-client = { path = "../../substrate/client", version = "0.1", default-features = false, features = ["serai", "borsh"] }
|
||||
|
||||
log = { version = "0.4", default-features = false, features = ["std"] }
|
||||
|
||||
futures = { version = "0.3", default-features = false, features = ["std"] }
|
||||
tokio = { version = "1", default-features = false }
|
||||
|
||||
serai-db = { path = "../../common/db", version = "0.1.1" }
|
||||
serai-task = { path = "../../common/task", version = "0.1" }
|
||||
|
||||
serai-cosign = { path = "../cosign", version = "0.1" }
|
||||
|
||||
messages = { package = "serai-processor-messages", version = "0.1", path = "../../processor/messages" }
|
||||
@@ -1,15 +0,0 @@
|
||||
AGPL-3.0-only license
|
||||
|
||||
Copyright (c) 2023-2024 Luke Parker
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License Version 3 as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
@@ -1,14 +0,0 @@
|
||||
# Serai Coordinator Substrate Scanner
|
||||
|
||||
This is the scanner of the Serai blockchain used by Serai's coordinator.
|
||||
|
||||
Two event streams are defined:
|
||||
|
||||
- Canonical events, which must be handled by every validator, regardless of the sets they're present
|
||||
in. These are represented by `serai_processor_messages::substrate::CoordinatorMessage`.
|
||||
- Ephemeral events, which only need to be handled by the validators present within the sets they
|
||||
relate to. These are represented by two channels, `NewSet` and `SignSlashReport`.
|
||||
|
||||
The canonical event stream is available without provision of a validator's public key. The ephemeral
|
||||
event stream requires provision of a validator's public key. Both are ordered within themselves, yet
|
||||
there are no ordering guarantees across the two.
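
A minimal sketch of how a consumer might drain these streams, assuming a `serai_db::Db` handle and a single external `network` of interest (the `try_recv` functions are those exposed by this crate; the surrounding setup is illustrative only):

```rust
use serai_db::{Db, DbTxn};
use serai_client::primitives::NetworkId;

use serai_coordinator_substrate::{Canonical, NewSet, SignSlashReport};

fn drain_events(db: &mut impl Db, network: NetworkId) {
  let mut txn = db.txn();

  // Canonical events: must be handled by every validator for this network
  while let Some(msg) = Canonical::try_recv(&mut txn, network) {
    // Handle the canonical `CoordinatorMessage` (SetKeys, SlashesReported, Block, ...)
    let _ = msg;
  }

  // Ephemeral events: only relevant to validators within the sets they concern
  while let Some(new_set) = NewSet::try_recv(&mut txn) {
    // Spin up the DKG/signing machinery for this new set
    let _ = new_set;
  }
  while let Some(set) = SignSlashReport::try_recv(&mut txn) {
    // Begin signing the slash report for this set
    let _ = set;
  }

  txn.commit();
}
```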
|
||||
@@ -1,221 +0,0 @@
|
||||
use std::future::Future;
|
||||
|
||||
use futures::stream::{StreamExt, FuturesOrdered};
|
||||
|
||||
use serai_client::Serai;
|
||||
|
||||
use messages::substrate::{InInstructionResult, ExecutedBatch, CoordinatorMessage};
|
||||
|
||||
use serai_db::*;
|
||||
use serai_task::ContinuallyRan;
|
||||
|
||||
use serai_cosign::Cosigning;
|
||||
|
||||
create_db!(
|
||||
CoordinatorSubstrateCanonical {
|
||||
NextBlock: () -> u64,
|
||||
}
|
||||
);
|
||||
|
||||
/// The event stream for canonical events.
|
||||
pub struct CanonicalEventStream<D: Db> {
|
||||
db: D,
|
||||
serai: Serai,
|
||||
}
|
||||
|
||||
impl<D: Db> CanonicalEventStream<D> {
|
||||
/// Create a new canonical event stream.
|
||||
///
|
||||
/// Only one of these may exist over the provided database.
|
||||
pub fn new(db: D, serai: Serai) -> Self {
|
||||
Self { db, serai }
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
|
||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
||||
async move {
|
||||
let next_block = NextBlock::get(&self.db).unwrap_or(0);
|
||||
let latest_finalized_block =
|
||||
Cosigning::<D>::latest_cosigned_block_number(&self.db).map_err(|e| format!("{e:?}"))?;
|
||||
|
||||
// These are all the events which generate canonical messages
|
||||
struct CanonicalEvents {
|
||||
time: u64,
|
||||
key_gen_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
|
||||
set_retired_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
|
||||
batch_events: Vec<serai_client::in_instructions::InInstructionsEvent>,
|
||||
burn_events: Vec<serai_client::coins::CoinsEvent>,
|
||||
}
|
||||
|
||||
// For a cosigned block, fetch all relevant events
|
||||
let scan = {
|
||||
let db = self.db.clone();
|
||||
let serai = &self.serai;
|
||||
move |block_number| {
|
||||
let block_hash = Cosigning::<D>::cosigned_block(&db, block_number);
|
||||
|
||||
async move {
|
||||
let block_hash = match block_hash {
|
||||
Ok(Some(block_hash)) => block_hash,
|
||||
Ok(None) => {
|
||||
panic!("iterating to latest cosigned block but couldn't get cosigned block")
|
||||
}
|
||||
Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()),
|
||||
};
|
||||
let temporal_serai = serai.as_of(block_hash);
|
||||
let temporal_serai_validators = temporal_serai.validator_sets();
|
||||
let temporal_serai_instructions = temporal_serai.in_instructions();
|
||||
let temporal_serai_coins = temporal_serai.coins();
|
||||
|
||||
let (block, key_gen_events, set_retired_events, batch_events, burn_events) =
|
||||
tokio::try_join!(
|
||||
serai.block(block_hash),
|
||||
temporal_serai_validators.key_gen_events(),
|
||||
temporal_serai_validators.set_retired_events(),
|
||||
temporal_serai_instructions.batch_events(),
|
||||
temporal_serai_coins.burn_with_instruction_events(),
|
||||
)
|
||||
.map_err(|e| format!("{e:?}"))?;
|
||||
let Some(block) = block else {
|
||||
Err(format!("Serai node didn't have cosigned block #{block_number}"))?
|
||||
};
|
||||
|
||||
let time = if block_number == 0 {
|
||||
block.time().unwrap_or(0)
|
||||
} else {
|
||||
// Serai's block time is in milliseconds
|
||||
block
|
||||
.time()
|
||||
.ok_or_else(|| "non-genesis Serai block didn't have a time".to_string())? /
|
||||
1000
|
||||
};
|
||||
|
||||
Ok((
|
||||
block_number,
|
||||
CanonicalEvents {
|
||||
time,
|
||||
key_gen_events,
|
||||
set_retired_events,
|
||||
batch_events,
|
||||
burn_events,
|
||||
},
|
||||
))
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Sync the next set of upcoming blocks all at once to minimize latency
|
||||
const BLOCKS_TO_SYNC_AT_ONCE: u64 = 10;
|
||||
// FuturesOrdered can be bad practice due to potentially causing timeouts if it isn't
|
||||
// sufficiently polled. Considering our processing loop is minimal and it does poll this,
|
||||
// it's fine.
|
||||
let mut set = FuturesOrdered::new();
|
||||
for block_number in
|
||||
next_block ..= latest_finalized_block.min(next_block + BLOCKS_TO_SYNC_AT_ONCE)
|
||||
{
|
||||
set.push_back(scan(block_number));
|
||||
}
|
||||
|
||||
for block_number in next_block ..= latest_finalized_block {
|
||||
// Get the next block in our queue
|
||||
let (popped_block_number, block) = set.next().await.unwrap()?;
|
||||
assert_eq!(block_number, popped_block_number);
|
||||
// Re-populate the queue
|
||||
if (block_number + BLOCKS_TO_SYNC_AT_ONCE) <= latest_finalized_block {
|
||||
set.push_back(scan(block_number + BLOCKS_TO_SYNC_AT_ONCE));
|
||||
}
|
||||
|
||||
let mut txn = self.db.txn();
|
||||
|
||||
for key_gen in block.key_gen_events {
|
||||
let serai_client::validator_sets::ValidatorSetsEvent::KeyGen { set, key_pair } = &key_gen
|
||||
else {
|
||||
panic!("KeyGen event wasn't a KeyGen event: {key_gen:?}");
|
||||
};
|
||||
crate::Canonical::send(
|
||||
&mut txn,
|
||||
set.network,
|
||||
&CoordinatorMessage::SetKeys {
|
||||
serai_time: block.time,
|
||||
session: set.session,
|
||||
key_pair: key_pair.clone(),
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
for set_retired in block.set_retired_events {
|
||||
let serai_client::validator_sets::ValidatorSetsEvent::SetRetired { set } = &set_retired
|
||||
else {
|
||||
panic!("SetRetired event wasn't a SetRetired event: {set_retired:?}");
|
||||
};
|
||||
crate::Canonical::send(
|
||||
&mut txn,
|
||||
set.network,
|
||||
&CoordinatorMessage::SlashesReported { session: set.session },
|
||||
);
|
||||
}
|
||||
|
||||
for network in serai_client::primitives::NETWORKS {
|
||||
let mut batch = None;
|
||||
for this_batch in &block.batch_events {
|
||||
let serai_client::in_instructions::InInstructionsEvent::Batch {
|
||||
network: batch_network,
|
||||
publishing_session,
|
||||
id,
|
||||
external_network_block_hash,
|
||||
in_instructions_hash,
|
||||
in_instruction_results,
|
||||
} = this_batch
|
||||
else {
|
||||
panic!("Batch event wasn't a Batch event: {this_batch:?}");
|
||||
};
|
||||
if network == *batch_network {
|
||||
if batch.is_some() {
|
||||
Err("Serai block had multiple batches for the same network".to_string())?;
|
||||
}
|
||||
batch = Some(ExecutedBatch {
|
||||
id: *id,
|
||||
publisher: *publishing_session,
|
||||
external_network_block_hash: *external_network_block_hash,
|
||||
in_instructions_hash: *in_instructions_hash,
|
||||
in_instruction_results: in_instruction_results
|
||||
.iter()
|
||||
.map(|bit| {
|
||||
if *bit {
|
||||
InInstructionResult::Succeeded
|
||||
} else {
|
||||
InInstructionResult::Failed
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
let mut burns = vec![];
|
||||
for burn in &block.burn_events {
|
||||
let serai_client::coins::CoinsEvent::BurnWithInstruction { from: _, instruction } =
|
||||
&burn
|
||||
else {
|
||||
panic!("Burn event wasn't a Burn.in event: {burn:?}");
|
||||
};
|
||||
if instruction.balance.coin.network() == network {
|
||||
burns.push(instruction.clone());
|
||||
}
|
||||
}
|
||||
|
||||
crate::Canonical::send(
|
||||
&mut txn,
|
||||
network,
|
||||
&CoordinatorMessage::Block { serai_block_number: block_number, batch, burns },
|
||||
);
|
||||
}
|
||||
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
Ok(next_block <= latest_finalized_block)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,245 +0,0 @@
|
||||
use std::future::Future;
|
||||
|
||||
use futures::stream::{StreamExt, FuturesOrdered};
|
||||
|
||||
use serai_client::{
|
||||
primitives::{PublicKey, NetworkId, EmbeddedEllipticCurve},
|
||||
validator_sets::primitives::MAX_KEY_SHARES_PER_SET,
|
||||
Serai,
|
||||
};
|
||||
|
||||
use serai_db::*;
|
||||
use serai_task::ContinuallyRan;
|
||||
|
||||
use serai_cosign::Cosigning;
|
||||
|
||||
use crate::NewSetInformation;
|
||||
|
||||
create_db!(
|
||||
CoordinatorSubstrateEphemeral {
|
||||
NextBlock: () -> u64,
|
||||
}
|
||||
);
|
||||
|
||||
/// The event stream for ephemeral events.
|
||||
pub struct EphemeralEventStream<D: Db> {
|
||||
db: D,
|
||||
serai: Serai,
|
||||
validator: PublicKey,
|
||||
}
|
||||
|
||||
impl<D: Db> EphemeralEventStream<D> {
|
||||
/// Create a new ephemeral event stream.
|
||||
///
|
||||
/// Only one of these may exist over the provided database.
|
||||
pub fn new(db: D, serai: Serai, validator: PublicKey) -> Self {
|
||||
Self { db, serai, validator }
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
|
||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
|
||||
async move {
|
||||
let next_block = NextBlock::get(&self.db).unwrap_or(0);
|
||||
let latest_finalized_block =
|
||||
Cosigning::<D>::latest_cosigned_block_number(&self.db).map_err(|e| format!("{e:?}"))?;
|
||||
|
||||
// These are all the events which generate canonical messages
|
||||
struct EphemeralEvents {
|
||||
block_hash: [u8; 32],
|
||||
time: u64,
|
||||
new_set_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
|
||||
accepted_handover_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
|
||||
}
|
||||
|
||||
// For a cosigned block, fetch all relevant events
|
||||
let scan = {
|
||||
let db = self.db.clone();
|
||||
let serai = &self.serai;
|
||||
move |block_number| {
|
||||
let block_hash = Cosigning::<D>::cosigned_block(&db, block_number);
|
||||
|
||||
async move {
|
||||
let block_hash = match block_hash {
|
||||
Ok(Some(block_hash)) => block_hash,
|
||||
Ok(None) => {
|
||||
panic!("iterating to latest cosigned block but couldn't get cosigned block")
|
||||
}
|
||||
Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()),
|
||||
};
|
||||
|
||||
let temporal_serai = serai.as_of(block_hash);
|
||||
let temporal_serai_validators = temporal_serai.validator_sets();
|
||||
let (block, new_set_events, accepted_handover_events) = tokio::try_join!(
|
||||
serai.block(block_hash),
|
||||
temporal_serai_validators.new_set_events(),
|
||||
temporal_serai_validators.accepted_handover_events(),
|
||||
)
|
||||
.map_err(|e| format!("{e:?}"))?;
|
||||
let Some(block) = block else {
|
||||
Err(format!("Serai node didn't have cosigned block #{block_number}"))?
|
||||
};
|
||||
|
||||
let time = if block_number == 0 {
|
||||
block.time().unwrap_or(0)
|
||||
} else {
|
||||
// Serai's block time is in milliseconds
|
||||
block
|
||||
.time()
|
||||
.ok_or_else(|| "non-genesis Serai block didn't have a time".to_string())? /
|
||||
1000
|
||||
};
|
||||
|
||||
Ok((
|
||||
block_number,
|
||||
EphemeralEvents { block_hash, time, new_set_events, accepted_handover_events },
|
||||
))
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Sync the next set of upcoming blocks all at once to minimize latency
|
||||
const BLOCKS_TO_SYNC_AT_ONCE: u64 = 50;
|
||||
// FuturesOrdered can be bad practice due to potentially causing timeouts if it isn't
|
||||
// sufficiently polled. Our processing loop isn't minimal, itself making multiple requests,
|
||||
// but the loop body should only be executed a few times a week. It's better to get through
|
||||
// most blocks with this optimization, and have timeouts a few times a week, than not have
|
||||
// this at all.
|
||||
let mut set = FuturesOrdered::new();
|
||||
for block_number in
|
||||
next_block ..= latest_finalized_block.min(next_block + BLOCKS_TO_SYNC_AT_ONCE)
|
||||
{
|
||||
set.push_back(scan(block_number));
|
||||
}
|
||||
|
||||
for block_number in next_block ..= latest_finalized_block {
|
||||
// Get the next block in our queue
|
||||
let (popped_block_number, block) = set.next().await.unwrap()?;
|
||||
assert_eq!(block_number, popped_block_number);
|
||||
// Re-populate the queue
|
||||
if (block_number + BLOCKS_TO_SYNC_AT_ONCE) <= latest_finalized_block {
|
||||
set.push_back(scan(block_number + BLOCKS_TO_SYNC_AT_ONCE));
|
||||
}
|
||||
|
||||
let mut txn = self.db.txn();
|
||||
|
||||
for new_set in block.new_set_events {
|
||||
let serai_client::validator_sets::ValidatorSetsEvent::NewSet { set } = &new_set else {
|
||||
panic!("NewSet event wasn't a NewSet event: {new_set:?}");
|
||||
};
|
||||
|
||||
// We only coordinate over external networks
|
||||
if set.network == NetworkId::Serai {
|
||||
continue;
|
||||
}
|
||||
|
||||
let serai = self.serai.as_of(block.block_hash);
|
||||
let serai = serai.validator_sets();
|
||||
let Some(validators) =
|
||||
serai.participants(set.network).await.map_err(|e| format!("{e:?}"))?
|
||||
else {
|
||||
Err(format!(
|
||||
"block #{block_number} declared a new set but didn't have the participants"
|
||||
))?
|
||||
};
|
||||
let in_set = validators.iter().any(|(validator, _)| *validator == self.validator);
|
||||
if in_set {
|
||||
if u16::try_from(validators.len()).is_err() {
|
||||
Err("more than u16::MAX validators sent")?;
|
||||
}
|
||||
|
||||
let Ok(validators) = validators
|
||||
.into_iter()
|
||||
.map(|(validator, weight)| u16::try_from(weight).map(|weight| (validator, weight)))
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
else {
|
||||
Err("validator's weight exceeded u16::MAX".to_string())?
|
||||
};
|
||||
|
||||
let total_weight = validators.iter().map(|(_, weight)| u32::from(*weight)).sum::<u32>();
|
||||
if total_weight > MAX_KEY_SHARES_PER_SET {
|
||||
Err(format!(
|
||||
"{set:?} has {total_weight} key shares when the max is {MAX_KEY_SHARES_PER_SET}"
|
||||
))?;
|
||||
}
|
||||
let total_weight = u16::try_from(total_weight).unwrap();
|
||||
|
||||
// Fetch all of the validators' embedded elliptic curve keys
|
||||
let mut embedded_elliptic_curve_keys = FuturesOrdered::new();
|
||||
for (validator, _) in &validators {
|
||||
let validator = *validator;
|
||||
// try_join doesn't return a future so we need to wrap it in this additional async
|
||||
// block
|
||||
embedded_elliptic_curve_keys.push_back(async move {
|
||||
tokio::try_join!(
|
||||
// One future to fetch the substrate embedded key
|
||||
serai
|
||||
.embedded_elliptic_curve_key(validator, EmbeddedEllipticCurve::Embedwards25519),
|
||||
// One future to fetch the external embedded key, if there is a distinct curve
|
||||
async {
|
||||
// `embedded_elliptic_curves` is documented to have the second entry be the
|
||||
// network-specific curve (if it exists and is distinct from Embedwards25519)
|
||||
if let Some(curve) = set.network.embedded_elliptic_curves().get(1) {
|
||||
serai.embedded_elliptic_curve_key(validator, *curve).await.map(Some)
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
)
|
||||
.map(|(substrate_embedded_key, external_embedded_key)| {
|
||||
(validator, substrate_embedded_key, external_embedded_key)
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
let mut evrf_public_keys = Vec::with_capacity(usize::from(total_weight));
|
||||
for (validator, weight) in &validators {
|
||||
let (future_validator, substrate_embedded_key, external_embedded_key) =
|
||||
embedded_elliptic_curve_keys.next().await.unwrap().map_err(|e| format!("{e:?}"))?;
|
||||
assert_eq!(*validator, future_validator);
|
||||
let external_embedded_key =
|
||||
external_embedded_key.unwrap_or(substrate_embedded_key.clone());
|
||||
match (substrate_embedded_key, external_embedded_key) {
|
||||
(Some(substrate_embedded_key), Some(external_embedded_key)) => {
|
||||
let substrate_embedded_key = <[u8; 32]>::try_from(substrate_embedded_key)
|
||||
.map_err(|_| "Embedwards25519 key wasn't 32 bytes".to_string())?;
|
||||
for _ in 0 .. *weight {
|
||||
evrf_public_keys.push((substrate_embedded_key, external_embedded_key.clone()));
|
||||
}
|
||||
}
|
||||
_ => Err("NewSet with validator missing an embedded key".to_string())?,
|
||||
}
|
||||
}
|
||||
|
||||
crate::NewSet::send(
|
||||
&mut txn,
|
||||
&NewSetInformation {
|
||||
set: *set,
|
||||
serai_block: block.block_hash,
|
||||
start_time: block.time,
|
||||
// TODO: Why do we have this as an explicit field here?
|
||||
// Shouldn't this be inlined into the Processor's key gen code, where it's used?
|
||||
threshold: ((total_weight * 2) / 3) + 1,
|
||||
validators,
|
||||
evrf_public_keys,
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
for accepted_handover in block.accepted_handover_events {
|
||||
let serai_client::validator_sets::ValidatorSetsEvent::AcceptedHandover { set } =
|
||||
&accepted_handover
|
||||
else {
|
||||
panic!("AcceptedHandover event wasn't a AcceptedHandover event: {accepted_handover:?}");
|
||||
};
|
||||
crate::SignSlashReport::send(&mut txn, set);
|
||||
}
|
||||
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
Ok(next_block <= latest_finalized_block)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,112 +0,0 @@
|
||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
||||
#![doc = include_str!("../README.md")]
|
||||
#![deny(missing_docs)]
|
||||
|
||||
use scale::{Encode, Decode};
|
||||
use borsh::{io, BorshSerialize, BorshDeserialize};
|
||||
|
||||
use serai_client::{
|
||||
primitives::{PublicKey, NetworkId},
|
||||
validator_sets::primitives::ValidatorSet,
|
||||
};
|
||||
|
||||
use serai_db::*;
|
||||
|
||||
mod canonical;
|
||||
mod ephemeral;
|
||||
|
||||
fn borsh_serialize_validators<W: io::Write>(
|
||||
validators: &Vec<(PublicKey, u16)>,
|
||||
writer: &mut W,
|
||||
) -> Result<(), io::Error> {
|
||||
// This doesn't use `encode_to` as `encode_to` panics if the writer returns an error
|
||||
writer.write_all(&validators.encode())
|
||||
}
|
||||
|
||||
fn borsh_deserialize_validators<R: io::Read>(
|
||||
reader: &mut R,
|
||||
) -> Result<Vec<(PublicKey, u16)>, io::Error> {
|
||||
Decode::decode(&mut scale::IoReader(reader)).map_err(io::Error::other)
|
||||
}
|
||||
|
||||
/// The information for a new set.
|
||||
#[derive(Debug, BorshSerialize, BorshDeserialize)]
|
||||
pub struct NewSetInformation {
|
||||
set: ValidatorSet,
|
||||
serai_block: [u8; 32],
|
||||
start_time: u64,
|
||||
threshold: u16,
|
||||
#[borsh(
|
||||
serialize_with = "borsh_serialize_validators",
|
||||
deserialize_with = "borsh_deserialize_validators"
|
||||
)]
|
||||
validators: Vec<(PublicKey, u16)>,
|
||||
evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
|
||||
}
|
||||
|
||||
mod _public_db {
|
||||
use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};
|
||||
|
||||
use serai_db::*;
|
||||
|
||||
use crate::NewSetInformation;
|
||||
|
||||
db_channel!(
|
||||
CoordinatorSubstrate {
|
||||
// Canonical messages to send to the processor
|
||||
Canonical: (network: NetworkId) -> messages::substrate::CoordinatorMessage,
|
||||
|
||||
// Relevant new set, from an ephemeral event stream
|
||||
NewSet: () -> NewSetInformation,
|
||||
// Relevant sign slash report, from an ephemeral event stream
|
||||
SignSlashReport: () -> ValidatorSet,
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
/// The canonical event stream.
|
||||
pub struct Canonical;
|
||||
impl Canonical {
|
||||
pub(crate) fn send(
|
||||
txn: &mut impl DbTxn,
|
||||
network: NetworkId,
|
||||
msg: &messages::substrate::CoordinatorMessage,
|
||||
) {
|
||||
_public_db::Canonical::send(txn, network, msg);
|
||||
}
|
||||
/// Try to receive a canonical event, returning `None` if there is none to receive.
|
||||
pub fn try_recv(
|
||||
txn: &mut impl DbTxn,
|
||||
network: NetworkId,
|
||||
) -> Option<messages::substrate::CoordinatorMessage> {
|
||||
_public_db::Canonical::try_recv(txn, network)
|
||||
}
|
||||
}
|
||||
|
||||
/// The channel for new set events emitted by an ephemeral event stream.
|
||||
pub struct NewSet;
|
||||
impl NewSet {
|
||||
pub(crate) fn send(txn: &mut impl DbTxn, msg: &NewSetInformation) {
|
||||
_public_db::NewSet::send(txn, msg);
|
||||
}
|
||||
/// Try to receive a new set's information, returning `None` if there is none to receive.
|
||||
pub fn try_recv(txn: &mut impl DbTxn) -> Option<NewSetInformation> {
|
||||
_public_db::NewSet::try_recv(txn)
|
||||
}
|
||||
}
|
||||
|
||||
/// The channel for notifications to sign a slash report, as emitted by an ephemeral event stream.
|
||||
///
|
||||
/// These notifications MAY be for irrelevant validator sets. The only guarantee is the
|
||||
/// notifications for all relevant validator sets will be included.
|
||||
pub struct SignSlashReport;
|
||||
impl SignSlashReport {
|
||||
pub(crate) fn send(txn: &mut impl DbTxn, set: &ValidatorSet) {
|
||||
_public_db::SignSlashReport::send(txn, set);
|
||||
}
|
||||
/// Try to receive a notification to sign a slash report, returning `None` if there is none to
|
||||
/// receive.
|
||||
pub fn try_recv(txn: &mut impl DbTxn) -> Option<ValidatorSet> {
|
||||
_public_db::SignSlashReport::try_recv(txn)
|
||||
}
|
||||
}
|
||||
@@ -6,7 +6,6 @@ license = "AGPL-3.0-only"
|
||||
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tributary"
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
edition = "2021"
|
||||
rust-version = "1.81"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
@@ -16,7 +15,8 @@ rustdoc-args = ["--cfg", "docsrs"]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
thiserror = { version = "2", default-features = false, features = ["std"] }
|
||||
async-trait = { version = "0.1", default-features = false }
|
||||
thiserror = { version = "1", default-features = false }
|
||||
|
||||
subtle = { version = "^2", default-features = false, features = ["std"] }
|
||||
zeroize = { version = "^1.5", default-features = false, features = ["std"] }
|
||||
@@ -27,7 +27,8 @@ rand_chacha = { version = "0.3", default-features = false, features = ["std"] }
|
||||
blake2 = { version = "0.10", default-features = false, features = ["std"] }
|
||||
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] }
|
||||
|
||||
ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] }
|
||||
dalek-ff-group = { path = "../../crypto/dalek-ff-group" }
|
||||
ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", default-features = false, features = ["std"] }
|
||||
schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", default-features = false, features = ["std"] }
|
||||
|
||||
hex = { version = "0.4", default-features = false, features = ["std"] }
|
||||
|
||||
@@ -135,7 +135,7 @@ impl<T: TransactionTrait> Block<T> {
|
||||
// Check TXs are sorted by nonce.
|
||||
let nonce = |tx: &Transaction<T>| {
|
||||
if let TransactionKind::Signed(_, Signed { nonce, .. }) = tx.kind() {
|
||||
nonce
|
||||
*nonce
|
||||
} else {
|
||||
0
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use std::collections::{VecDeque, HashSet};
|
||||
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite};
|
||||
|
||||
use serai_db::{Get, DbTxn, Db};
|
||||
|
||||
@@ -323,7 +324,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
|
||||
}
|
||||
TransactionKind::Signed(order, Signed { signer, nonce, .. }) => {
|
||||
let next_nonce = nonce + 1;
|
||||
txn.put(Self::next_nonce_key(&self.genesis, &signer, &order), next_nonce.to_le_bytes());
|
||||
txn.put(Self::next_nonce_key(&self.genesis, signer, &order), next_nonce.to_le_bytes());
|
||||
self.mempool.remove(&tx.hash());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
use core::{marker::PhantomData, fmt::Debug, future::Future};
|
||||
use core::{marker::PhantomData, fmt::Debug};
|
||||
use std::{sync::Arc, io};
|
||||
|
||||
use async_trait::async_trait;
|
||||
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use ciphersuite::{Ciphersuite, Ristretto};
|
||||
use dalek_ff_group::Ristretto;
|
||||
use ciphersuite::Ciphersuite;
|
||||
|
||||
use scale::Decode;
|
||||
use futures_channel::mpsc::UnboundedReceiver;
|
||||
@@ -48,17 +51,13 @@ pub(crate) use crate::tendermint::*;
|
||||
pub mod tests;
|
||||
|
||||
/// Size limit for an individual transaction.
|
||||
// This needs to be big enough to participate in a 101-of-150 eVRF DKG with each element taking
|
||||
// `MAX_KEY_LEN`. This also needs to be big enough to participate in signing 520 Bitcoin inputs
|
||||
// with 49 key shares, and signing 120 Monero inputs with 49 key shares.
|
||||
// TODO: Add a test for these properties
|
||||
pub const TRANSACTION_SIZE_LIMIT: usize = 2_000_000;
|
||||
pub const TRANSACTION_SIZE_LIMIT: usize = 3_000_000;
|
||||
/// Amount of transactions a single account may have in the mempool.
|
||||
pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
|
||||
/// Block size limit.
|
||||
// This targets a growth limit of roughly 30 GB a day, under load, in order to prevent a malicious
|
||||
// This targets a growth limit of roughly 45 GB a day, under load, in order to prevent a malicious
|
||||
// participant from flooding disks and causing out of space errors in order processes.
|
||||
pub const BLOCK_SIZE_LIMIT: usize = 2_001_000;
|
||||
pub const BLOCK_SIZE_LIMIT: usize = 3_001_000;
|
||||
|
||||
pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
|
||||
pub(crate) const TRANSACTION_MESSAGE: u8 = 1;
|
||||
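A quick sanity check of the "roughly 45 GB a day" figure: the 3_001_000-byte block size limit in this hunk, combined with the target block time constants that appear later in this diff (BLOCK_PROCESSING_TIME of 999 ms plus three times the 1667 ms LATENCY_TIME, i.e. 6 s per block), multiplies out to about that ceiling. A back-of-the-envelope sketch, not project code:

```rust
// Worst-case chain growth per day at a 3_001_000-byte block size limit and a 6-second
// target block time (999 + 3 * 1667 ms, per the constants later in this diff).
fn main() {
  const BLOCK_SIZE_LIMIT: u64 = 3_001_000; // bytes
  const TARGET_BLOCK_TIME_MS: u64 = 999 + (3 * 1667); // 6000 ms
  let blocks_per_day = (24 * 60 * 60 * 1000) / TARGET_BLOCK_TIME_MS; // 14_400 blocks
  let bytes_per_day = blocks_per_day * BLOCK_SIZE_LIMIT; // ~43.2 GB, in line with the comment
  println!("{blocks_per_day} blocks/day, ~{} GB/day worst case", bytes_per_day / 1_000_000_000);
}
```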
@@ -108,7 +107,7 @@ impl<T: TransactionTrait> Transaction<T> {
    }
  }

  pub fn kind(&self) -> TransactionKind {
  pub fn kind(&self) -> TransactionKind<'_> {
    match self {
      Transaction::Tendermint(tx) => tx.kind(),
      Transaction::Application(tx) => tx.kind(),
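For context on the `TransactionKind` / `TransactionKind<'_>` signature pair above, and the `*signer` / `*nonce` dereferences in the blockchain and mempool hunks: when the enum's `Signed` variant borrows its metadata instead of carrying an owned clone, pattern matching yields references, so call sites copy the fields out explicitly. A minimal sketch with assumed, simplified types (the crate's actual `Signed` differs, e.g. it uses a curve point for the signer):

```rust
// Simplified stand-in types for illustration only.
struct Signed {
  signer: [u8; 32],
  nonce: u32,
}

// Borrowing the metadata gives the enum a lifetime, hence `TransactionKind<'_>`.
enum TransactionKind<'a> {
  Unsigned,
  Signed(Vec<u8>, &'a Signed),
}

// Matching through the reference binds `nonce` as `&u32`, so it's copied out with `*`.
fn nonce_of(kind: &TransactionKind<'_>) -> u32 {
  if let TransactionKind::Signed(_, Signed { nonce, .. }) = kind {
    *nonce
  } else {
    0
  }
}

fn main() {
  let signed = Signed { signer: [0; 32], nonce: 5 };
  let kind = TransactionKind::Signed(vec![], &signed);
  assert_eq!(nonce_of(&kind), 5);
  assert_eq!(nonce_of(&TransactionKind::Unsigned), 0);
}
```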
@@ -129,18 +128,20 @@ pub trait ReadWrite: Sized {
  }
}

pub trait P2p: 'static + Send + Sync + Clone {
#[async_trait]
pub trait P2p: 'static + Send + Sync + Clone + Debug {
  /// Broadcast a message to all other members of the Tributary with the specified genesis.
  ///
  /// The Tributary will re-broadcast consensus messages on a fixed interval to ensure they aren't
  /// prematurely dropped from the P2P layer. The P2P layer SHOULD perform content-based
  /// deduplication to ensure a sane amount of load.
  fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) -> impl Send + Future<Output = ()>;
  async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>);
}

#[async_trait]
impl<P: P2p> P2p for Arc<P> {
  fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) -> impl Send + Future<Output = ()> {
    P::broadcast(self, genesis, msg)
  async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) {
    (*self).broadcast(genesis, msg).await
  }
}
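The `P2p` hunk above shows the two shapes this trait method can take: an `#[async_trait]` `async fn` (where the macro boxes the returned future) and a plain method returning `impl Send + Future`, which newer Rust toolchains support directly in traits. A minimal, self-contained sketch of the latter form, using placeholder names rather than the crate's types:

```rust
use core::future::Future;

// Placeholder trait, not the crate's `P2p`; it only demonstrates the
// return-position `impl Trait` style of async trait method.
trait Broadcast: Send + Sync {
  fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) -> impl Send + Future<Output = ()>;
}

struct LogOnly;

impl Broadcast for LogOnly {
  fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) -> impl Send + Future<Output = ()> {
    // The body is an async block; callers `.await` the returned future as usual.
    async move {
      println!("would broadcast {} bytes for genesis {:02x?}", msg.len(), &genesis[.. 4]);
    }
  }
}

// Callers look identical to calling an `async fn`.
async fn usage(p: &impl Broadcast) {
  p.broadcast([0; 32], vec![1, 2, 3]).await;
}
```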
@@ -1,6 +1,7 @@
use std::collections::HashMap;

use ciphersuite::{Ciphersuite, Ristretto};
use dalek_ff_group::Ristretto;
use ciphersuite::Ciphersuite;

use serai_db::{DbTxn, Db};
@@ -81,11 +82,11 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
        }
        Transaction::Application(tx) => match tx.kind() {
          TransactionKind::Signed(order, Signed { signer, nonce, .. }) => {
            let amount = *res.txs_per_signer.get(&signer).unwrap_or(&0) + 1;
            res.txs_per_signer.insert(signer, amount);
            let amount = *res.txs_per_signer.get(signer).unwrap_or(&0) + 1;
            res.txs_per_signer.insert(*signer, amount);

            if let Some(prior_nonce) =
              res.last_nonce_in_mempool.insert((signer, order.clone()), nonce)
              res.last_nonce_in_mempool.insert((*signer, order.clone()), *nonce)
            {
              assert_eq!(prior_nonce, nonce - 1);
            }

@@ -133,14 +134,14 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
      match app_tx.kind() {
        TransactionKind::Signed(order, Signed { signer, .. }) => {
          // Get the nonce from the blockchain
          let Some(blockchain_next_nonce) = blockchain_next_nonce(signer, order.clone()) else {
          let Some(blockchain_next_nonce) = blockchain_next_nonce(*signer, order.clone()) else {
            // Not a participant
            Err(TransactionError::InvalidSigner)?
          };
          let mut next_nonce = blockchain_next_nonce;

          if let Some(mempool_last_nonce) =
            self.last_nonce_in_mempool.get(&(signer, order.clone()))
            self.last_nonce_in_mempool.get(&(*signer, order.clone()))
          {
            assert!(*mempool_last_nonce >= blockchain_next_nonce);
            next_nonce = *mempool_last_nonce + 1;

@@ -148,14 +149,14 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {

          // If we have too many transactions from this sender, don't add this yet UNLESS we are
          // this sender
          let amount_in_pool = *self.txs_per_signer.get(&signer).unwrap_or(&0) + 1;
          let amount_in_pool = *self.txs_per_signer.get(signer).unwrap_or(&0) + 1;
          if !internal && (amount_in_pool > ACCOUNT_MEMPOOL_LIMIT) {
            Err(TransactionError::TooManyInMempool)?;
          }

          verify_transaction(app_tx, self.genesis, &mut |_, _| Some(next_nonce))?;
          self.last_nonce_in_mempool.insert((signer, order.clone()), next_nonce);
          self.txs_per_signer.insert(signer, amount_in_pool);
          self.last_nonce_in_mempool.insert((*signer, order.clone()), next_nonce);
          self.txs_per_signer.insert(*signer, amount_in_pool);
        }
        TransactionKind::Unsigned => {
          // check we have the tx in the pool/chain

@@ -205,7 +206,7 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
    // Sort signed by nonce
    let nonce = |tx: &Transaction<T>| {
      if let TransactionKind::Signed(_, Signed { nonce, .. }) = tx.kind() {
        nonce
        *nonce
      } else {
        unreachable!()
      }

@@ -242,11 +243,11 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {

    if let Some(tx) = self.txs.remove(tx) {
      if let TransactionKind::Signed(order, Signed { signer, nonce, .. }) = tx.kind() {
        let amount = *self.txs_per_signer.get(&signer).unwrap() - 1;
        self.txs_per_signer.insert(signer, amount);
        let amount = *self.txs_per_signer.get(signer).unwrap() - 1;
        self.txs_per_signer.insert(*signer, amount);

        if self.last_nonce_in_mempool.get(&(signer, order.clone())) == Some(&nonce) {
          self.last_nonce_in_mempool.remove(&(signer, order));
        if self.last_nonce_in_mempool.get(&(*signer, order.clone())) == Some(nonce) {
          self.last_nonce_in_mempool.remove(&(*signer, order));
        }
      }
    }
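The mempool hunks above all touch the same two pieces of per-signer bookkeeping: a count of queued transactions per signer (capped at ACCOUNT_MEMPOOL_LIMIT) and the highest nonce queued per (signer, order) pair, which has to stay contiguous with the chain's next nonce. A toy sketch of that bookkeeping with assumed, simplified types (byte-array signers instead of curve points, and no signature verification):

```rust
use std::collections::HashMap;

const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;

#[derive(Default)]
struct Bookkeeping {
  // How many transactions each signer currently has queued.
  txs_per_signer: HashMap<[u8; 32], u32>,
  // The highest nonce queued per (signer, order) pair.
  last_nonce_in_mempool: HashMap<([u8; 32], Vec<u8>), u32>,
}

impl Bookkeeping {
  fn try_add(&mut self, signer: [u8; 32], order: Vec<u8>, nonce: u32, chain_next: u32) -> bool {
    // The next acceptable nonce is the chain's next nonce, or one past the last queued nonce.
    let expected = self
      .last_nonce_in_mempool
      .get(&(signer, order.clone()))
      .map_or(chain_next, |last| last + 1);
    let amount = self.txs_per_signer.get(&signer).copied().unwrap_or(0) + 1;
    if (nonce != expected) || (amount > ACCOUNT_MEMPOOL_LIMIT) {
      return false;
    }
    self.txs_per_signer.insert(signer, amount);
    self.last_nonce_in_mempool.insert((signer, order), nonce);
    true
  }
}

fn main() {
  let mut book = Bookkeeping::default();
  assert!(book.try_add([0; 32], b"order".to_vec(), 5, 5));
  assert!(book.try_add([0; 32], b"order".to_vec(), 6, 5));
  assert!(!book.try_add([0; 32], b"order".to_vec(), 8, 5)); // a gap in nonces is rejected
}
```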
@@ -1,6 +1,8 @@
use core::{ops::Deref, future::Future};
use core::ops::Deref;
use std::{sync::Arc, collections::HashMap};

use async_trait::async_trait;

use subtle::ConstantTimeEq;
use zeroize::{Zeroize, Zeroizing};

@@ -9,12 +11,13 @@ use rand_chacha::ChaCha12Rng;

use transcript::{Transcript, RecommendedTranscript};

use dalek_ff_group::Ristretto;
use ciphersuite::{
  group::{
    GroupEncoding,
    ff::{Field, PrimeField},
  },
  Ciphersuite, Ristretto,
  Ciphersuite,
};
use schnorr::{
  SchnorrSignature,
@@ -72,52 +75,50 @@ impl Signer {
  }
}

#[async_trait]
impl SignerTrait for Signer {
  type ValidatorId = [u8; 32];
  type Signature = [u8; 64];

  /// Returns the validator's current ID. Returns None if they aren't a current validator.
  fn validator_id(&self) -> impl Send + Future<Output = Option<Self::ValidatorId>> {
    async move { Some((Ristretto::generator() * self.key.deref()).to_bytes()) }
  async fn validator_id(&self) -> Option<Self::ValidatorId> {
    Some((Ristretto::generator() * self.key.deref()).to_bytes())
  }

  /// Sign a signature with the current validator's private key.
  fn sign(&self, msg: &[u8]) -> impl Send + Future<Output = Self::Signature> {
    async move {
      let mut nonce =
        Zeroizing::new(RecommendedTranscript::new(b"Tributary Chain Tendermint Nonce"));
      nonce.append_message(b"genesis", self.genesis);
      nonce.append_message(b"key", Zeroizing::new(self.key.deref().to_repr()).as_ref());
      nonce.append_message(b"message", msg);
      let mut nonce = nonce.challenge(b"nonce");
  async fn sign(&self, msg: &[u8]) -> Self::Signature {
    let mut nonce = Zeroizing::new(RecommendedTranscript::new(b"Tributary Chain Tendermint Nonce"));
    nonce.append_message(b"genesis", self.genesis);
    nonce.append_message(b"key", Zeroizing::new(self.key.deref().to_repr()).as_ref());
    nonce.append_message(b"message", msg);
    let mut nonce = nonce.challenge(b"nonce");

      let mut nonce_arr = [0; 64];
      nonce_arr.copy_from_slice(nonce.as_ref());
    let mut nonce_arr = [0; 64];
    nonce_arr.copy_from_slice(nonce.as_ref());

      let nonce_ref: &mut [u8] = nonce.as_mut();
      nonce_ref.zeroize();
      let nonce_ref: &[u8] = nonce.as_ref();
      assert_eq!(nonce_ref, [0; 64].as_ref());
    let nonce_ref: &mut [u8] = nonce.as_mut();
    nonce_ref.zeroize();
    let nonce_ref: &[u8] = nonce.as_ref();
    assert_eq!(nonce_ref, [0; 64].as_ref());

      let nonce =
        Zeroizing::new(<Ristretto as Ciphersuite>::F::from_bytes_mod_order_wide(&nonce_arr));
      nonce_arr.zeroize();
    let nonce =
      Zeroizing::new(<Ristretto as Ciphersuite>::F::from_bytes_mod_order_wide(&nonce_arr));
    nonce_arr.zeroize();

      assert!(!bool::from(nonce.ct_eq(&<Ristretto as Ciphersuite>::F::ZERO)));
    assert!(!bool::from(nonce.ct_eq(&<Ristretto as Ciphersuite>::F::ZERO)));

      let challenge = challenge(
        self.genesis,
        (Ristretto::generator() * self.key.deref()).to_bytes(),
        (Ristretto::generator() * nonce.deref()).to_bytes().as_ref(),
        msg,
      );
    let challenge = challenge(
      self.genesis,
      (Ristretto::generator() * self.key.deref()).to_bytes(),
      (Ristretto::generator() * nonce.deref()).to_bytes().as_ref(),
      msg,
    );

      let sig = SchnorrSignature::<Ristretto>::sign(&self.key, nonce, challenge).serialize();
    let sig = SchnorrSignature::<Ristretto>::sign(&self.key, nonce, challenge).serialize();

      let mut res = [0; 64];
      res.copy_from_slice(&sig);
      res
    }
    let mut res = [0; 64];
    res.copy_from_slice(&sig);
    res
  }
}
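Both `sign` bodies above wipe the transcript challenge after copying it out, then assert the wipe took effect before reducing the copy into a scalar. A tiny standalone illustration of that zeroize-then-check pattern, assuming only the `zeroize` crate this file already depends on:

```rust
use zeroize::Zeroize;

fn main() {
  // Stand-in for the 64-byte nonce challenge; the real code also wraps secrets in `Zeroizing`.
  let mut challenge = [0x42u8; 64];
  let mut copy = [0u8; 64];
  copy.copy_from_slice(&challenge);

  // Wipe the original buffer and verify the wipe actually happened.
  challenge.zeroize();
  assert_eq!(challenge, [0u8; 64]);

  // ... `copy` would now be reduced into a scalar and wiped as well ...
  copy.zeroize();
}
```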
@@ -274,6 +275,7 @@ pub const BLOCK_PROCESSING_TIME: u32 = 999;
pub const LATENCY_TIME: u32 = 1667;
pub const TARGET_BLOCK_TIME: u32 = BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME);

#[async_trait]
impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P> {
  type Db = D;
@@ -299,126 +301,111 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
    self.validators.clone()
  }

  fn broadcast(&mut self, msg: SignedMessageFor<Self>) -> impl Send + Future<Output = ()> {
    async move {
      let mut to_broadcast = vec![TENDERMINT_MESSAGE];
      to_broadcast.extend(msg.encode());
      self.p2p.broadcast(self.genesis, to_broadcast).await
    }
  async fn broadcast(&mut self, msg: SignedMessageFor<Self>) {
    let mut to_broadcast = vec![TENDERMINT_MESSAGE];
    to_broadcast.extend(msg.encode());
    self.p2p.broadcast(self.genesis, to_broadcast).await
  }

  fn slash(
    &mut self,
    validator: Self::ValidatorId,
    slash_event: SlashEvent,
  ) -> impl Send + Future<Output = ()> {
    async move {
      log::error!(
        "validator {} triggered a slash event on tributary {} (with evidence: {})",
        hex::encode(validator),
        hex::encode(self.genesis),
        matches!(slash_event, SlashEvent::WithEvidence(_)),
      );
  async fn slash(&mut self, validator: Self::ValidatorId, slash_event: SlashEvent) {
    log::error!(
      "validator {} triggered a slash event on tributary {} (with evidence: {})",
      hex::encode(validator),
      hex::encode(self.genesis),
      matches!(slash_event, SlashEvent::WithEvidence(_)),
    );

      let signer = self.signer();
      let Some(tx) = (match slash_event {
        SlashEvent::WithEvidence(evidence) => {
          // create an unsigned evidence tx
          Some(TendermintTx::SlashEvidence(evidence))
        }
        SlashEvent::Id(_reason, _block, _round) => {
          // TODO: Increase locally observed slash points
          None
        }
      }) else {
        return;
      };

      // add tx to blockchain and broadcast to peers
      let mut to_broadcast = vec![TRANSACTION_MESSAGE];
      tx.write(&mut to_broadcast).unwrap();
      if self.blockchain.write().await.add_transaction::<Self>(
        true,
        Transaction::Tendermint(tx),
        &self.signature_scheme(),
      ) == Ok(true)
      {
        self.p2p.broadcast(signer.genesis, to_broadcast).await;
    let signer = self.signer();
    let Some(tx) = (match slash_event {
      SlashEvent::WithEvidence(evidence) => {
        // create an unsigned evidence tx
        Some(TendermintTx::SlashEvidence(evidence))
      }
      SlashEvent::Id(_reason, _block, _round) => {
        // TODO: Increase locally observed slash points
        None
      }
    }) else {
      return;
    };

    // add tx to blockchain and broadcast to peers
    let mut to_broadcast = vec![TRANSACTION_MESSAGE];
    tx.write(&mut to_broadcast).unwrap();
    if self.blockchain.write().await.add_transaction::<Self>(
      true,
      Transaction::Tendermint(tx),
      &self.signature_scheme(),
    ) == Ok(true)
    {
      self.p2p.broadcast(signer.genesis, to_broadcast).await;
    }
  }

  fn validate(
    &self,
    block: &Self::Block,
  ) -> impl Send + Future<Output = Result<(), TendermintBlockError>> {
    async move {
      let block =
        Block::read::<&[u8]>(&mut block.0.as_ref()).map_err(|_| TendermintBlockError::Fatal)?;
      self
        .blockchain
        .read()
        .await
        .verify_block::<Self>(&block, &self.signature_scheme(), false)
        .map_err(|e| match e {
          BlockError::NonLocalProvided(_) => TendermintBlockError::Temporal,
          _ => {
            log::warn!("Tributary Tendermint validate returning BlockError::Fatal due to {e:?}");
            TendermintBlockError::Fatal
          }
        })
    }
  async fn validate(&self, block: &Self::Block) -> Result<(), TendermintBlockError> {
    let block =
      Block::read::<&[u8]>(&mut block.0.as_ref()).map_err(|_| TendermintBlockError::Fatal)?;
    self
      .blockchain
      .read()
      .await
      .verify_block::<Self>(&block, &self.signature_scheme(), false)
      .map_err(|e| match e {
        BlockError::NonLocalProvided(_) => TendermintBlockError::Temporal,
        _ => {
          log::warn!("Tributary Tendermint validate returning BlockError::Fatal due to {e:?}");
          TendermintBlockError::Fatal
        }
      })
  }

  fn add_block(
  async fn add_block(
    &mut self,
    serialized_block: Self::Block,
    commit: Commit<Self::SignatureScheme>,
  ) -> impl Send + Future<Output = Option<Self::Block>> {
    async move {
      let invalid_block = || {
        // There's a fatal flaw in the code, it's behind a hard fork, or the validators turned
        // malicious
        // All justify a halt to then achieve social consensus from
        // TODO: Under multiple validator sets, a small validator set turning malicious knocks
        // off the entire network. That's an unacceptable DoS.
        panic!("validators added invalid block to tributary {}", hex::encode(self.genesis));
      };
  ) -> Option<Self::Block> {
    let invalid_block = || {
      // There's a fatal flaw in the code, it's behind a hard fork, or the validators turned
      // malicious
      // All justify a halt to then achieve social consensus from
      // TODO: Under multiple validator sets, a small validator set turning malicious knocks
      // off the entire network. That's an unacceptable DoS.
      panic!("validators added invalid block to tributary {}", hex::encode(self.genesis));
    };

      // Tendermint should only produce valid commits
      assert!(self.verify_commit(serialized_block.id(), &commit));
    // Tendermint should only produce valid commits
    assert!(self.verify_commit(serialized_block.id(), &commit));

      let Ok(block) = Block::read::<&[u8]>(&mut serialized_block.0.as_ref()) else {
        return invalid_block();
      };
    let Ok(block) = Block::read::<&[u8]>(&mut serialized_block.0.as_ref()) else {
      return invalid_block();
    };

      let encoded_commit = commit.encode();
      loop {
        let block_res = self.blockchain.write().await.add_block::<Self>(
          &block,
          encoded_commit.clone(),
          &self.signature_scheme(),
        );
        match block_res {
          Ok(()) => {
            // If we successfully added this block, break
            break;
          }
          Err(BlockError::NonLocalProvided(hash)) => {
            log::error!(
              "missing provided transaction {} which other validators on tributary {} had",
              hex::encode(hash),
              hex::encode(self.genesis)
            );
            tokio::time::sleep(core::time::Duration::from_secs(5)).await;
          }
          _ => return invalid_block(),
    let encoded_commit = commit.encode();
    loop {
      let block_res = self.blockchain.write().await.add_block::<Self>(
        &block,
        encoded_commit.clone(),
        &self.signature_scheme(),
      );
      match block_res {
        Ok(()) => {
          // If we successfully added this block, break
          break;
        }
        Err(BlockError::NonLocalProvided(hash)) => {
          log::error!(
            "missing provided transaction {} which other validators on tributary {} had",
            hex::encode(hash),
            hex::encode(self.genesis)
          );
          tokio::time::sleep(core::time::Duration::from_secs(5)).await;
        }
        _ => return invalid_block(),
      }

      Some(TendermintBlock(
        self.blockchain.write().await.build_block::<Self>(&self.signature_scheme()).serialize(),
      ))
    }

    Some(TendermintBlock(
      self.blockchain.write().await.build_block::<Self>(&self.signature_scheme()).serialize(),
    ))
  }
}
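The `add_block` bodies above retry in a loop when a block references a provided transaction this node hasn't received yet, and treat every other failure as a fatal, invalid block. A stripped-down sketch of that control flow with assumed types (it presumes the tokio and hex crates the surrounding code already uses):

```rust
use core::time::Duration;

// Assumed, simplified error type; the real crate's `BlockError` has more variants.
enum BlockError {
  NonLocalProvided([u8; 32]),
  Other,
}

// Retry until the block applies: a missing provided transaction is a temporary,
// local-only condition, while anything else means consensus accepted a bad block.
async fn add_with_retry(mut try_add: impl FnMut() -> Result<(), BlockError>) {
  loop {
    match try_add() {
      Ok(()) => break,
      Err(BlockError::NonLocalProvided(hash)) => {
        eprintln!("waiting on provided transaction {}", hex::encode(hash));
        tokio::time::sleep(Duration::from_secs(5)).await;
      }
      Err(BlockError::Other) => panic!("validators added an invalid block"),
    }
  }
}
```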
@@ -4,7 +4,8 @@ use scale::{Encode, Decode, IoReader};

use blake2::{Digest, Blake2s256};

use ciphersuite::{Ciphersuite, Ristretto};
use dalek_ff_group::Ristretto;
use ciphersuite::Ciphersuite;

use crate::{
  transaction::{Transaction, TransactionKind, TransactionError},

@@ -39,7 +40,7 @@ impl ReadWrite for TendermintTx {
}

impl Transaction for TendermintTx {
  fn kind(&self) -> TransactionKind {
  fn kind(&self) -> TransactionKind<'_> {
    // There's an assert elsewhere in the codebase expecting this behavior
    // If we do want to add Provided/Signed TendermintTxs, review the implications carefully
    TransactionKind::Unsigned
@@ -1,9 +1,11 @@
use std::{sync::Arc, io, collections::HashMap, fmt::Debug};

use blake2::{Digest, Blake2s256};

use dalek_ff_group::Ristretto;
use ciphersuite::{
  group::{ff::Field, Group},
  Ciphersuite, Ristretto,
  Ciphersuite,
};
use schnorr::SchnorrSignature;

@@ -60,8 +62,8 @@ impl ReadWrite for NonceTransaction {
}

impl TransactionTrait for NonceTransaction {
  fn kind(&self) -> TransactionKind {
    TransactionKind::Signed(vec![], self.2.clone())
  fn kind(&self) -> TransactionKind<'_> {
    TransactionKind::Signed(vec![], &self.2)
  }

  fn hash(&self) -> [u8; 32] {
@@ -10,7 +10,8 @@ use rand::rngs::OsRng;

use blake2::{Digest, Blake2s256};

use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
use dalek_ff_group::Ristretto;
use ciphersuite::{group::ff::Field, Ciphersuite};

use serai_db::{DbTxn, Db, MemDb};

@@ -425,7 +426,7 @@ async fn block_tx_ordering() {
}

impl TransactionTrait for SignedTx {
  fn kind(&self) -> TransactionKind {
  fn kind(&self) -> TransactionKind<'_> {
    match self {
      SignedTx::Signed(signed) => signed.kind(),
      SignedTx::Provided(pro) => pro.kind(),
@@ -3,7 +3,8 @@ use std::{sync::Arc, collections::HashMap};
use zeroize::Zeroizing;
use rand::{RngCore, rngs::OsRng};

use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
use dalek_ff_group::Ristretto;
use ciphersuite::{group::ff::Field, Ciphersuite};

use tendermint::ext::Commit;

@@ -1,12 +1,11 @@
use core::future::Future;

pub use crate::P2p;

#[derive(Clone, Debug)]
pub struct DummyP2p;

#[async_trait::async_trait]
impl P2p for DummyP2p {
  fn broadcast(&self, _: [u8; 32], _: Vec<u8>) -> impl Send + Future<Output = ()> {
    async move { unimplemented!() }
  async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {
    unimplemented!()
  }
}
@@ -1,7 +1,4 @@
use core::future::Future;

use tendermint::ext::Network;

use crate::{
  P2p, TendermintTx,
  tendermint::{TARGET_BLOCK_TIME, TendermintNetwork},

@@ -14,9 +11,10 @@ fn assert_target_block_time() {
#[derive(Clone, Debug)]
pub struct DummyP2p;

#[async_trait::async_trait]
impl P2p for DummyP2p {
  fn broadcast(&self, _: [u8; 32], _: Vec<u8>) -> impl Send + Future<Output = ()> {
    async move { unimplemented!() }
  async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {
    unimplemented!()
  }
}
@@ -6,9 +6,10 @@ use rand::{RngCore, CryptoRng, rngs::OsRng};

use blake2::{Digest, Blake2s256};

use dalek_ff_group::Ristretto;
use ciphersuite::{
  group::{ff::Field, Group},
  Ciphersuite, Ristretto,
  Ciphersuite,
};
use schnorr::SchnorrSignature;

@@ -67,7 +68,7 @@ impl ReadWrite for ProvidedTransaction {
}

impl Transaction for ProvidedTransaction {
  fn kind(&self) -> TransactionKind {
  fn kind(&self) -> TransactionKind<'_> {
    match self.0[0] {
      1 => TransactionKind::Provided("order1"),
      2 => TransactionKind::Provided("order2"),

@@ -119,8 +120,8 @@ impl ReadWrite for SignedTransaction {
}

impl Transaction for SignedTransaction {
  fn kind(&self) -> TransactionKind {
    TransactionKind::Signed(vec![], self.1.clone())
  fn kind(&self) -> TransactionKind<'_> {
    TransactionKind::Signed(vec![], &self.1)
  }

  fn hash(&self) -> [u8; 32] {
Some files were not shown because too many files have changed in this diff.