Mirror of https://github.com/serai-dex/serai.git, synced 2025-12-15 07:29:25 +00:00

Compare commits: develop...1de8136739 (220 commits)
Commits (SHA1):

1de8136739, 445c49f030, 5b74fc8ac1, e67e301fc2, 1d50792eed, 9c92709e62, 3d15710a43, df06da5552, cef5bc95b0, f336ab1ece,
2aebfb21af, 56af6c44eb, 4b34be05bf, 5b337c3ce8, e119fb4c16, ef972b2658, 4de1a5804d, 147a6e43d0, 066aa9eda4, 9593a428e3,
5b3c5ec02b, 9ccfa8a9f5, 18897978d0, 3192370484, 8013c56195, 834c16930b, 2920987173, 26230377b0, 2f5c0c68d0, 8de42cc2d4,
cf4123b0f8, 6a520a7412, b2ec58a445, 8e800885fb, 2a427382f1, ce1689b325, 0b61a75afc, 2aee21e507, b3e003bd5d, 251a6e96e8,
2c8af04781, a0ed043372, 2984d2f8cf, 554c5778e4, 7e4c59a0a3, 294462641e, ae76749513, 1e1b821d34, 702b4c860c, bc1bbf9951,
ec9211fd84, 4292660eda, 8ea5acbacb, 1b1aa74770, 861a8352e5, e64827b6d7, c27aaf8658, 53567e91c8, 1a08d50e16, 855e53164e,
1367e41510, a691be21c8, 673cf8fd47, 118d81bc90, e75c4ec6ed, 9e628d217f, a717ae9ea7, 98c3f75fa2, 18178f3764, bdc3bda04a,
433beac93a, 8f2a9301cf, d21034c349, 381495618c, ee0efe7cde, 7feb7aed22, cc75a92641, a7d5640642, ae61f3d359, 4bcea31c2a,
eb9bce6862, 39be23d807, 3f0f4d520d, 80ca2b780a, 0813351f1f, a38d135059, 67f9f76fdf, 1c5bc2259e, bdf89f5350, 239127aae5,
d9543bee40, 8746b54a43, 7761798a78, 72a18bf8bb, 0616085109, e23176deeb, 5551521e58, a2d9aeaed7, e1ad897f7e, 2edc2f3612,
e56af7fc51, 947e1067d9, b4e94f3d51, 1b39138472, e78236276a, 2c4c33e632, 02409c5735, f2cf03cedf, 0d4c8cf032, b6811f9015,
fcd5fb85df, 3ac0265f07, 9b8c8f8231, 59fa49f750, 723f529659, 73af09effb, 4054e44471, a8159e9070, b61ba9d1bb, 776cbbb9a4,
76a3f3ec4b, 93c7d06684, 4cb838e248, c988b7cdb0, 017aab2258, ba3a6f9e91, e36b671f37, 2d4b775b6e, 247cc8f0cc, 0ccf71df1e,
8aba71b9c4, 46c12c0e66, 3cc7b49492, 0078858c1c, a3cb514400, ed0221d804, 4152bcacb2, f07ec7bee0, 7484eadbbb, 59ff944152,
8f848b1abc, 100c80be9f, a353f9e2da, b62fc3a1fa, 8380653855, b50b889918, d570c1d277, 2da24506a2, 6e9cb74022, 0c1aec29bb,
653ead1e8c, 8ff019265f, 0601d47789, ebef38d93b, 75b4707002, 3c787e005f, f11a6b4ff1, fadc88d2ad, c88ebe985e, 6deb60513c,
bd277e7032, fc765bb9e0, 13b74195f7, f21838e0d5, 76cbe6cf1e, 5999f5d65a, d429a0bae6, 775824f373, 41a74cb513, e26da1ec34,
7266e7f7ea, a8b9b7bad3, 2ca7fccb08, 4f6d91037e, 8db76ed67c, 920303e1b4, 9f4b28e5ae, f9d02d43c2, 8ac501028d, 612c67c537,
04a971a024, 738636c238, 65f3f48517, 7cc07d64d1, fdfe520f9d, 77ef25416b, 7c1025dbcb, a771fbe1c6, 9cebdf7c68, 75251f04b4,
6196642beb, 2bddf00222, 9ab8ba0215, 33e0c85f34, 1e8f4e6156, 66f3428051, 7e71840822, b65dbacd6a, 2fcd9530dd, 379780a3c9,
945f31dfc7, d5d1fc3eea, fd12cc0213, ce805c8cc8, bc0cc5a754, f2ee4daf43, 4e29678799, 74d3075dae, 155ad48f4c, 951872b026,
2b47feafed, a2717d73f0, 8763ef23ed, 57a0ba966b, e843b4a2a0, 2f3bd7a02a, 1e8a9ec5bd, 2f29c91d30, f3b91bd44f, e4e4245ee3
.github/actions/bitcoin/action.yml (vendored; 4 lines changed)

@@ -12,7 +12,7 @@ runs:
   steps:
     - name: Bitcoin Daemon Cache
       id: cache-bitcoind
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
+      uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
       with:
         path: bitcoin.tar.gz
         key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
@@ -37,4 +37,4 @@ runs:

     - name: Bitcoin Regtest Daemon
       shell: bash
-      run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -daemon
+      run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -txindex -daemon
.github/actions/build-dependencies/action.yml (vendored; 50 lines changed)

@@ -7,20 +7,13 @@ runs:
    - name: Remove unused packages
      shell: bash
      run: |
-       # Ensure the repositories are synced
-       sudo apt update -y
-
-       # Actually perform the removals
-       sudo apt remove -y "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
+       sudo apt remove -y "*msbuild*" "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
        sudo apt remove -y "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*"
        sudo apt remove -y "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*"

-       sudo apt remove -y --allow-remove-essential -f shim-signed *python3*
-       # This removal command requires the prior removals due to unmet dependencies otherwise
        sudo apt remove -y "*qemu*" "*sql*" "*texinfo*" "*imagemagick*"
-
-       # Reinstall python3 as a general dependency of a functional operating system
-       sudo apt install -y python3 --fix-missing
+       sudo apt autoremove -y
+       sudo apt clean
+       docker system prune -a --volumes
      if: runner.os == 'Linux'

    - name: Remove unused packages
@@ -38,48 +31,19 @@ runs:
      shell: bash
      run: |
        if [ "$RUNNER_OS" == "Linux" ]; then
-         sudo apt install -y ca-certificates protobuf-compiler libclang-dev
+         sudo apt install -y ca-certificates protobuf-compiler
        elif [ "$RUNNER_OS" == "Windows" ]; then
          choco install protoc
        elif [ "$RUNNER_OS" == "macOS" ]; then
-         brew install protobuf llvm
-         HOMEBREW_ROOT_PATH=/opt/homebrew # Apple Silicon
-         if [ $(uname -m) = "x86_64" ]; then HOMEBREW_ROOT_PATH=/usr/local; fi # Intel
-         ls $HOMEBREW_ROOT_PATH/opt/llvm/lib | grep "libclang.dylib" # Make sure this installed `libclang`
-         echo "DYLD_LIBRARY_PATH=$HOMEBREW_ROOT_PATH/opt/llvm/lib:$DYLD_LIBRARY_PATH" >> "$GITHUB_ENV"
+         brew install protobuf
        fi

    - name: Install solc
      shell: bash
      run: |
-       cargo +1.89 install svm-rs --version =0.5.18
+       cargo install svm-rs
        svm install 0.8.26
        svm use 0.8.26

-   - name: Remove preinstalled Docker
-     shell: bash
-     run: |
-       docker system prune -a --volumes
-       sudo apt remove -y *docker*
-       # Install uidmap which will be required for the explicitly installed Docker
-       sudo apt install uidmap
-     if: runner.os == 'Linux'
-
-   - name: Update system dependencies
-     shell: bash
-     run: |
-       sudo apt update -y
-       sudo apt upgrade -y
-       sudo apt autoremove -y
-       sudo apt clean
-     if: runner.os == 'Linux'
-
-   - name: Install rootless Docker
-     uses: docker/setup-docker-action@b60f85385d03ac8acfca6d9996982511d8620a19
-     with:
-       rootless: true
-       set-host: true
-     if: runner.os == 'Linux'
-
    # - name: Cache Rust
    #   uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43
.github/actions/monero-wallet-rpc/action.yml (vendored; 2 lines changed)

@@ -12,7 +12,7 @@ runs:
   steps:
     - name: Monero Wallet RPC Cache
       id: cache-monero-wallet-rpc
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
+      uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
       with:
         path: monero-wallet-rpc
         key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
.github/actions/monero/action.yml (vendored; 2 lines changed)

@@ -12,7 +12,7 @@ runs:
   steps:
     - name: Monero Daemon Cache
       id: cache-monerod
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
+      uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
       with:
         path: /usr/bin/monerod
         key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
.github/nightly-version (vendored; 2 lines changed)

@@ -1 +1 @@
-nightly-2025-11-01
+nightly-2024-07-01
.github/workflows/common-tests.yml (vendored; 1 line changed)

@@ -30,4 +30,5 @@ jobs:
            -p patchable-async-sleep \
            -p serai-db \
            -p serai-env \
+           -p serai-task \
            -p simple-request
.github/workflows/crypto-tests.yml (vendored; 10 lines changed)

@@ -32,15 +32,13 @@ jobs:
            -p dalek-ff-group \
            -p minimal-ed448 \
            -p ciphersuite \
-           -p ciphersuite-kp256 \
            -p multiexp \
            -p schnorr-signatures \
            -p dleq \
+           -p generalized-bulletproofs \
+           -p generalized-bulletproofs-circuit-abstraction \
+           -p ec-divisors \
+           -p generalized-bulletproofs-ec-gadgets \
            -p dkg \
-           -p dkg-recovery \
-           -p dkg-dealer \
-           -p dkg-promote \
-           -p dkg-musig \
-           -p dkg-pedpop \
            -p modular-frost \
            -p frost-schnorrkel
.github/workflows/daily-deny.yml (vendored; 6 lines changed)

@@ -12,13 +12,13 @@ jobs:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Advisory Cache
-       uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
+       uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
        with:
          path: ~/.cargo/advisory-db
          key: rust-advisory-db

      - name: Install cargo deny
-       run: cargo +1.89 install cargo-deny --version =0.18.3
+       run: cargo install --locked cargo-deny

      - name: Run cargo deny
-       run: cargo deny -L error --all-features check --hide-inclusion-graph
+       run: cargo deny -L error --all-features check
.github/workflows/lint.yml (vendored; 45 lines changed)

@@ -11,7 +11,7 @@ jobs:
  clippy:
    strategy:
      matrix:
-       os: [ubuntu-latest, macos-15-intel, macos-latest, windows-latest]
+       os: [ubuntu-latest, macos-13, macos-14, windows-latest]
    runs-on: ${{ matrix.os }}

    steps:
@@ -26,7 +26,7 @@ jobs:
        uses: ./.github/actions/build-dependencies

      - name: Install nightly rust
-       run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-src -c clippy
+       run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32-unknown-unknown -c clippy

      - name: Run Clippy
        run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module
@@ -46,16 +46,16 @@ jobs:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Advisory Cache
-       uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
+       uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
        with:
          path: ~/.cargo/advisory-db
          key: rust-advisory-db

      - name: Install cargo deny
-       run: cargo +1.89 install cargo-deny --version =0.18.4
+       run: cargo install --locked cargo-deny

      - name: Run cargo deny
-       run: cargo deny -L error --all-features check --hide-inclusion-graph
+       run: cargo deny -L error --all-features check

  fmt:
    runs-on: ubuntu-latest
@@ -73,11 +73,42 @@ jobs:
      - name: Run rustfmt
        run: cargo +${{ steps.nightly.outputs.version }} fmt -- --check

+     - name: Install foundry
+       uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
+       with:
+         version: nightly-41d4e5437107f6f42c7711123890147bc736a609
+         cache: false
+
+     - name: Run forge fmt
+       run: FOUNDRY_FMT_SORT_INPUTS=false FOUNDRY_FMT_LINE_LENGTH=100 FOUNDRY_FMT_TAB_WIDTH=2 FOUNDRY_FMT_BRACKET_SPACING=true FOUNDRY_FMT_INT_TYPES=preserve forge fmt --check $(find . -iname "*.sol")
+
  machete:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
      - name: Verify all dependencies are in use
        run: |
-         cargo +1.89 install cargo-machete --version =0.8.0
-         cargo +1.89 machete
+         cargo install cargo-machete
+         cargo machete
+
+ slither:
+   runs-on: ubuntu-latest
+   steps:
+     - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+     - name: Slither
+       run: |
+         python3 -m pip install solc-select
+         solc-select install 0.8.26
+         solc-select use 0.8.26
+
+         python3 -m pip install slither-analyzer
+
+         slither --include-paths ./networks/ethereum/schnorr/contracts/Schnorr.sol
+         slither --include-paths ./networks/ethereum/schnorr/contracts ./networks/ethereum/schnorr/contracts/tests/Schnorr.sol
+         slither processor/ethereum/deployer/contracts/Deployer.sol
+         slither processor/ethereum/erc20/contracts/IERC20.sol
+
+         cp networks/ethereum/schnorr/contracts/Schnorr.sol processor/ethereum/router/contracts/
+         cp processor/ethereum/erc20/contracts/IERC20.sol processor/ethereum/router/contracts/
+         cd processor/ethereum/router/contracts
+         slither Router.sol
.github/workflows/monero-tests.yaml (vendored; new file, 77 lines)

@@ -0,0 +1,77 @@
+name: Monero Tests
+
+on:
+  push:
+    branches:
+      - develop
+    paths:
+      - "networks/monero/**"
+      - "processor/**"
+
+  pull_request:
+    paths:
+      - "networks/monero/**"
+      - "processor/**"
+
+  workflow_dispatch:
+
+jobs:
+  # Only run these once since they will be consistent regardless of any node
+  unit-tests:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Test Dependencies
+        uses: ./.github/actions/test-dependencies
+
+      - name: Run Unit Tests Without Features
+        run: |
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-io --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-generators --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-primitives --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-mlsag --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-clsag --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-borromean --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-bulletproofs --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-rpc --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-address --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-seed --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package polyseed --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --lib
+
+      # Doesn't run unit tests with features as the tests workflow will
+
+  integration-tests:
+    runs-on: ubuntu-latest
+    # Test against all supported protocol versions
+    strategy:
+      matrix:
+        version: [v0.17.3.2, v0.18.3.4]
+
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Test Dependencies
+        uses: ./.github/actions/test-dependencies
+        with:
+          monero-version: ${{ matrix.version }}
+
+      - name: Run Integration Tests Without Features
+        run: |
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --test '*'
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --test '*'
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --test '*'
+
+      - name: Run Integration Tests
+        # Don't run if the the tests workflow also will
+        if: ${{ matrix.version != 'v0.18.3.4' }}
+        run: |
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --all-features --test '*'
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --all-features --test '*'
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --all-features --test '*'
.github/workflows/msrv.yml (vendored; new file, 255 lines)

@@ -0,0 +1,255 @@
+name: Weekly MSRV Check
+
+on:
+  schedule:
+    - cron: "0 0 * * 0"
+  workflow_dispatch:
+
+jobs:
+  msrv-common:
+    name: Run cargo msrv on common
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Install Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
+      - name: Install cargo msrv
+        run: cargo install --locked cargo-msrv
+
+      - name: Run cargo msrv on common
+        run: |
+          cargo msrv verify --manifest-path common/zalloc/Cargo.toml
+          cargo msrv verify --manifest-path common/std-shims/Cargo.toml
+          cargo msrv verify --manifest-path common/env/Cargo.toml
+          cargo msrv verify --manifest-path common/db/Cargo.toml
+          cargo msrv verify --manifest-path common/task/Cargo.toml
+          cargo msrv verify --manifest-path common/request/Cargo.toml
+          cargo msrv verify --manifest-path common/patchable-async-sleep/Cargo.toml
+
+  msrv-crypto:
+    name: Run cargo msrv on crypto
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Install Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
+      - name: Install cargo msrv
+        run: cargo install --locked cargo-msrv
+
+      - name: Run cargo msrv on crypto
+        run: |
+          cargo msrv verify --manifest-path crypto/transcript/Cargo.toml
+
+          cargo msrv verify --manifest-path crypto/ff-group-tests/Cargo.toml
+          cargo msrv verify --manifest-path crypto/dalek-ff-group/Cargo.toml
+          cargo msrv verify --manifest-path crypto/ed448/Cargo.toml
+
+          cargo msrv verify --manifest-path crypto/multiexp/Cargo.toml
+
+          cargo msrv verify --manifest-path crypto/dleq/Cargo.toml
+          cargo msrv verify --manifest-path crypto/ciphersuite/Cargo.toml
+          cargo msrv verify --manifest-path crypto/schnorr/Cargo.toml
+
+          cargo msrv verify --manifest-path crypto/evrf/generalized-bulletproofs/Cargo.toml
+          cargo msrv verify --manifest-path crypto/evrf/circuit-abstraction/Cargo.toml
+          cargo msrv verify --manifest-path crypto/evrf/divisors/Cargo.toml
+          cargo msrv verify --manifest-path crypto/evrf/ec-gadgets/Cargo.toml
+          cargo msrv verify --manifest-path crypto/evrf/embedwards25519/Cargo.toml
+          cargo msrv verify --manifest-path crypto/evrf/secq256k1/Cargo.toml
+
+          cargo msrv verify --manifest-path crypto/dkg/Cargo.toml
+          cargo msrv verify --manifest-path crypto/frost/Cargo.toml
+          cargo msrv verify --manifest-path crypto/schnorrkel/Cargo.toml
+
+  msrv-networks:
+    name: Run cargo msrv on networks
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Install Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
+      - name: Install cargo msrv
+        run: cargo install --locked cargo-msrv
+
+      - name: Run cargo msrv on networks
+        run: |
+          cargo msrv verify --manifest-path networks/bitcoin/Cargo.toml
+
+          cargo msrv verify --manifest-path networks/ethereum/build-contracts/Cargo.toml
+          cargo msrv verify --manifest-path networks/ethereum/schnorr/Cargo.toml
+          cargo msrv verify --manifest-path networks/ethereum/alloy-simple-request-transport/Cargo.toml
+          cargo msrv verify --manifest-path networks/ethereum/relayer/Cargo.toml --features parity-db
+
+          cargo msrv verify --manifest-path networks/monero/io/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/generators/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/primitives/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/ringct/mlsag/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/ringct/clsag/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/ringct/borromean/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/ringct/bulletproofs/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/rpc/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/rpc/simple-request/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/wallet/address/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/wallet/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/verify-chain/Cargo.toml
+
+  msrv-message-queue:
+    name: Run cargo msrv on message-queue
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Install Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
+      - name: Install cargo msrv
+        run: cargo install --locked cargo-msrv
+
+      - name: Run cargo msrv on message-queue
+        run: |
+          cargo msrv verify --manifest-path message-queue/Cargo.toml --features parity-db
+
+  msrv-processor:
+    name: Run cargo msrv on processor
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Install Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
+      - name: Install cargo msrv
+        run: cargo install --locked cargo-msrv
+
+      - name: Run cargo msrv on processor
+        run: |
+          cargo msrv verify --manifest-path processor/view-keys/Cargo.toml
+
+          cargo msrv verify --manifest-path processor/primitives/Cargo.toml
+          cargo msrv verify --manifest-path processor/messages/Cargo.toml
+
+          cargo msrv verify --manifest-path processor/scanner/Cargo.toml
+
+          cargo msrv verify --manifest-path processor/scheduler/primitives/Cargo.toml
+          cargo msrv verify --manifest-path processor/scheduler/smart-contract/Cargo.toml
+          cargo msrv verify --manifest-path processor/scheduler/utxo/primitives/Cargo.toml
+          cargo msrv verify --manifest-path processor/scheduler/utxo/standard/Cargo.toml
+          cargo msrv verify --manifest-path processor/scheduler/utxo/transaction-chaining/Cargo.toml
+
+          cargo msrv verify --manifest-path processor/key-gen/Cargo.toml
+          cargo msrv verify --manifest-path processor/frost-attempt-manager/Cargo.toml
+          cargo msrv verify --manifest-path processor/signers/Cargo.toml
+          cargo msrv verify --manifest-path processor/bin/Cargo.toml --features parity-db
+
+          cargo msrv verify --manifest-path processor/bitcoin/Cargo.toml
+
+          cargo msrv verify --manifest-path processor/ethereum/primitives/Cargo.toml
+          cargo msrv verify --manifest-path processor/ethereum/test-primitives/Cargo.toml
+          cargo msrv verify --manifest-path processor/ethereum/erc20/Cargo.toml
+          cargo msrv verify --manifest-path processor/ethereum/deployer/Cargo.toml
+          cargo msrv verify --manifest-path processor/ethereum/router/Cargo.toml
+          cargo msrv verify --manifest-path processor/ethereum/Cargo.toml
+
+          cargo msrv verify --manifest-path processor/monero/Cargo.toml
+
+  msrv-coordinator:
+    name: Run cargo msrv on coordinator
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Install Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
+      - name: Install cargo msrv
+        run: cargo install --locked cargo-msrv
+
+      - name: Run cargo msrv on coordinator
+        run: |
+          cargo msrv verify --manifest-path coordinator/tributary/tendermint/Cargo.toml
+          cargo msrv verify --manifest-path coordinator/tributary/Cargo.toml
+          cargo msrv verify --manifest-path coordinator/cosign/Cargo.toml
+          cargo msrv verify --manifest-path coordinator/Cargo.toml
+
+  msrv-substrate:
+    name: Run cargo msrv on substrate
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Install Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
+      - name: Install cargo msrv
+        run: cargo install --locked cargo-msrv
+
+      - name: Run cargo msrv on substrate
+        run: |
+          cargo msrv verify --manifest-path substrate/primitives/Cargo.toml
+
+          cargo msrv verify --manifest-path substrate/coins/primitives/Cargo.toml
+          cargo msrv verify --manifest-path substrate/coins/pallet/Cargo.toml
+
+          cargo msrv verify --manifest-path substrate/dex/pallet/Cargo.toml
+
+          cargo msrv verify --manifest-path substrate/economic-security/pallet/Cargo.toml
+
+          cargo msrv verify --manifest-path substrate/genesis-liquidity/primitives/Cargo.toml
+          cargo msrv verify --manifest-path substrate/genesis-liquidity/pallet/Cargo.toml
+
+          cargo msrv verify --manifest-path substrate/in-instructions/primitives/Cargo.toml
+          cargo msrv verify --manifest-path substrate/in-instructions/pallet/Cargo.toml
+
+          cargo msrv verify --manifest-path substrate/validator-sets/pallet/Cargo.toml
+          cargo msrv verify --manifest-path substrate/validator-sets/primitives/Cargo.toml
+
+          cargo msrv verify --manifest-path substrate/emissions/primitives/Cargo.toml
+          cargo msrv verify --manifest-path substrate/emissions/pallet/Cargo.toml
+
+          cargo msrv verify --manifest-path substrate/signals/primitives/Cargo.toml
+          cargo msrv verify --manifest-path substrate/signals/pallet/Cargo.toml
+
+          cargo msrv verify --manifest-path substrate/abi/Cargo.toml
+          cargo msrv verify --manifest-path substrate/client/Cargo.toml
+
+          cargo msrv verify --manifest-path substrate/runtime/Cargo.toml
+          cargo msrv verify --manifest-path substrate/node/Cargo.toml
+
+  msrv-orchestration:
+    name: Run cargo msrv on orchestration
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Install Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
+      - name: Install cargo msrv
+        run: cargo install --locked cargo-msrv
+
+      - name: Run cargo msrv on message-queue
+        run: |
+          cargo msrv verify --manifest-path orchestration/Cargo.toml
+
+  msrv-mini:
+    name: Run cargo msrv on mini
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Install Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
+      - name: Install cargo msrv
+        run: cargo install --locked cargo-msrv
+
+      - name: Run cargo msrv on mini
+        run: |
+          cargo msrv verify --manifest-path mini/Cargo.toml
.github/workflows/networks-tests.yml (vendored; 19 lines changed)

@@ -30,6 +30,23 @@ jobs:
        run: |
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
            -p bitcoin-serai \
+           -p build-solidity-contracts \
+           -p ethereum-schnorr-contract \
            -p alloy-simple-request-transport \
-           -p ethereum-serai \
            -p serai-ethereum-relayer \
+           -p monero-io \
+           -p monero-generators \
+           -p monero-primitives \
+           -p monero-mlsag \
+           -p monero-clsag \
+           -p monero-borromean \
+           -p monero-bulletproofs \
+           -p monero-serai \
+           -p monero-rpc \
+           -p monero-simple-request-rpc \
+           -p monero-address \
+           -p monero-wallet \
+           -p monero-seed \
+           -p polyseed \
+           -p monero-wallet-util \
+           -p monero-serai-verify-chain
.github/workflows/pages.yml (vendored; 43 lines changed)

@@ -1,7 +1,6 @@
 # MIT License
 #
 # Copyright (c) 2022 just-the-docs
-# Copyright (c) 2022-2024 Luke Parker
 #
 # Permission is hereby granted, free of charge, to any person obtaining a copy
 # of this software and associated documentation files (the "Software"), to deal
@@ -21,21 +20,31 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.

-name: Deploy Rust docs and Jekyll site to Pages
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third-party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+
+# Sample workflow for building and deploying a Jekyll site to GitHub Pages
+name: Deploy Jekyll site to Pages

 on:
   push:
     branches:
       - "develop"
+    paths:
+      - "docs/**"
+
+  # Allows you to run this workflow manually from the Actions tab
   workflow_dispatch:

+# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
 permissions:
   contents: read
   pages: write
   id-token: write

-# Only allow one concurrent deployment
+# Allow one concurrent deployment
 concurrency:
   group: "pages"
   cancel-in-progress: true
@@ -44,37 +53,27 @@ jobs:
   # Build job
   build:
     runs-on: ubuntu-latest
+    defaults:
+      run:
+        working-directory: docs
     steps:
       - name: Checkout
-        uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+        uses: actions/checkout@v3
       - name: Setup Ruby
-        uses: ruby/setup-ruby@44511735964dcb71245e7e55f72539531f7bc0eb
+        uses: ruby/setup-ruby@v1
         with:
           bundler-cache: true
           cache-version: 0
           working-directory: "${{ github.workspace }}/docs"
       - name: Setup Pages
         id: pages
-        uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b
+        uses: actions/configure-pages@v3
       - name: Build with Jekyll
-        run: cd ${{ github.workspace }}/docs && bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
+        run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
         env:
           JEKYLL_ENV: production

-      - name: Get nightly version to use
-        id: nightly
-        shell: bash
-        run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT
-      - name: Build Dependencies
-        uses: ./.github/actions/build-dependencies
-      - name: Buld Rust docs
-        run: |
-          rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-docs
-          RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --no-deps --all-features
-          mv target/doc docs/_site/rust
-
       - name: Upload artifact
-        uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b
+        uses: actions/upload-pages-artifact@v1
         with:
           path: "docs/_site/"
@@ -88,4 +87,4 @@ jobs:
     steps:
       - name: Deploy to GitHub Pages
         id: deployment
-        uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e
+        uses: actions/deploy-pages@v2
.github/workflows/tests.yml (vendored; 22 lines changed)

@@ -39,9 +39,29 @@ jobs:
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
            -p serai-message-queue \
            -p serai-processor-messages \
-           -p serai-processor \
+           -p serai-processor-key-gen \
+           -p serai-processor-view-keys \
+           -p serai-processor-frost-attempt-manager \
+           -p serai-processor-primitives \
+           -p serai-processor-scanner \
+           -p serai-processor-scheduler-primitives \
+           -p serai-processor-utxo-scheduler-primitives \
+           -p serai-processor-utxo-scheduler \
+           -p serai-processor-transaction-chaining-scheduler \
+           -p serai-processor-smart-contract-scheduler \
+           -p serai-processor-signers \
+           -p serai-processor-bin \
+           -p serai-bitcoin-processor \
+           -p serai-processor-ethereum-primitives \
+           -p serai-processor-ethereum-test-primitives \
+           -p serai-processor-ethereum-deployer \
+           -p serai-processor-ethereum-router \
+           -p serai-processor-ethereum-erc20 \
+           -p serai-ethereum-processor \
+           -p serai-monero-processor \
            -p tendermint-machine \
            -p tributary-chain \
+           -p serai-cosign \
            -p serai-coordinator \
            -p serai-orchestrator \
            -p serai-docker-tests
.gitignore (vendored; 7 lines changed)

@@ -1,14 +1,7 @@
 target

-# Don't commit any `Cargo.lock` which aren't the workspace's
-Cargo.lock
-!./Cargo.lock
-
-# Don't commit any `Dockerfile`, as they're auto-generated, except the only one which isn't
 Dockerfile
 Dockerfile.fast-epoch
 !orchestration/runtime/Dockerfile

 .test-logs

 .vscode
Cargo.lock (generated; 6870 lines changed)

File diff suppressed because it is too large.
Cargo.toml (140 lines changed)

@@ -1,8 +1,15 @@
 [workspace]
 resolver = "2"
 members = [
+  # Version patches
+  "patches/parking_lot_core",
+  "patches/parking_lot",
+  "patches/zstd",
+  "patches/rocksdb",
+
   # std patches
   "patches/matches",
+  "patches/is-terminal",

   # Rewrites/redirects
   "patches/option-ext",
@@ -13,6 +20,7 @@ members = [
   "common/patchable-async-sleep",
   "common/db",
   "common/env",
+  "common/task",
   "common/request",

   "crypto/transcript",
@@ -21,34 +29,76 @@ members = [
   "crypto/dalek-ff-group",
   "crypto/ed448",
   "crypto/ciphersuite",
-  "crypto/ciphersuite/kp256",

   "crypto/multiexp",

   "crypto/schnorr",
   "crypto/dleq",

+  "crypto/evrf/secq256k1",
+  "crypto/evrf/embedwards25519",
+  "crypto/evrf/generalized-bulletproofs",
+  "crypto/evrf/circuit-abstraction",
+  "crypto/evrf/divisors",
+  "crypto/evrf/ec-gadgets",
+
   "crypto/dkg",
-  "crypto/dkg/recovery",
-  "crypto/dkg/dealer",
-  "crypto/dkg/promote",
-  "crypto/dkg/musig",
-  "crypto/dkg/pedpop",
   "crypto/frost",
   "crypto/schnorrkel",

   "networks/bitcoin",

+  "networks/ethereum/build-contracts",
+  "networks/ethereum/schnorr",
   "networks/ethereum/alloy-simple-request-transport",
-  "networks/ethereum",
   "networks/ethereum/relayer",

+  "networks/monero/io",
+  "networks/monero/generators",
+  "networks/monero/primitives",
+  "networks/monero/ringct/mlsag",
+  "networks/monero/ringct/clsag",
+  "networks/monero/ringct/borromean",
+  "networks/monero/ringct/bulletproofs",
+  "networks/monero",
+  "networks/monero/rpc",
+  "networks/monero/rpc/simple-request",
+  "networks/monero/wallet/address",
+  "networks/monero/wallet",
+  "networks/monero/wallet/seed",
+  "networks/monero/wallet/polyseed",
+  "networks/monero/wallet/util",
+  "networks/monero/verify-chain",
+
   "message-queue",

   "processor/messages",
-  "processor",
+  "processor/key-gen",
+  "processor/view-keys",
+  "processor/frost-attempt-manager",
+
+  "processor/primitives",
+  "processor/scanner",
+  "processor/scheduler/primitives",
+  "processor/scheduler/utxo/primitives",
+  "processor/scheduler/utxo/standard",
+  "processor/scheduler/utxo/transaction-chaining",
+  "processor/scheduler/smart-contract",
+  "processor/signers",
+
+  "processor/bin",
+  "processor/bitcoin",
+  "processor/ethereum/primitives",
+  "processor/ethereum/test-primitives",
+  "processor/ethereum/deployer",
+  "processor/ethereum/router",
+  "processor/ethereum/erc20",
+  "processor/ethereum",
+  "processor/monero",

   "coordinator/tributary/tendermint",
   "coordinator/tributary",
+  "coordinator/cosign",
   "coordinator",

   "substrate/primitives",
@@ -100,37 +150,51 @@ members = [
 # to the extensive operations required for Bulletproofs
 [profile.dev.package]
 subtle = { opt-level = 3 }
-curve25519-dalek = { opt-level = 3 }

 ff = { opt-level = 3 }
 group = { opt-level = 3 }

 crypto-bigint = { opt-level = 3 }
+secp256k1 = { opt-level = 3 }
+curve25519-dalek = { opt-level = 3 }
 dalek-ff-group = { opt-level = 3 }
 minimal-ed448 = { opt-level = 3 }

 multiexp = { opt-level = 3 }

-monero-oxide = { opt-level = 3 }
+secq256k1 = { opt-level = 3 }
+embedwards25519 = { opt-level = 3 }
+generalized-bulletproofs = { opt-level = 3 }
+generalized-bulletproofs-circuit-abstraction = { opt-level = 3 }
+ec-divisors = { opt-level = 3 }
+generalized-bulletproofs-ec-gadgets = { opt-level = 3 }
+
+dkg = { opt-level = 3 }
+
+monero-generators = { opt-level = 3 }
+monero-borromean = { opt-level = 3 }
+monero-bulletproofs = { opt-level = 3 }
+monero-mlsag = { opt-level = 3 }
+monero-clsag = { opt-level = 3 }

 [profile.release]
 panic = "unwind"
-overflow-checks = true

 [patch.crates-io]
-# Dependencies from monero-oxide which originate from within our own tree
-std-shims = { path = "common/std-shims" }
-simple-request = { path = "common/request" }
-dalek-ff-group = { path = "crypto/dalek-ff-group" }
-flexible-transcript = { path = "crypto/transcript" }
-modular-frost = { path = "crypto/frost" }
-
 # https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
 lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }

-# These have `std` alternatives
+parking_lot_core = { path = "patches/parking_lot_core" }
+parking_lot = { path = "patches/parking_lot" }
+# wasmtime pulls in an old version for this
+zstd = { path = "patches/zstd" }
+# Needed for WAL compression
+rocksdb = { path = "patches/rocksdb" }
+
+# is-terminal now has an std-based solution with an equivalent API
+is-terminal = { path = "patches/is-terminal" }
+# So does matches
 matches = { path = "patches/matches" }
-home = { path = "patches/home" }

 # directories-next was created because directories was unmaintained
 # directories-next is now unmaintained while directories is maintained
@@ -140,11 +204,12 @@ home = { path = "patches/home" }
 option-ext = { path = "patches/option-ext" }
 directories-next = { path = "patches/directories-next" }

+# The official pasta_curves repo doesn't support Zeroize
+pasta_curves = { git = "https://github.com/kayabaNerve/pasta_curves", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616" }
+
 [workspace.lints.clippy]
-uninlined_format_args = "allow" # TODO
 unwrap_or_default = "allow"
-manual_is_multiple_of = "allow"
-incompatible_msrv = "allow" # Manually verified with a GitHub workflow
+map_unwrap_or = "allow"
 borrow_as_ptr = "deny"
 cast_lossless = "deny"
 cast_possible_truncation = "deny"
@@ -169,14 +234,13 @@ large_stack_arrays = "deny"
 linkedlist = "deny"
 macro_use_imports = "deny"
 manual_instant_elapsed = "deny"
-# TODO manual_let_else = "deny"
+manual_let_else = "deny"
 manual_ok_or = "deny"
 manual_string_new = "deny"
-map_unwrap_or = "deny"
 match_bool = "deny"
 match_same_arms = "deny"
 missing_fields_in_debug = "deny"
-# TODO needless_continue = "deny"
+needless_continue = "deny"
 needless_pass_by_value = "deny"
 ptr_cast_constness = "deny"
 range_minus_one = "deny"
@@ -184,7 +248,9 @@ range_plus_one = "deny"
 redundant_closure_for_method_calls = "deny"
 redundant_else = "deny"
 string_add_assign = "deny"
-unchecked_time_subtraction = "deny"
+string_slice = "deny"
+unchecked_duration_subtraction = "deny"
+uninlined_format_args = "deny"
 unnecessary_box_returns = "deny"
 unnecessary_join = "deny"
 unnecessary_wraps = "deny"
@@ -192,21 +258,3 @@ unnested_or_patterns = "deny"
 unused_async = "deny"
 unused_self = "deny"
 zero_sized_map_values = "deny"
-
-# TODO: These were incurred when updating Rust as necessary for compilation, yet aren't being fixed
-# at this time due to the impacts it'd have throughout the repository (when this isn't actively the
-# primary branch, `next` is)
-needless_continue = "allow"
-needless_lifetimes = "allow"
-useless_conversion = "allow"
-empty_line_after_doc_comments = "allow"
-manual_div_ceil = "allow"
-manual_let_else = "allow"
-unnecessary_map_or = "allow"
-result_large_err = "allow"
-unneeded_struct_pattern = "allow"
-
-[workspace.lints.rust]
-unused = "allow" # TODO: https://github.com/rust-lang/rust/issues/147648
-mismatched_lifetime_syntaxes = "allow"
-unused_attributes = "allow"
-unused_parens = "allow"
LICENSE (2 lines changed)

@@ -5,4 +5,4 @@ a full copy of the AGPL-3.0 License is included in the root of this repository
 as a reference text. This copy should be provided with any distribution of a
 crate licensed under the AGPL-3.0, as per its terms.

-The GitHub actions/workflows (`.github`) are licensed under the MIT license.
+The GitHub actions (`.github/actions`) are licensed under the MIT license.
@@ -59,6 +59,7 @@ issued at the discretion of the Immunefi program managers.
 - [Website](https://serai.exchange/): https://serai.exchange/
 - [Immunefi](https://immunefi.com/bounty/serai/): https://immunefi.com/bounty/serai/
 - [Twitter](https://twitter.com/SeraiDEX): https://twitter.com/SeraiDEX
+- [Mastodon](https://cryptodon.lol/@serai): https://cryptodon.lol/@serai
 - [Discord](https://discord.gg/mpEUtJR3vz): https://discord.gg/mpEUtJR3vz
 - [Matrix](https://matrix.to/#/#serai:matrix.org): https://matrix.to/#/#serai:matrix.org
 - [Reddit](https://www.reddit.com/r/SeraiDEX/): https://www.reddit.com/r/SeraiDEX/
common/db/Cargo.toml

@@ -1,13 +1,13 @@
 [package]
 name = "serai-db"
-version = "0.1.0"
+version = "0.1.1"
 description = "A simple database trait and backends for it"
 license = "MIT"
 repository = "https://github.com/serai-dex/serai/tree/develop/common/db"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
-rust-version = "1.65"
+rust-version = "1.71"

 [package.metadata.docs.rs]
 all-features = true
@@ -18,7 +18,7 @@ workspace = true

 [dependencies]
 parity-db = { version = "0.4", default-features = false, optional = true }
-rocksdb = { version = "0.24", default-features = false, features = ["zstd"], optional = true }
+rocksdb = { version = "0.23", default-features = false, features = ["zstd"], optional = true }

 [features]
 parity-db = ["dep:parity-db"]
8
common/db/README.md
Normal file
8
common/db/README.md
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
# Serai DB
|
||||||
|
|
||||||
|
An inefficient, minimal abstraction around databases.
|
||||||
|
|
||||||
|
The abstraction offers `get`, `put`, and `del` with helper functions and macros
|
||||||
|
built on top. Database iteration is not offered, forcing the caller to manually
|
||||||
|
implement indexing schemes. This ensures wide compatibility across abstracted
|
||||||
|
databases.
|
||||||
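The README describes the API at a high level. A minimal sketch of what driving it looks like, assuming the `MemDb` backend and the `Get`/`DbTxn`/`Db` traits that appear later in this diff (the key and value bytes are illustrative):

```rust
use serai_db::{Get, DbTxn, Db, MemDb};

fn main() {
  let mut db = MemDb::new();

  // Writes are staged on a transaction and only visible once committed
  let mut txn = db.txn();
  txn.put(b"key", b"value");
  txn.commit();
  assert_eq!(db.get(b"key"), Some(b"value".to_vec()));

  // With no iteration offered, multi-item state needs a caller-managed index,
  // e.g. a counter key stored alongside the items themselves
  let mut txn = db.txn();
  txn.del(b"key");
  txn.commit();
  assert_eq!(db.get(b"key"), None);
}
```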
common/db/src/create_db.rs
@@ -38,12 +38,21 @@ pub fn serai_db_key(
 #[macro_export]
 macro_rules! create_db {
   ($db_name: ident {
-    $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*
+    $(
+      $field_name: ident:
+        $(<$($generic_name: tt: $generic_type: tt),+>)?(
+          $($arg: ident: $arg_type: ty),*
+        ) -> $field_type: ty$(,)?
+    )*
   }) => {
     $(
       #[derive(Clone, Debug)]
-      pub(crate) struct $field_name;
-      impl $field_name {
+      pub(crate) struct $field_name$(
+        <$($generic_name: $generic_type),+>
+      )?$(
+        (core::marker::PhantomData<($($generic_name),+)>)
+      )?;
+      impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? {
         pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> {
           use scale::Encode;
           $crate::serai_db_key(
@@ -52,18 +61,43 @@ macro_rules! create_db {
             ($($arg),*).encode()
           )
         }
-        pub(crate) fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type) {
-          let key = $field_name::key($($arg),*);
+        pub(crate) fn set(
+          txn: &mut impl DbTxn
+          $(, $arg: $arg_type)*,
+          data: &$field_type
+        ) {
+          let key = Self::key($($arg),*);
           txn.put(&key, borsh::to_vec(data).unwrap());
         }
-        pub(crate) fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> {
-          getter.get($field_name::key($($arg),*)).map(|data| {
+        pub(crate) fn get(
+          getter: &impl Get,
+          $($arg: $arg_type),*
+        ) -> Option<$field_type> {
+          getter.get(Self::key($($arg),*)).map(|data| {
            borsh::from_slice(data.as_ref()).unwrap()
           })
         }
+        // Returns a PhantomData of all generic types so if the generic was only used in the value,
+        // not the keys, this doesn't have unused generic types
         #[allow(dead_code)]
-        pub(crate) fn del(txn: &mut impl DbTxn $(, $arg: $arg_type)*) {
-          txn.del(&$field_name::key($($arg),*))
+        pub(crate) fn del(
+          txn: &mut impl DbTxn
+          $(, $arg: $arg_type)*
+        ) -> core::marker::PhantomData<($($($generic_name),+)?)> {
+          txn.del(&Self::key($($arg),*));
+          core::marker::PhantomData
+        }
+
+        pub(crate) fn take(
+          txn: &mut impl DbTxn
+          $(, $arg: $arg_type)*
+        ) -> Option<$field_type> {
+          let key = Self::key($($arg),*);
+          let res = txn.get(&key).map(|data| borsh::from_slice(data.as_ref()).unwrap());
+          if res.is_some() {
+            txn.del(key);
+          }
+          res
         }
       }
     )*
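To illustrate the grammar change: the old macro only allowed plain entries, while the new one optionally accepts generics ahead of the argument list. A sketch of both forms, with hypothetical names (`ExampleDb`, `PlainValue`, `GenericValue`) and a hypothetical single-token bound (`Borshy`):

```rust
create_db!(
  ExampleDb {
    // Previously expressible: a keyed, Borsh-encoded value
    PlainValue: (id: u64) -> Vec<u8>,
    // Newly expressible: a generic parameter, usable in the key or the value
    GenericValue: <G: Borshy>(id: u64) -> G,
  }
);
```

When the generic only appears in the value, the generated `PhantomData` field (and `del`'s `PhantomData` return) is what keeps the struct from having an unused type parameter.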
@@ -73,19 +107,30 @@ macro_rules! create_db {
 #[macro_export]
 macro_rules! db_channel {
   ($db_name: ident {
-    $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*
+    $($field_name: ident:
+      $(<$($generic_name: tt: $generic_type: tt),+>)?(
+        $($arg: ident: $arg_type: ty),*
+      ) -> $field_type: ty$(,)?
+    )*
   }) => {
     $(
       create_db! {
         $db_name {
-          $field_name: ($($arg: $arg_type,)* index: u32) -> $field_type,
+          $field_name: $(<$($generic_name: $generic_type),+>)?(
+            $($arg: $arg_type,)*
+            index: u32
+          ) -> $field_type
         }
       }

-      impl $field_name {
-        pub(crate) fn send(txn: &mut impl DbTxn $(, $arg: $arg_type)*, value: &$field_type) {
+      impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? {
+        pub(crate) fn send(
+          txn: &mut impl DbTxn
+          $(, $arg: $arg_type)*
+          , value: &$field_type
+        ) {
           // Use index 0 to store the amount of messages
-          let messages_sent_key = $field_name::key($($arg),*, 0);
+          let messages_sent_key = Self::key($($arg,)* 0);
           let messages_sent = txn.get(&messages_sent_key).map(|counter| {
             u32::from_le_bytes(counter.try_into().unwrap())
           }).unwrap_or(0);
@@ -96,19 +141,35 @@ macro_rules! db_channel {
           // at the same time
           let index_to_use = messages_sent + 2;

-          $field_name::set(txn, $($arg),*, index_to_use, value);
+          Self::set(txn, $($arg,)* index_to_use, value);
         }
-        pub(crate) fn try_recv(txn: &mut impl DbTxn $(, $arg: $arg_type)*) -> Option<$field_type> {
-          let messages_recvd_key = $field_name::key($($arg),*, 1);
+        pub(crate) fn peek(
+          getter: &impl Get
+          $(, $arg: $arg_type)*
+        ) -> Option<$field_type> {
+          let messages_recvd_key = Self::key($($arg,)* 1);
+          let messages_recvd = getter.get(&messages_recvd_key).map(|counter| {
+            u32::from_le_bytes(counter.try_into().unwrap())
+          }).unwrap_or(0);
+
+          let index_to_read = messages_recvd + 2;
+
+          Self::get(getter, $($arg,)* index_to_read)
+        }
+        pub(crate) fn try_recv(
+          txn: &mut impl DbTxn
+          $(, $arg: $arg_type)*
+        ) -> Option<$field_type> {
+          let messages_recvd_key = Self::key($($arg,)* 1);
           let messages_recvd = txn.get(&messages_recvd_key).map(|counter| {
             u32::from_le_bytes(counter.try_into().unwrap())
           }).unwrap_or(0);

           let index_to_read = messages_recvd + 2;

-          let res = $field_name::get(txn, $($arg),*, index_to_read);
+          let res = Self::get(txn, $($arg,)* index_to_read);
           if res.is_some() {
-            $field_name::del(txn, $($arg),*, index_to_read);
+            Self::del(txn, $($arg,)* index_to_read);
             txn.put(&messages_recvd_key, (messages_recvd + 1).to_le_bytes());
           }
           res
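The channel this macro builds is a pair of counters plus a run of message slots, all stored through `create_db!`. A sketch of the layout and calls, under the index scheme visible above (the names are illustrative):

```rust
db_channel!(
  ExampleChannels {
    Events: (id: u8) -> Vec<u8>,
  }
);

fn demo(txn: &mut impl serai_db::DbTxn) {
  // Index 0 holds the sent counter, index 1 the received counter, and
  // messages occupy indexes 2.., so the counters are never clobbered
  Events::send(txn, 0, &vec![1, 2, 3]);
  // `peek` (added by this diff) reads the next message without consuming it
  assert_eq!(Events::peek(txn, 0), Some(vec![1, 2, 3]));
  // `try_recv` consumes it, deleting the slot and bumping the received counter
  assert_eq!(Events::try_recv(txn, 0), Some(vec![1, 2, 3]));
  assert_eq!(Events::try_recv(txn, 0), None);
}
```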
common/db/src/lib.rs
@@ -14,26 +14,43 @@ mod parity_db;
 #[cfg(feature = "parity-db")]
 pub use parity_db::{ParityDb, new_parity_db};

-/// An object implementing get.
+/// An object implementing `get`.
 pub trait Get {
+  /// Get a value from the database.
   fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>>;
 }

-/// An atomic database operation.
+/// An atomic database transaction.
+///
+/// A transaction is only required to atomically commit. It is not required that two `Get` calls
+/// made with the same transaction return the same result, if another transaction wrote to that
+/// key.
+///
+/// If two transactions are created, and both write (including deletions) to the same key, behavior
+/// is undefined. The transaction may block, deadlock, panic, overwrite one of the two values
+/// randomly, or any other action, at time of write or at time of commit.
 #[must_use]
 pub trait DbTxn: Send + Get {
+  /// Write a value to this key.
   fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>);
+  /// Delete the value from this key.
   fn del(&mut self, key: impl AsRef<[u8]>);
+  /// Commit this transaction.
   fn commit(self);
 }

-/// A database supporting atomic operations.
+/// A database supporting atomic transactions.
 pub trait Db: 'static + Send + Sync + Clone + Get {
+  /// The type representing a database transaction.
   type Transaction<'a>: DbTxn;
+  /// Calculate a key for a database entry.
+  ///
+  /// Keys are separated by the database, the item within the database, and the item's key itself.
   fn key(db_dst: &'static [u8], item_dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
     let db_len = u8::try_from(db_dst.len()).unwrap();
     let dst_len = u8::try_from(item_dst.len()).unwrap();
     [[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()
   }
+  /// Open a new transaction.
   fn txn(&mut self) -> Self::Transaction<'_>;
 }
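The default `key` implementation above length-prefixes both destinations before appending the item's key. Worked through on small inputs (the values are chosen purely for illustration):

```rust
fn main() {
  // db_dst = b"db" (length 2), item_dst = b"item" (length 4), item key = [0xff]
  let key = [[2u8].as_ref(), b"db", [4u8].as_ref(), b"item", [0xff].as_ref()].concat();
  assert_eq!(key, vec![0x02, b'd', b'b', 0x04, b'i', b't', b'e', b'm', 0xff]);
}
```

The one-byte length prefixes are why `u8::try_from` panics on destinations over 255 bytes, and they keep distinct `(db, item)` pairs from colliding even when their raw concatenations would be equal.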
@@ -11,7 +11,7 @@ use crate::*;
 #[derive(PartialEq, Eq, Debug)]
 pub struct MemDbTxn<'a>(&'a MemDb, HashMap<Vec<u8>, Vec<u8>>, HashSet<Vec<u8>>);

-impl<'a> Get for MemDbTxn<'a> {
+impl Get for MemDbTxn<'_> {
   fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
     if self.2.contains(key.as_ref()) {
       return None;
@@ -23,7 +23,7 @@ impl<'a> Get for MemDbTxn<'a> {
     .or_else(|| self.0 .0.read().unwrap().get(key.as_ref()).cloned())
   }
 }
-impl<'a> DbTxn for MemDbTxn<'a> {
+impl DbTxn for MemDbTxn<'_> {
   fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
     self.2.remove(key.as_ref());
     self.1.insert(key.as_ref().to_vec(), value.as_ref().to_vec());
common/env/Cargo.toml (vendored, 2 changes)
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/env"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
-rust-version = "1.60"
+rust-version = "1.71"

 [package.metadata.docs.rs]
 all-features = true
common/env/src/lib.rs (vendored, 2 changes)
@@ -1,5 +1,5 @@
 #![cfg_attr(docsrs, feature(doc_cfg))]
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]

 // Obtain a variable from the Serai environment/secret store.
 pub fn var(variable: &str) -> Option<String> {
common/patchable-async-sleep/Cargo.toml
@@ -7,6 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/patchable-a
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = ["async", "sleep", "tokio", "smol", "async-std"]
 edition = "2021"
+rust-version = "1.71"

 [package.metadata.docs.rs]
 all-features = true
common/patchable-async-sleep/src/lib.rs
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]

common/simple-request/Cargo.toml
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/simple-requ
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = ["http", "https", "async", "request", "ssl"]
 edition = "2021"
-rust-version = "1.70"
+rust-version = "1.71"

 [package.metadata.docs.rs]
 all-features = true
common/simple-request/src/lib.rs
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]

 use std::sync::Arc;
common/std-shims/Cargo.toml
@@ -1,13 +1,13 @@
 [package]
 name = "std-shims"
-version = "0.1.4"
+version = "0.1.1"
 description = "A series of std shims to make alloc more feasible"
 license = "MIT"
 repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = ["nostd", "no_std", "alloc", "io"]
 edition = "2021"
-rust-version = "1.64"
+rust-version = "1.80"

 [package.metadata.docs.rs]
 all-features = true
@@ -17,9 +17,8 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true

 [dependencies]
-rustversion = { version = "1", default-features = false }
-spin = { version = "0.10", default-features = false, features = ["use_ticket_mutex", "once", "lazy"] }
-hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] }
+spin = { version = "0.9", default-features = false, features = ["use_ticket_mutex", "lazy"] }
+hashbrown = { version = "0.15", default-features = false, features = ["default-hasher", "inline-more"] }

 [features]
 std = []
common/std-shims/README.md
@@ -3,9 +3,4 @@
 A crate which passes through to std when the default `std` feature is enabled,
 yet provides a series of shims when it isn't.

-No guarantee of one-to-one parity is provided. The shims provided aim to be sufficient for the
-average case.
-
-`HashSet` and `HashMap` are provided via `hashbrown`. Synchronization primitives are provided via
-`spin` (avoiding a requirement on `critical-section`).
-types are not guaranteed to be
+`HashSet` and `HashMap` are provided via `hashbrown`.
common/std-shims/src/lib.rs
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]

@@ -11,64 +11,3 @@ pub mod io;
 pub use alloc::vec;
 pub use alloc::str;
 pub use alloc::string;
-
-pub mod prelude {
-  #[rustversion::before(1.73)]
-  #[doc(hidden)]
-  pub trait StdShimsDivCeil {
-    fn div_ceil(self, rhs: Self) -> Self;
-  }
-  #[rustversion::before(1.73)]
-  mod impl_divceil {
-    use super::StdShimsDivCeil;
-    impl StdShimsDivCeil for u8 {
-      fn div_ceil(self, rhs: Self) -> Self {
-        (self + (rhs - 1)) / rhs
-      }
-    }
-    impl StdShimsDivCeil for u16 {
-      fn div_ceil(self, rhs: Self) -> Self {
-        (self + (rhs - 1)) / rhs
-      }
-    }
-    impl StdShimsDivCeil for u32 {
-      fn div_ceil(self, rhs: Self) -> Self {
-        (self + (rhs - 1)) / rhs
-      }
-    }
-    impl StdShimsDivCeil for u64 {
-      fn div_ceil(self, rhs: Self) -> Self {
-        (self + (rhs - 1)) / rhs
-      }
-    }
-    impl StdShimsDivCeil for u128 {
-      fn div_ceil(self, rhs: Self) -> Self {
-        (self + (rhs - 1)) / rhs
-      }
-    }
-    impl StdShimsDivCeil for usize {
-      fn div_ceil(self, rhs: Self) -> Self {
-        (self + (rhs - 1)) / rhs
-      }
-    }
-  }
-
-  #[cfg(feature = "std")]
-  #[rustversion::before(1.74)]
-  #[doc(hidden)]
-  pub trait StdShimsIoErrorOther {
-    fn other<E>(error: E) -> Self
-    where
-      E: Into<Box<dyn std::error::Error + Send + Sync>>;
-  }
-  #[cfg(feature = "std")]
-  #[rustversion::before(1.74)]
-  impl StdShimsIoErrorOther for std::io::Error {
-    fn other<E>(error: E) -> Self
-    where
-      E: Into<Box<dyn std::error::Error + Send + Sync>>,
-    {
-      std::io::Error::new(std::io::ErrorKind::Other, error)
-    }
-  }
-}
common/std-shims/src/sync.rs
@@ -25,11 +25,7 @@ mod mutex_shim {
 }
 pub use mutex_shim::{ShimMutex as Mutex, MutexGuard};

-#[cfg(not(feature = "std"))]
-pub use spin::Lazy as LazyLock;
-#[rustversion::before(1.80)]
-#[cfg(feature = "std")]
-pub use spin::Lazy as LazyLock;
-#[rustversion::since(1.80)]
 #[cfg(feature = "std")]
 pub use std::sync::LazyLock;
+#[cfg(not(feature = "std"))]
+pub use spin::Lazy as LazyLock;
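The effect of the simplified re-exports is that downstream code names one type and gets `std::sync::LazyLock` with the `std` feature or `spin::Lazy` without it. A sketch of a consumer, assuming the crate's `sync` module re-export:

```rust
use std_shims::sync::LazyLock;

// Initialized on first access, under either backing implementation
static SQUARES: LazyLock<Vec<u64>> =
  LazyLock::new(|| (0u64 .. 16).map(|i| i * i).collect());

fn square(i: usize) -> u64 {
  SQUARES[i]
}
```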
common/task/Cargo.toml (new file, 22 lines)
@@ -0,0 +1,22 @@
+[package]
+name = "serai-task"
+version = "0.1.0"
+description = "A task schema for Serai services"
+license = "AGPL-3.0-only"
+repository = "https://github.com/serai-dex/serai/tree/develop/common/task"
+authors = ["Luke Parker <lukeparker5132@gmail.com>"]
+keywords = []
+edition = "2021"
+publish = false
+rust-version = "1.75"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
+[lints]
+workspace = true
+
+[dependencies]
+log = { version = "0.4", default-features = false, features = ["std"] }
+tokio = { version = "1", default-features = false, features = ["macros", "sync", "time"] }
common/task/LICENSE
@@ -1,6 +1,6 @@
 AGPL-3.0-only license

-Copyright (c) 2022-2023 Luke Parker
+Copyright (c) 2022-2024 Luke Parker

 This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU Affero General Public License Version 3 as
common/task/README.md (new file, 3 lines)
@@ -0,0 +1,3 @@
+# Task
+
+A schema to define tasks to be run ad infinitum.
common/task/src/lib.rs (new file, 159 lines)
@@ -0,0 +1,159 @@
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![doc = include_str!("../README.md")]
+#![deny(missing_docs)]
+
+use core::{future::Future, time::Duration};
+use std::sync::Arc;
+
+use tokio::sync::{mpsc, oneshot, Mutex};
+
+enum Closed {
+  NotClosed(Option<oneshot::Receiver<()>>),
+  Closed,
+}
+
+/// A handle for a task.
+#[derive(Clone)]
+pub struct TaskHandle {
+  run_now: mpsc::Sender<()>,
+  close: mpsc::Sender<()>,
+  closed: Arc<Mutex<Closed>>,
+}
+/// A task's internal structures.
+pub struct Task {
+  run_now: mpsc::Receiver<()>,
+  close: mpsc::Receiver<()>,
+  closed: oneshot::Sender<()>,
+}
+
+impl Task {
+  /// Create a new task definition.
+  pub fn new() -> (Self, TaskHandle) {
+    // Uses a capacity of 1 as any call to run as soon as possible satisfies all calls to run as
+    // soon as possible
+    let (run_now_send, run_now_recv) = mpsc::channel(1);
+    // And any call to close satisfies all calls to close
+    let (close_send, close_recv) = mpsc::channel(1);
+    let (closed_send, closed_recv) = oneshot::channel();
+    (
+      Self { run_now: run_now_recv, close: close_recv, closed: closed_send },
+      TaskHandle {
+        run_now: run_now_send,
+        close: close_send,
+        closed: Arc::new(Mutex::new(Closed::NotClosed(Some(closed_recv)))),
+      },
+    )
+  }
+}
+
+impl TaskHandle {
+  /// Tell the task to run now (and not whenever its next iteration on a timer is).
+  ///
+  /// Panics if the task has been dropped.
+  pub fn run_now(&self) {
+    #[allow(clippy::match_same_arms)]
+    match self.run_now.try_send(()) {
+      Ok(()) => {}
+      // NOP on full, as this task will already be ran as soon as possible
+      Err(mpsc::error::TrySendError::Full(())) => {}
+      Err(mpsc::error::TrySendError::Closed(())) => {
+        panic!("task was unexpectedly closed when calling run_now")
+      }
+    }
+  }
+
+  /// Close the task.
+  ///
+  /// Returns once the task shuts down after it finishes its current iteration (which may be of
+  /// unbounded time).
+  pub async fn close(self) {
+    // If another instance of the handle called this, don't error
+    let _ = self.close.send(()).await;
+    // Wait until we receive the closed message
+    let mut closed = self.closed.lock().await;
+    match &mut *closed {
+      Closed::NotClosed(ref mut recv) => {
+        assert_eq!(recv.take().unwrap().await, Ok(()), "continually ran task dropped itself?");
+        *closed = Closed::Closed;
+      }
+      Closed::Closed => {}
+    }
+  }
+}
+
+/// A task to be continually ran.
+pub trait ContinuallyRan: Sized + Send {
+  /// The amount of seconds before this task should be polled again.
+  const DELAY_BETWEEN_ITERATIONS: u64 = 5;
+  /// The maximum amount of seconds before this task should be run again.
+  ///
+  /// Upon error, the amount of time waited will be linearly increased until this limit.
+  const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 120;
+
+  /// Run an iteration of the task.
+  ///
+  /// If this returns `true`, all dependents of the task will immediately have a new iteration ran
+  /// (without waiting for whatever timer they were already on).
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>>;
+
+  /// Continually run the task.
+  fn continually_run(
+    mut self,
+    mut task: Task,
+    dependents: Vec<TaskHandle>,
+  ) -> impl Send + Future<Output = ()> {
+    async move {
+      // The default number of seconds to sleep before running the task again
+      let default_sleep_before_next_task = Self::DELAY_BETWEEN_ITERATIONS;
+      // The current number of seconds to sleep before running the task again
+      // We increment this upon errors in order to not flood the logs with errors
+      let mut current_sleep_before_next_task = default_sleep_before_next_task;
+      let increase_sleep_before_next_task = |current_sleep_before_next_task: &mut u64| {
+        let new_sleep = *current_sleep_before_next_task + default_sleep_before_next_task;
+        // Set a limit of sleeping for two minutes
+        *current_sleep_before_next_task = new_sleep.min(Self::MAX_DELAY_BETWEEN_ITERATIONS);
+      };
+
+      loop {
+        // If we were told to close/all handles were dropped, drop it
+        {
+          let should_close = task.close.try_recv();
+          match should_close {
+            Ok(()) | Err(mpsc::error::TryRecvError::Disconnected) => break,
+            Err(mpsc::error::TryRecvError::Empty) => {}
+          }
+        }
+
+        match self.run_iteration().await {
+          Ok(run_dependents) => {
+            // Upon a successful (error-free) loop iteration, reset the amount of time we sleep
+            current_sleep_before_next_task = default_sleep_before_next_task;
+
+            if run_dependents {
+              for dependent in &dependents {
+                dependent.run_now();
+              }
+            }
+          }
+          Err(e) => {
+            log::warn!("{}", e);
+            increase_sleep_before_next_task(&mut current_sleep_before_next_task);
+          }
+        }
+
+        // Don't run the task again for another few seconds UNLESS told to run now
+        tokio::select! {
+          () = tokio::time::sleep(Duration::from_secs(current_sleep_before_next_task)) => {},
+          msg = task.run_now.recv() => {
+            // Check if this is firing because the handle was dropped
+            if msg.is_none() {
+              break;
+            }
+          },
+        }
+      }
+
+      task.closed.send(()).unwrap();
+    }
+  }
+}
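A hedged sketch of how this schema is intended to be used: implement `ContinuallyRan`, spawn the returned future on a tokio runtime (with the `macros` and `rt` features), and drive it through the handle. `HeartbeatTask` is an illustrative name, not part of the crate:

```rust
use core::future::Future;
use serai_task::{Task, ContinuallyRan};

struct HeartbeatTask;
impl ContinuallyRan for HeartbeatTask {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
    async move {
      println!("heartbeat");
      // Returning true would immediately wake this task's dependents
      Ok(false)
    }
  }
}

#[tokio::main]
async fn main() {
  let (task, handle) = Task::new();
  // No dependents here; dependents would have their own `TaskHandle`s passed in
  tokio::spawn(HeartbeatTask.continually_run(task, vec![]));
  // Ask for an immediate iteration, then shut down once it completes
  handle.run_now();
  handle.close().await;
}
```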
common/zalloc/src/lib.rs
@@ -1,5 +1,5 @@
 #![cfg_attr(docsrs, feature(doc_cfg))]
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))]

 //! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation.
coordinator/Cargo.toml
@@ -8,6 +8,7 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
 publish = false
+rust-version = "1.81"

 [package.metadata.docs.rs]
 all-features = true
@@ -20,15 +21,14 @@ workspace = true
 async-trait = { version = "0.1", default-features = false }

 zeroize = { version = "^1.5", default-features = false, features = ["std"] }
+bitvec = { version = "1", default-features = false, features = ["std"] }
 rand_core = { version = "0.6", default-features = false, features = ["std"] }

 blake2 = { version = "0.10", default-features = false, features = ["std"] }

 transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] }
-dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"] }
 ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
-schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std", "aggregate"] }
-dkg-musig = { path = "../crypto/dkg/musig", default-features = false, features = ["std"] }
+schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std"] }
 frost = { package = "modular-frost", path = "../crypto/frost" }
 frost-schnorrkel = { path = "../crypto/schnorrkel" }
@@ -42,7 +42,7 @@ processor-messages = { package = "serai-processor-messages", path = "../processo
 message-queue = { package = "serai-message-queue", path = "../message-queue" }
 tributary = { package = "tributary-chain", path = "./tributary" }

-sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] }
+sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
 serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }

 hex = { version = "0.4", default-features = false, features = ["std"] }
@@ -57,8 +57,8 @@ libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp

 [dev-dependencies]
 tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }
-sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] }
-sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] }
+sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
+sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }

 [features]
 longer-reattempts = []
coordinator/cosign/Cargo.toml (new file, 36 lines)
@@ -0,0 +1,36 @@
+[package]
+name = "serai-cosign"
+version = "0.1.0"
+description = "Evaluator of cosigns for the Serai network"
+license = "AGPL-3.0-only"
+repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/cosign"
+authors = ["Luke Parker <lukeparker5132@gmail.com>"]
+keywords = []
+edition = "2021"
+publish = false
+rust-version = "1.81"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
+[package.metadata.cargo-machete]
+ignored = ["scale"]
+
+[lints]
+workspace = true
+
+[dependencies]
+blake2 = { version = "0.10", default-features = false, features = ["std"] }
+schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
+
+scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
+borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
+serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
+
+log = { version = "0.4", default-features = false, features = ["std"] }
+
+tokio = { version = "1", default-features = false, features = [] }
+
+serai-db = { path = "../../common/db" }
+serai-task = { path = "../../common/task" }
coordinator/cosign/LICENSE
@@ -1,6 +1,6 @@
 AGPL-3.0-only license

-Copyright (c) 2022-2023 Luke Parker
+Copyright (c) 2023-2024 Luke Parker

 This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU Affero General Public License Version 3 as
coordinator/cosign/README.md (new file, 121 lines)
@@ -0,0 +1,121 @@
+# Serai Cosign
+
+The Serai blockchain is controlled by a set of validators referred to as the
+Serai validators. These validators could attempt to double-spend, even if every
+node on the network is a full node, via equivocating.
+
+Posit:
+- The Serai validators control X SRI
+- The Serai validators produce block A swapping X SRI to Y XYZ
+- The Serai validators produce block B swapping X SRI to Z ABC
+- The Serai validators finalize block A and send to the validators for XYZ
+- The Serai validators finalize block B and send to the validators for ABC
+
+This is solved via the cosigning protocol. The validators for XYZ and the
+validators for ABC each sign their view of the Serai blockchain, communicating
+amongst each other to ensure consistency.
+
+The security of the cosigning protocol is not formally proven, and there are no
+claims it achieves Byzantine Fault Tolerance. This protocol is meant to be
+practical and make such attacks infeasible, when they could already be argued
+difficult to perform.
+
+### Definitions
+
+- Cosign: A signature from a non-Serai validator set for a Serai block
+- Cosign Commit: A collection of cosigns which achieve the necessary weight
+
+### Methodology
+
+Finalized blocks from the Serai network are intended to be cosigned if they
+contain burn events. Only once cosigned should non-Serai validators process
+them.
+
+Cosigning occurs by a non-Serai validator set, using their threshold keys
+declared on the Serai blockchain. Once 83% of non-Serai validator sets, by
+weight, cosign a block, a cosign commit is formed. A cosign commit for a block
+is considered to also cosign for all blocks preceding it.
+
+### Bounds Under Asynchrony
+
+Assuming an asynchronous environment fully controlled by the adversary, 34% of
+a validator set may cause an equivocation. Control of 67% of non-Serai
+validator sets, by weight, is sufficient to produce two distinct cosign commits
+at the same position. This is due to the honest stake, 33%, being split across
+the two candidates (67% + 16.5% = 83.5%, just over the threshold). This means
+the cosigning protocol may produce multiple cosign commits if 34% of 67%, just
+22.78%, of the non-Serai validator sets, is malicious. This would be in
+conjunction with 34% of the Serai validator set (assumed 20% of total stake),
+for a total stake requirement of 34% of 20% + 22.78% of 80% (25.024%). This is
+an increase from the 6.8% required without the cosigning protocol.
+
+### Bounds Under Synchrony
+
+Assuming the honest stake within the non-Serai validator sets detects the
+malicious stake within their set prior to assisting in producing a cosign for
+their set, for which there is a multi-second window, 67% of 67% of non-Serai
+validator sets is required to produce cosigns for those sets. This raises the
+total stake requirement to 42.712% (past the usual 34% threshold).
+
+### Behavior Reliant on Synchrony
+
+If the Serai blockchain node detects an equivocation, it will stop responding
+to all RPC requests and stop participating in finalizing further blocks. This
+lets the node communicate the equivocating commits to other nodes (causing them
+to exhibit the same behavior), yet prevents interaction with it.
+
+If cosigns representing 17% of the non-Serai validator sets by weight are
+detected for distinct blocks at the same position, the protocol halts. An
+explicit latency period of seventy seconds is enacted after receiving a cosign
+commit for the detection of such an equivocation. This is largely redundant
+given how the Serai blockchain node will presumably have halted itself by this
+time.
+
+### Equivocation-Detection Avoidance
+
+Malicious Serai validators could avoid detection of their equivocating if they
+produced two distinct blockchains, A and B, with different keys declared for
+the same non-Serai validator set. While the validators following A may detect
+the cosigns for distinct blocks by validators following B, the cosigns would be
+assumed invalid due to their signatures being verified against distinct keys.
+
+This is prevented by requiring cosigns on the blocks which declare new keys,
+ensuring all validators have a consistent view of the keys used within the
+cosigning protocol (per the bounds of the cosigning protocol). These blocks are
+exempt from the general policy of cosign commits cosigning all prior blocks,
+preventing the newly declared keys (which aren't yet cosigned) from being used
+to cosign themselves. These cosigns are flagged as "notable", are permanently
+archived, and must be synced before a validator will move forward.
+
+Cosigning the block which declares new keys also ensures agreement on the
+preceding block which declared the new set, with an exact specification of the
+participants and their weight, before it impacts the cosigning protocol.
+
+### Denial of Service Concerns
+
+Any historical Serai validator set may trigger a chain halt by producing an
+equivocation after their retirement. This requires 67% to be malicious. 34% of
+the active Serai validator set may also trigger a chain halt.
+
+17% of non-Serai validator sets equivocating causing a halt means 5.67% of
+non-Serai validator sets' stake may cause a halt (in an asynchronous
+environment fully controlled by the adversary). In a synchronous environment
+where the honest stake cannot be split across two candidates, 11.33% of
+non-Serai validator sets' stake is required.
+
+The more practical attack is for one to obtain 5.67% of non-Serai validator
+sets' stake, under any network conditions, and simply go offline. This will
+take 17% of validator sets offline with it, preventing any cosign commits
+from being performed. A fallback protocol where validators individually produce
+cosigns, removing the network's horizontal scalability but ensuring liveness,
+prevents this, restoring the additional requirements for control of an
+asynchronous network or 11.33% of non-Serai validator sets' stake.
+
+### TODO
+
+The Serai node no longer responding to RPC requests upon detecting any
+equivocation, and the fallback protocol where validators individually produce
+signatures, are not implemented at this time. The former means the detection of
+equivocating cosigns is not redundant and the latter makes 5.67% of non-Serai
+validator sets' stake the DoS threshold, even without control of an
+asynchronous network.
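The stake arithmetic in the two bounds sections of this README, restated as a worked calculation (using the README's own assumption that the Serai set holds 20% of total stake and the non-Serai sets the remaining 80%):

```latex
\begin{align*}
\text{split honest stake:} \quad & 0.67 + \tfrac{0.33}{2} = 0.835 > 0.83 \\
\text{asynchronous bound:} \quad & 0.34 \times 0.20 + (0.34 \times 0.67) \times 0.80
  = 0.068 + 0.18224 = 0.25024 \\
\text{synchronous bound:} \quad & 0.34 \times 0.20 + (0.67 \times 0.67) \times 0.80
  = 0.068 + 0.35912 = 0.42712
\end{align*}
```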
coordinator/cosign/src/delay.rs (new file, 55 lines)
@@ -0,0 +1,55 @@
+use core::future::Future;
+use std::time::{Duration, SystemTime};
+
+use serai_db::*;
+use serai_task::ContinuallyRan;
+
+use crate::evaluator::CosignedBlocks;
+
+/// How often callers should broadcast the cosigns flagged for rebroadcasting.
+pub const BROADCAST_FREQUENCY: Duration = Duration::from_secs(60);
+const SYNCHRONY_EXPECTATION: Duration = Duration::from_secs(10);
+const ACKNOWLEDGEMENT_DELAY: Duration =
+  Duration::from_secs(BROADCAST_FREQUENCY.as_secs() + SYNCHRONY_EXPECTATION.as_secs());
+
+create_db!(
+  SubstrateCosignDelay {
+    // The latest cosigned block number.
+    LatestCosignedBlockNumber: () -> u64,
+  }
+);
+
+/// A task to delay acknowledgement of cosigns.
+pub(crate) struct CosignDelayTask<D: Db> {
+  pub(crate) db: D,
+}
+
+impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
+    async move {
+      let mut made_progress = false;
+      loop {
+        let mut txn = self.db.txn();
+
+        // Receive the next block to mark as cosigned
+        let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else {
+          break;
+        };
+        // Calculate when we should mark it as valid
+        let time_valid =
+          SystemTime::UNIX_EPOCH + Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY;
+        // Sleep until then
+        tokio::time::sleep(time_valid.duration_since(SystemTime::now()).unwrap_or(Duration::ZERO))
+          .await;
+
+        // Set the cosigned block
+        LatestCosignedBlockNumber::set(&mut txn, &block_number);
+        txn.commit();
+
+        made_progress = true;
+      }
+
+      Ok(made_progress)
+    }
+  }
+}
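As a check on the constants above: the acknowledgement delay is the broadcast frequency plus the synchrony expectation, which is the seventy-second latency period this diff's README references. A hypothetical in-crate unit test pinning that relationship:

```rust
#[test]
fn acknowledgement_delay_matches_readme() {
  // 60s BROADCAST_FREQUENCY + 10s SYNCHRONY_EXPECTATION = 70s
  assert_eq!(ACKNOWLEDGEMENT_DELAY, Duration::from_secs(70));
}
```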
coordinator/cosign/src/evaluator.rs (new file, 222 lines)
@@ -0,0 +1,222 @@
+use core::future::Future;
+use std::time::{Duration, SystemTime};
+
+use serai_db::*;
+use serai_task::ContinuallyRan;
+
+use crate::{
+  HasEvents, GlobalSession, NetworksLatestCosignedBlock, RequestNotableCosigns,
+  intend::{GlobalSessionsChannel, BlockEventData, BlockEvents},
+};
+
+create_db!(
+  SubstrateCosignEvaluator {
+    // The global session currently being evaluated.
+    CurrentlyEvaluatedGlobalSession: () -> ([u8; 32], GlobalSession),
+  }
+);
+
+db_channel!(
+  SubstrateCosignEvaluatorChannels {
+    // (cosigned block, time cosign was evaluated)
+    CosignedBlocks: () -> (u64, u64),
+  }
+);
+
+// This is a strict function which won't panic, even with a malicious Serai node, so long as:
+// - It's called incrementally
+// - It's only called for block numbers we've completed indexing on within the intend task
+// - It's only called for block numbers after a global session has started
+// - The global sessions channel is populated as the block declaring the session is indexed
+// Which all hold true within the context of this task and the intend task.
+//
+// This function will also ensure the currently evaluated global session is incremented once we
+// finish evaluation of the prior session.
+fn currently_evaluated_global_session_strict(
+  txn: &mut impl DbTxn,
+  block_number: u64,
+) -> ([u8; 32], GlobalSession) {
+  let mut res = {
+    let existing = match CurrentlyEvaluatedGlobalSession::get(txn) {
+      Some(existing) => existing,
+      None => {
+        let first = GlobalSessionsChannel::try_recv(txn)
+          .expect("fetching latest global session yet none declared");
+        CurrentlyEvaluatedGlobalSession::set(txn, &first);
+        first
+      }
+    };
+    assert!(
+      existing.1.start_block_number <= block_number,
+      "candidate's start block number exceeds our block number"
+    );
+    existing
+  };
+
+  if let Some(next) = GlobalSessionsChannel::peek(txn) {
+    assert!(
+      block_number <= next.1.start_block_number,
+      "currently_evaluated_global_session_strict wasn't called incrementally"
+    );
+    // If it's time for this session to activate, take it from the channel and set it
+    if block_number == next.1.start_block_number {
+      GlobalSessionsChannel::try_recv(txn).unwrap();
+      CurrentlyEvaluatedGlobalSession::set(txn, &next);
+      res = next;
+    }
+  }
+
+  res
+}
+
+/// A task to determine if a block has been cosigned and we should handle it.
+pub(crate) struct CosignEvaluatorTask<D: Db, R: RequestNotableCosigns> {
+  pub(crate) db: D,
+  pub(crate) request: R,
+}
+
+impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D, R> {
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
+    async move {
+      let mut known_cosign = None;
+      let mut made_progress = false;
+      loop {
+        let mut txn = self.db.txn();
+        let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn)
+        else {
+          break;
+        };
+
+        match has_events {
+          // Because this had notable events, we require an explicit cosign for this block by a
+          // supermajority of the prior block's validator sets
+          HasEvents::Notable => {
+            let (global_session, global_session_info) =
+              currently_evaluated_global_session_strict(&mut txn, block_number);
+
+            let mut weight_cosigned = 0;
+            for set in global_session_info.sets {
+              // Check if we have the cosign from this set
+              if NetworksLatestCosignedBlock::get(&txn, global_session, set.network)
+                .map(|signed_cosign| signed_cosign.cosign.block_number) ==
+                Some(block_number)
+              {
+                // Since have this cosign, add the set's weight to the weight which has cosigned
+                weight_cosigned +=
+                  global_session_info.stakes.get(&set.network).ok_or_else(|| {
+                    "ValidatorSet in global session yet didn't have its stake".to_string()
+                  })?;
+              }
+            }
+            // Check if the sum weight doesn't cross the required threshold
+            if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) {
+              // Request the necessary cosigns over the network
+              // TODO: Add a timer to ensure this isn't called too often
+              self
+                .request
+                .request_notable_cosigns(global_session)
+                .await
+                .map_err(|e| format!("{e:?}"))?;
+              // We return an error so the delay before this task is run again increases
+              return Err(format!(
+                "notable block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
+              ));
+            }
+          }
+          // Since this block didn't have any notable events, we simply require a cosign for this
+          // block or a greater block by the current validator sets
+          HasEvents::NonNotable => {
+            // Check if this was satisfied by a cached result which wasn't calculated incrementally
+            let known_cosigned = if let Some(known_cosign) = known_cosign {
+              known_cosign >= block_number
+            } else {
+              // Clear `known_cosign` which is no longer helpful
+              known_cosign = None;
+              false
+            };
+
+            // If it isn't already known to be cosigned, evaluate the latest cosigns
+            if !known_cosigned {
+              /*
+                LatestCosign is populated with the latest cosigns for each network which don't
+                exceed the latest global session we've evaluated the start of. This current block
+                is during the latest global session we've evaluated the start of.
+              */
+
+              // Get the global session for this block
+              let (global_session, global_session_info) =
+                currently_evaluated_global_session_strict(&mut txn, block_number);
+
+              let mut weight_cosigned = 0;
+              let mut lowest_common_block: Option<u64> = None;
+              for set in global_session_info.sets {
+                // Check if this set cosigned this block or not
+                let Some(cosign) =
+                  NetworksLatestCosignedBlock::get(&txn, global_session, set.network)
+                else {
+                  continue;
+                };
+                if cosign.cosign.block_number >= block_number {
+                  weight_cosigned +=
+                    global_session_info.stakes.get(&set.network).ok_or_else(|| {
+                      "ValidatorSet in global session yet didn't have its stake".to_string()
+                    })?;
+                }
+
+                // Update the lowest block common to all of these cosigns
+                lowest_common_block = lowest_common_block
+                  .map(|existing| existing.min(cosign.cosign.block_number))
+                  .or(Some(cosign.cosign.block_number));
+              }
+
+              // Check if the sum weight doesn't cross the required threshold
+              if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) {
+                // Request the superseding notable cosigns over the network
+                // If this session hasn't yet produced notable cosigns, then we presume we'll see
+                // the desired non-notable cosigns as part of normal operations, without needing to
+                // explicitly request them
+                self
+                  .request
+                  .request_notable_cosigns(global_session)
+                  .await
+                  .map_err(|e| format!("{e:?}"))?;
+                // We return an error so the delay before this task is run again increases
+                return Err(format!(
+                  "block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
+                ));
+              }
+
+              // Update the cached result for the block we know is cosigned
+              /*
+                There may be a higher block which was cosigned, but once we get to this block,
+                we'll re-evaluate and find it then. The alternative would be an optimistic
+                re-evaluation now. Both are fine, so the lower-complexity option is preferred.
+              */
+              known_cosign = lowest_common_block;
+            }
+          }
+          // If this block has no events necessitating cosigning, we can immediately consider the
+          // block cosigned (making this block a NOP)
+          HasEvents::No => {}
+        }
+
+        // Since we checked we had the necessary cosigns, send it for delay before acknowledgement
+        CosignedBlocks::send(
+          &mut txn,
+          &(
+            block_number,
+            SystemTime::now()
+              .duration_since(SystemTime::UNIX_EPOCH)
+              .unwrap_or(Duration::ZERO)
+              .as_secs(),
+          ),
+        );
+        txn.commit();
+
+        made_progress = true;
+      }
+
+      Ok(made_progress)
+    }
+  }
+}
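The supermajority check appearing twice above, isolated as a helper (the function is illustrative, not part of the crate). Integer division truncates, so the `+ 1` makes the requirement strictly greater than 83% of total stake:

```rust
fn meets_cosign_threshold(weight_cosigned: u64, total_stake: u64) -> bool {
  weight_cosigned >= ((total_stake * 83) / 100) + 1
}

fn main() {
  // With 1,000 total stake: 830 is exactly 83% and insufficient; 831 suffices
  assert!(!meets_cosign_threshold(830, 1000));
  assert!(meets_cosign_threshold(831, 1000));
}
```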
181
coordinator/cosign/src/intend.rs
Normal file
181
coordinator/cosign/src/intend.rs
Normal file
@@ -0,0 +1,181 @@
|
|||||||
|
use core::future::Future;
use std::collections::HashMap;

use serai_client::{
  primitives::{SeraiAddress, Amount},
  validator_sets::primitives::ValidatorSet,
  Serai,
};

use serai_db::*;
use serai_task::ContinuallyRan;

use crate::*;

create_db!(
  CosignIntend {
    ScanCosignFrom: () -> u64,
  }
);

#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub(crate) struct BlockEventData {
  pub(crate) block_number: u64,
  pub(crate) has_events: HasEvents,
}

db_channel! {
  CosignIntendChannels {
    GlobalSessionsChannel: () -> ([u8; 32], GlobalSession),
    BlockEvents: () -> BlockEventData,
    IntendedCosigns: (set: ValidatorSet) -> CosignIntent,
  }
}

async fn block_has_events_justifying_a_cosign(
  serai: &Serai,
  block_number: u64,
) -> Result<(Block, HasEvents), String> {
  let block = serai
    .finalized_block_by_number(block_number)
    .await
    .map_err(|e| format!("{e:?}"))?
    .ok_or_else(|| "couldn't get block which should've been finalized".to_string())?;
  let serai = serai.as_of(block.hash());

  if !serai.validator_sets().key_gen_events().await.map_err(|e| format!("{e:?}"))?.is_empty() {
    return Ok((block, HasEvents::Notable));
  }

  if !serai.coins().burn_with_instruction_events().await.map_err(|e| format!("{e:?}"))?.is_empty() {
    return Ok((block, HasEvents::NonNotable));
  }

  Ok((block, HasEvents::No))
}
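
// Editor's sketch (added, not part of the original diff): the checks above are
// ordered, so a block with both a key-gen event and a burn-with-instruction event
// classifies as Notable. The resulting policy, isolated as a pure function:
#[cfg(test)]
fn classify_block(has_key_gen: bool, has_burn_with_instruction: bool) -> HasEvents {
  if has_key_gen {
    HasEvents::Notable
  } else if has_burn_with_instruction {
    HasEvents::NonNotable
  } else {
    HasEvents::No
  }
}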

/// A task to determine which blocks we should intend to cosign.
pub(crate) struct CosignIntendTask<D: Db> {
  pub(crate) db: D,
  pub(crate) serai: Serai,
}

impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
    async move {
      let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1);
      let latest_block_number =
        self.serai.latest_finalized_block().await.map_err(|e| format!("{e:?}"))?.number();

      for block_number in start_block_number ..= latest_block_number {
        let mut txn = self.db.txn();

        let (block, mut has_events) =
          block_has_events_justifying_a_cosign(&self.serai, block_number)
            .await
            .map_err(|e| format!("{e:?}"))?;

        // Check we are indexing a linear chain
        if (block_number > 1) &&
          (<[u8; 32]>::from(block.header.parent_hash) !=
            SubstrateBlocks::get(&txn, block_number - 1)
              .expect("indexing a block but haven't indexed its parent"))
        {
          Err(format!(
            "node's block #{block_number} doesn't build upon the previously indexed block #{}",
            block_number - 1
          ))?;
        }
        SubstrateBlocks::set(&mut txn, block_number, &block.hash());

        let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn);

        // If this is notable, it creates a new global session, which we index into the database
        // now
        if has_events == HasEvents::Notable {
          let serai = self.serai.as_of(block.hash());
          let sets_and_keys = cosigning_sets(&serai).await?;
          let global_session =
            GlobalSession::id(sets_and_keys.iter().map(|(set, _key)| *set).collect());

          let mut sets = Vec::with_capacity(sets_and_keys.len());
          let mut keys = HashMap::with_capacity(sets_and_keys.len());
          let mut stakes = HashMap::with_capacity(sets_and_keys.len());
          let mut total_stake = 0;
          for (set, key) in &sets_and_keys {
            sets.push(*set);
            keys.insert(set.network, SeraiAddress::from(*key));
            let stake = serai
              .validator_sets()
              .total_allocated_stake(set.network)
              .await
              .map_err(|e| format!("{e:?}"))?
              .unwrap_or(Amount(0))
              .0;
            stakes.insert(set.network, stake);
            total_stake += stake;
          }
          if total_stake == 0 {
            Err(format!("cosigning sets for block #{block_number} had 0 stake in total"))?;
          }

          let global_session_info = GlobalSession {
            // This session starts cosigning after this block, as this block must be cosigned by
            // the existing validators
            start_block_number: block_number + 1,
            sets,
            keys,
            stakes,
            total_stake,
          };
          GlobalSessions::set(&mut txn, global_session, &global_session_info);
          if let Some(ending_global_session) = global_session_for_this_block {
            GlobalSessionsLastBlock::set(&mut txn, ending_global_session, &block_number);
          }
          LatestGlobalSessionIntended::set(&mut txn, &global_session);
          GlobalSessionsChannel::send(&mut txn, &(global_session, global_session_info));
        }

        // If there isn't anyone available to cosign this block, meaning it'll never be cosigned,
        // we flag it as not having any events requiring cosigning so we don't attempt to
        // sign/require a cosign for it
        if global_session_for_this_block.is_none() {
          has_events = HasEvents::No;
        }

        match has_events {
          HasEvents::Notable | HasEvents::NonNotable => {
            let global_session_for_this_block = global_session_for_this_block
              .expect("global session for this block was None but still attempting to cosign it");
            let global_session_info = GlobalSessions::get(&txn, global_session_for_this_block)
              .expect("last global session intended wasn't saved to the database");

            // Tell each set of their expectation to cosign this block
            for set in global_session_info.sets {
              log::debug!("{:?} will be cosigning block #{block_number}", set);
              IntendedCosigns::send(
                &mut txn,
                set,
                &CosignIntent {
                  global_session: global_session_for_this_block,
                  block_number,
                  block_hash: block.hash(),
                  notable: has_events == HasEvents::Notable,
                },
              );
            }
          }
          HasEvents::No => {}
        }

        // Populate a singular feed with every block's status for the evaluator to work off of
        BlockEvents::send(&mut txn, &(BlockEventData { block_number, has_events }));
        // Mark this block as handled, meaning we should scan from the next block moving on
        ScanCosignFrom::set(&mut txn, &(block_number + 1));
        txn.commit();
      }

      Ok(start_block_number <= latest_block_number)
    }
  }
}
425
coordinator/cosign/src/lib.rs
Normal file
@@ -0,0 +1,425 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

use core::{fmt::Debug, future::Future};
use std::collections::HashMap;

use blake2::{Digest, Blake2s256};

use borsh::{BorshSerialize, BorshDeserialize};

use serai_client::{
  primitives::{NetworkId, SeraiAddress},
  validator_sets::primitives::{Session, ValidatorSet, KeyPair},
  Public, Block, Serai, TemporalSerai,
};

use serai_db::*;
use serai_task::*;

/// The cosigns which are intended to be performed.
mod intend;
/// The evaluator of the cosigns.
mod evaluator;
/// The task to delay acknowledgement of the cosigns.
mod delay;
pub use delay::BROADCAST_FREQUENCY;
use delay::LatestCosignedBlockNumber;

/// The schnorrkel context used when signing a cosign.
pub const COSIGN_CONTEXT: &[u8] = b"serai-cosign";

/// A 'global session', defined as all validator sets used for cosigning at a given moment.
///
/// We evaluate cosign faults within a global session. This ensures even if cosigners cosign
/// distinct blocks at distinct positions within a global session, we still identify the faults.
/*
  There is the attack where a validator set is given an alternate blockchain with a key generation
  event at block #n, while most validator sets are given a blockchain with a key generation event
  at block number #(n+1). This prevents whoever has the alternate blockchain from verifying the
  cosigns on the primary blockchain, and detecting the faults, if they use the keys as of the block
  prior to the block being cosigned.

  We solve this by binding cosigns to a global session ID, which has a specific start block, and
  reading the keys from the start block. This means that so long as all validator sets agree on the
  start of a global session, they can verify all cosigns produced by that session, regardless of
  how it advances. Since agreeing on the start of a global session is mandated, there's no way to
  have validator sets follow two distinct global sessions without breaking the bounds of the
  cosigning protocol.
*/
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub(crate) struct GlobalSession {
  pub(crate) start_block_number: u64,
  pub(crate) sets: Vec<ValidatorSet>,
  pub(crate) keys: HashMap<NetworkId, SeraiAddress>,
  pub(crate) stakes: HashMap<NetworkId, u64>,
  pub(crate) total_stake: u64,
}
impl GlobalSession {
  fn id(mut cosigners: Vec<ValidatorSet>) -> [u8; 32] {
    cosigners.sort_by_key(|a| borsh::to_vec(a).unwrap());
    Blake2s256::digest(borsh::to_vec(&cosigners).unwrap()).into()
  }
}
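
// Editor's sketch (added, not part of the original diff): since cosigners are
// sorted by their Borsh encoding before hashing, the ID is order-independent.
#[cfg(test)]
#[test]
fn global_session_id_is_order_independent() {
  let a = ValidatorSet { network: NetworkId::Bitcoin, session: Session(0) };
  let b = ValidatorSet { network: NetworkId::Monero, session: Session(0) };
  assert_eq!(GlobalSession::id(vec![a, b]), GlobalSession::id(vec![b, a]));
}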

create_db! {
  Cosign {
    // The following are populated by the intend task and used throughout the library

    // An index of Substrate blocks
    SubstrateBlocks: (block_number: u64) -> [u8; 32],
    // A mapping from a global session's ID to its relevant information.
    GlobalSessions: (global_session: [u8; 32]) -> GlobalSession,
    // The last block to be cosigned by a global session.
    GlobalSessionsLastBlock: (global_session: [u8; 32]) -> u64,
    // The latest global session intended.
    //
    // This is distinct from the latest global session for which we've evaluated the cosigns.
    LatestGlobalSessionIntended: () -> [u8; 32],

    // The following are managed by the `intake_cosign` function present in this file

    // The latest cosigned block for each network.
    //
    // This will only be populated with cosigns predating or during the most recent global session
    // to have its start cosigned.
    //
    // The global session changes upon a notable block, causing each global session to have exactly
    // one notable block. All validator sets will explicitly produce a cosign for their notable
    // block, causing the latest cosigned block for a global session to either be the global
    // session's notable cosigns or the network's latest cosigns.
    NetworksLatestCosignedBlock: (global_session: [u8; 32], network: NetworkId) -> SignedCosign,
    // Cosigns received for blocks not locally recognized as finalized.
    Faults: (global_session: [u8; 32]) -> Vec<SignedCosign>,
    // The global session which faulted.
    FaultedSession: () -> [u8; 32],
  }
}

/// If the block has events.
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
enum HasEvents {
  /// The block had a notable event.
  ///
  /// This is a special case as blocks with key gen events change the keys used for cosigning, and
  /// accordingly must be cosigned before we advance past them.
  Notable,
  /// The block had a non-notable event justifying a cosign.
  NonNotable,
  /// The block didn't have an event justifying a cosign.
  No,
}

/// An intended cosign.
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
struct CosignIntent {
  /// The global session this cosign is being performed under.
  global_session: [u8; 32],
  /// The number of the block to cosign.
  block_number: u64,
  /// The hash of the block to cosign.
  block_hash: [u8; 32],
  /// If this cosign must be handled before further cosigns are.
  notable: bool,
}

/// A cosign.
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub struct Cosign {
  /// The global session this cosign is being performed under.
  pub global_session: [u8; 32],
  /// The number of the block to cosign.
  pub block_number: u64,
  /// The hash of the block to cosign.
  pub block_hash: [u8; 32],
  /// The actual cosigner.
  pub cosigner: NetworkId,
}

/// A signed cosign.
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
pub struct SignedCosign {
  /// The cosign.
  pub cosign: Cosign,
  /// The signature for the cosign.
  pub signature: [u8; 64],
}

impl SignedCosign {
  fn verify_signature(&self, signer: serai_client::Public) -> bool {
    let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
    let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };

    signer.verify_simple(COSIGN_CONTEXT, &borsh::to_vec(&self.cosign).unwrap(), &signature).is_ok()
  }
}
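
// Editor's sketch (added; assumes `rand_core` as a dev-dependency): producing a
// cosign `verify_signature` accepts requires signing the Borsh-encoded `Cosign`
// under `COSIGN_CONTEXT` with the key registered for this network.
#[cfg(test)]
#[test]
fn sign_and_verify_round_trip() {
  use rand_core::OsRng;
  let keypair = schnorrkel::Keypair::generate_with(&mut OsRng);
  let cosign = Cosign {
    global_session: [0; 32],
    block_number: 1,
    block_hash: [0; 32],
    cosigner: NetworkId::Serai,
  };
  let signature =
    keypair.sign_simple(COSIGN_CONTEXT, &borsh::to_vec(&cosign).unwrap()).to_bytes();
  let signed = SignedCosign { cosign, signature };
  assert!(signed.verify_signature(Public(keypair.public.to_bytes())));
}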

/// Fetch the keys used for cosigning by a specific network.
async fn keys_for_network(
  serai: &TemporalSerai<'_>,
  network: NetworkId,
) -> Result<Option<(Session, KeyPair)>, String> {
  let Some(latest_session) =
    serai.validator_sets().session(network).await.map_err(|e| format!("{e:?}"))?
  else {
    // If this network hasn't had a session declared, move on
    return Ok(None);
  };

  // Get the keys for the latest session
  if let Some(keys) = serai
    .validator_sets()
    .keys(ValidatorSet { network, session: latest_session })
    .await
    .map_err(|e| format!("{e:?}"))?
  {
    return Ok(Some((latest_session, keys)));
  }

  // If the latest session has yet to set keys, use the prior session
  if let Some(prior_session) = latest_session.0.checked_sub(1).map(Session) {
    if let Some(keys) = serai
      .validator_sets()
      .keys(ValidatorSet { network, session: prior_session })
      .await
      .map_err(|e| format!("{e:?}"))?
    {
      return Ok(Some((prior_session, keys)));
    }
  }

  Ok(None)
}
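
// Editor's sketch (added, not part of the original diff): the session-fallback rule
// above as a pure function. `has_keys` abstracts over whether a session's keys are
// set on-chain.
#[cfg(test)]
fn session_to_use(latest: Session, has_keys: impl Fn(Session) -> bool) -> Option<Session> {
  if has_keys(latest) {
    return Some(latest);
  }
  latest.0.checked_sub(1).map(Session).filter(|prior| has_keys(*prior))
}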

/// Fetch the `ValidatorSet`s, and their associated keys, used for cosigning as of this block.
async fn cosigning_sets(serai: &TemporalSerai<'_>) -> Result<Vec<(ValidatorSet, Public)>, String> {
  let mut sets = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
  for network in serai_client::primitives::NETWORKS {
    let Some((session, keys)) = keys_for_network(serai, network).await? else {
      // If this network doesn't have usable keys, move on
      continue;
    };

    sets.push((ValidatorSet { network, session }, keys.0));
  }
  Ok(sets)
}

/// An object usable to request notable cosigns for a block.
pub trait RequestNotableCosigns: 'static + Send {
  /// The error type which may be encountered when requesting notable cosigns.
  type Error: Debug;

  /// Request the notable cosigns for this global session.
  fn request_notable_cosigns(
    &self,
    global_session: [u8; 32],
  ) -> impl Send + Future<Output = Result<(), Self::Error>>;
}
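
// Editor's sketch (added, not part of the original diff): a minimal no-op
// implementation of the trait, for contexts (e.g. tests) where notable cosigns are
// expected to arrive over the normal gossip path and explicit requests can be skipped.
#[cfg(test)]
struct NoopRequestNotableCosigns;
#[cfg(test)]
impl RequestNotableCosigns for NoopRequestNotableCosigns {
  type Error = core::convert::Infallible;
  fn request_notable_cosigns(
    &self,
    _global_session: [u8; 32],
  ) -> impl Send + Future<Output = Result<(), Self::Error>> {
    async { Ok(()) }
  }
}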

/// An error used to indicate the cosigning protocol has faulted.
pub struct Faulted;

/// The interface to manage cosigning with.
pub struct Cosigning<D: Db> {
  db: D,
}
impl<D: Db> Cosigning<D> {
  /// Spawn the tasks to intend and evaluate cosigns.
  ///
  /// The database specified must only be used with a singular instance of the Serai network, and
  /// only used once at any given time.
  pub fn spawn<R: RequestNotableCosigns>(
    db: D,
    serai: Serai,
    request: R,
    tasks_to_run_upon_cosigning: Vec<TaskHandle>,
  ) -> Self {
    let (intend_task, _intend_task_handle) = Task::new();
    let (evaluator_task, evaluator_task_handle) = Task::new();
    let (delay_task, delay_task_handle) = Task::new();
    tokio::spawn(
      (intend::CosignIntendTask { db: db.clone(), serai })
        .continually_run(intend_task, vec![evaluator_task_handle]),
    );
    tokio::spawn(
      (evaluator::CosignEvaluatorTask { db: db.clone(), request })
        .continually_run(evaluator_task, vec![delay_task_handle]),
    );
    tokio::spawn(
      (delay::CosignDelayTask { db: db.clone() })
        .continually_run(delay_task, tasks_to_run_upon_cosigning),
    );
    Self { db }
  }

  /// The latest cosigned block number.
  pub fn latest_cosigned_block_number(&self) -> Result<u64, Faulted> {
    if FaultedSession::get(&self.db).is_some() {
      Err(Faulted)?;
    }

    Ok(LatestCosignedBlockNumber::get(&self.db).unwrap_or(0))
  }

  /// Fetch the notable cosigns for a global session in order to respond to requests.
  ///
  /// If this global session hasn't produced any notable cosigns, this will return the latest
  /// cosigns for this session.
  pub fn notable_cosigns(&self, global_session: [u8; 32]) -> Vec<SignedCosign> {
    let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
    for network in serai_client::primitives::NETWORKS {
      if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, global_session, network) {
        cosigns.push(cosign);
      }
    }
    cosigns
  }

  /// The cosigns to rebroadcast every `BROADCAST_FREQUENCY` seconds.
  ///
  /// This will be the most recent cosigns, in case the initial broadcast failed, or the faulty
  /// cosigns, in case of a fault, to induce identification of the fault by others.
  pub fn cosigns_to_rebroadcast(&self) -> Vec<SignedCosign> {
    if let Some(faulted) = FaultedSession::get(&self.db) {
      let mut cosigns = Faults::get(&self.db, faulted).expect("faulted with no faults");
      // Also include all of our recognized-as-honest cosigns in an attempt to induce fault
      // identification in those who see the faulty cosigns as honest
      for network in serai_client::primitives::NETWORKS {
        if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, faulted, network) {
          if cosign.cosign.global_session == faulted {
            cosigns.push(cosign);
          }
        }
      }
      cosigns
    } else {
      let Some(latest_global_session) = LatestGlobalSessionIntended::get(&self.db) else {
        return vec![];
      };
      let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
      for network in serai_client::primitives::NETWORKS {
        if let Some(cosign) =
          NetworksLatestCosignedBlock::get(&self.db, latest_global_session, network)
        {
          cosigns.push(cosign);
        }
      }
      cosigns
    }
  }

  /// Intake a cosign from the Serai network.
  ///
  /// - Returns Err(_) if there was an error trying to validate the cosign and it should be
  ///   retried later.
  /// - Returns Ok(true) if the cosign was successfully handled or could not be handled at this
  ///   time.
  /// - Returns Ok(false) if the cosign was invalid.
  //
  // We collapse a cosign which shouldn't be handled yet into a valid cosign (`Ok(true)`) as we
  // assume we'll either explicitly request it if we need it or we'll naturally see it (or a later,
  // more relevant, cosign) again.
  //
  // Takes `&mut self` as this should only be called once at any given moment.
  // TODO: Don't overload bool here
  pub fn intake_cosign(&mut self, signed_cosign: &SignedCosign) -> Result<bool, String> {
    let cosign = &signed_cosign.cosign;
    let network = cosign.cosigner;

    // Check our indexed blockchain includes a block with this block number
    let Some(our_block_hash) = SubstrateBlocks::get(&self.db, cosign.block_number) else {
      return Ok(true);
    };
    let faulty = cosign.block_hash != our_block_hash;

    // Check this isn't a dated cosign within its global session (as it would be if rebroadcasted)
    if !faulty {
      if let Some(existing) =
        NetworksLatestCosignedBlock::get(&self.db, cosign.global_session, network)
      {
        if existing.cosign.block_number >= cosign.block_number {
          return Ok(true);
        }
      }
    }

    let Some(global_session) = GlobalSessions::get(&self.db, cosign.global_session) else {
      // Unrecognized global session
      return Ok(true);
    };

    // Check the cosigned block number is in range to the global session
    if cosign.block_number < global_session.start_block_number {
      // Cosign is for a block predating the global session
      return Ok(false);
    }
    if !faulty {
      // This prevents a malicious validator set, on the same chain, from producing a cosign after
      // their final block, replacing their notable cosign
      if let Some(last_block) = GlobalSessionsLastBlock::get(&self.db, cosign.global_session) {
        if cosign.block_number > last_block {
          // Cosign is for a block after the last block this global session should have signed
          return Ok(false);
        }
      }
    }

    // Check the cosign's signature
    {
      let key = Public::from({
        let Some(key) = global_session.keys.get(&network) else {
          return Ok(false);
        };
        *key
      });

      if !signed_cosign.verify_signature(key) {
        return Ok(false);
      }
    }

    // Since we verified this cosign's signature, and have a chain sufficiently long, handle the
    // cosign

    let mut txn = self.db.txn();

    if !faulty {
      // If this is for a future global session, we don't acknowledge this cosign at this time
      let latest_cosigned_block_number = LatestCosignedBlockNumber::get(&txn).unwrap_or(0);
      // This global session starts the block *after* its declaration, so we want to check if the
      // block declaring it was cosigned
      if (global_session.start_block_number - 1) > latest_cosigned_block_number {
        drop(txn);
        return Ok(true);
      }

      // This is safe as it's in-range and newer, as prior checked since it isn't faulty
      NetworksLatestCosignedBlock::set(&mut txn, cosign.global_session, network, signed_cosign);
    } else {
      let mut faults = Faults::get(&txn, cosign.global_session).unwrap_or(vec![]);
      // Only handle this as a fault if this set wasn't prior faulty
      if !faults.iter().any(|cosign| cosign.cosign.cosigner == network) {
        faults.push(signed_cosign.clone());
        Faults::set(&mut txn, cosign.global_session, &faults);

        let mut weight_cosigned = 0;
        for fault in &faults {
          let Some(stake) = global_session.stakes.get(&fault.cosign.cosigner) else {
            Err("cosigner with recognized key didn't have a stake entry saved".to_string())?
          };
          weight_cosigned += stake;
        }

        // Check if the sum weight means a fault has occurred
        if weight_cosigned >= ((global_session.total_stake * 17) / 100) {
          FaultedSession::set(&mut txn, &cosign.global_session);
        }
      }
    }

    txn.commit();
    Ok(true)
  }
}
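
// Editor's sketch (added, not part of the original diff): the fault threshold from
// `intake_cosign`, in isolation. Conflicting cosigns covering 17% of total stake
// mark the session as faulted (the removed evaluator below cites
// https://github.com/serai-dex/serai/issues/339 for the 17% figure).
#[cfg(test)]
#[test]
fn fault_threshold_is_17_percent() {
  let total_stake = 100u64;
  let faulted = |weight_cosigned: u64| weight_cosigned >= ((total_stake * 17) / 100);
  assert!(!faulted(16));
  assert!(faulted(17));
}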

@@ -1,333 +0,0 @@
use core::time::Duration;
use std::{
  sync::Arc,
  collections::{HashSet, HashMap},
};

use tokio::{
  sync::{mpsc, Mutex, RwLock},
  time::sleep,
};

use borsh::BorshSerialize;
use sp_application_crypto::RuntimePublic;
use serai_client::{
  primitives::{ExternalNetworkId, EXTERNAL_NETWORKS},
  validator_sets::primitives::{ExternalValidatorSet, Session},
  Serai, SeraiError, TemporalSerai,
};

use serai_db::{Get, DbTxn, Db, create_db};

use processor_messages::coordinator::cosign_block_msg;

use crate::{
  p2p::{CosignedBlock, GossipMessageKind, P2p},
  substrate::LatestCosignedBlock,
};

create_db! {
  CosignDb {
    ReceivedCosign: (set: ExternalValidatorSet, block: [u8; 32]) -> CosignedBlock,
    LatestCosign: (network: ExternalNetworkId) -> CosignedBlock,
    DistinctChain: (set: ExternalValidatorSet) -> (),
  }
}

pub struct CosignEvaluator<D: Db> {
  db: Mutex<D>,
  serai: Arc<Serai>,
  stakes: RwLock<Option<HashMap<ExternalNetworkId, u64>>>,
  latest_cosigns: RwLock<HashMap<ExternalNetworkId, CosignedBlock>>,
}

impl<D: Db> CosignEvaluator<D> {
  async fn update_latest_cosign(&self) {
    let stakes_lock = self.stakes.read().await;
    // If we haven't gotten the stake data yet, return
    let Some(stakes) = stakes_lock.as_ref() else { return };

    let total_stake = stakes.values().copied().sum::<u64>();

    let latest_cosigns = self.latest_cosigns.read().await;
    let mut highest_block = 0;
    for cosign in latest_cosigns.values() {
      let mut networks = HashSet::new();
      for (network, sub_cosign) in &*latest_cosigns {
        if sub_cosign.block_number >= cosign.block_number {
          networks.insert(network);
        }
      }
      let sum_stake =
        networks.into_iter().map(|network| stakes.get(network).unwrap_or(&0)).sum::<u64>();
      let needed_stake = ((total_stake * 2) / 3) + 1;
      if (total_stake == 0) || (sum_stake > needed_stake) {
        highest_block = highest_block.max(cosign.block_number);
      }
    }

    let mut db_lock = self.db.lock().await;
    let mut txn = db_lock.txn();
    if highest_block > LatestCosignedBlock::latest_cosigned_block(&txn) {
      log::info!("setting latest cosigned block to {}", highest_block);
      LatestCosignedBlock::set(&mut txn, &highest_block);
    }
    txn.commit();
  }
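
  // Editor's note (added commentary, not part of the original source): the
  // supermajority check in `update_latest_cosign` above requires strictly more than
  // `needed_stake = ((total_stake * 2) / 3) + 1`. For example, with a total stake of
  // 90, needed_stake is 61, so cosigns covering 62 or more stake advance the block.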

  async fn update_stakes(&self) -> Result<(), SeraiError> {
    let serai = self.serai.as_of_latest_finalized_block().await?;

    let mut stakes = HashMap::new();
    for network in EXTERNAL_NETWORKS {
      // Use whether this network has published a Batch as a short-circuit for whether they've
      // ever set a key
      let set_key = serai.in_instructions().last_batch_for_network(network).await?.is_some();
      if set_key {
        stakes.insert(
          network,
          serai
            .validator_sets()
            .total_allocated_stake(network.into())
            .await?
            .expect("network which published a batch didn't have a stake set")
            .0,
        );
      }
    }

    // Since we've successfully built stakes, set it
    *self.stakes.write().await = Some(stakes);

    self.update_latest_cosign().await;

    Ok(())
  }

  // Uses Err to signify a message should be retried
  async fn handle_new_cosign(&self, cosign: CosignedBlock) -> Result<(), SeraiError> {
    // If we already have this cosign or a newer cosign, return
    if let Some(latest) = self.latest_cosigns.read().await.get(&cosign.network) {
      if latest.block_number >= cosign.block_number {
        return Ok(());
      }
    }

    // If this is an old cosign (older than a day), drop it
    let latest_block = self.serai.latest_finalized_block().await?;
    if (cosign.block_number + (24 * 60 * 60 / 6)) < latest_block.number() {
      log::debug!("received old cosign supposedly signed by {:?}", cosign.network);
      return Ok(());
    }

    let Some(block) = self.serai.finalized_block_by_number(cosign.block_number).await? else {
      log::warn!("received cosign with a block number which doesn't map to a block");
      return Ok(());
    };

    async fn set_with_keys_fn(
      serai: &TemporalSerai<'_>,
      network: ExternalNetworkId,
    ) -> Result<Option<ExternalValidatorSet>, SeraiError> {
      let Some(latest_session) = serai.validator_sets().session(network.into()).await? else {
        log::warn!("received cosign from {:?}, which doesn't yet have a session", network);
        return Ok(None);
      };
      let prior_session = Session(latest_session.0.saturating_sub(1));
      Ok(Some(
        if serai
          .validator_sets()
          .keys(ExternalValidatorSet { network, session: prior_session })
          .await?
          .is_some()
        {
          ExternalValidatorSet { network, session: prior_session }
        } else {
          ExternalValidatorSet { network, session: latest_session }
        },
      ))
    }

    // Get the key for this network as of the prior block
    // If we have two chains, this value may be different across chains depending on if one chain
    // included the set_keys and one didn't
    // Because set_keys will force a cosign, it will force detection of distinct blocks
    // re: set_keys using keys prior to set_keys (assumed amenable to all)
    let serai = self.serai.as_of(block.header.parent_hash.into());

    let Some(set_with_keys) = set_with_keys_fn(&serai, cosign.network).await? else {
      return Ok(());
    };
    let Some(keys) = serai.validator_sets().keys(set_with_keys).await? else {
      log::warn!("received cosign for a block we didn't have keys for");
      return Ok(());
    };

    if !keys
      .0
      .verify(&cosign_block_msg(cosign.block_number, cosign.block), &cosign.signature.into())
    {
      log::warn!("received cosigned block with an invalid signature");
      return Ok(());
    }

    log::info!(
      "received cosign for block {} ({}) by {:?}",
      block.number(),
      hex::encode(cosign.block),
      cosign.network
    );

    // Save this cosign to the DB
    {
      let mut db = self.db.lock().await;
      let mut txn = db.txn();
      ReceivedCosign::set(&mut txn, set_with_keys, cosign.block, &cosign);
      LatestCosign::set(&mut txn, set_with_keys.network, &(cosign));
      txn.commit();
    }

    if cosign.block != block.hash() {
      log::error!(
        "received cosign for a distinct block at {}. we have {}. cosign had {}",
        cosign.block_number,
        hex::encode(block.hash()),
        hex::encode(cosign.block)
      );

      let serai = self.serai.as_of(latest_block.hash());

      let mut db = self.db.lock().await;
      // Save this set as being on a different chain
      let mut txn = db.txn();
      DistinctChain::set(&mut txn, set_with_keys, &());
      txn.commit();

      let mut total_stake = 0;
      let mut total_on_distinct_chain = 0;
      for network in EXTERNAL_NETWORKS {
        // Get the current set for this network
        let set_with_keys = {
          let mut res;
          while {
            res = set_with_keys_fn(&serai, network).await;
            res.is_err()
          } {
            log::error!(
              "couldn't get the set with keys when checking for a distinct chain: {:?}",
              res
            );
            tokio::time::sleep(core::time::Duration::from_secs(3)).await;
          }
          res.unwrap()
        };

        // Get its stake
        // Doesn't use the stakes inside self to prevent deadlocks re: multi-lock acquisition
        if let Some(set_with_keys) = set_with_keys {
          let stake = {
            let mut res;
            while {
              res =
                serai.validator_sets().total_allocated_stake(set_with_keys.network.into()).await;
              res.is_err()
            } {
              log::error!(
                "couldn't get total allocated stake when checking for a distinct chain: {:?}",
                res
              );
              tokio::time::sleep(core::time::Duration::from_secs(3)).await;
            }
            res.unwrap()
          };

          if let Some(stake) = stake {
            total_stake += stake.0;

            if DistinctChain::get(&*db, set_with_keys).is_some() {
              total_on_distinct_chain += stake.0;
            }
          }
        }
      }

      // See https://github.com/serai-dex/serai/issues/339 for the reasoning on 17%
      if (total_stake * 17 / 100) <= total_on_distinct_chain {
        panic!("17% of validator sets (by stake) have co-signed a distinct chain");
      }
    } else {
      {
        let mut latest_cosigns = self.latest_cosigns.write().await;
        latest_cosigns.insert(cosign.network, cosign);
      }
      self.update_latest_cosign().await;
    }

    Ok(())
  }

  #[allow(clippy::new_ret_no_self)]
  pub fn new<P: P2p>(db: D, p2p: P, serai: Arc<Serai>) -> mpsc::UnboundedSender<CosignedBlock> {
    let mut latest_cosigns = HashMap::new();
    for network in EXTERNAL_NETWORKS {
      if let Some(cosign) = LatestCosign::get(&db, network) {
        latest_cosigns.insert(network, cosign);
      }
    }

    let evaluator = Arc::new(Self {
      db: Mutex::new(db),
      serai,
      stakes: RwLock::new(None),
      latest_cosigns: RwLock::new(latest_cosigns),
    });

    // Spawn a task to update stakes regularly
    tokio::spawn({
      let evaluator = evaluator.clone();
      async move {
        loop {
          // Run this until it passes
          while evaluator.update_stakes().await.is_err() {
            log::warn!("couldn't update stakes in the cosign evaluator");
            // Try again in 10 seconds
            sleep(Duration::from_secs(10)).await;
          }
          // Run it every 10 minutes as we don't need the exact stake data for this to be valid
          sleep(Duration::from_secs(10 * 60)).await;
        }
      }
    });

    // Spawn a task to receive cosigns and handle them
    let (send, mut recv) = mpsc::unbounded_channel();
    tokio::spawn({
      let evaluator = evaluator.clone();
      async move {
        while let Some(msg) = recv.recv().await {
          while evaluator.handle_new_cosign(msg).await.is_err() {
            // Try again in 10 seconds
            sleep(Duration::from_secs(10)).await;
          }
        }
      }
    });

    // Spawn a task to rebroadcast the most recent cosigns
    tokio::spawn({
      async move {
        loop {
          let cosigns = evaluator.latest_cosigns.read().await.values().copied().collect::<Vec<_>>();
          for cosign in cosigns {
            let mut buf = vec![];
            cosign.serialize(&mut buf).unwrap();
            P2p::broadcast(&p2p, GossipMessageKind::CosignedBlock, buf).await;
          }
          sleep(Duration::from_secs(60)).await;
        }
      }
    });

    // Return the channel to send cosigns
    send
  }
}

@@ -6,9 +6,9 @@ use blake2::{
 use scale::Encode;
 use borsh::{BorshSerialize, BorshDeserialize};
 use serai_client::{
+  primitives::NetworkId,
+  validator_sets::primitives::{Session, ValidatorSet},
   in_instructions::primitives::{Batch, SignedBatch},
-  primitives::ExternalNetworkId,
-  validator_sets::primitives::{ExternalValidatorSet, Session},
 };

 pub use serai_db::*;

@@ -18,21 +18,21 @@ use crate::tributary::{TributarySpec, Transaction, scanner::RecognizedIdType};

 create_db!(
   MainDb {
-    HandledMessageDb: (network: ExternalNetworkId) -> u64,
+    HandledMessageDb: (network: NetworkId) -> u64,
     ActiveTributaryDb: () -> Vec<u8>,
-    RetiredTributaryDb: (set: ExternalValidatorSet) -> (),
+    RetiredTributaryDb: (set: ValidatorSet) -> (),
     FirstPreprocessDb: (
-      network: ExternalNetworkId,
+      network: NetworkId,
       id_type: RecognizedIdType,
       id: &[u8]
     ) -> Vec<Vec<u8>>,
-    LastReceivedBatchDb: (network: ExternalNetworkId) -> u32,
-    ExpectedBatchDb: (network: ExternalNetworkId, id: u32) -> [u8; 32],
-    BatchDb: (network: ExternalNetworkId, id: u32) -> SignedBatch,
-    LastVerifiedBatchDb: (network: ExternalNetworkId) -> u32,
-    HandoverBatchDb: (set: ExternalValidatorSet) -> u32,
-    LookupHandoverBatchDb: (network: ExternalNetworkId, batch: u32) -> Session,
-    QueuedBatchesDb: (set: ExternalValidatorSet) -> Vec<u8>
+    LastReceivedBatchDb: (network: NetworkId) -> u32,
+    ExpectedBatchDb: (network: NetworkId, id: u32) -> [u8; 32],
+    BatchDb: (network: NetworkId, id: u32) -> SignedBatch,
+    LastVerifiedBatchDb: (network: NetworkId) -> u32,
+    HandoverBatchDb: (set: ValidatorSet) -> u32,
+    LookupHandoverBatchDb: (network: NetworkId, batch: u32) -> Session,
+    QueuedBatchesDb: (set: ValidatorSet) -> Vec<u8>
   }
 );

@@ -61,7 +61,7 @@ impl ActiveTributaryDb {
     ActiveTributaryDb::set(txn, &existing_bytes);
   }

-  pub fn retire_tributary(txn: &mut impl DbTxn, set: ExternalValidatorSet) {
+  pub fn retire_tributary(txn: &mut impl DbTxn, set: ValidatorSet) {
     let mut active = Self::active_tributaries(txn).1;
     for i in 0 .. active.len() {
       if active[i].set() == set {

@@ -82,7 +82,7 @@ impl ActiveTributaryDb {
 impl FirstPreprocessDb {
   pub fn save_first_preprocess(
     txn: &mut impl DbTxn,
-    network: ExternalNetworkId,
+    network: NetworkId,
     id_type: RecognizedIdType,
     id: &[u8],
     preprocess: &Vec<Vec<u8>>,

@@ -108,19 +108,19 @@ impl ExpectedBatchDb {
   }
 }

 impl HandoverBatchDb {
-  pub fn set_handover_batch(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: u32) {
+  pub fn set_handover_batch(txn: &mut impl DbTxn, set: ValidatorSet, batch: u32) {
     Self::set(txn, set, &batch);
     LookupHandoverBatchDb::set(txn, set.network, batch, &set.session);
   }
 }
 impl QueuedBatchesDb {
-  pub fn queue(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: &Transaction) {
+  pub fn queue(txn: &mut impl DbTxn, set: ValidatorSet, batch: &Transaction) {
     let mut batches = Self::get(txn, set).unwrap_or_default();
     batch.write(&mut batches).unwrap();
     Self::set(txn, set, &batches);
   }

-  pub fn take(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec<Transaction> {
+  pub fn take(txn: &mut impl DbTxn, set: ValidatorSet) -> Vec<Transaction> {
     let batches_vec = Self::get(txn, set).unwrap_or_default();
     txn.del(Self::key(set));

@@ -1,5 +1,3 @@
-#![expect(clippy::cast_possible_truncation)]
-
 use core::ops::Deref;
 use std::{
   sync::{OnceLock, Arc},

@@ -10,24 +8,22 @@ use std::{
 use zeroize::{Zeroize, Zeroizing};
 use rand_core::OsRng;

-use dalek_ff_group::Ristretto;
 use ciphersuite::{
   group::{
     ff::{Field, PrimeField},
     GroupEncoding,
   },
-  Ciphersuite,
+  Ciphersuite, Ristretto,
 };
 use schnorr::SchnorrSignature;
-use frost::Participant;

 use serai_db::{DbTxn, Db};

 use scale::Encode;
 use borsh::BorshSerialize;
 use serai_client::{
-  primitives::ExternalNetworkId,
-  validator_sets::primitives::{ExternalValidatorSet, KeyPair, Session},
+  primitives::NetworkId,
+  validator_sets::primitives::{Session, ValidatorSet, KeyPair},
   Public, Serai, SeraiInInstructions,
 };

@@ -82,7 +78,7 @@ pub struct ActiveTributary<D: Db, P: P2p> {
 #[derive(Clone)]
 pub enum TributaryEvent<D: Db, P: P2p> {
   NewTributary(ActiveTributary<D, P>),
-  TributaryRetired(ExternalValidatorSet),
+  TributaryRetired(ValidatorSet),
 }

 // Creates a new tributary and sends it to all listeners.

@@ -117,16 +113,17 @@ async fn add_tributary<D: Db, Pro: Processors, P: P2p>(
   // If we're rebooting, we'll re-fire this message
   // This is safe due to the message-queue deduplicating based off the intent system
   let set = spec.set();
-  let our_i = spec
-    .i(&[], Ristretto::generator() * key.deref())
-    .expect("adding a tributary for a set we aren't in set for");
   processors
     .send(
       set.network,
       processor_messages::key_gen::CoordinatorMessage::GenerateKey {
-        id: processor_messages::key_gen::KeyGenId { session: set.session, attempt: 0 },
-        params: frost::ThresholdParams::new(spec.t(), spec.n(&[]), our_i.start).unwrap(),
-        shares: u16::from(our_i.end) - u16::from(our_i.start),
+        session: set.session,
+        threshold: spec.t(),
+        evrf_public_keys: spec.evrf_public_keys(),
+        // TODO
+        // params: frost::ThresholdParams::new(spec.t(), spec.n(&[]), our_i.start).unwrap(),
+        // shares: u16::from(our_i.end) - u16::from(our_i.start),
       },
     )
     .await;

@@ -148,7 +145,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
   p2p: &P,
   cosign_channel: &mpsc::UnboundedSender<CosignedBlock>,
   tributaries: &HashMap<Session, ActiveTributary<D, P>>,
-  network: ExternalNetworkId,
+  network: NetworkId,
   msg: &processors::Message,
 ) -> bool {
   #[allow(clippy::nonminimal_bool)]

@@ -169,12 +166,9 @@ async fn handle_processor_message<D: Db, P: P2p>(
     // We'll only receive these if we fired GenerateKey, which we'll only do if we're
     // in-set, making the Tributary relevant
     ProcessorMessage::KeyGen(inner_msg) => match inner_msg {
-      key_gen::ProcessorMessage::Commitments { id, .. } |
-      key_gen::ProcessorMessage::InvalidCommitments { id, .. } |
-      key_gen::ProcessorMessage::Shares { id, .. } |
-      key_gen::ProcessorMessage::InvalidShare { id, .. } |
-      key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } |
-      key_gen::ProcessorMessage::Blame { id, .. } => Some(id.session),
+      key_gen::ProcessorMessage::Participation { session, .. } |
+      key_gen::ProcessorMessage::GeneratedKeyPair { session, .. } |
+      key_gen::ProcessorMessage::Blame { session, .. } => Some(*session),
     },
     ProcessorMessage::Sign(inner_msg) => match inner_msg {
       // We'll only receive InvalidParticipant/Preprocess/Share if we're actively signing

@@ -196,8 +190,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
         .iter()
         .map(|plan| plan.session)
         .filter(|session| {
-          RetiredTributaryDb::get(&txn, ExternalValidatorSet { network, session: *session })
-            .is_none()
+          RetiredTributaryDb::get(&txn, ValidatorSet { network, session: *session }).is_none()
         })
         .collect::<HashSet<_>>();

@@ -269,17 +262,14 @@ async fn handle_processor_message<D: Db, P: P2p>(
       }
       // This causes an action on Substrate yet not on any Tributary
       coordinator::ProcessorMessage::SignedSlashReport { session, signature } => {
-        let set = ExternalValidatorSet { network, session: *session };
+        let set = ValidatorSet { network, session: *session };
         let signature: &[u8] = signature.as_ref();
-        let signature = <[u8; 64]>::try_from(signature).unwrap();
-        let signature: serai_client::Signature = signature.into();
+        let signature = serai_client::Signature(signature.try_into().unwrap());

         let slashes = crate::tributary::SlashReport::get(&txn, set)
           .expect("signed slash report despite not having slash report locally");
-        let slashes_pubs = slashes
-          .iter()
-          .map(|(address, points)| (Public::from(*address), *points))
-          .collect::<Vec<_>>();
+        let slashes_pubs =
+          slashes.iter().map(|(address, points)| (Public(*address), *points)).collect::<Vec<_>>();

         let tx = serai_client::SeraiValidatorSets::report_slashes(
           network,

@@ -289,7 +279,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
             .collect::<Vec<_>>()
             .try_into()
             .unwrap(),
-          signature,
+          signature.clone(),
         );

         loop {

@@ -400,7 +390,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
     if let Some(relevant_tributary_value) = relevant_tributary {
       if RetiredTributaryDb::get(
         &txn,
-        ExternalValidatorSet { network: msg.network, session: relevant_tributary_value },
+        ValidatorSet { network: msg.network, session: relevant_tributary_value },
       )
       .is_some()
       {

@@ -428,125 +418,33 @@ async fn handle_processor_message<D: Db, P: P2p>(

   let txs = match msg.msg.clone() {
     ProcessorMessage::KeyGen(inner_msg) => match inner_msg {
-      key_gen::ProcessorMessage::Commitments { id, commitments } => {
-        vec![Transaction::DkgCommitments {
-          attempt: id.attempt,
-          commitments,
-          signed: Transaction::empty_signed(),
-        }]
+      key_gen::ProcessorMessage::Participation { session, participation } => {
+        assert_eq!(session, spec.set().session);
+        vec![Transaction::DkgParticipation { participation, signed: Transaction::empty_signed() }]
       }
-      key_gen::ProcessorMessage::InvalidCommitments { id, faulty } => {
-        // This doesn't have guaranteed timing
-        //
-        // While the party *should* be fatally slashed and not included in future attempts,
-        // they'll actually be fatally slashed (assuming liveness before the Tributary retires)
-        // and not included in future attempts *which begin after the latency window completes*
-        let participant = spec
-          .reverse_lookup_i(
-            &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
-              .expect("participating in DKG attempt yet we didn't save who was removed"),
-            faulty,
-          )
-          .unwrap();
-        vec![Transaction::RemoveParticipantDueToDkg {
-          participant,
-          signed: Transaction::empty_signed(),
-        }]
-      }
-      key_gen::ProcessorMessage::Shares { id, mut shares } => {
-        // Create a MuSig-based machine to inform Substrate of this key generation
-        let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, id.attempt);
-
-        let removed = crate::tributary::removed_as_of_dkg_attempt(&txn, genesis, id.attempt)
-          .expect("participating in a DKG attempt yet we didn't track who was removed yet?");
-        let our_i = spec
-          .i(&removed, pub_key)
-          .expect("processor message to DKG for an attempt we aren't a validator in");
-
-        // `tx_shares` needs to be done here as while it can be serialized from the HashMap
-        // without further context, it can't be deserialized without context
-        let mut tx_shares = Vec::with_capacity(shares.len());
-        for shares in &mut shares {
-          tx_shares.push(vec![]);
-          for i in 1 ..= spec.n(&removed) {
-            let i = Participant::new(i).unwrap();
-            if our_i.contains(&i) {
-              if shares.contains_key(&i) {
-                panic!("processor sent us our own shares");
-              }
-              continue;
-            }
-            tx_shares.last_mut().unwrap().push(
-              shares.remove(&i).expect("processor didn't send share for another validator"),
-            );
-          }
-        }
-
-        vec![Transaction::DkgShares {
-          attempt: id.attempt,
-          shares: tx_shares,
-          confirmation_nonces: nonces,
-          signed: Transaction::empty_signed(),
-        }]
-      }
-      key_gen::ProcessorMessage::InvalidShare { id, accuser, faulty, blame } => {
-        vec![Transaction::InvalidDkgShare {
-          attempt: id.attempt,
-          accuser,
-          faulty,
-          blame,
-          signed: Transaction::empty_signed(),
-        }]
-      }
-      key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => {
-        // TODO2: Check the KeyGenId fields
-
-        // Tell the Tributary the key pair, get back the share for the MuSig signature
-        let share = crate::tributary::generated_key_pair::<D>(
+      key_gen::ProcessorMessage::GeneratedKeyPair { session, substrate_key, network_key } => {
+        assert_eq!(session, spec.set().session);
+        crate::tributary::generated_key_pair::<D>(
           &mut txn,
-          key,
-          spec,
-          &KeyPair(Public::from(substrate_key), network_key.try_into().unwrap()),
-          id.attempt,
+          genesis,
+          &KeyPair(Public(substrate_key), network_key.try_into().unwrap()),
         );

-        // TODO: Move this into generated_key_pair?
-        match share {
-          Ok(share) => {
-            vec![Transaction::DkgConfirmed {
-              attempt: id.attempt,
-              confirmation_share: share,
-              signed: Transaction::empty_signed(),
-            }]
-          }
-          Err(p) => {
-            let participant = spec
-              .reverse_lookup_i(
-                &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
-                  .expect("participating in DKG attempt yet we didn't save who was removed"),
-                p,
-              )
-              .unwrap();
-            vec![Transaction::RemoveParticipantDueToDkg {
-              participant,
-              signed: Transaction::empty_signed(),
-            }]
-          }
-        }
-      }
-      key_gen::ProcessorMessage::Blame { id, participant } => {
-        let participant = spec
-          .reverse_lookup_i(
-            &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
-              .expect("participating in DKG attempt yet we didn't save who was removed"),
-            participant,
-          )
-          .unwrap();
-        vec![Transaction::RemoveParticipantDueToDkg {
-          participant,
+        // Create a MuSig-based machine to inform Substrate of this key generation
+        let confirmation_nonces =
+          crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, 0);
+        vec![Transaction::DkgConfirmationNonces {
+          attempt: 0,
+          confirmation_nonces,
           signed: Transaction::empty_signed(),
         }]
       }
+      key_gen::ProcessorMessage::Blame { session, participant } => {
+        assert_eq!(session, spec.set().session);
+        let participant = spec.reverse_lookup_i(participant).unwrap();
+        vec![Transaction::RemoveParticipant { participant, signed: Transaction::empty_signed() }]
+      }
     },
     ProcessorMessage::Sign(msg) => match msg {
       sign::ProcessorMessage::InvalidParticipant { .. } => {

@@ -789,7 +687,7 @@ async fn handle_processor_messages<D: Db, Pro: Processors, P: P2p>(
  processors: Pro,
  p2p: P,
  cosign_channel: mpsc::UnboundedSender<CosignedBlock>,
- network: ExternalNetworkId,
+ network: NetworkId,
  mut tributary_event: mpsc::UnboundedReceiver<TributaryEvent<D, P>>,
) {
  let mut tributaries = HashMap::new();
@@ -838,7 +736,7 @@ async fn handle_processor_messages<D: Db, Pro: Processors, P: P2p>(
#[allow(clippy::too_many_arguments)]
async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
  mut db: D,
- network: ExternalNetworkId,
+ network: NetworkId,
  mut tributary_event: mpsc::UnboundedReceiver<TributaryEvent<D, P>>,
) {
  let mut tributaries = HashMap::new();
@@ -912,7 +810,7 @@ async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
  for batch in start_id ..= last_id {
    let is_pre_handover = LookupHandoverBatchDb::get(&txn, network, batch + 1);
    if let Some(session) = is_pre_handover {
-     let set = ExternalValidatorSet { network, session };
+     let set = ValidatorSet { network, session };
      let mut queued = QueuedBatchesDb::take(&mut txn, set);
      // is_handover_batch is only set for handover `Batch`s we're participating in, making
      // this safe
@@ -930,8 +828,7 @@ async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(

    let is_handover = LookupHandoverBatchDb::get(&txn, network, batch);
    if let Some(session) = is_handover {
-     for queued in QueuedBatchesDb::take(&mut txn, ExternalValidatorSet { network, session })
-     {
+     for queued in QueuedBatchesDb::take(&mut txn, ValidatorSet { network, session }) {
        to_publish.push((session, queued));
      }
    }
@@ -978,7 +875,10 @@ pub async fn handle_processors<D: Db, Pro: Processors, P: P2p>(
  mut tributary_event: broadcast::Receiver<TributaryEvent<D, P>>,
) {
  let mut channels = HashMap::new();
- for network in serai_client::primitives::EXTERNAL_NETWORKS {
+ for network in serai_client::primitives::NETWORKS {
+   if network == NetworkId::Serai {
+     continue;
+   }
    let (processor_send, processor_recv) = mpsc::unbounded_channel();
    tokio::spawn(handle_processor_messages(
      db.clone(),
@@ -1200,7 +1100,7 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
    }
  });

- move |set: ExternalValidatorSet, genesis, id_type, id: Vec<u8>| {
+ move |set: ValidatorSet, genesis, id_type, id: Vec<u8>| {
    log::debug!("recognized ID {:?} {}", id_type, hex::encode(&id));
    let mut raw_db = raw_db.clone();
    let key = key.clone();
@@ -11,9 +11,7 @@ use rand_core::{RngCore, OsRng};

use scale::{Decode, Encode};
use borsh::{BorshSerialize, BorshDeserialize};
- use serai_client::{
-   primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet, Serai,
- };
+ use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet, Serai};

use serai_db::Db;

@@ -71,7 +69,7 @@ const BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]
pub struct CosignedBlock {
- pub network: ExternalNetworkId,
+ pub network: NetworkId,
  pub block_number: u64,
  pub block: [u8; 32],
  pub signature: [u8; 64],
@@ -210,8 +208,8 @@ pub struct HeartbeatBatch {
pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p {
  type Id: Send + Sync + Clone + Copy + fmt::Debug;

- async fn subscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]);
+ async fn subscribe(&self, set: ValidatorSet, genesis: [u8; 32]);
- async fn unsubscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]);
+ async fn unsubscribe(&self, set: ValidatorSet, genesis: [u8; 32]);

  async fn send_raw(&self, to: Self::Id, msg: Vec<u8>);
  async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>);
@@ -311,7 +309,7 @@ struct Behavior {
#[allow(clippy::type_complexity)]
#[derive(Clone)]
pub struct LibP2p {
- subscribe: Arc<Mutex<mpsc::UnboundedSender<(bool, ExternalValidatorSet, [u8; 32])>>>,
+ subscribe: Arc<Mutex<mpsc::UnboundedSender<(bool, ValidatorSet, [u8; 32])>>>,
  send: Arc<Mutex<mpsc::UnboundedSender<(PeerId, Vec<u8>)>>>,
  broadcast: Arc<Mutex<mpsc::UnboundedSender<(P2pMessageKind, Vec<u8>)>>>,
  receive: Arc<Mutex<mpsc::UnboundedReceiver<Message<Self>>>>,
@@ -399,7 +397,7 @@ impl LibP2p {
  let (receive_send, receive_recv) = mpsc::unbounded_channel();
  let (subscribe_send, mut subscribe_recv) = mpsc::unbounded_channel();

- fn topic_for_set(set: ExternalValidatorSet) -> IdentTopic {
+ fn topic_for_set(set: ValidatorSet) -> IdentTopic {
    IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode())))
  }
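// For orientation, a minimal standalone sketch of the topic derivation above, with a stand-in
// for the SCALE encoding (the real code encodes the set via the `scale` crate) and an assumed
// prefix; the names here are illustrative, not this module's API:
//
// fn topic_for(prefix: &str, encoded_set: &[u8]) -> String {
//   format!("{prefix}-{}", hex::encode(encoded_set))
// }
//
// // A hypothetical two-byte encoding yields "serai-coordinator-0001"
// assert_eq!(topic_for("serai-coordinator", &[0x00, 0x01]), "serai-coordinator-0001");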

@@ -409,8 +407,7 @@ impl LibP2p {
  // The addrs we're currently dialing, and the networks associated with them
  let dialing_peers = Arc::new(RwLock::new(HashMap::new()));
  // The peers we're currently connected to, and the networks associated with them
- let connected_peers =
-   Arc::new(RwLock::new(HashMap::<Multiaddr, HashSet<ExternalNetworkId>>::new()));
+ let connected_peers = Arc::new(RwLock::new(HashMap::<Multiaddr, HashSet<NetworkId>>::new()));

  // Find and connect to peers
  let (connect_to_network_send, mut connect_to_network_recv) =
@@ -423,7 +420,7 @@ impl LibP2p {
  let connect_to_network_send = connect_to_network_send.clone();
  async move {
    loop {
-     let connect = |network: ExternalNetworkId, addr: Multiaddr| {
+     let connect = |network: NetworkId, addr: Multiaddr| {
        let dialing_peers = dialing_peers.clone();
        let connected_peers = connected_peers.clone();
        let to_dial_send = to_dial_send.clone();
@@ -510,7 +507,7 @@ impl LibP2p {
    connect_to_network_networks.insert(network);
  }
  for network in connect_to_network_networks {
-   if let Ok(mut nodes) = serai.p2p_validators(network.into()).await {
+   if let Ok(mut nodes) = serai.p2p_validators(network).await {
      // If there's an insufficient amount of nodes known, connect to all yet add it
      // back and break
      if nodes.len() < TARGET_PEERS {
@@ -560,7 +557,7 @@ impl LibP2p {

  // Subscribe to any new topics
  set = subscribe_recv.recv() => {
-   let (subscribe, set, genesis): (_, ExternalValidatorSet, [u8; 32]) =
+   let (subscribe, set, genesis): (_, ValidatorSet, [u8; 32]) =
      set.expect("subscribe_recv closed. are we shutting down?");
    let topic = topic_for_set(set);
    if subscribe {
@@ -779,7 +776,7 @@ impl LibP2p {
impl P2p for LibP2p {
  type Id = PeerId;

- async fn subscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]) {
+ async fn subscribe(&self, set: ValidatorSet, genesis: [u8; 32]) {
    self
      .subscribe
      .lock()
@@ -788,7 +785,7 @@ impl P2p for LibP2p {
      .expect("subscribe_send closed. are we shutting down?");
  }

- async fn unsubscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]) {
+ async fn unsubscribe(&self, set: ValidatorSet, genesis: [u8; 32]) {
    self
      .subscribe
      .lock()
@@ -1,6 +1,6 @@
use std::sync::Arc;

- use serai_client::primitives::ExternalNetworkId;
+ use serai_client::primitives::NetworkId;
use processor_messages::{ProcessorMessage, CoordinatorMessage};

use message_queue::{Service, Metadata, client::MessageQueue};
@@ -8,27 +8,27 @@ use message_queue::{Service, Metadata, client::MessageQueue};
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Message {
  pub id: u64,
- pub network: ExternalNetworkId,
+ pub network: NetworkId,
  pub msg: ProcessorMessage,
}

#[async_trait::async_trait]
pub trait Processors: 'static + Send + Sync + Clone {
- async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>);
+ async fn send(&self, network: NetworkId, msg: impl Send + Into<CoordinatorMessage>);
- async fn recv(&self, network: ExternalNetworkId) -> Message;
+ async fn recv(&self, network: NetworkId) -> Message;
  async fn ack(&self, msg: Message);
}

#[async_trait::async_trait]
impl Processors for Arc<MessageQueue> {
- async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>) {
+ async fn send(&self, network: NetworkId, msg: impl Send + Into<CoordinatorMessage>) {
    let msg: CoordinatorMessage = msg.into();
    let metadata =
      Metadata { from: self.service, to: Service::Processor(network), intent: msg.intent() };
    let msg = borsh::to_vec(&msg).unwrap();
    self.queue(metadata, msg).await;
  }
- async fn recv(&self, network: ExternalNetworkId) -> Message {
+ async fn recv(&self, network: NetworkId) -> Message {
    let msg = self.next(Service::Processor(network)).await;
    assert_eq!(msg.from, Service::Processor(network));
@@ -1,338 +0,0 @@
/*
  If:
    A) This block has events and it's been at least X blocks since the last cosign or
    B) This block doesn't have events but it's been X blocks since a skipped block which did
       have events or
    C) This block key gens (which changes who the cosigners are)
  cosign this block.

  This creates both a minimum and maximum delay of X blocks before a block's cosigning begins,
  barring key gens which are exceptional. The minimum delay is there to ensure we don't constantly
  spawn new protocols every 6 seconds, overwriting the old ones. The maximum delay is there to
  ensure any block needing cosigning is cosigned within a reasonable amount of time.
*/

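// Read as a decision procedure, the rule above reduces roughly to a small pure function.
// A minimal sketch, where `x` is the cosign distance; the names are illustrative, not this
// module's API:
//
// fn should_start_cosign(
//   has_events: bool,
//   is_key_gen: bool,
//   blocks_since_last_cosign: u64,
//   blocks_since_skipped_block_with_events: Option<u64>,
//   x: u64,
// ) -> bool {
//   // C) key gens rotate the cosigner set, so they're always cosigned
//   is_key_gen ||
//     // A) events, once at least x blocks have passed since the last cosign
//     (has_events && (blocks_since_last_cosign >= x)) ||
//     // B) no events, but x blocks have passed since a skipped block which had events
//     ((!has_events) && (blocks_since_skipped_block_with_events == Some(x)))
// }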
use zeroize::Zeroizing;

use dalek_ff_group::Ristretto;
use ciphersuite::Ciphersuite;

use borsh::{BorshSerialize, BorshDeserialize};

use serai_client::{
  primitives::ExternalNetworkId,
  validator_sets::primitives::{ExternalValidatorSet, Session},
  Serai, SeraiError,
};

use serai_db::*;

use crate::{Db, substrate::in_set, tributary::SeraiBlockNumber};

// 5 minutes, expressed in blocks
// TODO: Pull a constant for block time
const COSIGN_DISTANCE: u64 = 5 * 60 / 6;

#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
enum HasEvents {
  KeyGen,
  Yes,
  No,
}

create_db!(
  SubstrateCosignDb {
    ScanCosignFrom: () -> u64,
    IntendedCosign: () -> (u64, Option<u64>),
    BlockHasEventsCache: (block: u64) -> HasEvents,
    LatestCosignedBlock: () -> u64,
  }
);

impl IntendedCosign {
  // Sets the intended to cosign block, clearing the prior value entirely.
  pub fn set_intended_cosign(txn: &mut impl DbTxn, intended: u64) {
    Self::set(txn, &(intended, None::<u64>));
  }

  // Sets the cosign skipped since the last intended to cosign block.
  pub fn set_skipped_cosign(txn: &mut impl DbTxn, skipped: u64) {
    let (intended, prior_skipped) = Self::get(txn).unwrap();
    assert!(prior_skipped.is_none());
    Self::set(txn, &(intended, Some(skipped)));
  }
}

impl LatestCosignedBlock {
  pub fn latest_cosigned_block(getter: &impl Get) -> u64 {
    Self::get(getter).unwrap_or_default().max(1)
  }
}

db_channel! {
  SubstrateDbChannels {
    CosignTransactions: (network: ExternalNetworkId) -> (Session, u64, [u8; 32]),
  }
}

impl CosignTransactions {
  // Append a cosign transaction.
  pub fn append_cosign(
    txn: &mut impl DbTxn,
    set: ExternalValidatorSet,
    number: u64,
    hash: [u8; 32],
  ) {
    CosignTransactions::send(txn, set.network, &(set.session, number, hash))
  }
}
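// A consumption sketch for the channel written by `append_cosign` above. This assumes
// `db_channel!` also generates a `try_recv` counterpart to the `send` used here — an
// assumption made for illustration, not something shown in this diff:
//
// fn drain_cosigns(txn: &mut impl DbTxn, network: ExternalNetworkId) {
//   // Each entry identifies one block a validator set should cosign
//   while let Some((session, number, hash)) = CosignTransactions::try_recv(txn, network) {
//     log::debug!("cosign queued: {:?} {number} {}", session, hex::encode(hash));
//   }
// }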

async fn block_has_events(
  txn: &mut impl DbTxn,
  serai: &Serai,
  block: u64,
) -> Result<HasEvents, SeraiError> {
  let cached = BlockHasEventsCache::get(txn, block);
  match cached {
    None => {
      let serai = serai.as_of(
        serai
          .finalized_block_by_number(block)
          .await?
          .expect("couldn't get block which should've been finalized")
          .hash(),
      );

      if !serai.validator_sets().key_gen_events().await?.is_empty() {
        return Ok(HasEvents::KeyGen);
      }

      let has_no_events = serai.coins().burn_with_instruction_events().await?.is_empty() &&
        serai.in_instructions().batch_events().await?.is_empty() &&
        serai.validator_sets().new_set_events().await?.is_empty() &&
        serai.validator_sets().set_retired_events().await?.is_empty();

      let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes };

      BlockHasEventsCache::set(txn, block, &has_events);
      Ok(has_events)
    }
    Some(code) => Ok(code),
  }
}

async fn potentially_cosign_block(
  txn: &mut impl DbTxn,
  serai: &Serai,
  block: u64,
  skipped_block: Option<u64>,
  window_end_exclusive: u64,
) -> Result<bool, SeraiError> {
  // The following code regarding marking cosigned if prior block is cosigned expects this block to
  // not be zero
  // While we could perform this check there, there's no reason not to optimize the entire function
  // as such
  if block == 0 {
    return Ok(false);
  }

  let block_has_events = block_has_events(txn, serai, block).await?;

  // If this block had no events and immediately follows a cosigned block, mark it as cosigned
  if (block_has_events == HasEvents::No) &&
    (LatestCosignedBlock::latest_cosigned_block(txn) == (block - 1))
  {
    log::debug!("automatically co-signing next block ({block}) since it has no events");
    LatestCosignedBlock::set(txn, &block);
  }

  // If we skipped a block, we're supposed to sign it plus the COSIGN_DISTANCE if no other blocks
  // trigger a cosigning protocol covering it
  // This means there will be the maximum delay allowed from a block needing cosigning occurring
  // and a cosign for it triggering
  let maximally_latent_cosign_block =
    skipped_block.map(|skipped_block| skipped_block + COSIGN_DISTANCE);

  // If this block is within the window,
  if block < window_end_exclusive {
    // and set a key, cosign it
    if block_has_events == HasEvents::KeyGen {
      IntendedCosign::set_intended_cosign(txn, block);
      // Carry skipped if it isn't included by cosigning this block
      if let Some(skipped) = skipped_block {
        if skipped > block {
          IntendedCosign::set_skipped_cosign(txn, block);
        }
      }
      return Ok(true);
    }
  } else if (Some(block) == maximally_latent_cosign_block) || (block_has_events != HasEvents::No) {
    // Since this block was outside the window and had events/was maximally latent, cosign it
    IntendedCosign::set_intended_cosign(txn, block);
    return Ok(true);
  }
  Ok(false)
}

/*
  Advances the cosign protocol as should be done per the latest block.

  A block is considered cosigned if:
    A) It was cosigned
    B) It's the parent of a cosigned block
    C) It immediately follows a cosigned block and has no events requiring cosigning

  This only actually performs advancement within a limited bound (generally until it finds a block
  which should be cosigned). Accordingly, it is necessary to call multiple times even if
  `latest_number` doesn't change.
*/
async fn advance_cosign_protocol_inner(
  db: &mut impl Db,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  serai: &Serai,
  latest_number: u64,
) -> Result<(), SeraiError> {
  let mut txn = db.txn();

  const INITIAL_INTENDED_COSIGN: u64 = 1;
  let (last_intended_to_cosign_block, mut skipped_block) = {
    let intended_cosign = IntendedCosign::get(&txn);
    // If we haven't prior intended to cosign a block, set the intended cosign to 1
    if let Some(intended_cosign) = intended_cosign {
      intended_cosign
    } else {
      IntendedCosign::set_intended_cosign(&mut txn, INITIAL_INTENDED_COSIGN);
      IntendedCosign::get(&txn).unwrap()
    }
  };

  // "windows" refers to the window of blocks where even if there's a block which should be
  // cosigned, it won't be due to proximity due to the prior cosign
  let mut window_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE;
  // If we've never triggered a cosign, don't skip any cosigns based on proximity
  if last_intended_to_cosign_block == INITIAL_INTENDED_COSIGN {
    window_end_exclusive = 1;
  }

  // The consensus rules for this are `last_intended_to_cosign_block + 1`
  let scan_start_block = last_intended_to_cosign_block + 1;
  // As a practical optimization, we don't re-scan old blocks since old blocks are independent of
  // new state
  let scan_start_block = scan_start_block.max(ScanCosignFrom::get(&txn).unwrap_or(1));

  // Check all blocks within the window to see if they should be cosigned
  // If so, we're skipping them and need to flag them as skipped so that once the window closes, we
  // do cosign them
  // We only perform this check if we haven't already marked a block as skipped since the cosign
  // the skipped block will cause will cosign all other blocks within this window
  if skipped_block.is_none() {
    let window_end_inclusive = window_end_exclusive - 1;
    for b in scan_start_block ..= window_end_inclusive.min(latest_number) {
      if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes {
        skipped_block = Some(b);
        log::debug!("skipping cosigning {b} due to proximity to prior cosign");
        IntendedCosign::set_skipped_cosign(&mut txn, b);
        break;
      }
    }
  }

  // A block which should be cosigned
  let mut to_cosign = None;
  // A list of sets which are cosigning, along with a boolean of if we're in the set
  let mut cosigning = vec![];

  for block in scan_start_block ..= latest_number {
    let actual_block = serai
      .finalized_block_by_number(block)
      .await?
      .expect("couldn't get block which should've been finalized");

    // Save the block number for this block, as needed by the cosigner to perform cosigning
    SeraiBlockNumber::set(&mut txn, actual_block.hash(), &block);

    if potentially_cosign_block(&mut txn, serai, block, skipped_block, window_end_exclusive).await?
    {
      to_cosign = Some((block, actual_block.hash()));

      // Get the keys as of the prior block
      // If this block sets new keys, the coordinator won't acknowledge so until we process this
      // block
      // We won't process this block until it's co-signed
      // Using the keys of the prior block ensures this deadlock isn't reached
      let serai = serai.as_of(actual_block.header.parent_hash.into());

      for network in serai_client::primitives::EXTERNAL_NETWORKS {
        // Get the latest session to have set keys
        let set_with_keys = {
          let Some(latest_session) = serai.validator_sets().session(network.into()).await? else {
            continue;
          };
          let prior_session = Session(latest_session.0.saturating_sub(1));
          if serai
            .validator_sets()
            .keys(ExternalValidatorSet { network, session: prior_session })
            .await?
            .is_some()
          {
            ExternalValidatorSet { network, session: prior_session }
          } else {
            let set = ExternalValidatorSet { network, session: latest_session };
            if serai.validator_sets().keys(set).await?.is_none() {
              continue;
            }
            set
          }
        };

        log::debug!("{:?} will be cosigning {block}", set_with_keys.network);
        cosigning.push((set_with_keys, in_set(key, &serai, set_with_keys.into()).await?.unwrap()));
      }

      break;
    }

    // If this TX is committed, always start future scanning from the next block
    ScanCosignFrom::set(&mut txn, &(block + 1));
    // Since we're scanning *from* the next block, tidy the cache
    BlockHasEventsCache::del(&mut txn, block);
  }

  if let Some((number, hash)) = to_cosign {
    // If this block doesn't have cosigners, yet does have events, automatically mark it as
    // cosigned
    if cosigning.is_empty() {
      log::debug!("{} had no cosigners available, marking as cosigned", number);
      LatestCosignedBlock::set(&mut txn, &number);
    } else {
      for (set, in_set) in cosigning {
        if in_set {
          log::debug!("cosigning {number} with {:?} {:?}", set.network, set.session);
          CosignTransactions::append_cosign(&mut txn, set, number, hash);
        }
      }
    }
  }
  txn.commit();

  Ok(())
}
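// A worked instance of the window arithmetic above, under this file's own constant
// (COSIGN_DISTANCE = 5 * 60 / 6 = 50 at a 6-second block time); the values are illustrative:
//
// let last_intended_to_cosign_block = 100u64;
// // The proximity window covers blocks strictly below 150
// assert_eq!(last_intended_to_cosign_block + COSIGN_DISTANCE, 150);
// // A block with events at 120 is skipped, and is cosigned by block 170 at the latest
// assert_eq!(120u64 + COSIGN_DISTANCE, 170);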

pub async fn advance_cosign_protocol(
  db: &mut impl Db,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  serai: &Serai,
  latest_number: u64,
) -> Result<(), SeraiError> {
  loop {
    let scan_from = ScanCosignFrom::get(db).unwrap_or(1);
    // Only scan 1000 blocks at a time to limit a massive txn from forming
    let scan_to = latest_number.min(scan_from + 1000);
    advance_cosign_protocol_inner(db, key, serai, scan_to).await?;
    // If we didn't limit the scan_to, break
    if scan_to == latest_number {
      break;
    }
  }
  Ok(())
}
@@ -1,4 +1,4 @@
- use serai_client::primitives::ExternalNetworkId;
+ use serai_client::primitives::NetworkId;

pub use serai_db::*;

@@ -9,7 +9,7 @@ mod inner_db {
  SubstrateDb {
    NextBlock: () -> u64,
    HandledEvent: (block: [u8; 32]) -> u32,
-   BatchInstructionsHashDb: (network: ExternalNetworkId, id: u32) -> [u8; 32]
+   BatchInstructionsHashDb: (network: NetworkId, id: u32) -> [u8; 32]
  }
);
}
@@ -6,18 +6,14 @@ use std::{

use zeroize::Zeroizing;

- use dalek_ff_group::Ristretto;
- use ciphersuite::{group::GroupEncoding, Ciphersuite};
+ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};

use serai_client::{
- coins::CoinsEvent,
+ SeraiError, Block, Serai, TemporalSerai,
+ primitives::{BlockHash, EmbeddedEllipticCurve, NetworkId},
+ validator_sets::{primitives::ValidatorSet, ValidatorSetsEvent},
  in_instructions::InInstructionsEvent,
- primitives::{BlockHash, ExternalNetworkId},
+ coins::CoinsEvent,
- validator_sets::{
-   primitives::{ExternalValidatorSet, ValidatorSet},
-   ValidatorSetsEvent,
- },
- Block, Serai, SeraiError, TemporalSerai,
};

use serai_db::DbTxn;
@@ -56,21 +52,54 @@ async fn handle_new_set<D: Db>(
  new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
  serai: &Serai,
  block: &Block,
- set: ExternalValidatorSet,
+ set: ValidatorSet,
) -> Result<(), SeraiError> {
- if in_set(key, &serai.as_of(block.hash()), set.into())
+ if in_set(key, &serai.as_of(block.hash()), set)
    .await?
    .expect("NewSet for set which doesn't exist")
  {
    log::info!("present in set {:?}", set);

-   let set_data = {
+   let validators;
+   let mut evrf_public_keys = vec![];
+   {
      let serai = serai.as_of(block.hash());
      let serai = serai.validator_sets();
      let set_participants =
-       serai.participants(set.network.into()).await?.expect("NewSet for set which doesn't exist");
+       serai.participants(set.network).await?.expect("NewSet for set which doesn't exist");

-     set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::<Vec<_>>()
+     validators = set_participants
+       .iter()
+       .map(|(k, w)| {
+         (
+           <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut k.0.as_ref())
+             .expect("invalid key registered as participant"),
+           u16::try_from(*w).unwrap(),
+         )
+       })
+       .collect::<Vec<_>>();
+     for (validator, _) in set_participants {
+       // This is only run for external networks which always do a DKG for Serai
+       let substrate = serai
+         .embedded_elliptic_curve_key(validator, EmbeddedEllipticCurve::Embedwards25519)
+         .await?
+         .expect("Serai called NewSet on a validator without an Embedwards25519 key");
+       // `embedded_elliptic_curves` is documented to have the second entry be the
+       // network-specific curve (if it exists and is distinct from Embedwards25519)
+       let network =
+         if let Some(embedded_elliptic_curve) = set.network.embedded_elliptic_curves().get(1) {
+           serai.embedded_elliptic_curve_key(validator, *embedded_elliptic_curve).await?.expect(
+             "Serai called NewSet on a validator without the embedded key required for the network",
+           )
+         } else {
+           substrate.clone()
+         };
+       evrf_public_keys.push((
+         <[u8; 32]>::try_from(substrate)
+           .expect("validator-sets pallet accepted a key of an invalid length"),
+         network,
+       ));
+     }
    };

    let time = if let Ok(time) = block.time() {
@@ -94,7 +123,7 @@ async fn handle_new_set<D: Db>(
    const SUBSTRATE_TO_TRIBUTARY_TIME_DELAY: u64 = 120;
    let time = time + SUBSTRATE_TO_TRIBUTARY_TIME_DELAY;

-   let spec = TributarySpec::new(block.hash(), time, set, set_data);
+   let spec = TributarySpec::new(block.hash(), time, set, validators, evrf_public_keys);

    log::info!("creating new tributary for {:?}", spec.set());

@@ -135,7 +164,7 @@ async fn handle_batch_and_burns<Pro: Processors>(
  };

  let mut batch_block = HashMap::new();
- let mut batches = HashMap::<ExternalNetworkId, Vec<u32>>::new();
+ let mut batches = HashMap::<NetworkId, Vec<u32>>::new();
  let mut burns = HashMap::new();

  let serai = serai.as_of(block.hash());
@@ -209,8 +238,8 @@ async fn handle_block<D: Db, Pro: Processors>(
  db: &mut D,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
- perform_slash_report: &mpsc::UnboundedSender<ExternalValidatorSet>,
+ perform_slash_report: &mpsc::UnboundedSender<ValidatorSet>,
- tributary_retired: &mpsc::UnboundedSender<ExternalValidatorSet>,
+ tributary_retired: &mpsc::UnboundedSender<ValidatorSet>,
  processors: &Pro,
  serai: &Serai,
  block: Block,
@@ -230,8 +259,12 @@ async fn handle_block<D: Db, Pro: Processors>(
  panic!("NewSet event wasn't NewSet: {new_set:?}");
};

+ // If this is Serai, do nothing
  // We only coordinate/process external networks
- let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
+ if set.network == NetworkId::Serai {
+   continue;
+ }

if HandledEvent::is_unhandled(db, hash, event_id) {
  log::info!("found fresh new set event {:?}", new_set);
  let mut txn = db.txn();
@@ -286,7 +319,10 @@ async fn handle_block<D: Db, Pro: Processors>(
  panic!("AcceptedHandover event wasn't AcceptedHandover: {accepted_handover:?}");
};

- let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
+ if set.network == NetworkId::Serai {
+   continue;
+ }

if HandledEvent::is_unhandled(db, hash, event_id) {
  log::info!("found fresh accepted handover event {:?}", accepted_handover);
  // TODO: This isn't atomic with the event handling
@@ -304,7 +340,10 @@ async fn handle_block<D: Db, Pro: Processors>(
  panic!("SetRetired event wasn't SetRetired: {retired_set:?}");
};

- let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
+ if set.network == NetworkId::Serai {
+   continue;
+ }

if HandledEvent::is_unhandled(db, hash, event_id) {
  log::info!("found fresh set retired event {:?}", retired_set);
  let mut txn = db.txn();
@@ -334,8 +373,8 @@ async fn handle_new_blocks<D: Db, Pro: Processors>(
  db: &mut D,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
- perform_slash_report: &mpsc::UnboundedSender<ExternalValidatorSet>,
+ perform_slash_report: &mpsc::UnboundedSender<ValidatorSet>,
- tributary_retired: &mpsc::UnboundedSender<ExternalValidatorSet>,
+ tributary_retired: &mpsc::UnboundedSender<ValidatorSet>,
  processors: &Pro,
  serai: &Serai,
  next_block: &mut u64,
@@ -389,8 +428,8 @@ pub async fn scan_task<D: Db, Pro: Processors>(
  processors: Pro,
  serai: Arc<Serai>,
  new_tributary_spec: mpsc::UnboundedSender<TributarySpec>,
- perform_slash_report: mpsc::UnboundedSender<ExternalValidatorSet>,
+ perform_slash_report: mpsc::UnboundedSender<ValidatorSet>,
- tributary_retired: mpsc::UnboundedSender<ExternalValidatorSet>,
+ tributary_retired: mpsc::UnboundedSender<ValidatorSet>,
) {
  log::info!("scanning substrate");
  let mut next_substrate_block = NextBlock::get(&db).unwrap_or_default();
@@ -488,12 +527,9 @@ pub async fn scan_task<D: Db, Pro: Processors>(
/// retry.
pub(crate) async fn expected_next_batch(
  serai: &Serai,
- network: ExternalNetworkId,
+ network: NetworkId,
) -> Result<u32, SeraiError> {
- async fn expected_next_batch_inner(
-   serai: &Serai,
-   network: ExternalNetworkId,
- ) -> Result<u32, SeraiError> {
+ async fn expected_next_batch_inner(serai: &Serai, network: NetworkId) -> Result<u32, SeraiError> {
    let serai = serai.as_of_latest_finalized_block().await?;
    let last = serai.in_instructions().last_batch_for_network(network).await?;
    Ok(if let Some(last) = last { last + 1 } else { 0 })
@@ -516,7 +552,7 @@ pub(crate) async fn expected_next_batch(
/// This is deemed fine.
pub(crate) async fn verify_published_batches<D: Db>(
  txn: &mut D::Transaction<'_>,
- network: ExternalNetworkId,
+ network: NetworkId,
  optimistic_up_to: u32,
) -> Option<u32> {
  // TODO: Localize from MainDb to SubstrateDb
@@ -4,7 +4,7 @@ use std::{
  collections::{VecDeque, HashSet, HashMap},
};

- use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet};
+ use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};

use processor_messages::CoordinatorMessage;

@@ -20,7 +20,7 @@ use crate::{
pub mod tributary;

#[derive(Clone)]
- pub struct MemProcessors(pub Arc<RwLock<HashMap<ExternalNetworkId, VecDeque<CoordinatorMessage>>>>);
+ pub struct MemProcessors(pub Arc<RwLock<HashMap<NetworkId, VecDeque<CoordinatorMessage>>>>);
impl MemProcessors {
  #[allow(clippy::new_without_default)]
  pub fn new() -> MemProcessors {
@@ -30,12 +30,12 @@ impl MemProcessors {

#[async_trait::async_trait]
impl Processors for MemProcessors {
- async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>) {
+ async fn send(&self, network: NetworkId, msg: impl Send + Into<CoordinatorMessage>) {
    let mut processors = self.0.write().await;
    let processor = processors.entry(network).or_insert_with(VecDeque::new);
    processor.push_back(msg.into());
  }
- async fn recv(&self, _: ExternalNetworkId) -> Message {
+ async fn recv(&self, _: NetworkId) -> Message {
    todo!()
  }
  async fn ack(&self, _: Message) {
@@ -65,8 +65,8 @@ impl LocalP2p {
impl P2p for LocalP2p {
  type Id = usize;

- async fn subscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {}
+ async fn subscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
- async fn unsubscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {}
+ async fn unsubscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}

  async fn send_raw(&self, to: Self::Id, msg: Vec<u8>) {
    let mut msg_ref = msg.as_slice();
@@ -7,17 +7,12 @@ use zeroize::Zeroizing;
use rand_core::{RngCore, CryptoRng, OsRng};
use futures_util::{task::Poll, poll};

- use dalek_ff_group::Ristretto;
- use ciphersuite::{
-   group::{ff::Field, GroupEncoding},
-   Ciphersuite,
- };
+ use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};

- use sp_application_crypto::sr25519;
use borsh::BorshDeserialize;
use serai_client::{
- primitives::ExternalNetworkId,
+ primitives::NetworkId,
- validator_sets::primitives::{ExternalValidatorSet, Session},
+ validator_sets::primitives::{Session, ValidatorSet},
};

use tokio::time::sleep;
@@ -51,16 +46,24 @@ pub fn new_spec<R: RngCore + CryptoRng>(

  let start_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();

- let set = ExternalValidatorSet { session: Session(0), network: ExternalNetworkId::Bitcoin };
+ let set = ValidatorSet { session: Session(0), network: NetworkId::Bitcoin };

- let set_participants = keys
+ let validators = keys
    .iter()
-   .map(|key| {
+   .map(|key| ((<Ristretto as Ciphersuite>::generator() * **key), 1))
-     (sr25519::Public::from((<Ristretto as Ciphersuite>::generator() * **key).to_bytes()), 1)
-   })
    .collect::<Vec<_>>();

- let res = TributarySpec::new(serai_block, start_time, set, set_participants);
+ // Generate random eVRF keys as none of these tests rely on them to have any structure
+ let mut evrf_keys = vec![];
+ for _ in 0 .. keys.len() {
+   let mut substrate = [0; 32];
+   OsRng.fill_bytes(&mut substrate);
+   let mut network = vec![0; 64];
+   OsRng.fill_bytes(&mut network);
+   evrf_keys.push((substrate, network));
+ }

+ let res = TributarySpec::new(serai_block, start_time, set, validators, evrf_keys);
  assert_eq!(
    TributarySpec::deserialize_reader(&mut borsh::to_vec(&res).unwrap().as_slice()).unwrap(),
    res,
@@ -1,27 +1,22 @@
use core::time::Duration;
- use std::collections::HashMap;

use zeroize::Zeroizing;
use rand_core::{RngCore, OsRng};

- use dalek_ff_group::Ristretto;
- use ciphersuite::{group::GroupEncoding, Ciphersuite};
+ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use frost::Participant;

use sp_runtime::traits::Verify;
use serai_client::{
- primitives::{SeraiAddress, Signature},
+ primitives::Signature,
- validator_sets::primitives::{ExternalValidatorSet, KeyPair},
+ validator_sets::primitives::{ValidatorSet, KeyPair},
};

use tokio::time::sleep;

use serai_db::{Get, DbTxn, Db, MemDb};

- use processor_messages::{
-   key_gen::{self, KeyGenId},
-   CoordinatorMessage,
- };
+ use processor_messages::{key_gen, CoordinatorMessage};

use tributary::{TransactionTrait, Tributary};

@@ -55,44 +50,41 @@ async fn dkg_test() {
  tokio::spawn(run_tributaries(tributaries.clone()));

  let mut txs = vec![];
- // Create DKG commitments for each key
+ // Create DKG participation for each key
  for key in &keys {
-   let attempt = 0;
+   let mut participation = vec![0; 4096];
-   let mut commitments = vec![0; 256];
+   OsRng.fill_bytes(&mut participation);
-   OsRng.fill_bytes(&mut commitments);

-   let mut tx = Transaction::DkgCommitments {
+   let mut tx =
-     attempt,
+     Transaction::DkgParticipation { participation, signed: Transaction::empty_signed() };
-     commitments: vec![commitments],
-     signed: Transaction::empty_signed(),
-   };
    tx.sign(&mut OsRng, spec.genesis(), key);
    txs.push(tx);
  }

  let block_before_tx = tributaries[0].1.tip().await;

- // Publish all commitments but one
+ // Publish t-1 participations
- for (i, tx) in txs.iter().enumerate().skip(1) {
+ let t = ((keys.len() * 2) / 3) + 1;
+ for (i, tx) in txs.iter().take(t - 1).enumerate() {
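+   // The threshold here is the BFT bound t = floor(2n / 3) + 1; with n = 5 keys, for example,
+   // t = ((5 * 2) / 3) + 1 = 4, so t - 1 = 3 participations are published at this point and
+   // the rest of the participations are published further below.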
assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
|
assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
|
||||||
}
|
|
||||||
|
|
||||||
// Wait until these are included
|
|
||||||
for tx in txs.iter().skip(1) {
|
|
||||||
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
|
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
|
||||||
}
|
}
|
||||||
|
|
||||||
let expected_commitments: HashMap<_, _> = txs
|
let expected_participations = txs
|
||||||
.iter()
|
.iter()
|
||||||
.enumerate()
|
.enumerate()
|
||||||
.map(|(i, tx)| {
|
.map(|(i, tx)| {
|
||||||
if let Transaction::DkgCommitments { commitments, .. } = tx {
|
if let Transaction::DkgParticipation { participation, .. } = tx {
|
||||||
(Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments[0].clone())
|
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Participation {
|
||||||
|
session: spec.set().session,
|
||||||
|
participant: Participant::new((i + 1).try_into().unwrap()).unwrap(),
|
||||||
|
participation: participation.clone(),
|
||||||
|
})
|
||||||
} else {
|
} else {
|
||||||
panic!("txs had non-commitments");
|
panic!("txs wasn't a DkgParticipation");
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
.collect();
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
async fn new_processors(
|
async fn new_processors(
|
||||||
db: &mut MemDb,
|
db: &mut MemDb,
|
||||||
@@ -121,28 +113,30 @@ async fn dkg_test() {
|
|||||||
processors
|
processors
|
||||||
}
|
}
|
||||||
|
|
||||||
// Instantiate a scanner and verify it has nothing to report
|
// Instantiate a scanner and verify it has the first two participations to report (and isn't
|
||||||
|
// waiting for `t`)
|
||||||
let processors = new_processors(&mut dbs[0], &keys[0], &spec, &tributaries[0].1).await;
|
let processors = new_processors(&mut dbs[0], &keys[0], &spec, &tributaries[0].1).await;
|
||||||
assert!(processors.0.read().await.is_empty());
|
assert_eq!(processors.0.read().await.get(&spec.set().network).unwrap().len(), t - 1);
|
||||||
|
|
||||||
// Publish the last commitment
|
// Publish the rest of the participations
|
||||||
let block_before_tx = tributaries[0].1.tip().await;
|
let block_before_tx = tributaries[0].1.tip().await;
|
||||||
assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));
|
for tx in txs.iter().skip(t - 1) {
|
||||||
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
|
assert_eq!(tributaries[0].1.add_transaction(tx.clone()).await, Ok(true));
|
||||||
sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;
|
wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
|
||||||
|
}
|
||||||
|
|
||||||
// Verify the scanner emits a KeyGen::Commitments message
|
// Verify the scanner emits all KeyGen::Participations messages
|
||||||
handle_new_blocks::<_, _, _, _, _, LocalP2p>(
|
handle_new_blocks::<_, _, _, _, _, LocalP2p>(
|
||||||
&mut dbs[0],
|
&mut dbs[0],
|
||||||
&keys[0],
|
&keys[0],
|
||||||
&|_, _, _, _| async {
|
&|_, _, _, _| async {
|
||||||
panic!("provided TX caused recognized_id to be called after Commitments")
|
panic!("provided TX caused recognized_id to be called after DkgParticipation")
|
||||||
},
|
},
|
||||||
&processors,
|
&processors,
|
||||||
&(),
|
&(),
|
||||||
&|_| async {
|
&|_| async {
|
||||||
panic!(
|
panic!(
|
||||||
"test tried to publish a new Tributary TX from handle_application_tx after Commitments"
|
"test tried to publish a new Tributary TX from handle_application_tx after DkgParticipation"
|
||||||
)
|
)
|
||||||
},
|
},
|
||||||
&spec,
|
&spec,
|
||||||
@@ -151,17 +145,11 @@ async fn dkg_test() {
|
|||||||
.await;
|
.await;
|
||||||
{
|
{
|
||||||
let mut msgs = processors.0.write().await;
|
let mut msgs = processors.0.write().await;
|
||||||
assert_eq!(msgs.len(), 1);
|
|
||||||
let msgs = msgs.get_mut(&spec.set().network).unwrap();
|
let msgs = msgs.get_mut(&spec.set().network).unwrap();
|
||||||
let mut expected_commitments = expected_commitments.clone();
|
assert_eq!(msgs.len(), keys.len());
|
||||||
expected_commitments.remove(&Participant::new((1).try_into().unwrap()).unwrap());
|
for expected in &expected_participations {
|
||||||
assert_eq!(
|
assert_eq!(&msgs.pop_front().unwrap(), expected);
|
||||||
msgs.pop_front().unwrap(),
|
}
|
||||||
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
|
|
||||||
id: KeyGenId { session: spec.set().session, attempt: 0 },
|
|
||||||
commitments: expected_commitments
|
|
||||||
})
|
|
||||||
);
|
|
||||||
assert!(msgs.is_empty());
|
assert!(msgs.is_empty());
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -169,169 +157,35 @@ async fn dkg_test() {
   for (i, key) in keys.iter().enumerate().skip(1) {
     let processors = new_processors(&mut dbs[i], key, &spec, &tributaries[i].1).await;
     let mut msgs = processors.0.write().await;
-    assert_eq!(msgs.len(), 1);
     let msgs = msgs.get_mut(&spec.set().network).unwrap();
-    let mut expected_commitments = expected_commitments.clone();
-    expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
-    assert_eq!(
-      msgs.pop_front().unwrap(),
-      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
-        id: KeyGenId { session: spec.set().session, attempt: 0 },
-        commitments: expected_commitments
-      })
-    );
+    assert_eq!(msgs.len(), keys.len());
+    for expected in &expected_participations {
+      assert_eq!(&msgs.pop_front().unwrap(), expected);
+    }
     assert!(msgs.is_empty());
   }
 
-  // Now do shares
-  let mut txs = vec![];
-  for (k, key) in keys.iter().enumerate() {
-    let attempt = 0;
-
-    let mut shares = vec![vec![]];
-    for i in 0 .. keys.len() {
-      if i != k {
-        let mut share = vec![0; 256];
-        OsRng.fill_bytes(&mut share);
-        shares.last_mut().unwrap().push(share);
-      }
-    }
-
-    let mut txn = dbs[k].txn();
-    let mut tx = Transaction::DkgShares {
-      attempt,
-      shares,
-      confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0),
-      signed: Transaction::empty_signed(),
-    };
-    txn.commit();
-    tx.sign(&mut OsRng, spec.genesis(), key);
-    txs.push(tx);
-  }
-
-  let block_before_tx = tributaries[0].1.tip().await;
-  for (i, tx) in txs.iter().enumerate().skip(1) {
-    assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
-  }
-  for tx in txs.iter().skip(1) {
-    wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
-  }
-
-  // With just 4 sets of shares, nothing should happen yet
-  handle_new_blocks::<_, _, _, _, _, LocalP2p>(
-    &mut dbs[0],
-    &keys[0],
-    &|_, _, _, _| async {
-      panic!("provided TX caused recognized_id to be called after some shares")
-    },
-    &processors,
-    &(),
-    &|_| async {
-      panic!(
-        "test tried to publish a new Tributary TX from handle_application_tx after some shares"
-      )
-    },
-    &spec,
-    &tributaries[0].1.reader(),
-  )
-  .await;
-  assert_eq!(processors.0.read().await.len(), 1);
-  assert!(processors.0.read().await[&spec.set().network].is_empty());
-
-  // Publish the final set of shares
-  let block_before_tx = tributaries[0].1.tip().await;
-  assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));
-  wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
-  sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;
-
-  // Each scanner should emit a distinct shares message
-  let shares_for = |i: usize| {
-    CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares {
-      id: KeyGenId { session: spec.set().session, attempt: 0 },
-      shares: vec![txs
-        .iter()
-        .enumerate()
-        .filter_map(|(l, tx)| {
-          if let Transaction::DkgShares { shares, .. } = tx {
-            if i == l {
-              None
-            } else {
-              let relative_i = i - (if i > l { 1 } else { 0 });
-              Some((
-                Participant::new((l + 1).try_into().unwrap()).unwrap(),
-                shares[0][relative_i].clone(),
-              ))
-            }
-          } else {
-            panic!("txs had non-shares");
-          }
-        })
-        .collect::<HashMap<_, _>>()],
-    })
-  };
-
-  // Any scanner which has handled the prior blocks should only emit the new event
-  for (i, key) in keys.iter().enumerate() {
-    handle_new_blocks::<_, _, _, _, _, LocalP2p>(
-      &mut dbs[i],
-      key,
-      &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") },
-      &processors,
-      &(),
-      &|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") },
-      &spec,
-      &tributaries[i].1.reader(),
-    )
-    .await;
-    {
-      let mut msgs = processors.0.write().await;
-      assert_eq!(msgs.len(), 1);
-      let msgs = msgs.get_mut(&spec.set().network).unwrap();
-      assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
-      assert!(msgs.is_empty());
-    }
-  }
-
-  // Yet new scanners should emit all events
-  for (i, key) in keys.iter().enumerate() {
-    let processors = new_processors(&mut MemDb::new(), key, &spec, &tributaries[i].1).await;
-    let mut msgs = processors.0.write().await;
-    assert_eq!(msgs.len(), 1);
-    let msgs = msgs.get_mut(&spec.set().network).unwrap();
-    let mut expected_commitments = expected_commitments.clone();
-    expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
-    assert_eq!(
-      msgs.pop_front().unwrap(),
-      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
-        id: KeyGenId { session: spec.set().session, attempt: 0 },
-        commitments: expected_commitments
-      })
-    );
-    assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
-    assert!(msgs.is_empty());
-  }
-
-  // Send DkgConfirmed
   let mut substrate_key = [0; 32];
   OsRng.fill_bytes(&mut substrate_key);
   let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()];
   OsRng.fill_bytes(&mut network_key);
-  let key_pair =
-    KeyPair(serai_client::Public::from(substrate_key), network_key.try_into().unwrap());
+  let key_pair = KeyPair(serai_client::Public(substrate_key), network_key.try_into().unwrap());
 
   let mut txs = vec![];
   for (i, key) in keys.iter().enumerate() {
-    let attempt = 0;
     let mut txn = dbs[i].txn();
-    let share =
-      crate::tributary::generated_key_pair::<MemDb>(&mut txn, key, &spec, &key_pair, 0).unwrap();
-    txn.commit();
-
-    let mut tx = Transaction::DkgConfirmed {
+    // Claim we've generated the key pair
+    crate::tributary::generated_key_pair::<MemDb>(&mut txn, spec.genesis(), &key_pair);
+
+    // Publish the nonces
+    let attempt = 0;
+    let mut tx = Transaction::DkgConfirmationNonces {
       attempt,
-      confirmation_share: share,
+      confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0),
       signed: Transaction::empty_signed(),
     };
+    txn.commit();
     tx.sign(&mut OsRng, spec.genesis(), key);
     txs.push(tx);
   }
@@ -343,6 +197,35 @@ async fn dkg_test() {
     wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
   }
 
+  // This should not cause any new processor event as the processor doesn't handle DKG confirming
+  for (i, key) in keys.iter().enumerate() {
+    handle_new_blocks::<_, _, _, _, _, LocalP2p>(
+      &mut dbs[i],
+      key,
+      &|_, _, _, _| async {
+        panic!("provided TX caused recognized_id to be called after DkgConfirmationNonces")
+      },
+      &processors,
+      &(),
+      // The Tributary handler should publish ConfirmationShare itself after ConfirmationNonces
+      &|tx| async { assert_eq!(tributaries[i].1.add_transaction(tx).await, Ok(true)) },
+      &spec,
+      &tributaries[i].1.reader(),
+    )
+    .await;
+    {
+      assert!(processors.0.read().await.get(&spec.set().network).unwrap().is_empty());
+    }
+  }
+
+  // Yet once these TXs are on-chain, the tributary should itself publish the confirmation shares
+  // This means in the block after the next block, the keys should be set onto Serai
+  // Sleep twice as long as two blocks, in case there's some stability issue
+  sleep(Duration::from_secs(
+    2 * 2 * u64::from(Tributary::<MemDb, Transaction, LocalP2p>::block_time()),
+  ))
+  .await;
+
   struct CheckPublishSetKeys {
     spec: TributarySpec,
     key_pair: KeyPair,
@@ -352,20 +235,25 @@ async fn dkg_test() {
     async fn publish_set_keys(
       &self,
       _db: &(impl Sync + Get),
-      set: ExternalValidatorSet,
-      removed: Vec<SeraiAddress>,
+      set: ValidatorSet,
       key_pair: KeyPair,
+      signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
       signature: Signature,
     ) {
       assert_eq!(set, self.spec.set());
-      assert!(removed.is_empty());
       assert_eq!(self.key_pair, key_pair);
       assert!(signature.verify(
-        &*serai_client::validator_sets::primitives::set_keys_message(&set, &[], &key_pair),
-        &serai_client::Public::from(
-          dkg_musig::musig_key_vartime::<Ristretto>(
-            serai_client::validator_sets::primitives::musig_context(set.into()),
-            &self.spec.validators().into_iter().map(|(validator, _)| validator).collect::<Vec<_>>()
+        &*serai_client::validator_sets::primitives::set_keys_message(&set, &key_pair),
+        &serai_client::Public(
+          frost::dkg::musig::musig_key::<Ristretto>(
+            &serai_client::validator_sets::primitives::musig_context(set),
+            &self
+              .spec
+              .validators()
+              .into_iter()
+              .zip(signature_participants)
+              .filter_map(|((validator, _), included)| included.then_some(validator))
+              .collect::<Vec<_>>()
           )
           .unwrap()
           .to_bytes()
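
The updated check above recovers the MuSig key from only the validators flagged in `signature_participants`. A minimal standalone sketch of that zip/filter_map mapping, with generic stand-in types (nothing here is the crate's actual API):

// Illustrative sketch: pair each validator with its bit and keep the flagged ones.
fn musig_signers<K: Copy>(
  validators: Vec<(K, u16)>,
  signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
) -> Vec<K> {
  validators
    .into_iter()
    .zip(signature_participants)
    .filter_map(|((validator, _weight), included)| included.then_some(validator))
    .collect()
}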
@@ -2,13 +2,12 @@ use core::fmt::Debug;
 
 use rand_core::{RngCore, OsRng};
 
-use dalek_ff_group::Ristretto;
-use ciphersuite::{group::Group, Ciphersuite};
+use ciphersuite::{group::Group, Ciphersuite, Ristretto};
 
 use scale::{Encode, Decode};
 use serai_client::{
-  primitives::{SeraiAddress, Signature},
-  validator_sets::primitives::{ExternalValidatorSet, KeyPair, MAX_KEY_SHARES_PER_SET},
+  primitives::Signature,
+  validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet, KeyPair},
 };
 use processor_messages::coordinator::SubstrateSignableId;
 
@@ -32,9 +31,9 @@ impl PublishSeraiTransaction for () {
   async fn publish_set_keys(
     &self,
     _db: &(impl Sync + serai_db::Get),
-    _set: ExternalValidatorSet,
-    _removed: Vec<SeraiAddress>,
+    _set: ValidatorSet,
     _key_pair: KeyPair,
+    _signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
     _signature: Signature,
   ) {
     panic!("publish_set_keys was called in test")
@@ -85,23 +84,25 @@ fn tx_size_limit() {
   use tributary::TRANSACTION_SIZE_LIMIT;
 
   let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1;
-  let max_key_shares_per_individual = MAX_KEY_SHARES_PER_SET - max_dkg_coefficients;
-  // Handwave the DKG Commitments size as the size of the commitments to the coefficients and
-  // 1024 bytes for all overhead
-  let handwaved_dkg_commitments_size = (max_dkg_coefficients * MAX_KEY_LEN) + 1024;
-  assert!(
-    u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=
-      (handwaved_dkg_commitments_size * max_key_shares_per_individual)
-  );
+  // n coefficients
+  // 2 ECDH values per recipient, and the encrypted share
+  let elements_outside_of_proof = max_dkg_coefficients + ((2 + 1) * MAX_KEY_SHARES_PER_SET);
+  // Then Pedersen Vector Commitments for each DH done, and the associated overhead in the proof
+  // It's handwaved as one commitment per DH, where we do 2 per coefficient and 1 for the explicit
+  // ECDHs
+  let vector_commitments = (2 * max_dkg_coefficients) + (2 * MAX_KEY_SHARES_PER_SET);
+  // Then we have commitments to the `t` polynomial of length 2 + 2 nc, where nc is the amount of
+  // commitments
+  let t_commitments = 2 + (2 * vector_commitments);
+  // The remainder of the proof should be ~30 elements
+  let proof_elements = 30;
 
-  // Encryption key, PoP (2 elements), message
-  let elements_per_share = 4;
-  let handwaved_dkg_shares_size =
-    (elements_per_share * MAX_KEY_LEN * MAX_KEY_SHARES_PER_SET) + 1024;
-  assert!(
-    u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=
-      (handwaved_dkg_shares_size * max_key_shares_per_individual)
-  );
+  let handwaved_dkg_size =
+    ((elements_outside_of_proof + vector_commitments + t_commitments + proof_elements) *
+      MAX_KEY_LEN) +
+      1024;
+  // Further scale by two in case of any errors in the above
+  assert!(u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >= (2 * handwaved_dkg_size));
 }
 
 #[test]
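
The new bound is pure arithmetic, so it can be checked standalone. A minimal sketch with assumed values standing in for the crate's real constants (the actual MAX_KEY_SHARES_PER_SET, MAX_KEY_LEN, and TRANSACTION_SIZE_LIMIT live in the surrounding crates):

const MAX_KEY_SHARES_PER_SET: u32 = 600; // assumed value
const MAX_KEY_LEN: u32 = 96; // assumed value
const TRANSACTION_SIZE_LIMIT: usize = 2_000_000; // assumed value

fn main() {
  let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1;
  // n coefficients, then 2 ECDH values and 1 encrypted share per recipient
  let elements_outside_of_proof = max_dkg_coefficients + ((2 + 1) * MAX_KEY_SHARES_PER_SET);
  // One Pedersen Vector Commitment per DH: 2 per coefficient, 2 per explicit ECDH
  let vector_commitments = (2 * max_dkg_coefficients) + (2 * MAX_KEY_SHARES_PER_SET);
  // Commitments to the `t` polynomial, plus ~30 elements for the rest of the proof
  let t_commitments = 2 + (2 * vector_commitments);
  let proof_elements = 30;
  let handwaved_dkg_size =
    ((elements_outside_of_proof + vector_commitments + t_commitments + proof_elements) *
      MAX_KEY_LEN) +
      1024;
  // With these assumed values, handwaved_dkg_size is 791_968, so twice it fits in 2 MB
  assert!(u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >= (2 * handwaved_dkg_size));
}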
@@ -144,84 +145,34 @@ fn serialize_sign_data() {
 
 #[test]
 fn serialize_transaction() {
-  test_read_write(&Transaction::RemoveParticipantDueToDkg {
+  test_read_write(&Transaction::RemoveParticipant {
     participant: <Ristretto as Ciphersuite>::G::random(&mut OsRng),
     signed: random_signed_with_nonce(&mut OsRng, 0),
   });
 
-  {
-    let mut commitments = vec![random_vec(&mut OsRng, 512)];
-    for _ in 0 .. (OsRng.next_u64() % 100) {
-      let mut temp = commitments[0].clone();
-      OsRng.fill_bytes(&mut temp);
-      commitments.push(temp);
-    }
-    test_read_write(&Transaction::DkgCommitments {
-      attempt: random_u32(&mut OsRng),
-      commitments,
-      signed: random_signed_with_nonce(&mut OsRng, 0),
-    });
-  }
+  test_read_write(&Transaction::DkgParticipation {
+    participation: random_vec(&mut OsRng, 4096),
+    signed: random_signed_with_nonce(&mut OsRng, 0),
+  });
 
-  {
-    // This supports a variable share length, and variable amount of sent shares, yet share length
-    // and sent shares is expected to be constant among recipients
-    let share_len = usize::try_from((OsRng.next_u64() % 512) + 1).unwrap();
-    let amount_of_shares = usize::try_from((OsRng.next_u64() % 3) + 1).unwrap();
-    // Create a valid vec of shares
-    let mut shares = vec![];
-    // Create up to 150 participants
-    for _ in 0 ..= (OsRng.next_u64() % 150) {
-      // Give each sender multiple shares
-      let mut sender_shares = vec![];
-      for _ in 0 .. amount_of_shares {
-        let mut share = vec![0; share_len];
-        OsRng.fill_bytes(&mut share);
-        sender_shares.push(share);
-      }
-      shares.push(sender_shares);
-    }
+  test_read_write(&Transaction::DkgConfirmationNonces {
+    attempt: random_u32(&mut OsRng),
+    confirmation_nonces: {
+      let mut nonces = [0; 64];
+      OsRng.fill_bytes(&mut nonces);
+      nonces
+    },
+    signed: random_signed_with_nonce(&mut OsRng, 0),
+  });
 
-    test_read_write(&Transaction::DkgShares {
-      attempt: random_u32(&mut OsRng),
-      shares,
-      confirmation_nonces: {
-        let mut nonces = [0; 64];
-        OsRng.fill_bytes(&mut nonces);
-        nonces
-      },
-      signed: random_signed_with_nonce(&mut OsRng, 1),
-    });
-  }
-
-  for i in 0 .. 2 {
-    test_read_write(&Transaction::InvalidDkgShare {
-      attempt: random_u32(&mut OsRng),
-      accuser: frost::Participant::new(
-        u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
-      )
-      .unwrap(),
-      faulty: frost::Participant::new(
-        u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
-      )
-      .unwrap(),
-      blame: if i == 0 {
-        None
-      } else {
-        Some(random_vec(&mut OsRng, 500)).filter(|blame| !blame.is_empty())
-      },
-      signed: random_signed_with_nonce(&mut OsRng, 2),
-    });
-  }
-
-  test_read_write(&Transaction::DkgConfirmed {
+  test_read_write(&Transaction::DkgConfirmationShare {
     attempt: random_u32(&mut OsRng),
     confirmation_share: {
       let mut share = [0; 32];
       OsRng.fill_bytes(&mut share);
       share
     },
-    signed: random_signed_with_nonce(&mut OsRng, 2),
+    signed: random_signed_with_nonce(&mut OsRng, 1),
   });
 
   {
|
|||||||
|
|
||||||
use rand_core::OsRng;
|
use rand_core::OsRng;
|
||||||
|
|
||||||
use dalek_ff_group::Ristretto;
|
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
|
||||||
use ciphersuite::{group::GroupEncoding, Ciphersuite};
|
|
||||||
|
|
||||||
use tokio::{
|
use tokio::{
|
||||||
sync::{mpsc, broadcast},
|
sync::{mpsc, broadcast},
|
||||||
@@ -30,7 +29,7 @@ async fn sync_test() {
   let mut keys = new_keys(&mut OsRng);
   let spec = new_spec(&mut OsRng, &keys);
   // Ensure this can have a node fail
-  assert!(spec.n(&[]) > spec.t());
+  assert!(spec.n() > spec.t());
 
   let mut tributaries = new_tributaries(&keys, &spec)
     .await
@@ -143,7 +142,7 @@ async fn sync_test() {
   // Because only `t` validators are used in a commit, take n - t nodes offline
   // leaving only `t` nodes. Which should force it to participate in the consensus
   // of next blocks.
-  let spares = usize::from(spec.n(&[]) - spec.t());
+  let spares = usize::from(spec.n() - spec.t());
   for thread in p2p_threads.iter().take(spares) {
     thread.abort();
   }
@@ -37,15 +37,14 @@ async fn tx_test() {
     usize::try_from(OsRng.next_u64() % u64::try_from(tributaries.len()).unwrap()).unwrap();
   let key = keys[sender].clone();
 
-  let attempt = 0;
-  let mut commitments = vec![0; 256];
-  OsRng.fill_bytes(&mut commitments);
-
-  // Create the TX with a null signature so we can get its sig hash
   let block_before_tx = tributaries[sender].1.tip().await;
-  let mut tx = Transaction::DkgCommitments {
-    attempt,
-    commitments: vec![commitments.clone()],
+  // Create the TX with a null signature so we can get its sig hash
+  let mut tx = Transaction::DkgParticipation {
+    participation: {
+      let mut participation = vec![0; 4096];
+      OsRng.fill_bytes(&mut participation);
+      participation
+    },
     signed: Transaction::empty_signed(),
   };
   tx.sign(&mut OsRng, spec.genesis(), &key);
@@ -3,11 +3,10 @@ use std::collections::HashMap;
 use scale::Encode;
 use borsh::{BorshSerialize, BorshDeserialize};
 
-use dalek_ff_group::Ristretto;
-use ciphersuite::{group::GroupEncoding, Ciphersuite};
+use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
 use frost::Participant;
 
-use serai_client::validator_sets::primitives::{KeyPair, ExternalValidatorSet};
+use serai_client::validator_sets::primitives::{KeyPair, ValidatorSet};
 
 use processor_messages::coordinator::SubstrateSignableId;
 
@@ -19,7 +18,6 @@ use crate::tributary::{Label, Transaction};
 
 #[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
 pub enum Topic {
-  Dkg,
   DkgConfirmation,
   SubstrateSign(SubstrateSignableId),
   Sign([u8; 32]),
@@ -47,15 +45,13 @@ pub enum Accumulation {
 create_db!(
   Tributary {
     SeraiBlockNumber: (hash: [u8; 32]) -> u64,
-    SeraiDkgCompleted: (spec: ExternalValidatorSet) -> [u8; 32],
+    SeraiDkgCompleted: (set: ValidatorSet) -> [u8; 32],
 
     TributaryBlockNumber: (block: [u8; 32]) -> u32,
     LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32],
 
     // TODO: Revisit the point of this
     FatalSlashes: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
-    RemovedAsOfDkgAttempt: (genesis: [u8; 32], attempt: u32) -> Vec<[u8; 32]>,
-    OfflineDuringDkg: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
     // TODO: Combine these two
     FatallySlashed: (genesis: [u8; 32], account: [u8; 32]) -> (),
     SlashPoints: (genesis: [u8; 32], account: [u8; 32]) -> u32,
@@ -68,11 +64,9 @@ create_db!(
     DataReceived: (genesis: [u8; 32], data_spec: &DataSpecification) -> u16,
     DataDb: (genesis: [u8; 32], data_spec: &DataSpecification, signer_bytes: &[u8; 32]) -> Vec<u8>,
 
-    DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec<u8>,
+    DkgParticipation: (genesis: [u8; 32], from: u16) -> Vec<u8>,
     ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
-    DkgKeyPair: (genesis: [u8; 32], attempt: u32) -> KeyPair,
-    KeyToDkgAttempt: (key: [u8; 32]) -> u32,
-    DkgLocallyCompleted: (genesis: [u8; 32]) -> (),
+    DkgKeyPair: (genesis: [u8; 32]) -> KeyPair,
 
     PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>,
 
@@ -81,7 +75,7 @@ create_db!(
     SlashReports: (genesis: [u8; 32], signer: [u8; 32]) -> Vec<u32>,
     SlashReported: (genesis: [u8; 32]) -> u16,
     SlashReportCutOff: (genesis: [u8; 32]) -> u64,
-    SlashReport: (set: ExternalValidatorSet) -> Vec<([u8; 32], u32)>,
+    SlashReport: (set: ValidatorSet) -> Vec<([u8; 32], u32)>,
   }
 );
 
@@ -124,12 +118,12 @@ impl AttemptDb {
 
   pub fn attempt(getter: &impl Get, genesis: [u8; 32], topic: Topic) -> Option<u32> {
     let attempt = Self::get(getter, genesis, &topic);
-    // Don't require explicit recognition of the Dkg topic as it starts when the chain does
+    // Don't require explicit recognition of the DkgConfirmation topic as it starts when the chain
+    // does
     // Don't require explicit recognition of the SlashReport topic as it isn't a DoS risk and it
     // should always happen (eventually)
     if attempt.is_none() &&
-      ((topic == Topic::Dkg) ||
-        (topic == Topic::DkgConfirmation) ||
+      ((topic == Topic::DkgConfirmation) ||
         (topic == Topic::SubstrateSign(SubstrateSignableId::SlashReport)))
     {
       return Some(0);
@@ -156,16 +150,12 @@ impl ReattemptDb {
     // 5 minutes for attempts 0 ..= 2, 10 minutes for attempts 3 ..= 5, 15 minutes for attempts > 5
     // Assumes no event will take longer than 15 minutes, yet grows the time in case there are
     // network bandwidth issues
-    let mut reattempt_delay = BASE_REATTEMPT_DELAY *
+    let reattempt_delay = BASE_REATTEMPT_DELAY *
       ((AttemptDb::attempt(txn, genesis, topic)
         .expect("scheduling re-attempt for unknown topic") /
         3) +
        1)
      .min(3);
-    // Allow more time for DKGs since they have an extra round and much more data
-    if matches!(topic, Topic::Dkg) {
-      reattempt_delay *= 4;
-    }
     let upon_block = current_block_number + reattempt_delay;
 
     let mut reattempts = Self::get(txn, genesis, upon_block).unwrap_or(vec![]);
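
Concretely, the `(attempt / 3) + 1` factor, capped at 3, is what produces the 5/10/15 minute bands the comment describes. A small sketch (BASE_REATTEMPT_DELAY's real value and unit live in this file; the `5` below is an assumed stand-in measured in minutes):

fn reattempt_delay(base_reattempt_delay: u32, attempt: u32) -> u32 {
  base_reattempt_delay * ((attempt / 3) + 1).min(3)
}

fn main() {
  let base = 5; // assumed: five minutes' worth of blocks
  assert_eq!(reattempt_delay(base, 0), 5); // attempts 0 ..= 2 wait one unit
  assert_eq!(reattempt_delay(base, 3), 10); // attempts 3 ..= 5 wait two
  assert_eq!(reattempt_delay(base, 7), 15); // attempts > 5 cap at three
}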
@@ -4,17 +4,16 @@ use std::collections::HashMap;
 use zeroize::Zeroizing;
 use rand_core::OsRng;
 
-use dalek_ff_group::Ristretto;
-use ciphersuite::{group::GroupEncoding, Ciphersuite};
+use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
 use frost::dkg::Participant;
 
 use scale::{Encode, Decode};
-use serai_client::validator_sets::primitives::KeyPair;
+use serai_client::{Signature, validator_sets::primitives::KeyPair};
 
 use tributary::{Signed, TransactionKind, TransactionTrait};
 
 use processor_messages::{
-  key_gen::{self, KeyGenId},
+  key_gen::self,
   coordinator::{self, SubstrateSignableId, SubstrateSignId},
   sign::{self, SignId},
 };
@@ -39,33 +38,20 @@ pub fn dkg_confirmation_nonces(
   txn: &mut impl DbTxn,
   attempt: u32,
 ) -> [u8; 64] {
-  DkgConfirmer::new(key, spec, txn, attempt)
-    .expect("getting DKG confirmation nonces for unknown attempt")
-    .preprocess()
+  DkgConfirmer::new(key, spec, txn, attempt).preprocess()
 }
 
 pub fn generated_key_pair<D: Db>(
   txn: &mut D::Transaction<'_>,
-  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-  spec: &TributarySpec,
+  genesis: [u8; 32],
   key_pair: &KeyPair,
-  attempt: u32,
-) -> Result<[u8; 32], Participant> {
-  DkgKeyPair::set(txn, spec.genesis(), attempt, key_pair);
-  KeyToDkgAttempt::set(txn, key_pair.0 .0, &attempt);
-  let preprocesses = ConfirmationNonces::get(txn, spec.genesis(), attempt).unwrap();
-  DkgConfirmer::new(key, spec, txn, attempt)
-    .expect("claiming to have generated a key pair for an unrecognized attempt")
-    .share(preprocesses, key_pair)
+) {
+  DkgKeyPair::set(txn, genesis, key_pair);
 }
 
-fn unflatten(
-  spec: &TributarySpec,
-  removed: &[<Ristretto as Ciphersuite>::G],
-  data: &mut HashMap<Participant, Vec<u8>>,
-) {
+fn unflatten(spec: &TributarySpec, data: &mut HashMap<Participant, Vec<u8>>) {
   for (validator, _) in spec.validators() {
-    let Some(range) = spec.i(removed, validator) else { continue };
+    let Some(range) = spec.i(validator) else { continue };
     let Some(all_segments) = data.remove(&range.start) else {
       continue;
     };
@@ -89,7 +75,6 @@ impl<
 {
   fn accumulate(
     &mut self,
-    removed: &[<Ristretto as Ciphersuite>::G],
     data_spec: &DataSpecification,
     signer: <Ristretto as Ciphersuite>::G,
     data: &Vec<u8>,
@@ -100,10 +85,7 @@ impl<
       panic!("accumulating data for a participant multiple times");
     }
     let signer_shares = {
-      let Some(signer_i) = self.spec.i(removed, signer) else {
-        log::warn!("accumulating data from {} who was removed", hex::encode(signer.to_bytes()));
-        return Accumulation::NotReady;
-      };
+      let signer_i = self.spec.i(signer).expect("transaction signer wasn't a member of the set");
       u16::from(signer_i.end) - u16::from(signer_i.start)
     };
 
@@ -116,11 +98,7 @@ impl<
 
     // If 2/3rds of the network participated in this preprocess, queue it for an automatic
     // re-attempt
-    // DkgConfirmation doesn't have a re-attempt as it's just an extension for Dkg
-    if (data_spec.label == Label::Preprocess) &&
-      received_range.contains(&self.spec.t()) &&
-      (data_spec.topic != Topic::DkgConfirmation)
-    {
+    if (data_spec.label == Label::Preprocess) && received_range.contains(&self.spec.t()) {
       // Double check the attempt on this entry, as we don't want to schedule a re-attempt if this
       // is an old entry
       // This is an assert, not part of the if check, as old data shouldn't be here in the first
@@ -130,10 +108,7 @@ impl<
     }
 
     // If we have all the needed commitments/preprocesses/shares, tell the processor
-    let needs_everyone =
-      (data_spec.topic == Topic::Dkg) || (data_spec.topic == Topic::DkgConfirmation);
-    let needed = if needs_everyone { self.spec.n(removed) } else { self.spec.t() };
-    if received_range.contains(&needed) {
+    if received_range.contains(&self.spec.t()) {
       log::debug!(
         "accumulation for entry {:?} attempt #{} is ready",
         &data_spec.topic,
@@ -142,7 +117,7 @@ impl<
 
       let mut data = HashMap::new();
       for validator in self.spec.validators().iter().map(|validator| validator.0) {
-        let Some(i) = self.spec.i(removed, validator) else { continue };
+        let Some(i) = self.spec.i(validator) else { continue };
         data.insert(
           i.start,
           if let Some(data) = DataDb::get(self.txn, genesis, data_spec, &validator.to_bytes()) {
@@ -153,10 +128,10 @@ impl<
         );
       }
 
-      assert_eq!(data.len(), usize::from(needed));
+      assert_eq!(data.len(), usize::from(self.spec.t()));
 
       // Remove our own piece of data, if we were involved
-      if let Some(i) = self.spec.i(removed, Ristretto::generator() * self.our_key.deref()) {
+      if let Some(i) = self.spec.i(Ristretto::generator() * self.our_key.deref()) {
         if data.remove(&i.start).is_some() {
           return Accumulation::Ready(DataSet::Participating(data));
         }
@@ -168,7 +143,6 @@ impl<
 
   fn handle_data(
     &mut self,
-    removed: &[<Ristretto as Ciphersuite>::G],
     data_spec: &DataSpecification,
     bytes: &Vec<u8>,
     signed: &Signed,
@@ -214,21 +188,15 @@ impl<
     // TODO: If this is shares, we need to check they are part of the selected signing set
 
     // Accumulate this data
-    self.accumulate(removed, data_spec, signed.signer, bytes)
+    self.accumulate(data_spec, signed.signer, bytes)
   }
 
   fn check_sign_data_len(
     &mut self,
-    removed: &[<Ristretto as Ciphersuite>::G],
     signer: <Ristretto as Ciphersuite>::G,
     len: usize,
   ) -> Result<(), ()> {
-    let Some(signer_i) = self.spec.i(removed, signer) else {
-      // TODO: Ensure processor doesn't so participate/check how it handles removals for being
-      // offline
-      self.fatal_slash(signer.to_bytes(), "signer participated despite being removed");
-      Err(())?
-    };
+    let signer_i = self.spec.i(signer).expect("signer wasn't a member of the set");
     if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) {
       self.fatal_slash(
         signer.to_bytes(),
@@ -255,12 +223,9 @@ impl<
     }
 
     match tx {
-      Transaction::RemoveParticipantDueToDkg { participant, signed } => {
-        if self.spec.i(&[], participant).is_none() {
-          self.fatal_slash(
-            participant.to_bytes(),
-            "RemoveParticipantDueToDkg vote for non-validator",
-          );
+      Transaction::RemoveParticipant { participant, signed } => {
+        if self.spec.i(participant).is_none() {
+          self.fatal_slash(participant.to_bytes(), "RemoveParticipant vote for non-validator");
           return;
         }
 
@@ -275,268 +240,106 @@ impl<
 
         let prior_votes = VotesToRemove::get(self.txn, genesis, participant).unwrap_or(0);
         let signer_votes =
-          self.spec.i(&[], signed.signer).expect("signer wasn't a validator for this network?");
+          self.spec.i(signed.signer).expect("signer wasn't a validator for this network?");
         let new_votes = prior_votes + u16::from(signer_votes.end) - u16::from(signer_votes.start);
         VotesToRemove::set(self.txn, genesis, participant, &new_votes);
         if ((prior_votes + 1) ..= new_votes).contains(&self.spec.t()) {
-          self.fatal_slash(participant, "RemoveParticipantDueToDkg vote")
+          self.fatal_slash(participant, "RemoveParticipant vote")
         }
       }
 
-      Transaction::DkgCommitments { attempt, commitments, signed } => {
-        let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
-          self.fatal_slash(signed.signer.to_bytes(), "DkgCommitments with an unrecognized attempt");
-          return;
-        };
-        let Ok(()) = self.check_sign_data_len(&removed, signed.signer, commitments.len()) else {
-          return;
-        };
-        let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Preprocess, attempt };
-        match self.handle_data(&removed, &data_spec, &commitments.encode(), &signed) {
-          Accumulation::Ready(DataSet::Participating(mut commitments)) => {
-            log::info!("got all DkgCommitments for {}", hex::encode(genesis));
-            unflatten(self.spec, &removed, &mut commitments);
-            self
-              .processors
-              .send(
-                self.spec.set().network,
-                key_gen::CoordinatorMessage::Commitments {
-                  id: KeyGenId { session: self.spec.set().session, attempt },
-                  commitments,
-                },
-              )
-              .await;
-          }
-          Accumulation::Ready(DataSet::NotParticipating) => {
-            assert!(
-              removed.contains(&(Ristretto::generator() * self.our_key.deref())),
-              "NotParticipating in a DkgCommitments we weren't removed for"
-            );
-          }
-          Accumulation::NotReady => {}
-        }
-      }
-
-      Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => {
-        let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
-          self.fatal_slash(signed.signer.to_bytes(), "DkgShares with an unrecognized attempt");
-          return;
-        };
-        let not_participating = removed.contains(&(Ristretto::generator() * self.our_key.deref()));
-
-        let Ok(()) = self.check_sign_data_len(&removed, signed.signer, shares.len()) else {
-          return;
-        };
-
-        let Some(sender_i) = self.spec.i(&removed, signed.signer) else {
-          self.fatal_slash(
-            signed.signer.to_bytes(),
-            "DkgShares for a DKG they aren't participating in",
-          );
-          return;
-        };
-        let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start);
-        for shares in &shares {
-          if shares.len() != (usize::from(self.spec.n(&removed) - sender_is_len)) {
-            self.fatal_slash(signed.signer.to_bytes(), "invalid amount of DKG shares");
-            return;
-          }
-        }
-
-        // Save each share as needed for blame
-        for (from_offset, shares) in shares.iter().enumerate() {
-          let from =
-            Participant::new(u16::from(sender_i.start) + u16::try_from(from_offset).unwrap())
-              .unwrap();
-
-          for (to_offset, share) in shares.iter().enumerate() {
-            // 0-indexed (the enumeration) to 1-indexed (Participant)
-            let mut to = u16::try_from(to_offset).unwrap() + 1;
-            // Adjust for the omission of the sender's own shares
-            if to >= u16::from(sender_i.start) {
-              to += u16::from(sender_i.end) - u16::from(sender_i.start);
-            }
-            let to = Participant::new(to).unwrap();
-
-            DkgShare::set(self.txn, genesis, from.into(), to.into(), share);
-          }
-        }
-
-        // Filter down to only our share's bytes for handle
-        let our_shares = if let Some(our_i) =
-          self.spec.i(&removed, Ristretto::generator() * self.our_key.deref())
-        {
-          if sender_i == our_i {
-            vec![]
-          } else {
-            // 1-indexed to 0-indexed
-            let mut our_i_pos = u16::from(our_i.start) - 1;
-            // Handle the omission of the sender's own data
-            if u16::from(our_i.start) > u16::from(sender_i.start) {
-              our_i_pos -= sender_is_len;
-            }
-            let our_i_pos = usize::from(our_i_pos);
-            shares
-              .iter_mut()
-              .map(|shares| {
-                shares
-                  .drain(
-                    our_i_pos ..
-                      (our_i_pos + usize::from(u16::from(our_i.end) - u16::from(our_i.start))),
-                  )
-                  .collect::<Vec<_>>()
-              })
-              .collect()
-          }
-        } else {
-          assert!(
-            not_participating,
-            "we didn't have an i while handling DkgShares we weren't removed for"
-          );
-          // Since we're not participating, simply save vec![] for our shares
-          vec![]
-        };
-        // Drop shares as it's presumably been mutated into invalidity
-        drop(shares);
-
-        let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt };
-        let encoded_data = (confirmation_nonces.to_vec(), our_shares.encode()).encode();
-        match self.handle_data(&removed, &data_spec, &encoded_data, &signed) {
-          Accumulation::Ready(DataSet::Participating(confirmation_nonces_and_shares)) => {
-            log::info!("got all DkgShares for {}", hex::encode(genesis));
-
-            let mut confirmation_nonces = HashMap::new();
-            let mut shares = HashMap::new();
-            for (participant, confirmation_nonces_and_shares) in confirmation_nonces_and_shares {
-              let (these_confirmation_nonces, these_shares) =
-                <(Vec<u8>, Vec<u8>)>::decode(&mut confirmation_nonces_and_shares.as_slice())
-                  .unwrap();
-              confirmation_nonces.insert(participant, these_confirmation_nonces);
-              shares.insert(participant, these_shares);
-            }
-            ConfirmationNonces::set(self.txn, genesis, attempt, &confirmation_nonces);
-
-            // shares is a HashMap<Participant, Vec<Vec<Vec<u8>>>>, with the values representing:
-            // - Each of the sender's shares
-            // - Each of the our shares
-            // - Each share
-            // We need a Vec<HashMap<Participant, Vec<u8>>>, with the outer being each of ours
-            let mut expanded_shares = vec![];
-            for (sender_start_i, shares) in shares {
-              let shares: Vec<Vec<Vec<u8>>> = Vec::<_>::decode(&mut shares.as_slice()).unwrap();
-              for (sender_i_offset, our_shares) in shares.into_iter().enumerate() {
-                for (our_share_i, our_share) in our_shares.into_iter().enumerate() {
-                  if expanded_shares.len() <= our_share_i {
-                    expanded_shares.push(HashMap::new());
-                  }
-                  expanded_shares[our_share_i].insert(
-                    Participant::new(
-                      u16::from(sender_start_i) + u16::try_from(sender_i_offset).unwrap(),
-                    )
-                    .unwrap(),
-                    our_share,
-                  );
-                }
-              }
-            }
-
-            self
-              .processors
-              .send(
-                self.spec.set().network,
-                key_gen::CoordinatorMessage::Shares {
-                  id: KeyGenId { session: self.spec.set().session, attempt },
-                  shares: expanded_shares,
-                },
-              )
-              .await;
-          }
-          Accumulation::Ready(DataSet::NotParticipating) => {
-            assert!(not_participating, "NotParticipating in a DkgShares we weren't removed for");
-          }
-          Accumulation::NotReady => {}
-        }
-      }
-
-      Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => {
-        let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
-          self
-            .fatal_slash(signed.signer.to_bytes(), "InvalidDkgShare with an unrecognized attempt");
-          return;
-        };
-        let Some(range) = self.spec.i(&removed, signed.signer) else {
-          self.fatal_slash(
-            signed.signer.to_bytes(),
-            "InvalidDkgShare for a DKG they aren't participating in",
-          );
-          return;
-        };
-        if !range.contains(&accuser) {
-          self.fatal_slash(
-            signed.signer.to_bytes(),
-            "accused with a Participant index which wasn't theirs",
-          );
-          return;
-        }
-        if range.contains(&faulty) {
-          self.fatal_slash(signed.signer.to_bytes(), "accused self of having an InvalidDkgShare");
-          return;
-        }
-
-        let Some(share) = DkgShare::get(self.txn, genesis, accuser.into(), faulty.into()) else {
-          self.fatal_slash(
-            signed.signer.to_bytes(),
-            "InvalidDkgShare had a non-existent faulty participant",
-          );
-          return;
-        };
+      Transaction::DkgParticipation { participation, signed } => {
+        // Send the participation to the processor
         self
           .processors
           .send(
             self.spec.set().network,
-            key_gen::CoordinatorMessage::VerifyBlame {
-              id: KeyGenId { session: self.spec.set().session, attempt },
-              accuser,
-              accused: faulty,
-              share,
-              blame,
+            key_gen::CoordinatorMessage::Participation {
+              session: self.spec.set().session,
+              participant: self
+                .spec
+                .i(signed.signer)
+                .expect("signer wasn't a validator for this network?")
+                .start,
+              participation,
             },
           )
           .await;
       }
 
-      Transaction::DkgConfirmed { attempt, confirmation_share, signed } => {
-        let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
-          self.fatal_slash(signed.signer.to_bytes(), "DkgConfirmed with an unrecognized attempt");
-          return;
-        };
+      Transaction::DkgConfirmationNonces { attempt, confirmation_nonces, signed } => {
+        let data_spec =
+          DataSpecification { topic: Topic::DkgConfirmation, label: Label::Preprocess, attempt };
+        match self.handle_data(&data_spec, &confirmation_nonces.to_vec(), &signed) {
+          Accumulation::Ready(DataSet::Participating(confirmation_nonces)) => {
+            log::info!(
+              "got all DkgConfirmationNonces for {}, attempt {attempt}",
+              hex::encode(genesis)
+            );
+
+            ConfirmationNonces::set(self.txn, genesis, attempt, &confirmation_nonces);
+
+            // Send the expected DkgConfirmationShare
+            // TODO: Slight race condition here due to set, publish tx, then commit txn
+            let key_pair = DkgKeyPair::get(self.txn, genesis)
+              .expect("participating in confirming key we don't have");
+            let mut tx = match DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt)
+              .share(confirmation_nonces, &key_pair)
+            {
+              Ok(confirmation_share) => Transaction::DkgConfirmationShare {
+                attempt,
+                confirmation_share,
+                signed: Transaction::empty_signed(),
+              },
+              Err(participant) => Transaction::RemoveParticipant {
+                participant: self.spec.reverse_lookup_i(participant).unwrap(),
+                signed: Transaction::empty_signed(),
+              },
+            };
+            tx.sign(&mut OsRng, genesis, self.our_key);
+            self.publish_tributary_tx.publish_tributary_tx(tx).await;
+          }
+          Accumulation::Ready(DataSet::NotParticipating) | Accumulation::NotReady => {}
+        }
+      }
+
+      Transaction::DkgConfirmationShare { attempt, confirmation_share, signed } => {
         let data_spec =
           DataSpecification { topic: Topic::DkgConfirmation, label: Label::Share, attempt };
-        match self.handle_data(&removed, &data_spec, &confirmation_share.to_vec(), &signed) {
+        match self.handle_data(&data_spec, &confirmation_share.to_vec(), &signed) {
           Accumulation::Ready(DataSet::Participating(shares)) => {
-            log::info!("got all DkgConfirmed for {}", hex::encode(genesis));
-            let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
-              panic!(
-                "DkgConfirmed for everyone yet didn't have the removed parties for this attempt",
-              );
-            };
+            log::info!(
+              "got all DkgConfirmationShare for {}, attempt {attempt}",
+              hex::encode(genesis)
+            );
 
             let preprocesses = ConfirmationNonces::get(self.txn, genesis, attempt).unwrap();
 
             // TODO: This can technically happen under very very very specific timing as the txn
-            // put happens before DkgConfirmed, yet the txn commit isn't guaranteed to
-            let key_pair = DkgKeyPair::get(self.txn, genesis, attempt).expect(
-              "in DkgConfirmed handling, which happens after everyone \
-               (including us) fires DkgConfirmed, yet no confirming key pair",
+            // put happens before DkgConfirmationShare, yet the txn isn't guaranteed to be
+            // committed
+            let key_pair = DkgKeyPair::get(self.txn, genesis).expect(
+              "in DkgConfirmationShare handling, which happens after everyone \
+               (including us) fires DkgConfirmationShare, yet no confirming key pair",
             );
-            let mut confirmer = DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt)
-              .expect("confirming DKG for unrecognized attempt");
+
+            // Determine the bitstring representing who participated before we move `shares`
+            let validators = self.spec.validators();
+            let mut signature_participants = bitvec::vec::BitVec::with_capacity(validators.len());
+            for (participant, _) in validators {
+              signature_participants.push(
+                (participant == (<Ristretto as Ciphersuite>::generator() * self.our_key.deref())) ||
+                  shares.contains_key(&self.spec.i(participant).unwrap().start),
+              );
+            }
+
+            // Produce the final signature
+            let mut confirmer = DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt);
             let sig = match confirmer.complete(preprocesses, &key_pair, shares) {
               Ok(sig) => sig,
               Err(p) => {
-                let mut tx = Transaction::RemoveParticipantDueToDkg {
-                  participant: self.spec.reverse_lookup_i(&removed, p).unwrap(),
+                let mut tx = Transaction::RemoveParticipant {
+                  participant: self.spec.reverse_lookup_i(p).unwrap(),
                   signed: Transaction::empty_signed(),
                 };
                 tx.sign(&mut OsRng, genesis, self.our_key);
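
The bitstring built above carries one bit per validator, in `spec.validators()` order, set for ourselves and for anyone whose confirmation share was accumulated. A simplified sketch of that construction (the id and index types are assumed stand-ins for the crate's own):

use std::collections::HashMap;

fn participation_bits(
  validators: &[(u64, u16)], // (validator id, starting share index); assumed stand-ins
  our_id: u64,
  shares: &HashMap<u16, Vec<u8>>,
) -> bitvec::vec::BitVec<u8, bitvec::order::Lsb0> {
  let mut bits = bitvec::vec::BitVec::with_capacity(validators.len());
  for (validator, start_i) in validators {
    // Our own share is never in `shares` (we don't gossip to ourselves), so we
    // include ourselves explicitly
    bits.push((*validator == our_id) || shares.contains_key(start_i));
  }
  bits
}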
@@ -545,23 +348,18 @@ impl<
               }
             };
 
-            DkgLocallyCompleted::set(self.txn, genesis, &());
-
             self
               .publish_serai_tx
               .publish_set_keys(
                 self.db,
                 self.spec.set(),
-                removed.into_iter().map(|key| key.to_bytes().into()).collect(),
                 key_pair,
-                sig.into(),
+                signature_participants,
+                Signature(sig),
               )
               .await;
           }
-          Accumulation::Ready(DataSet::NotParticipating) => {
-            panic!("wasn't a participant in DKG confirmination shares")
-          }
-          Accumulation::NotReady => {}
+          Accumulation::Ready(DataSet::NotParticipating) | Accumulation::NotReady => {}
         }
       }
 
@@ -619,19 +417,8 @@ impl<
       }
 
       Transaction::SubstrateSign(data) => {
-        // Provided transactions ensure synchrony on any signing protocol, and we won't start
-        // signing with threshold keys before we've confirmed them on-chain
-        let Some(removed) =
-          crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
-        else {
-          self.fatal_slash(
-            data.signed.signer.to_bytes(),
-            "signing despite not having set keys on substrate",
-          );
-          return;
-        };
         let signer = data.signed.signer;
-        let Ok(()) = self.check_sign_data_len(&removed, signer, data.data.len()) else {
+        let Ok(()) = self.check_sign_data_len(signer, data.data.len()) else {
           return;
         };
         let expected_len = match data.label {
@@ -654,11 +441,11 @@ impl<
           attempt: data.attempt,
         };
         let Accumulation::Ready(DataSet::Participating(mut results)) =
-          self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed)
+          self.handle_data(&data_spec, &data.data.encode(), &data.signed)
         else {
           return;
         };
-        unflatten(self.spec, &removed, &mut results);
+        unflatten(self.spec, &mut results);
 
         let id = SubstrateSignId {
           session: self.spec.set().session,
@@ -679,16 +466,7 @@ impl<
       }
 
       Transaction::Sign(data) => {
-        let Some(removed) =
-          crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
-        else {
-          self.fatal_slash(
-            data.signed.signer.to_bytes(),
-            "signing despite not having set keys on substrate",
-          );
-          return;
-        };
-        let Ok(()) = self.check_sign_data_len(&removed, data.signed.signer, data.data.len()) else {
+        let Ok(()) = self.check_sign_data_len(data.signed.signer, data.data.len()) else {
           return;
         };
 
@@ -698,9 +476,9 @@ impl<
           attempt: data.attempt,
         };
         if let Accumulation::Ready(DataSet::Participating(mut results)) =
-          self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed)
+          self.handle_data(&data_spec, &data.data.encode(), &data.signed)
         {
-          unflatten(self.spec, &removed, &mut results);
+          unflatten(self.spec, &mut results);
           let id =
             SignId { session: self.spec.set().session, id: data.plan, attempt: data.attempt };
           self
@@ -741,8 +519,7 @@ impl<
       }
 
       Transaction::SlashReport(points, signed) => {
-        // Uses &[] as we only need the length which is independent to who else was removed
-        let signer_range = self.spec.i(&[], signed.signer).unwrap();
+        let signer_range = self.spec.i(signed.signer).unwrap();
         let signer_len = u16::from(signer_range.end) - u16::from(signer_range.start);
         if points.len() != (self.spec.validators().len() - 1) {
           self.fatal_slash(
@@ -1,8 +1,3 @@
-use dalek_ff_group::Ristretto;
-use ciphersuite::{group::GroupEncoding, Ciphersuite};
-
-use serai_client::validator_sets::primitives::ExternalValidatorSet;
-
 use tributary::{
   ReadWrite,
   transaction::{TransactionError, TransactionKind, Transaction as TransactionTrait},
@@ -25,39 +20,6 @@ pub use handle::*;
|
|||||||
|
|
||||||
pub mod scanner;
|
pub mod scanner;
|
||||||
|
|
||||||
pub fn removed_as_of_dkg_attempt(
|
|
||||||
getter: &impl Get,
|
|
||||||
genesis: [u8; 32],
|
|
||||||
attempt: u32,
|
|
||||||
) -> Option<Vec<<Ristretto as Ciphersuite>::G>> {
|
|
||||||
if attempt == 0 {
|
|
||||||
Some(vec![])
|
|
||||||
} else {
|
|
||||||
RemovedAsOfDkgAttempt::get(getter, genesis, attempt).map(|keys| {
|
|
||||||
keys.iter().map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap()).collect()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn removed_as_of_set_keys(
|
|
||||||
getter: &impl Get,
|
|
||||||
set: ExternalValidatorSet,
|
|
||||||
genesis: [u8; 32],
|
|
||||||
) -> Option<Vec<<Ristretto as Ciphersuite>::G>> {
|
|
||||||
// SeraiDkgCompleted has the key placed on-chain.
|
|
||||||
// This key can be uniquely mapped to an attempt so long as one participant was honest, which we
|
|
||||||
// assume as a presumably honest participant.
|
|
||||||
// Resolve from generated key to attempt to fatally slashed as of attempt.
|
|
||||||
|
|
||||||
// This expect will trigger if this is prematurely called and Substrate has tracked the keys yet
|
|
||||||
// we haven't locally synced and handled the Tributary
|
|
||||||
// All callers of this, at the time of writing, ensure the Tributary has sufficiently synced
|
|
||||||
// making the panic with context more desirable than the None
|
|
||||||
let attempt = KeyToDkgAttempt::get(getter, SeraiDkgCompleted::get(getter, set)?)
|
|
||||||
.expect("key completed on-chain didn't have an attempt related");
|
|
||||||
removed_as_of_dkg_attempt(getter, genesis, attempt)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn publish_signed_transaction<D: Db, P: crate::P2p>(
|
pub async fn publish_signed_transaction<D: Db, P: crate::P2p>(
|
||||||
txn: &mut D::Transaction<'_>,
|
txn: &mut D::Transaction<'_>,
|
||||||
tributary: &Tributary<D, Transaction, P>,
|
tributary: &Tributary<D, Transaction, P>,
|
||||||
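Note: the two helpers deleted above performed a key-to-attempt-to-removed-set resolution. A condensed, self-contained sketch of that flow, with plain maps standing in for the `SeraiDkgCompleted`, `KeyToDkgAttempt`, and `RemovedAsOfDkgAttempt` database accessors and keys modeled as raw 32-byte arrays:

```rust
use std::collections::HashMap;

// Condensed sketch of the deleted resolution flow: an on-chain key is mapped
// back to the DKG attempt which produced it, and that attempt to the
// validators removed as of it. Types are simplified so the sketch compiles
// on its own.
fn removed_as_of_set_keys(
  completed_key: Option<[u8; 32]>,
  key_to_attempt: &HashMap<[u8; 32], u32>,
  removed_per_attempt: &HashMap<u32, Vec<[u8; 32]>>,
) -> Option<Vec<[u8; 32]>> {
  // No key set on-chain yet means there's nothing to resolve
  let key = completed_key?;
  // The deleted code treated a missing mapping here as a panic-worthy invariant
  let attempt = *key_to_attempt.get(&key)?;
  // Attempt 0 never had anyone removed, by definition
  if attempt == 0 {
    return Some(vec![]);
  }
  removed_per_attempt.get(&attempt).cloned()
}
```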

@@ -1,17 +1,18 @@
-use core::{marker::PhantomData, ops::Deref, future::Future, time::Duration};
+use core::{marker::PhantomData, future::Future, time::Duration};
-use std::{sync::Arc, collections::HashSet};
+use std::sync::Arc;

 use zeroize::Zeroizing;

-use dalek_ff_group::Ristretto;
+use rand_core::OsRng;
-use ciphersuite::{group::GroupEncoding, Ciphersuite};
+use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};

 use tokio::sync::broadcast;

 use scale::{Encode, Decode};
 use serai_client::{
-primitives::{SeraiAddress, Signature},
+primitives::Signature,
-validator_sets::primitives::{ExternalValidatorSet, KeyPair},
+validator_sets::primitives::{KeyPair, ValidatorSet},
 Serai,
 };

@@ -39,7 +40,7 @@ pub enum RecognizedIdType {
 pub trait RIDTrait {
 async fn recognized_id(
 &self,
-set: ExternalValidatorSet,
+set: ValidatorSet,
 genesis: [u8; 32],
 kind: RecognizedIdType,
 id: Vec<u8>,

@@ -48,12 +49,12 @@ pub trait RIDTrait {
 #[async_trait::async_trait]
 impl<
 FRid: Send + Future<Output = ()>,
-F: Sync + Fn(ExternalValidatorSet, [u8; 32], RecognizedIdType, Vec<u8>) -> FRid,
+F: Sync + Fn(ValidatorSet, [u8; 32], RecognizedIdType, Vec<u8>) -> FRid,
 > RIDTrait for F
 {
 async fn recognized_id(
 &self,
-set: ExternalValidatorSet,
+set: ValidatorSet,
 genesis: [u8; 32],
 kind: RecognizedIdType,
 id: Vec<u8>,

@@ -67,9 +68,9 @@ pub trait PublishSeraiTransaction {
 async fn publish_set_keys(
 &self,
 db: &(impl Sync + Get),
-set: ExternalValidatorSet,
+set: ValidatorSet,
-removed: Vec<SeraiAddress>,
 key_pair: KeyPair,
+signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
 signature: Signature,
 );
 }

@@ -87,7 +88,7 @@ mod impl_pst_for_serai {
 async fn publish(
 serai: &Serai,
 db: &impl Get,
-set: ExternalValidatorSet,
+set: ValidatorSet,
 tx: serai_client::Transaction,
 meta: $Meta,
 ) -> bool {

@@ -129,19 +130,14 @@ mod impl_pst_for_serai {
 async fn publish_set_keys(
 &self,
 db: &(impl Sync + Get),
-set: ExternalValidatorSet,
+set: ValidatorSet,
-removed: Vec<SeraiAddress>,
 key_pair: KeyPair,
+signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
 signature: Signature,
 ) {
-// TODO: BoundedVec as an arg to avoid this expect
-let tx = SeraiValidatorSets::set_keys(
-set.network,
-removed.try_into().expect("removing more than allowed"),
-key_pair,
-signature,
-);
-async fn check(serai: SeraiValidatorSets<'_>, set: ExternalValidatorSet, (): ()) -> bool {
+let tx =
+SeraiValidatorSets::set_keys(set.network, key_pair, signature_participants, signature);
+async fn check(serai: SeraiValidatorSets<'_>, set: ValidatorSet, (): ()) -> bool {
 if matches!(serai.keys(set).await, Ok(Some(_))) {
 log::info!("another coordinator set key pair for {:?}", set);
 return true;
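The reworked `set_keys` drops the `removed` vector in favor of a `signature_participants` bitvec. A minimal sketch of how such a bitvec could be built, assuming (this convention is not shown in the diff) one bit per validator in set order, set when that validator contributed to the MuSig signature:

```rust
use bitvec::prelude::*;

// Illustrative only: one bit per validator, in set order (assumed convention,
// not taken from this diff), set when that validator participated.
fn participation_bits(participated: &[bool]) -> BitVec<u8, Lsb0> {
  let mut bits = BitVec::with_capacity(participated.len());
  for p in participated {
    bits.push(*p);
  }
  bits
}
```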

@@ -250,18 +246,15 @@ impl<

 let genesis = self.spec.genesis();

-let current_fatal_slashes = FatalSlashes::get_as_keys(self.txn, genesis);

 // Calculate the shares still present, spinning if not enough are
-// still_present_shares is used by a below branch, yet it's a natural byproduct of checking if
-// we should spin, hence storing it in a variable here
-let still_present_shares = {
+{
 // Start with the original n value
-let mut present_shares = self.spec.n(&[]);
+let mut present_shares = self.spec.n();
 // Remove everyone fatally slashed
+let current_fatal_slashes = FatalSlashes::get_as_keys(self.txn, genesis);
 for removed in &current_fatal_slashes {
 let original_i_for_removed =
-self.spec.i(&[], *removed).expect("removed party was never present");
+self.spec.i(*removed).expect("removed party was never present");
 let removed_shares =
 u16::from(original_i_for_removed.end) - u16::from(original_i_for_removed.start);
 present_shares -= removed_shares;

@@ -277,79 +270,17 @@ impl<
 tokio::time::sleep(core::time::Duration::from_secs(60)).await;
 }
 }
+}
-present_shares
-};

 for topic in ReattemptDb::take(self.txn, genesis, self.block_number) {
 let attempt = AttemptDb::start_next_attempt(self.txn, genesis, topic);
-log::info!("re-attempting {topic:?} with attempt {attempt}");
+log::info!("potentially re-attempting {topic:?} with attempt {attempt}");

 // Slash people who failed to participate as expected in the prior attempt
 {
 let prior_attempt = attempt - 1;
-let (removed, expected_participants) = match topic {
-Topic::Dkg => {
-// Every validator who wasn't removed is expected to have participated
-let removed =
-crate::tributary::removed_as_of_dkg_attempt(self.txn, genesis, prior_attempt)
-.expect("prior attempt didn't have its removed saved to disk");
-let removed_set = removed.iter().copied().collect::<HashSet<_>>();
-(
-removed,
-self
-.spec
-.validators()
-.into_iter()
-.filter_map(|(validator, _)| {
-Some(validator).filter(|validator| !removed_set.contains(validator))
-})
-.collect(),
-)
-}
-Topic::DkgConfirmation => {
-panic!("TODO: re-attempting DkgConfirmation when we should be re-attempting the Dkg")
-}
-Topic::SubstrateSign(_) | Topic::Sign(_) => {
-let removed =
-crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
-.expect("SubstrateSign/Sign yet have yet to set keys");
-// TODO: If 67% sent preprocesses, this should be them. Else, this should be vec![]
-let expected_participants = vec![];
-(removed, expected_participants)
-}
-};
+// TODO: If 67% sent preprocesses, this should be them. Else, this should be vec![]
+let expected_participants: Vec<<Ristretto as Ciphersuite>::G> = vec![];

-let (expected_topic, expected_label) = match topic {
-Topic::Dkg => {
-let n = self.spec.n(&removed);
-// If we got all the DKG shares, we should be on DKG confirmation
-let share_spec =
-DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt: prior_attempt };
-if DataReceived::get(self.txn, genesis, &share_spec).unwrap_or(0) == n {
-// Label::Share since there is no Label::Preprocess for DkgConfirmation since the
-// preprocess is part of Topic::Dkg Label::Share
-(Topic::DkgConfirmation, Label::Share)
-} else {
-let preprocess_spec = DataSpecification {
-topic: Topic::Dkg,
-label: Label::Preprocess,
-attempt: prior_attempt,
-};
-// If we got all the DKG preprocesses, DKG shares
-if DataReceived::get(self.txn, genesis, &preprocess_spec).unwrap_or(0) == n {
-// Label::Share since there is no Label::Preprocess for DkgConfirmation since the
-// preprocess is part of Topic::Dkg Label::Share
-(Topic::Dkg, Label::Share)
-} else {
-(Topic::Dkg, Label::Preprocess)
-}
-}
-}
-Topic::DkgConfirmation => unreachable!(),
-// If we got enough participants to move forward, then we expect shares from them all
-Topic::SubstrateSign(_) | Topic::Sign(_) => (topic, Label::Share),
-};

 let mut did_not_participate = vec![];
 for expected_participant in expected_participants {
@@ -357,8 +288,9 @@ impl<
 self.txn,
 genesis,
 &DataSpecification {
-topic: expected_topic,
-label: expected_label,
+topic,
+// Since we got the preprocesses, we were supposed to get the shares
+label: Label::Share,
 attempt: prior_attempt,
 },
 &expected_participant.to_bytes(),

@@ -374,15 +306,8 @@ impl<
 // Accordingly, clear did_not_participate
 // TODO

-// If during the DKG, explicitly mark these people as having been offline
-// TODO: If they were offline sufficiently long ago, don't strike them off
-if topic == Topic::Dkg {
-let mut existing = OfflineDuringDkg::get(self.txn, genesis).unwrap_or(vec![]);
-for did_not_participate in did_not_participate {
-existing.push(did_not_participate.to_bytes());
-}
-OfflineDuringDkg::set(self.txn, genesis, &existing);
-}
+// TODO: Increment the slash points of people who didn't preprocess in some expected window
+// of time

 // Slash everyone who didn't participate as expected
 // This may be overzealous as if a minority detects a completion, they'll abort yet the
@@ -412,75 +337,22 @@ impl<
 then preprocesses. This only sends preprocesses).
 */
 match topic {
-Topic::Dkg => {
-let mut removed = current_fatal_slashes.clone();
+Topic::DkgConfirmation => {
+if SeraiDkgCompleted::get(self.txn, self.spec.set()).is_none() {
+log::info!("re-attempting DKG confirmation with attempt {attempt}");

-let t = self.spec.t();
-{
-let mut present_shares = still_present_shares;

-// Load the parties marked as offline across the various attempts
-let mut offline = OfflineDuringDkg::get(self.txn, genesis)
-.unwrap_or(vec![])
-.iter()
-.map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap())
-.collect::<Vec<_>>();
-// Pop from the list to prioritize the removal of those recently offline
-while let Some(offline) = offline.pop() {
-// Make sure they weren't removed already (such as due to being fatally slashed)
-// This also may trigger if they were offline across multiple attempts
-if removed.contains(&offline) {
-continue;
-}

-// If we can remove them and still meet the threshold, do so
-let original_i_for_offline =
-self.spec.i(&[], offline).expect("offline was never present?");
-let offline_shares =
-u16::from(original_i_for_offline.end) - u16::from(original_i_for_offline.start);
-if (present_shares - offline_shares) >= t {
-present_shares -= offline_shares;
-removed.push(offline);
-}

-// If we've removed as many people as we can, break
-if present_shares == t {
-break;
-}
-}
-}

-RemovedAsOfDkgAttempt::set(
-self.txn,
-genesis,
-attempt,
-&removed.iter().map(<Ristretto as Ciphersuite>::G::to_bytes).collect(),
-);

-if DkgLocallyCompleted::get(self.txn, genesis).is_none() {
-let Some(our_i) = self.spec.i(&removed, Ristretto::generator() * self.our_key.deref())
-else {
-continue;
+// Since it wasn't completed, publish our nonces for the next attempt
+let confirmation_nonces =
+crate::tributary::dkg_confirmation_nonces(self.our_key, self.spec, self.txn, attempt);
+let mut tx = Transaction::DkgConfirmationNonces {
+attempt,
+confirmation_nonces,
+signed: Transaction::empty_signed(),
 };
+tx.sign(&mut OsRng, genesis, self.our_key);
-// Since it wasn't completed, instruct the processor to start the next attempt
-let id =
-processor_messages::key_gen::KeyGenId { session: self.spec.set().session, attempt };

-let params =
-frost::ThresholdParams::new(t, self.spec.n(&removed), our_i.start).unwrap();
-let shares = u16::from(our_i.end) - u16::from(our_i.start);

-self
-.processors
-.send(
-self.spec.set().network,
-processor_messages::key_gen::CoordinatorMessage::GenerateKey { id, params, shares },
-)
-.await;
+self.publish_tributary_tx.publish_tributary_tx(tx).await;
 }
 }
-Topic::DkgConfirmation => unreachable!(),
 Topic::SubstrateSign(inner_id) => {
 let id = processor_messages::coordinator::SubstrateSignId {
 session: self.spec.set().session,

@@ -497,6 +369,8 @@ impl<
 crate::cosign_evaluator::LatestCosign::get(self.txn, self.spec.set().network)
 .map_or(0, |cosign| cosign.block_number);
 if latest_cosign < block_number {
+log::info!("re-attempting cosigning {block_number:?} with attempt {attempt}");

 // Instruct the processor to start the next attempt
 self
 .processors

@@ -513,6 +387,8 @@ impl<
 SubstrateSignableId::Batch(batch) => {
 // If the Batch hasn't appeared on-chain...
 if BatchInstructionsHashDb::get(self.txn, self.spec.set().network, batch).is_none() {
+log::info!("re-attempting signing batch {batch:?} with attempt {attempt}");

 // Instruct the processor to start the next attempt
 // The processor won't continue if it's already signed a Batch
 // Prior checking if the Batch is on-chain just may reduce the non-participating

@@ -530,6 +406,11 @@ impl<
 // If this Tributary hasn't been retired...
 // (published SlashReport/took too long to do so)
 if crate::RetiredTributaryDb::get(self.txn, self.spec.set()).is_none() {
+log::info!(
+"re-attempting signing slash report for {:?} with attempt {attempt}",
+self.spec.set()
+);

 let report = SlashReport::get(self.txn, self.spec.set())
 .expect("re-attempting signing a SlashReport we don't have?");
 self

@@ -576,8 +457,7 @@ impl<
 };
 // Assign them 0 points for themselves
 report.insert(i, 0);
-// Uses &[] as we only need the length which is independent to who else was removed
-let signer_i = self.spec.i(&[], validator).unwrap();
+let signer_i = self.spec.i(validator).unwrap();
 let signer_len = u16::from(signer_i.end) - u16::from(signer_i.start);
 // Push `n` copies, one for each of their shares
 for _ in 0 .. signer_len {
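Throughout these hunks, a validator's weight is recovered from its `Range<Participant>` as `end - start`. A trivial, self-contained illustration (indices modeled as `u16` rather than `frost`'s `Participant`):

```rust
use core::ops::Range;

// A validator's `i` is a range of consecutive participant indices, one per
// key share, so its share count is simply the range's width.
fn share_count(range: Range<u16>) -> u16 {
  range.end - range.start
}

fn main() {
  // e.g. a validator occupying indices 4..7 holds 3 key shares
  assert_eq!(share_count(4 .. 7), 3);
}
```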

@@ -55,7 +55,7 @@
 */

 use core::ops::Deref;
-use std::collections::HashMap;
+use std::collections::{HashSet, HashMap};

 use zeroize::{Zeroize, Zeroizing};

@@ -63,21 +63,19 @@ use rand_core::OsRng;

 use blake2::{Digest, Blake2s256};

-use dalek_ff_group::Ristretto;
-use ciphersuite::{
-group::{ff::PrimeField, GroupEncoding},
-Ciphersuite,
+use ciphersuite::{group::ff::PrimeField, Ciphersuite, Ristretto};
+use frost::{
+FrostError,
+dkg::{Participant, musig::musig},
+ThresholdKeys,
+sign::*,
 };
-use dkg_musig::musig;
-use frost::{FrostError, dkg::Participant, ThresholdKeys, sign::*};
 use frost_schnorrkel::Schnorrkel;

 use scale::Encode;

-use serai_client::{
-Public,
-validator_sets::primitives::{KeyPair, musig_context, set_keys_message},
-};
+#[rustfmt::skip]
+use serai_client::validator_sets::primitives::{ValidatorSet, KeyPair, musig_context, set_keys_message};

 use serai_db::*;

@@ -86,6 +84,7 @@ use crate::tributary::TributarySpec;
 create_db!(
 SigningProtocolDb {
 CachedPreprocesses: (context: &impl Encode) -> [u8; 32]
+DataSignedWith: (context: &impl Encode) -> (Vec<u8>, HashMap<Participant, Vec<u8>>),
 }
 );

@@ -114,16 +113,22 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
 };
 let encryption_key_slice: &mut [u8] = encryption_key.as_mut();

-let algorithm = Schnorrkel::new(b"substrate");
+// Create the MuSig keys
 let keys: ThresholdKeys<Ristretto> =
-musig(musig_context(self.spec.set().into()), self.key.clone(), participants)
+musig(&musig_context(self.spec.set()), self.key, participants)
 .expect("signing for a set we aren't in/validator present multiple times")
 .into();

+// Define the algorithm
+let algorithm = Schnorrkel::new(b"substrate");

+// Check if we've prior preprocessed
 if CachedPreprocesses::get(self.txn, &self.context).is_none() {
+// If we haven't, we create a machine solely to obtain the preprocess with
 let (machine, _) =
 AlgorithmMachine::new(algorithm.clone(), keys.clone()).preprocess(&mut OsRng);

+// Cache and save the preprocess to disk
 let mut cache = machine.cache();
 assert_eq!(cache.0.len(), 32);
 #[allow(clippy::needless_range_loop)]

@@ -134,13 +139,15 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
 CachedPreprocesses::set(self.txn, &self.context, &cache.0);
 }

+// We're now guaranteed to have the preprocess, hence why this `unwrap` is safe
 let cached = CachedPreprocesses::get(self.txn, &self.context).unwrap();
-let mut cached: Zeroizing<[u8; 32]> = Zeroizing::new(cached);
+let mut cached = Zeroizing::new(cached);
 #[allow(clippy::needless_range_loop)]
 for b in 0 .. 32 {
 cached[b] ^= encryption_key_slice[b];
 }
 encryption_key_slice.zeroize();
+// Create the machine from the cached preprocess
 let (machine, preprocess) =
 AlgorithmSignMachine::from_cache(algorithm, keys, CachedPreprocess(cached));

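The cached preprocess above is XORed with a per-context encryption key before being written to disk and XORed again on load. A minimal sketch of that round-trip, assuming (as the diff does) both buffers are exactly 32 bytes:

```rust
// XOR-encrypting the cache for storage; applying the same key again on load
// recovers the plaintext preprocess.
fn xor_in_place(cache: &mut [u8; 32], key: &[u8; 32]) {
  for b in 0 .. 32 {
    cache[b] ^= key[b];
  }
}

fn main() {
  let mut cache = [0x11u8; 32];
  let key = [0xAAu8; 32];
  let original = cache;
  xor_in_place(&mut cache, &key); // encrypt before persisting
  xor_in_place(&mut cache, &key); // decrypt after loading
  assert_eq!(cache, original);
}
```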
@@ -153,8 +160,29 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
 mut serialized_preprocesses: HashMap<Participant, Vec<u8>>,
 msg: &[u8],
 ) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
-let machine = self.preprocess_internal(participants).0;
+// We can't clear the preprocess as we still need it to accumulate all of the shares
+// We do save the message we signed so any future calls with distinct messages panic
+// This assumes the txn deciding this data is committed before the share is broadcast
+if let Some((existing_msg, existing_preprocesses)) =
+DataSignedWith::get(self.txn, &self.context)
+{
+assert_eq!(msg, &existing_msg, "obtaining a signature share for a distinct message");
+assert_eq!(
+&serialized_preprocesses, &existing_preprocesses,
+"obtaining a signature share with a distinct set of preprocesses"
+);
+} else {
+DataSignedWith::set(
+self.txn,
+&self.context,
+&(msg.to_vec(), serialized_preprocesses.clone()),
+);
+}

+// Get the preprocessed machine
+let (machine, _) = self.preprocess_internal(participants);

+// Deserialize all the preprocesses
 let mut participants = serialized_preprocesses.keys().copied().collect::<Vec<_>>();
 participants.sort();
 let mut preprocesses = HashMap::new();

@@ -167,13 +195,14 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
 );
 }

+// Sign the share
 let (machine, share) = machine.sign(preprocesses, msg).map_err(|e| match e {
 FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"),
 FrostError::InvalidParticipant(_, _) |
 FrostError::InvalidSigningSet(_) |
 FrostError::InvalidParticipantQuantity(_, _) |
 FrostError::DuplicatedParticipant(_) |
-FrostError::MissingParticipant(_) => unreachable!("{e:?}"),
+FrostError::MissingParticipant(_) => panic!("unexpected error during sign: {e:?}"),
 FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,
 })?;

 // Get the keys of the participants, noted by their threshold is, and return a new map indexed by
-// the MuSig is.
+// their MuSig is.
 fn threshold_i_map_to_keys_and_musig_i_map(
 spec: &TributarySpec,
-removed: &[<Ristretto as Ciphersuite>::G],
 our_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
 mut map: HashMap<Participant, Vec<u8>>,
 ) -> (Vec<<Ristretto as Ciphersuite>::G>, HashMap<Participant, Vec<u8>>) {
 // Insert our own index so calculations aren't offset
 let our_threshold_i = spec
-.i(removed, <Ristretto as Ciphersuite>::generator() * our_key.deref())
+.i(<Ristretto as Ciphersuite>::generator() * our_key.deref())
-.expect("MuSig t-of-n signing a for a protocol we were removed from")
+.expect("not in a set we're signing for")
 .start;
+// Asserts we weren't unexpectedly already present
 assert!(map.insert(our_threshold_i, vec![]).is_none());

 let spec_validators = spec.validators();
 let key_from_threshold_i = |threshold_i| {
 for (key, _) in &spec_validators {
-if threshold_i == spec.i(removed, *key).expect("MuSig t-of-n participant was removed").start {
+if threshold_i == spec.i(*key).expect("validator wasn't in a set they're in").start {
 return *key;
 }
 }

@@ -232,29 +261,37 @@ fn threshold_i_map_to_keys_and_musig_i_map(
 let mut threshold_is = map.keys().copied().collect::<Vec<_>>();
 threshold_is.sort();
 for threshold_i in threshold_is {
-sorted.push((key_from_threshold_i(threshold_i), map.remove(&threshold_i).unwrap()));
+sorted.push((
+threshold_i,
+key_from_threshold_i(threshold_i),
+map.remove(&threshold_i).unwrap(),
+));
 }

 // Now that signers are sorted, with their shares, create a map with the is needed for MuSig
 let mut participants = vec![];
 let mut map = HashMap::new();
-for (raw_i, (key, share)) in sorted.into_iter().enumerate() {
-let musig_i = u16::try_from(raw_i).unwrap() + 1;
+let mut our_musig_i = None;
+for (raw_i, (threshold_i, key, share)) in sorted.into_iter().enumerate() {
+let musig_i = Participant::new(u16::try_from(raw_i).unwrap() + 1).unwrap();
+if threshold_i == our_threshold_i {
+our_musig_i = Some(musig_i);
+}
 participants.push(key);
-map.insert(Participant::new(musig_i).unwrap(), share);
+map.insert(musig_i, share);
 }

-map.remove(&our_threshold_i).unwrap();
+map.remove(&our_musig_i.unwrap()).unwrap();

 (participants, map)
 }
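The fix above threads each signer's threshold index through the sort so `our_musig_i` can be tracked and removed by its MuSig index rather than its threshold index. A self-contained sketch of the remapping itself (indices modeled as plain `u16`s rather than `frost::Participant`):

```rust
use std::collections::HashMap;

// Signers are sorted by threshold index, then assigned consecutive 1-based
// MuSig indices; gaps in the threshold indices disappear in the output.
fn remap(mut by_threshold_i: HashMap<u16, Vec<u8>>) -> HashMap<u16, Vec<u8>> {
  let mut threshold_is = by_threshold_i.keys().copied().collect::<Vec<_>>();
  threshold_is.sort();
  let mut by_musig_i = HashMap::new();
  for (raw_i, threshold_i) in threshold_is.into_iter().enumerate() {
    let musig_i = u16::try_from(raw_i).unwrap() + 1;
    by_musig_i.insert(musig_i, by_threshold_i.remove(&threshold_i).unwrap());
  }
  by_musig_i
}

fn main() {
  let mut input = HashMap::new();
  input.insert(4u16, vec![0]);
  input.insert(9u16, vec![1]);
  let output = remap(input);
  // Threshold indices 4 and 9 become MuSig indices 1 and 2
  assert_eq!(output[&1], vec![0]);
  assert_eq!(output[&2], vec![1]);
}
```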

-type DkgConfirmerSigningProtocol<'a, T> = SigningProtocol<'a, T, (&'static [u8; 12], u32)>;
+type DkgConfirmerSigningProtocol<'a, T> =
+SigningProtocol<'a, T, (&'static [u8; 12], ValidatorSet, u32)>;

 pub(crate) struct DkgConfirmer<'a, T: DbTxn> {
 key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
 spec: &'a TributarySpec,
-removed: Vec<<Ristretto as Ciphersuite>::G>,
 txn: &'a mut T,
 attempt: u32,
 }

@@ -265,19 +302,19 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
 spec: &'a TributarySpec,
 txn: &'a mut T,
 attempt: u32,
-) -> Option<DkgConfirmer<'a, T>> {
-// This relies on how confirmations are inlined into the DKG protocol and they accordingly
-// share attempts
-let removed = crate::tributary::removed_as_of_dkg_attempt(txn, spec.genesis(), attempt)?;
-Some(DkgConfirmer { key, spec, removed, txn, attempt })
+) -> DkgConfirmer<'a, T> {
+DkgConfirmer { key, spec, txn, attempt }
 }

 fn signing_protocol(&mut self) -> DkgConfirmerSigningProtocol<'_, T> {
-let context = (b"DkgConfirmer", self.attempt);
+let context = (b"DkgConfirmer", self.spec.set(), self.attempt);
 SigningProtocol { key: self.key, spec: self.spec, txn: self.txn, context }
 }

 fn preprocess_internal(&mut self) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {
-let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();
+// This preprocesses with just us as we only decide the participants after obtaining
+// preprocesses
+let participants = vec![<Ristretto as Ciphersuite>::generator() * self.key.deref()];
 self.signing_protocol().preprocess_internal(&participants)
 }
 // Get the preprocess for this confirmation.

@@ -290,14 +327,9 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
 preprocesses: HashMap<Participant, Vec<u8>>,
 key_pair: &KeyPair,
 ) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
-let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();
-let preprocesses =
-threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, preprocesses).1;
-let msg = set_keys_message(
-&self.spec.set(),
-&self.removed.iter().map(|key| Public::from(key.to_bytes())).collect::<Vec<_>>(),
-key_pair,
-);
+let (participants, preprocesses) =
+threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, preprocesses);
+let msg = set_keys_message(&self.spec.set(), key_pair);
 self.signing_protocol().share_internal(&participants, preprocesses, &msg)
 }
 // Get the share for this confirmation, if the preprocesses are valid.

@@ -315,8 +347,9 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
 key_pair: &KeyPair,
 shares: HashMap<Participant, Vec<u8>>,
 ) -> Result<[u8; 64], Participant> {
-let shares =
-threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, shares).1;
+assert_eq!(preprocesses.keys().collect::<HashSet<_>>(), shares.keys().collect::<HashSet<_>>());
+let shares = threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, shares).1;

 let machine = self
 .share_internal(preprocesses, key_pair)

@@ -3,14 +3,13 @@ use std::{io, collections::HashMap};

 use transcript::{Transcript, RecommendedTranscript};

-use dalek_ff_group::Ristretto;
-use ciphersuite::{group::GroupEncoding, Ciphersuite};
+use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
 use frost::Participant;

 use scale::Encode;
 use borsh::{BorshSerialize, BorshDeserialize};

-use serai_client::{primitives::PublicKey, validator_sets::primitives::ExternalValidatorSet};
+use serai_client::validator_sets::primitives::ValidatorSet;

 fn borsh_serialize_validators<W: io::Write>(
 validators: &Vec<(<Ristretto as Ciphersuite>::G, u16)>,
@@ -44,32 +43,27 @@ fn borsh_deserialize_validators<R: io::Read>(
 pub struct TributarySpec {
 serai_block: [u8; 32],
 start_time: u64,
-set: ExternalValidatorSet,
+set: ValidatorSet,
 #[borsh(
 serialize_with = "borsh_serialize_validators",
 deserialize_with = "borsh_deserialize_validators"
 )]
 validators: Vec<(<Ristretto as Ciphersuite>::G, u16)>,
+evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
 }

 impl TributarySpec {
 pub fn new(
 serai_block: [u8; 32],
 start_time: u64,
-set: ExternalValidatorSet,
+set: ValidatorSet,
-set_participants: Vec<(PublicKey, u16)>,
+validators: Vec<(<Ristretto as Ciphersuite>::G, u16)>,
+evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
 ) -> TributarySpec {
-let mut validators = vec![];
-for (participant, shares) in set_participants {
-let participant = <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut participant.0.as_ref())
-.expect("invalid key registered as participant");
-validators.push((participant, shares));
-}

-Self { serai_block, start_time, set, validators }
+Self { serai_block, start_time, set, validators, evrf_public_keys }
 }

-pub fn set(&self) -> ExternalValidatorSet {
+pub fn set(&self) -> ValidatorSet {
 self.set
 }

@@ -89,24 +83,15 @@ impl TributarySpec {
 self.start_time
 }

-pub fn n(&self, removed_validators: &[<Ristretto as Ciphersuite>::G]) -> u16 {
-self
-.validators
-.iter()
-.map(|(validator, weight)| if removed_validators.contains(validator) { 0 } else { *weight })
-.sum()
+pub fn n(&self) -> u16 {
+self.validators.iter().map(|(_, weight)| *weight).sum()
 }

 pub fn t(&self) -> u16 {
-// t doesn't change with regards to the amount of removed validators
-((2 * self.n(&[])) / 3) + 1
+((2 * self.n()) / 3) + 1
 }

-pub fn i(
-&self,
-removed_validators: &[<Ristretto as Ciphersuite>::G],
-key: <Ristretto as Ciphersuite>::G,
-) -> Option<Range<Participant>> {
+pub fn i(&self, key: <Ristretto as Ciphersuite>::G) -> Option<Range<Participant>> {
 let mut all_is = HashMap::new();
 let mut i = 1;
 for (validator, weight) in &self.validators {

@@ -117,34 +102,12 @@ impl TributarySpec {
 i += weight;
 }

-let original_i = all_is.get(&key)?.clone();
-let mut result_i = original_i.clone();
-for removed_validator in removed_validators {
-let removed_i = all_is
-.get(removed_validator)
-.expect("removed validator wasn't present in set to begin with");
-// If the queried key was removed, return None
-if &original_i == removed_i {
-return None;
-}

-// If the removed was before the queried, shift the queried down accordingly
-if removed_i.start < original_i.start {
-let removed_shares = u16::from(removed_i.end) - u16::from(removed_i.start);
-result_i.start = Participant::new(u16::from(original_i.start) - removed_shares).unwrap();
-result_i.end = Participant::new(u16::from(original_i.end) - removed_shares).unwrap();
-}
-}
-Some(result_i)
+Some(all_is.get(&key)?.clone())
 }

-pub fn reverse_lookup_i(
-&self,
-removed_validators: &[<Ristretto as Ciphersuite>::G],
-i: Participant,
-) -> Option<<Ristretto as Ciphersuite>::G> {
+pub fn reverse_lookup_i(&self, i: Participant) -> Option<<Ristretto as Ciphersuite>::G> {
 for (validator, _) in &self.validators {
-if self.i(removed_validators, *validator).map_or(false, |range| range.contains(&i)) {
+if self.i(*validator).map_or(false, |range| range.contains(&i)) {
 return Some(*validator);
 }
 }
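With the removed-validator shifting gone, `i` reduces to walking validators in order and handing each a contiguous range of `weight` indices. A self-contained sketch of that assignment (keys modeled as strings):

```rust
use std::{ops::Range, collections::HashMap};

// Each validator claims `weight` consecutive 1-based participant indices;
// lookup is now a plain map access with no shifting for removed validators.
fn index_ranges(validators: &[(&str, u16)]) -> HashMap<String, Range<u16>> {
  let mut all_is = HashMap::new();
  let mut i = 1;
  for (validator, weight) in validators {
    all_is.insert(validator.to_string(), i .. (i + weight));
    i += weight;
  }
  all_is
}

fn main() {
  let ranges = index_ranges(&[("alice", 2), ("bob", 1)]);
  assert_eq!(ranges["alice"], 1 .. 3);
  assert_eq!(ranges["bob"], 3 .. 4);
}
```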
@@ -154,4 +117,8 @@ impl TributarySpec {
 pub fn validators(&self) -> Vec<(<Ristretto as Ciphersuite>::G, u64)> {
 self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect()
 }

+pub fn evrf_public_keys(&self) -> Vec<([u8; 32], Vec<u8>)> {
+self.evrf_public_keys.clone()
+}
 }

@@ -7,13 +7,11 @@ use rand_core::{RngCore, CryptoRng};
 use blake2::{Digest, Blake2s256};
 use transcript::{Transcript, RecommendedTranscript};

-use dalek_ff_group::Ristretto;
 use ciphersuite::{
 group::{ff::Field, GroupEncoding},
-Ciphersuite,
+Ciphersuite, Ristretto,
 };
 use schnorr::SchnorrSignature;
-use frost::Participant;

 use scale::{Encode, Decode};
 use processor_messages::coordinator::SubstrateSignableId;

@@ -131,32 +129,26 @@ impl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> SignData<Id> {

 #[derive(Clone, PartialEq, Eq)]
 pub enum Transaction {
-RemoveParticipantDueToDkg {
+RemoveParticipant {
 participant: <Ristretto as Ciphersuite>::G,
 signed: Signed,
 },

-DkgCommitments {
+DkgParticipation {
-attempt: u32,
-commitments: Vec<Vec<u8>>,
+participation: Vec<u8>,
 signed: Signed,
 },
-DkgShares {
+DkgConfirmationNonces {
+// The confirmation attempt
 attempt: u32,
-// Sending Participant, Receiving Participant, Share
-shares: Vec<Vec<Vec<u8>>>,
+// The nonces for DKG confirmation attempt #attempt
 confirmation_nonces: [u8; 64],
 signed: Signed,
 },
-InvalidDkgShare {
+DkgConfirmationShare {
-attempt: u32,
+// The confirmation attempt
-accuser: Participant,
-faulty: Participant,
-blame: Option<Vec<u8>>,
-signed: Signed,
-},
-DkgConfirmed {
 attempt: u32,
+// The share for DKG confirmation attempt #attempt
 confirmation_share: [u8; 32],
 signed: Signed,
 },
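A condensed view of the variant reshuffle in this hunk, payloads elided so the sketch stands alone: the multi-round commitments/shares/blame machinery collapses into a single participation blob plus a two-step nonce-then-share confirmation.

```rust
// Before: four DKG-related transaction kinds, including explicit blame
#[allow(dead_code)]
enum OldDkgTxs { DkgCommitments, DkgShares, InvalidDkgShare, DkgConfirmed }

// After: one participation blob, then a two-step confirmation
#[allow(dead_code)]
enum NewDkgTxs { DkgParticipation, DkgConfirmationNonces, DkgConfirmationShare }
```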
@@ -198,29 +190,22 @@ pub enum Transaction {
 impl Debug for Transaction {
 fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
 match self {
-Transaction::RemoveParticipantDueToDkg { participant, signed } => fmt
+Transaction::RemoveParticipant { participant, signed } => fmt
-.debug_struct("Transaction::RemoveParticipantDueToDkg")
+.debug_struct("Transaction::RemoveParticipant")
 .field("participant", &hex::encode(participant.to_bytes()))
 .field("signer", &hex::encode(signed.signer.to_bytes()))
 .finish_non_exhaustive(),
-Transaction::DkgCommitments { attempt, commitments: _, signed } => fmt
+Transaction::DkgParticipation { signed, .. } => fmt
-.debug_struct("Transaction::DkgCommitments")
+.debug_struct("Transaction::DkgParticipation")
+.field("signer", &hex::encode(signed.signer.to_bytes()))
+.finish_non_exhaustive(),
+Transaction::DkgConfirmationNonces { attempt, signed, .. } => fmt
+.debug_struct("Transaction::DkgConfirmationNonces")
 .field("attempt", attempt)
 .field("signer", &hex::encode(signed.signer.to_bytes()))
 .finish_non_exhaustive(),
-Transaction::DkgShares { attempt, signed, .. } => fmt
+Transaction::DkgConfirmationShare { attempt, signed, .. } => fmt
-.debug_struct("Transaction::DkgShares")
+.debug_struct("Transaction::DkgConfirmationShare")
-.field("attempt", attempt)
-.field("signer", &hex::encode(signed.signer.to_bytes()))
-.finish_non_exhaustive(),
-Transaction::InvalidDkgShare { attempt, accuser, faulty, .. } => fmt
-.debug_struct("Transaction::InvalidDkgShare")
-.field("attempt", attempt)
-.field("accuser", accuser)
-.field("faulty", faulty)
-.finish_non_exhaustive(),
-Transaction::DkgConfirmed { attempt, confirmation_share: _, signed } => fmt
-.debug_struct("Transaction::DkgConfirmed")
 .field("attempt", attempt)
 .field("signer", &hex::encode(signed.signer.to_bytes()))
 .finish_non_exhaustive(),
@@ -262,43 +247,32 @@ impl ReadWrite for Transaction {
 reader.read_exact(&mut kind)?;

 match kind[0] {
-0 => Ok(Transaction::RemoveParticipantDueToDkg {
+0 => Ok(Transaction::RemoveParticipant {
 participant: Ristretto::read_G(reader)?,
 signed: Signed::read_without_nonce(reader, 0)?,
 }),

 1 => {
-let mut attempt = [0; 4];
-reader.read_exact(&mut attempt)?;
-let attempt = u32::from_le_bytes(attempt);
+let participation = {
+let mut participation_len = [0; 4];
+reader.read_exact(&mut participation_len)?;
+let participation_len = u32::from_le_bytes(participation_len);

-let commitments = {
-let mut commitments_len = [0; 1];
-reader.read_exact(&mut commitments_len)?;
-let commitments_len = usize::from(commitments_len[0]);
-if commitments_len == 0 {
-Err(io::Error::other("zero commitments in DkgCommitments"))?;
-}

-let mut each_commitments_len = [0; 2];
-reader.read_exact(&mut each_commitments_len)?;
-let each_commitments_len = usize::from(u16::from_le_bytes(each_commitments_len));
-if (commitments_len * each_commitments_len) > TRANSACTION_SIZE_LIMIT {
+if participation_len > u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() {
 Err(io::Error::other(
-"commitments present in transaction exceeded transaction size limit",
+"participation present in transaction exceeded transaction size limit",
 ))?;
 }
-let mut commitments = vec![vec![]; commitments_len];
-for commitments in &mut commitments {
-*commitments = vec![0; each_commitments_len];
-reader.read_exact(commitments)?;
-}
-commitments
+let participation_len = usize::try_from(participation_len).unwrap();
+let mut participation = vec![0; participation_len];
+reader.read_exact(&mut participation)?;
+participation
 };

 let signed = Signed::read_without_nonce(reader, 0)?;

-Ok(Transaction::DkgCommitments { attempt, commitments, signed })
+Ok(Transaction::DkgParticipation { participation, signed })
 }

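The new `DkgParticipation` codec is a symmetric length-prefixed blob: a little-endian `u32` length, bounded on read, then the raw bytes. A self-contained sketch of that read/write pair (the limit constant here is illustrative, not the crate's actual value):

```rust
use std::io::{self, Read, Write};

// Illustrative bound; the real TRANSACTION_SIZE_LIMIT lives in the crate
const TRANSACTION_SIZE_LIMIT: usize = 100_000;

fn write_participation<W: Write>(writer: &mut W, participation: &[u8]) -> io::Result<()> {
  // Little-endian u32 length prefix, then the raw bytes
  writer.write_all(&u32::try_from(participation.len()).unwrap().to_le_bytes())?;
  writer.write_all(participation)
}

fn read_participation<R: Read>(reader: &mut R) -> io::Result<Vec<u8>> {
  let mut len = [0; 4];
  reader.read_exact(&mut len)?;
  let len = u32::from_le_bytes(len);
  // Reject lengths the transaction could never legally contain
  if len > u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() {
    Err(io::Error::other("participation exceeded transaction size limit"))?;
  }
  let mut participation = vec![0; usize::try_from(len).unwrap()];
  reader.read_exact(&mut participation)?;
  Ok(participation)
}
```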
 2 => {

@@ -306,36 +280,12 @@ impl ReadWrite for Transaction {
 reader.read_exact(&mut attempt)?;
 let attempt = u32::from_le_bytes(attempt);

-let shares = {
-let mut share_quantity = [0; 1];
-reader.read_exact(&mut share_quantity)?;

-let mut key_share_quantity = [0; 1];
-reader.read_exact(&mut key_share_quantity)?;

-let mut share_len = [0; 2];
-reader.read_exact(&mut share_len)?;
-let share_len = usize::from(u16::from_le_bytes(share_len));

-let mut all_shares = vec![];
-for _ in 0 .. share_quantity[0] {
-let mut shares = vec![];
-for _ in 0 .. key_share_quantity[0] {
-let mut share = vec![0; share_len];
-reader.read_exact(&mut share)?;
-shares.push(share);
-}
-all_shares.push(shares);
-}
-all_shares
-};

 let mut confirmation_nonces = [0; 64];
 reader.read_exact(&mut confirmation_nonces)?;

-let signed = Signed::read_without_nonce(reader, 1)?;
+let signed = Signed::read_without_nonce(reader, 0)?;

-Ok(Transaction::DkgShares { attempt, shares, confirmation_nonces, signed })
+Ok(Transaction::DkgConfirmationNonces { attempt, confirmation_nonces, signed })
 }

 3 => {
@@ -343,53 +293,21 @@ impl ReadWrite for Transaction {
 reader.read_exact(&mut attempt)?;
 let attempt = u32::from_le_bytes(attempt);

-let mut accuser = [0; 2];
-reader.read_exact(&mut accuser)?;
-let accuser = Participant::new(u16::from_le_bytes(accuser))
-.ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?;

-let mut faulty = [0; 2];
-reader.read_exact(&mut faulty)?;
-let faulty = Participant::new(u16::from_le_bytes(faulty))
-.ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?;

-let mut blame_len = [0; 2];
-reader.read_exact(&mut blame_len)?;
-let mut blame = vec![0; u16::from_le_bytes(blame_len).into()];
-reader.read_exact(&mut blame)?;

-// This shares a nonce with DkgConfirmed as only one is expected
-let signed = Signed::read_without_nonce(reader, 2)?;

-Ok(Transaction::InvalidDkgShare {
-attempt,
-accuser,
-faulty,
-blame: Some(blame).filter(|blame| !blame.is_empty()),
-signed,
-})
-}

-4 => {
-let mut attempt = [0; 4];
-reader.read_exact(&mut attempt)?;
-let attempt = u32::from_le_bytes(attempt);

 let mut confirmation_share = [0; 32];
 reader.read_exact(&mut confirmation_share)?;

-let signed = Signed::read_without_nonce(reader, 2)?;
+let signed = Signed::read_without_nonce(reader, 1)?;

-Ok(Transaction::DkgConfirmed { attempt, confirmation_share, signed })
+Ok(Transaction::DkgConfirmationShare { attempt, confirmation_share, signed })
 }

-5 => {
+4 => {
 let mut block = [0; 32];
 reader.read_exact(&mut block)?;
 Ok(Transaction::CosignSubstrateBlock(block))
 }

-6 => {
+5 => {
 let mut block = [0; 32];
 reader.read_exact(&mut block)?;
 let mut batch = [0; 4];

@@ -397,16 +315,16 @@ impl ReadWrite for Transaction {
 Ok(Transaction::Batch { block, batch: u32::from_le_bytes(batch) })
 }

-7 => {
+6 => {
 let mut block = [0; 8];
 reader.read_exact(&mut block)?;
 Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block)))
 }

-8 => SignData::read(reader).map(Transaction::SubstrateSign),
+7 => SignData::read(reader).map(Transaction::SubstrateSign),
-9 => SignData::read(reader).map(Transaction::Sign),
+8 => SignData::read(reader).map(Transaction::Sign),

-10 => {
+9 => {
 let mut plan = [0; 32];
 reader.read_exact(&mut plan)?;

@@ -421,7 +339,7 @@ impl ReadWrite for Transaction {
 Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature })
 }

-11 => {
+10 => {
 let mut len = [0];
 reader.read_exact(&mut len)?;
 let len = len[0];
@@ -446,109 +364,59 @@ impl ReadWrite for Transaction {

 fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
 match self {
-Transaction::RemoveParticipantDueToDkg { participant, signed } => {
+Transaction::RemoveParticipant { participant, signed } => {
 writer.write_all(&[0])?;
 writer.write_all(&participant.to_bytes())?;
 signed.write_without_nonce(writer)
 }

-Transaction::DkgCommitments { attempt, commitments, signed } => {
+Transaction::DkgParticipation { participation, signed } => {
 writer.write_all(&[1])?;
-writer.write_all(&attempt.to_le_bytes())?;
-if commitments.is_empty() {
-Err(io::Error::other("zero commitments in DkgCommitments"))?
-}
-writer.write_all(&[u8::try_from(commitments.len()).unwrap()])?;
-for commitments_i in commitments {
-if commitments_i.len() != commitments[0].len() {
-Err(io::Error::other("commitments of differing sizes in DkgCommitments"))?
-}
-}
-writer.write_all(&u16::try_from(commitments[0].len()).unwrap().to_le_bytes())?;
-for commitments in commitments {
-writer.write_all(commitments)?;
-}
+writer.write_all(&u32::try_from(participation.len()).unwrap().to_le_bytes())?;
+writer.write_all(participation)?;
 signed.write_without_nonce(writer)
 }

-Transaction::DkgShares { attempt, shares, confirmation_nonces, signed } => {
+Transaction::DkgConfirmationNonces { attempt, confirmation_nonces, signed } => {
 writer.write_all(&[2])?;
 writer.write_all(&attempt.to_le_bytes())?;

-// `shares` is a Vec which is supposed to map to a HashMap<Participant, Vec<u8>>. Since we
-// bound participants to 150, this conversion is safe if a valid in-memory transaction.
-writer.write_all(&[u8::try_from(shares.len()).unwrap()])?;
-// This assumes at least one share is being sent to another party
-writer.write_all(&[u8::try_from(shares[0].len()).unwrap()])?;
-let share_len = shares[0][0].len();
-// For BLS12-381 G2, this would be:
-// - A 32-byte share
-// - A 96-byte ephemeral key
-// - A 128-byte signature
-// Hence why this has to be u16
-writer.write_all(&u16::try_from(share_len).unwrap().to_le_bytes())?;

-for these_shares in shares {
-assert_eq!(these_shares.len(), shares[0].len(), "amount of sent shares was variable");
-for share in these_shares {
-assert_eq!(share.len(), share_len, "sent shares were of variable length");
-writer.write_all(share)?;
-}
-}

 writer.write_all(confirmation_nonces)?;
 signed.write_without_nonce(writer)
 }

-Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => {
+Transaction::DkgConfirmationShare { attempt, confirmation_share, signed } => {
 writer.write_all(&[3])?;
 writer.write_all(&attempt.to_le_bytes())?;
-writer.write_all(&u16::from(*accuser).to_le_bytes())?;
-writer.write_all(&u16::from(*faulty).to_le_bytes())?;

-// Flattens Some(vec![]) to None on the expectation no actual blame will be 0-length
-assert!(blame.as_ref().map_or(1, Vec::len) != 0);
|
|
||||||
let blame_len =
|
|
||||||
u16::try_from(blame.as_ref().unwrap_or(&vec![]).len()).expect("blame exceeded 64 KB");
|
|
||||||
writer.write_all(&blame_len.to_le_bytes())?;
|
|
||||||
writer.write_all(blame.as_ref().unwrap_or(&vec![]))?;
|
|
||||||
|
|
||||||
signed.write_without_nonce(writer)
|
|
||||||
}
|
|
||||||
|
|
||||||
Transaction::DkgConfirmed { attempt, confirmation_share, signed } => {
|
|
||||||
writer.write_all(&[4])?;
|
|
||||||
writer.write_all(&attempt.to_le_bytes())?;
|
|
||||||
writer.write_all(confirmation_share)?;
|
writer.write_all(confirmation_share)?;
|
||||||
signed.write_without_nonce(writer)
|
signed.write_without_nonce(writer)
|
||||||
}
|
}
|
||||||
|
|
||||||
Transaction::CosignSubstrateBlock(block) => {
|
Transaction::CosignSubstrateBlock(block) => {
|
||||||
writer.write_all(&[5])?;
|
writer.write_all(&[4])?;
|
||||||
writer.write_all(block)
|
writer.write_all(block)
|
||||||
}
|
}
|
||||||
|
|
||||||
Transaction::Batch { block, batch } => {
|
Transaction::Batch { block, batch } => {
|
||||||
writer.write_all(&[6])?;
|
writer.write_all(&[5])?;
|
||||||
writer.write_all(block)?;
|
writer.write_all(block)?;
|
||||||
writer.write_all(&batch.to_le_bytes())
|
writer.write_all(&batch.to_le_bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
Transaction::SubstrateBlock(block) => {
|
Transaction::SubstrateBlock(block) => {
|
||||||
writer.write_all(&[7])?;
|
writer.write_all(&[6])?;
|
||||||
writer.write_all(&block.to_le_bytes())
|
writer.write_all(&block.to_le_bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
Transaction::SubstrateSign(data) => {
|
Transaction::SubstrateSign(data) => {
|
||||||
writer.write_all(&[8])?;
|
writer.write_all(&[7])?;
|
||||||
data.write(writer)
|
data.write(writer)
|
||||||
}
|
}
|
||||||
Transaction::Sign(data) => {
|
Transaction::Sign(data) => {
|
||||||
writer.write_all(&[9])?;
|
writer.write_all(&[8])?;
|
||||||
data.write(writer)
|
data.write(writer)
|
||||||
}
|
}
|
||||||
Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => {
|
Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => {
|
||||||
writer.write_all(&[10])?;
|
writer.write_all(&[9])?;
|
||||||
writer.write_all(plan)?;
|
writer.write_all(plan)?;
|
||||||
writer
|
writer
|
||||||
.write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceed 255 bytes")])?;
|
.write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceed 255 bytes")])?;
|
||||||
@@ -557,7 +425,7 @@ impl ReadWrite for Transaction {
|
|||||||
signature.write(writer)
|
signature.write(writer)
|
||||||
}
|
}
|
||||||
Transaction::SlashReport(points, signed) => {
|
Transaction::SlashReport(points, signed) => {
|
||||||
writer.write_all(&[11])?;
|
writer.write_all(&[10])?;
|
||||||
writer.write_all(&[u8::try_from(points.len()).unwrap()])?;
|
writer.write_all(&[u8::try_from(points.len()).unwrap()])?;
|
||||||
for points in points {
|
for points in points {
|
||||||
writer.write_all(&points.to_le_bytes())?;
|
writer.write_all(&points.to_le_bytes())?;
|
||||||
@@ -571,15 +439,16 @@ impl ReadWrite for Transaction {
|
|||||||
impl TransactionTrait for Transaction {
|
impl TransactionTrait for Transaction {
|
||||||
fn kind(&self) -> TransactionKind<'_> {
|
fn kind(&self) -> TransactionKind<'_> {
|
||||||
match self {
|
match self {
|
||||||
Transaction::RemoveParticipantDueToDkg { participant, signed } => {
|
Transaction::RemoveParticipant { participant, signed } => {
|
||||||
TransactionKind::Signed((b"remove", participant.to_bytes()).encode(), signed)
|
TransactionKind::Signed((b"remove", participant.to_bytes()).encode(), signed)
|
||||||
}
|
}
|
||||||
|
|
||||||
Transaction::DkgCommitments { attempt, commitments: _, signed } |
|
Transaction::DkgParticipation { signed, .. } => {
|
||||||
Transaction::DkgShares { attempt, signed, .. } |
|
TransactionKind::Signed(b"dkg".to_vec(), signed)
|
||||||
Transaction::InvalidDkgShare { attempt, signed, .. } |
|
}
|
||||||
Transaction::DkgConfirmed { attempt, signed, .. } => {
|
Transaction::DkgConfirmationNonces { attempt, signed, .. } |
|
||||||
TransactionKind::Signed((b"dkg", attempt).encode(), signed)
|
Transaction::DkgConfirmationShare { attempt, signed, .. } => {
|
||||||
|
TransactionKind::Signed((b"dkg_confirmation", attempt).encode(), signed)
|
||||||
}
|
}
|
||||||
|
|
||||||
Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"),
|
Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"),
|
||||||
@@ -646,11 +515,14 @@ impl Transaction {
|
|||||||
fn signed(tx: &mut Transaction) -> (u32, &mut Signed) {
|
fn signed(tx: &mut Transaction) -> (u32, &mut Signed) {
|
||||||
#[allow(clippy::match_same_arms)] // Doesn't make semantic sense here
|
#[allow(clippy::match_same_arms)] // Doesn't make semantic sense here
|
||||||
let nonce = match tx {
|
let nonce = match tx {
|
||||||
Transaction::RemoveParticipantDueToDkg { .. } => 0,
|
Transaction::RemoveParticipant { .. } => 0,
|
||||||
|
|
||||||
Transaction::DkgCommitments { .. } => 0,
|
Transaction::DkgParticipation { .. } => 0,
|
||||||
Transaction::DkgShares { .. } => 1,
|
// Uses a nonce of 0 as it has an internal attempt counter we distinguish by
|
||||||
Transaction::InvalidDkgShare { .. } | Transaction::DkgConfirmed { .. } => 2,
|
Transaction::DkgConfirmationNonces { .. } => 0,
|
||||||
|
// Uses a nonce of 1 due to internal attempt counter and due to following
|
||||||
|
// DkgConfirmationNonces
|
||||||
|
Transaction::DkgConfirmationShare { .. } => 1,
|
||||||
|
|
||||||
Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
|
Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
|
||||||
|
|
||||||
@@ -669,11 +541,10 @@ impl Transaction {
|
|||||||
nonce,
|
nonce,
|
||||||
#[allow(clippy::match_same_arms)]
|
#[allow(clippy::match_same_arms)]
|
||||||
match tx {
|
match tx {
|
||||||
Transaction::RemoveParticipantDueToDkg { ref mut signed, .. } |
|
Transaction::RemoveParticipant { ref mut signed, .. } |
|
||||||
Transaction::DkgCommitments { ref mut signed, .. } |
|
Transaction::DkgParticipation { ref mut signed, .. } |
|
||||||
Transaction::DkgShares { ref mut signed, .. } |
|
Transaction::DkgConfirmationNonces { ref mut signed, .. } => signed,
|
||||||
Transaction::InvalidDkgShare { ref mut signed, .. } |
|
Transaction::DkgConfirmationShare { ref mut signed, .. } => signed,
|
||||||
Transaction::DkgConfirmed { ref mut signed, .. } => signed,
|
|
||||||
|
|
||||||
Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
|
Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
|
||||||
|
|
||||||
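All of the renamed arms above follow one framing convention: a single tag byte, little-endian integers, then fixed-width fields, with the nonce scheme (`DkgConfirmationNonces` at nonce 0, `DkgConfirmationShare` at nonce 1) ordering the two confirmation messages within a shared attempt counter. A minimal round-trip sketch of that framing, using a hypothetical two-field struct rather than the actual `Transaction` type:

```rust
use std::io::{self, Read, Write};

// Hypothetical transaction mirroring the framing above: one tag byte,
// a little-endian u32 attempt, then a fixed 32-byte share.
struct ConfirmationShare {
  attempt: u32,
  share: [u8; 32],
}

impl ConfirmationShare {
  const TAG: u8 = 3;

  fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(&[Self::TAG])?;
    writer.write_all(&self.attempt.to_le_bytes())?;
    writer.write_all(&self.share)
  }

  fn read<R: Read>(reader: &mut R) -> io::Result<Self> {
    let mut tag = [0];
    reader.read_exact(&mut tag)?;
    if tag[0] != Self::TAG {
      Err(io::Error::new(io::ErrorKind::InvalidData, "unexpected tag"))?;
    }
    let mut attempt = [0; 4];
    reader.read_exact(&mut attempt)?;
    let mut share = [0; 32];
    reader.read_exact(&mut share)?;
    Ok(ConfirmationShare { attempt: u32::from_le_bytes(attempt), share })
  }
}

fn main() -> io::Result<()> {
  let tx = ConfirmationShare { attempt: 1, share: [0xaa; 32] };
  let mut buf = Vec::new();
  tx.write(&mut buf)?;
  let parsed = ConfirmationShare::read(&mut buf.as_slice())?;
  assert_eq!(parsed.attempt, 1);
  assert_eq!(parsed.share, [0xaa; 32]);
  Ok(())
}
```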
@@ -6,6 +6,7 @@ license = "AGPL-3.0-only"
 repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tributary"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 edition = "2021"
+rust-version = "1.81"

 [package.metadata.docs.rs]
 all-features = true
@@ -16,7 +17,7 @@ workspace = true

 [dependencies]
 async-trait = { version = "0.1", default-features = false }
-thiserror = { version = "1", default-features = false }
+thiserror = { version = "2", default-features = false, features = ["std"] }

 subtle = { version = "^2", default-features = false, features = ["std"] }
 zeroize = { version = "^1.5", default-features = false, features = ["std"] }
@@ -27,8 +28,7 @@ rand_chacha = { version = "0.3", default-features = false, features = ["std"] }
 blake2 = { version = "0.10", default-features = false, features = ["std"] }
 transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] }

-dalek-ff-group = { path = "../../crypto/dalek-ff-group" }
-ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", default-features = false, features = ["std"] }
+ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] }
 schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", default-features = false, features = ["std"] }

 hex = { version = "0.4", default-features = false, features = ["std"] }

@@ -1,7 +1,6 @@
 use std::collections::{VecDeque, HashSet};

-use dalek_ff_group::Ristretto;
-use ciphersuite::{group::GroupEncoding, Ciphersuite};
+use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};

 use serai_db::{Get, DbTxn, Db};

@@ -5,8 +5,7 @@ use async_trait::async_trait;

 use zeroize::Zeroizing;

-use dalek_ff_group::Ristretto;
-use ciphersuite::Ciphersuite;
+use ciphersuite::{Ciphersuite, Ristretto};

 use scale::Decode;
 use futures_channel::mpsc::UnboundedReceiver;
@@ -51,13 +50,17 @@ pub(crate) use crate::tendermint::*;
 pub mod tests;

 /// Size limit for an individual transaction.
-pub const TRANSACTION_SIZE_LIMIT: usize = 3_000_000;
+// This needs to be big enough to participate in a 101-of-150 eVRF DKG with each element taking
+// `MAX_KEY_LEN`. This also needs to be big enough to participate in signing 520 Bitcoin inputs
+// with 49 key shares, and signing 120 Monero inputs with 49 key shares.
+// TODO: Add a test for these properties
+pub const TRANSACTION_SIZE_LIMIT: usize = 2_000_000;
 /// Amount of transactions a single account may have in the mempool.
 pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
 /// Block size limit.
-// This targets a growth limit of roughly 45 GB a day, under load, in order to prevent a malicious
+// This targets a growth limit of roughly 30 GB a day, under load, in order to prevent a malicious
 // participant from flooding disks and causing out of space errors in other processes.
-pub const BLOCK_SIZE_LIMIT: usize = 3_001_000;
+pub const BLOCK_SIZE_LIMIT: usize = 2_001_000;

 pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
 pub(crate) const TRANSACTION_MESSAGE: u8 = 1;
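The "roughly 30 GB a day" figure can be sanity-checked from the block size limit alone. Assuming, purely for illustration, one full block roughly every six seconds (the actual block interval is not stated in this diff), a day of full 2_001_000-byte blocks lands just under 30 GB:

```rust
fn main() {
  const BLOCK_SIZE_LIMIT: usize = 2_001_000;
  // Assumed for this back-of-envelope check only; not taken from the diff.
  const ASSUMED_BLOCK_INTERVAL_SECS: u64 = 6;

  let blocks_per_day = (24 * 60 * 60) / ASSUMED_BLOCK_INTERVAL_SECS; // 14_400
  let bytes_per_day = blocks_per_day * (BLOCK_SIZE_LIMIT as u64);
  // ~28.8 GB/day of worst-case growth, in line with the comment above.
  println!("{:.1} GB/day", bytes_per_day as f64 / 1e9);
}
```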
@@ -1,7 +1,6 @@
 use std::collections::HashMap;

-use dalek_ff_group::Ristretto;
-use ciphersuite::Ciphersuite;
+use ciphersuite::{Ciphersuite, Ristretto};

 use serai_db::{DbTxn, Db};

@@ -11,13 +11,12 @@ use rand_chacha::ChaCha12Rng;

 use transcript::{Transcript, RecommendedTranscript};

-use dalek_ff_group::Ristretto;
 use ciphersuite::{
   group::{
     GroupEncoding,
     ff::{Field, PrimeField},
   },
-  Ciphersuite,
+  Ciphersuite, Ristretto,
 };
 use schnorr::{
   SchnorrSignature,

@@ -4,8 +4,7 @@ use scale::{Encode, Decode, IoReader};

 use blake2::{Digest, Blake2s256};

-use dalek_ff_group::Ristretto;
-use ciphersuite::Ciphersuite;
+use ciphersuite::{Ciphersuite, Ristretto};

 use crate::{
   transaction::{Transaction, TransactionKind, TransactionError},

@@ -1,11 +1,9 @@
 use std::{sync::Arc, io, collections::HashMap, fmt::Debug};

 use blake2::{Digest, Blake2s256};

-use dalek_ff_group::Ristretto;
 use ciphersuite::{
   group::{ff::Field, Group},
-  Ciphersuite,
+  Ciphersuite, Ristretto,
 };
 use schnorr::SchnorrSignature;

@@ -10,8 +10,7 @@ use rand::rngs::OsRng;

 use blake2::{Digest, Blake2s256};

-use dalek_ff_group::Ristretto;
-use ciphersuite::{group::ff::Field, Ciphersuite};
+use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};

 use serai_db::{DbTxn, Db, MemDb};

@@ -3,8 +3,7 @@ use std::{sync::Arc, collections::HashMap};
 use zeroize::Zeroizing;
 use rand::{RngCore, rngs::OsRng};

-use dalek_ff_group::Ristretto;
-use ciphersuite::{group::ff::Field, Ciphersuite};
+use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};

 use tendermint::ext::Commit;

@@ -6,10 +6,9 @@ use rand::{RngCore, CryptoRng, rngs::OsRng};

 use blake2::{Digest, Blake2s256};

-use dalek_ff_group::Ristretto;
 use ciphersuite::{
   group::{ff::Field, Group},
-  Ciphersuite,
+  Ciphersuite, Ristretto,
 };
 use schnorr::SchnorrSignature;

@@ -2,8 +2,7 @@ use rand::rngs::OsRng;

 use blake2::{Digest, Blake2s256};

-use dalek_ff_group::Ristretto;
-use ciphersuite::{group::ff::Field, Ciphersuite};
+use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};

 use crate::{
   ReadWrite,

@@ -3,8 +3,7 @@ use std::sync::Arc;
 use zeroize::Zeroizing;
 use rand::{RngCore, rngs::OsRng};

-use dalek_ff_group::Ristretto;
-use ciphersuite::{Ciphersuite, group::ff::Field};
+use ciphersuite::{Ristretto, Ciphersuite, group::ff::Field};

 use scale::Encode;

@@ -6,10 +6,9 @@ use thiserror::Error;

 use blake2::{Digest, Blake2b512};

-use dalek_ff_group::Ristretto;
 use ciphersuite::{
   group::{Group, GroupEncoding},
-  Ciphersuite,
+  Ciphersuite, Ristretto,
 };
 use schnorr::SchnorrSignature;

@@ -6,6 +6,7 @@ license = "MIT"
 repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tendermint"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 edition = "2021"
+rust-version = "1.81"

 [package.metadata.docs.rs]
 all-features = true
@@ -16,7 +17,7 @@ workspace = true

 [dependencies]
 async-trait = { version = "0.1", default-features = false }
-thiserror = { version = "1", default-features = false }
+thiserror = { version = "2", default-features = false, features = ["std"] }

 hex = { version = "0.4", default-features = false, features = ["std"] }
 log = { version = "0.4", default-features = false, features = ["std"] }

@@ -1,5 +1,3 @@
-#![expect(clippy::cast_possible_truncation)]
-
 use core::fmt::Debug;

 use std::{

@@ -1,13 +1,13 @@
 [package]
 name = "ciphersuite"
-version = "0.4.2"
+version = "0.4.1"
 description = "Ciphersuites built around ff/group"
 license = "MIT"
 repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ciphersuite"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = ["ciphersuite", "ff", "group"]
 edition = "2021"
-rust-version = "1.66"
+rust-version = "1.80"

 [package.metadata.docs.rs]
 all-features = true
@@ -24,12 +24,22 @@ rand_core = { version = "0.6", default-features = false }
 zeroize = { version = "^1.5", default-features = false, features = ["derive"] }
 subtle = { version = "^2.4", default-features = false }

-digest = { version = "0.10", default-features = false, features = ["core-api"] }
+digest = { version = "0.10", default-features = false }
 transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false }
+sha2 = { version = "0.10", default-features = false, optional = true }
+sha3 = { version = "0.10", default-features = false, optional = true }

 ff = { version = "0.13", default-features = false, features = ["bits"] }
 group = { version = "0.13", default-features = false }

+dalek-ff-group = { path = "../dalek-ff-group", version = "0.4", default-features = false, optional = true }
+
+elliptic-curve = { version = "0.13", default-features = false, features = ["hash2curve"], optional = true }
+p256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"], optional = true }
+k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"], optional = true }
+
+minimal-ed448 = { path = "../ed448", version = "0.4", default-features = false, optional = true }
+
 [dev-dependencies]
 hex = { version = "0.4", default-features = false, features = ["std"] }

@@ -38,7 +48,7 @@ rand_core = { version = "0.6", default-features = false, features = ["std"] }
 ff-group-tests = { version = "0.13", path = "../ff-group-tests" }

 [features]
-alloc = ["std-shims", "ff/alloc"]
+alloc = ["std-shims"]
 std = [
   "std-shims/std",

@@ -49,8 +59,27 @@ std = [

   "digest/std",
   "transcript/std",
+  "sha2?/std",
+  "sha3?/std",
+
   "ff/std",

+  "dalek-ff-group?/std",
+
+  "elliptic-curve?/std",
+  "p256?/std",
+  "k256?/std",
+  "minimal-ed448?/std",
 ]

+dalek = ["sha2", "dalek-ff-group"]
+ed25519 = ["dalek"]
+ristretto = ["dalek"]
+
+kp256 = ["sha2", "elliptic-curve"]
+p256 = ["kp256", "dep:p256"]
+secp256k1 = ["kp256", "k256"]
+
+ed448 = ["sha3", "minimal-ed448"]
+
 default = ["std"]
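With the curve backends folded into `ciphersuite` behind optional dependencies and features, downstreams select a curve by feature flag rather than by pulling an extra crate. A minimal usage sketch, assuming the crate is built with the `ristretto` feature from the manifest above:

```rust
// Assumes `ciphersuite` with its `ristretto` feature enabled, per the
// feature list above; `hash_to_F` and `generator` come from the trait.
use ciphersuite::{Ciphersuite, Ristretto, group::GroupEncoding};

fn main() {
  // Domain-separated hash to a scalar, then a fixed-base multiplication.
  let scalar = Ristretto::hash_to_F(b"example dst", b"message");
  let point = Ristretto::generator() * scalar;
  println!("{:?}", point.to_bytes());
}
```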
@@ -21,8 +21,6 @@ Their `hash_to_F` is the
 [IETF's hash to curve](https://www.ietf.org/archive/id/draft-irtf-cfrg-hash-to-curve-16.html),
 yet applied to their scalar field.

-Please see the [`ciphersuite-kp256`](https://docs.rs/ciphersuite-kp256) crate for more info.
-
 ### Ed25519/Ristretto

 Ed25519/Ristretto are offered via
@@ -35,8 +33,6 @@ the draft
 [RFC-RISTRETTO](https://www.ietf.org/archive/id/draft-irtf-cfrg-ristretto255-decaf448-05.html).
 The domain-separation tag is naively prefixed to the message.

-Please see the [`dalek-ff-group`](https://docs.rs/dalek-ff-group) crate for more info.
-
 ### Ed448

 Ed448 is offered via [minimal-ed448](https://crates.io/crates/minimal-ed448), an
@@ -46,5 +42,3 @@ to its prime-order subgroup.
 Its `hash_to_F` is the wide reduction of SHAKE256, with a 114-byte output, as
 used in [RFC-8032](https://www.rfc-editor.org/rfc/rfc8032). The
 domain-separation tag is naively prefixed to the message.
-
-Please see the [`minimal-ed448`](https://docs.rs/minimal-ed448) crate for more info.

@@ -1,55 +0,0 @@
-[package]
-name = "ciphersuite-kp256"
-version = "0.4.0"
-description = "Ciphersuites built around ff/group"
-license = "MIT"
-repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ciphersuite/kp256"
-authors = ["Luke Parker <lukeparker5132@gmail.com>"]
-keywords = ["ciphersuite", "ff", "group"]
-edition = "2021"
-rust-version = "1.66"
-
-[package.metadata.docs.rs]
-all-features = true
-rustdoc-args = ["--cfg", "docsrs"]
-
-[lints]
-workspace = true
-
-[dependencies]
-rand_core = { version = "0.6", default-features = false }
-
-zeroize = { version = "^1.5", default-features = false, features = ["derive"] }
-
-sha2 = { version = "0.10", default-features = false }
-
-elliptic-curve = { version = "0.13", default-features = false, features = ["hash2curve"] }
-p256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"] }
-k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits", "hash2curve"] }
-
-ciphersuite = { path = "../", version = "0.4", default-features = false }
-
-[dev-dependencies]
-hex = { version = "0.4", default-features = false, features = ["std"] }
-
-rand_core = { version = "0.6", default-features = false, features = ["std"] }
-
-ff-group-tests = { version = "0.13", path = "../../ff-group-tests" }
-
-[features]
-alloc = ["ciphersuite/alloc"]
-std = [
-  "rand_core/std",
-
-  "zeroize/std",
-
-  "sha2/std",
-
-  "elliptic-curve/std",
-  "p256/std",
-  "k256/std",
-
-  "ciphersuite/std",
-]
-
-default = ["std"]
@@ -1,3 +0,0 @@
-# Ciphersuite {k, p}256
-
-SECP256k1 and P-256 Ciphersuites around k256 and p256.

@@ -3,9 +3,9 @@ use zeroize::Zeroize;
 use sha2::{Digest, Sha512};

 use group::Group;
-use crate::Scalar;
+use dalek_ff_group::Scalar;

-use ciphersuite::Ciphersuite;
+use crate::Ciphersuite;

 macro_rules! dalek_curve {
   (
@@ -15,7 +15,7 @@ macro_rules! dalek_curve {
     $Point: ident,
     $ID: literal
   ) => {
-    use crate::$Point;
+    use dalek_ff_group::$Point;

     impl Ciphersuite for $Ciphersuite {
       type F = Scalar;
@@ -40,9 +40,12 @@ macro_rules! dalek_curve {
 /// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition
 /// between the two. This means `dst: b"abc", data: b"def"`, will produce the same scalar as
 /// `dst: "abcdef", data: b""`. Please use carefully, not letting dsts be substrings of each other.
+#[cfg(any(test, feature = "ristretto"))]
 #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
 pub struct Ristretto;
+#[cfg(any(test, feature = "ristretto"))]
 dalek_curve!("ristretto", Ristretto, RistrettoPoint, b"ristretto");
+#[cfg(any(test, feature = "ristretto"))]
 #[test]
 fn test_ristretto() {
   ff_group_tests::group::test_prime_group_bits::<_, RistrettoPoint>(&mut rand_core::OsRng);
@@ -68,9 +71,12 @@ fn test_ristretto() {
 /// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition
 /// between the two. This means `dst: b"abc", data: b"def"`, will produce the same scalar as
 /// `dst: "abcdef", data: b""`. Please use carefully, not letting dsts be substrings of each other.
+#[cfg(feature = "ed25519")]
 #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
 pub struct Ed25519;
+#[cfg(feature = "ed25519")]
 dalek_curve!("ed25519", Ed25519, EdwardsPoint, b"edwards25519");
+#[cfg(feature = "ed25519")]
 #[test]
 fn test_ed25519() {
   ff_group_tests::group::test_prime_group_bits::<_, EdwardsPoint>(&mut rand_core::OsRng);
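The transposition caveat in the doc comments above is easy to reproduce: because the dst and data are naively concatenated before hashing, splitting the same byte string differently yields the same digest, and hence the same scalar. A sketch of the collision with plain SHA-512 (the hash the dalek ciphersuites use) standing in for `hash_to_F`:

```rust
use sha2::{Digest, Sha512};

// Stand-in for the naive-concatenation hash_to_F described above.
fn naive_hash(dst: &[u8], data: &[u8]) -> Vec<u8> {
  let mut hasher = Sha512::new();
  hasher.update(dst);
  hasher.update(data);
  hasher.finalize().to_vec()
}

fn main() {
  // `dst: b"abc", data: b"def"` collides with `dst: b"abcdef", data: b""`.
  assert_eq!(naive_hash(b"abc", b"def"), naive_hash(b"abcdef", b""));
  // Hence the warning: no dst should be a prefix of another dst.
}
```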
@@ -1,17 +1,15 @@
|
|||||||
use zeroize::Zeroize;
|
use zeroize::Zeroize;
|
||||||
|
|
||||||
use sha3::{
|
use digest::{
|
||||||
digest::{
|
typenum::U114, core_api::BlockSizeUser, Update, Output, OutputSizeUser, FixedOutput,
|
||||||
typenum::U114, core_api::BlockSizeUser, Update, Output, OutputSizeUser, FixedOutput,
|
ExtendableOutput, XofReader, HashMarker, Digest,
|
||||||
ExtendableOutput, XofReader, HashMarker, Digest,
|
|
||||||
},
|
|
||||||
Shake256,
|
|
||||||
};
|
};
|
||||||
|
use sha3::Shake256;
|
||||||
|
|
||||||
use group::Group;
|
use group::Group;
|
||||||
use crate::{Scalar, Point};
|
use minimal_ed448::{Scalar, Point};
|
||||||
|
|
||||||
use ciphersuite::Ciphersuite;
|
use crate::Ciphersuite;
|
||||||
|
|
||||||
/// Shake256, fixed to a 114-byte output, as used by Ed448.
|
/// Shake256, fixed to a 114-byte output, as used by Ed448.
|
||||||
#[derive(Clone, Default)]
|
#[derive(Clone, Default)]
|
||||||
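For reference, the wrapper this file goes on to define is conceptually just SHAKE256 with its XOF read fixed at 114 bytes, the width RFC-8032 uses for Ed448, with the dst naively prefixed to the message per the surrounding docs. A minimal sketch of that operation with the `sha3` crate directly:

```rust
use sha3::{
  digest::{ExtendableOutput, Update, XofReader},
  Shake256,
};

// SHAKE256 with the output fixed to 114 bytes, as Ed448's hash_to_F expects.
fn shake256_114(dst: &[u8], msg: &[u8]) -> [u8; 114] {
  let mut hasher = Shake256::default();
  hasher.update(dst);
  hasher.update(msg);
  let mut reader = hasher.finalize_xof();
  let mut output = [0; 114];
  reader.read(&mut output);
  output
}

fn main() {
  let out = shake256_114(b"example dst", b"message");
  assert_eq!(out.len(), 114);
}
```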
@@ -1,17 +1,16 @@
|
|||||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
|
||||||
#![cfg_attr(not(feature = "std"), no_std)]
|
|
||||||
|
|
||||||
use zeroize::Zeroize;
|
use zeroize::Zeroize;
|
||||||
|
|
||||||
use sha2::Sha256;
|
use sha2::Sha256;
|
||||||
|
|
||||||
|
use group::ff::PrimeField;
|
||||||
|
|
||||||
use elliptic_curve::{
|
use elliptic_curve::{
|
||||||
generic_array::GenericArray,
|
generic_array::GenericArray,
|
||||||
bigint::{NonZero, CheckedAdd, Encoding, U384},
|
bigint::{NonZero, CheckedAdd, Encoding, U384},
|
||||||
hash2curve::{Expander, ExpandMsg, ExpandMsgXmd},
|
hash2curve::{Expander, ExpandMsg, ExpandMsgXmd},
|
||||||
};
|
};
|
||||||
|
|
||||||
use ciphersuite::{group::ff::PrimeField, Ciphersuite};
|
use crate::Ciphersuite;
|
||||||
|
|
||||||
macro_rules! kp_curve {
|
macro_rules! kp_curve {
|
||||||
(
|
(
|
||||||
@@ -108,9 +107,12 @@ fn test_oversize_dst<C: Ciphersuite>() {
|
|||||||
/// Ciphersuite for Secp256k1.
|
/// Ciphersuite for Secp256k1.
|
||||||
///
|
///
|
||||||
/// hash_to_F is implemented via the IETF draft for hash to curve's hash_to_field (v16).
|
/// hash_to_F is implemented via the IETF draft for hash to curve's hash_to_field (v16).
|
||||||
|
#[cfg(feature = "secp256k1")]
|
||||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
|
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
|
||||||
pub struct Secp256k1;
|
pub struct Secp256k1;
|
||||||
|
#[cfg(feature = "secp256k1")]
|
||||||
kp_curve!("secp256k1", k256, Secp256k1, b"secp256k1");
|
kp_curve!("secp256k1", k256, Secp256k1, b"secp256k1");
|
||||||
|
#[cfg(feature = "secp256k1")]
|
||||||
#[test]
|
#[test]
|
||||||
fn test_secp256k1() {
|
fn test_secp256k1() {
|
||||||
ff_group_tests::group::test_prime_group_bits::<_, k256::ProjectivePoint>(&mut rand_core::OsRng);
|
ff_group_tests::group::test_prime_group_bits::<_, k256::ProjectivePoint>(&mut rand_core::OsRng);
|
||||||
@@ -143,9 +145,12 @@ fn test_secp256k1() {
|
|||||||
/// Ciphersuite for P-256.
|
/// Ciphersuite for P-256.
|
||||||
///
|
///
|
||||||
/// hash_to_F is implemented via the IETF draft for hash to curve's hash_to_field (v16).
|
/// hash_to_F is implemented via the IETF draft for hash to curve's hash_to_field (v16).
|
||||||
|
#[cfg(feature = "p256")]
|
||||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
|
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
|
||||||
pub struct P256;
|
pub struct P256;
|
||||||
|
#[cfg(feature = "p256")]
|
||||||
kp_curve!("p256", p256, P256, b"P-256");
|
kp_curve!("p256", p256, P256, b"P-256");
|
||||||
|
#[cfg(feature = "p256")]
|
||||||
#[test]
|
#[test]
|
||||||
fn test_p256() {
|
fn test_p256() {
|
||||||
ff_group_tests::group::test_prime_group_bits::<_, p256::ProjectivePoint>(&mut rand_core::OsRng);
|
ff_group_tests::group::test_prime_group_bits::<_, p256::ProjectivePoint>(&mut rand_core::OsRng);
|
||||||
@@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
Ciphersuites for elliptic curves premised on ff/group.
|
Ciphersuites for elliptic curves premised on ff/group.
|
||||||
|
|
||||||
This library was
|
This library, except for the not recommended Ed448 ciphersuite, was
|
||||||
[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),
|
[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),
|
||||||
culminating in commit
|
culminating in commit
|
||||||
[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).
|
[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).
|
||||||
|
|||||||
@@ -1,12 +1,9 @@
|
|||||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
||||||
#![doc = include_str!("lib.md")]
|
#![doc = include_str!("lib.md")]
|
||||||
#![cfg_attr(not(feature = "std"), no_std)]
|
#![cfg_attr(not(feature = "std"), no_std)]
|
||||||
|
|
||||||
use core::fmt::Debug;
|
use core::fmt::Debug;
|
||||||
#[cfg(any(feature = "alloc", feature = "std"))]
|
#[cfg(any(feature = "alloc", feature = "std"))]
|
||||||
#[allow(unused_imports)]
|
|
||||||
use std_shims::prelude::*;
|
|
||||||
#[cfg(any(feature = "alloc", feature = "std"))]
|
|
||||||
use std_shims::io::{self, Read};
|
use std_shims::io::{self, Read};
|
||||||
|
|
||||||
use rand_core::{RngCore, CryptoRng};
|
use rand_core::{RngCore, CryptoRng};
|
||||||
@@ -26,6 +23,25 @@ use group::{
|
|||||||
#[cfg(any(feature = "alloc", feature = "std"))]
|
#[cfg(any(feature = "alloc", feature = "std"))]
|
||||||
use group::GroupEncoding;
|
use group::GroupEncoding;
|
||||||
|
|
||||||
|
#[cfg(feature = "dalek")]
|
||||||
|
mod dalek;
|
||||||
|
#[cfg(feature = "ristretto")]
|
||||||
|
pub use dalek::Ristretto;
|
||||||
|
#[cfg(feature = "ed25519")]
|
||||||
|
pub use dalek::Ed25519;
|
||||||
|
|
||||||
|
#[cfg(feature = "kp256")]
|
||||||
|
mod kp256;
|
||||||
|
#[cfg(feature = "secp256k1")]
|
||||||
|
pub use kp256::Secp256k1;
|
||||||
|
#[cfg(feature = "p256")]
|
||||||
|
pub use kp256::P256;
|
||||||
|
|
||||||
|
#[cfg(feature = "ed448")]
|
||||||
|
mod ed448;
|
||||||
|
#[cfg(feature = "ed448")]
|
||||||
|
pub use ed448::*;
|
||||||
|
|
||||||
/// Unified trait defining a ciphersuite around an elliptic curve.
|
/// Unified trait defining a ciphersuite around an elliptic curve.
|
||||||
pub trait Ciphersuite:
|
pub trait Ciphersuite:
|
||||||
'static + Send + Sync + Clone + Copy + PartialEq + Eq + Debug + Zeroize
|
'static + Send + Sync + Clone + Copy + PartialEq + Eq + Debug + Zeroize
|
||||||
@@ -83,9 +99,6 @@ pub trait Ciphersuite:
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Read a canonical point from something implementing std::io::Read.
|
/// Read a canonical point from something implementing std::io::Read.
|
||||||
///
|
|
||||||
/// The provided implementation is safe so long as `GroupEncoding::to_bytes` always returns a
|
|
||||||
/// canonical serialization.
|
|
||||||
#[cfg(any(feature = "alloc", feature = "std"))]
|
#[cfg(any(feature = "alloc", feature = "std"))]
|
||||||
#[allow(non_snake_case)]
|
#[allow(non_snake_case)]
|
||||||
fn read_G<R: Read>(reader: &mut R) -> io::Result<Self::G> {
|
fn read_G<R: Read>(reader: &mut R) -> io::Result<Self::G> {
|
||||||
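`read_G` gives every ciphersuite a canonical wire decoding: a point round-trips through `to_bytes` and back, while non-canonical encodings surface as `io` errors. A usage sketch, again assuming a build with the `ristretto` feature:

```rust
use ciphersuite::{Ciphersuite, Ristretto, group::GroupEncoding};

fn main() -> std::io::Result<()> {
  let point = Ristretto::generator();
  let bytes = point.to_bytes();

  // read_G consumes any std::io::Read, so a byte slice works as a reader.
  let parsed = Ristretto::read_G(&mut bytes.as_ref())?;
  assert_eq!(parsed, point);
  Ok(())
}
```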
@@ -1,13 +1,13 @@
 [package]
 name = "dalek-ff-group"
-version = "0.4.4"
+version = "0.4.1"
 description = "ff/group bindings around curve25519-dalek"
 license = "MIT"
 repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dalek-ff-group"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = ["curve25519", "ed25519", "ristretto", "dalek", "group"]
 edition = "2021"
-rust-version = "1.65"
+rust-version = "1.71"

 [package.metadata.docs.rs]
 all-features = true
@@ -25,22 +25,18 @@ subtle = { version = "^2.4", default-features = false }
 rand_core = { version = "0.6", default-features = false }

 digest = { version = "0.10", default-features = false }
-sha2 = { version = "0.10", default-features = false }

 ff = { version = "0.13", default-features = false, features = ["bits"] }
 group = { version = "0.13", default-features = false }
-ciphersuite = { path = "../ciphersuite", default-features = false }

 crypto-bigint = { version = "0.5", default-features = false, features = ["zeroize"] }

 curve25519-dalek = { version = ">= 4.0, < 4.2", default-features = false, features = ["alloc", "zeroize", "digest", "group", "precomputed-tables"] }

 [dev-dependencies]
-hex = "0.4"
 rand_core = { version = "0.6", default-features = false, features = ["std"] }
 ff-group-tests = { path = "../ff-group-tests" }

 [features]
-alloc = ["zeroize/alloc", "ciphersuite/alloc"]
-std = ["alloc", "zeroize/std", "subtle/std", "rand_core/std", "digest/std", "sha2/std", "ciphersuite/std"]
+std = ["zeroize/std", "subtle/std", "rand_core/std", "digest/std"]
 default = ["std"]

@@ -17,7 +17,7 @@ use crypto_bigint::{
   impl_modulus,
 };

-use group::ff::{Field, PrimeField, FieldBits, PrimeFieldBits, FromUniformBytes};
+use group::ff::{Field, PrimeField, FieldBits, PrimeFieldBits};

 use crate::{u8_from_bool, constant_time, math_op, math};

@@ -36,7 +36,6 @@ type ResidueType = Residue<FieldModulus, { FieldModulus::LIMBS }>;

 /// A constant-time implementation of the Ed25519 field.
 #[derive(Clone, Copy, PartialEq, Eq, Default, Debug, Zeroize)]
-#[repr(transparent)]
 pub struct FieldElement(ResidueType);

 // Square root of -1.
@@ -93,7 +92,7 @@ impl Neg for FieldElement {
   }
 }

-impl Neg for &FieldElement {
+impl<'a> Neg for &'a FieldElement {
   type Output = FieldElement;
   fn neg(self) -> Self::Output {
     (*self).neg()
@@ -217,18 +216,10 @@ impl PrimeFieldBits for FieldElement {
 }

 impl FieldElement {
-  /// Create a FieldElement from a `crypto_bigint::U256`.
-  ///
-  /// This will reduce the `U256` by the modulus, into a member of the field.
-  pub const fn from_u256(u256: &U256) -> Self {
-    FieldElement(Residue::new(u256))
-  }
-
-  /// Create a `FieldElement` from the reduction of a 512-bit number.
-  ///
-  /// The bytes are interpreted in little-endian format.
-  pub fn wide_reduce(value: [u8; 64]) -> Self {
-    FieldElement(reduce(U512::from_le_bytes(value)))
+  /// Interpret the value as a little-endian integer, square it, and reduce it into a FieldElement.
+  pub fn from_square(value: [u8; 32]) -> FieldElement {
+    let value = U256::from_le_bytes(value);
+    FieldElement(reduce(U512::from(value.mul_wide(&value))))
   }

   /// Perform an exponentiation.
@@ -253,16 +244,7 @@ impl FieldElement {
         res *= res;
       }
     }
-      let mut scale_by = FieldElement::ONE;
-      #[allow(clippy::needless_range_loop)]
-      for i in 0 .. 16 {
-        #[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 16
-        {
-          scale_by = <_>::conditional_select(&scale_by, &table[i], bits.ct_eq(&(i as u8)));
-        }
-      }
-      res *= scale_by;
+      res *= table[usize::from(bits)];
       bits = 0;
     }
   }
@@ -306,12 +288,6 @@ impl FieldElement {
   }
 }

-impl FromUniformBytes<64> for FieldElement {
-  fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {
-    Self::wide_reduce(*bytes)
-  }
-}
-
 impl Sum<FieldElement> for FieldElement {
   fn sum<I: Iterator<Item = FieldElement>>(iter: I) -> FieldElement {
     let mut res = FieldElement::ZERO;
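The new `from_square` replaces the more general `from_u256`/`wide_reduce` pair with the one operation its caller needs: interpret 32 little-endian bytes, square into a 512-bit product, and reduce modulo the field prime. The same shape in miniature, using u64/u128 so the sketch stays self-contained (the real code uses `crypto_bigint`'s `U256::mul_wide` into a `U512`):

```rust
// Miniature of the wide-reduction pattern: squaring doubles the bit width,
// so reduce the double-width product rather than a truncated low half.
fn from_square_u64(value: u64, modulus: u64) -> u64 {
  let wide = (value as u128) * (value as u128); // analogous to U256::mul_wide
  (wide % (modulus as u128)) as u64             // analogous to reduce(U512::...)
}

fn main() {
  // 2^40 squared overflows u64, yet reduces correctly via the wide product.
  let p = 0xffff_ffff_0000_0001; // an arbitrary 64-bit modulus for illustration
  assert_eq!(from_square_u64(1 << 40, p), ((1u128 << 80) % (p as u128)) as u64);
}
```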
@@ -1,5 +1,5 @@
 #![allow(deprecated)]
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![no_std] // Prevents writing new code, in what should be a simple wrapper, which requires std
 #![doc = include_str!("../README.md")]
 #![allow(clippy::redundant_closure_call)]
@@ -30,7 +30,7 @@ use dalek::{
 pub use constants::{ED25519_BASEPOINT_TABLE, RISTRETTO_BASEPOINT_TABLE};

 use group::{
-  ff::{Field, PrimeField, FieldBits, PrimeFieldBits, FromUniformBytes},
+  ff::{Field, PrimeField, FieldBits, PrimeFieldBits},
   Group, GroupEncoding,
   prime::PrimeGroup,
 };
@@ -38,24 +38,13 @@ use group::{
 mod field;
 pub use field::FieldElement;

-mod ciphersuite;
-pub use crate::ciphersuite::{Ed25519, Ristretto};
-
 // Use black_box when possible
 #[rustversion::since(1.66)]
-mod black_box {
-  pub(crate) fn black_box<T>(val: T) -> T {
-    #[allow(clippy::incompatible_msrv)]
-    core::hint::black_box(val)
-  }
-}
+use core::hint::black_box;
 #[rustversion::before(1.66)]
-mod black_box {
-  pub(crate) fn black_box<T>(val: T) -> T {
-    val
-  }
+fn black_box<T>(val: T) -> T {
+  val
 }
-use black_box::black_box;

 fn u8_from_bool(bit_ref: &mut bool) -> u8 {
   let bit_ref = black_box(bit_ref);
@@ -219,16 +208,7 @@ impl Scalar {
         res *= res;
       }
     }
-      let mut scale_by = Scalar::ONE;
-      #[allow(clippy::needless_range_loop)]
-      for i in 0 .. 16 {
-        #[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 16
-        {
-          scale_by = <_>::conditional_select(&scale_by, &table[i], bits.ct_eq(&(i as u8)));
-        }
-      }
-      res *= scale_by;
+      res *= table[usize::from(bits)];
       bits = 0;
     }
   }
@@ -325,12 +305,6 @@ impl PrimeFieldBits for Scalar {
   }
 }

-impl FromUniformBytes<64> for Scalar {
-  fn from_uniform_bytes(bytes: &[u8; 64]) -> Self {
-    Self::from_bytes_mod_order_wide(bytes)
-  }
-}
-
 impl Sum<Scalar> for Scalar {
   fn sum<I: Iterator<Item = Scalar>>(iter: I) -> Scalar {
     Self(DScalar::sum(iter))
@@ -368,12 +342,7 @@ macro_rules! dalek_group {
     $BASEPOINT_POINT: ident,
     $BASEPOINT_TABLE: ident
   ) => {
-    /// Wrapper around the dalek Point type.
-    ///
-    /// All operations will be restricted to a prime-order subgroup (equivalent to the group itself
-    /// in the case of Ristretto). The exposure of the internal element does allow bypassing this
-    /// however, which may lead to undefined/computationally-unsafe behavior, and is entirely at
-    /// the user's risk.
+    /// Wrapper around the dalek Point type. For Ed25519, this is restricted to the prime subgroup.
     #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
     pub struct $Point(pub $DPoint);
     deref_borrow!($Point, $DPoint);
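Both the `Scalar::pow` and `FieldElement::pow` hunks above swap between two ways of reading the 4-bit window's table entry: direct indexing (`table[usize::from(bits)]`), whose memory access pattern depends on the secret index, and a `subtle`-style constant-time select, which touches every entry regardless of the index. A standalone sketch of the constant-time variant:

```rust
use subtle::{ConditionallySelectable, ConstantTimeEq};

// Select table[index] without an index-dependent memory access pattern:
// scan all 16 entries, keeping the match via constant-time selection.
fn ct_table_lookup(table: &[u64; 16], index: u8) -> u64 {
  let mut result = 0;
  for (i, entry) in table.iter().enumerate() {
    result = u64::conditional_select(&result, entry, index.ct_eq(&(i as u8)));
  }
  result
}

fn main() {
  let table: [u64; 16] = core::array::from_fn(|i| (i as u64) * 10);
  assert_eq!(ct_table_lookup(&table, 7), 70);
}
```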
@@ -1,13 +1,13 @@
 [package]
 name = "dkg"
-version = "0.6.1"
+version = "0.5.1"
 description = "Distributed key generation over ff/group"
 license = "MIT"
 repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = ["dkg", "multisig", "threshold", "ff", "group"]
 edition = "2021"
-rust-version = "1.66"
+rust-version = "1.81"

 [package.metadata.docs.rs]
 all-features = true
@@ -17,25 +17,82 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true

 [dependencies]
-zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive", "alloc"] }
-
 thiserror = { version = "2", default-features = false }

+rand_core = { version = "0.6", default-features = false }
+
+zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
+
 std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false }

 borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true }

-ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false, features = ["alloc"] }
+transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false, features = ["recommended"] }
+chacha20 = { version = "0.9", default-features = false, features = ["zeroize"] }
+
+ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false }
+multiexp = { path = "../multiexp", version = "0.4", default-features = false }
+
+schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false }
+dleq = { path = "../dleq", version = "^0.4.1", default-features = false }
+
+# eVRF DKG dependencies
+generic-array = { version = "1", default-features = false, features = ["alloc"], optional = true }
+blake2 = { version = "0.10", default-features = false, features = ["std"], optional = true }
+rand_chacha = { version = "0.3", default-features = false, features = ["std"], optional = true }
+generalized-bulletproofs = { path = "../evrf/generalized-bulletproofs", default-features = false, optional = true }
+ec-divisors = { path = "../evrf/divisors", default-features = false, optional = true }
+generalized-bulletproofs-circuit-abstraction = { path = "../evrf/circuit-abstraction", optional = true }
+generalized-bulletproofs-ec-gadgets = { path = "../evrf/ec-gadgets", optional = true }
+
+secq256k1 = { path = "../evrf/secq256k1", optional = true }
+embedwards25519 = { path = "../evrf/embedwards25519", optional = true }
+
+[dev-dependencies]
+rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
+rand = { version = "0.8", default-features = false, features = ["std"] }
+ciphersuite = { path = "../ciphersuite", default-features = false, features = ["ristretto"] }
+generalized-bulletproofs = { path = "../evrf/generalized-bulletproofs", features = ["tests"] }
+ec-divisors = { path = "../evrf/divisors", features = ["pasta"] }
+pasta_curves = "0.5"

 [features]
 std = [
   "thiserror/std",

+  "rand_core/std",
+
   "std-shims/std",

   "borsh?/std",

+  "transcript/std",
+  "chacha20/std",
+
   "ciphersuite/std",
+  "multiexp/std",
+  "multiexp/batch",
+
+  "schnorr/std",
+  "dleq/std",
+  "dleq/serialize"
 ]
 borsh = ["dep:borsh"]
+evrf = [
+  "std",
+
+  "dep:generic-array",
+
+  "dep:blake2",
+  "dep:rand_chacha",
+
+  "dep:generalized-bulletproofs",
+  "dep:ec-divisors",
+  "dep:generalized-bulletproofs-circuit-abstraction",
+  "dep:generalized-bulletproofs-ec-gadgets",
+]
+evrf-secp256k1 = ["evrf", "ciphersuite/secp256k1", "secq256k1"]
+evrf-ed25519 = ["evrf", "ciphersuite/ed25519", "embedwards25519"]
+evrf-ristretto = ["evrf", "ciphersuite/ristretto", "embedwards25519"]
+tests = ["rand_core/getrandom"]
 default = ["std"]
|
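Taken together, the feature table above describes an opt-in eVRF DKG: each `evrf-*` flag enables the `evrf` umbrella feature (which itself implies `std`), the matching `ciphersuite` curve, and the corresponding embedded curve crate. As a minimal sketch, a downstream manifest enabling the secp256k1 variant might look as follows; the `dkg` version shown is illustrative, not taken from this diff:

```toml
# Hypothetical consumer Cargo.toml (version illustrative).
[dependencies]
# "evrf-secp256k1" transitively enables "evrf" (and thus "std"),
# "ciphersuite/secp256k1", and the "secq256k1" tower-curve dependency,
# per the feature table above.
dkg = { version = "0.5", features = ["evrf-secp256k1"] }
```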
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2021-2025 Luke Parker
+Copyright (c) 2021-2023 Luke Parker
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -1,15 +1,16 @@
 # Distributed Key Generation
 
-A crate implementing a type for keys, presumably the result of a distributed
-key generation protocol, and utilities from there.
-
-This crate used to host implementations of distributed key generation protocols
-as well (hence the name). Those have been smashed into their own crates, such
-as [`dkg-musig`](https://docs.rs/dkg-musig) and
-[`dkg-pedpop`](https://docs.rs/dkg-pedpop).
-
-Before being smashed, this crate was [audited by Cypher Stack in March 2023](
-https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf
-), culminating in commit [669d2dbffc1dafb82a09d9419ea182667115df06](
-https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06
-). Any subsequent changes have not undergone auditing.
+A collection of implementations of various distributed key generation protocols.
+
+All included protocols resolve into the provided `Threshold` types, intended to
+enable their modularity. Additional utilities around these types, such as
+promotion from one generator to another, are also provided.
+
+Currently, the only included protocol is the two-round protocol from the
+[FROST paper](https://eprint.iacr.org/2020/852).
+
+This library was
+[audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),
+culminating in commit
+[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).
+Any subsequent changes have not undergone auditing.
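Since the first README above points consumers at the split protocol crates, the resulting dependency layout might look as follows; this is a sketch, with the `dkg` version taken from the deleted dealer manifest below and the split-crate version hypothetical:

```toml
# Sketch of a consumer of the post-split layout: "dkg" supplies the threshold
# key types, while the split crates implement the generation protocols that
# resolve into those types. The dkg-pedpop version here is hypothetical;
# see docs.rs for actual releases.
[dependencies]
dkg = { version = "0.6", default-features = false }
dkg-pedpop = { version = "0.1" }
```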
@@ -1,36 +0,0 @@
-[package]
-name = "dkg-dealer"
-version = "0.6.0"
-description = "Produce dkg::ThresholdKeys with a dealer key generation"
-license = "MIT"
-repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg/dealer"
-authors = ["Luke Parker <lukeparker5132@gmail.com>"]
-keywords = ["dkg", "multisig", "threshold", "ff", "group"]
-edition = "2021"
-rust-version = "1.66"
-
-[package.metadata.docs.rs]
-all-features = true
-rustdoc-args = ["--cfg", "docsrs"]
-
-[lints]
-workspace = true
-
-[dependencies]
-zeroize = { version = "^1.5", default-features = false }
-rand_core = { version = "0.6", default-features = false }
-
-std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false }
-
-ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false }
-dkg = { path = "../", version = "0.6", default-features = false }
-
-[features]
-std = [
-  "zeroize/std",
-  "rand_core/std",
-  "std-shims/std",
-  "ciphersuite/std",
-  "dkg/std",
-]
-default = ["std"]
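The manifest above also documents the dealer crate's `no_std` story: `std` is its sole default feature and merely forwards `std` to each dependency. A purely illustrative sketch of a `no_std` consumer under that assumption:

```toml
# Illustrative no_std usage of dkg-dealer 0.6.0: disabling default features
# drops "std", which otherwise forwards std to zeroize, rand_core, std-shims,
# ciphersuite, and dkg, per the feature list in the manifest above.
[dependencies]
dkg-dealer = { version = "0.6.0", default-features = false }
```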
Some files were not shown because too many files have changed in this diff.