mirror of
https://github.com/serai-dex/serai.git
synced 2025-12-08 20:29:23 +00:00
Compare commits
296 Commits
undroppabl
...
9c47ef2658
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9c47ef2658 | ||
|
|
e1b6b638c6 | ||
|
|
c24768f922 | ||
|
|
87ee879dea | ||
|
|
b5603560e8 | ||
|
|
5818f1a41c | ||
|
|
1b781b4b57 | ||
|
|
94faf098b6 | ||
|
|
03e45f73cd | ||
|
|
63f7e220c0 | ||
|
|
7d49366373 | ||
|
|
55ed33d2d1 | ||
|
|
138a0e9b40 | ||
|
|
4fc7263ac3 | ||
|
|
f27fd59fa6 | ||
|
|
437f0e9a93 | ||
|
|
cc5d38f1ce | ||
|
|
0ce025e0c2 | ||
|
|
224cf4ea21 | ||
|
|
a9b1e5293c | ||
|
|
80009ab67f | ||
|
|
df9fda2971 | ||
|
|
ca8afb83a1 | ||
|
|
18a9cf2535 | ||
|
|
10c126ad92 | ||
|
|
19305aebc9 | ||
|
|
be68e27551 | ||
|
|
d6d96fe8ff | ||
|
|
95909d83a4 | ||
|
|
3bd48974f3 | ||
|
|
29093715e3 | ||
|
|
87b4dfc8f3 | ||
|
|
4db78b1787 | ||
|
|
02a5f15535 | ||
|
|
865e351f96 | ||
|
|
ea275df26c | ||
|
|
2216ade8c4 | ||
|
|
5265cc69de | ||
|
|
a141deaf36 | ||
|
|
215e41fdb6 | ||
|
|
41c34d7f11 | ||
|
|
974bc82387 | ||
|
|
47ef24a7cc | ||
|
|
c0e48867e1 | ||
|
|
0066b94d38 | ||
|
|
7d54c02ec6 | ||
|
|
568324f631 | ||
|
|
2a02a8dc59 | ||
|
|
eaa9a0e5a6 | ||
|
|
251996c1b0 | ||
|
|
98b9cc82a7 | ||
|
|
263d75d380 | ||
|
|
030185c7fc | ||
|
|
e2dc5db7aa | ||
|
|
90bc364f9f | ||
|
|
a4811c9a41 | ||
|
|
12cfa6b2a5 | ||
|
|
0c71b6fc4d | ||
|
|
ffe1b60a11 | ||
|
|
5526b8d439 | ||
|
|
beac35c119 | ||
|
|
62bb75e09a | ||
|
|
45bd376c08 | ||
|
|
da190759a9 | ||
|
|
f2d399ba1e | ||
|
|
220bcbc592 | ||
|
|
85949f4b04 | ||
|
|
f8adfb56ad | ||
|
|
2f833dec77 | ||
|
|
e3e41324c9 | ||
|
|
6ed7c5d65e | ||
|
|
9dddfd91c8 | ||
|
|
c24b694fb2 | ||
|
|
738babf7e9 | ||
|
|
33faa53b56 | ||
|
|
8c366107ae | ||
|
|
7a790f3a20 | ||
|
|
a7c77f8b5f | ||
|
|
da3095ed15 | ||
|
|
758d422595 | ||
|
|
9841061b49 | ||
|
|
4122a0135f | ||
|
|
b63ef32864 | ||
|
|
8be03a8fc2 | ||
|
|
677a2e5749 | ||
|
|
38bda1d586 | ||
|
|
2bc2ca6906 | ||
|
|
900a6612d7 | ||
|
|
17c1d5cd6b | ||
|
|
8a1b56a928 | ||
|
|
75964cf6da | ||
|
|
d407e35cee | ||
|
|
c8ef044acb | ||
|
|
ddbc32de4d | ||
|
|
e5ccfac19e | ||
|
|
432daae1d1 | ||
|
|
da3a85efe5 | ||
|
|
1e0240123d | ||
|
|
f6d4d1b084 | ||
|
|
1b37dd2951 | ||
|
|
f32e0609f1 | ||
|
|
ca85f9ba0c | ||
|
|
cfd1cb3a37 | ||
|
|
f2c13a0040 | ||
|
|
961f46bc04 | ||
|
|
2c4de3bab4 | ||
|
|
95c30720d2 | ||
|
|
ceede14f5c | ||
|
|
5e60ea9718 | ||
|
|
153f6f2f2f | ||
|
|
104c0d4492 | ||
|
|
7c8f13ab28 | ||
|
|
cb0deadf9a | ||
|
|
cb489f9cef | ||
|
|
cc662cb591 | ||
|
|
a8b8844e3f | ||
|
|
82b543ef75 | ||
|
|
72e80c1a3d | ||
|
|
b6edc94bcd | ||
|
|
cfce2b26e2 | ||
|
|
e87bbcda64 | ||
|
|
9f84adf8b3 | ||
|
|
3919cf55ae | ||
|
|
38dd8cb191 | ||
|
|
f2563d39cb | ||
|
|
15a9cbef40 | ||
|
|
078d6e51e5 | ||
|
|
6c33e18745 | ||
|
|
b743c9a43e | ||
|
|
0c2f2979a9 | ||
|
|
971951a1a6 | ||
|
|
92d9e908cb | ||
|
|
a32b97be88 | ||
|
|
e3809b2ff1 | ||
|
|
fd2d8b4f0a | ||
|
|
bc81614894 | ||
|
|
8df5aa2e2d | ||
|
|
b000740470 | ||
|
|
b9f554111d | ||
|
|
354c408e3e | ||
|
|
df3b60376a | ||
|
|
8d209c652e | ||
|
|
9ddad794b4 | ||
|
|
b934e484cc | ||
|
|
f8aee9b3c8 | ||
|
|
f51d77d26a | ||
|
|
0780deb643 | ||
|
|
75c38560f4 | ||
|
|
9f1c5268a5 | ||
|
|
35b113768b | ||
|
|
f2595c4939 | ||
|
|
8fcfa6d3d5 | ||
|
|
54c9d19726 | ||
|
|
25324c3cd5 | ||
|
|
ecb7df85b0 | ||
|
|
68c7acdbef | ||
|
|
8b60feed92 | ||
|
|
5c895efcd0 | ||
|
|
60e55656aa | ||
|
|
9536282418 | ||
|
|
8297d0679d | ||
|
|
d9f854b08a | ||
|
|
8aaf7f7dc6 | ||
|
|
ce447558ac | ||
|
|
fc850da30e | ||
|
|
d6f6cf1965 | ||
|
|
4438b51881 | ||
|
|
6ae0d9fad7 | ||
|
|
ad08b410a8 | ||
|
|
ec3cfd3ab7 | ||
|
|
01eb2daa0b | ||
|
|
885000f970 | ||
|
|
4be506414b | ||
|
|
1143d84e1d | ||
|
|
336922101f | ||
|
|
ffa033d978 | ||
|
|
23f986f57a | ||
|
|
bb726b58af | ||
|
|
387615705c | ||
|
|
c7f825a192 | ||
|
|
d363b1c173 | ||
|
|
d5077ae966 | ||
|
|
188fcc3cb4 | ||
|
|
cbab9486c6 | ||
|
|
a5f4c450c6 | ||
|
|
4f65a0b147 | ||
|
|
feb18d64a7 | ||
|
|
cb1e6535cb | ||
|
|
6b8cf6653a | ||
|
|
b426bfcfe8 | ||
|
|
21ce50ecf7 | ||
|
|
a4ceb2e756 | ||
|
|
b59b1f59dd | ||
|
|
cc4a65e82a | ||
|
|
eab5d9e64f | ||
|
|
4e0c58464f | ||
|
|
205da3fd38 | ||
|
|
f7e63d4944 | ||
|
|
b5608fc3d2 | ||
|
|
33018bf6da | ||
|
|
bef90b2f1a | ||
|
|
184c02714a | ||
|
|
5a7b815e2e | ||
|
|
22e411981a | ||
|
|
11d48d0685 | ||
|
|
e4cc23b72d | ||
|
|
52d853c8ba | ||
|
|
9c33a711d7 | ||
|
|
a275023cfc | ||
|
|
258c02ff39 | ||
|
|
3655dc723f | ||
|
|
315d4fb356 | ||
|
|
2bc880e372 | ||
|
|
19422de231 | ||
|
|
fa0dadc9bd | ||
|
|
f004c8726f | ||
|
|
835b5bb06f | ||
|
|
0484113254 | ||
|
|
17cc10b3f7 | ||
|
|
7e01589fba | ||
|
|
f8c3acae7b | ||
|
|
0957460f27 | ||
|
|
ea00ba9ff8 | ||
|
|
a9625364df | ||
|
|
75c6427d7c | ||
|
|
e742a6b0ec | ||
|
|
5164a710a2 | ||
|
|
27c1dc4646 | ||
|
|
3892fa30b7 | ||
|
|
ed599c8ab5 | ||
|
|
29bb5e21ab | ||
|
|
604a4b2442 | ||
|
|
977dcad86d | ||
|
|
cefc542744 | ||
|
|
164fe9a14f | ||
|
|
f948881eba | ||
|
|
201b675031 | ||
|
|
3d44766eff | ||
|
|
a63a86ba79 | ||
|
|
e922264ebf | ||
|
|
7e53eff642 | ||
|
|
669b8b776b | ||
|
|
6508957cbc | ||
|
|
373e794d2c | ||
|
|
c8f3a32fdf | ||
|
|
f690bf831f | ||
|
|
0b30ac175e | ||
|
|
47560fa9a9 | ||
|
|
9d57c4eb4d | ||
|
|
642ba00952 | ||
|
|
3c9c12d320 | ||
|
|
f6b52b3fd3 | ||
|
|
0d906363a0 | ||
|
|
8222ce78d8 | ||
|
|
cb906242e7 | ||
|
|
2a19e9da93 | ||
|
|
2226dd59cc | ||
|
|
be2098d2e1 | ||
|
|
6b41f32371 | ||
|
|
19b87c7f5a | ||
|
|
505f1b20a4 | ||
|
|
8b52b921f3 | ||
|
|
f36bbcba25 | ||
|
|
167826aa88 | ||
|
|
bea4f92b7a | ||
|
|
7312fa8d3c | ||
|
|
92a4cceeeb | ||
|
|
3357181fe2 | ||
|
|
7ce5bdad44 | ||
|
|
0de3fda921 | ||
|
|
e9c1235b76 | ||
|
|
dc1b8dfccd | ||
|
|
d0201cf2e5 | ||
|
|
f3d20e60b3 | ||
|
|
dafba81b40 | ||
|
|
91f8ec53d9 | ||
|
|
fc9a4a08b8 | ||
|
|
45fadb21ac | ||
|
|
28619fbee1 | ||
|
|
bbe014c3a7 | ||
|
|
fb3fadb3d3 | ||
|
|
f481d20773 | ||
|
|
599b2dec8f | ||
|
|
435f1d9ae1 | ||
|
|
d7ecab605e | ||
|
|
805fea52ec | ||
|
|
48db06f901 | ||
|
|
e9d0a5e0ed | ||
|
|
44d05518aa | ||
|
|
23b433fe6c | ||
|
|
2e57168a97 | ||
|
|
5c6160c398 | ||
|
|
9eee1d971e | ||
|
|
e6300847d6 | ||
|
|
e0a3e7bea6 | ||
|
|
cbebaa1349 |
2
.github/actions/LICENSE → .github/LICENSE
vendored
2
.github/actions/LICENSE → .github/LICENSE
vendored
@@ -1,6 +1,6 @@
|
|||||||
MIT License
|
MIT License
|
||||||
|
|
||||||
Copyright (c) 2022-2023 Luke Parker
|
Copyright (c) 2022-2025 Luke Parker
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
4
.github/actions/bitcoin/action.yml
vendored
4
.github/actions/bitcoin/action.yml
vendored
@@ -5,14 +5,14 @@ inputs:
|
|||||||
version:
|
version:
|
||||||
description: "Version to download and run"
|
description: "Version to download and run"
|
||||||
required: false
|
required: false
|
||||||
default: "27.0"
|
default: "29.1"
|
||||||
|
|
||||||
runs:
|
runs:
|
||||||
using: "composite"
|
using: "composite"
|
||||||
steps:
|
steps:
|
||||||
- name: Bitcoin Daemon Cache
|
- name: Bitcoin Daemon Cache
|
||||||
id: cache-bitcoind
|
id: cache-bitcoind
|
||||||
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
|
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
|
||||||
with:
|
with:
|
||||||
path: bitcoin.tar.gz
|
path: bitcoin.tar.gz
|
||||||
key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
|
key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
|
||||||
|
|||||||
50
.github/actions/build-dependencies/action.yml
vendored
50
.github/actions/build-dependencies/action.yml
vendored
@@ -7,13 +7,20 @@ runs:
|
|||||||
- name: Remove unused packages
|
- name: Remove unused packages
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
sudo apt remove -y "*msbuild*" "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
|
# Ensure the repositories are synced
|
||||||
|
sudo apt update -y
|
||||||
|
|
||||||
|
# Actually perform the removals
|
||||||
|
sudo apt remove -y "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
|
||||||
sudo apt remove -y "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*"
|
sudo apt remove -y "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*"
|
||||||
sudo apt remove -y "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*"
|
sudo apt remove -y "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*"
|
||||||
|
|
||||||
|
sudo apt remove -y --allow-remove-essential -f shim-signed *python3*
|
||||||
|
# This removal command requires the prior removals due to unmet dependencies otherwise
|
||||||
sudo apt remove -y "*qemu*" "*sql*" "*texinfo*" "*imagemagick*"
|
sudo apt remove -y "*qemu*" "*sql*" "*texinfo*" "*imagemagick*"
|
||||||
sudo apt autoremove -y
|
|
||||||
sudo apt clean
|
# Reinstall python3 as a general dependency of a functional operating system
|
||||||
docker system prune -a --volumes
|
sudo apt install -y python3 --fix-missing
|
||||||
if: runner.os == 'Linux'
|
if: runner.os == 'Linux'
|
||||||
|
|
||||||
- name: Remove unused packages
|
- name: Remove unused packages
|
||||||
@@ -31,19 +38,48 @@ runs:
|
|||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
if [ "$RUNNER_OS" == "Linux" ]; then
|
if [ "$RUNNER_OS" == "Linux" ]; then
|
||||||
sudo apt install -y ca-certificates protobuf-compiler
|
sudo apt install -y ca-certificates protobuf-compiler libclang-dev
|
||||||
elif [ "$RUNNER_OS" == "Windows" ]; then
|
elif [ "$RUNNER_OS" == "Windows" ]; then
|
||||||
choco install protoc
|
choco install protoc
|
||||||
elif [ "$RUNNER_OS" == "macOS" ]; then
|
elif [ "$RUNNER_OS" == "macOS" ]; then
|
||||||
brew install protobuf
|
brew install protobuf llvm
|
||||||
|
HOMEBREW_ROOT_PATH=/opt/homebrew # Apple Silicon
|
||||||
|
if [ $(uname -m) = "x86_64" ]; then HOMEBREW_ROOT_PATH=/usr/local; fi # Intel
|
||||||
|
ls $HOMEBREW_ROOT_PATH/opt/llvm/lib | grep "libclang.dylib" # Make sure this installed `libclang`
|
||||||
|
echo "DYLD_LIBRARY_PATH=$HOMEBREW_ROOT_PATH/opt/llvm/lib:$DYLD_LIBRARY_PATH" >> "$GITHUB_ENV"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
- name: Install solc
|
- name: Install solc
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
cargo install svm-rs
|
cargo +1.90 install svm-rs --version =0.5.19
|
||||||
svm install 0.8.26
|
svm install 0.8.26
|
||||||
svm use 0.8.26
|
svm use 0.8.26
|
||||||
|
|
||||||
|
- name: Remove preinstalled Docker
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
docker system prune -a --volumes
|
||||||
|
sudo apt remove -y *docker*
|
||||||
|
# Install uidmap which will be required for the explicitly installed Docker
|
||||||
|
sudo apt install uidmap
|
||||||
|
if: runner.os == 'Linux'
|
||||||
|
|
||||||
|
- name: Update system dependencies
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
sudo apt update -y
|
||||||
|
sudo apt upgrade -y
|
||||||
|
sudo apt autoremove -y
|
||||||
|
sudo apt clean
|
||||||
|
if: runner.os == 'Linux'
|
||||||
|
|
||||||
|
- name: Install rootless Docker
|
||||||
|
uses: docker/setup-docker-action@b60f85385d03ac8acfca6d9996982511d8620a19
|
||||||
|
with:
|
||||||
|
rootless: true
|
||||||
|
set-host: true
|
||||||
|
if: runner.os == 'Linux'
|
||||||
|
|
||||||
# - name: Cache Rust
|
# - name: Cache Rust
|
||||||
# uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43
|
# uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43
|
||||||
|
|||||||
2
.github/actions/monero-wallet-rpc/action.yml
vendored
2
.github/actions/monero-wallet-rpc/action.yml
vendored
@@ -12,7 +12,7 @@ runs:
|
|||||||
steps:
|
steps:
|
||||||
- name: Monero Wallet RPC Cache
|
- name: Monero Wallet RPC Cache
|
||||||
id: cache-monero-wallet-rpc
|
id: cache-monero-wallet-rpc
|
||||||
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
|
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
|
||||||
with:
|
with:
|
||||||
path: monero-wallet-rpc
|
path: monero-wallet-rpc
|
||||||
key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
|
key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
|
||||||
|
|||||||
2
.github/actions/monero/action.yml
vendored
2
.github/actions/monero/action.yml
vendored
@@ -12,7 +12,7 @@ runs:
|
|||||||
steps:
|
steps:
|
||||||
- name: Monero Daemon Cache
|
- name: Monero Daemon Cache
|
||||||
id: cache-monerod
|
id: cache-monerod
|
||||||
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
|
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
|
||||||
with:
|
with:
|
||||||
path: /usr/bin/monerod
|
path: /usr/bin/monerod
|
||||||
key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
|
key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
|
||||||
|
|||||||
2
.github/actions/test-dependencies/action.yml
vendored
2
.github/actions/test-dependencies/action.yml
vendored
@@ -10,7 +10,7 @@ inputs:
|
|||||||
bitcoin-version:
|
bitcoin-version:
|
||||||
description: "Bitcoin version to download and run as a regtest node"
|
description: "Bitcoin version to download and run as a regtest node"
|
||||||
required: false
|
required: false
|
||||||
default: "27.1"
|
default: "29.1"
|
||||||
|
|
||||||
runs:
|
runs:
|
||||||
using: "composite"
|
using: "composite"
|
||||||
|
|||||||
2
.github/nightly-version
vendored
2
.github/nightly-version
vendored
@@ -1 +1 @@
|
|||||||
nightly-2024-07-01
|
nightly-2025-11-01
|
||||||
|
|||||||
14
.github/workflows/crypto-tests.yml
vendored
14
.github/workflows/crypto-tests.yml
vendored
@@ -32,13 +32,17 @@ jobs:
|
|||||||
-p dalek-ff-group \
|
-p dalek-ff-group \
|
||||||
-p minimal-ed448 \
|
-p minimal-ed448 \
|
||||||
-p ciphersuite \
|
-p ciphersuite \
|
||||||
|
-p ciphersuite-kp256 \
|
||||||
-p multiexp \
|
-p multiexp \
|
||||||
-p schnorr-signatures \
|
-p schnorr-signatures \
|
||||||
-p dleq \
|
-p prime-field \
|
||||||
-p generalized-bulletproofs \
|
-p short-weierstrass \
|
||||||
-p generalized-bulletproofs-circuit-abstraction \
|
-p secq256k1 \
|
||||||
-p ec-divisors \
|
-p embedwards25519 \
|
||||||
-p generalized-bulletproofs-ec-gadgets \
|
|
||||||
-p dkg \
|
-p dkg \
|
||||||
|
-p dkg-recovery \
|
||||||
|
-p dkg-dealer \
|
||||||
|
-p dkg-musig \
|
||||||
|
-p dkg-evrf \
|
||||||
-p modular-frost \
|
-p modular-frost \
|
||||||
-p frost-schnorrkel
|
-p frost-schnorrkel
|
||||||
|
|||||||
6
.github/workflows/daily-deny.yml
vendored
6
.github/workflows/daily-deny.yml
vendored
@@ -12,13 +12,13 @@ jobs:
|
|||||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||||
|
|
||||||
- name: Advisory Cache
|
- name: Advisory Cache
|
||||||
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
|
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
|
||||||
with:
|
with:
|
||||||
path: ~/.cargo/advisory-db
|
path: ~/.cargo/advisory-db
|
||||||
key: rust-advisory-db
|
key: rust-advisory-db
|
||||||
|
|
||||||
- name: Install cargo deny
|
- name: Install cargo deny
|
||||||
run: cargo install --locked cargo-deny
|
run: cargo +1.90 install cargo-deny --version =0.18.4
|
||||||
|
|
||||||
- name: Run cargo deny
|
- name: Run cargo deny
|
||||||
run: cargo deny -L error --all-features check
|
run: cargo deny -L error --all-features check --hide-inclusion-graph
|
||||||
|
|||||||
111
.github/workflows/lint.yml
vendored
111
.github/workflows/lint.yml
vendored
@@ -11,7 +11,7 @@ jobs:
|
|||||||
clippy:
|
clippy:
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
os: [ubuntu-latest, macos-13, macos-14, windows-latest]
|
os: [ubuntu-latest, macos-15-intel, macos-latest, windows-latest]
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
@@ -26,7 +26,7 @@ jobs:
|
|||||||
uses: ./.github/actions/build-dependencies
|
uses: ./.github/actions/build-dependencies
|
||||||
|
|
||||||
- name: Install nightly rust
|
- name: Install nightly rust
|
||||||
run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32-unknown-unknown -c clippy
|
run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-src -c clippy
|
||||||
|
|
||||||
- name: Run Clippy
|
- name: Run Clippy
|
||||||
run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module
|
run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module
|
||||||
@@ -46,16 +46,16 @@ jobs:
|
|||||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||||
|
|
||||||
- name: Advisory Cache
|
- name: Advisory Cache
|
||||||
uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
|
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
|
||||||
with:
|
with:
|
||||||
path: ~/.cargo/advisory-db
|
path: ~/.cargo/advisory-db
|
||||||
key: rust-advisory-db
|
key: rust-advisory-db
|
||||||
|
|
||||||
- name: Install cargo deny
|
- name: Install cargo deny
|
||||||
run: cargo install --locked cargo-deny
|
run: cargo +1.90 install cargo-deny --version =0.18.4
|
||||||
|
|
||||||
- name: Run cargo deny
|
- name: Run cargo deny
|
||||||
run: cargo deny -L error --all-features check
|
run: cargo deny -L error --all-features check --hide-inclusion-graph
|
||||||
|
|
||||||
fmt:
|
fmt:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
@@ -88,8 +88,105 @@ jobs:
|
|||||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||||
- name: Verify all dependencies are in use
|
- name: Verify all dependencies are in use
|
||||||
run: |
|
run: |
|
||||||
cargo install cargo-machete
|
cargo +1.90 install cargo-machete --version =0.9.1
|
||||||
cargo machete
|
cargo +1.90 machete
|
||||||
|
|
||||||
|
msrv:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||||
|
- name: Verify claimed `rust-version`
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
cargo +1.90 install cargo-msrv --version =0.18.4
|
||||||
|
|
||||||
|
function check_msrv {
|
||||||
|
# We `cd` into the directory passed as the first argument, but will return to the
|
||||||
|
# directory called from.
|
||||||
|
return_to=$(pwd)
|
||||||
|
echo "Checking $1"
|
||||||
|
cd $1
|
||||||
|
|
||||||
|
# We then find the existing `rust-version` using `grep` (for the right line) and then a
|
||||||
|
# regex (to strip to just the major and minor version).
|
||||||
|
existing=$(cat ./Cargo.toml | grep "rust-version" | grep -Eo "[0-9]+\.[0-9]+")
|
||||||
|
|
||||||
|
# We then backup the `Cargo.toml`, allowing us to restore it after, saving time on future
|
||||||
|
# MSRV checks (as they'll benefit from immediately exiting if the queried version is less
|
||||||
|
# than the declared MSRV).
|
||||||
|
mv ./Cargo.toml ./Cargo.toml.bak
|
||||||
|
|
||||||
|
# We then use an inverted (`-v`) grep to remove the existing `rust-version` from the
|
||||||
|
# `Cargo.toml`, as required because else earlier versions of Rust won't even attempt to
|
||||||
|
# compile this crate.
|
||||||
|
cat ./Cargo.toml.bak | grep -v "rust-version" > Cargo.toml
|
||||||
|
|
||||||
|
# We then find the actual `rust-version` using `cargo-msrv` (again stripping to just the
|
||||||
|
# major and minor version).
|
||||||
|
actual=$(cargo msrv find --output-format minimal | grep -Eo "^[0-9]+\.[0-9]+")
|
||||||
|
|
||||||
|
# Finally, we compare the two.
|
||||||
|
echo "Declared rust-version: $existing"
|
||||||
|
echo "Actual rust-version: $actual"
|
||||||
|
[ $existing == $actual ]
|
||||||
|
result=$?
|
||||||
|
|
||||||
|
# Restore the original `Cargo.toml`.
|
||||||
|
rm Cargo.toml
|
||||||
|
mv ./Cargo.toml.bak ./Cargo.toml
|
||||||
|
|
||||||
|
# Return to the directory called from and return the result.
|
||||||
|
cd $return_to
|
||||||
|
return $result
|
||||||
|
}
|
||||||
|
|
||||||
|
# Check each member of the workspace
|
||||||
|
function check_workspace {
|
||||||
|
# Get the members array from the workspace's `Cargo.toml`
|
||||||
|
cargo_toml_lines=$(cat ./Cargo.toml | wc -l)
|
||||||
|
# Keep all lines after the start of the array, then keep all lines before the next "]"
|
||||||
|
members=$(cat Cargo.toml | grep "members\ \=\ \[" -m1 -A$cargo_toml_lines | grep "]" -m1 -B$cargo_toml_lines)
|
||||||
|
|
||||||
|
# Parse out any comments, whitespace, including comments post-fixed on the same line as an entry
|
||||||
|
# We accomplish the latter by pruning all characters after the entry's ","
|
||||||
|
members=$(echo "$members" | grep -Ev "^[[:space:]]*(#|$)" | awk -F',' '{print $1","}')
|
||||||
|
# Replace the first line, which was "members = [" and is now "members = [,", with "["
|
||||||
|
members=$(echo "$members" | sed "1s/.*/\[/")
|
||||||
|
# Correct the last line, which was malleated to "],"
|
||||||
|
members=$(echo "$members" | sed "$(echo "$members" | wc -l)s/\]\,/\]/")
|
||||||
|
|
||||||
|
# Don't check the patches
|
||||||
|
members=$(echo "$members" | grep -v "patches")
|
||||||
|
# Don't check the following
|
||||||
|
# Most of these are binaries, with the exception of the Substrate runtime which has a
|
||||||
|
# bespoke build pipeline
|
||||||
|
members=$(echo "$members" | grep -v "networks/ethereum/relayer\"")
|
||||||
|
members=$(echo "$members" | grep -v "message-queue\"")
|
||||||
|
members=$(echo "$members" | grep -v "processor/bin\"")
|
||||||
|
members=$(echo "$members" | grep -v "processor/bitcoin\"")
|
||||||
|
members=$(echo "$members" | grep -v "processor/ethereum\"")
|
||||||
|
members=$(echo "$members" | grep -v "processor/monero\"")
|
||||||
|
members=$(echo "$members" | grep -v "coordinator\"")
|
||||||
|
members=$(echo "$members" | grep -v "substrate/runtime\"")
|
||||||
|
members=$(echo "$members" | grep -v "substrate/node\"")
|
||||||
|
members=$(echo "$members" | grep -v "orchestration\"")
|
||||||
|
|
||||||
|
# Don't check the tests
|
||||||
|
members=$(echo "$members" | grep -v "mini\"")
|
||||||
|
members=$(echo "$members" | grep -v "tests/")
|
||||||
|
|
||||||
|
# Remove the trailing comma by replacing the last line's "," with ""
|
||||||
|
members=$(echo "$members" | sed "$(($(echo "$members" | wc -l) - 1))s/\,//")
|
||||||
|
|
||||||
|
echo $members | jq -r ".[]" | while read -r member; do
|
||||||
|
check_msrv $member
|
||||||
|
correct=$?
|
||||||
|
if [ $correct -ne 0 ]; then
|
||||||
|
return $correct
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
}
|
||||||
|
check_workspace
|
||||||
|
|
||||||
slither:
|
slither:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
|||||||
77
.github/workflows/monero-tests.yaml
vendored
77
.github/workflows/monero-tests.yaml
vendored
@@ -1,77 +0,0 @@
|
|||||||
name: Monero Tests
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- develop
|
|
||||||
paths:
|
|
||||||
- "networks/monero/**"
|
|
||||||
- "processor/**"
|
|
||||||
|
|
||||||
pull_request:
|
|
||||||
paths:
|
|
||||||
- "networks/monero/**"
|
|
||||||
- "processor/**"
|
|
||||||
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
# Only run these once since they will be consistent regardless of any node
|
|
||||||
unit-tests:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
|
||||||
|
|
||||||
- name: Test Dependencies
|
|
||||||
uses: ./.github/actions/test-dependencies
|
|
||||||
|
|
||||||
- name: Run Unit Tests Without Features
|
|
||||||
run: |
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-io --lib
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-generators --lib
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-primitives --lib
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-mlsag --lib
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-clsag --lib
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-borromean --lib
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-bulletproofs --lib
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --lib
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-rpc --lib
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --lib
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-address --lib
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --lib
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-seed --lib
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package polyseed --lib
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --lib
|
|
||||||
|
|
||||||
# Doesn't run unit tests with features as the tests workflow will
|
|
||||||
|
|
||||||
integration-tests:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
# Test against all supported protocol versions
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
version: [v0.17.3.2, v0.18.3.4]
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
|
||||||
|
|
||||||
- name: Test Dependencies
|
|
||||||
uses: ./.github/actions/test-dependencies
|
|
||||||
with:
|
|
||||||
monero-version: ${{ matrix.version }}
|
|
||||||
|
|
||||||
- name: Run Integration Tests Without Features
|
|
||||||
run: |
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --test '*'
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --test '*'
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --test '*'
|
|
||||||
|
|
||||||
- name: Run Integration Tests
|
|
||||||
# Don't run if the the tests workflow also will
|
|
||||||
if: ${{ matrix.version != 'v0.18.3.4' }}
|
|
||||||
run: |
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --all-features --test '*'
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --all-features --test '*'
|
|
||||||
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --all-features --test '*'
|
|
||||||
259
.github/workflows/msrv.yml
vendored
259
.github/workflows/msrv.yml
vendored
@@ -1,259 +0,0 @@
|
|||||||
name: Weekly MSRV Check
|
|
||||||
|
|
||||||
on:
|
|
||||||
schedule:
|
|
||||||
- cron: "0 0 * * 0"
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
msrv-common:
|
|
||||||
name: Run cargo msrv on common
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
|
||||||
|
|
||||||
- name: Install Build Dependencies
|
|
||||||
uses: ./.github/actions/build-dependencies
|
|
||||||
|
|
||||||
- name: Install cargo msrv
|
|
||||||
run: cargo install --locked cargo-msrv
|
|
||||||
|
|
||||||
- name: Run cargo msrv on common
|
|
||||||
run: |
|
|
||||||
cargo msrv verify --manifest-path common/zalloc/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path common/std-shims/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path common/env/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path common/db/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path common/task/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path common/request/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path common/patchable-async-sleep/Cargo.toml
|
|
||||||
|
|
||||||
msrv-crypto:
|
|
||||||
name: Run cargo msrv on crypto
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
|
||||||
|
|
||||||
- name: Install Build Dependencies
|
|
||||||
uses: ./.github/actions/build-dependencies
|
|
||||||
|
|
||||||
- name: Install cargo msrv
|
|
||||||
run: cargo install --locked cargo-msrv
|
|
||||||
|
|
||||||
- name: Run cargo msrv on crypto
|
|
||||||
run: |
|
|
||||||
cargo msrv verify --manifest-path crypto/transcript/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path crypto/ff-group-tests/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path crypto/dalek-ff-group/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path crypto/ed448/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path crypto/multiexp/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path crypto/dleq/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path crypto/ciphersuite/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path crypto/schnorr/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path crypto/evrf/generalized-bulletproofs/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path crypto/evrf/circuit-abstraction/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path crypto/evrf/divisors/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path crypto/evrf/ec-gadgets/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path crypto/evrf/embedwards25519/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path crypto/evrf/secq256k1/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path crypto/dkg/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path crypto/frost/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path crypto/schnorrkel/Cargo.toml
|
|
||||||
|
|
||||||
msrv-networks:
|
|
||||||
name: Run cargo msrv on networks
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
|
||||||
|
|
||||||
- name: Install Build Dependencies
|
|
||||||
uses: ./.github/actions/build-dependencies
|
|
||||||
|
|
||||||
- name: Install cargo msrv
|
|
||||||
run: cargo install --locked cargo-msrv
|
|
||||||
|
|
||||||
- name: Run cargo msrv on networks
|
|
||||||
run: |
|
|
||||||
cargo msrv verify --manifest-path networks/bitcoin/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path networks/ethereum/build-contracts/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path networks/ethereum/schnorr/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path networks/ethereum/alloy-simple-request-transport/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path networks/ethereum/relayer/Cargo.toml --features parity-db
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path networks/monero/io/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path networks/monero/generators/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path networks/monero/primitives/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path networks/monero/ringct/mlsag/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path networks/monero/ringct/clsag/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path networks/monero/ringct/borromean/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path networks/monero/ringct/bulletproofs/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path networks/monero/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path networks/monero/rpc/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path networks/monero/rpc/simple-request/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path networks/monero/wallet/address/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path networks/monero/wallet/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path networks/monero/verify-chain/Cargo.toml
|
|
||||||
|
|
||||||
msrv-message-queue:
|
|
||||||
name: Run cargo msrv on message-queue
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
|
||||||
|
|
||||||
- name: Install Build Dependencies
|
|
||||||
uses: ./.github/actions/build-dependencies
|
|
||||||
|
|
||||||
- name: Install cargo msrv
|
|
||||||
run: cargo install --locked cargo-msrv
|
|
||||||
|
|
||||||
- name: Run cargo msrv on message-queue
|
|
||||||
run: |
|
|
||||||
cargo msrv verify --manifest-path message-queue/Cargo.toml --features parity-db
|
|
||||||
|
|
||||||
msrv-processor:
|
|
||||||
name: Run cargo msrv on processor
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
|
||||||
|
|
||||||
- name: Install Build Dependencies
|
|
||||||
uses: ./.github/actions/build-dependencies
|
|
||||||
|
|
||||||
- name: Install cargo msrv
|
|
||||||
run: cargo install --locked cargo-msrv
|
|
||||||
|
|
||||||
- name: Run cargo msrv on processor
|
|
||||||
run: |
|
|
||||||
cargo msrv verify --manifest-path processor/view-keys/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path processor/primitives/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path processor/messages/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path processor/scanner/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path processor/scheduler/primitives/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path processor/scheduler/smart-contract/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path processor/scheduler/utxo/primitives/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path processor/scheduler/utxo/standard/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path processor/scheduler/utxo/transaction-chaining/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path processor/key-gen/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path processor/frost-attempt-manager/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path processor/signers/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path processor/bin/Cargo.toml --features parity-db
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path processor/bitcoin/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path processor/ethereum/primitives/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path processor/ethereum/test-primitives/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path processor/ethereum/erc20/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path processor/ethereum/deployer/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path processor/ethereum/router/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path processor/ethereum/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path processor/monero/Cargo.toml
|
|
||||||
|
|
||||||
msrv-coordinator:
|
|
||||||
name: Run cargo msrv on coordinator
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
|
||||||
|
|
||||||
- name: Install Build Dependencies
|
|
||||||
uses: ./.github/actions/build-dependencies
|
|
||||||
|
|
||||||
- name: Install cargo msrv
|
|
||||||
run: cargo install --locked cargo-msrv
|
|
||||||
|
|
||||||
- name: Run cargo msrv on coordinator
|
|
||||||
run: |
|
|
||||||
cargo msrv verify --manifest-path coordinator/tributary-sdk/tendermint/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path coordinator/tributary-sdk/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path coordinator/cosign/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path coordinator/substrate/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path coordinator/tributary/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path coordinator/p2p/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path coordinator/p2p/libp2p/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path coordinator/Cargo.toml
|
|
||||||
|
|
||||||
msrv-substrate:
|
|
||||||
name: Run cargo msrv on substrate
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
|
||||||
|
|
||||||
- name: Install Build Dependencies
|
|
||||||
uses: ./.github/actions/build-dependencies
|
|
||||||
|
|
||||||
- name: Install cargo msrv
|
|
||||||
run: cargo install --locked cargo-msrv
|
|
||||||
|
|
||||||
- name: Run cargo msrv on substrate
|
|
||||||
run: |
|
|
||||||
cargo msrv verify --manifest-path substrate/primitives/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path substrate/coins/primitives/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path substrate/coins/pallet/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path substrate/dex/pallet/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path substrate/economic-security/pallet/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path substrate/genesis-liquidity/primitives/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path substrate/genesis-liquidity/pallet/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path substrate/in-instructions/primitives/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path substrate/in-instructions/pallet/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path substrate/validator-sets/pallet/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path substrate/validator-sets/primitives/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path substrate/emissions/primitives/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path substrate/emissions/pallet/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path substrate/signals/primitives/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path substrate/signals/pallet/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path substrate/abi/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path substrate/client/Cargo.toml
|
|
||||||
|
|
||||||
cargo msrv verify --manifest-path substrate/runtime/Cargo.toml
|
|
||||||
cargo msrv verify --manifest-path substrate/node/Cargo.toml
|
|
||||||
|
|
||||||
msrv-orchestration:
|
|
||||||
name: Run cargo msrv on orchestration
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
|
||||||
|
|
||||||
- name: Install Build Dependencies
|
|
||||||
uses: ./.github/actions/build-dependencies
|
|
||||||
|
|
||||||
- name: Install cargo msrv
|
|
||||||
run: cargo install --locked cargo-msrv
|
|
||||||
|
|
||||||
- name: Run cargo msrv on message-queue
|
|
||||||
run: |
|
|
||||||
cargo msrv verify --manifest-path orchestration/Cargo.toml
|
|
||||||
|
|
||||||
msrv-mini:
|
|
||||||
name: Run cargo msrv on mini
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
|
||||||
|
|
||||||
- name: Install Build Dependencies
|
|
||||||
uses: ./.github/actions/build-dependencies
|
|
||||||
|
|
||||||
- name: Install cargo msrv
|
|
||||||
run: cargo install --locked cargo-msrv
|
|
||||||
|
|
||||||
- name: Run cargo msrv on mini
|
|
||||||
run: |
|
|
||||||
cargo msrv verify --manifest-path mini/Cargo.toml
|
|
||||||
16
.github/workflows/networks-tests.yml
vendored
16
.github/workflows/networks-tests.yml
vendored
@@ -34,19 +34,3 @@ jobs:
|
|||||||
-p ethereum-schnorr-contract \
|
-p ethereum-schnorr-contract \
|
||||||
-p alloy-simple-request-transport \
|
-p alloy-simple-request-transport \
|
||||||
-p serai-ethereum-relayer \
|
-p serai-ethereum-relayer \
|
||||||
-p monero-io \
|
|
||||||
-p monero-generators \
|
|
||||||
-p monero-primitives \
|
|
||||||
-p monero-mlsag \
|
|
||||||
-p monero-clsag \
|
|
||||||
-p monero-borromean \
|
|
||||||
-p monero-bulletproofs \
|
|
||||||
-p monero-serai \
|
|
||||||
-p monero-rpc \
|
|
||||||
-p monero-simple-request-rpc \
|
|
||||||
-p monero-address \
|
|
||||||
-p monero-wallet \
|
|
||||||
-p monero-seed \
|
|
||||||
-p polyseed \
|
|
||||||
-p monero-wallet-util \
|
|
||||||
-p monero-serai-verify-chain
|
|
||||||
|
|||||||
14
.github/workflows/no-std.yml
vendored
14
.github/workflows/no-std.yml
vendored
@@ -28,8 +28,18 @@ jobs:
|
|||||||
- name: Install Build Dependencies
|
- name: Install Build Dependencies
|
||||||
uses: ./.github/actions/build-dependencies
|
uses: ./.github/actions/build-dependencies
|
||||||
|
|
||||||
|
- name: Get nightly version to use
|
||||||
|
id: nightly
|
||||||
|
shell: bash
|
||||||
|
run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
- name: Install RISC-V Toolchain
|
- name: Install RISC-V Toolchain
|
||||||
run: sudo apt update && sudo apt install -y gcc-riscv64-unknown-elf gcc-multilib && rustup target add riscv32imac-unknown-none-elf
|
run: |
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install -y gcc-riscv64-unknown-elf gcc-multilib
|
||||||
|
rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal --component rust-src --target riscv32imac-unknown-none-elf
|
||||||
|
|
||||||
- name: Verify no-std builds
|
- name: Verify no-std builds
|
||||||
run: CFLAGS=-I/usr/include cargo build --target riscv32imac-unknown-none-elf -p serai-no-std-tests
|
run: |
|
||||||
|
CFLAGS=-I/usr/include cargo +${{ steps.nightly.outputs.version }} build --target riscv32imac-unknown-none-elf -Z build-std=core -p serai-no-std-tests
|
||||||
|
CFLAGS=-I/usr/include cargo +${{ steps.nightly.outputs.version }} build --target riscv32imac-unknown-none-elf -Z build-std=core,alloc -p serai-no-std-tests --features "alloc"
|
||||||
|
|||||||
43
.github/workflows/pages.yml
vendored
43
.github/workflows/pages.yml
vendored
@@ -1,6 +1,7 @@
|
|||||||
# MIT License
|
# MIT License
|
||||||
#
|
#
|
||||||
# Copyright (c) 2022 just-the-docs
|
# Copyright (c) 2022 just-the-docs
|
||||||
|
# Copyright (c) 2022-2024 Luke Parker
|
||||||
#
|
#
|
||||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
# of this software and associated documentation files (the "Software"), to deal
|
# of this software and associated documentation files (the "Software"), to deal
|
||||||
@@ -20,31 +21,21 @@
|
|||||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
# SOFTWARE.
|
# SOFTWARE.
|
||||||
|
|
||||||
# This workflow uses actions that are not certified by GitHub.
|
name: Deploy Rust docs and Jekyll site to Pages
|
||||||
# They are provided by a third-party and are governed by
|
|
||||||
# separate terms of service, privacy policy, and support
|
|
||||||
# documentation.
|
|
||||||
|
|
||||||
# Sample workflow for building and deploying a Jekyll site to GitHub Pages
|
|
||||||
name: Deploy Jekyll site to Pages
|
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- "develop"
|
- "develop"
|
||||||
paths:
|
|
||||||
- "docs/**"
|
|
||||||
|
|
||||||
# Allows you to run this workflow manually from the Actions tab
|
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
|
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
pages: write
|
pages: write
|
||||||
id-token: write
|
id-token: write
|
||||||
|
|
||||||
# Allow one concurrent deployment
|
# Only allow one concurrent deployment
|
||||||
concurrency:
|
concurrency:
|
||||||
group: "pages"
|
group: "pages"
|
||||||
cancel-in-progress: true
|
cancel-in-progress: true
|
||||||
@@ -53,27 +44,37 @@ jobs:
|
|||||||
# Build job
|
# Build job
|
||||||
build:
|
build:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
defaults:
|
|
||||||
run:
|
|
||||||
working-directory: docs
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
|
||||||
- name: Setup Ruby
|
- name: Setup Ruby
|
||||||
uses: ruby/setup-ruby@v1
|
uses: ruby/setup-ruby@44511735964dcb71245e7e55f72539531f7bc0eb
|
||||||
with:
|
with:
|
||||||
bundler-cache: true
|
bundler-cache: true
|
||||||
cache-version: 0
|
cache-version: 0
|
||||||
working-directory: "${{ github.workspace }}/docs"
|
working-directory: "${{ github.workspace }}/docs"
|
||||||
- name: Setup Pages
|
- name: Setup Pages
|
||||||
id: pages
|
id: pages
|
||||||
uses: actions/configure-pages@v3
|
uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b
|
||||||
- name: Build with Jekyll
|
- name: Build with Jekyll
|
||||||
run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
|
run: cd ${{ github.workspace }}/docs && bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
|
||||||
env:
|
env:
|
||||||
JEKYLL_ENV: production
|
JEKYLL_ENV: production
|
||||||
|
|
||||||
|
- name: Get nightly version to use
|
||||||
|
id: nightly
|
||||||
|
shell: bash
|
||||||
|
run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT
|
||||||
|
- name: Build Dependencies
|
||||||
|
uses: ./.github/actions/build-dependencies
|
||||||
|
- name: Buld Rust docs
|
||||||
|
run: |
|
||||||
|
rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-docs
|
||||||
|
RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --no-deps --all-features
|
||||||
|
mv target/doc docs/_site/rust
|
||||||
|
|
||||||
- name: Upload artifact
|
- name: Upload artifact
|
||||||
uses: actions/upload-pages-artifact@v1
|
uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b
|
||||||
with:
|
with:
|
||||||
path: "docs/_site/"
|
path: "docs/_site/"
|
||||||
|
|
||||||
@@ -87,4 +88,4 @@ jobs:
|
|||||||
steps:
|
steps:
|
||||||
- name: Deploy to GitHub Pages
|
- name: Deploy to GitHub Pages
|
||||||
id: deployment
|
id: deployment
|
||||||
uses: actions/deploy-pages@v2
|
uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e
|
||||||
|
|||||||
7
.gitignore
vendored
7
.gitignore
vendored
@@ -1,7 +1,14 @@
|
|||||||
target
|
target
|
||||||
|
|
||||||
|
# Don't commit any `Cargo.lock` which aren't the workspace's
|
||||||
|
Cargo.lock
|
||||||
|
!./Cargo.lock
|
||||||
|
|
||||||
|
# Don't commit any `Dockerfile`, as they're auto-generated, except the only one which isn't
|
||||||
Dockerfile
|
Dockerfile
|
||||||
Dockerfile.fast-epoch
|
Dockerfile.fast-epoch
|
||||||
!orchestration/runtime/Dockerfile
|
!orchestration/runtime/Dockerfile
|
||||||
|
|
||||||
.test-logs
|
.test-logs
|
||||||
|
|
||||||
.vscode
|
.vscode
|
||||||
|
|||||||
7408
Cargo.lock
generated
7408
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
130
Cargo.toml
130
Cargo.toml
@@ -1,20 +1,6 @@
|
|||||||
[workspace]
|
[workspace]
|
||||||
resolver = "2"
|
resolver = "2"
|
||||||
members = [
|
members = [
|
||||||
# Version patches
|
|
||||||
"patches/parking_lot_core",
|
|
||||||
"patches/parking_lot",
|
|
||||||
"patches/zstd",
|
|
||||||
"patches/rocksdb",
|
|
||||||
|
|
||||||
# std patches
|
|
||||||
"patches/matches",
|
|
||||||
"patches/is-terminal",
|
|
||||||
|
|
||||||
# Rewrites/redirects
|
|
||||||
"patches/option-ext",
|
|
||||||
"patches/directories-next",
|
|
||||||
|
|
||||||
"common/std-shims",
|
"common/std-shims",
|
||||||
"common/zalloc",
|
"common/zalloc",
|
||||||
"common/patchable-async-sleep",
|
"common/patchable-async-sleep",
|
||||||
@@ -29,19 +15,21 @@ members = [
|
|||||||
"crypto/dalek-ff-group",
|
"crypto/dalek-ff-group",
|
||||||
"crypto/ed448",
|
"crypto/ed448",
|
||||||
"crypto/ciphersuite",
|
"crypto/ciphersuite",
|
||||||
|
"crypto/ciphersuite/kp256",
|
||||||
|
|
||||||
"crypto/multiexp",
|
"crypto/multiexp",
|
||||||
"crypto/schnorr",
|
"crypto/schnorr",
|
||||||
"crypto/dleq",
|
|
||||||
|
|
||||||
"crypto/evrf/secq256k1",
|
"crypto/prime-field",
|
||||||
"crypto/evrf/embedwards25519",
|
"crypto/short-weierstrass",
|
||||||
"crypto/evrf/generalized-bulletproofs",
|
"crypto/secq256k1",
|
||||||
"crypto/evrf/circuit-abstraction",
|
"crypto/embedwards25519",
|
||||||
"crypto/evrf/divisors",
|
|
||||||
"crypto/evrf/ec-gadgets",
|
|
||||||
|
|
||||||
"crypto/dkg",
|
"crypto/dkg",
|
||||||
|
"crypto/dkg/recovery",
|
||||||
|
"crypto/dkg/dealer",
|
||||||
|
"crypto/dkg/musig",
|
||||||
|
"crypto/dkg/evrf",
|
||||||
"crypto/frost",
|
"crypto/frost",
|
||||||
"crypto/schnorrkel",
|
"crypto/schnorrkel",
|
||||||
|
|
||||||
@@ -52,23 +40,6 @@ members = [
|
|||||||
"networks/ethereum/alloy-simple-request-transport",
|
"networks/ethereum/alloy-simple-request-transport",
|
||||||
"networks/ethereum/relayer",
|
"networks/ethereum/relayer",
|
||||||
|
|
||||||
"networks/monero/io",
|
|
||||||
"networks/monero/generators",
|
|
||||||
"networks/monero/primitives",
|
|
||||||
"networks/monero/ringct/mlsag",
|
|
||||||
"networks/monero/ringct/clsag",
|
|
||||||
"networks/monero/ringct/borromean",
|
|
||||||
"networks/monero/ringct/bulletproofs",
|
|
||||||
"networks/monero",
|
|
||||||
"networks/monero/rpc",
|
|
||||||
"networks/monero/rpc/simple-request",
|
|
||||||
"networks/monero/wallet/address",
|
|
||||||
"networks/monero/wallet",
|
|
||||||
"networks/monero/wallet/seed",
|
|
||||||
"networks/monero/wallet/polyseed",
|
|
||||||
"networks/monero/wallet/util",
|
|
||||||
"networks/monero/verify-chain",
|
|
||||||
|
|
||||||
"message-queue",
|
"message-queue",
|
||||||
|
|
||||||
"processor/messages",
|
"processor/messages",
|
||||||
@@ -144,61 +115,78 @@ members = [
|
|||||||
|
|
||||||
"tests/docker",
|
"tests/docker",
|
||||||
"tests/message-queue",
|
"tests/message-queue",
|
||||||
"tests/processor",
|
# TODO "tests/processor",
|
||||||
"tests/coordinator",
|
# TODO "tests/coordinator",
|
||||||
"tests/full-stack",
|
# TODO "tests/full-stack",
|
||||||
"tests/reproducible-runtime",
|
"tests/reproducible-runtime",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[profile.dev.package]
|
||||||
# Always compile Monero (and a variety of dependencies) with optimizations due
|
# Always compile Monero (and a variety of dependencies) with optimizations due
|
||||||
# to the extensive operations required for Bulletproofs
|
# to the extensive operations required for Bulletproofs
|
||||||
[profile.dev.package]
|
|
||||||
subtle = { opt-level = 3 }
|
subtle = { opt-level = 3 }
|
||||||
|
|
||||||
|
sha3 = { opt-level = 3 }
|
||||||
|
blake2 = { opt-level = 3 }
|
||||||
|
|
||||||
ff = { opt-level = 3 }
|
ff = { opt-level = 3 }
|
||||||
group = { opt-level = 3 }
|
group = { opt-level = 3 }
|
||||||
|
|
||||||
crypto-bigint = { opt-level = 3 }
|
crypto-bigint = { opt-level = 3 }
|
||||||
secp256k1 = { opt-level = 3 }
|
|
||||||
curve25519-dalek = { opt-level = 3 }
|
curve25519-dalek = { opt-level = 3 }
|
||||||
dalek-ff-group = { opt-level = 3 }
|
dalek-ff-group = { opt-level = 3 }
|
||||||
minimal-ed448 = { opt-level = 3 }
|
|
||||||
|
|
||||||
multiexp = { opt-level = 3 }
|
multiexp = { opt-level = 3 }
|
||||||
|
|
||||||
secq256k1 = { opt-level = 3 }
|
|
||||||
embedwards25519 = { opt-level = 3 }
|
|
||||||
generalized-bulletproofs = { opt-level = 3 }
|
|
||||||
generalized-bulletproofs-circuit-abstraction = { opt-level = 3 }
|
|
||||||
ec-divisors = { opt-level = 3 }
|
|
||||||
generalized-bulletproofs-ec-gadgets = { opt-level = 3 }
|
|
||||||
|
|
||||||
dkg = { opt-level = 3 }
|
|
||||||
|
|
||||||
monero-generators = { opt-level = 3 }
|
monero-generators = { opt-level = 3 }
|
||||||
monero-borromean = { opt-level = 3 }
|
monero-borromean = { opt-level = 3 }
|
||||||
monero-bulletproofs = { opt-level = 3 }
|
monero-bulletproofs = { opt-level = 3 }
|
||||||
monero-mlsag = { opt-level = 3 }
|
monero-mlsag = { opt-level = 3 }
|
||||||
monero-clsag = { opt-level = 3 }
|
monero-clsag = { opt-level = 3 }
|
||||||
|
monero-oxide = { opt-level = 3 }
|
||||||
|
|
||||||
|
# Always compile the eVRF DKG tree with optimizations as well
|
||||||
|
secp256k1 = { opt-level = 3 }
|
||||||
|
secq256k1 = { opt-level = 3 }
|
||||||
|
embedwards25519 = { opt-level = 3 }
|
||||||
|
generalized-bulletproofs = { opt-level = 3 }
|
||||||
|
generalized-bulletproofs-circuit-abstraction = { opt-level = 3 }
|
||||||
|
generalized-bulletproofs-ec-gadgets = { opt-level = 3 }
|
||||||
|
|
||||||
|
# revm also effectively requires being built with optimizations
|
||||||
|
revm = { opt-level = 3 }
|
||||||
|
revm-bytecode = { opt-level = 3 }
|
||||||
|
revm-context = { opt-level = 3 }
|
||||||
|
revm-context-interface = { opt-level = 3 }
|
||||||
|
revm-database = { opt-level = 3 }
|
||||||
|
revm-database-interface = { opt-level = 3 }
|
||||||
|
revm-handler = { opt-level = 3 }
|
||||||
|
revm-inspector = { opt-level = 3 }
|
||||||
|
revm-interpreter = { opt-level = 3 }
|
||||||
|
revm-precompile = { opt-level = 3 }
|
||||||
|
revm-primitives = { opt-level = 3 }
|
||||||
|
revm-state = { opt-level = 3 }
|
||||||
|
|
||||||
[profile.release]
|
[profile.release]
|
||||||
panic = "unwind"
|
panic = "unwind"
|
||||||
|
overflow-checks = true
|
||||||
|
|
||||||
[patch.crates-io]
|
[patch.crates-io]
|
||||||
|
# Dependencies from monero-oxide which originate from within our own tree
|
||||||
|
std-shims = { path = "patches/std-shims" }
|
||||||
|
simple-request = { path = "patches/simple-request" }
|
||||||
|
multiexp = { path = "crypto/multiexp" }
|
||||||
|
flexible-transcript = { path = "crypto/transcript" }
|
||||||
|
ciphersuite = { path = "patches/ciphersuite" }
|
||||||
|
dalek-ff-group = { path = "patches/dalek-ff-group" }
|
||||||
|
minimal-ed448 = { path = "crypto/ed448" }
|
||||||
|
modular-frost = { path = "crypto/frost" }
|
||||||
|
|
||||||
# https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
|
# https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
|
||||||
lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }
|
lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }
|
||||||
|
|
||||||
parking_lot_core = { path = "patches/parking_lot_core" }
|
# These has an `std` alternative since Rust's 2024 edition
|
||||||
parking_lot = { path = "patches/parking_lot" }
|
home = { path = "patches/home" }
|
||||||
# wasmtime pulls in an old version for this
|
|
||||||
zstd = { path = "patches/zstd" }
|
|
||||||
# Needed for WAL compression
|
|
||||||
rocksdb = { path = "patches/rocksdb" }
|
|
||||||
|
|
||||||
# is-terminal now has an std-based solution with an equivalent API
|
|
||||||
is-terminal = { path = "patches/is-terminal" }
|
|
||||||
# So does matches
|
|
||||||
matches = { path = "patches/matches" }
|
|
||||||
|
|
||||||
# directories-next was created because directories was unmaintained
|
# directories-next was created because directories was unmaintained
|
||||||
# directories-next is now unmaintained while directories is maintained
|
# directories-next is now unmaintained while directories is maintained
|
||||||
@@ -208,12 +196,16 @@ matches = { path = "patches/matches" }
|
|||||||
option-ext = { path = "patches/option-ext" }
|
option-ext = { path = "patches/option-ext" }
|
||||||
directories-next = { path = "patches/directories-next" }
|
directories-next = { path = "patches/directories-next" }
|
||||||
|
|
||||||
# The official pasta_curves repo doesn't support Zeroize
|
# Patch to include `FromUniformBytes<64>` over Scalar
|
||||||
pasta_curves = { git = "https://github.com/kayabaNerve/pasta_curves", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616" }
|
k256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" }
|
||||||
|
p256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" }
|
||||||
|
|
||||||
[workspace.lints.clippy]
|
[workspace.lints.clippy]
|
||||||
unwrap_or_default = "allow"
|
unwrap_or_default = "allow"
|
||||||
map_unwrap_or = "allow"
|
map_unwrap_or = "allow"
|
||||||
|
needless_continue = "allow"
|
||||||
|
manual_is_multiple_of = "allow"
|
||||||
|
incompatible_msrv = "allow" # Manually verified with a GitHub workflow
|
||||||
borrow_as_ptr = "deny"
|
borrow_as_ptr = "deny"
|
||||||
cast_lossless = "deny"
|
cast_lossless = "deny"
|
||||||
cast_possible_truncation = "deny"
|
cast_possible_truncation = "deny"
|
||||||
@@ -244,7 +236,6 @@ manual_string_new = "deny"
|
|||||||
match_bool = "deny"
|
match_bool = "deny"
|
||||||
match_same_arms = "deny"
|
match_same_arms = "deny"
|
||||||
missing_fields_in_debug = "deny"
|
missing_fields_in_debug = "deny"
|
||||||
needless_continue = "deny"
|
|
||||||
needless_pass_by_value = "deny"
|
needless_pass_by_value = "deny"
|
||||||
ptr_cast_constness = "deny"
|
ptr_cast_constness = "deny"
|
||||||
range_minus_one = "deny"
|
range_minus_one = "deny"
|
||||||
@@ -253,7 +244,7 @@ redundant_closure_for_method_calls = "deny"
|
|||||||
redundant_else = "deny"
|
redundant_else = "deny"
|
||||||
string_add_assign = "deny"
|
string_add_assign = "deny"
|
||||||
string_slice = "deny"
|
string_slice = "deny"
|
||||||
unchecked_duration_subtraction = "deny"
|
unchecked_time_subtraction = "deny"
|
||||||
uninlined_format_args = "deny"
|
uninlined_format_args = "deny"
|
||||||
unnecessary_box_returns = "deny"
|
unnecessary_box_returns = "deny"
|
||||||
unnecessary_join = "deny"
|
unnecessary_join = "deny"
|
||||||
@@ -262,3 +253,6 @@ unnested_or_patterns = "deny"
|
|||||||
unused_async = "deny"
|
unused_async = "deny"
|
||||||
unused_self = "deny"
|
unused_self = "deny"
|
||||||
zero_sized_map_values = "deny"
|
zero_sized_map_values = "deny"
|
||||||
|
|
||||||
|
[workspace.lints.rust]
|
||||||
|
unused = "allow" # TODO: https://github.com/rust-lang/rust/issues/147648
|
||||||
|
|||||||
2
LICENSE
2
LICENSE
@@ -5,4 +5,4 @@ a full copy of the AGPL-3.0 License is included in the root of this repository
|
|||||||
as a reference text. This copy should be provided with any distribution of a
|
as a reference text. This copy should be provided with any distribution of a
|
||||||
crate licensed under the AGPL-3.0, as per its terms.
|
crate licensed under the AGPL-3.0, as per its terms.
|
||||||
|
|
||||||
The GitHub actions (`.github/actions`) are licensed under the MIT license.
|
The GitHub actions/workflows (`.github`) are licensed under the MIT license.
|
||||||
|
|||||||
@@ -59,7 +59,6 @@ issued at the discretion of the Immunefi program managers.
|
|||||||
- [Website](https://serai.exchange/): https://serai.exchange/
|
- [Website](https://serai.exchange/): https://serai.exchange/
|
||||||
- [Immunefi](https://immunefi.com/bounty/serai/): https://immunefi.com/bounty/serai/
|
- [Immunefi](https://immunefi.com/bounty/serai/): https://immunefi.com/bounty/serai/
|
||||||
- [Twitter](https://twitter.com/SeraiDEX): https://twitter.com/SeraiDEX
|
- [Twitter](https://twitter.com/SeraiDEX): https://twitter.com/SeraiDEX
|
||||||
- [Mastodon](https://cryptodon.lol/@serai): https://cryptodon.lol/@serai
|
|
||||||
- [Discord](https://discord.gg/mpEUtJR3vz): https://discord.gg/mpEUtJR3vz
|
- [Discord](https://discord.gg/mpEUtJR3vz): https://discord.gg/mpEUtJR3vz
|
||||||
- [Matrix](https://matrix.to/#/#serai:matrix.org): https://matrix.to/#/#serai:matrix.org
|
- [Matrix](https://matrix.to/#/#serai:matrix.org): https://matrix.to/#/#serai:matrix.org
|
||||||
- [Reddit](https://www.reddit.com/r/SeraiDEX/): https://www.reddit.com/r/SeraiDEX/
|
- [Reddit](https://www.reddit.com/r/SeraiDEX/): https://www.reddit.com/r/SeraiDEX/
|
||||||
|
|||||||
14
audits/Trail of Bits ethereum contracts April 2025/README.md
Normal file
14
audits/Trail of Bits ethereum contracts April 2025/README.md
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
# Trail of Bits Ethereum Contracts Audit, June 2025
|
||||||
|
|
||||||
|
This audit included:
|
||||||
|
- Our Schnorr contract and associated library (/networks/ethereum/schnorr)
|
||||||
|
- Our Ethereum primitives library (/processor/ethereum/primitives)
|
||||||
|
- Our Deployer contract and associated library (/processor/ethereum/deployer)
|
||||||
|
- Our ERC20 library (/processor/ethereum/erc20)
|
||||||
|
- Our Router contract and associated library (/processor/ethereum/router)
|
||||||
|
|
||||||
|
It encompasses the codebase up to commit 4e0c58464fc4673623938335f06e2e9ea96ca8dd.
|
||||||
|
|
||||||
|
Please see
|
||||||
|
https://github.com/trailofbits/publications/blob/30c4fa3ebf39ff8e4d23ba9567344ec9691697b5/reviews/2025-04-serai-dex-security-review.pdf
|
||||||
|
for the actual report.
|
||||||
50
audits/crypto/dkg/evrf/README.md
Normal file
50
audits/crypto/dkg/evrf/README.md
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
# eVRF DKG
|
||||||
|
|
||||||
|
In 2024, the [eVRF paper](https://eprint.iacr.org/2024/397) was published to
|
||||||
|
the IACR preprint server. Within it was a one-round unbiased DKG and a
|
||||||
|
one-round unbiased threshold DKG. Unfortunately, both simply describe
|
||||||
|
communication of the secret shares as 'Alice sends $s_b$ to Bob'. This causes,
|
||||||
|
in practice, the need for an additional round of communication to occur where
|
||||||
|
all participants confirm they received their secret shares.
|
||||||
|
|
||||||
|
Within Serai, it was posited to use the same premises as the DDH eVRF itself to
|
||||||
|
achieve a verifiable encryption scheme. This allows the secret shares to be
|
||||||
|
posted to any 'bulletin board' (such as a blockchain) and for all observers to
|
||||||
|
confirm:
|
||||||
|
|
||||||
|
- A participant participated
|
||||||
|
- The secret shares sent can be received by the intended recipient so long as
|
||||||
|
they can access the bulletin board
|
||||||
|
|
||||||
|
Additionally, Serai desired a robust scheme (albeit with a biased key as the
|
||||||
|
output, which is fine for our purposes). Accordingly, our implementation
|
||||||
|
instantiates the threshold eVRF DKG from the eVRF paper, with our own proposal
|
||||||
|
for verifiable encryption, with the caller allowed to decide the set of
|
||||||
|
participants. They may:
|
||||||
|
|
||||||
|
- Select everyone, collapsing to the non-threshold unbiased DKG from the eVRF
|
||||||
|
paper
|
||||||
|
- Select a pre-determined set, collapsing to the threshold unbiased DKG from
|
||||||
|
the eVRF paper
|
||||||
|
- Select a post-determined set (with any solution for the Common Subset
|
||||||
|
problem), allowing achieving a robust threshold biased DKG
|
||||||
|
|
||||||
|
Note that the eVRF paper proposes using the eVRF to sample coefficients yet
|
||||||
|
this is unnecessary when the resulting key will be biased. Any proof of
|
||||||
|
knowledge for the coefficients, as necessary for their extraction within the
|
||||||
|
security proofs, would be sufficient.
|
||||||
|
|
||||||
|
MAGIC Grants contracted HashCloak to formalize Serai's proposal for a DKG and
|
||||||
|
provide proofs for its security. This resulted in
|
||||||
|
[this paper](<./Security Proofs.pdf>).
|
||||||
|
|
||||||
|
Our implementation itself is then built on top of the audited
|
||||||
|
[`generalized-bulletproofs`](https://github.com/kayabaNerve/monero-oxide/tree/generalized-bulletproofs/audits/crypto/generalized-bulletproofs)
|
||||||
|
and
|
||||||
|
[`generalized-bulletproofs-ec-gadgets`](https://github.com/monero-oxide/monero-oxide/tree/fcmp%2B%2B/audits/fcmps).
|
||||||
|
|
||||||
|
Note we do not use the originally premised DDH eVRF yet the one premised on
|
||||||
|
elliptic curve divisors, the methodology of which is commented on
|
||||||
|
[here](https://github.com/monero-oxide/monero-oxide/tree/fcmp%2B%2B/audits/divisors).
|
||||||
|
|
||||||
|
Our implementation itself is unaudited at this time however.
|
||||||
BIN
audits/crypto/dkg/evrf/Security Proofs.pdf
Normal file
BIN
audits/crypto/dkg/evrf/Security Proofs.pdf
Normal file
Binary file not shown.
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/db"
|
|||||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||||
keywords = []
|
keywords = []
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
rust-version = "1.71"
|
rust-version = "1.65"
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
all-features = true
|
all-features = true
|
||||||
@@ -17,8 +17,8 @@ rustdoc-args = ["--cfg", "docsrs"]
|
|||||||
workspace = true
|
workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
parity-db = { version = "0.4", default-features = false, optional = true }
|
parity-db = { version = "0.5", default-features = false, optional = true }
|
||||||
rocksdb = { version = "0.23", default-features = false, features = ["zstd"], optional = true }
|
rocksdb = { version = "0.24", default-features = false, features = ["zstd"], optional = true }
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
parity-db = ["dep:parity-db"]
|
parity-db = ["dep:parity-db"]
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
MIT License
|
MIT License
|
||||||
|
|
||||||
Copyright (c) 2022-2023 Luke Parker
|
Copyright (c) 2022-2025 Luke Parker
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
|||||||
@@ -30,53 +30,13 @@ pub trait Get {
|
|||||||
/// is undefined. The transaction may block, deadlock, panic, overwrite one of the two values
|
/// is undefined. The transaction may block, deadlock, panic, overwrite one of the two values
|
||||||
/// randomly, or any other action, at time of write or at time of commit.
|
/// randomly, or any other action, at time of write or at time of commit.
|
||||||
#[must_use]
|
#[must_use]
|
||||||
pub trait DbTxn: Sized + Send + Get {
|
pub trait DbTxn: Send + Get {
|
||||||
/// Write a value to this key.
|
/// Write a value to this key.
|
||||||
fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>);
|
fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>);
|
||||||
/// Delete the value from this key.
|
/// Delete the value from this key.
|
||||||
fn del(&mut self, key: impl AsRef<[u8]>);
|
fn del(&mut self, key: impl AsRef<[u8]>);
|
||||||
/// Commit this transaction.
|
/// Commit this transaction.
|
||||||
fn commit(self);
|
fn commit(self);
|
||||||
/// Close this transaction.
|
|
||||||
///
|
|
||||||
/// This is equivalent to `Drop` on transactions which can be dropped. This is explicit and works
|
|
||||||
/// with transactions which can't be dropped.
|
|
||||||
fn close(self) {
|
|
||||||
drop(self);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Credit for the idea goes to https://jack.wrenn.fyi/blog/undroppable
|
|
||||||
pub struct Undroppable<T>(Option<T>);
|
|
||||||
impl<T> Drop for Undroppable<T> {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
// Use an assertion at compile time to prevent this code from compiling if generated
|
|
||||||
#[allow(clippy::assertions_on_constants)]
|
|
||||||
const {
|
|
||||||
assert!(false, "Undroppable DbTxn was dropped. Ensure all code paths call commit or close");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl<T: DbTxn> Get for Undroppable<T> {
|
|
||||||
fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
|
|
||||||
self.0.as_ref().unwrap().get(key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl<T: DbTxn> DbTxn for Undroppable<T> {
|
|
||||||
fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
|
|
||||||
self.0.as_mut().unwrap().put(key, value);
|
|
||||||
}
|
|
||||||
fn del(&mut self, key: impl AsRef<[u8]>) {
|
|
||||||
self.0.as_mut().unwrap().del(key);
|
|
||||||
}
|
|
||||||
fn commit(mut self) {
|
|
||||||
self.0.take().unwrap().commit();
|
|
||||||
let _ = core::mem::ManuallyDrop::new(self);
|
|
||||||
}
|
|
||||||
fn close(mut self) {
|
|
||||||
drop(self.0.take().unwrap());
|
|
||||||
let _ = core::mem::ManuallyDrop::new(self);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A database supporting atomic transaction.
|
/// A database supporting atomic transaction.
|
||||||
@@ -91,10 +51,6 @@ pub trait Db: 'static + Send + Sync + Clone + Get {
|
|||||||
let dst_len = u8::try_from(item_dst.len()).unwrap();
|
let dst_len = u8::try_from(item_dst.len()).unwrap();
|
||||||
[[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()
|
[[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()
|
||||||
}
|
}
|
||||||
/// Open a new transaction which may be dropped.
|
/// Open a new transaction.
|
||||||
fn unsafe_txn(&mut self) -> Self::Transaction<'_>;
|
fn txn(&mut self) -> Self::Transaction<'_>;
|
||||||
/// Open a new transaction which must be committed or closed.
|
|
||||||
fn txn(&mut self) -> Undroppable<Self::Transaction<'_>> {
|
|
||||||
Undroppable(Some(self.unsafe_txn()))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -74,7 +74,7 @@ impl Get for MemDb {
|
|||||||
}
|
}
|
||||||
impl Db for MemDb {
|
impl Db for MemDb {
|
||||||
type Transaction<'a> = MemDbTxn<'a>;
|
type Transaction<'a> = MemDbTxn<'a>;
|
||||||
fn unsafe_txn(&mut self) -> MemDbTxn<'_> {
|
fn txn(&mut self) -> MemDbTxn<'_> {
|
||||||
MemDbTxn(self, HashMap::new(), HashSet::new())
|
MemDbTxn(self, HashMap::new(), HashSet::new())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -37,7 +37,7 @@ impl Get for Arc<ParityDb> {
|
|||||||
}
|
}
|
||||||
impl Db for Arc<ParityDb> {
|
impl Db for Arc<ParityDb> {
|
||||||
type Transaction<'a> = Transaction<'a>;
|
type Transaction<'a> = Transaction<'a>;
|
||||||
fn unsafe_txn(&mut self) -> Self::Transaction<'_> {
|
fn txn(&mut self) -> Self::Transaction<'_> {
|
||||||
Transaction(self, vec![])
|
Transaction(self, vec![])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -39,7 +39,7 @@ impl<T: ThreadMode> Get for Arc<OptimisticTransactionDB<T>> {
|
|||||||
}
|
}
|
||||||
impl<T: Send + ThreadMode + 'static> Db for Arc<OptimisticTransactionDB<T>> {
|
impl<T: Send + ThreadMode + 'static> Db for Arc<OptimisticTransactionDB<T>> {
|
||||||
type Transaction<'a> = Transaction<'a, T>;
|
type Transaction<'a> = Transaction<'a, T>;
|
||||||
fn unsafe_txn(&mut self) -> Self::Transaction<'_> {
|
fn txn(&mut self) -> Self::Transaction<'_> {
|
||||||
let mut opts = WriteOptions::default();
|
let mut opts = WriteOptions::default();
|
||||||
opts.set_sync(true);
|
opts.set_sync(true);
|
||||||
Transaction(self.transaction_opt(&opts, &Default::default()), &**self)
|
Transaction(self.transaction_opt(&opts, &Default::default()), &**self)
|
||||||
|
|||||||
2
common/env/Cargo.toml
vendored
2
common/env/Cargo.toml
vendored
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/env"
|
|||||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||||
keywords = []
|
keywords = []
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
rust-version = "1.71"
|
rust-version = "1.64"
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
all-features = true
|
all-features = true
|
||||||
|
|||||||
2
common/env/LICENSE
vendored
2
common/env/LICENSE
vendored
@@ -1,6 +1,6 @@
|
|||||||
AGPL-3.0-only license
|
AGPL-3.0-only license
|
||||||
|
|
||||||
Copyright (c) 2023 Luke Parker
|
Copyright (c) 2023-2025 Luke Parker
|
||||||
|
|
||||||
This program is free software: you can redistribute it and/or modify
|
This program is free software: you can redistribute it and/or modify
|
||||||
it under the terms of the GNU Affero General Public License Version 3 as
|
it under the terms of the GNU Affero General Public License Version 3 as
|
||||||
|
|||||||
2
common/env/src/lib.rs
vendored
2
common/env/src/lib.rs
vendored
@@ -1,5 +1,5 @@
|
|||||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||||
|
|
||||||
// Obtain a variable from the Serai environment/secret store.
|
// Obtain a variable from the Serai environment/secret store.
|
||||||
pub fn var(variable: &str) -> Option<String> {
|
pub fn var(variable: &str) -> Option<String> {
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/patchable-a
|
|||||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||||
keywords = ["async", "sleep", "tokio", "smol", "async-std"]
|
keywords = ["async", "sleep", "tokio", "smol", "async-std"]
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
rust-version = "1.71"
|
rust-version = "1.70"
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
all-features = true
|
all-features = true
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
MIT License
|
MIT License
|
||||||
|
|
||||||
Copyright (c) 2024 Luke Parker
|
Copyright (c) 2024-2025 Luke Parker
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||||
#![doc = include_str!("../README.md")]
|
#![doc = include_str!("../README.md")]
|
||||||
#![deny(missing_docs)]
|
#![deny(missing_docs)]
|
||||||
|
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "simple-request"
|
name = "simple-request"
|
||||||
version = "0.1.0"
|
version = "0.3.0"
|
||||||
description = "A simple HTTP(S) request library"
|
description = "A simple HTTP(S) request library"
|
||||||
license = "MIT"
|
license = "MIT"
|
||||||
repository = "https://github.com/serai-dex/serai/tree/develop/common/simple-request"
|
repository = "https://github.com/serai-dex/serai/tree/develop/common/request"
|
||||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||||
keywords = ["http", "https", "async", "request", "ssl"]
|
keywords = ["http", "https", "async", "request", "ssl"]
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
@@ -19,9 +19,10 @@ workspace = true
|
|||||||
[dependencies]
|
[dependencies]
|
||||||
tower-service = { version = "0.3", default-features = false }
|
tower-service = { version = "0.3", default-features = false }
|
||||||
hyper = { version = "1", default-features = false, features = ["http1", "client"] }
|
hyper = { version = "1", default-features = false, features = ["http1", "client"] }
|
||||||
hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy", "tokio"] }
|
hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy"] }
|
||||||
http-body-util = { version = "0.1", default-features = false }
|
http-body-util = { version = "0.1", default-features = false }
|
||||||
tokio = { version = "1", default-features = false }
|
futures-util = { version = "0.3", default-features = false, features = ["std"] }
|
||||||
|
tokio = { version = "1", default-features = false, features = ["sync"] }
|
||||||
|
|
||||||
hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }
|
hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }
|
||||||
|
|
||||||
@@ -29,6 +30,8 @@ zeroize = { version = "1", optional = true }
|
|||||||
base64ct = { version = "1", features = ["alloc"], optional = true }
|
base64ct = { version = "1", features = ["alloc"], optional = true }
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
tls = ["hyper-rustls"]
|
tokio = ["hyper-util/tokio"]
|
||||||
|
tls = ["tokio", "hyper-rustls"]
|
||||||
|
webpki-roots = ["tls", "hyper-rustls/webpki-roots"]
|
||||||
basic-auth = ["zeroize", "base64ct"]
|
basic-auth = ["zeroize", "base64ct"]
|
||||||
default = ["tls"]
|
default = ["tls"]
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
MIT License
|
MIT License
|
||||||
|
|
||||||
Copyright (c) 2023 Luke Parker
|
Copyright (c) 2023-2025 Luke Parker
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
|||||||
@@ -1,19 +1,20 @@
|
|||||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||||
#![doc = include_str!("../README.md")]
|
#![doc = include_str!("../README.md")]
|
||||||
|
|
||||||
|
use core::{pin::Pin, future::Future};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use tokio::sync::Mutex;
|
use futures_util::FutureExt;
|
||||||
|
use ::tokio::sync::Mutex;
|
||||||
|
|
||||||
use tower_service::Service as TowerService;
|
use tower_service::Service as TowerService;
|
||||||
|
use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest, rt::Executor};
|
||||||
|
pub use hyper;
|
||||||
|
|
||||||
|
use hyper_util::client::legacy::{Client as HyperClient, connect::HttpConnector};
|
||||||
|
|
||||||
#[cfg(feature = "tls")]
|
#[cfg(feature = "tls")]
|
||||||
use hyper_rustls::{HttpsConnectorBuilder, HttpsConnector};
|
use hyper_rustls::{HttpsConnectorBuilder, HttpsConnector};
|
||||||
use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest};
|
|
||||||
use hyper_util::{
|
|
||||||
rt::tokio::TokioExecutor,
|
|
||||||
client::legacy::{Client as HyperClient, connect::HttpConnector},
|
|
||||||
};
|
|
||||||
pub use hyper;
|
|
||||||
|
|
||||||
mod request;
|
mod request;
|
||||||
pub use request::*;
|
pub use request::*;
|
||||||
@@ -37,52 +38,86 @@ type Connector = HttpConnector;
|
|||||||
type Connector = HttpsConnector<HttpConnector>;
|
type Connector = HttpsConnector<HttpConnector>;
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
enum Connection {
|
enum Connection<
|
||||||
|
E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
|
||||||
|
> {
|
||||||
ConnectionPool(HyperClient<Connector, Full<Bytes>>),
|
ConnectionPool(HyperClient<Connector, Full<Bytes>>),
|
||||||
Connection {
|
Connection {
|
||||||
|
executor: E,
|
||||||
connector: Connector,
|
connector: Connector,
|
||||||
host: Uri,
|
host: Uri,
|
||||||
connection: Arc<Mutex<Option<SendRequest<Full<Bytes>>>>>,
|
connection: Arc<Mutex<Option<SendRequest<Full<Bytes>>>>>,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// An HTTP client.
|
||||||
|
///
|
||||||
|
/// `tls` is only guaranteed to work when using the `tokio` executor. Instantiating a client when
|
||||||
|
/// the `tls` feature is active without using the `tokio` executor will cause errors.
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
pub struct Client {
|
pub struct Client<
|
||||||
connection: Connection,
|
E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
|
||||||
|
> {
|
||||||
|
connection: Connection<E>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Client {
|
impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>>
|
||||||
fn connector() -> Connector {
|
Client<E>
|
||||||
|
{
|
||||||
|
#[allow(clippy::unnecessary_wraps)]
|
||||||
|
fn connector() -> Result<Connector, Error> {
|
||||||
let mut res = HttpConnector::new();
|
let mut res = HttpConnector::new();
|
||||||
res.set_keepalive(Some(core::time::Duration::from_secs(60)));
|
res.set_keepalive(Some(core::time::Duration::from_secs(60)));
|
||||||
res.set_nodelay(true);
|
res.set_nodelay(true);
|
||||||
res.set_reuse_address(true);
|
res.set_reuse_address(true);
|
||||||
|
|
||||||
|
#[cfg(feature = "tls")]
|
||||||
|
if core::any::TypeId::of::<E>() !=
|
||||||
|
core::any::TypeId::of::<hyper_util::rt::tokio::TokioExecutor>()
|
||||||
|
{
|
||||||
|
Err(Error::ConnectionError(
|
||||||
|
"`tls` feature enabled but not using the `tokio` executor".into(),
|
||||||
|
))?;
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(feature = "tls")]
|
#[cfg(feature = "tls")]
|
||||||
res.enforce_http(false);
|
res.enforce_http(false);
|
||||||
#[cfg(feature = "tls")]
|
#[cfg(feature = "tls")]
|
||||||
let res = HttpsConnectorBuilder::new()
|
let https = HttpsConnectorBuilder::new().with_native_roots();
|
||||||
.with_native_roots()
|
#[cfg(all(feature = "tls", not(feature = "webpki-roots")))]
|
||||||
.expect("couldn't fetch system's SSL roots")
|
let https = https.map_err(|e| {
|
||||||
.https_or_http()
|
Error::ConnectionError(
|
||||||
.enable_http1()
|
format!("couldn't load system's SSL root certificates and webpki-roots unavilable: {e:?}")
|
||||||
.wrap_connector(res);
|
.into(),
|
||||||
res
|
)
|
||||||
|
})?;
|
||||||
|
// Fallback to `webpki-roots` if present
|
||||||
|
#[cfg(all(feature = "tls", feature = "webpki-roots"))]
|
||||||
|
let https = https.unwrap_or(HttpsConnectorBuilder::new().with_webpki_roots());
|
||||||
|
#[cfg(feature = "tls")]
|
||||||
|
let res = https.https_or_http().enable_http1().wrap_connector(res);
|
||||||
|
|
||||||
|
Ok(res)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn with_connection_pool() -> Client {
|
pub fn with_executor_and_connection_pool(executor: E) -> Result<Client<E>, Error> {
|
||||||
Client {
|
Ok(Client {
|
||||||
connection: Connection::ConnectionPool(
|
connection: Connection::ConnectionPool(
|
||||||
HyperClient::builder(TokioExecutor::new())
|
HyperClient::builder(executor)
|
||||||
.pool_idle_timeout(core::time::Duration::from_secs(60))
|
.pool_idle_timeout(core::time::Duration::from_secs(60))
|
||||||
.build(Self::connector()),
|
.build(Self::connector()?),
|
||||||
),
|
),
|
||||||
}
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn without_connection_pool(host: &str) -> Result<Client, Error> {
|
pub fn with_executor_and_without_connection_pool(
|
||||||
|
executor: E,
|
||||||
|
host: &str,
|
||||||
|
) -> Result<Client<E>, Error> {
|
||||||
Ok(Client {
|
Ok(Client {
|
||||||
connection: Connection::Connection {
|
connection: Connection::Connection {
|
||||||
connector: Self::connector(),
|
executor,
|
||||||
|
connector: Self::connector()?,
|
||||||
host: {
|
host: {
|
||||||
let uri: Uri = host.parse().map_err(|_| Error::InvalidUri)?;
|
let uri: Uri = host.parse().map_err(|_| Error::InvalidUri)?;
|
||||||
if uri.host().is_none() {
|
if uri.host().is_none() {
|
||||||
@@ -95,9 +130,9 @@ impl Client {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_>, Error> {
|
pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_, E>, Error> {
|
||||||
let request: Request = request.into();
|
let request: Request = request.into();
|
||||||
let mut request = request.0;
|
let Request { mut request, response_size_limit } = request;
|
||||||
if let Some(header_host) = request.headers().get(hyper::header::HOST) {
|
if let Some(header_host) = request.headers().get(hyper::header::HOST) {
|
||||||
match &self.connection {
|
match &self.connection {
|
||||||
Connection::ConnectionPool(_) => {}
|
Connection::ConnectionPool(_) => {}
|
||||||
@@ -131,7 +166,7 @@ impl Client {
|
|||||||
Connection::ConnectionPool(client) => {
|
Connection::ConnectionPool(client) => {
|
||||||
client.request(request).await.map_err(Error::HyperUtil)?
|
client.request(request).await.map_err(Error::HyperUtil)?
|
||||||
}
|
}
|
||||||
Connection::Connection { connector, host, connection } => {
|
Connection::Connection { executor, connector, host, connection } => {
|
||||||
let mut connection_lock = connection.lock().await;
|
let mut connection_lock = connection.lock().await;
|
||||||
|
|
||||||
// If there's not a connection...
|
// If there's not a connection...
|
||||||
@@ -143,28 +178,46 @@ impl Client {
|
|||||||
let call_res = call_res.map_err(Error::ConnectionError);
|
let call_res = call_res.map_err(Error::ConnectionError);
|
||||||
let (requester, connection) =
|
let (requester, connection) =
|
||||||
hyper::client::conn::http1::handshake(call_res?).await.map_err(Error::Hyper)?;
|
hyper::client::conn::http1::handshake(call_res?).await.map_err(Error::Hyper)?;
|
||||||
// This will die when we drop the requester, so we don't need to track an AbortHandle
|
// This task will die when we drop the requester
|
||||||
// for it
|
executor.execute(Box::pin(connection.map(|_| ())));
|
||||||
tokio::spawn(connection);
|
|
||||||
*connection_lock = Some(requester);
|
*connection_lock = Some(requester);
|
||||||
}
|
}
|
||||||
|
|
||||||
let connection = connection_lock.as_mut().unwrap();
|
let connection = connection_lock.as_mut().expect("lock over the connection was poisoned");
|
||||||
let mut err = connection.ready().await.err();
|
let mut err = connection.ready().await.err();
|
||||||
if err.is_none() {
|
if err.is_none() {
|
||||||
// Send the request
|
// Send the request
|
||||||
let res = connection.send_request(request).await;
|
let response = connection.send_request(request).await;
|
||||||
if let Ok(res) = res {
|
if let Ok(response) = response {
|
||||||
return Ok(Response(res, self));
|
return Ok(Response { response, size_limit: response_size_limit, client: self });
|
||||||
}
|
}
|
||||||
err = res.err();
|
err = response.err();
|
||||||
}
|
}
|
||||||
// Since this connection has been put into an error state, drop it
|
// Since this connection has been put into an error state, drop it
|
||||||
*connection_lock = None;
|
*connection_lock = None;
|
||||||
Err(Error::Hyper(err.unwrap()))?
|
Err(Error::Hyper(err.expect("only here if `err` is some yet no error")))?
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(Response(response, self))
|
Ok(Response { response, size_limit: response_size_limit, client: self })
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "tokio")]
|
||||||
|
mod tokio {
|
||||||
|
use hyper_util::rt::tokio::TokioExecutor;
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
pub type TokioClient = Client<TokioExecutor>;
|
||||||
|
impl Client<TokioExecutor> {
|
||||||
|
pub fn with_connection_pool() -> Result<Self, Error> {
|
||||||
|
Self::with_executor_and_connection_pool(TokioExecutor::new())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn without_connection_pool(host: &str) -> Result<Self, Error> {
|
||||||
|
Self::with_executor_and_without_connection_pool(TokioExecutor::new(), host)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#[cfg(feature = "tokio")]
|
||||||
|
pub use tokio::TokioClient;
|
||||||
|
|||||||
@@ -7,11 +7,15 @@ pub use http_body_util::Full;
|
|||||||
use crate::Error;
|
use crate::Error;
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct Request(pub(crate) hyper::Request<Full<Bytes>>);
|
pub struct Request {
|
||||||
|
pub(crate) request: hyper::Request<Full<Bytes>>,
|
||||||
|
pub(crate) response_size_limit: Option<usize>,
|
||||||
|
}
|
||||||
|
|
||||||
impl Request {
|
impl Request {
|
||||||
#[cfg(feature = "basic-auth")]
|
#[cfg(feature = "basic-auth")]
|
||||||
fn username_password_from_uri(&self) -> Result<(String, String), Error> {
|
fn username_password_from_uri(&self) -> Result<(String, String), Error> {
|
||||||
if let Some(authority) = self.0.uri().authority() {
|
if let Some(authority) = self.request.uri().authority() {
|
||||||
let authority = authority.as_str();
|
let authority = authority.as_str();
|
||||||
if authority.contains('@') {
|
if authority.contains('@') {
|
||||||
// Decode the username and password from the URI
|
// Decode the username and password from the URI
|
||||||
@@ -36,9 +40,10 @@ impl Request {
|
|||||||
let mut formatted = format!("{username}:{password}");
|
let mut formatted = format!("{username}:{password}");
|
||||||
let mut encoded = Base64::encode_string(formatted.as_bytes());
|
let mut encoded = Base64::encode_string(formatted.as_bytes());
|
||||||
formatted.zeroize();
|
formatted.zeroize();
|
||||||
self.0.headers_mut().insert(
|
self.request.headers_mut().insert(
|
||||||
hyper::header::AUTHORIZATION,
|
hyper::header::AUTHORIZATION,
|
||||||
HeaderValue::from_str(&format!("Basic {encoded}")).unwrap(),
|
HeaderValue::from_str(&format!("Basic {encoded}"))
|
||||||
|
.expect("couldn't form header from base64-encoded string"),
|
||||||
);
|
);
|
||||||
encoded.zeroize();
|
encoded.zeroize();
|
||||||
}
|
}
|
||||||
@@ -59,9 +64,17 @@ impl Request {
|
|||||||
pub fn with_basic_auth(&mut self) {
|
pub fn with_basic_auth(&mut self) {
|
||||||
let _ = self.basic_auth_from_uri();
|
let _ = self.basic_auth_from_uri();
|
||||||
}
|
}
|
||||||
}
|
|
||||||
impl From<hyper::Request<Full<Bytes>>> for Request {
|
/// Set a size limit for the response.
|
||||||
fn from(request: hyper::Request<Full<Bytes>>) -> Request {
|
///
|
||||||
Request(request)
|
/// This may be exceeded by a single HTTP frame and accordingly isn't perfect.
|
||||||
|
pub fn set_response_size_limit(&mut self, response_size_limit: Option<usize>) {
|
||||||
|
self.response_size_limit = response_size_limit;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<hyper::Request<Full<Bytes>>> for Request {
|
||||||
|
fn from(request: hyper::Request<Full<Bytes>>) -> Request {
|
||||||
|
Request { request, response_size_limit: None }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,24 +1,54 @@
|
|||||||
|
use core::{pin::Pin, future::Future};
|
||||||
|
use std::io;
|
||||||
|
|
||||||
use hyper::{
|
use hyper::{
|
||||||
StatusCode,
|
StatusCode,
|
||||||
header::{HeaderValue, HeaderMap},
|
header::{HeaderValue, HeaderMap},
|
||||||
body::{Buf, Incoming},
|
body::Incoming,
|
||||||
|
rt::Executor,
|
||||||
};
|
};
|
||||||
use http_body_util::BodyExt;
|
use http_body_util::BodyExt;
|
||||||
|
|
||||||
|
use futures_util::{Stream, StreamExt};
|
||||||
|
|
||||||
use crate::{Client, Error};
|
use crate::{Client, Error};
|
||||||
|
|
||||||
// Borrows the client so its async task lives as long as this response exists.
|
// Borrows the client so its async task lives as long as this response exists.
|
||||||
#[allow(dead_code)]
|
#[allow(dead_code)]
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct Response<'a>(pub(crate) hyper::Response<Incoming>, pub(crate) &'a Client);
|
pub struct Response<
|
||||||
impl<'a> Response<'a> {
|
'a,
|
||||||
|
E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
|
||||||
|
> {
|
||||||
|
pub(crate) response: hyper::Response<Incoming>,
|
||||||
|
pub(crate) size_limit: Option<usize>,
|
||||||
|
pub(crate) client: &'a Client<E>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>>
|
||||||
|
Response<'_, E>
|
||||||
|
{
|
||||||
pub fn status(&self) -> StatusCode {
|
pub fn status(&self) -> StatusCode {
|
||||||
self.0.status()
|
self.response.status()
|
||||||
}
|
}
|
||||||
pub fn headers(&self) -> &HeaderMap<HeaderValue> {
|
pub fn headers(&self) -> &HeaderMap<HeaderValue> {
|
||||||
self.0.headers()
|
self.response.headers()
|
||||||
}
|
}
|
||||||
pub async fn body(self) -> Result<impl std::io::Read, Error> {
|
pub async fn body(self) -> Result<impl std::io::Read, Error> {
|
||||||
Ok(self.0.into_body().collect().await.map_err(Error::Hyper)?.aggregate().reader())
|
let mut body = self.response.into_body().into_data_stream();
|
||||||
|
let mut res: Vec<u8> = vec![];
|
||||||
|
loop {
|
||||||
|
if let Some(size_limit) = self.size_limit {
|
||||||
|
let (lower, upper) = body.size_hint();
|
||||||
|
if res.len().wrapping_add(upper.unwrap_or(lower)) > size_limit.min(usize::MAX - 1) {
|
||||||
|
Err(Error::ConnectionError("response exceeded size limit".into()))?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let Some(part) = body.next().await else { break };
|
||||||
|
let part = part.map_err(Error::Hyper)?;
|
||||||
|
res.extend(part.as_ref());
|
||||||
|
}
|
||||||
|
Ok(io::Cursor::new(res))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,13 +1,13 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "std-shims"
|
name = "std-shims"
|
||||||
version = "0.1.1"
|
version = "0.1.5"
|
||||||
description = "A series of std shims to make alloc more feasible"
|
description = "A series of std shims to make alloc more feasible"
|
||||||
license = "MIT"
|
license = "MIT"
|
||||||
repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims"
|
repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims"
|
||||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||||
keywords = ["nostd", "no_std", "alloc", "io"]
|
keywords = ["nostd", "no_std", "alloc", "io"]
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
rust-version = "1.80"
|
rust-version = "1.65"
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
all-features = true
|
all-features = true
|
||||||
@@ -17,9 +17,11 @@ rustdoc-args = ["--cfg", "docsrs"]
|
|||||||
workspace = true
|
workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
spin = { version = "0.9", default-features = false, features = ["use_ticket_mutex", "lazy"] }
|
rustversion = { version = "1", default-features = false }
|
||||||
hashbrown = { version = "0.15", default-features = false, features = ["default-hasher", "inline-more"] }
|
spin = { version = "0.10", default-features = false, features = ["use_ticket_mutex", "fair_mutex", "once", "lazy"] }
|
||||||
|
hashbrown = { version = "0.16", default-features = false, features = ["default-hasher", "inline-more"], optional = true }
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
std = []
|
alloc = ["hashbrown"]
|
||||||
|
std = ["alloc", "spin/std"]
|
||||||
default = ["std"]
|
default = ["std"]
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
MIT License
|
MIT License
|
||||||
|
|
||||||
Copyright (c) 2023 Luke Parker
|
Copyright (c) 2023-2025 Luke Parker
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
|||||||
@@ -1,6 +1,28 @@
|
|||||||
# std shims
|
# `std` shims
|
||||||
|
|
||||||
A crate which passes through to std when the default `std` feature is enabled,
|
`std-shims` is a Rust crate with two purposes:
|
||||||
yet provides a series of shims when it isn't.
|
- Expand the functionality of `core` and `alloc`
|
||||||
|
- Polyfill functionality only available on newer version of Rust
|
||||||
|
|
||||||
`HashSet` and `HashMap` are provided via `hashbrown`.
|
The goal is to make supporting no-`std` environments, and older versions of
|
||||||
|
Rust, as simple as possible. For most use cases, replacing `std::` with
|
||||||
|
`std_shims::` and adding `use std_shims::prelude::*` is sufficient to take full
|
||||||
|
advantage of `std-shims`.
|
||||||
|
|
||||||
|
# API Surface
|
||||||
|
|
||||||
|
`std-shims` only aims to have items _mutually available_ between `alloc` (with
|
||||||
|
extra dependencies) and `std` publicly exposed. Items exclusive to `std`, with
|
||||||
|
no shims available, will not be exported by `std-shims`.
|
||||||
|
|
||||||
|
# Dependencies
|
||||||
|
|
||||||
|
`HashSet` and `HashMap` are provided via `hashbrown`. Synchronization
|
||||||
|
primitives are provided via `spin` (avoiding a requirement on
|
||||||
|
`critical-section`). Sections of `std::io` are independently matched as
|
||||||
|
possible. `rustversion` is used to detect when to provide polyfills.
|
||||||
|
|
||||||
|
# Disclaimer
|
||||||
|
|
||||||
|
No guarantee of one-to-one parity is provided. The shims provided aim to be
|
||||||
|
sufficient for the average case. Pull requests are _welcome_.
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
|
#[cfg(all(feature = "alloc", not(feature = "std")))]
|
||||||
|
pub use extern_alloc::collections::*;
|
||||||
|
#[cfg(all(feature = "alloc", not(feature = "std")))]
|
||||||
|
pub use hashbrown::{HashSet, HashMap};
|
||||||
|
|
||||||
#[cfg(feature = "std")]
|
#[cfg(feature = "std")]
|
||||||
pub use std::collections::*;
|
pub use std::collections::*;
|
||||||
|
|
||||||
#[cfg(not(feature = "std"))]
|
|
||||||
pub use alloc::collections::*;
|
|
||||||
#[cfg(not(feature = "std"))]
|
|
||||||
pub use hashbrown::{HashSet, HashMap};
|
|
||||||
|
|||||||
@@ -1,42 +1,74 @@
|
|||||||
#[cfg(feature = "std")]
|
|
||||||
pub use std::io::*;
|
|
||||||
|
|
||||||
#[cfg(not(feature = "std"))]
|
#[cfg(not(feature = "std"))]
|
||||||
mod shims {
|
mod shims {
|
||||||
use core::fmt::{Debug, Formatter};
|
use core::fmt::{self, Debug, Display, Formatter};
|
||||||
use alloc::{boxed::Box, vec::Vec};
|
#[cfg(feature = "alloc")]
|
||||||
|
use extern_alloc::{boxed::Box, vec::Vec};
|
||||||
|
use crate::error::Error as CoreError;
|
||||||
|
|
||||||
|
/// The kind of error.
|
||||||
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
|
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
|
||||||
pub enum ErrorKind {
|
pub enum ErrorKind {
|
||||||
UnexpectedEof,
|
UnexpectedEof,
|
||||||
Other,
|
Other,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// An error.
|
||||||
|
#[derive(Debug)]
|
||||||
pub struct Error {
|
pub struct Error {
|
||||||
kind: ErrorKind,
|
kind: ErrorKind,
|
||||||
error: Box<dyn Send + Sync>,
|
#[cfg(feature = "alloc")]
|
||||||
|
error: Box<dyn Send + Sync + CoreError>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Debug for Error {
|
impl Display for Error {
|
||||||
fn fmt(&self, fmt: &mut Formatter<'_>) -> core::result::Result<(), core::fmt::Error> {
|
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
|
||||||
fmt.debug_struct("Error").field("kind", &self.kind).finish_non_exhaustive()
|
<Self as Debug>::fmt(self, f)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
impl CoreError for Error {}
|
||||||
|
|
||||||
|
#[cfg(not(feature = "alloc"))]
|
||||||
|
pub trait IntoBoxSendSyncError {}
|
||||||
|
#[cfg(not(feature = "alloc"))]
|
||||||
|
impl<I> IntoBoxSendSyncError for I {}
|
||||||
|
#[cfg(feature = "alloc")]
|
||||||
|
pub trait IntoBoxSendSyncError: Into<Box<dyn Send + Sync + CoreError>> {}
|
||||||
|
#[cfg(feature = "alloc")]
|
||||||
|
impl<I: Into<Box<dyn Send + Sync + CoreError>>> IntoBoxSendSyncError for I {}
|
||||||
|
|
||||||
impl Error {
|
impl Error {
|
||||||
pub fn new<E: 'static + Send + Sync>(kind: ErrorKind, error: E) -> Error {
|
/// Create a new error.
|
||||||
Error { kind, error: Box::new(error) }
|
///
|
||||||
|
/// The error object itself is silently dropped when `alloc` is not enabled.
|
||||||
|
#[allow(unused)]
|
||||||
|
pub fn new<E: 'static + IntoBoxSendSyncError>(kind: ErrorKind, error: E) -> Error {
|
||||||
|
#[cfg(not(feature = "alloc"))]
|
||||||
|
let res = Error { kind };
|
||||||
|
#[cfg(feature = "alloc")]
|
||||||
|
let res = Error { kind, error: error.into() };
|
||||||
|
res
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn other<E: 'static + Send + Sync>(error: E) -> Error {
|
/// Create a new error with `io::ErrorKind::Other` as its kind.
|
||||||
Error { kind: ErrorKind::Other, error: Box::new(error) }
|
///
|
||||||
|
/// The error object itself is silently dropped when `alloc` is not enabled.
|
||||||
|
#[allow(unused)]
|
||||||
|
pub fn other<E: 'static + IntoBoxSendSyncError>(error: E) -> Error {
|
||||||
|
#[cfg(not(feature = "alloc"))]
|
||||||
|
let res = Error { kind: ErrorKind::Other };
|
||||||
|
#[cfg(feature = "alloc")]
|
||||||
|
let res = Error { kind: ErrorKind::Other, error: error.into() };
|
||||||
|
res
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// The kind of error.
|
||||||
pub fn kind(&self) -> ErrorKind {
|
pub fn kind(&self) -> ErrorKind {
|
||||||
self.kind
|
self.kind
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn into_inner(self) -> Option<Box<dyn Send + Sync>> {
|
/// Retrieve the inner error.
|
||||||
|
#[cfg(feature = "alloc")]
|
||||||
|
pub fn into_inner(self) -> Option<Box<dyn Send + Sync + CoreError>> {
|
||||||
Some(self.error)
|
Some(self.error)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -64,6 +96,12 @@ mod shims {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<R: Read> Read for &mut R {
|
||||||
|
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
|
||||||
|
R::read(*self, buf)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub trait BufRead: Read {
|
pub trait BufRead: Read {
|
||||||
fn fill_buf(&mut self) -> Result<&[u8]>;
|
fn fill_buf(&mut self) -> Result<&[u8]>;
|
||||||
fn consume(&mut self, amt: usize);
|
fn consume(&mut self, amt: usize);
|
||||||
@@ -88,6 +126,7 @@ mod shims {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "alloc")]
|
||||||
impl Write for Vec<u8> {
|
impl Write for Vec<u8> {
|
||||||
fn write(&mut self, buf: &[u8]) -> Result<usize> {
|
fn write(&mut self, buf: &[u8]) -> Result<usize> {
|
||||||
self.extend(buf);
|
self.extend(buf);
|
||||||
@@ -95,6 +134,8 @@ mod shims {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(not(feature = "std"))]
|
#[cfg(not(feature = "std"))]
|
||||||
pub use shims::*;
|
pub use shims::*;
|
||||||
|
|
||||||
|
#[cfg(feature = "std")]
|
||||||
|
pub use std::io::{ErrorKind, Error, Result, Read, BufRead, Write};
|
||||||
|
|||||||
@@ -1,13 +1,102 @@
|
|||||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||||
#![doc = include_str!("../README.md")]
|
#![doc = include_str!("../README.md")]
|
||||||
#![cfg_attr(not(feature = "std"), no_std)]
|
#![cfg_attr(not(feature = "std"), no_std)]
|
||||||
|
|
||||||
pub extern crate alloc;
|
#[cfg(not(feature = "alloc"))]
|
||||||
|
pub use core::*;
|
||||||
|
#[cfg(not(feature = "alloc"))]
|
||||||
|
pub use core::{alloc, borrow, ffi, fmt, slice, str, task};
|
||||||
|
|
||||||
|
#[cfg(not(feature = "std"))]
|
||||||
|
#[rustversion::before(1.81)]
|
||||||
|
pub mod error {
|
||||||
|
use core::fmt::Debug::Display;
|
||||||
|
pub trait Error: Debug + Display {}
|
||||||
|
}
|
||||||
|
#[cfg(not(feature = "std"))]
|
||||||
|
#[rustversion::since(1.81)]
|
||||||
|
pub use core::error;
|
||||||
|
|
||||||
|
#[cfg(feature = "alloc")]
|
||||||
|
extern crate alloc as extern_alloc;
|
||||||
|
#[cfg(all(feature = "alloc", not(feature = "std")))]
|
||||||
|
pub use extern_alloc::{alloc, borrow, boxed, ffi, fmt, rc, slice, str, string, task, vec, format};
|
||||||
|
#[cfg(feature = "std")]
|
||||||
|
pub use std::{alloc, borrow, boxed, error, ffi, fmt, rc, slice, str, string, task, vec, format};
|
||||||
|
|
||||||
pub mod sync;
|
|
||||||
pub mod collections;
|
pub mod collections;
|
||||||
pub mod io;
|
pub mod io;
|
||||||
|
pub mod sync;
|
||||||
|
|
||||||
pub use alloc::vec;
|
pub mod prelude {
|
||||||
pub use alloc::str;
|
// Shim the `std` prelude
|
||||||
pub use alloc::string;
|
#[cfg(feature = "alloc")]
|
||||||
|
pub use extern_alloc::{
|
||||||
|
format, vec,
|
||||||
|
borrow::ToOwned,
|
||||||
|
boxed::Box,
|
||||||
|
vec::Vec,
|
||||||
|
string::{String, ToString},
|
||||||
|
};
|
||||||
|
|
||||||
|
// Shim `div_ceil`
|
||||||
|
#[rustversion::before(1.73)]
|
||||||
|
#[doc(hidden)]
|
||||||
|
pub trait StdShimsDivCeil {
|
||||||
|
fn div_ceil(self, rhs: Self) -> Self;
|
||||||
|
}
|
||||||
|
#[rustversion::before(1.73)]
|
||||||
|
mod impl_divceil {
|
||||||
|
use super::StdShimsDivCeil;
|
||||||
|
impl StdShimsDivCeil for u8 {
|
||||||
|
fn div_ceil(self, rhs: Self) -> Self {
|
||||||
|
(self + (rhs - 1)) / rhs
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl StdShimsDivCeil for u16 {
|
||||||
|
fn div_ceil(self, rhs: Self) -> Self {
|
||||||
|
(self + (rhs - 1)) / rhs
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl StdShimsDivCeil for u32 {
|
||||||
|
fn div_ceil(self, rhs: Self) -> Self {
|
||||||
|
(self + (rhs - 1)) / rhs
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl StdShimsDivCeil for u64 {
|
||||||
|
fn div_ceil(self, rhs: Self) -> Self {
|
||||||
|
(self + (rhs - 1)) / rhs
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl StdShimsDivCeil for u128 {
|
||||||
|
fn div_ceil(self, rhs: Self) -> Self {
|
||||||
|
(self + (rhs - 1)) / rhs
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl StdShimsDivCeil for usize {
|
||||||
|
fn div_ceil(self, rhs: Self) -> Self {
|
||||||
|
(self + (rhs - 1)) / rhs
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shim `io::Error::other`
|
||||||
|
#[cfg(feature = "std")]
|
||||||
|
#[rustversion::before(1.74)]
|
||||||
|
#[doc(hidden)]
|
||||||
|
pub trait StdShimsIoErrorOther {
|
||||||
|
fn other<E>(error: E) -> Self
|
||||||
|
where
|
||||||
|
E: Into<Box<dyn std::error::Error + Send + Sync>>;
|
||||||
|
}
|
||||||
|
#[cfg(feature = "std")]
|
||||||
|
#[rustversion::before(1.74)]
|
||||||
|
impl StdShimsIoErrorOther for std::io::Error {
|
||||||
|
fn other<E>(error: E) -> Self
|
||||||
|
where
|
||||||
|
E: Into<Box<dyn std::error::Error + Send + Sync>>,
|
||||||
|
{
|
||||||
|
std::io::Error::new(std::io::ErrorKind::Other, error)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,19 +1,28 @@
|
|||||||
pub use core::sync::*;
|
pub use core::sync::atomic;
|
||||||
pub use alloc::sync::*;
|
#[cfg(all(feature = "alloc", not(feature = "std")))]
|
||||||
|
pub use extern_alloc::sync::{Arc, Weak};
|
||||||
|
#[cfg(feature = "std")]
|
||||||
|
pub use std::sync::{Arc, Weak};
|
||||||
|
|
||||||
mod mutex_shim {
|
mod mutex_shim {
|
||||||
#[cfg(feature = "std")]
|
|
||||||
pub use std::sync::*;
|
|
||||||
#[cfg(not(feature = "std"))]
|
#[cfg(not(feature = "std"))]
|
||||||
pub use spin::*;
|
pub use spin::{Mutex, MutexGuard};
|
||||||
|
#[cfg(feature = "std")]
|
||||||
|
pub use std::sync::{Mutex, MutexGuard};
|
||||||
|
|
||||||
|
/// A shimmed `Mutex` with an API mutual to `spin` and `std`.
|
||||||
#[derive(Default, Debug)]
|
#[derive(Default, Debug)]
|
||||||
pub struct ShimMutex<T>(Mutex<T>);
|
pub struct ShimMutex<T>(Mutex<T>);
|
||||||
impl<T> ShimMutex<T> {
|
impl<T> ShimMutex<T> {
|
||||||
|
/// Construct a new `Mutex`.
|
||||||
pub const fn new(value: T) -> Self {
|
pub const fn new(value: T) -> Self {
|
||||||
Self(Mutex::new(value))
|
Self(Mutex::new(value))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Acquire a lock on the contents of the `Mutex`.
|
||||||
|
///
|
||||||
|
/// On no-`std` environments, this may spin until the lock is acquired. On `std` environments,
|
||||||
|
/// this may panic if the `Mutex` was poisoned.
|
||||||
pub fn lock(&self) -> MutexGuard<'_, T> {
|
pub fn lock(&self) -> MutexGuard<'_, T> {
|
||||||
#[cfg(feature = "std")]
|
#[cfg(feature = "std")]
|
||||||
let res = self.0.lock().unwrap();
|
let res = self.0.lock().unwrap();
|
||||||
@@ -25,7 +34,9 @@ mod mutex_shim {
|
|||||||
}
|
}
|
||||||
pub use mutex_shim::{ShimMutex as Mutex, MutexGuard};
|
pub use mutex_shim::{ShimMutex as Mutex, MutexGuard};
|
||||||
|
|
||||||
#[cfg(feature = "std")]
|
#[rustversion::before(1.80)]
|
||||||
pub use std::sync::LazyLock;
|
|
||||||
#[cfg(not(feature = "std"))]
|
#[cfg(not(feature = "std"))]
|
||||||
pub use spin::Lazy as LazyLock;
|
pub use spin::Lazy as LazyLock;
|
||||||
|
#[rustversion::since(1.80)]
|
||||||
|
#[cfg(feature = "std")]
|
||||||
|
pub use std::sync::LazyLock;
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
AGPL-3.0-only license
|
AGPL-3.0-only license
|
||||||
|
|
||||||
Copyright (c) 2022-2024 Luke Parker
|
Copyright (c) 2022-2025 Luke Parker
|
||||||
|
|
||||||
This program is free software: you can redistribute it and/or modify
|
This program is free software: you can redistribute it and/or modify
|
||||||
it under the terms of the GNU Affero General Public License Version 3 as
|
it under the terms of the GNU Affero General Public License Version 3 as
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||||
#![doc = include_str!("../README.md")]
|
#![doc = include_str!("../README.md")]
|
||||||
#![deny(missing_docs)]
|
#![deny(missing_docs)]
|
||||||
|
|
||||||
|
|||||||
@@ -7,7 +7,9 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/zalloc"
|
|||||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||||
keywords = []
|
keywords = []
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
rust-version = "1.77"
|
# This must be specified with the patch version, else Rust believes `1.77` < `1.77.0` and will
|
||||||
|
# refuse to compile due to relying on versions introduced with `1.77.0`
|
||||||
|
rust-version = "1.77.0"
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
all-features = true
|
all-features = true
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
MIT License
|
MIT License
|
||||||
|
|
||||||
Copyright (c) 2022-2023 Luke Parker
|
Copyright (c) 2022-2025 Luke Parker
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||||
#![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))]
|
#![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))]
|
||||||
|
|
||||||
//! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation.
|
//! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation.
|
||||||
|
|||||||
@@ -8,7 +8,6 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
|||||||
keywords = []
|
keywords = []
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
publish = false
|
publish = false
|
||||||
rust-version = "1.81"
|
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
all-features = true
|
all-features = true
|
||||||
@@ -22,15 +21,18 @@ zeroize = { version = "^1.5", default-features = false, features = ["std"] }
|
|||||||
bitvec = { version = "1", default-features = false, features = ["std"] }
|
bitvec = { version = "1", default-features = false, features = ["std"] }
|
||||||
rand_core = { version = "0.6", default-features = false, features = ["std"] }
|
rand_core = { version = "0.6", default-features = false, features = ["std"] }
|
||||||
|
|
||||||
blake2 = { version = "0.10", default-features = false, features = ["std"] }
|
blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }
|
||||||
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
|
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
|
||||||
|
|
||||||
|
dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"] }
|
||||||
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
|
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
|
||||||
schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std"] }
|
dkg = { package = "dkg-musig", path = "../crypto/dkg/musig", default-features = false, features = ["std"] }
|
||||||
frost = { package = "modular-frost", path = "../crypto/frost" }
|
frost = { package = "modular-frost", path = "../crypto/frost" }
|
||||||
frost-schnorrkel = { path = "../crypto/schnorrkel" }
|
frost-schnorrkel = { path = "../crypto/schnorrkel" }
|
||||||
|
|
||||||
|
hex = { version = "0.4", default-features = false, features = ["std"] }
|
||||||
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
|
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
|
||||||
|
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
|
||||||
|
|
||||||
zalloc = { path = "../common/zalloc" }
|
zalloc = { path = "../common/zalloc" }
|
||||||
serai-db = { path = "../common/db" }
|
serai-db = { path = "../common/db" }
|
||||||
@@ -43,9 +45,6 @@ tributary-sdk = { path = "./tributary-sdk" }
|
|||||||
|
|
||||||
serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }
|
serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }
|
||||||
|
|
||||||
hex = { version = "0.4", default-features = false, features = ["std"] }
|
|
||||||
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
|
|
||||||
|
|
||||||
log = { version = "0.4", default-features = false, features = ["std"] }
|
log = { version = "0.4", default-features = false, features = ["std"] }
|
||||||
env_logger = { version = "0.10", default-features = false, features = ["humantime"] }
|
env_logger = { version = "0.10", default-features = false, features = ["humantime"] }
|
||||||
|
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
|||||||
keywords = []
|
keywords = []
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
publish = false
|
publish = false
|
||||||
rust-version = "1.81"
|
rust-version = "1.85"
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
all-features = true
|
all-features = true
|
||||||
@@ -18,7 +18,7 @@ rustdoc-args = ["--cfg", "docsrs"]
|
|||||||
workspace = true
|
workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
blake2 = { version = "0.10", default-features = false, features = ["std"] }
|
blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }
|
||||||
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
|
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
|
||||||
|
|
||||||
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
|
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
AGPL-3.0-only license
|
AGPL-3.0-only license
|
||||||
|
|
||||||
Copyright (c) 2023-2024 Luke Parker
|
Copyright (c) 2023-2025 Luke Parker
|
||||||
|
|
||||||
This program is free software: you can redistribute it and/or modify
|
This program is free software: you can redistribute it and/or modify
|
||||||
it under the terms of the GNU Affero General Public License Version 3 as
|
it under the terms of the GNU Affero General Public License Version 3 as
|
||||||
|
|||||||
@@ -24,15 +24,6 @@ pub(crate) struct CosignDelayTask<D: Db> {
|
|||||||
pub(crate) db: D,
|
pub(crate) db: D,
|
||||||
}
|
}
|
||||||
|
|
||||||
struct AwaitUndroppable<T: DbTxn>(Option<core::mem::ManuallyDrop<Undroppable<T>>>);
|
|
||||||
impl<T: DbTxn> Drop for AwaitUndroppable<T> {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
if let Some(mut txn) = self.0.take() {
|
|
||||||
(unsafe { core::mem::ManuallyDrop::take(&mut txn) }).close();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
|
impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
|
||||||
type Error = DoesNotError;
|
type Error = DoesNotError;
|
||||||
|
|
||||||
@@ -44,18 +35,14 @@ impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
|
|||||||
|
|
||||||
// Receive the next block to mark as cosigned
|
// Receive the next block to mark as cosigned
|
||||||
let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else {
|
let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else {
|
||||||
txn.close();
|
|
||||||
break;
|
break;
|
||||||
};
|
};
|
||||||
|
|
||||||
// Calculate when we should mark it as valid
|
// Calculate when we should mark it as valid
|
||||||
let time_valid =
|
let time_valid =
|
||||||
SystemTime::UNIX_EPOCH + Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY;
|
SystemTime::UNIX_EPOCH + Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY;
|
||||||
// Sleep until then
|
// Sleep until then
|
||||||
let mut txn = AwaitUndroppable(Some(core::mem::ManuallyDrop::new(txn)));
|
|
||||||
tokio::time::sleep(SystemTime::now().duration_since(time_valid).unwrap_or(Duration::ZERO))
|
tokio::time::sleep(SystemTime::now().duration_since(time_valid).unwrap_or(Duration::ZERO))
|
||||||
.await;
|
.await;
|
||||||
let mut txn = core::mem::ManuallyDrop::into_inner(txn.0.take().unwrap());
|
|
||||||
|
|
||||||
// Set the cosigned block
|
// Set the cosigned block
|
||||||
LatestCosignedBlockNumber::set(&mut txn, &block_number);
|
LatestCosignedBlockNumber::set(&mut txn, &block_number);
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
use core::future::Future;
|
use core::future::Future;
|
||||||
use std::time::{Duration, SystemTime};
|
use std::time::{Duration, Instant, SystemTime};
|
||||||
|
|
||||||
use serai_db::*;
|
use serai_db::*;
|
||||||
use serai_task::ContinuallyRan;
|
use serai_task::ContinuallyRan;
|
||||||
@@ -77,17 +77,27 @@ pub(crate) fn currently_evaluated_global_session(getter: &impl Get) -> Option<[u
|
|||||||
pub(crate) struct CosignEvaluatorTask<D: Db, R: RequestNotableCosigns> {
|
pub(crate) struct CosignEvaluatorTask<D: Db, R: RequestNotableCosigns> {
|
||||||
pub(crate) db: D,
|
pub(crate) db: D,
|
||||||
pub(crate) request: R,
|
pub(crate) request: R,
|
||||||
|
pub(crate) last_request_for_cosigns: Instant,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D, R> {
|
impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D, R> {
|
||||||
type Error = String;
|
type Error = String;
|
||||||
|
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
|
let should_request_cosigns = |last_request_for_cosigns: &mut Instant| {
|
||||||
|
const REQUEST_COSIGNS_SPACING: Duration = Duration::from_secs(60);
|
||||||
|
if Instant::now() < (*last_request_for_cosigns + REQUEST_COSIGNS_SPACING) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
*last_request_for_cosigns = Instant::now();
|
||||||
|
true
|
||||||
|
};
|
||||||
|
|
||||||
async move {
|
async move {
|
||||||
let mut known_cosign = None;
|
let mut known_cosign = None;
|
||||||
let mut made_progress = false;
|
let mut made_progress = false;
|
||||||
loop {
|
loop {
|
||||||
let mut txn = self.db.unsafe_txn();
|
let mut txn = self.db.txn();
|
||||||
let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn)
|
let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn)
|
||||||
else {
|
else {
|
||||||
break;
|
break;
|
||||||
@@ -118,12 +128,13 @@ impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D,
|
|||||||
// Check if the sum weight doesn't cross the required threshold
|
// Check if the sum weight doesn't cross the required threshold
|
||||||
if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) {
|
if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) {
|
||||||
// Request the necessary cosigns over the network
|
// Request the necessary cosigns over the network
|
||||||
// TODO: Add a timer to ensure this isn't called too often
|
if should_request_cosigns(&mut self.last_request_for_cosigns) {
|
||||||
self
|
self
|
||||||
.request
|
.request
|
||||||
.request_notable_cosigns(global_session)
|
.request_notable_cosigns(global_session)
|
||||||
.await
|
.await
|
||||||
.map_err(|e| format!("{e:?}"))?;
|
.map_err(|e| format!("{e:?}"))?;
|
||||||
|
}
|
||||||
// We return an error so the delay before this task is run again increases
|
// We return an error so the delay before this task is run again increases
|
||||||
return Err(format!(
|
return Err(format!(
|
||||||
"notable block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
|
"notable block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
|
||||||
@@ -180,11 +191,13 @@ impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D,
|
|||||||
// If this session hasn't yet produced notable cosigns, then we presume we'll see
|
// If this session hasn't yet produced notable cosigns, then we presume we'll see
|
||||||
// the desired non-notable cosigns as part of normal operations, without needing to
|
// the desired non-notable cosigns as part of normal operations, without needing to
|
||||||
// explicitly request them
|
// explicitly request them
|
||||||
self
|
if should_request_cosigns(&mut self.last_request_for_cosigns) {
|
||||||
.request
|
self
|
||||||
.request_notable_cosigns(global_session)
|
.request
|
||||||
.await
|
.request_notable_cosigns(global_session)
|
||||||
.map_err(|e| format!("{e:?}"))?;
|
.await
|
||||||
|
.map_err(|e| format!("{e:?}"))?;
|
||||||
|
}
|
||||||
// We return an error so the delay before this task is run again increases
|
// We return an error so the delay before this task is run again increases
|
||||||
return Err(format!(
|
return Err(format!(
|
||||||
"block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
|
"block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ use std::{sync::Arc, collections::HashMap};
|
|||||||
|
|
||||||
use serai_client::{
|
use serai_client::{
|
||||||
primitives::{SeraiAddress, Amount},
|
primitives::{SeraiAddress, Amount},
|
||||||
validator_sets::primitives::ValidatorSet,
|
validator_sets::primitives::ExternalValidatorSet,
|
||||||
Serai,
|
Serai,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -28,7 +28,7 @@ db_channel! {
|
|||||||
CosignIntendChannels {
|
CosignIntendChannels {
|
||||||
GlobalSessionsChannel: () -> ([u8; 32], GlobalSession),
|
GlobalSessionsChannel: () -> ([u8; 32], GlobalSession),
|
||||||
BlockEvents: () -> BlockEventData,
|
BlockEvents: () -> BlockEventData,
|
||||||
IntendedCosigns: (set: ValidatorSet) -> CosignIntent,
|
IntendedCosigns: (set: ExternalValidatorSet) -> CosignIntent,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -70,7 +70,7 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
|
|||||||
self.serai.latest_finalized_block().await.map_err(|e| format!("{e:?}"))?.number();
|
self.serai.latest_finalized_block().await.map_err(|e| format!("{e:?}"))?.number();
|
||||||
|
|
||||||
for block_number in start_block_number ..= latest_block_number {
|
for block_number in start_block_number ..= latest_block_number {
|
||||||
let mut txn = self.db.unsafe_txn();
|
let mut txn = self.db.txn();
|
||||||
|
|
||||||
let (block, mut has_events) =
|
let (block, mut has_events) =
|
||||||
block_has_events_justifying_a_cosign(&self.serai, block_number)
|
block_has_events_justifying_a_cosign(&self.serai, block_number)
|
||||||
@@ -110,7 +110,7 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
|
|||||||
keys.insert(set.network, SeraiAddress::from(*key));
|
keys.insert(set.network, SeraiAddress::from(*key));
|
||||||
let stake = serai
|
let stake = serai
|
||||||
.validator_sets()
|
.validator_sets()
|
||||||
.total_allocated_stake(set.network)
|
.total_allocated_stake(set.network.into())
|
||||||
.await
|
.await
|
||||||
.map_err(|e| format!("{e:?}"))?
|
.map_err(|e| format!("{e:?}"))?
|
||||||
.unwrap_or(Amount(0))
|
.unwrap_or(Amount(0))
|
||||||
@@ -155,7 +155,7 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
|
|||||||
|
|
||||||
// Tell each set of their expectation to cosign this block
|
// Tell each set of their expectation to cosign this block
|
||||||
for set in global_session_info.sets {
|
for set in global_session_info.sets {
|
||||||
log::debug!("{:?} will be cosigning block #{block_number}", set);
|
log::debug!("{set:?} will be cosigning block #{block_number}");
|
||||||
IntendedCosigns::send(
|
IntendedCosigns::send(
|
||||||
&mut txn,
|
&mut txn,
|
||||||
set,
|
set,
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||||
#![doc = include_str!("../README.md")]
|
#![doc = include_str!("../README.md")]
|
||||||
#![deny(missing_docs)]
|
#![deny(missing_docs)]
|
||||||
|
|
||||||
use core::{fmt::Debug, future::Future};
|
use core::{fmt::Debug, future::Future};
|
||||||
use std::{sync::Arc, collections::HashMap};
|
use std::{sync::Arc, collections::HashMap, time::Instant};
|
||||||
|
|
||||||
use blake2::{Digest, Blake2s256};
|
use blake2::{Digest, Blake2s256};
|
||||||
|
|
||||||
@@ -11,8 +11,8 @@ use scale::{Encode, Decode};
|
|||||||
use borsh::{BorshSerialize, BorshDeserialize};
|
use borsh::{BorshSerialize, BorshDeserialize};
|
||||||
|
|
||||||
use serai_client::{
|
use serai_client::{
|
||||||
primitives::{NetworkId, SeraiAddress},
|
primitives::{ExternalNetworkId, SeraiAddress},
|
||||||
validator_sets::primitives::{Session, ValidatorSet, KeyPair},
|
validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair},
|
||||||
Public, Block, Serai, TemporalSerai,
|
Public, Block, Serai, TemporalSerai,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -52,13 +52,13 @@ pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";
|
|||||||
#[derive(Debug, BorshSerialize, BorshDeserialize)]
|
#[derive(Debug, BorshSerialize, BorshDeserialize)]
|
||||||
pub(crate) struct GlobalSession {
|
pub(crate) struct GlobalSession {
|
||||||
pub(crate) start_block_number: u64,
|
pub(crate) start_block_number: u64,
|
||||||
pub(crate) sets: Vec<ValidatorSet>,
|
pub(crate) sets: Vec<ExternalValidatorSet>,
|
||||||
pub(crate) keys: HashMap<NetworkId, SeraiAddress>,
|
pub(crate) keys: HashMap<ExternalNetworkId, SeraiAddress>,
|
||||||
pub(crate) stakes: HashMap<NetworkId, u64>,
|
pub(crate) stakes: HashMap<ExternalNetworkId, u64>,
|
||||||
pub(crate) total_stake: u64,
|
pub(crate) total_stake: u64,
|
||||||
}
|
}
|
||||||
impl GlobalSession {
|
impl GlobalSession {
|
||||||
fn id(mut cosigners: Vec<ValidatorSet>) -> [u8; 32] {
|
fn id(mut cosigners: Vec<ExternalValidatorSet>) -> [u8; 32] {
|
||||||
cosigners.sort_by_key(|a| borsh::to_vec(a).unwrap());
|
cosigners.sort_by_key(|a| borsh::to_vec(a).unwrap());
|
||||||
Blake2s256::digest(borsh::to_vec(&cosigners).unwrap()).into()
|
Blake2s256::digest(borsh::to_vec(&cosigners).unwrap()).into()
|
||||||
}
|
}
|
||||||
@@ -101,7 +101,25 @@ pub struct Cosign {
|
|||||||
/// The hash of the block to cosign.
|
/// The hash of the block to cosign.
|
||||||
pub block_hash: [u8; 32],
|
pub block_hash: [u8; 32],
|
||||||
/// The actual cosigner.
|
/// The actual cosigner.
|
||||||
pub cosigner: NetworkId,
|
pub cosigner: ExternalNetworkId,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl CosignIntent {
|
||||||
|
/// Convert this into a `Cosign`.
|
||||||
|
pub fn into_cosign(self, cosigner: ExternalNetworkId) -> Cosign {
|
||||||
|
let CosignIntent { global_session, block_number, block_hash, notable: _ } = self;
|
||||||
|
Cosign { global_session, block_number, block_hash, cosigner }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Cosign {
|
||||||
|
/// The message to sign to sign this cosign.
|
||||||
|
///
|
||||||
|
/// This must be signed with schnorrkel, the context set to `COSIGN_CONTEXT`.
|
||||||
|
pub fn signature_message(&self) -> Vec<u8> {
|
||||||
|
// We use a schnorrkel context to domain-separate this
|
||||||
|
self.encode()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A signed cosign.
|
/// A signed cosign.
|
||||||
@@ -118,7 +136,7 @@ impl SignedCosign {
|
|||||||
let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
|
let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
|
||||||
let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };
|
let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };
|
||||||
|
|
||||||
signer.verify_simple(COSIGN_CONTEXT, &self.cosign.encode(), &signature).is_ok()
|
signer.verify_simple(COSIGN_CONTEXT, &self.cosign.signature_message(), &signature).is_ok()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -148,7 +166,10 @@ create_db! {
|
|||||||
// one notable block. All validator sets will explicitly produce a cosign for their notable
|
// one notable block. All validator sets will explicitly produce a cosign for their notable
|
||||||
// block, causing the latest cosigned block for a global session to either be the global
|
// block, causing the latest cosigned block for a global session to either be the global
|
||||||
// session's notable cosigns or the network's latest cosigns.
|
// session's notable cosigns or the network's latest cosigns.
|
||||||
NetworksLatestCosignedBlock: (global_session: [u8; 32], network: NetworkId) -> SignedCosign,
|
NetworksLatestCosignedBlock: (
|
||||||
|
global_session: [u8; 32],
|
||||||
|
network: ExternalNetworkId
|
||||||
|
) -> SignedCosign,
|
||||||
// Cosigns received for blocks not locally recognized as finalized.
|
// Cosigns received for blocks not locally recognized as finalized.
|
||||||
Faults: (global_session: [u8; 32]) -> Vec<SignedCosign>,
|
Faults: (global_session: [u8; 32]) -> Vec<SignedCosign>,
|
||||||
// The global session which faulted.
|
// The global session which faulted.
|
||||||
@@ -159,15 +180,10 @@ create_db! {
|
|||||||
/// Fetch the keys used for cosigning by a specific network.
|
/// Fetch the keys used for cosigning by a specific network.
|
||||||
async fn keys_for_network(
|
async fn keys_for_network(
|
||||||
serai: &TemporalSerai<'_>,
|
serai: &TemporalSerai<'_>,
|
||||||
network: NetworkId,
|
network: ExternalNetworkId,
|
||||||
) -> Result<Option<(Session, KeyPair)>, String> {
|
) -> Result<Option<(Session, KeyPair)>, String> {
|
||||||
// The Serai network never cosigns so it has no keys for cosigning
|
|
||||||
if network == NetworkId::Serai {
|
|
||||||
return Ok(None);
|
|
||||||
}
|
|
||||||
|
|
||||||
let Some(latest_session) =
|
let Some(latest_session) =
|
||||||
serai.validator_sets().session(network).await.map_err(|e| format!("{e:?}"))?
|
serai.validator_sets().session(network.into()).await.map_err(|e| format!("{e:?}"))?
|
||||||
else {
|
else {
|
||||||
// If this network hasn't had a session declared, move on
|
// If this network hasn't had a session declared, move on
|
||||||
return Ok(None);
|
return Ok(None);
|
||||||
@@ -176,7 +192,7 @@ async fn keys_for_network(
|
|||||||
// Get the keys for the latest session
|
// Get the keys for the latest session
|
||||||
if let Some(keys) = serai
|
if let Some(keys) = serai
|
||||||
.validator_sets()
|
.validator_sets()
|
||||||
.keys(ValidatorSet { network, session: latest_session })
|
.keys(ExternalValidatorSet { network, session: latest_session })
|
||||||
.await
|
.await
|
||||||
.map_err(|e| format!("{e:?}"))?
|
.map_err(|e| format!("{e:?}"))?
|
||||||
{
|
{
|
||||||
@@ -187,7 +203,7 @@ async fn keys_for_network(
|
|||||||
if let Some(prior_session) = latest_session.0.checked_sub(1).map(Session) {
|
if let Some(prior_session) = latest_session.0.checked_sub(1).map(Session) {
|
||||||
if let Some(keys) = serai
|
if let Some(keys) = serai
|
||||||
.validator_sets()
|
.validator_sets()
|
||||||
.keys(ValidatorSet { network, session: prior_session })
|
.keys(ExternalValidatorSet { network, session: prior_session })
|
||||||
.await
|
.await
|
||||||
.map_err(|e| format!("{e:?}"))?
|
.map_err(|e| format!("{e:?}"))?
|
||||||
{
|
{
|
||||||
@@ -198,16 +214,19 @@ async fn keys_for_network(
|
|||||||
Ok(None)
|
Ok(None)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Fetch the `ValidatorSet`s, and their associated keys, used for cosigning as of this block.
|
/// Fetch the `ExternalValidatorSet`s, and their associated keys, used for cosigning as of this
|
||||||
async fn cosigning_sets(serai: &TemporalSerai<'_>) -> Result<Vec<(ValidatorSet, Public)>, String> {
|
/// block.
|
||||||
let mut sets = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
|
async fn cosigning_sets(
|
||||||
for network in serai_client::primitives::NETWORKS {
|
serai: &TemporalSerai<'_>,
|
||||||
|
) -> Result<Vec<(ExternalValidatorSet, Public)>, String> {
|
||||||
|
let mut sets = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
|
||||||
|
for network in serai_client::primitives::EXTERNAL_NETWORKS {
|
||||||
let Some((session, keys)) = keys_for_network(serai, network).await? else {
|
let Some((session, keys)) = keys_for_network(serai, network).await? else {
|
||||||
// If this network doesn't have usable keys, move on
|
// If this network doesn't have usable keys, move on
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
sets.push((ValidatorSet { network, session }, keys.0));
|
sets.push((ExternalValidatorSet { network, session }, keys.0));
|
||||||
}
|
}
|
||||||
Ok(sets)
|
Ok(sets)
|
||||||
}
|
}
|
||||||
@@ -288,8 +307,12 @@ impl<D: Db> Cosigning<D> {
|
|||||||
.continually_run(intend_task, vec![evaluator_task_handle]),
|
.continually_run(intend_task, vec![evaluator_task_handle]),
|
||||||
);
|
);
|
||||||
tokio::spawn(
|
tokio::spawn(
|
||||||
(evaluator::CosignEvaluatorTask { db: db.clone(), request })
|
(evaluator::CosignEvaluatorTask {
|
||||||
.continually_run(evaluator_task, vec![delay_task_handle]),
|
db: db.clone(),
|
||||||
|
request,
|
||||||
|
last_request_for_cosigns: Instant::now(),
|
||||||
|
})
|
||||||
|
.continually_run(evaluator_task, vec![delay_task_handle]),
|
||||||
);
|
);
|
||||||
tokio::spawn(
|
tokio::spawn(
|
||||||
(delay::CosignDelayTask { db: db.clone() })
|
(delay::CosignDelayTask { db: db.clone() })
|
||||||
@@ -323,8 +346,8 @@ impl<D: Db> Cosigning<D> {
|
|||||||
/// If this global session hasn't produced any notable cosigns, this will return the latest
|
/// If this global session hasn't produced any notable cosigns, this will return the latest
|
||||||
/// cosigns for this session.
|
/// cosigns for this session.
|
||||||
pub fn notable_cosigns(getter: &impl Get, global_session: [u8; 32]) -> Vec<SignedCosign> {
|
pub fn notable_cosigns(getter: &impl Get, global_session: [u8; 32]) -> Vec<SignedCosign> {
|
||||||
let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
|
let mut cosigns = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
|
||||||
for network in serai_client::primitives::NETWORKS {
|
for network in serai_client::primitives::EXTERNAL_NETWORKS {
|
||||||
if let Some(cosign) = NetworksLatestCosignedBlock::get(getter, global_session, network) {
|
if let Some(cosign) = NetworksLatestCosignedBlock::get(getter, global_session, network) {
|
||||||
cosigns.push(cosign);
|
cosigns.push(cosign);
|
||||||
}
|
}
|
||||||
@@ -341,7 +364,7 @@ impl<D: Db> Cosigning<D> {
|
|||||||
let mut cosigns = Faults::get(&self.db, faulted).expect("faulted with no faults");
|
let mut cosigns = Faults::get(&self.db, faulted).expect("faulted with no faults");
|
||||||
// Also include all of our recognized-as-honest cosigns in an attempt to induce fault
|
// Also include all of our recognized-as-honest cosigns in an attempt to induce fault
|
||||||
// identification in those who see the faulty cosigns as honest
|
// identification in those who see the faulty cosigns as honest
|
||||||
for network in serai_client::primitives::NETWORKS {
|
for network in serai_client::primitives::EXTERNAL_NETWORKS {
|
||||||
if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, faulted, network) {
|
if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, faulted, network) {
|
||||||
if cosign.cosign.global_session == faulted {
|
if cosign.cosign.global_session == faulted {
|
||||||
cosigns.push(cosign);
|
cosigns.push(cosign);
|
||||||
@@ -353,8 +376,8 @@ impl<D: Db> Cosigning<D> {
|
|||||||
let Some(global_session) = evaluator::currently_evaluated_global_session(&self.db) else {
|
let Some(global_session) = evaluator::currently_evaluated_global_session(&self.db) else {
|
||||||
return vec![];
|
return vec![];
|
||||||
};
|
};
|
||||||
let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
|
let mut cosigns = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
|
||||||
for network in serai_client::primitives::NETWORKS {
|
for network in serai_client::primitives::EXTERNAL_NETWORKS {
|
||||||
if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, global_session, network) {
|
if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, global_session, network) {
|
||||||
cosigns.push(cosign);
|
cosigns.push(cosign);
|
||||||
}
|
}
|
||||||
@@ -424,7 +447,7 @@ impl<D: Db> Cosigning<D> {
|
|||||||
// Since we verified this cosign's signature, and have a chain sufficiently long, handle the
|
// Since we verified this cosign's signature, and have a chain sufficiently long, handle the
|
||||||
// cosign
|
// cosign
|
||||||
|
|
||||||
let mut txn = self.db.unsafe_txn();
|
let mut txn = self.db.txn();
|
||||||
|
|
||||||
if !faulty {
|
if !faulty {
|
||||||
// If this is for a future global session, we don't acknowledge this cosign at this time
|
// If this is for a future global session, we don't acknowledge this cosign at this time
|
||||||
@@ -465,12 +488,12 @@ impl<D: Db> Cosigning<D> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Receive intended cosigns to produce for this ValidatorSet.
|
/// Receive intended cosigns to produce for this ExternalValidatorSet.
|
||||||
///
|
///
|
||||||
/// All cosigns intended, up to and including the next notable cosign, are returned.
|
/// All cosigns intended, up to and including the next notable cosign, are returned.
|
||||||
///
|
///
|
||||||
/// This will drain the internal channel and not re-yield these intentions again.
|
/// This will drain the internal channel and not re-yield these intentions again.
|
||||||
pub fn intended_cosigns(txn: &mut impl DbTxn, set: ValidatorSet) -> Vec<CosignIntent> {
|
pub fn intended_cosigns(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec<CosignIntent> {
|
||||||
let mut res: Vec<CosignIntent> = vec![];
|
let mut res: Vec<CosignIntent> = vec![];
|
||||||
// While we have yet to find a notable cosign...
|
// While we have yet to find a notable cosign...
|
||||||
while !res.last().map(|cosign| cosign.notable).unwrap_or(false) {
|
while !res.last().map(|cosign| cosign.notable).unwrap_or(false) {
|
||||||
@@ -480,30 +503,3 @@ impl<D: Db> Cosigning<D> {
|
|||||||
res
|
res
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
mod tests {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
struct RNC;
|
|
||||||
impl RequestNotableCosigns for RNC {
|
|
||||||
/// The error type which may be encountered when requesting notable cosigns.
|
|
||||||
type Error = ();
|
|
||||||
|
|
||||||
/// Request the notable cosigns for this global session.
|
|
||||||
fn request_notable_cosigns(
|
|
||||||
&self,
|
|
||||||
global_session: [u8; 32],
|
|
||||||
) -> impl Send + Future<Output = Result<(), Self::Error>> {
|
|
||||||
async move { Ok(()) }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#[tokio::test]
|
|
||||||
async fn test() {
|
|
||||||
let db: serai_db::MemDb = serai_db::MemDb::new();
|
|
||||||
let serai = unsafe { core::mem::transmute(0u64) };
|
|
||||||
let request = RNC;
|
|
||||||
let tasks = vec![];
|
|
||||||
let _ = Cosigning::spawn(db, serai, request, tasks);
|
|
||||||
core::future::pending().await
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
|||||||
keywords = []
|
keywords = []
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
publish = false
|
publish = false
|
||||||
rust-version = "1.81"
|
rust-version = "1.85"
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
all-features = true
|
all-features = true
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
|||||||
keywords = []
|
keywords = []
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
publish = false
|
publish = false
|
||||||
rust-version = "1.81"
|
rust-version = "1.87"
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
all-features = true
|
all-features = true
|
||||||
@@ -23,7 +23,7 @@ async-trait = { version = "0.1", default-features = false }
|
|||||||
rand_core = { version = "0.6", default-features = false, features = ["std"] }
|
rand_core = { version = "0.6", default-features = false, features = ["std"] }
|
||||||
|
|
||||||
zeroize = { version = "^1.5", default-features = false, features = ["std"] }
|
zeroize = { version = "^1.5", default-features = false, features = ["std"] }
|
||||||
blake2 = { version = "0.10", default-features = false, features = ["std"] }
|
blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }
|
||||||
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
|
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
|
||||||
|
|
||||||
hex = { version = "0.4", default-features = false, features = ["std"] }
|
hex = { version = "0.4", default-features = false, features = ["std"] }
|
||||||
@@ -35,7 +35,7 @@ tributary-sdk = { path = "../../tributary-sdk" }
|
|||||||
|
|
||||||
futures-util = { version = "0.3", default-features = false, features = ["std"] }
|
futures-util = { version = "0.3", default-features = false, features = ["std"] }
|
||||||
tokio = { version = "1", default-features = false, features = ["sync"] }
|
tokio = { version = "1", default-features = false, features = ["sync"] }
|
||||||
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] }
|
libp2p = { version = "0.56", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] }
|
||||||
|
|
||||||
log = { version = "0.4", default-features = false, features = ["std"] }
|
log = { version = "0.4", default-features = false, features = ["std"] }
|
||||||
serai-task = { path = "../../../common/task", version = "0.1" }
|
serai-task = { path = "../../../common/task", version = "0.1" }
|
||||||
|
|||||||
@@ -11,8 +11,7 @@ use serai_client::primitives::PublicKey as Public;
|
|||||||
|
|
||||||
use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
|
use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
|
||||||
use libp2p::{
|
use libp2p::{
|
||||||
core::UpgradeInfo,
|
core::upgrade::{UpgradeInfo, InboundConnectionUpgrade, OutboundConnectionUpgrade},
|
||||||
InboundUpgrade, OutboundUpgrade,
|
|
||||||
identity::{self, PeerId},
|
identity::{self, PeerId},
|
||||||
noise,
|
noise,
|
||||||
};
|
};
|
||||||
@@ -119,12 +118,18 @@ impl UpgradeInfo for OnlyValidators {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> InboundUpgrade<S> for OnlyValidators {
|
impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> InboundConnectionUpgrade<S>
|
||||||
|
for OnlyValidators
|
||||||
|
{
|
||||||
type Output = (PeerId, noise::Output<S>);
|
type Output = (PeerId, noise::Output<S>);
|
||||||
type Error = io::Error;
|
type Error = io::Error;
|
||||||
type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;
|
type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;
|
||||||
|
|
||||||
fn upgrade_inbound(self, socket: S, info: Self::Info) -> Self::Future {
|
fn upgrade_inbound(
|
||||||
|
self,
|
||||||
|
socket: S,
|
||||||
|
info: <Self as UpgradeInfo>::Info,
|
||||||
|
) -> <Self as InboundConnectionUpgrade<S>>::Future {
|
||||||
Box::pin(async move {
|
Box::pin(async move {
|
||||||
let (dialer_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
|
let (dialer_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
@@ -147,12 +152,18 @@ impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> InboundUpgrade<S> for O
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> OutboundUpgrade<S> for OnlyValidators {
|
impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> OutboundConnectionUpgrade<S>
|
||||||
|
for OnlyValidators
|
||||||
|
{
|
||||||
type Output = (PeerId, noise::Output<S>);
|
type Output = (PeerId, noise::Output<S>);
|
||||||
type Error = io::Error;
|
type Error = io::Error;
|
||||||
type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;
|
type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;
|
||||||
|
|
||||||
fn upgrade_outbound(self, socket: S, info: Self::Info) -> Self::Future {
|
fn upgrade_outbound(
|
||||||
|
self,
|
||||||
|
socket: S,
|
||||||
|
info: <Self as UpgradeInfo>::Info,
|
||||||
|
) -> <Self as OutboundConnectionUpgrade<S>>::Future {
|
||||||
Box::pin(async move {
|
Box::pin(async move {
|
||||||
let (listener_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
|
let (listener_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||||
#![doc = include_str!("../README.md")]
|
#![doc = include_str!("../README.md")]
|
||||||
#![deny(missing_docs)]
|
#![deny(missing_docs)]
|
||||||
|
|
||||||
@@ -14,8 +14,8 @@ use zeroize::Zeroizing;
|
|||||||
use schnorrkel::Keypair;
|
use schnorrkel::Keypair;
|
||||||
|
|
||||||
use serai_client::{
|
use serai_client::{
|
||||||
primitives::{NetworkId, PublicKey},
|
primitives::{ExternalNetworkId, PublicKey},
|
||||||
validator_sets::primitives::ValidatorSet,
|
validator_sets::primitives::ExternalValidatorSet,
|
||||||
Serai,
|
Serai,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -50,7 +50,7 @@ mod ping;
|
|||||||
|
|
||||||
/// The request-response messages and behavior
|
/// The request-response messages and behavior
|
||||||
mod reqres;
|
mod reqres;
|
||||||
use reqres::{RequestId, Request, Response};
|
use reqres::{InboundRequestId, Request, Response};
|
||||||
|
|
||||||
/// The gossip messages and behavior
|
/// The gossip messages and behavior
|
||||||
mod gossip;
|
mod gossip;
|
||||||
@@ -66,14 +66,6 @@ use dial::DialTask;
|
|||||||
|
|
||||||
const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o')
|
const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o')
|
||||||
|
|
||||||
// usize::max, manually implemented, as max isn't a const fn
|
|
||||||
const MAX_LIBP2P_MESSAGE_SIZE: usize =
|
|
||||||
if gossip::MAX_LIBP2P_GOSSIP_MESSAGE_SIZE > reqres::MAX_LIBP2P_REQRES_MESSAGE_SIZE {
|
|
||||||
gossip::MAX_LIBP2P_GOSSIP_MESSAGE_SIZE
|
|
||||||
} else {
|
|
||||||
reqres::MAX_LIBP2P_REQRES_MESSAGE_SIZE
|
|
||||||
};
|
|
||||||
|
|
||||||
fn peer_id_from_public(public: PublicKey) -> PeerId {
|
fn peer_id_from_public(public: PublicKey) -> PeerId {
|
||||||
// 0 represents the identity Multihash, that no hash was performed
|
// 0 represents the identity Multihash, that no hash was performed
|
||||||
// It's an internal constant so we can't refer to the constant inside libp2p
|
// It's an internal constant so we can't refer to the constant inside libp2p
|
||||||
@@ -112,7 +104,7 @@ impl serai_coordinator_p2p::Peer<'_> for Peer<'_> {
|
|||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
struct Peers {
|
struct Peers {
|
||||||
peers: Arc<RwLock<HashMap<NetworkId, HashSet<PeerId>>>>,
|
peers: Arc<RwLock<HashMap<ExternalNetworkId, HashSet<PeerId>>>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Consider adding identify/kad/autonat/rendevous/(relay + dcutr). While we currently use the Serai
|
// Consider adding identify/kad/autonat/rendevous/(relay + dcutr). While we currently use the Serai
|
||||||
@@ -143,9 +135,10 @@ struct Libp2pInner {
|
|||||||
signed_cosigns: Mutex<mpsc::UnboundedReceiver<SignedCosign>>,
|
signed_cosigns: Mutex<mpsc::UnboundedReceiver<SignedCosign>>,
|
||||||
signed_cosigns_send: mpsc::UnboundedSender<SignedCosign>,
|
signed_cosigns_send: mpsc::UnboundedSender<SignedCosign>,
|
||||||
|
|
||||||
heartbeat_requests: Mutex<mpsc::UnboundedReceiver<(RequestId, ValidatorSet, [u8; 32])>>,
|
heartbeat_requests:
|
||||||
notable_cosign_requests: Mutex<mpsc::UnboundedReceiver<(RequestId, [u8; 32])>>,
|
Mutex<mpsc::UnboundedReceiver<(InboundRequestId, ExternalValidatorSet, [u8; 32])>>,
|
||||||
inbound_request_responses: mpsc::UnboundedSender<(RequestId, Response)>,
|
notable_cosign_requests: Mutex<mpsc::UnboundedReceiver<(InboundRequestId, [u8; 32])>>,
|
||||||
|
inbound_request_responses: mpsc::UnboundedSender<(InboundRequestId, Response)>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The libp2p-backed P2P implementation.
|
/// The libp2p-backed P2P implementation.
|
||||||
@@ -176,19 +169,9 @@ impl Libp2p {
|
|||||||
Ok(OnlyValidators { serai_key: serai_key.clone(), noise_keypair: noise_keypair.clone() })
|
Ok(OnlyValidators { serai_key: serai_key.clone(), noise_keypair: noise_keypair.clone() })
|
||||||
};
|
};
|
||||||
|
|
||||||
let new_yamux = || {
|
|
||||||
let mut config = yamux::Config::default();
|
|
||||||
// 1 MiB default + max message size
|
|
||||||
config.set_max_buffer_size((1024 * 1024) + MAX_LIBP2P_MESSAGE_SIZE);
|
|
||||||
// 256 KiB default + max message size
|
|
||||||
config
|
|
||||||
.set_receive_window_size(((256 * 1024) + MAX_LIBP2P_MESSAGE_SIZE).try_into().unwrap());
|
|
||||||
config
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut swarm = SwarmBuilder::with_existing_identity(identity::Keypair::generate_ed25519())
|
let mut swarm = SwarmBuilder::with_existing_identity(identity::Keypair::generate_ed25519())
|
||||||
.with_tokio()
|
.with_tokio()
|
||||||
.with_tcp(TcpConfig::default().nodelay(true), new_only_validators, new_yamux)
|
.with_tcp(TcpConfig::default().nodelay(true), new_only_validators, yamux::Config::default)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.with_behaviour(|_| Behavior {
|
.with_behaviour(|_| Behavior {
|
||||||
allow_list: allow_block_list::Behaviour::default(),
|
allow_list: allow_block_list::Behaviour::default(),
|
||||||
@@ -330,7 +313,7 @@ impl serai_cosign::RequestNotableCosigns for Libp2p {
|
|||||||
impl serai_coordinator_p2p::P2p for Libp2p {
|
impl serai_coordinator_p2p::P2p for Libp2p {
|
||||||
type Peer<'a> = Peer<'a>;
|
type Peer<'a> = Peer<'a>;
|
||||||
|
|
||||||
fn peers(&self, network: NetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>> {
|
fn peers(&self, network: ExternalNetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>> {
|
||||||
async move {
|
async move {
|
||||||
let Some(peer_ids) = self.0.peers.peers.read().await.get(&network).cloned() else {
|
let Some(peer_ids) = self.0.peers.peers.read().await.get(&network).cloned() else {
|
||||||
return vec![];
|
return vec![];
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
|
|||||||
use libp2p::request_response::{
|
use libp2p::request_response::{
|
||||||
self, Codec as CodecTrait, Event as GenericEvent, Config, Behaviour, ProtocolSupport,
|
self, Codec as CodecTrait, Event as GenericEvent, Config, Behaviour, ProtocolSupport,
|
||||||
};
|
};
|
||||||
pub use request_response::{RequestId, Message};
|
pub use request_response::{InboundRequestId, Message};
|
||||||
|
|
||||||
use serai_cosign::SignedCosign;
|
use serai_cosign::SignedCosign;
|
||||||
|
|
||||||
@@ -129,7 +129,6 @@ pub(crate) type Event = GenericEvent<Request, Response>;
|
|||||||
|
|
||||||
pub(crate) type Behavior = Behaviour<Codec>;
|
pub(crate) type Behavior = Behaviour<Codec>;
|
||||||
pub(crate) fn new_behavior() -> Behavior {
|
pub(crate) fn new_behavior() -> Behavior {
|
||||||
let mut config = Config::default();
|
let config = Config::default().with_request_timeout(Duration::from_secs(5));
|
||||||
config.set_request_timeout(Duration::from_secs(5));
|
|
||||||
Behavior::new([(PROTOCOL, ProtocolSupport::Full)], config)
|
Behavior::new([(PROTOCOL, ProtocolSupport::Full)], config)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ use std::{
|
|||||||
|
|
||||||
use borsh::BorshDeserialize;
|
use borsh::BorshDeserialize;
|
||||||
|
|
||||||
use serai_client::validator_sets::primitives::ValidatorSet;
|
use serai_client::validator_sets::primitives::ExternalValidatorSet;
|
||||||
|
|
||||||
use tokio::sync::{mpsc, oneshot, RwLock};
|
use tokio::sync::{mpsc, oneshot, RwLock};
|
||||||
|
|
||||||
@@ -17,7 +17,7 @@ use serai_cosign::SignedCosign;
|
|||||||
use futures_util::StreamExt;
|
use futures_util::StreamExt;
|
||||||
use libp2p::{
|
use libp2p::{
|
||||||
identity::PeerId,
|
identity::PeerId,
|
||||||
request_response::{RequestId, ResponseChannel},
|
request_response::{InboundRequestId, OutboundRequestId, ResponseChannel},
|
||||||
swarm::{dial_opts::DialOpts, SwarmEvent, Swarm},
|
swarm::{dial_opts::DialOpts, SwarmEvent, Swarm},
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -65,12 +65,12 @@ pub(crate) struct SwarmTask {
|
|||||||
tributary_gossip: mpsc::UnboundedSender<([u8; 32], Vec<u8>)>,
|
tributary_gossip: mpsc::UnboundedSender<([u8; 32], Vec<u8>)>,
|
||||||
|
|
||||||
outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,
|
outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,
|
||||||
outbound_request_responses: HashMap<RequestId, oneshot::Sender<Response>>,
|
outbound_request_responses: HashMap<OutboundRequestId, oneshot::Sender<Response>>,
|
||||||
|
|
||||||
inbound_request_response_channels: HashMap<RequestId, ResponseChannel<Response>>,
|
inbound_request_response_channels: HashMap<InboundRequestId, ResponseChannel<Response>>,
|
||||||
heartbeat_requests: mpsc::UnboundedSender<(RequestId, ValidatorSet, [u8; 32])>,
|
heartbeat_requests: mpsc::UnboundedSender<(InboundRequestId, ExternalValidatorSet, [u8; 32])>,
|
||||||
notable_cosign_requests: mpsc::UnboundedSender<(RequestId, [u8; 32])>,
|
notable_cosign_requests: mpsc::UnboundedSender<(InboundRequestId, [u8; 32])>,
|
||||||
inbound_request_responses: mpsc::UnboundedReceiver<(RequestId, Response)>,
|
inbound_request_responses: mpsc::UnboundedReceiver<(InboundRequestId, Response)>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SwarmTask {
|
impl SwarmTask {
|
||||||
@@ -92,7 +92,8 @@ impl SwarmTask {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
gossip::Event::Subscribed { .. } | gossip::Event::Unsubscribed { .. } => {}
|
gossip::Event::Subscribed { .. } | gossip::Event::Unsubscribed { .. } => {}
|
||||||
gossip::Event::GossipsubNotSupported { peer_id } => {
|
gossip::Event::GossipsubNotSupported { peer_id } |
|
||||||
|
gossip::Event::SlowPeer { peer_id, .. } => {
|
||||||
let _: Result<_, _> = self.swarm.disconnect_peer_id(peer_id);
|
let _: Result<_, _> = self.swarm.disconnect_peer_id(peer_id);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -222,25 +223,21 @@ impl SwarmTask {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
SwarmEvent::Behaviour(
|
SwarmEvent::Behaviour(event) => {
|
||||||
BehaviorEvent::AllowList(event) | BehaviorEvent::ConnectionLimits(event)
|
match event {
|
||||||
) => {
|
BehaviorEvent::AllowList(event) | BehaviorEvent::ConnectionLimits(event) => {
|
||||||
// This *is* an exhaustive match as these events are empty enums
|
// This *is* an exhaustive match as these events are empty enums
|
||||||
match event {}
|
match event {}
|
||||||
}
|
}
|
||||||
SwarmEvent::Behaviour(
|
BehaviorEvent::Ping(ping::Event { peer: _, connection, result, }) => {
|
||||||
BehaviorEvent::Ping(ping::Event { peer: _, connection, result, })
|
if result.is_err() {
|
||||||
) => {
|
self.swarm.close_connection(connection);
|
||||||
if result.is_err() {
|
}
|
||||||
self.swarm.close_connection(connection);
|
}
|
||||||
|
BehaviorEvent::Reqres(event) => self.handle_reqres(event),
|
||||||
|
BehaviorEvent::Gossip(event) => self.handle_gossip(event),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
SwarmEvent::Behaviour(BehaviorEvent::Reqres(event)) => {
|
|
||||||
self.handle_reqres(event)
|
|
||||||
}
|
|
||||||
SwarmEvent::Behaviour(BehaviorEvent::Gossip(event)) => {
|
|
||||||
self.handle_gossip(event)
|
|
||||||
}
|
|
||||||
|
|
||||||
// We don't handle any of these
|
// We don't handle any of these
|
||||||
SwarmEvent::IncomingConnection { .. } |
|
SwarmEvent::IncomingConnection { .. } |
|
||||||
@@ -250,7 +247,14 @@ impl SwarmTask {
|
|||||||
SwarmEvent::ExpiredListenAddr { .. } |
|
SwarmEvent::ExpiredListenAddr { .. } |
|
||||||
SwarmEvent::ListenerClosed { .. } |
|
SwarmEvent::ListenerClosed { .. } |
|
||||||
SwarmEvent::ListenerError { .. } |
|
SwarmEvent::ListenerError { .. } |
|
||||||
SwarmEvent::Dialing { .. } => {}
|
SwarmEvent::Dialing { .. } |
|
||||||
|
SwarmEvent::NewExternalAddrCandidate { .. } |
|
||||||
|
SwarmEvent::ExternalAddrConfirmed { .. } |
|
||||||
|
SwarmEvent::ExternalAddrExpired { .. } |
|
||||||
|
SwarmEvent::NewExternalAddrOfPeer { .. } => {}
|
||||||
|
|
||||||
|
// Requires as SwarmEvent is non-exhaustive
|
||||||
|
_ => log::warn!("unhandled SwarmEvent: {event:?}"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -321,9 +325,9 @@ impl SwarmTask {
|
|||||||
|
|
||||||
outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,
|
outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,
|
||||||
|
|
||||||
heartbeat_requests: mpsc::UnboundedSender<(RequestId, ValidatorSet, [u8; 32])>,
|
heartbeat_requests: mpsc::UnboundedSender<(InboundRequestId, ExternalValidatorSet, [u8; 32])>,
|
||||||
notable_cosign_requests: mpsc::UnboundedSender<(RequestId, [u8; 32])>,
|
notable_cosign_requests: mpsc::UnboundedSender<(InboundRequestId, [u8; 32])>,
|
||||||
inbound_request_responses: mpsc::UnboundedReceiver<(RequestId, Response)>,
|
inbound_request_responses: mpsc::UnboundedReceiver<(InboundRequestId, Response)>,
|
||||||
) {
|
) {
|
||||||
tokio::spawn(
|
tokio::spawn(
|
||||||
SwarmTask {
|
SwarmTask {
|
||||||
|
|||||||
@@ -4,7 +4,9 @@ use std::{
|
|||||||
collections::{HashSet, HashMap},
|
collections::{HashSet, HashMap},
|
||||||
};
|
};
|
||||||
|
|
||||||
use serai_client::{primitives::NetworkId, validator_sets::primitives::Session, SeraiError, Serai};
|
use serai_client::{
|
||||||
|
primitives::ExternalNetworkId, validator_sets::primitives::Session, SeraiError, Serai,
|
||||||
|
};
|
||||||
|
|
||||||
use serai_task::{Task, ContinuallyRan};
|
use serai_task::{Task, ContinuallyRan};
|
||||||
|
|
||||||
@@ -24,11 +26,11 @@ pub(crate) struct Validators {
|
|||||||
serai: Arc<Serai>,
|
serai: Arc<Serai>,
|
||||||
|
|
||||||
// A cache for which session we're populated with the validators of
|
// A cache for which session we're populated with the validators of
|
||||||
sessions: HashMap<NetworkId, Session>,
|
sessions: HashMap<ExternalNetworkId, Session>,
|
||||||
// The validators by network
|
// The validators by network
|
||||||
by_network: HashMap<NetworkId, HashSet<PeerId>>,
|
by_network: HashMap<ExternalNetworkId, HashSet<PeerId>>,
|
||||||
// The validators and their networks
|
// The validators and their networks
|
||||||
validators: HashMap<PeerId, HashSet<NetworkId>>,
|
validators: HashMap<PeerId, HashSet<ExternalNetworkId>>,
|
||||||
|
|
||||||
// The channel to send the changes down
|
// The channel to send the changes down
|
||||||
changes: mpsc::UnboundedSender<Changes>,
|
changes: mpsc::UnboundedSender<Changes>,
|
||||||
@@ -49,8 +51,16 @@ impl Validators {
|
|||||||
|
|
||||||
async fn session_changes(
|
async fn session_changes(
|
||||||
serai: impl Borrow<Serai>,
|
serai: impl Borrow<Serai>,
|
||||||
sessions: impl Borrow<HashMap<NetworkId, Session>>,
|
sessions: impl Borrow<HashMap<ExternalNetworkId, Session>>,
|
||||||
) -> Result<Vec<(NetworkId, Session, HashSet<PeerId>)>, SeraiError> {
|
) -> Result<Vec<(ExternalNetworkId, Session, HashSet<PeerId>)>, SeraiError> {
|
||||||
|
/*
|
||||||
|
This uses the latest finalized block, not the latest cosigned block, which should be fine as
|
||||||
|
in the worst case, we'd connect to unexpected validators. They still shouldn't be able to
|
||||||
|
bypass the cosign protocol unless a historical global session was malicious, in which case
|
||||||
|
the cosign protocol already breaks.
|
||||||
|
|
||||||
|
Besides, we can't connect to historical validators, only the current validators.
|
||||||
|
*/
|
||||||
let temporal_serai = serai.borrow().as_of_latest_finalized_block().await?;
|
let temporal_serai = serai.borrow().as_of_latest_finalized_block().await?;
|
||||||
let temporal_serai = temporal_serai.validator_sets();
|
let temporal_serai = temporal_serai.validator_sets();
|
||||||
|
|
||||||
@@ -59,13 +69,10 @@ impl Validators {
|
|||||||
// FuturesUnordered can be bad practice as it'll cause timeouts if infrequently polled, but
|
// FuturesUnordered can be bad practice as it'll cause timeouts if infrequently polled, but
|
||||||
// we poll it till it yields all futures with the most minimal processing possible
|
// we poll it till it yields all futures with the most minimal processing possible
|
||||||
let mut futures = FuturesUnordered::new();
|
let mut futures = FuturesUnordered::new();
|
||||||
for network in serai_client::primitives::NETWORKS {
|
for network in serai_client::primitives::EXTERNAL_NETWORKS {
|
||||||
if network == NetworkId::Serai {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
let sessions = sessions.borrow();
|
let sessions = sessions.borrow();
|
||||||
futures.push(async move {
|
futures.push(async move {
|
||||||
let session = match temporal_serai.session(network).await {
|
let session = match temporal_serai.session(network.into()).await {
|
||||||
Ok(Some(session)) => session,
|
Ok(Some(session)) => session,
|
||||||
Ok(None) => return Ok(None),
|
Ok(None) => return Ok(None),
|
||||||
Err(e) => return Err(e),
|
Err(e) => return Err(e),
|
||||||
@@ -74,7 +81,7 @@ impl Validators {
|
|||||||
if sessions.get(&network) == Some(&session) {
|
if sessions.get(&network) == Some(&session) {
|
||||||
Ok(None)
|
Ok(None)
|
||||||
} else {
|
} else {
|
||||||
match temporal_serai.active_network_validators(network).await {
|
match temporal_serai.active_network_validators(network.into()).await {
|
||||||
Ok(validators) => Ok(Some((
|
Ok(validators) => Ok(Some((
|
||||||
network,
|
network,
|
||||||
session,
|
session,
|
||||||
@@ -97,7 +104,7 @@ impl Validators {
|
|||||||
|
|
||||||
fn incorporate_session_changes(
|
fn incorporate_session_changes(
|
||||||
&mut self,
|
&mut self,
|
||||||
session_changes: Vec<(NetworkId, Session, HashSet<PeerId>)>,
|
session_changes: Vec<(ExternalNetworkId, Session, HashSet<PeerId>)>,
|
||||||
) {
|
) {
|
||||||
let mut removed = HashSet::new();
|
let mut removed = HashSet::new();
|
||||||
let mut added = HashSet::new();
|
let mut added = HashSet::new();
|
||||||
@@ -152,11 +159,11 @@ impl Validators {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn by_network(&self) -> &HashMap<NetworkId, HashSet<PeerId>> {
|
pub(crate) fn by_network(&self) -> &HashMap<ExternalNetworkId, HashSet<PeerId>> {
|
||||||
&self.by_network
|
&self.by_network
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn networks(&self, peer_id: &PeerId) -> Option<&HashSet<NetworkId>> {
|
pub(crate) fn networks(&self, peer_id: &PeerId) -> Option<&HashSet<ExternalNetworkId>> {
|
||||||
self.validators.get(peer_id)
|
self.validators.get(peer_id)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
use core::future::Future;
|
use core::future::Future;
|
||||||
use std::time::{Duration, SystemTime};
|
use std::time::{Duration, SystemTime};
|
||||||
|
|
||||||
use serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet};
|
use serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet};
|
||||||
|
|
||||||
use futures_lite::FutureExt;
|
use futures_lite::FutureExt;
|
||||||
|
|
||||||
@@ -38,7 +38,7 @@ pub const BATCH_SIZE_LIMIT: usize = MIN_BLOCKS_PER_BATCH *
|
|||||||
/// If the other validator has more blocks then we do, they're expected to inform us. This forms
|
/// If the other validator has more blocks then we do, they're expected to inform us. This forms
|
||||||
/// the sync protocol for our Tributaries.
|
/// the sync protocol for our Tributaries.
|
||||||
pub(crate) struct HeartbeatTask<TD: Db, Tx: TransactionTrait, P: P2p> {
|
pub(crate) struct HeartbeatTask<TD: Db, Tx: TransactionTrait, P: P2p> {
|
||||||
pub(crate) set: ValidatorSet,
|
pub(crate) set: ExternalValidatorSet,
|
||||||
pub(crate) tributary: Tributary<TD, Tx, P>,
|
pub(crate) tributary: Tributary<TD, Tx, P>,
|
||||||
pub(crate) reader: TributaryReader<TD, Tx>,
|
pub(crate) reader: TributaryReader<TD, Tx>,
|
||||||
pub(crate) p2p: P,
|
pub(crate) p2p: P,
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||||
#![doc = include_str!("../README.md")]
|
#![doc = include_str!("../README.md")]
|
||||||
#![deny(missing_docs)]
|
#![deny(missing_docs)]
|
||||||
|
|
||||||
@@ -7,7 +7,7 @@ use std::collections::HashMap;
|
|||||||
|
|
||||||
use borsh::{BorshSerialize, BorshDeserialize};
|
use borsh::{BorshSerialize, BorshDeserialize};
|
||||||
|
|
||||||
use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};
|
use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet};
|
||||||
|
|
||||||
use serai_db::Db;
|
use serai_db::Db;
|
||||||
use tributary_sdk::{ReadWrite, TransactionTrait, Tributary, TributaryReader};
|
use tributary_sdk::{ReadWrite, TransactionTrait, Tributary, TributaryReader};
|
||||||
@@ -25,7 +25,7 @@ use crate::heartbeat::HeartbeatTask;
|
|||||||
#[derive(Clone, Copy, BorshSerialize, BorshDeserialize, Debug)]
|
#[derive(Clone, Copy, BorshSerialize, BorshDeserialize, Debug)]
|
||||||
pub struct Heartbeat {
|
pub struct Heartbeat {
|
||||||
/// The Tributary this is the heartbeat of.
|
/// The Tributary this is the heartbeat of.
|
||||||
pub set: ValidatorSet,
|
pub set: ExternalValidatorSet,
|
||||||
/// The hash of the latest block added to the Tributary.
|
/// The hash of the latest block added to the Tributary.
|
||||||
pub latest_block_hash: [u8; 32],
|
pub latest_block_hash: [u8; 32],
|
||||||
}
|
}
|
||||||
@@ -56,7 +56,7 @@ pub trait P2p:
|
|||||||
type Peer<'a>: Peer<'a>;
|
type Peer<'a>: Peer<'a>;
|
||||||
|
|
||||||
/// Fetch the peers for this network.
|
/// Fetch the peers for this network.
|
||||||
fn peers(&self, network: NetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>>;
|
fn peers(&self, network: ExternalNetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>>;
|
||||||
|
|
||||||
/// Broadcast a cosign.
|
/// Broadcast a cosign.
|
||||||
fn publish_cosign(&self, cosign: SignedCosign) -> impl Send + Future<Output = ()>;
|
fn publish_cosign(&self, cosign: SignedCosign) -> impl Send + Future<Output = ()>;
|
||||||
@@ -131,13 +131,13 @@ fn handle_heartbeat<D: Db, T: TransactionTrait>(
|
|||||||
pub async fn run<TD: Db, Tx: TransactionTrait, P: P2p>(
|
pub async fn run<TD: Db, Tx: TransactionTrait, P: P2p>(
|
||||||
db: impl Db,
|
db: impl Db,
|
||||||
p2p: P,
|
p2p: P,
|
||||||
mut add_tributary: mpsc::UnboundedReceiver<(ValidatorSet, Tributary<TD, Tx, P>)>,
|
mut add_tributary: mpsc::UnboundedReceiver<(ExternalValidatorSet, Tributary<TD, Tx, P>)>,
|
||||||
mut retire_tributary: mpsc::UnboundedReceiver<ValidatorSet>,
|
mut retire_tributary: mpsc::UnboundedReceiver<ExternalValidatorSet>,
|
||||||
send_cosigns: mpsc::UnboundedSender<SignedCosign>,
|
send_cosigns: mpsc::UnboundedSender<SignedCosign>,
|
||||||
) {
|
) {
|
||||||
let mut readers = HashMap::<ValidatorSet, TributaryReader<TD, Tx>>::new();
|
let mut readers = HashMap::<ExternalValidatorSet, TributaryReader<TD, Tx>>::new();
|
||||||
let mut tributaries = HashMap::<[u8; 32], mpsc::UnboundedSender<Vec<u8>>>::new();
|
let mut tributaries = HashMap::<[u8; 32], mpsc::UnboundedSender<Vec<u8>>>::new();
|
||||||
let mut heartbeat_tasks = HashMap::<ValidatorSet, _>::new();
|
let mut heartbeat_tasks = HashMap::<ExternalValidatorSet, _>::new();
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
tokio::select! {
|
tokio::select! {
|
||||||
|
|||||||
@@ -3,9 +3,11 @@ use std::{path::Path, fs};
|
|||||||
pub(crate) use serai_db::{Get, DbTxn, Db as DbTrait};
|
pub(crate) use serai_db::{Get, DbTxn, Db as DbTrait};
|
||||||
use serai_db::{create_db, db_channel};
|
use serai_db::{create_db, db_channel};
|
||||||
|
|
||||||
|
use dkg::Participant;
|
||||||
|
|
||||||
use serai_client::{
|
use serai_client::{
|
||||||
primitives::NetworkId,
|
primitives::ExternalNetworkId,
|
||||||
validator_sets::primitives::{Session, ValidatorSet},
|
validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair},
|
||||||
};
|
};
|
||||||
|
|
||||||
use serai_cosign::SignedCosign;
|
use serai_cosign::SignedCosign;
|
||||||
@@ -13,7 +15,7 @@ use serai_coordinator_substrate::NewSetInformation;
|
|||||||
use serai_coordinator_tributary::Transaction;
|
use serai_coordinator_tributary::Transaction;
|
||||||
|
|
||||||
#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
|
#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
|
||||||
pub(crate) type Db = serai_db::ParityDb;
|
pub(crate) type Db = std::sync::Arc<serai_db::ParityDb>;
|
||||||
#[cfg(feature = "rocksdb")]
|
#[cfg(feature = "rocksdb")]
|
||||||
pub(crate) type Db = serai_db::RocksDB;
|
pub(crate) type Db = serai_db::RocksDB;
|
||||||
|
|
||||||
@@ -41,22 +43,21 @@ pub(crate) fn coordinator_db() -> Db {
|
|||||||
db(&format!("{root_path}/coordinator/db"))
|
db(&format!("{root_path}/coordinator/db"))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn tributary_db_folder(set: ValidatorSet) -> String {
|
fn tributary_db_folder(set: ExternalValidatorSet) -> String {
|
||||||
let root_path = serai_env::var("DB_PATH").expect("path to DB wasn't specified");
|
let root_path = serai_env::var("DB_PATH").expect("path to DB wasn't specified");
|
||||||
let network = match set.network {
|
let network = match set.network {
|
||||||
NetworkId::Serai => panic!("creating Tributary for the Serai network"),
|
ExternalNetworkId::Bitcoin => "Bitcoin",
|
||||||
NetworkId::Bitcoin => "Bitcoin",
|
ExternalNetworkId::Ethereum => "Ethereum",
|
||||||
NetworkId::Ethereum => "Ethereum",
|
ExternalNetworkId::Monero => "Monero",
|
||||||
NetworkId::Monero => "Monero",
|
|
||||||
};
|
};
|
||||||
format!("{root_path}/tributary-{network}-{}", set.session.0)
|
format!("{root_path}/tributary-{network}-{}", set.session.0)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn tributary_db(set: ValidatorSet) -> Db {
|
pub(crate) fn tributary_db(set: ExternalValidatorSet) -> Db {
|
||||||
db(&format!("{}/db", tributary_db_folder(set)))
|
db(&format!("{}/db", tributary_db_folder(set)))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn prune_tributary_db(set: ValidatorSet) {
|
pub(crate) fn prune_tributary_db(set: ExternalValidatorSet) {
|
||||||
log::info!("pruning data directory for tributary {set:?}");
|
log::info!("pruning data directory for tributary {set:?}");
|
||||||
let db = tributary_db_folder(set);
|
let db = tributary_db_folder(set);
|
||||||
if fs::exists(&db).expect("couldn't check if tributary DB exists") {
|
if fs::exists(&db).expect("couldn't check if tributary DB exists") {
|
||||||
@@ -71,11 +72,15 @@ create_db! {
|
|||||||
// The latest Tributary to have been retired for a network
|
// The latest Tributary to have been retired for a network
|
||||||
// Since Tributaries are retired sequentially, this is informative to if any Tributary has been
|
// Since Tributaries are retired sequentially, this is informative to if any Tributary has been
|
||||||
// retired
|
// retired
|
||||||
RetiredTributary: (network: NetworkId) -> Session,
|
RetiredTributary: (network: ExternalNetworkId) -> Session,
|
||||||
// The last handled message from a Processor
|
// The last handled message from a Processor
|
||||||
LastProcessorMessage: (network: NetworkId) -> u64,
|
LastProcessorMessage: (network: ExternalNetworkId) -> u64,
|
||||||
// Cosigns we produced and tried to intake yet incurred an error while doing so
|
// Cosigns we produced and tried to intake yet incurred an error while doing so
|
||||||
ErroneousCosigns: () -> Vec<SignedCosign>,
|
ErroneousCosigns: () -> Vec<SignedCosign>,
|
||||||
|
// The keys to confirm and set on the Serai network
|
||||||
|
KeysToConfirm: (set: ExternalValidatorSet) -> KeyPair,
|
||||||
|
// The key was set on the Serai network
|
||||||
|
KeySet: (set: ExternalValidatorSet) -> (),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -84,7 +89,7 @@ db_channel! {
|
|||||||
// Cosigns we produced
|
// Cosigns we produced
|
||||||
SignedCosigns: () -> SignedCosign,
|
SignedCosigns: () -> SignedCosign,
|
||||||
// Tributaries to clean up upon reboot
|
// Tributaries to clean up upon reboot
|
||||||
TributaryCleanup: () -> ValidatorSet,
|
TributaryCleanup: () -> ExternalValidatorSet,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -93,21 +98,51 @@ mod _internal_db {
|
|||||||
|
|
||||||
db_channel! {
|
db_channel! {
|
||||||
Coordinator {
|
Coordinator {
|
||||||
// Tributary transactions to publish
|
// Tributary transactions to publish from the Processor messages
|
||||||
TributaryTransactions: (set: ValidatorSet) -> Transaction,
|
TributaryTransactionsFromProcessorMessages: (set: ExternalValidatorSet) -> Transaction,
|
||||||
|
// Tributary transactions to publish from the DKG confirmation task
|
||||||
|
TributaryTransactionsFromDkgConfirmation: (set: ExternalValidatorSet) -> Transaction,
|
||||||
|
// Participants to remove
|
||||||
|
RemoveParticipant: (set: ExternalValidatorSet) -> Participant,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) struct TributaryTransactions;
|
pub(crate) struct TributaryTransactionsFromProcessorMessages;
|
||||||
impl TributaryTransactions {
|
impl TributaryTransactionsFromProcessorMessages {
|
||||||
pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet, tx: &Transaction) {
|
pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, tx: &Transaction) {
|
||||||
// If this set has yet to be retired, send this transaction
|
// If this set has yet to be retired, send this transaction
|
||||||
if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
|
if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
|
||||||
_internal_db::TributaryTransactions::send(txn, set, tx);
|
_internal_db::TributaryTransactionsFromProcessorMessages::send(txn, set, tx);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<Transaction> {
|
pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Transaction> {
|
||||||
_internal_db::TributaryTransactions::try_recv(txn, set)
|
_internal_db::TributaryTransactionsFromProcessorMessages::try_recv(txn, set)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) struct TributaryTransactionsFromDkgConfirmation;
|
||||||
|
impl TributaryTransactionsFromDkgConfirmation {
|
||||||
|
pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, tx: &Transaction) {
|
||||||
|
// If this set has yet to be retired, send this transaction
|
||||||
|
if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
|
||||||
|
_internal_db::TributaryTransactionsFromDkgConfirmation::send(txn, set, tx);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Transaction> {
|
||||||
|
_internal_db::TributaryTransactionsFromDkgConfirmation::try_recv(txn, set)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) struct RemoveParticipant;
|
||||||
|
impl RemoveParticipant {
|
||||||
|
pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, participant: Participant) {
|
||||||
|
// If this set has yet to be retired, send this transaction
|
||||||
|
if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
|
||||||
|
_internal_db::RemoveParticipant::send(txn, set, &participant);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Participant> {
|
||||||
|
_internal_db::RemoveParticipant::try_recv(txn, set)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
439
coordinator/src/dkg_confirmation.rs
Normal file
439
coordinator/src/dkg_confirmation.rs
Normal file
@@ -0,0 +1,439 @@
|
|||||||
|
use core::{ops::Deref, future::Future};
|
||||||
|
use std::{boxed::Box, collections::HashMap};
|
||||||
|
|
||||||
|
use zeroize::Zeroizing;
|
||||||
|
use rand_core::OsRng;
|
||||||
|
use ciphersuite::{group::GroupEncoding, *};
|
||||||
|
use dkg::{Participant, musig};
|
||||||
|
use frost_schnorrkel::{
|
||||||
|
frost::{curve::Ristretto, FrostError, sign::*},
|
||||||
|
Schnorrkel,
|
||||||
|
};
|
||||||
|
|
||||||
|
use serai_db::{DbTxn, Db as DbTrait};
|
||||||
|
|
||||||
|
use serai_client::{
|
||||||
|
primitives::SeraiAddress,
|
||||||
|
validator_sets::primitives::{ExternalValidatorSet, musig_context, set_keys_message},
|
||||||
|
};
|
||||||
|
|
||||||
|
use serai_task::{DoesNotError, ContinuallyRan};
|
||||||
|
|
||||||
|
use serai_coordinator_substrate::{NewSetInformation, Keys};
|
||||||
|
use serai_coordinator_tributary::{Transaction, DkgConfirmationMessages};
|
||||||
|
|
||||||
|
use crate::{KeysToConfirm, KeySet, TributaryTransactionsFromDkgConfirmation};
|
||||||
|
|
||||||
|
fn schnorrkel() -> Schnorrkel {
|
||||||
|
Schnorrkel::new(b"substrate") // TODO: Pull the constant for this
|
||||||
|
}
|
||||||
|
|
||||||
|
fn our_i(
|
||||||
|
set: &NewSetInformation,
|
||||||
|
key: &Zeroizing<<Ristretto as WrappedGroup>::F>,
|
||||||
|
data: &HashMap<Participant, Vec<u8>>,
|
||||||
|
) -> Participant {
|
||||||
|
let public = SeraiAddress((Ristretto::generator() * key.deref()).to_bytes());
|
||||||
|
|
||||||
|
let mut our_i = None;
|
||||||
|
for participant in data.keys() {
|
||||||
|
let validator_index = usize::from(u16::from(*participant) - 1);
|
||||||
|
let (validator, _weight) = set.validators[validator_index];
|
||||||
|
if validator == public {
|
||||||
|
our_i = Some(*participant);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
our_i.unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Take a HashMap of participations with non-contiguous Participants and convert them to a
|
||||||
|
// contiguous sequence.
|
||||||
|
//
|
||||||
|
// The input data is expected to not include our own data, which also won't be in the output data.
|
||||||
|
//
|
||||||
|
// Returns the mapping from the contiguous Participants to the original Participants.
|
||||||
|
fn make_contiguous<T>(
|
||||||
|
our_i: Participant,
|
||||||
|
mut data: HashMap<Participant, Vec<u8>>,
|
||||||
|
transform: impl Fn(Vec<u8>) -> std::io::Result<T>,
|
||||||
|
) -> Result<HashMap<Participant, T>, Participant> {
|
||||||
|
assert!(!data.contains_key(&our_i));
|
||||||
|
|
||||||
|
let mut ordered_participants = data.keys().copied().collect::<Vec<_>>();
|
||||||
|
ordered_participants.sort_by_key(|participant| u16::from(*participant));
|
||||||
|
|
||||||
|
let mut our_i = Some(our_i);
|
||||||
|
let mut contiguous = HashMap::new();
|
||||||
|
let mut i = 1;
|
||||||
|
for participant in ordered_participants {
|
||||||
|
// If this is the first participant after our own index, increment to account for our index
|
||||||
|
if let Some(our_i_value) = our_i {
|
||||||
|
if u16::from(participant) > u16::from(our_i_value) {
|
||||||
|
i += 1;
|
||||||
|
our_i = None;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let contiguous_index = Participant::new(i).unwrap();
|
||||||
|
let data = match transform(data.remove(&participant).unwrap()) {
|
||||||
|
Ok(data) => data,
|
||||||
|
Err(_) => Err(participant)?,
|
||||||
|
};
|
||||||
|
contiguous.insert(contiguous_index, data);
|
||||||
|
i += 1;
|
||||||
|
}
|
||||||
|
Ok(contiguous)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn handle_frost_error<T>(result: Result<T, FrostError>) -> Result<T, Participant> {
|
||||||
|
match &result {
|
||||||
|
Ok(_) => Ok(result.unwrap()),
|
||||||
|
Err(FrostError::InvalidPreprocess(participant) | FrostError::InvalidShare(participant)) => {
|
||||||
|
Err(*participant)
|
||||||
|
}
|
||||||
|
// All of these should be unreachable
|
||||||
|
Err(
|
||||||
|
FrostError::InternalError(_) |
|
||||||
|
FrostError::InvalidParticipant(_, _) |
|
||||||
|
FrostError::InvalidSigningSet(_) |
|
||||||
|
FrostError::InvalidParticipantQuantity(_, _) |
|
||||||
|
FrostError::DuplicatedParticipant(_) |
|
||||||
|
FrostError::MissingParticipant(_),
|
||||||
|
) => {
|
||||||
|
result.unwrap();
|
||||||
|
unreachable!("continued execution after unwrapping Result::Err");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
enum Signer {
|
||||||
|
Preprocess { attempt: u32, seed: CachedPreprocess, preprocess: [u8; 64] },
|
||||||
|
Share {
|
||||||
|
attempt: u32,
|
||||||
|
musig_validators: Vec<SeraiAddress>,
|
||||||
|
share: [u8; 32],
|
||||||
|
machine: Box<AlgorithmSignatureMachine<Ristretto, Schnorrkel>>,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Performs the DKG Confirmation protocol.
|
||||||
|
pub(crate) struct ConfirmDkgTask<CD: DbTrait, TD: DbTrait> {
|
||||||
|
db: CD,
|
||||||
|
|
||||||
|
set: NewSetInformation,
|
||||||
|
tributary_db: TD,
|
||||||
|
|
||||||
|
key: Zeroizing<<Ristretto as WrappedGroup>::F>,
|
||||||
|
signer: Option<Signer>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<CD: DbTrait, TD: DbTrait> ConfirmDkgTask<CD, TD> {
|
||||||
|
pub(crate) fn new(
|
||||||
|
db: CD,
|
||||||
|
set: NewSetInformation,
|
||||||
|
tributary_db: TD,
|
||||||
|
key: Zeroizing<<Ristretto as WrappedGroup>::F>,
|
||||||
|
) -> Self {
|
||||||
|
Self { db, set, tributary_db, key, signer: None }
|
||||||
|
}
|
||||||
|
|
||||||
|
fn slash(db: &mut CD, set: ExternalValidatorSet, validator: SeraiAddress) {
|
||||||
|
let mut txn = db.txn();
|
||||||
|
TributaryTransactionsFromDkgConfirmation::send(
|
||||||
|
&mut txn,
|
||||||
|
set,
|
||||||
|
&Transaction::RemoveParticipant { participant: validator, signed: Default::default() },
|
||||||
|
);
|
||||||
|
txn.commit();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn preprocess(
|
||||||
|
db: &mut CD,
|
||||||
|
set: ExternalValidatorSet,
|
||||||
|
attempt: u32,
|
||||||
|
key: Zeroizing<<Ristretto as WrappedGroup>::F>,
|
||||||
|
signer: &mut Option<Signer>,
|
||||||
|
) {
|
||||||
|
// Perform the preprocess
|
||||||
|
let public_key = Ristretto::generator() * key.deref();
|
||||||
|
let (machine, preprocess) = AlgorithmMachine::new(
|
||||||
|
schnorrkel(),
|
||||||
|
// We use a 1-of-1 Musig here as we don't know who will actually be in this Musig yet
|
||||||
|
musig(musig_context(set.into()), key, &[public_key]).unwrap(),
|
||||||
|
)
|
||||||
|
.preprocess(&mut OsRng);
|
||||||
|
// We take the preprocess so we can use it in a distinct machine with the actual Musig
|
||||||
|
// parameters
|
||||||
|
let seed = machine.cache();
|
||||||
|
|
||||||
|
let mut preprocess_bytes = [0u8; 64];
|
||||||
|
preprocess_bytes.copy_from_slice(&preprocess.serialize());
|
||||||
|
let preprocess = preprocess_bytes;
|
||||||
|
|
||||||
|
let mut txn = db.txn();
|
||||||
|
// If this attempt has already been preprocessed for, the Tributary will de-duplicate it
|
||||||
|
// This may mean the Tributary preprocess is distinct from ours, but we check for that later
|
||||||
|
TributaryTransactionsFromDkgConfirmation::send(
|
||||||
|
&mut txn,
|
||||||
|
set,
|
||||||
|
&Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed: Default::default() },
|
||||||
|
);
|
||||||
|
txn.commit();
|
||||||
|
|
||||||
|
*signer = Some(Signer::Preprocess { attempt, seed, preprocess });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<CD: DbTrait, TD: DbTrait> ContinuallyRan for ConfirmDkgTask<CD, TD> {
|
||||||
|
type Error = DoesNotError;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
|
async move {
|
||||||
|
let mut made_progress = false;
|
||||||
|
|
||||||
|
// If we were sent a key to set, create the signer for it
|
||||||
|
if self.signer.is_none() && KeysToConfirm::get(&self.db, self.set.set).is_some() {
|
||||||
|
// Create and publish the initial preprocess
|
||||||
|
Self::preprocess(&mut self.db, self.set.set, 0, self.key.clone(), &mut self.signer);
|
||||||
|
|
||||||
|
made_progress = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we have keys to confirm, handle all messages from the tributary
|
||||||
|
if let Some(key_pair) = KeysToConfirm::get(&self.db, self.set.set) {
|
||||||
|
// Handle all messages from the Tributary
|
||||||
|
loop {
|
||||||
|
let mut tributary_txn = self.tributary_db.txn();
|
||||||
|
let Some(msg) = DkgConfirmationMessages::try_recv(&mut tributary_txn, self.set.set)
|
||||||
|
else {
|
||||||
|
break;
|
||||||
|
};
|
||||||
|
|
||||||
|
match msg {
|
||||||
|
messages::sign::CoordinatorMessage::Reattempt {
|
||||||
|
id: messages::sign::SignId { attempt, .. },
|
||||||
|
} => {
|
||||||
|
// Create and publish the preprocess for the specified attempt
|
||||||
|
Self::preprocess(
|
||||||
|
&mut self.db,
|
||||||
|
self.set.set,
|
||||||
|
attempt,
|
||||||
|
self.key.clone(),
|
||||||
|
&mut self.signer,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
messages::sign::CoordinatorMessage::Preprocesses {
|
||||||
|
id: messages::sign::SignId { attempt, .. },
|
||||||
|
mut preprocesses,
|
||||||
|
} => {
|
||||||
|
// Confirm the preprocess we're expected to sign with is the one we locally have
|
||||||
|
// It may be different if we rebooted and made a second preprocess for this attempt
|
||||||
|
let Some(Signer::Preprocess { attempt: our_attempt, seed, preprocess }) =
|
||||||
|
self.signer.take()
|
||||||
|
else {
|
||||||
|
// If this message is not expected, commit the txn to drop it and move on
|
||||||
|
// At some point, we'll get a Reattempt and reset
|
||||||
|
tributary_txn.commit();
|
||||||
|
break;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Determine the MuSig key signed with
|
||||||
|
let musig_validators = {
|
||||||
|
let mut ordered_participants = preprocesses.keys().copied().collect::<Vec<_>>();
|
||||||
|
ordered_participants.sort_by_key(|participant| u16::from(*participant));
|
||||||
|
|
||||||
|
let mut res = vec![];
|
||||||
|
for participant in ordered_participants {
|
||||||
|
let (validator, _weight) =
|
||||||
|
self.set.validators[usize::from(u16::from(participant) - 1)];
|
||||||
|
res.push(validator);
|
||||||
|
}
|
||||||
|
res
|
||||||
|
};
|
||||||
|
|
||||||
|
let musig_public_keys = musig_validators
|
||||||
|
.iter()
|
||||||
|
.map(|key| {
|
||||||
|
Ristretto::read_G(&mut key.0.as_slice())
|
||||||
|
.expect("Serai validator had invalid public key")
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
|
let keys =
|
||||||
|
musig(musig_context(self.set.set.into()), self.key.clone(), &musig_public_keys)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
// Rebuild the machine
|
||||||
|
let (machine, preprocess_from_cache) =
|
||||||
|
AlgorithmSignMachine::from_cache(schnorrkel(), keys, seed);
|
||||||
|
assert_eq!(preprocess.as_slice(), preprocess_from_cache.serialize().as_slice());
|
||||||
|
|
||||||
|
// Ensure this is a consistent signing session
|
||||||
|
let our_i = our_i(&self.set, &self.key, &preprocesses);
|
||||||
|
let consistent = (attempt == our_attempt) &&
|
||||||
|
(preprocesses.remove(&our_i).unwrap().as_slice() == preprocess.as_slice());
|
||||||
|
if !consistent {
|
||||||
|
tributary_txn.commit();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reformat the preprocesses into the expected format for Musig
|
||||||
|
let preprocesses = match make_contiguous(our_i, preprocesses, |preprocess| {
|
||||||
|
machine.read_preprocess(&mut preprocess.as_slice())
|
||||||
|
}) {
|
||||||
|
Ok(preprocesses) => preprocesses,
|
||||||
|
// This yields the *original participant index*
|
||||||
|
Err(participant) => {
|
||||||
|
Self::slash(
|
||||||
|
&mut self.db,
|
||||||
|
self.set.set,
|
||||||
|
self.set.validators[usize::from(u16::from(participant) - 1)].0,
|
||||||
|
);
|
||||||
|
tributary_txn.commit();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Calculate our share
|
||||||
|
let (machine, share) = match handle_frost_error(
|
||||||
|
machine.sign(preprocesses, &set_keys_message(&self.set.set, &key_pair)),
|
||||||
|
) {
|
||||||
|
Ok((machine, share)) => (machine, share),
|
||||||
|
// This yields the *musig participant index*
|
||||||
|
Err(participant) => {
|
||||||
|
Self::slash(
|
||||||
|
&mut self.db,
|
||||||
|
self.set.set,
|
||||||
|
musig_validators[usize::from(u16::from(participant) - 1)],
|
||||||
|
);
|
||||||
|
tributary_txn.commit();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Send our share
|
||||||
|
let share = <[u8; 32]>::try_from(share.serialize()).unwrap();
|
||||||
|
let mut txn = self.db.txn();
|
||||||
|
TributaryTransactionsFromDkgConfirmation::send(
|
||||||
|
&mut txn,
|
||||||
|
self.set.set,
|
||||||
|
&Transaction::DkgConfirmationShare { attempt, share, signed: Default::default() },
|
||||||
|
);
|
||||||
|
txn.commit();
|
||||||
|
|
||||||
|
self.signer = Some(Signer::Share {
|
||||||
|
attempt,
|
||||||
|
musig_validators,
|
||||||
|
share,
|
||||||
|
machine: Box::new(machine),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
messages::sign::CoordinatorMessage::Shares {
|
||||||
|
id: messages::sign::SignId { attempt, .. },
|
||||||
|
mut shares,
|
||||||
|
} => {
|
||||||
|
let Some(Signer::Share { attempt: our_attempt, musig_validators, share, machine }) =
|
||||||
|
self.signer.take()
|
||||||
|
else {
|
||||||
|
tributary_txn.commit();
|
||||||
|
break;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Ensure this is a consistent signing session
|
||||||
|
let our_i = our_i(&self.set, &self.key, &shares);
|
||||||
|
let consistent = (attempt == our_attempt) &&
|
||||||
|
(shares.remove(&our_i).unwrap().as_slice() == share.as_slice());
|
||||||
|
if !consistent {
|
||||||
|
tributary_txn.commit();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reformat the shares into the expected format for Musig
|
||||||
|
let shares = match make_contiguous(our_i, shares, |share| {
|
||||||
|
machine.read_share(&mut share.as_slice())
|
||||||
|
}) {
|
||||||
|
Ok(shares) => shares,
|
||||||
|
// This yields the *original participant index*
|
||||||
|
Err(participant) => {
|
||||||
|
Self::slash(
|
||||||
|
&mut self.db,
|
||||||
|
self.set.set,
|
||||||
|
self.set.validators[usize::from(u16::from(participant) - 1)].0,
|
||||||
|
);
|
||||||
|
tributary_txn.commit();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
match handle_frost_error(machine.complete(shares)) {
|
||||||
|
Ok(signature) => {
|
||||||
|
// Create the bitvec of the participants
|
||||||
|
let mut signature_participants;
|
||||||
|
{
|
||||||
|
use bitvec::prelude::*;
|
||||||
|
signature_participants = bitvec![u8, Lsb0; 0; 0];
|
||||||
|
let mut i = 0;
|
||||||
|
for (validator, _) in &self.set.validators {
|
||||||
|
if Some(validator) == musig_validators.get(i) {
|
||||||
|
signature_participants.push(true);
|
||||||
|
i += 1;
|
||||||
|
} else {
|
||||||
|
signature_participants.push(false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This is safe to call multiple times as it'll just change which *valid*
|
||||||
|
// signature to publish
|
||||||
|
let mut txn = self.db.txn();
|
||||||
|
Keys::set(
|
||||||
|
&mut txn,
|
||||||
|
self.set.set,
|
||||||
|
key_pair.clone(),
|
||||||
|
signature_participants,
|
||||||
|
signature.into(),
|
||||||
|
);
|
||||||
|
txn.commit();
|
||||||
|
}
|
||||||
|
// This yields the *musig participant index*
|
||||||
|
Err(participant) => {
|
||||||
|
Self::slash(
|
||||||
|
&mut self.db,
|
||||||
|
self.set.set,
|
||||||
|
musig_validators[usize::from(u16::from(participant) - 1)],
|
||||||
|
);
|
||||||
|
tributary_txn.commit();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Because we successfully handled this message, note we made proress
|
||||||
|
made_progress = true;
|
||||||
|
tributary_txn.commit();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the key has been set on Serai
|
||||||
|
if KeysToConfirm::get(&self.db, self.set.set).is_some() &&
|
||||||
|
KeySet::get(&self.db, self.set.set).is_some()
|
||||||
|
{
|
||||||
|
// Take the keys to confirm so we never instantiate the signer again
|
||||||
|
let mut txn = self.db.txn();
|
||||||
|
KeysToConfirm::take(&mut txn, self.set.set);
|
||||||
|
KeySet::take(&mut txn, self.set.set);
|
||||||
|
txn.commit();
|
||||||
|
|
||||||
|
// Drop our own signer
|
||||||
|
// The task won't die until the Tributary does, but now it'll never do anything again
|
||||||
|
self.signer = None;
|
||||||
|
|
||||||
|
made_progress = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(made_progress)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -4,9 +4,10 @@ use std::{sync::Arc, collections::HashMap, time::Instant};
|
|||||||
use zeroize::{Zeroize, Zeroizing};
|
use zeroize::{Zeroize, Zeroizing};
|
||||||
use rand_core::{RngCore, OsRng};
|
use rand_core::{RngCore, OsRng};
|
||||||
|
|
||||||
|
use dalek_ff_group::Ristretto;
|
||||||
use ciphersuite::{
|
use ciphersuite::{
|
||||||
group::{ff::PrimeField, GroupEncoding},
|
group::{ff::PrimeField, GroupEncoding},
|
||||||
Ciphersuite, Ristretto,
|
*,
|
||||||
};
|
};
|
||||||
|
|
||||||
use borsh::BorshDeserialize;
|
use borsh::BorshDeserialize;
|
||||||
@@ -14,8 +15,8 @@ use borsh::BorshDeserialize;
|
|||||||
use tokio::sync::mpsc;
|
use tokio::sync::mpsc;
|
||||||
|
|
||||||
use serai_client::{
|
use serai_client::{
|
||||||
primitives::{NetworkId, PublicKey},
|
primitives::{ExternalNetworkId, PublicKey, SeraiAddress, Signature},
|
||||||
validator_sets::primitives::ValidatorSet,
|
validator_sets::primitives::{ExternalValidatorSet, KeyPair},
|
||||||
Serai,
|
Serai,
|
||||||
};
|
};
|
||||||
use message_queue::{Service, client::MessageQueue};
|
use message_queue::{Service, client::MessageQueue};
|
||||||
@@ -23,13 +24,17 @@ use message_queue::{Service, client::MessageQueue};
|
|||||||
use serai_task::{Task, TaskHandle, ContinuallyRan};
|
use serai_task::{Task, TaskHandle, ContinuallyRan};
|
||||||
|
|
||||||
use serai_cosign::{Faulted, SignedCosign, Cosigning};
|
use serai_cosign::{Faulted, SignedCosign, Cosigning};
|
||||||
use serai_coordinator_substrate::{CanonicalEventStream, EphemeralEventStream, SignSlashReport};
|
use serai_coordinator_substrate::{
|
||||||
use serai_coordinator_tributary::{Signed, Transaction, SubstrateBlockPlans};
|
CanonicalEventStream, EphemeralEventStream, SignSlashReport, SetKeysTask, SignedBatches,
|
||||||
|
PublishBatchTask, SlashReports, PublishSlashReportTask,
|
||||||
|
};
|
||||||
|
use serai_coordinator_tributary::{SigningProtocolRound, Signed, Transaction, SubstrateBlockPlans};
|
||||||
|
|
||||||
mod db;
|
mod db;
|
||||||
use db::*;
|
use db::*;
|
||||||
|
|
||||||
mod tributary;
|
mod tributary;
|
||||||
|
mod dkg_confirmation;
|
||||||
|
|
||||||
mod substrate;
|
mod substrate;
|
||||||
use substrate::SubstrateTask;
|
use substrate::SubstrateTask;
|
||||||
@@ -145,11 +150,24 @@ fn spawn_cosigning<D: serai_db::Db>(
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn handle_processor_messages(
|
async fn handle_network(
|
||||||
mut db: impl serai_db::Db,
|
mut db: impl serai_db::Db,
|
||||||
message_queue: Arc<MessageQueue>,
|
message_queue: Arc<MessageQueue>,
|
||||||
network: NetworkId,
|
serai: Arc<Serai>,
|
||||||
|
network: ExternalNetworkId,
|
||||||
) {
|
) {
|
||||||
|
// Spawn the task to publish batches for this network
|
||||||
|
{
|
||||||
|
let (publish_batch_task_def, publish_batch_task) = Task::new();
|
||||||
|
tokio::spawn(
|
||||||
|
PublishBatchTask::new(db.clone(), serai.clone(), network)
|
||||||
|
.continually_run(publish_batch_task_def, vec![]),
|
||||||
|
);
|
||||||
|
// Forget its handle so it always runs in the background
|
||||||
|
core::mem::forget(publish_batch_task);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle Processor messages
|
||||||
loop {
|
loop {
|
||||||
let (msg_id, msg) = {
|
let (msg_id, msg) = {
|
||||||
let msg = message_queue.next(Service::Processor(network)).await;
|
let msg = message_queue.next(Service::Processor(network)).await;
|
||||||
@@ -179,8 +197,8 @@ async fn handle_processor_messages(
|
|||||||
match msg {
|
match msg {
|
||||||
messages::ProcessorMessage::KeyGen(msg) => match msg {
|
messages::ProcessorMessage::KeyGen(msg) => match msg {
|
||||||
messages::key_gen::ProcessorMessage::Participation { session, participation } => {
|
messages::key_gen::ProcessorMessage::Participation { session, participation } => {
|
||||||
let set = ValidatorSet { network, session };
|
let set = ExternalValidatorSet { network, session };
|
||||||
TributaryTransactions::send(
|
TributaryTransactionsFromProcessorMessages::send(
|
||||||
&mut txn,
|
&mut txn,
|
||||||
set,
|
set,
|
||||||
&Transaction::DkgParticipation { participation, signed: Signed::default() },
|
&Transaction::DkgParticipation { participation, signed: Signed::default() },
|
||||||
@@ -190,45 +208,84 @@ async fn handle_processor_messages(
|
|||||||
session,
|
session,
|
||||||
substrate_key,
|
substrate_key,
|
||||||
network_key,
|
network_key,
|
||||||
} => todo!("TODO Transaction::DkgConfirmationPreprocess"),
|
} => {
|
||||||
messages::key_gen::ProcessorMessage::Blame { session, participant } => {
|
KeysToConfirm::set(
|
||||||
let set = ValidatorSet { network, session };
|
|
||||||
TributaryTransactions::send(
|
|
||||||
&mut txn,
|
&mut txn,
|
||||||
set,
|
ExternalValidatorSet { network, session },
|
||||||
&Transaction::RemoveParticipant {
|
&KeyPair(
|
||||||
participant: todo!("TODO"),
|
PublicKey::from_raw(substrate_key),
|
||||||
signed: Signed::default(),
|
network_key
|
||||||
},
|
.try_into()
|
||||||
|
.expect("generated a network key which exceeds the maximum key length"),
|
||||||
|
),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
messages::key_gen::ProcessorMessage::Blame { session, participant } => {
|
||||||
|
RemoveParticipant::send(&mut txn, ExternalValidatorSet { network, session }, participant);
|
||||||
|
}
|
||||||
},
|
},
|
||||||
messages::ProcessorMessage::Sign(msg) => match msg {
|
messages::ProcessorMessage::Sign(msg) => match msg {
|
||||||
messages::sign::ProcessorMessage::InvalidParticipant { session, participant } => {
|
messages::sign::ProcessorMessage::InvalidParticipant { session, participant } => {
|
||||||
let set = ValidatorSet { network, session };
|
RemoveParticipant::send(&mut txn, ExternalValidatorSet { network, session }, participant);
|
||||||
TributaryTransactions::send(
|
}
|
||||||
|
messages::sign::ProcessorMessage::Preprocesses { id, preprocesses } => {
|
||||||
|
let set = ExternalValidatorSet { network, session: id.session };
|
||||||
|
if id.attempt == 0 {
|
||||||
|
// Batches are declared by their intent to be signed
|
||||||
|
if let messages::sign::VariantSignId::Batch(hash) = id.id {
|
||||||
|
TributaryTransactionsFromProcessorMessages::send(
|
||||||
|
&mut txn,
|
||||||
|
set,
|
||||||
|
&Transaction::Batch { hash },
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
TributaryTransactionsFromProcessorMessages::send(
|
||||||
&mut txn,
|
&mut txn,
|
||||||
set,
|
set,
|
||||||
&Transaction::RemoveParticipant {
|
&Transaction::Sign {
|
||||||
participant: todo!("TODO"),
|
id: id.id,
|
||||||
|
attempt: id.attempt,
|
||||||
|
round: SigningProtocolRound::Preprocess,
|
||||||
|
data: preprocesses,
|
||||||
signed: Signed::default(),
|
signed: Signed::default(),
|
||||||
},
|
},
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
messages::sign::ProcessorMessage::Preprocesses { id, preprocesses } => {
|
messages::sign::ProcessorMessage::Shares { id, shares } => {
|
||||||
todo!("TODO Transaction::Batch + Transaction::Sign")
|
let set = ExternalValidatorSet { network, session: id.session };
|
||||||
|
TributaryTransactionsFromProcessorMessages::send(
|
||||||
|
&mut txn,
|
||||||
|
set,
|
||||||
|
&Transaction::Sign {
|
||||||
|
id: id.id,
|
||||||
|
attempt: id.attempt,
|
||||||
|
round: SigningProtocolRound::Share,
|
||||||
|
data: shares,
|
||||||
|
signed: Signed::default(),
|
||||||
|
},
|
||||||
|
);
|
||||||
}
|
}
|
||||||
messages::sign::ProcessorMessage::Shares { id, shares } => todo!("TODO Transaction::Sign"),
|
|
||||||
},
|
},
|
||||||
messages::ProcessorMessage::Coordinator(msg) => match msg {
|
messages::ProcessorMessage::Coordinator(msg) => match msg {
|
||||||
messages::coordinator::ProcessorMessage::CosignedBlock { cosign } => {
|
messages::coordinator::ProcessorMessage::CosignedBlock { cosign } => {
|
||||||
SignedCosigns::send(&mut txn, &cosign);
|
SignedCosigns::send(&mut txn, &cosign);
|
||||||
}
|
}
|
||||||
messages::coordinator::ProcessorMessage::SignedBatch { batch } => {
|
messages::coordinator::ProcessorMessage::SignedBatch { batch } => {
|
||||||
todo!("TODO PublishBatchTask")
|
SignedBatches::send(&mut txn, &batch);
|
||||||
}
|
}
|
||||||
messages::coordinator::ProcessorMessage::SignedSlashReport { session, signature } => {
|
messages::coordinator::ProcessorMessage::SignedSlashReport {
|
||||||
todo!("TODO PublishSlashReportTask")
|
session,
|
||||||
|
slash_report,
|
||||||
|
signature,
|
||||||
|
} => {
|
||||||
|
SlashReports::set(
|
||||||
|
&mut txn,
|
||||||
|
ExternalValidatorSet { network, session },
|
||||||
|
slash_report,
|
||||||
|
Signature::from(signature),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
messages::ProcessorMessage::Substrate(msg) => match msg {
|
messages::ProcessorMessage::Substrate(msg) => match msg {
|
||||||
@@ -241,9 +298,9 @@ async fn handle_processor_messages(
|
|||||||
.push(plan.transaction_plan_id);
|
.push(plan.transaction_plan_id);
|
||||||
}
|
}
|
||||||
for (session, plans) in by_session {
|
for (session, plans) in by_session {
|
||||||
let set = ValidatorSet { network, session };
|
let set = ExternalValidatorSet { network, session };
|
||||||
SubstrateBlockPlans::set(&mut txn, set, block, &plans);
|
SubstrateBlockPlans::set(&mut txn, set, block, &plans);
|
||||||
TributaryTransactions::send(
|
TributaryTransactionsFromProcessorMessages::send(
|
||||||
&mut txn,
|
&mut txn,
|
||||||
set,
|
set,
|
||||||
&Transaction::SubstrateBlock { hash: block },
|
&Transaction::SubstrateBlock { hash: block },
|
||||||
@@ -295,7 +352,7 @@ async fn main() {
|
|||||||
let mut key_bytes = [0; 32];
|
let mut key_bytes = [0; 32];
|
||||||
key_bytes.copy_from_slice(&key_vec);
|
key_bytes.copy_from_slice(&key_vec);
|
||||||
key_vec.zeroize();
|
key_vec.zeroize();
|
||||||
let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::from_repr(key_bytes).unwrap());
|
let key = Zeroizing::new(<Ristretto as WrappedGroup>::F::from_repr(key_bytes).unwrap());
|
||||||
key_bytes.zeroize();
|
key_bytes.zeroize();
|
||||||
key
|
key
|
||||||
};
|
};
|
||||||
@@ -309,10 +366,16 @@ async fn main() {
|
|||||||
// Cleanup all historic Tributaries
|
// Cleanup all historic Tributaries
|
||||||
while let Some(to_cleanup) = TributaryCleanup::try_recv(&mut txn) {
|
while let Some(to_cleanup) = TributaryCleanup::try_recv(&mut txn) {
|
||||||
prune_tributary_db(to_cleanup);
|
prune_tributary_db(to_cleanup);
|
||||||
|
// Remove the keys to confirm for this network
|
||||||
|
KeysToConfirm::take(&mut txn, to_cleanup);
|
||||||
|
KeySet::take(&mut txn, to_cleanup);
|
||||||
// Drain the cosign intents created for this set
|
// Drain the cosign intents created for this set
|
||||||
while !Cosigning::<Db>::intended_cosigns(&mut txn, to_cleanup).is_empty() {}
|
while !Cosigning::<Db>::intended_cosigns(&mut txn, to_cleanup).is_empty() {}
|
||||||
// Drain the transactions to publish for this set
|
// Drain the transactions to publish for this set
|
||||||
while TributaryTransactions::try_recv(&mut txn, to_cleanup).is_some() {}
|
while TributaryTransactionsFromProcessorMessages::try_recv(&mut txn, to_cleanup).is_some() {}
|
||||||
|
while TributaryTransactionsFromDkgConfirmation::try_recv(&mut txn, to_cleanup).is_some() {}
|
||||||
|
// Drain the participants to remove for this set
|
||||||
|
while RemoveParticipant::try_recv(&mut txn, to_cleanup).is_some() {}
|
||||||
// Remove the SignSlashReport notification
|
// Remove the SignSlashReport notification
|
||||||
SignSlashReport::try_recv(&mut txn, to_cleanup);
|
SignSlashReport::try_recv(&mut txn, to_cleanup);
|
||||||
}
|
}
|
||||||
@@ -376,7 +439,7 @@ async fn main() {
|
|||||||
EphemeralEventStream::new(
|
EphemeralEventStream::new(
|
||||||
db.clone(),
|
db.clone(),
|
||||||
serai.clone(),
|
serai.clone(),
|
||||||
PublicKey::from_raw((<Ristretto as Ciphersuite>::generator() * serai_key.deref()).to_bytes()),
|
SeraiAddress((<Ristretto as WrappedGroup>::generator() * serai_key.deref()).to_bytes()),
|
||||||
)
|
)
|
||||||
.continually_run(substrate_ephemeral_task_def, vec![substrate_task]),
|
.continually_run(substrate_ephemeral_task_def, vec![substrate_task]),
|
||||||
);
|
);
|
||||||
@@ -417,12 +480,29 @@ async fn main() {
|
|||||||
.continually_run(substrate_task_def, vec![]),
|
.continually_run(substrate_task_def, vec![]),
|
||||||
);
|
);
|
||||||
|
|
||||||
// Handle all of the Processors' messages
|
// Handle each of the networks
|
||||||
for network in serai_client::primitives::NETWORKS {
|
for network in serai_client::primitives::EXTERNAL_NETWORKS {
|
||||||
if network == NetworkId::Serai {
|
tokio::spawn(handle_network(db.clone(), message_queue.clone(), serai.clone(), network));
|
||||||
continue;
|
}
|
||||||
}
|
|
||||||
tokio::spawn(handle_processor_messages(db.clone(), message_queue.clone(), network));
|
// Spawn the task to set keys
|
||||||
|
{
|
||||||
|
let (set_keys_task_def, set_keys_task) = Task::new();
|
||||||
|
tokio::spawn(
|
||||||
|
SetKeysTask::new(db.clone(), serai.clone()).continually_run(set_keys_task_def, vec![]),
|
||||||
|
);
|
||||||
|
// Forget its handle so it always runs in the background
|
||||||
|
core::mem::forget(set_keys_task);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Spawn the task to publish slash reports
|
||||||
|
{
|
||||||
|
let (publish_slash_report_task_def, publish_slash_report_task) = Task::new();
|
||||||
|
tokio::spawn(
|
||||||
|
PublishSlashReportTask::new(db, serai).continually_run(publish_slash_report_task_def, vec![]),
|
||||||
|
);
|
||||||
|
// Always have this run in the background
|
||||||
|
core::mem::forget(publish_slash_report_task);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Run the spawned tasks ad-infinitum
|
// Run the spawned tasks ad-infinitum
|
||||||
|
|||||||
@@ -3,13 +3,14 @@ use std::sync::Arc;
|
|||||||
|
|
||||||
use zeroize::Zeroizing;
|
use zeroize::Zeroizing;
|
||||||
|
|
||||||
use ciphersuite::{Ciphersuite, Ristretto};
|
use ciphersuite::*;
|
||||||
|
use dalek_ff_group::Ristretto;
|
||||||
|
|
||||||
use tokio::sync::mpsc;
|
use tokio::sync::mpsc;
|
||||||
|
|
||||||
use serai_db::{DbTxn, Db as DbTrait};
|
use serai_db::{DbTxn, Db as DbTrait};
|
||||||
|
|
||||||
use serai_client::validator_sets::primitives::{Session, ValidatorSet};
|
use serai_client::validator_sets::primitives::{Session, ExternalValidatorSet};
|
||||||
use message_queue::{Service, Metadata, client::MessageQueue};
|
use message_queue::{Service, Metadata, client::MessageQueue};
|
||||||
|
|
||||||
use tributary_sdk::Tributary;
|
use tributary_sdk::Tributary;
|
||||||
@@ -19,16 +20,16 @@ use serai_task::ContinuallyRan;
|
|||||||
use serai_coordinator_tributary::Transaction;
|
use serai_coordinator_tributary::Transaction;
|
||||||
use serai_coordinator_p2p::P2p;
|
use serai_coordinator_p2p::P2p;
|
||||||
|
|
||||||
use crate::Db;
|
use crate::{Db, KeySet};
|
||||||
|
|
||||||
pub(crate) struct SubstrateTask<P: P2p> {
|
pub(crate) struct SubstrateTask<P: P2p> {
|
||||||
pub(crate) serai_key: Zeroizing<<Ristretto as Ciphersuite>::F>,
|
pub(crate) serai_key: Zeroizing<<Ristretto as WrappedGroup>::F>,
|
||||||
pub(crate) db: Db,
|
pub(crate) db: Db,
|
||||||
pub(crate) message_queue: Arc<MessageQueue>,
|
pub(crate) message_queue: Arc<MessageQueue>,
|
||||||
pub(crate) p2p: P,
|
pub(crate) p2p: P,
|
||||||
pub(crate) p2p_add_tributary:
|
pub(crate) p2p_add_tributary:
|
||||||
mpsc::UnboundedSender<(ValidatorSet, Tributary<Db, Transaction, P>)>,
|
mpsc::UnboundedSender<(ExternalValidatorSet, Tributary<Db, Transaction, P>)>,
|
||||||
pub(crate) p2p_retire_tributary: mpsc::UnboundedSender<ValidatorSet>,
|
pub(crate) p2p_retire_tributary: mpsc::UnboundedSender<ExternalValidatorSet>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
|
impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
|
||||||
@@ -38,7 +39,7 @@ impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
|
|||||||
let mut made_progress = false;
|
let mut made_progress = false;
|
||||||
|
|
||||||
// Handle the Canonical events
|
// Handle the Canonical events
|
||||||
for network in serai_client::primitives::NETWORKS {
|
for network in serai_client::primitives::EXTERNAL_NETWORKS {
|
||||||
loop {
|
loop {
|
||||||
let mut txn = self.db.txn();
|
let mut txn = self.db.txn();
|
||||||
let Some(msg) = serai_coordinator_substrate::Canonical::try_recv(&mut txn, network)
|
let Some(msg) = serai_coordinator_substrate::Canonical::try_recv(&mut txn, network)
|
||||||
@@ -47,8 +48,9 @@ impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
|
|||||||
};
|
};
|
||||||
|
|
||||||
match msg {
|
match msg {
|
||||||
// TODO: Stop trying to confirm the DKG
|
messages::substrate::CoordinatorMessage::SetKeys { session, .. } => {
|
||||||
messages::substrate::CoordinatorMessage::SetKeys { .. } => todo!("TODO"),
|
KeySet::set(&mut txn, ExternalValidatorSet { network, session }, &());
|
||||||
|
}
|
||||||
messages::substrate::CoordinatorMessage::SlashesReported { session } => {
|
messages::substrate::CoordinatorMessage::SlashesReported { session } => {
|
||||||
let prior_retired = crate::db::RetiredTributary::get(&txn, network);
|
let prior_retired = crate::db::RetiredTributary::get(&txn, network);
|
||||||
let next_to_be_retired =
|
let next_to_be_retired =
|
||||||
@@ -57,7 +59,7 @@ impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
|
|||||||
crate::db::RetiredTributary::set(&mut txn, network, &session);
|
crate::db::RetiredTributary::set(&mut txn, network, &session);
|
||||||
self
|
self
|
||||||
.p2p_retire_tributary
|
.p2p_retire_tributary
|
||||||
.send(ValidatorSet { network, session })
|
.send(ExternalValidatorSet { network, session })
|
||||||
.expect("p2p retire_tributary channel dropped?");
|
.expect("p2p retire_tributary channel dropped?");
|
||||||
}
|
}
|
||||||
messages::substrate::CoordinatorMessage::Block { .. } => {}
|
messages::substrate::CoordinatorMessage::Block { .. } => {}
|
||||||
@@ -107,7 +109,10 @@ impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
|
|||||||
*/
|
*/
|
||||||
crate::db::TributaryCleanup::send(
|
crate::db::TributaryCleanup::send(
|
||||||
&mut txn,
|
&mut txn,
|
||||||
&ValidatorSet { network: new_set.set.network, session: Session(historic_session) },
|
&ExternalValidatorSet {
|
||||||
|
network: new_set.set.network,
|
||||||
|
session: Session(historic_session),
|
||||||
|
},
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -4,14 +4,15 @@ use std::sync::Arc;
|
|||||||
use zeroize::Zeroizing;
|
use zeroize::Zeroizing;
|
||||||
use rand_core::OsRng;
|
use rand_core::OsRng;
|
||||||
use blake2::{digest::typenum::U32, Digest, Blake2s};
|
use blake2::{digest::typenum::U32, Digest, Blake2s};
|
||||||
use ciphersuite::{Ciphersuite, Ristretto};
|
use ciphersuite::*;
|
||||||
|
use dalek_ff_group::Ristretto;
|
||||||
|
|
||||||
use tokio::sync::mpsc;
|
use tokio::sync::mpsc;
|
||||||
|
|
||||||
use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel};
|
use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel};
|
||||||
|
|
||||||
use scale::Encode;
|
use scale::Encode;
|
||||||
use serai_client::validator_sets::primitives::ValidatorSet;
|
use serai_client::validator_sets::primitives::ExternalValidatorSet;
|
||||||
|
|
||||||
use tributary_sdk::{TransactionKind, TransactionError, ProvidedError, TransactionTrait, Tributary};
|
use tributary_sdk::{TransactionKind, TransactionError, ProvidedError, TransactionTrait, Tributary};
|
||||||
|
|
||||||
@@ -21,14 +22,25 @@ use message_queue::{Service, Metadata, client::MessageQueue};
|
|||||||
|
|
||||||
use serai_cosign::{Faulted, CosignIntent, Cosigning};
|
use serai_cosign::{Faulted, CosignIntent, Cosigning};
|
||||||
use serai_coordinator_substrate::{NewSetInformation, SignSlashReport};
|
use serai_coordinator_substrate::{NewSetInformation, SignSlashReport};
|
||||||
use serai_coordinator_tributary::{Transaction, ProcessorMessages, CosignIntents, ScanTributaryTask};
|
use serai_coordinator_tributary::{
|
||||||
|
Topic, Transaction, ProcessorMessages, CosignIntents, RecognizedTopics, ScanTributaryTask,
|
||||||
|
};
|
||||||
use serai_coordinator_p2p::P2p;
|
use serai_coordinator_p2p::P2p;
|
||||||
|
|
||||||
use crate::{Db, TributaryTransactions};
|
use crate::{
|
||||||
|
Db, TributaryTransactionsFromProcessorMessages, TributaryTransactionsFromDkgConfirmation,
|
||||||
|
RemoveParticipant, dkg_confirmation::ConfirmDkgTask,
|
||||||
|
};
|
||||||
|
|
||||||
|
create_db! {
|
||||||
|
Coordinator {
|
||||||
|
PublishOnRecognition: (set: ExternalValidatorSet, topic: Topic) -> Transaction,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
db_channel! {
|
db_channel! {
|
||||||
Coordinator {
|
Coordinator {
|
||||||
PendingCosigns: (set: ValidatorSet) -> CosignIntent,
|
PendingCosigns: (set: ExternalValidatorSet) -> CosignIntent,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -37,7 +49,7 @@ db_channel! {
|
|||||||
/// This is not a well-designed function. This is specific to the context in which its called,
|
/// This is not a well-designed function. This is specific to the context in which its called,
|
||||||
/// within this file. It should only be considered an internal helper for this domain alone.
|
/// within this file. It should only be considered an internal helper for this domain alone.
|
||||||
async fn provide_transaction<TD: DbTrait, P: P2p>(
|
async fn provide_transaction<TD: DbTrait, P: P2p>(
|
||||||
set: ValidatorSet,
|
set: ExternalValidatorSet,
|
||||||
tributary: &Tributary<TD, Transaction, P>,
|
tributary: &Tributary<TD, Transaction, P>,
|
||||||
tx: Transaction,
|
tx: Transaction,
|
||||||
) {
|
) {
|
||||||
@@ -56,9 +68,7 @@ async fn provide_transaction<TD: DbTrait, P: P2p>(
|
|||||||
// advancing
|
// advancing
|
||||||
Err(ProvidedError::LocalMismatchesOnChain) => loop {
|
Err(ProvidedError::LocalMismatchesOnChain) => loop {
|
||||||
log::error!(
|
log::error!(
|
||||||
"Tributary {:?} was supposed to provide {:?} but peers disagree, halting Tributary",
|
"Tributary {set:?} was supposed to provide {tx:?} but peers disagree, halting Tributary",
|
||||||
set,
|
|
||||||
tx,
|
|
||||||
);
|
);
|
||||||
// Print this every five minutes as this does need to be handled
|
// Print this every five minutes as this does need to be handled
|
||||||
tokio::time::sleep(Duration::from_secs(5 * 60)).await;
|
tokio::time::sleep(Duration::from_secs(5 * 60)).await;
|
||||||
@@ -147,13 +157,102 @@ impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Adds all of the transactions sent via `TributaryTransactions`.
|
#[must_use]
|
||||||
|
async fn add_signed_unsigned_transaction<TD: DbTrait, P: P2p>(
|
||||||
|
tributary: &Tributary<TD, Transaction, P>,
|
||||||
|
key: &Zeroizing<<Ristretto as WrappedGroup>::F>,
|
||||||
|
mut tx: Transaction,
|
||||||
|
) -> bool {
|
||||||
|
// If this is a signed transaction, sign it
|
||||||
|
if matches!(tx.kind(), TransactionKind::Signed(_, _)) {
|
||||||
|
tx.sign(&mut OsRng, tributary.genesis(), key);
|
||||||
|
}
|
||||||
|
|
||||||
|
let res = tributary.add_transaction(tx.clone()).await;
|
||||||
|
match &res {
|
||||||
|
// Fresh publication, already published
|
||||||
|
Ok(true | false) => {}
|
||||||
|
Err(
|
||||||
|
TransactionError::TooLargeTransaction |
|
||||||
|
TransactionError::InvalidSigner |
|
||||||
|
TransactionError::InvalidSignature |
|
||||||
|
TransactionError::InvalidContent,
|
||||||
|
) => {
|
||||||
|
panic!("created an invalid transaction, tx: {tx:?}, err: {res:?}");
|
||||||
|
}
|
||||||
|
// InvalidNonce may be out-of-order TXs, not invalid ones, but we only create nonce #n+1 after
|
||||||
|
// on-chain inclusion of the TX with nonce #n, so it is invalid within our context unless the
|
||||||
|
// issue is this transaction was already included on-chain
|
||||||
|
Err(TransactionError::InvalidNonce) => {
|
||||||
|
let TransactionKind::Signed(order, signed) = tx.kind() else {
|
||||||
|
panic!("non-Signed transaction had InvalidNonce");
|
||||||
|
};
|
||||||
|
let next_nonce = tributary
|
||||||
|
.next_nonce(&signed.signer, &order)
|
||||||
|
.await
|
||||||
|
.expect("signer who is a present validator didn't have a nonce");
|
||||||
|
assert!(next_nonce != signed.nonce);
|
||||||
|
// We're publishing an old transaction
|
||||||
|
if next_nonce > signed.nonce {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
panic!("nonce in transaction wasn't contiguous with nonce on-chain");
|
||||||
|
}
|
||||||
|
// We've published too many transactions recently
|
||||||
|
Err(TransactionError::TooManyInMempool) => {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
// This isn't a Provided transaction so this should never be hit
|
||||||
|
Err(TransactionError::ProvidedAddedToMempool) => unreachable!(),
|
||||||
|
}
|
||||||
|
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn add_with_recognition_check<TD: DbTrait, P: P2p>(
|
||||||
|
set: ExternalValidatorSet,
|
||||||
|
tributary_db: &mut TD,
|
||||||
|
tributary: &Tributary<TD, Transaction, P>,
|
||||||
|
key: &Zeroizing<<Ristretto as WrappedGroup>::F>,
|
||||||
|
tx: Transaction,
|
||||||
|
) -> bool {
|
||||||
|
let kind = tx.kind();
|
||||||
|
match kind {
|
||||||
|
TransactionKind::Provided(_) => provide_transaction(set, tributary, tx).await,
|
||||||
|
TransactionKind::Unsigned | TransactionKind::Signed(_, _) => {
|
||||||
|
// If this is a transaction with signing data, check the topic is recognized before
|
||||||
|
// publishing
|
||||||
|
let topic = tx.topic();
|
||||||
|
let still_requires_recognition = if let Some(topic) = topic {
|
||||||
|
(topic.requires_recognition() && (!RecognizedTopics::recognized(tributary_db, set, topic)))
|
||||||
|
.then_some(topic)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
if let Some(topic) = still_requires_recognition {
|
||||||
|
// Queue the transaction until the topic is recognized
|
||||||
|
// We use the Tributary DB for this so it's cleaned up when the Tributary DB is
|
||||||
|
let mut tributary_txn = tributary_db.txn();
|
||||||
|
PublishOnRecognition::set(&mut tributary_txn, set, topic, &tx);
|
||||||
|
tributary_txn.commit();
|
||||||
|
} else {
|
||||||
|
// Actually add the transaction
|
||||||
|
if !add_signed_unsigned_transaction(tributary, key, tx).await {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Adds all of the transactions sent via `TributaryTransactionsFromProcessorMessages`.
|
||||||
pub(crate) struct AddTributaryTransactionsTask<CD: DbTrait, TD: DbTrait, P: P2p> {
|
pub(crate) struct AddTributaryTransactionsTask<CD: DbTrait, TD: DbTrait, P: P2p> {
|
||||||
db: CD,
|
db: CD,
|
||||||
tributary_db: TD,
|
tributary_db: TD,
|
||||||
tributary: Tributary<TD, Transaction, P>,
|
tributary: Tributary<TD, Transaction, P>,
|
||||||
set: ValidatorSet,
|
set: NewSetInformation,
|
||||||
key: Zeroizing<<Ristretto as Ciphersuite>::F>,
|
key: Zeroizing<<Ristretto as WrappedGroup>::F>,
|
||||||
}
|
}
|
||||||
impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for AddTributaryTransactionsTask<CD, TD, P> {
|
impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for AddTributaryTransactionsTask<CD, TD, P> {
|
||||||
type Error = DoesNotError;
|
type Error = DoesNotError;
|
||||||
@@ -161,49 +260,87 @@ impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for AddTributaryTransactio
|
|||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
let mut made_progress = false;
|
let mut made_progress = false;
|
||||||
|
|
||||||
|
// Provide/add all transactions sent our way
|
||||||
loop {
|
loop {
|
||||||
let mut txn = self.db.txn();
|
let mut txn = self.db.txn();
|
||||||
let Some(mut tx) = TributaryTransactions::try_recv(&mut txn, self.set) else { break };
|
let Some(tx) = TributaryTransactionsFromDkgConfirmation::try_recv(&mut txn, self.set.set)
|
||||||
|
else {
|
||||||
|
break;
|
||||||
|
};
|
||||||
|
|
||||||
let kind = tx.kind();
|
if !add_with_recognition_check(
|
||||||
match kind {
|
self.set.set,
|
||||||
TransactionKind::Provided(_) => provide_transaction(self.set, &self.tributary, tx).await,
|
&mut self.tributary_db,
|
||||||
TransactionKind::Unsigned | TransactionKind::Signed(_, _) => {
|
&self.tributary,
|
||||||
// If this is a signed transaction, sign it
|
&self.key,
|
||||||
if matches!(kind, TransactionKind::Signed(_, _)) {
|
tx,
|
||||||
tx.sign(&mut OsRng, self.tributary.genesis(), &self.key);
|
)
|
||||||
}
|
.await
|
||||||
|
{
|
||||||
// Actually add the transaction
|
break;
|
||||||
// TODO: If this is a preprocess, make sure the topic has been recognized
|
|
||||||
let res = self.tributary.add_transaction(tx.clone()).await;
|
|
||||||
match &res {
|
|
||||||
// Fresh publication, already published
|
|
||||||
Ok(true | false) => {}
|
|
||||||
Err(
|
|
||||||
TransactionError::TooLargeTransaction |
|
|
||||||
TransactionError::InvalidSigner |
|
|
||||||
TransactionError::InvalidNonce |
|
|
||||||
TransactionError::InvalidSignature |
|
|
||||||
TransactionError::InvalidContent,
|
|
||||||
) => {
|
|
||||||
panic!("created an invalid transaction, tx: {tx:?}, err: {res:?}");
|
|
||||||
}
|
|
||||||
// We've published too many transactions recently
|
|
||||||
// Drop this txn to try to publish it again later on a future iteration
|
|
||||||
Err(TransactionError::TooManyInMempool) => {
|
|
||||||
drop(txn);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
// This isn't a Provided transaction so this should never be hit
|
|
||||||
Err(TransactionError::ProvidedAddedToMempool) => unreachable!(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
made_progress = true;
|
made_progress = true;
|
||||||
txn.commit();
|
txn.commit();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
loop {
|
||||||
|
let mut txn = self.db.txn();
|
||||||
|
let Some(tx) = TributaryTransactionsFromProcessorMessages::try_recv(&mut txn, self.set.set)
|
||||||
|
else {
|
||||||
|
break;
|
||||||
|
};
|
||||||
|
|
||||||
|
if !add_with_recognition_check(
|
||||||
|
self.set.set,
|
||||||
|
&mut self.tributary_db,
|
||||||
|
&self.tributary,
|
||||||
|
&self.key,
|
||||||
|
tx,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
made_progress = true;
|
||||||
|
txn.commit();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Provide/add all transactions due to newly recognized topics
|
||||||
|
loop {
|
||||||
|
let mut tributary_txn = self.tributary_db.txn();
|
||||||
|
let Some(topic) =
|
||||||
|
RecognizedTopics::try_recv_topic_requiring_recognition(&mut tributary_txn, self.set.set)
|
||||||
|
else {
|
||||||
|
break;
|
||||||
|
};
|
||||||
|
if let Some(tx) = PublishOnRecognition::take(&mut tributary_txn, self.set.set, topic) {
|
||||||
|
if !add_signed_unsigned_transaction(&self.tributary, &self.key, tx).await {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
made_progress = true;
|
||||||
|
tributary_txn.commit();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Publish any participant removals
|
||||||
|
loop {
|
||||||
|
let mut txn = self.db.txn();
|
||||||
|
let Some(participant) = RemoveParticipant::try_recv(&mut txn, self.set.set) else { break };
|
||||||
|
let tx = Transaction::RemoveParticipant {
|
||||||
|
participant: self.set.participant_indexes_reverse_lookup[&participant],
|
||||||
|
signed: Default::default(),
|
||||||
|
};
|
||||||
|
if !add_signed_unsigned_transaction(&self.tributary, &self.key, tx).await {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
made_progress = true;
|
||||||
|
txn.commit();
|
||||||
|
}
|
||||||
|
|
||||||
Ok(made_progress)
|
Ok(made_progress)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -212,7 +349,7 @@ impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for AddTributaryTransactio
|
|||||||
/// Takes the messages from ScanTributaryTask and publishes them to the message-queue.
|
/// Takes the messages from ScanTributaryTask and publishes them to the message-queue.
|
||||||
pub(crate) struct TributaryProcessorMessagesTask<TD: DbTrait> {
|
pub(crate) struct TributaryProcessorMessagesTask<TD: DbTrait> {
|
||||||
tributary_db: TD,
|
tributary_db: TD,
|
||||||
set: ValidatorSet,
|
set: ExternalValidatorSet,
|
||||||
message_queue: Arc<MessageQueue>,
|
message_queue: Arc<MessageQueue>,
|
||||||
}
|
}
|
||||||
impl<TD: DbTrait> ContinuallyRan for TributaryProcessorMessagesTask<TD> {
|
impl<TD: DbTrait> ContinuallyRan for TributaryProcessorMessagesTask<TD> {
|
||||||
@@ -245,7 +382,7 @@ pub(crate) struct SignSlashReportTask<CD: DbTrait, TD: DbTrait, P: P2p> {
|
|||||||
tributary_db: TD,
|
tributary_db: TD,
|
||||||
tributary: Tributary<TD, Transaction, P>,
|
tributary: Tributary<TD, Transaction, P>,
|
||||||
set: NewSetInformation,
|
set: NewSetInformation,
|
||||||
key: Zeroizing<<Ristretto as Ciphersuite>::F>,
|
key: Zeroizing<<Ristretto as WrappedGroup>::F>,
|
||||||
}
|
}
|
||||||
impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for SignSlashReportTask<CD, TD, P> {
|
impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for SignSlashReportTask<CD, TD, P> {
|
||||||
type Error = DoesNotError;
|
type Error = DoesNotError;
|
||||||
@@ -292,7 +429,7 @@ impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for SignSlashReportTask<CD
|
|||||||
/// Run the scan task whenever the Tributary adds a new block.
|
/// Run the scan task whenever the Tributary adds a new block.
|
||||||
async fn scan_on_new_block<CD: DbTrait, TD: DbTrait, P: P2p>(
|
async fn scan_on_new_block<CD: DbTrait, TD: DbTrait, P: P2p>(
|
||||||
db: CD,
|
db: CD,
|
||||||
set: ValidatorSet,
|
set: ExternalValidatorSet,
|
||||||
tributary: Tributary<TD, Transaction, P>,
|
tributary: Tributary<TD, Transaction, P>,
|
||||||
scan_tributary_task: TaskHandle,
|
scan_tributary_task: TaskHandle,
|
||||||
tasks_to_keep_alive: Vec<TaskHandle>,
|
tasks_to_keep_alive: Vec<TaskHandle>,
|
||||||
@@ -323,15 +460,17 @@ async fn scan_on_new_block<CD: DbTrait, TD: DbTrait, P: P2p>(
|
|||||||
/// - Spawn the ScanTributaryTask
|
/// - Spawn the ScanTributaryTask
|
||||||
/// - Spawn the ProvideCosignCosignedTransactionsTask
|
/// - Spawn the ProvideCosignCosignedTransactionsTask
|
||||||
/// - Spawn the TributaryProcessorMessagesTask
|
/// - Spawn the TributaryProcessorMessagesTask
|
||||||
|
/// - Spawn the AddTributaryTransactionsTask
|
||||||
|
/// - Spawn the ConfirmDkgTask
|
||||||
/// - Spawn the SignSlashReportTask
|
/// - Spawn the SignSlashReportTask
|
||||||
/// - Iterate the scan task whenever a new block occurs (not just on the standard interval)
|
/// - Iterate the scan task whenever a new block occurs (not just on the standard interval)
|
||||||
pub(crate) async fn spawn_tributary<P: P2p>(
|
pub(crate) async fn spawn_tributary<P: P2p>(
|
||||||
db: Db,
|
db: Db,
|
||||||
message_queue: Arc<MessageQueue>,
|
message_queue: Arc<MessageQueue>,
|
||||||
p2p: P,
|
p2p: P,
|
||||||
p2p_add_tributary: &mpsc::UnboundedSender<(ValidatorSet, Tributary<Db, Transaction, P>)>,
|
p2p_add_tributary: &mpsc::UnboundedSender<(ExternalValidatorSet, Tributary<Db, Transaction, P>)>,
|
||||||
set: NewSetInformation,
|
set: NewSetInformation,
|
||||||
serai_key: Zeroizing<<Ristretto as Ciphersuite>::F>,
|
serai_key: Zeroizing<<Ristretto as WrappedGroup>::F>,
|
||||||
) {
|
) {
|
||||||
// Don't spawn retired Tributaries
|
// Don't spawn retired Tributaries
|
||||||
if crate::db::RetiredTributary::get(&db, set.set.network).map(|session| session.0) >=
|
if crate::db::RetiredTributary::get(&db, set.set.network).map(|session| session.0) >=
|
||||||
@@ -351,7 +490,7 @@ pub(crate) async fn spawn_tributary<P: P2p>(
|
|||||||
|
|
||||||
let mut tributary_validators = Vec::with_capacity(set.validators.len());
|
let mut tributary_validators = Vec::with_capacity(set.validators.len());
|
||||||
for (validator, weight) in set.validators.iter().copied() {
|
for (validator, weight) in set.validators.iter().copied() {
|
||||||
let validator_key = <Ristretto as Ciphersuite>::read_G(&mut validator.0.as_slice())
|
let validator_key = <Ristretto as GroupIo>::read_G(&mut validator.0.as_slice())
|
||||||
.expect("Serai validator had an invalid public key");
|
.expect("Serai validator had an invalid public key");
|
||||||
let weight = u64::from(weight);
|
let weight = u64::from(weight);
|
||||||
tributary_validators.push((validator_key, weight));
|
tributary_validators.push((validator_key, weight));
|
||||||
@@ -403,38 +542,45 @@ pub(crate) async fn spawn_tributary<P: P2p>(
|
|||||||
// Spawn the scan task
|
// Spawn the scan task
|
||||||
let (scan_tributary_task_def, scan_tributary_task) = Task::new();
|
let (scan_tributary_task_def, scan_tributary_task) = Task::new();
|
||||||
tokio::spawn(
|
tokio::spawn(
|
||||||
ScanTributaryTask::<_, P>::new(tributary_db.clone(), &set, reader)
|
ScanTributaryTask::<_, P>::new(tributary_db.clone(), set.clone(), reader)
|
||||||
// This is the only handle for this TributaryProcessorMessagesTask, so when this task is
|
// This is the only handle for this TributaryProcessorMessagesTask, so when this task is
|
||||||
// dropped, it will be too
|
// dropped, it will be too
|
||||||
.continually_run(scan_tributary_task_def, vec![scan_tributary_messages_task]),
|
.continually_run(scan_tributary_task_def, vec![scan_tributary_messages_task]),
|
||||||
);
|
);
|
||||||
|
|
||||||
// Spawn the sign slash report task
|
|
||||||
let (sign_slash_report_task_def, sign_slash_report_task) = Task::new();
|
|
||||||
tokio::spawn(
|
|
||||||
(SignSlashReportTask {
|
|
||||||
db: db.clone(),
|
|
||||||
tributary_db: tributary_db.clone(),
|
|
||||||
tributary: tributary.clone(),
|
|
||||||
set: set.clone(),
|
|
||||||
key: serai_key.clone(),
|
|
||||||
})
|
|
||||||
.continually_run(sign_slash_report_task_def, vec![]),
|
|
||||||
);
|
|
||||||
|
|
||||||
// Spawn the add transactions task
|
// Spawn the add transactions task
|
||||||
let (add_tributary_transactions_task_def, add_tributary_transactions_task) = Task::new();
|
let (add_tributary_transactions_task_def, add_tributary_transactions_task) = Task::new();
|
||||||
tokio::spawn(
|
tokio::spawn(
|
||||||
(AddTributaryTransactionsTask {
|
(AddTributaryTransactionsTask {
|
||||||
db: db.clone(),
|
db: db.clone(),
|
||||||
tributary_db,
|
tributary_db: tributary_db.clone(),
|
||||||
tributary: tributary.clone(),
|
tributary: tributary.clone(),
|
||||||
set: set.set,
|
set: set.clone(),
|
||||||
key: serai_key,
|
key: serai_key.clone(),
|
||||||
})
|
})
|
||||||
.continually_run(add_tributary_transactions_task_def, vec![]),
|
.continually_run(add_tributary_transactions_task_def, vec![]),
|
||||||
);
|
);
|
||||||
|
|
||||||
|
// Spawn the task to confirm the DKG result
|
||||||
|
let (confirm_dkg_task_def, confirm_dkg_task) = Task::new();
|
||||||
|
tokio::spawn(
|
||||||
|
ConfirmDkgTask::new(db.clone(), set.clone(), tributary_db.clone(), serai_key.clone())
|
||||||
|
.continually_run(confirm_dkg_task_def, vec![add_tributary_transactions_task]),
|
||||||
|
);
|
||||||
|
|
||||||
|
// Spawn the sign slash report task
|
||||||
|
let (sign_slash_report_task_def, sign_slash_report_task) = Task::new();
|
||||||
|
tokio::spawn(
|
||||||
|
(SignSlashReportTask {
|
||||||
|
db: db.clone(),
|
||||||
|
tributary_db,
|
||||||
|
tributary: tributary.clone(),
|
||||||
|
set: set.clone(),
|
||||||
|
key: serai_key,
|
||||||
|
})
|
||||||
|
.continually_run(sign_slash_report_task_def, vec![]),
|
||||||
|
);
|
||||||
|
|
||||||
// Whenever a new block occurs, immediately run the scan task
|
// Whenever a new block occurs, immediately run the scan task
|
||||||
// This function also preserves the ProvideCosignCosignedTransactionsTask handle until the
|
// This function also preserves the ProvideCosignCosignedTransactionsTask handle until the
|
||||||
// Tributary is retired, ensuring it isn't dropped prematurely and that the task don't run ad
|
// Tributary is retired, ensuring it isn't dropped prematurely and that the task don't run ad
|
||||||
@@ -444,10 +590,6 @@ pub(crate) async fn spawn_tributary<P: P2p>(
|
|||||||
set.set,
|
set.set,
|
||||||
tributary,
|
tributary,
|
||||||
scan_tributary_task,
|
scan_tributary_task,
|
||||||
vec![
|
vec![provide_cosign_cosigned_transactions_task, confirm_dkg_task, sign_slash_report_task],
|
||||||
provide_cosign_cosigned_transactions_task,
|
|
||||||
sign_slash_report_task,
|
|
||||||
add_tributary_transactions_task,
|
|
||||||
],
|
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
|||||||
keywords = []
|
keywords = []
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
publish = false
|
publish = false
|
||||||
rust-version = "1.81"
|
rust-version = "1.85"
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
all-features = true
|
all-features = true
|
||||||
@@ -22,6 +22,9 @@ bitvec = { version = "1", default-features = false, features = ["std"] }
|
|||||||
|
|
||||||
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
|
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
|
||||||
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
|
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
|
||||||
|
|
||||||
|
dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] }
|
||||||
|
|
||||||
serai-client = { path = "../../substrate/client", version = "0.1", default-features = false, features = ["serai", "borsh"] }
|
serai-client = { path = "../../substrate/client", version = "0.1", default-features = false, features = ["serai", "borsh"] }
|
||||||
|
|
||||||
log = { version = "0.4", default-features = false, features = ["std"] }
|
log = { version = "0.4", default-features = false, features = ["std"] }
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
AGPL-3.0-only license
|
AGPL-3.0-only license
|
||||||
|
|
||||||
Copyright (c) 2023-2024 Luke Parker
|
Copyright (c) 2023-2025 Luke Parker
|
||||||
|
|
||||||
This program is free software: you can redistribute it and/or modify
|
This program is free software: you can redistribute it and/or modify
|
||||||
it under the terms of the GNU Affero General Public License Version 3 as
|
it under the terms of the GNU Affero General Public License Version 3 as
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ use std::sync::Arc;
|
|||||||
|
|
||||||
use futures::stream::{StreamExt, FuturesOrdered};
|
use futures::stream::{StreamExt, FuturesOrdered};
|
||||||
|
|
||||||
use serai_client::Serai;
|
use serai_client::{validator_sets::primitives::ExternalValidatorSet, Serai};
|
||||||
|
|
||||||
use messages::substrate::{InInstructionResult, ExecutedBatch, CoordinatorMessage};
|
use messages::substrate::{InInstructionResult, ExecutedBatch, CoordinatorMessage};
|
||||||
|
|
||||||
@@ -152,6 +152,7 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
|
|||||||
else {
|
else {
|
||||||
panic!("SetRetired event wasn't a SetRetired event: {set_retired:?}");
|
panic!("SetRetired event wasn't a SetRetired event: {set_retired:?}");
|
||||||
};
|
};
|
||||||
|
let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
|
||||||
crate::Canonical::send(
|
crate::Canonical::send(
|
||||||
&mut txn,
|
&mut txn,
|
||||||
set.network,
|
set.network,
|
||||||
@@ -159,7 +160,7 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
for network in serai_client::primitives::NETWORKS {
|
for network in serai_client::primitives::EXTERNAL_NETWORKS {
|
||||||
let mut batch = None;
|
let mut batch = None;
|
||||||
for this_batch in &block.batch_events {
|
for this_batch in &block.batch_events {
|
||||||
let serai_client::in_instructions::InInstructionsEvent::Batch {
|
let serai_client::in_instructions::InInstructionsEvent::Batch {
|
||||||
@@ -180,7 +181,7 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
|
|||||||
batch = Some(ExecutedBatch {
|
batch = Some(ExecutedBatch {
|
||||||
id: *id,
|
id: *id,
|
||||||
publisher: *publishing_session,
|
publisher: *publishing_session,
|
||||||
external_network_block_hash: *external_network_block_hash,
|
external_network_block_hash: external_network_block_hash.0,
|
||||||
in_instructions_hash: *in_instructions_hash,
|
in_instructions_hash: *in_instructions_hash,
|
||||||
in_instruction_results: in_instruction_results
|
in_instruction_results: in_instruction_results
|
||||||
.iter()
|
.iter()
|
||||||
@@ -201,7 +202,7 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
|
|||||||
let serai_client::coins::CoinsEvent::BurnWithInstruction { from: _, instruction } =
|
let serai_client::coins::CoinsEvent::BurnWithInstruction { from: _, instruction } =
|
||||||
&burn
|
&burn
|
||||||
else {
|
else {
|
||||||
panic!("Burn event wasn't a Burn.in event: {burn:?}");
|
panic!("BurnWithInstruction event wasn't a BurnWithInstruction event: {burn:?}");
|
||||||
};
|
};
|
||||||
if instruction.balance.coin.network() == network {
|
if instruction.balance.coin.network() == network {
|
||||||
burns.push(instruction.clone());
|
burns.push(instruction.clone());
|
||||||
|
|||||||
@@ -4,8 +4,8 @@ use std::sync::Arc;
|
|||||||
use futures::stream::{StreamExt, FuturesOrdered};
|
use futures::stream::{StreamExt, FuturesOrdered};
|
||||||
|
|
||||||
use serai_client::{
|
use serai_client::{
|
||||||
primitives::{PublicKey, NetworkId, EmbeddedEllipticCurve},
|
primitives::{SeraiAddress, EmbeddedEllipticCurve},
|
||||||
validator_sets::primitives::MAX_KEY_SHARES_PER_SET,
|
validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet},
|
||||||
Serai,
|
Serai,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -26,14 +26,14 @@ create_db!(
|
|||||||
pub struct EphemeralEventStream<D: Db> {
|
pub struct EphemeralEventStream<D: Db> {
|
||||||
db: D,
|
db: D,
|
||||||
serai: Arc<Serai>,
|
serai: Arc<Serai>,
|
||||||
validator: PublicKey,
|
validator: SeraiAddress,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db> EphemeralEventStream<D> {
|
impl<D: Db> EphemeralEventStream<D> {
|
||||||
/// Create a new ephemeral event stream.
|
/// Create a new ephemeral event stream.
|
||||||
///
|
///
|
||||||
/// Only one of these may exist over the provided database.
|
/// Only one of these may exist over the provided database.
|
||||||
pub fn new(db: D, serai: Arc<Serai>, validator: PublicKey) -> Self {
|
pub fn new(db: D, serai: Arc<Serai>, validator: SeraiAddress) -> Self {
|
||||||
Self { db, serai, validator }
|
Self { db, serai, validator }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -130,21 +130,22 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
|
|||||||
let serai_client::validator_sets::ValidatorSetsEvent::NewSet { set } = &new_set else {
|
let serai_client::validator_sets::ValidatorSetsEvent::NewSet { set } = &new_set else {
|
||||||
panic!("NewSet event wasn't a NewSet event: {new_set:?}");
|
panic!("NewSet event wasn't a NewSet event: {new_set:?}");
|
||||||
};
|
};
|
||||||
|
|
||||||
// We only coordinate over external networks
|
// We only coordinate over external networks
|
||||||
if set.network == NetworkId::Serai {
|
let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
let serai = self.serai.as_of(block.block_hash);
|
let serai = self.serai.as_of(block.block_hash);
|
||||||
let serai = serai.validator_sets();
|
let serai = serai.validator_sets();
|
||||||
let Some(validators) =
|
let Some(validators) =
|
||||||
serai.participants(set.network).await.map_err(|e| format!("{e:?}"))?
|
serai.participants(set.network.into()).await.map_err(|e| format!("{e:?}"))?
|
||||||
else {
|
else {
|
||||||
Err(format!(
|
Err(format!(
|
||||||
"block #{block_number} declared a new set but didn't have the participants"
|
"block #{block_number} declared a new set but didn't have the participants"
|
||||||
))?
|
))?
|
||||||
};
|
};
|
||||||
|
let validators = validators
|
||||||
|
.into_iter()
|
||||||
|
.map(|(validator, weight)| (SeraiAddress::from(validator), weight))
|
||||||
|
.collect::<Vec<_>>();
|
||||||
let in_set = validators.iter().any(|(validator, _)| *validator == self.validator);
|
let in_set = validators.iter().any(|(validator, _)| *validator == self.validator);
|
||||||
if in_set {
|
if in_set {
|
||||||
if u16::try_from(validators.len()).is_err() {
|
if u16::try_from(validators.len()).is_err() {
|
||||||
@@ -177,14 +178,16 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
|
|||||||
embedded_elliptic_curve_keys.push_back(async move {
|
embedded_elliptic_curve_keys.push_back(async move {
|
||||||
tokio::try_join!(
|
tokio::try_join!(
|
||||||
// One future to fetch the substrate embedded key
|
// One future to fetch the substrate embedded key
|
||||||
serai
|
serai.embedded_elliptic_curve_key(
|
||||||
.embedded_elliptic_curve_key(validator, EmbeddedEllipticCurve::Embedwards25519),
|
validator.into(),
|
||||||
|
EmbeddedEllipticCurve::Embedwards25519
|
||||||
|
),
|
||||||
// One future to fetch the external embedded key, if there is a distinct curve
|
// One future to fetch the external embedded key, if there is a distinct curve
|
||||||
async {
|
async {
|
||||||
// `embedded_elliptic_curves` is documented to have the second entry be the
|
// `embedded_elliptic_curves` is documented to have the second entry be the
|
||||||
// network-specific curve (if it exists and is distinct from Embedwards25519)
|
// network-specific curve (if it exists and is distinct from Embedwards25519)
|
||||||
if let Some(curve) = set.network.embedded_elliptic_curves().get(1) {
|
if let Some(curve) = set.network.embedded_elliptic_curves().get(1) {
|
||||||
serai.embedded_elliptic_curve_key(validator, *curve).await.map(Some)
|
serai.embedded_elliptic_curve_key(validator.into(), *curve).await.map(Some)
|
||||||
} else {
|
} else {
|
||||||
Ok(None)
|
Ok(None)
|
||||||
}
|
}
|
||||||
@@ -215,19 +218,22 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
crate::NewSet::send(
|
let mut new_set = NewSetInformation {
|
||||||
&mut txn,
|
set,
|
||||||
&NewSetInformation {
|
serai_block: block.block_hash,
|
||||||
set: *set,
|
declaration_time: block.time,
|
||||||
serai_block: block.block_hash,
|
// TODO: This should be inlined into the Processor's key gen code
|
||||||
declaration_time: block.time,
|
// It's legacy from when we removed participants from the key gen
|
||||||
// TODO: Why do we have this as an explicit field here?
|
threshold: ((total_weight * 2) / 3) + 1,
|
||||||
// Shouldn't thiis be inlined into the Processor's key gen code, where it's used?
|
validators,
|
||||||
threshold: ((total_weight * 2) / 3) + 1,
|
evrf_public_keys,
|
||||||
validators,
|
participant_indexes: Default::default(),
|
||||||
evrf_public_keys,
|
participant_indexes_reverse_lookup: Default::default(),
|
||||||
},
|
};
|
||||||
);
|
// These aren't serialized, and we immediately serialize and drop this, so this isn't
|
||||||
|
// necessary. It's just good practice not have this be dirty
|
||||||
|
new_set.init_participant_indexes();
|
||||||
|
crate::NewSet::send(&mut txn, &new_set);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -237,7 +243,8 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
|
|||||||
else {
|
else {
|
||||||
panic!("AcceptedHandover event wasn't a AcceptedHandover event: {accepted_handover:?}");
|
panic!("AcceptedHandover event wasn't a AcceptedHandover event: {accepted_handover:?}");
|
||||||
};
|
};
|
||||||
crate::SignSlashReport::send(&mut txn, *set);
|
let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
|
||||||
|
crate::SignSlashReport::send(&mut txn, set);
|
||||||
}
|
}
|
||||||
|
|
||||||
txn.commit();
|
txn.commit();
|
||||||
|
|||||||
@@ -1,13 +1,17 @@
|
|||||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||||
#![doc = include_str!("../README.md")]
|
#![doc = include_str!("../README.md")]
|
||||||
#![deny(missing_docs)]
|
#![deny(missing_docs)]
|
||||||
|
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
use scale::{Encode, Decode};
|
use scale::{Encode, Decode};
|
||||||
use borsh::{io, BorshSerialize, BorshDeserialize};
|
use borsh::{BorshSerialize, BorshDeserialize};
|
||||||
|
|
||||||
|
use dkg::Participant;
|
||||||
|
|
||||||
use serai_client::{
|
use serai_client::{
|
||||||
primitives::{NetworkId, PublicKey, Signature, SeraiAddress},
|
primitives::{ExternalNetworkId, SeraiAddress, Signature},
|
||||||
validator_sets::primitives::{Session, ValidatorSet, KeyPair},
|
validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair, SlashReport},
|
||||||
in_instructions::primitives::SignedBatch,
|
in_instructions::primitives::SignedBatch,
|
||||||
Transaction,
|
Transaction,
|
||||||
};
|
};
|
||||||
@@ -26,25 +30,12 @@ pub use publish_batch::PublishBatchTask;
|
|||||||
mod publish_slash_report;
|
mod publish_slash_report;
|
||||||
pub use publish_slash_report::PublishSlashReportTask;
|
pub use publish_slash_report::PublishSlashReportTask;
|
||||||
|
|
||||||
fn borsh_serialize_validators<W: io::Write>(
|
|
||||||
validators: &Vec<(PublicKey, u16)>,
|
|
||||||
writer: &mut W,
|
|
||||||
) -> Result<(), io::Error> {
|
|
||||||
// This doesn't use `encode_to` as `encode_to` panics if the writer returns an error
|
|
||||||
writer.write_all(&validators.encode())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn borsh_deserialize_validators<R: io::Read>(
|
|
||||||
reader: &mut R,
|
|
||||||
) -> Result<Vec<(PublicKey, u16)>, io::Error> {
|
|
||||||
Decode::decode(&mut scale::IoReader(reader)).map_err(io::Error::other)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The information for a new set.
|
/// The information for a new set.
|
||||||
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
|
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
|
||||||
|
#[borsh(init = init_participant_indexes)]
|
||||||
pub struct NewSetInformation {
|
pub struct NewSetInformation {
|
||||||
/// The set.
|
/// The set.
|
||||||
pub set: ValidatorSet,
|
pub set: ExternalValidatorSet,
|
||||||
/// The Serai block which declared it.
|
/// The Serai block which declared it.
|
||||||
pub serai_block: [u8; 32],
|
pub serai_block: [u8; 32],
|
||||||
/// The time of the block which declared it, in seconds.
|
/// The time of the block which declared it, in seconds.
|
||||||
@@ -52,13 +43,37 @@ pub struct NewSetInformation {
|
|||||||
/// The threshold to use.
|
/// The threshold to use.
|
||||||
pub threshold: u16,
|
pub threshold: u16,
|
||||||
/// The validators, with the amount of key shares they have.
|
/// The validators, with the amount of key shares they have.
|
||||||
#[borsh(
|
pub validators: Vec<(SeraiAddress, u16)>,
|
||||||
serialize_with = "borsh_serialize_validators",
|
|
||||||
deserialize_with = "borsh_deserialize_validators"
|
|
||||||
)]
|
|
||||||
pub validators: Vec<(PublicKey, u16)>,
|
|
||||||
/// The eVRF public keys.
|
/// The eVRF public keys.
|
||||||
|
///
|
||||||
|
/// This will have the necessary copies of the keys proper for each validator's weight,
|
||||||
|
/// accordingly syncing up with `participant_indexes`.
|
||||||
pub evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
|
pub evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
|
||||||
|
/// The participant indexes, indexed by their validator.
|
||||||
|
#[borsh(skip)]
|
||||||
|
pub participant_indexes: HashMap<SeraiAddress, Vec<Participant>>,
|
||||||
|
/// The validators, indexed by their participant indexes.
|
||||||
|
#[borsh(skip)]
|
||||||
|
pub participant_indexes_reverse_lookup: HashMap<Participant, SeraiAddress>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NewSetInformation {
|
||||||
|
fn init_participant_indexes(&mut self) {
|
||||||
|
let mut next_i = 1;
|
||||||
|
self.participant_indexes = HashMap::with_capacity(self.validators.len());
|
||||||
|
self.participant_indexes_reverse_lookup = HashMap::with_capacity(self.validators.len());
|
||||||
|
for (validator, weight) in &self.validators {
|
||||||
|
let mut these_is = Vec::with_capacity((*weight).into());
|
||||||
|
for _ in 0 .. *weight {
|
||||||
|
let this_i = Participant::new(next_i).unwrap();
|
||||||
|
next_i += 1;
|
||||||
|
|
||||||
|
these_is.push(this_i);
|
||||||
|
self.participant_indexes_reverse_lookup.insert(this_i, *validator);
|
||||||
|
}
|
||||||
|
self.participant_indexes.insert(*validator, these_is);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
mod _public_db {
|
mod _public_db {
|
||||||
@@ -67,24 +82,24 @@ mod _public_db {
|
|||||||
db_channel!(
|
db_channel!(
|
||||||
CoordinatorSubstrate {
|
CoordinatorSubstrate {
|
||||||
// Canonical messages to send to the processor
|
// Canonical messages to send to the processor
|
||||||
Canonical: (network: NetworkId) -> messages::substrate::CoordinatorMessage,
|
Canonical: (network: ExternalNetworkId) -> messages::substrate::CoordinatorMessage,
|
||||||
|
|
||||||
// Relevant new set, from an ephemeral event stream
|
// Relevant new set, from an ephemeral event stream
|
||||||
NewSet: () -> NewSetInformation,
|
NewSet: () -> NewSetInformation,
|
||||||
// Potentially relevant sign slash report, from an ephemeral event stream
|
// Potentially relevant sign slash report, from an ephemeral event stream
|
||||||
SignSlashReport: (set: ValidatorSet) -> (),
|
SignSlashReport: (set: ExternalValidatorSet) -> (),
|
||||||
|
|
||||||
// Signed batches to publish onto the Serai network
|
// Signed batches to publish onto the Serai network
|
||||||
SignedBatches: (network: NetworkId) -> SignedBatch,
|
SignedBatches: (network: ExternalNetworkId) -> SignedBatch,
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
|
|
||||||
create_db!(
|
create_db!(
|
||||||
CoordinatorSubstrate {
|
CoordinatorSubstrate {
|
||||||
// Keys to set on the Serai network
|
// Keys to set on the Serai network
|
||||||
Keys: (network: NetworkId) -> (Session, Vec<u8>),
|
Keys: (network: ExternalNetworkId) -> (Session, Vec<u8>),
|
||||||
// Slash reports to publish onto the Serai network
|
// Slash reports to publish onto the Serai network
|
||||||
SlashReports: (network: NetworkId) -> (Session, Vec<u8>),
|
SlashReports: (network: ExternalNetworkId) -> (Session, Vec<u8>),
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
@@ -94,7 +109,7 @@ pub struct Canonical;
|
|||||||
impl Canonical {
|
impl Canonical {
|
||||||
pub(crate) fn send(
|
pub(crate) fn send(
|
||||||
txn: &mut impl DbTxn,
|
txn: &mut impl DbTxn,
|
||||||
network: NetworkId,
|
network: ExternalNetworkId,
|
||||||
msg: &messages::substrate::CoordinatorMessage,
|
msg: &messages::substrate::CoordinatorMessage,
|
||||||
) {
|
) {
|
||||||
_public_db::Canonical::send(txn, network, msg);
|
_public_db::Canonical::send(txn, network, msg);
|
||||||
@@ -102,7 +117,7 @@ impl Canonical {
|
|||||||
/// Try to receive a canonical event, returning `None` if there is none to receive.
|
/// Try to receive a canonical event, returning `None` if there is none to receive.
|
||||||
pub fn try_recv(
|
pub fn try_recv(
|
||||||
txn: &mut impl DbTxn,
|
txn: &mut impl DbTxn,
|
||||||
network: NetworkId,
|
network: ExternalNetworkId,
|
||||||
) -> Option<messages::substrate::CoordinatorMessage> {
|
) -> Option<messages::substrate::CoordinatorMessage> {
|
||||||
_public_db::Canonical::try_recv(txn, network)
|
_public_db::Canonical::try_recv(txn, network)
|
||||||
}
|
}
|
||||||
@@ -126,12 +141,12 @@ impl NewSet {
|
|||||||
/// notifications for all relevant validator sets will be included.
|
/// notifications for all relevant validator sets will be included.
|
||||||
pub struct SignSlashReport;
|
pub struct SignSlashReport;
|
||||||
impl SignSlashReport {
|
impl SignSlashReport {
|
||||||
pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet) {
|
pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet) {
|
||||||
_public_db::SignSlashReport::send(txn, set, &());
|
_public_db::SignSlashReport::send(txn, set, &());
|
||||||
}
|
}
|
||||||
/// Try to receive a notification to sign a slash report, returning `None` if there is none to
|
/// Try to receive a notification to sign a slash report, returning `None` if there is none to
|
||||||
/// receive.
|
/// receive.
|
||||||
pub fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<()> {
|
pub fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<()> {
|
||||||
_public_db::SignSlashReport::try_recv(txn, set)
|
_public_db::SignSlashReport::try_recv(txn, set)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -145,7 +160,7 @@ impl Keys {
|
|||||||
/// reported at once.
|
/// reported at once.
|
||||||
pub fn set(
|
pub fn set(
|
||||||
txn: &mut impl DbTxn,
|
txn: &mut impl DbTxn,
|
||||||
set: ValidatorSet,
|
set: ExternalValidatorSet,
|
||||||
key_pair: KeyPair,
|
key_pair: KeyPair,
|
||||||
signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
|
signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
|
||||||
signature: Signature,
|
signature: Signature,
|
||||||
@@ -165,7 +180,10 @@ impl Keys {
|
|||||||
);
|
);
|
||||||
_public_db::Keys::set(txn, set.network, &(set.session, tx.encode()));
|
_public_db::Keys::set(txn, set.network, &(set.session, tx.encode()));
|
||||||
}
|
}
|
||||||
pub(crate) fn take(txn: &mut impl DbTxn, network: NetworkId) -> Option<(Session, Transaction)> {
|
pub(crate) fn take(
|
||||||
|
txn: &mut impl DbTxn,
|
||||||
|
network: ExternalNetworkId,
|
||||||
|
) -> Option<(Session, Transaction)> {
|
||||||
let (session, tx) = _public_db::Keys::take(txn, network)?;
|
let (session, tx) = _public_db::Keys::take(txn, network)?;
|
||||||
Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
|
Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
|
||||||
}
|
}
|
||||||
@@ -175,20 +193,14 @@ impl Keys {
|
|||||||
pub struct SignedBatches;
|
pub struct SignedBatches;
|
||||||
impl SignedBatches {
|
impl SignedBatches {
|
||||||
/// Send a `SignedBatch` to publish onto Serai.
|
/// Send a `SignedBatch` to publish onto Serai.
|
||||||
///
|
|
||||||
/// These will be published sequentially. Out-of-order sending risks hanging the task.
|
|
||||||
pub fn send(txn: &mut impl DbTxn, batch: &SignedBatch) {
|
pub fn send(txn: &mut impl DbTxn, batch: &SignedBatch) {
|
||||||
_public_db::SignedBatches::send(txn, batch.batch.network, batch);
|
_public_db::SignedBatches::send(txn, batch.batch.network, batch);
|
||||||
}
|
}
|
||||||
pub(crate) fn try_recv(txn: &mut impl DbTxn, network: NetworkId) -> Option<SignedBatch> {
|
pub(crate) fn try_recv(txn: &mut impl DbTxn, network: ExternalNetworkId) -> Option<SignedBatch> {
|
||||||
_public_db::SignedBatches::try_recv(txn, network)
|
_public_db::SignedBatches::try_recv(txn, network)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The slash report was invalid.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct InvalidSlashReport;
|
|
||||||
|
|
||||||
/// The slash reports to publish onto Serai.
|
/// The slash reports to publish onto Serai.
|
||||||
pub struct SlashReports;
|
pub struct SlashReports;
|
||||||
impl SlashReports {
|
impl SlashReports {
|
||||||
@@ -196,32 +208,30 @@ impl SlashReports {
|
|||||||
///
|
///
|
||||||
/// This only saves the most recent slashes as only a single session is eligible to have its
|
/// This only saves the most recent slashes as only a single session is eligible to have its
|
||||||
/// slashes reported at once.
|
/// slashes reported at once.
|
||||||
///
|
|
||||||
/// Returns Err if the slashes are invalid. Returns Ok if the slashes weren't detected as
|
|
||||||
/// invalid. Slashes may be considered invalid by the Serai blockchain later even if not detected
|
|
||||||
/// as invalid here.
|
|
||||||
pub fn set(
|
pub fn set(
|
||||||
txn: &mut impl DbTxn,
|
txn: &mut impl DbTxn,
|
||||||
set: ValidatorSet,
|
set: ExternalValidatorSet,
|
||||||
slashes: Vec<(SeraiAddress, u32)>,
|
slash_report: SlashReport,
|
||||||
signature: Signature,
|
signature: Signature,
|
||||||
) -> Result<(), InvalidSlashReport> {
|
) {
|
||||||
// If we have a more recent slash report, don't write this historic one
|
// If we have a more recent slash report, don't write this historic one
|
||||||
if let Some((existing_session, _)) = _public_db::SlashReports::get(txn, set.network) {
|
if let Some((existing_session, _)) = _public_db::SlashReports::get(txn, set.network) {
|
||||||
if existing_session.0 >= set.session.0 {
|
if existing_session.0 >= set.session.0 {
|
||||||
return Ok(());
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let tx = serai_client::validator_sets::SeraiValidatorSets::report_slashes(
|
let tx = serai_client::validator_sets::SeraiValidatorSets::report_slashes(
|
||||||
set.network,
|
set.network,
|
||||||
slashes.try_into().map_err(|_| InvalidSlashReport)?,
|
slash_report,
|
||||||
signature,
|
signature,
|
||||||
);
|
);
|
||||||
_public_db::SlashReports::set(txn, set.network, &(set.session, tx.encode()));
|
_public_db::SlashReports::set(txn, set.network, &(set.session, tx.encode()));
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
pub(crate) fn take(txn: &mut impl DbTxn, network: NetworkId) -> Option<(Session, Transaction)> {
|
pub(crate) fn take(
|
||||||
|
txn: &mut impl DbTxn,
|
||||||
|
network: ExternalNetworkId,
|
||||||
|
) -> Option<(Session, Transaction)> {
|
||||||
let (session, tx) = _public_db::SlashReports::take(txn, network)?;
|
let (session, tx) = _public_db::SlashReports::take(txn, network)?;
|
||||||
Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
|
Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,31 +1,32 @@
|
|||||||
use core::future::Future;
|
use core::future::Future;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use serai_db::{DbTxn, Db};
|
#[rustfmt::skip]
|
||||||
|
use serai_client::{primitives::ExternalNetworkId, in_instructions::primitives::SignedBatch, SeraiError, Serai};
|
||||||
use serai_client::{primitives::NetworkId, SeraiError, Serai};
|
|
||||||
|
|
||||||
|
use serai_db::{Get, DbTxn, Db, create_db};
|
||||||
use serai_task::ContinuallyRan;
|
use serai_task::ContinuallyRan;
|
||||||
|
|
||||||
use crate::SignedBatches;
|
use crate::SignedBatches;
|
||||||
|
|
||||||
|
create_db!(
|
||||||
|
CoordinatorSubstrate {
|
||||||
|
LastPublishedBatch: (network: ExternalNetworkId) -> u32,
|
||||||
|
BatchesToPublish: (network: ExternalNetworkId, batch: u32) -> SignedBatch,
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
/// Publish `SignedBatch`s from `SignedBatches` onto Serai.
|
/// Publish `SignedBatch`s from `SignedBatches` onto Serai.
|
||||||
pub struct PublishBatchTask<D: Db> {
|
pub struct PublishBatchTask<D: Db> {
|
||||||
db: D,
|
db: D,
|
||||||
serai: Arc<Serai>,
|
serai: Arc<Serai>,
|
||||||
network: NetworkId,
|
network: ExternalNetworkId,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db> PublishBatchTask<D> {
|
impl<D: Db> PublishBatchTask<D> {
|
||||||
/// Create a task to publish `SignedBatch`s onto Serai.
|
/// Create a task to publish `SignedBatch`s onto Serai.
|
||||||
///
|
pub fn new(db: D, serai: Arc<Serai>, network: ExternalNetworkId) -> Self {
|
||||||
/// Returns None if `network == NetworkId::Serai`.
|
Self { db, serai, network }
|
||||||
// TODO: ExternalNetworkId
|
|
||||||
pub fn new(db: D, serai: Arc<Serai>, network: NetworkId) -> Option<Self> {
|
|
||||||
if network == NetworkId::Serai {
|
|
||||||
None?
|
|
||||||
};
|
|
||||||
Some(Self { db, serai, network })
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -34,32 +35,52 @@ impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
|
|||||||
|
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
let mut made_progress = false;
|
// Read from SignedBatches, which is sequential, into our own mapping
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
let mut txn = self.db.txn();
|
let mut txn = self.db.txn();
|
||||||
let Some(batch) = SignedBatches::try_recv(&mut txn, self.network) else {
|
let Some(batch) = SignedBatches::try_recv(&mut txn, self.network) else {
|
||||||
// No batch to publish at this time
|
|
||||||
break;
|
break;
|
||||||
};
|
};
|
||||||
|
|
||||||
// Publish this Batch if it hasn't already been published
|
// If this is a Batch not yet published, save it into our unordered mapping
|
||||||
|
if LastPublishedBatch::get(&txn, self.network) < Some(batch.batch.id) {
|
||||||
|
BatchesToPublish::set(&mut txn, self.network, batch.batch.id, &batch);
|
||||||
|
}
|
||||||
|
|
||||||
|
txn.commit();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Synchronize our last published batch with the Serai network's
|
||||||
|
let next_to_publish = {
|
||||||
|
// This uses the latest finalized block, not the latest cosigned block, which should be
|
||||||
|
// fine as in the worst case, the only impact is no longer attempting TX publication
|
||||||
let serai = self.serai.as_of_latest_finalized_block().await?;
|
let serai = self.serai.as_of_latest_finalized_block().await?;
|
||||||
let last_batch = serai.in_instructions().last_batch_for_network(self.network).await?;
|
let last_batch = serai.in_instructions().last_batch_for_network(self.network).await?;
|
||||||
if last_batch < Some(batch.batch.id) {
|
|
||||||
// This stream of Batches *should* be sequential within the larger context of the Serai
|
let mut txn = self.db.txn();
|
||||||
// coordinator. In this library, we use a more relaxed definition and don't assert
|
let mut our_last_batch = LastPublishedBatch::get(&txn, self.network);
|
||||||
// sequence. This does risk hanging the task, if Batch #n+1 is sent before Batch #n, but
|
while our_last_batch < last_batch {
|
||||||
// that is a documented fault of the `SignedBatches` API.
|
let next_batch = our_last_batch.map(|batch| batch + 1).unwrap_or(0);
|
||||||
|
// Clean up the Batch to publish since it's already been published
|
||||||
|
BatchesToPublish::take(&mut txn, self.network, next_batch);
|
||||||
|
our_last_batch = Some(next_batch);
|
||||||
|
}
|
||||||
|
if let Some(last_batch) = our_last_batch {
|
||||||
|
LastPublishedBatch::set(&mut txn, self.network, &last_batch);
|
||||||
|
}
|
||||||
|
last_batch.map(|batch| batch + 1).unwrap_or(0)
|
||||||
|
};
|
||||||
|
|
||||||
|
let made_progress =
|
||||||
|
if let Some(batch) = BatchesToPublish::get(&self.db, self.network, next_to_publish) {
|
||||||
self
|
self
|
||||||
.serai
|
.serai
|
||||||
.publish(&serai_client::in_instructions::SeraiInInstructions::execute_batch(batch))
|
.publish(&serai_client::in_instructions::SeraiInInstructions::execute_batch(batch))
|
||||||
.await?;
|
.await?;
|
||||||
}
|
true
|
||||||
|
} else {
|
||||||
txn.commit();
|
false
|
||||||
made_progress = true;
|
};
|
||||||
}
|
|
||||||
Ok(made_progress)
|
Ok(made_progress)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ use std::sync::Arc;
|
|||||||
|
|
||||||
use serai_db::{DbTxn, Db};
|
use serai_db::{DbTxn, Db};
|
||||||
|
|
||||||
use serai_client::{primitives::NetworkId, validator_sets::primitives::Session, Serai};
|
use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::Session, Serai};
|
||||||
|
|
||||||
use serai_task::ContinuallyRan;
|
use serai_task::ContinuallyRan;
|
||||||
|
|
||||||
@@ -22,66 +22,78 @@ impl<D: Db> PublishSlashReportTask<D> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<D: Db> PublishSlashReportTask<D> {
|
||||||
|
// Returns if a slash report was successfully published
|
||||||
|
async fn publish(&mut self, network: ExternalNetworkId) -> Result<bool, String> {
|
||||||
|
let mut txn = self.db.txn();
|
||||||
|
let Some((session, slash_report)) = SlashReports::take(&mut txn, network) else {
|
||||||
|
// No slash report to publish
|
||||||
|
return Ok(false);
|
||||||
|
};
|
||||||
|
|
||||||
|
// This uses the latest finalized block, not the latest cosigned block, which should be
|
||||||
|
// fine as in the worst case, the only impact is no longer attempting TX publication
|
||||||
|
let serai = self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
|
||||||
|
let serai = serai.validator_sets();
|
||||||
|
let session_after_slash_report = Session(session.0 + 1);
|
||||||
|
let current_session = serai.session(network.into()).await.map_err(|e| format!("{e:?}"))?;
|
||||||
|
let current_session = current_session.map(|session| session.0);
|
||||||
|
// Only attempt to publish the slash report for session #n while session #n+1 is still
|
||||||
|
// active
|
||||||
|
let session_after_slash_report_retired = current_session > Some(session_after_slash_report.0);
|
||||||
|
if session_after_slash_report_retired {
|
||||||
|
// Commit the txn to drain this slash report from the database and not try it again later
|
||||||
|
txn.commit();
|
||||||
|
return Ok(false);
|
||||||
|
}
|
||||||
|
|
||||||
|
if Some(session_after_slash_report.0) != current_session {
|
||||||
|
// We already checked the current session wasn't greater, and they're not equal
|
||||||
|
assert!(current_session < Some(session_after_slash_report.0));
|
||||||
|
// This would mean the Serai node is resyncing and is behind where it prior was
|
||||||
|
Err("have a slash report for a session Serai has yet to retire".to_string())?;
|
||||||
|
}
|
||||||
|
|
||||||
|
// If this session which should publish a slash report already has, move on
|
||||||
|
let key_pending_slash_report =
|
||||||
|
serai.key_pending_slash_report(network).await.map_err(|e| format!("{e:?}"))?;
|
||||||
|
if key_pending_slash_report.is_none() {
|
||||||
|
txn.commit();
|
||||||
|
return Ok(false);
|
||||||
|
};
|
||||||
|
|
||||||
|
match self.serai.publish(&slash_report).await {
|
||||||
|
Ok(()) => {
|
||||||
|
txn.commit();
|
||||||
|
Ok(true)
|
||||||
|
}
|
||||||
|
// This could be specific to this TX (such as an already in mempool error) and it may be
|
||||||
|
// worthwhile to continue iteration with the other pending slash reports. We assume this
|
||||||
|
// error ephemeral and that the latency incurred for this ephemeral error to resolve is
|
||||||
|
// miniscule compared to the window available to publish the slash report. That makes
|
||||||
|
// this a non-issue.
|
||||||
|
Err(e) => Err(format!("couldn't publish slash report transaction: {e:?}")),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl<D: Db> ContinuallyRan for PublishSlashReportTask<D> {
|
impl<D: Db> ContinuallyRan for PublishSlashReportTask<D> {
|
||||||
type Error = String;
|
type Error = String;
|
||||||
|
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
let mut made_progress = false;
|
let mut made_progress = false;
|
||||||
for network in serai_client::primitives::NETWORKS {
|
let mut error = None;
|
||||||
if network == NetworkId::Serai {
|
for network in serai_client::primitives::EXTERNAL_NETWORKS {
|
||||||
continue;
|
let network_res = self.publish(network).await;
|
||||||
};
|
// We made progress if any network successfully published their slash report
|
||||||
|
made_progress |= network_res == Ok(true);
|
||||||
let mut txn = self.db.txn();
|
// We want to yield the first error *after* attempting for every network
|
||||||
let Some((session, slash_report)) = SlashReports::take(&mut txn, network) else {
|
error = error.or(network_res.err());
|
||||||
// No slash report to publish
|
}
|
||||||
continue;
|
// Yield the error
|
||||||
};
|
if let Some(error) = error {
|
||||||
|
Err(error)?
|
||||||
let serai =
|
|
||||||
self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
|
|
||||||
let serai = serai.validator_sets();
|
|
||||||
let session_after_slash_report = Session(session.0 + 1);
|
|
||||||
let current_session = serai.session(network).await.map_err(|e| format!("{e:?}"))?;
|
|
||||||
let current_session = current_session.map(|session| session.0);
|
|
||||||
// Only attempt to publish the slash report for session #n while session #n+1 is still
|
|
||||||
// active
|
|
||||||
let session_after_slash_report_retired =
|
|
||||||
current_session > Some(session_after_slash_report.0);
|
|
||||||
if session_after_slash_report_retired {
|
|
||||||
// Commit the txn to drain this slash report from the database and not try it again later
|
|
||||||
txn.commit();
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
if Some(session_after_slash_report.0) != current_session {
|
|
||||||
// We already checked the current session wasn't greater, and they're not equal
|
|
||||||
assert!(current_session < Some(session_after_slash_report.0));
|
|
||||||
// This would mean the Serai node is resyncing and is behind where it prior was
|
|
||||||
Err("have a slash report for a session Serai has yet to retire".to_string())?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// If this session which should publish a slash report already has, move on
|
|
||||||
let key_pending_slash_report =
|
|
||||||
serai.key_pending_slash_report(network).await.map_err(|e| format!("{e:?}"))?;
|
|
||||||
if key_pending_slash_report.is_none() {
|
|
||||||
txn.commit();
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
|
|
||||||
match self.serai.publish(&slash_report).await {
|
|
||||||
Ok(()) => {
|
|
||||||
txn.commit();
|
|
||||||
made_progress = true;
|
|
||||||
}
|
|
||||||
// This could be specific to this TX (such as an already in mempool error) and it may be
|
|
||||||
// worthwhile to continue iteration with the other pending slash reports. We assume this
|
|
||||||
// error ephemeral and that the latency incurred for this ephemeral error to resolve is
|
|
||||||
// miniscule compared to the window available to publish the slash report. That makes
|
|
||||||
// this a non-issue.
|
|
||||||
Err(e) => Err(format!("couldn't publish slash report transaction: {e:?}"))?,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
Ok(made_progress)
|
Ok(made_progress)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ use std::sync::Arc;
|
|||||||
|
|
||||||
use serai_db::{DbTxn, Db};
|
use serai_db::{DbTxn, Db};
|
||||||
|
|
||||||
use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet, Serai};
|
use serai_client::{validator_sets::primitives::ExternalValidatorSet, Serai};
|
||||||
|
|
||||||
use serai_task::ContinuallyRan;
|
use serai_task::ContinuallyRan;
|
||||||
|
|
||||||
@@ -28,21 +28,19 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {
|
|||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
let mut made_progress = false;
|
let mut made_progress = false;
|
||||||
for network in serai_client::primitives::NETWORKS {
|
for network in serai_client::primitives::EXTERNAL_NETWORKS {
|
||||||
if network == NetworkId::Serai {
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut txn = self.db.txn();
|
let mut txn = self.db.txn();
|
||||||
let Some((session, keys)) = Keys::take(&mut txn, network) else {
|
let Some((session, keys)) = Keys::take(&mut txn, network) else {
|
||||||
// No keys to set
|
// No keys to set
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// This uses the latest finalized block, not the latest cosigned block, which should be
|
||||||
|
// fine as in the worst case, the only impact is no longer attempting TX publication
|
||||||
let serai =
|
let serai =
|
||||||
self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
|
self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
|
||||||
let serai = serai.validator_sets();
|
let serai = serai.validator_sets();
|
||||||
let current_session = serai.session(network).await.map_err(|e| format!("{e:?}"))?;
|
let current_session = serai.session(network.into()).await.map_err(|e| format!("{e:?}"))?;
|
||||||
let current_session = current_session.map(|session| session.0);
|
let current_session = current_session.map(|session| session.0);
|
||||||
// Only attempt to set these keys if this isn't a retired session
|
// Only attempt to set these keys if this isn't a retired session
|
||||||
if Some(session.0) < current_session {
|
if Some(session.0) < current_session {
|
||||||
@@ -60,7 +58,7 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {
|
|||||||
|
|
||||||
// If this session already has had its keys set, move on
|
// If this session already has had its keys set, move on
|
||||||
if serai
|
if serai
|
||||||
.keys(ValidatorSet { network, session })
|
.keys(ExternalValidatorSet { network, session })
|
||||||
.await
|
.await
|
||||||
.map_err(|e| format!("{e:?}"))?
|
.map_err(|e| format!("{e:?}"))?
|
||||||
.is_some()
|
.is_some()
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ license = "AGPL-3.0-only"
|
|||||||
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tributary-sdk"
|
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tributary-sdk"
|
||||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
rust-version = "1.81"
|
rust-version = "1.85"
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
all-features = true
|
all-features = true
|
||||||
@@ -24,11 +24,12 @@ zeroize = { version = "^1.5", default-features = false, features = ["std"] }
|
|||||||
rand = { version = "0.8", default-features = false, features = ["std"] }
|
rand = { version = "0.8", default-features = false, features = ["std"] }
|
||||||
rand_chacha = { version = "0.3", default-features = false, features = ["std"] }
|
rand_chacha = { version = "0.3", default-features = false, features = ["std"] }
|
||||||
|
|
||||||
blake2 = { version = "0.10", default-features = false, features = ["std"] }
|
blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }
|
||||||
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["std", "recommended"] }
|
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["std", "recommended"] }
|
||||||
|
|
||||||
ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", version = "0.4", default-features = false, features = ["std", "ristretto"] }
|
ciphersuite = { path = "../../crypto/ciphersuite", version = "0.4", default-features = false, features = ["std"] }
|
||||||
schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", version = "0.5", default-features = false, features = ["std"] }
|
dalek-ff-group = { path = "../../crypto/dalek-ff-group", default-features = false, features = ["std"] }
|
||||||
|
schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", version = "0.5", default-features = false, features = ["std", "aggregate"] }
|
||||||
|
|
||||||
hex = { version = "0.4", default-features = false, features = ["std"] }
|
hex = { version = "0.4", default-features = false, features = ["std"] }
|
||||||
log = { version = "0.4", default-features = false, features = ["std"] }
|
log = { version = "0.4", default-features = false, features = ["std"] }
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
AGPL-3.0-only license
|
AGPL-3.0-only license
|
||||||
|
|
||||||
Copyright (c) 2023 Luke Parker
|
Copyright (c) 2023-2025 Luke Parker
|
||||||
|
|
||||||
This program is free software: you can redistribute it and/or modify
|
This program is free software: you can redistribute it and/or modify
|
||||||
it under the terms of the GNU Affero General Public License Version 3 as
|
it under the terms of the GNU Affero General Public License Version 3 as
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
use std::collections::{VecDeque, HashSet};
|
use std::collections::{VecDeque, HashSet};
|
||||||
|
|
||||||
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
|
use dalek_ff_group::Ristretto;
|
||||||
|
use ciphersuite::{group::GroupEncoding, *};
|
||||||
|
|
||||||
use serai_db::{Get, DbTxn, Db};
|
use serai_db::{Get, DbTxn, Db};
|
||||||
|
|
||||||
@@ -20,7 +21,7 @@ pub(crate) struct Blockchain<D: Db, T: TransactionTrait> {
|
|||||||
|
|
||||||
block_number: u64,
|
block_number: u64,
|
||||||
tip: [u8; 32],
|
tip: [u8; 32],
|
||||||
participants: HashSet<<Ristretto as Ciphersuite>::G>,
|
participants: HashSet<[u8; 32]>,
|
||||||
|
|
||||||
provided: ProvidedTransactions<D, T>,
|
provided: ProvidedTransactions<D, T>,
|
||||||
mempool: Mempool<D, T>,
|
mempool: Mempool<D, T>,
|
||||||
@@ -55,7 +56,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
|
|||||||
}
|
}
|
||||||
fn next_nonce_key(
|
fn next_nonce_key(
|
||||||
genesis: &[u8; 32],
|
genesis: &[u8; 32],
|
||||||
signer: &<Ristretto as Ciphersuite>::G,
|
signer: &<Ristretto as WrappedGroup>::G,
|
||||||
order: &[u8],
|
order: &[u8],
|
||||||
) -> Vec<u8> {
|
) -> Vec<u8> {
|
||||||
D::key(
|
D::key(
|
||||||
@@ -68,12 +69,15 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
|
|||||||
pub(crate) fn new(
|
pub(crate) fn new(
|
||||||
db: D,
|
db: D,
|
||||||
genesis: [u8; 32],
|
genesis: [u8; 32],
|
||||||
participants: &[<Ristretto as Ciphersuite>::G],
|
participants: &[<Ristretto as WrappedGroup>::G],
|
||||||
) -> Self {
|
) -> Self {
|
||||||
let mut res = Self {
|
let mut res = Self {
|
||||||
db: Some(db.clone()),
|
db: Some(db.clone()),
|
||||||
genesis,
|
genesis,
|
||||||
participants: participants.iter().copied().collect(),
|
participants: participants
|
||||||
|
.iter()
|
||||||
|
.map(<<Ristretto as WrappedGroup>::G as GroupEncoding>::to_bytes)
|
||||||
|
.collect(),
|
||||||
|
|
||||||
block_number: 0,
|
block_number: 0,
|
||||||
tip: genesis,
|
tip: genesis,
|
||||||
@@ -172,7 +176,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
|
|||||||
|
|
||||||
self.mempool.add::<N, _>(
|
self.mempool.add::<N, _>(
|
||||||
|signer, order| {
|
|signer, order| {
|
||||||
if self.participants.contains(&signer) {
|
if self.participants.contains(&signer.to_bytes()) {
|
||||||
Some(
|
Some(
|
||||||
db.get(Self::next_nonce_key(&self.genesis, &signer, &order))
|
db.get(Self::next_nonce_key(&self.genesis, &signer, &order))
|
||||||
.map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())),
|
.map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())),
|
||||||
@@ -195,13 +199,13 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
|
|||||||
|
|
||||||
pub(crate) fn next_nonce(
|
pub(crate) fn next_nonce(
|
||||||
&self,
|
&self,
|
||||||
signer: &<Ristretto as Ciphersuite>::G,
|
signer: &<Ristretto as WrappedGroup>::G,
|
||||||
order: &[u8],
|
order: &[u8],
|
||||||
) -> Option<u32> {
|
) -> Option<u32> {
|
||||||
if let Some(next_nonce) = self.mempool.next_nonce_in_mempool(signer, order.to_vec()) {
|
if let Some(next_nonce) = self.mempool.next_nonce_in_mempool(signer, order.to_vec()) {
|
||||||
return Some(next_nonce);
|
return Some(next_nonce);
|
||||||
}
|
}
|
||||||
if self.participants.contains(signer) {
|
if self.participants.contains(&signer.to_bytes()) {
|
||||||
Some(
|
Some(
|
||||||
self
|
self
|
||||||
.db
|
.db
|
||||||
@@ -250,7 +254,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
|
|||||||
self.tip,
|
self.tip,
|
||||||
self.provided.transactions.clone(),
|
self.provided.transactions.clone(),
|
||||||
&mut |signer, order| {
|
&mut |signer, order| {
|
||||||
if self.participants.contains(signer) {
|
if self.participants.contains(&signer.to_bytes()) {
|
||||||
let key = Self::next_nonce_key(&self.genesis, signer, order);
|
let key = Self::next_nonce_key(&self.genesis, signer, order);
|
||||||
let next = txn
|
let next = txn
|
||||||
.get(&key)
|
.get(&key)
|
||||||
|
|||||||
@@ -3,7 +3,8 @@ use std::{sync::Arc, io};
|
|||||||
|
|
||||||
use zeroize::Zeroizing;
|
use zeroize::Zeroizing;
|
||||||
|
|
||||||
use ciphersuite::{Ciphersuite, Ristretto};
|
use ciphersuite::*;
|
||||||
|
use dalek_ff_group::Ristretto;
|
||||||
|
|
||||||
use scale::Decode;
|
use scale::Decode;
|
||||||
use futures_channel::mpsc::UnboundedReceiver;
|
use futures_channel::mpsc::UnboundedReceiver;
|
||||||
@@ -161,8 +162,8 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
|
|||||||
db: D,
|
db: D,
|
||||||
genesis: [u8; 32],
|
genesis: [u8; 32],
|
||||||
start_time: u64,
|
start_time: u64,
|
||||||
key: Zeroizing<<Ristretto as Ciphersuite>::F>,
|
key: Zeroizing<<Ristretto as WrappedGroup>::F>,
|
||||||
validators: Vec<(<Ristretto as Ciphersuite>::G, u64)>,
|
validators: Vec<(<Ristretto as WrappedGroup>::G, u64)>,
|
||||||
p2p: P,
|
p2p: P,
|
||||||
) -> Option<Self> {
|
) -> Option<Self> {
|
||||||
log::info!("new Tributary with genesis {}", hex::encode(genesis));
|
log::info!("new Tributary with genesis {}", hex::encode(genesis));
|
||||||
@@ -234,7 +235,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
|
|||||||
|
|
||||||
pub async fn next_nonce(
|
pub async fn next_nonce(
|
||||||
&self,
|
&self,
|
||||||
signer: &<Ristretto as Ciphersuite>::G,
|
signer: &<Ristretto as WrappedGroup>::G,
|
||||||
order: &[u8],
|
order: &[u8],
|
||||||
) -> Option<u32> {
|
) -> Option<u32> {
|
||||||
self.network.blockchain.read().await.next_nonce(signer, order)
|
self.network.blockchain.read().await.next_nonce(signer, order)
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
|
||||||
use ciphersuite::{Ciphersuite, Ristretto};
|
use dalek_ff_group::Ristretto;
|
||||||
|
use ciphersuite::{group::GroupEncoding, *};
|
||||||
|
|
||||||
use serai_db::{DbTxn, Db};
|
use serai_db::{DbTxn, Db};
|
||||||
|
|
||||||
@@ -20,9 +21,9 @@ pub(crate) struct Mempool<D: Db, T: TransactionTrait> {
|
|||||||
db: D,
|
db: D,
|
||||||
genesis: [u8; 32],
|
genesis: [u8; 32],
|
||||||
|
|
||||||
last_nonce_in_mempool: HashMap<(<Ristretto as Ciphersuite>::G, Vec<u8>), u32>,
|
last_nonce_in_mempool: HashMap<([u8; 32], Vec<u8>), u32>,
|
||||||
txs: HashMap<[u8; 32], Transaction<T>>,
|
txs: HashMap<[u8; 32], Transaction<T>>,
|
||||||
txs_per_signer: HashMap<<Ristretto as Ciphersuite>::G, u32>,
|
txs_per_signer: HashMap<[u8; 32], u32>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db, T: TransactionTrait> Mempool<D, T> {
|
impl<D: Db, T: TransactionTrait> Mempool<D, T> {
|
||||||
@@ -81,6 +82,7 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
|
|||||||
}
|
}
|
||||||
Transaction::Application(tx) => match tx.kind() {
|
Transaction::Application(tx) => match tx.kind() {
|
||||||
TransactionKind::Signed(order, Signed { signer, nonce, .. }) => {
|
TransactionKind::Signed(order, Signed { signer, nonce, .. }) => {
|
||||||
|
let signer = signer.to_bytes();
|
||||||
let amount = *res.txs_per_signer.get(&signer).unwrap_or(&0) + 1;
|
let amount = *res.txs_per_signer.get(&signer).unwrap_or(&0) + 1;
|
||||||
res.txs_per_signer.insert(signer, amount);
|
res.txs_per_signer.insert(signer, amount);
|
||||||
|
|
||||||
@@ -106,7 +108,7 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
|
|||||||
// Returns Ok(true) if new, Ok(false) if an already present unsigned, or the error.
|
// Returns Ok(true) if new, Ok(false) if an already present unsigned, or the error.
|
||||||
pub(crate) fn add<
|
pub(crate) fn add<
|
||||||
N: Network,
|
N: Network,
|
||||||
F: FnOnce(<Ristretto as Ciphersuite>::G, Vec<u8>) -> Option<u32>,
|
F: FnOnce(<Ristretto as WrappedGroup>::G, Vec<u8>) -> Option<u32>,
|
||||||
>(
|
>(
|
||||||
&mut self,
|
&mut self,
|
||||||
blockchain_next_nonce: F,
|
blockchain_next_nonce: F,
|
||||||
@@ -139,6 +141,8 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
|
|||||||
};
|
};
|
||||||
let mut next_nonce = blockchain_next_nonce;
|
let mut next_nonce = blockchain_next_nonce;
|
||||||
|
|
||||||
|
let signer = signer.to_bytes();
|
||||||
|
|
||||||
if let Some(mempool_last_nonce) =
|
if let Some(mempool_last_nonce) =
|
||||||
self.last_nonce_in_mempool.get(&(signer, order.clone()))
|
self.last_nonce_in_mempool.get(&(signer, order.clone()))
|
||||||
{
|
{
|
||||||
@@ -178,10 +182,10 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
|
|||||||
// Returns None if the mempool doesn't have a nonce tracked.
|
// Returns None if the mempool doesn't have a nonce tracked.
|
||||||
pub(crate) fn next_nonce_in_mempool(
|
pub(crate) fn next_nonce_in_mempool(
|
||||||
&self,
|
&self,
|
||||||
signer: &<Ristretto as Ciphersuite>::G,
|
signer: &<Ristretto as WrappedGroup>::G,
|
||||||
order: Vec<u8>,
|
order: Vec<u8>,
|
||||||
) -> Option<u32> {
|
) -> Option<u32> {
|
||||||
self.last_nonce_in_mempool.get(&(*signer, order)).copied().map(|nonce| nonce + 1)
|
self.last_nonce_in_mempool.get(&(signer.to_bytes(), order)).copied().map(|nonce| nonce + 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get transactions to include in a block.
|
/// Get transactions to include in a block.
|
||||||
@@ -242,6 +246,8 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
|
|||||||
|
|
||||||
if let Some(tx) = self.txs.remove(tx) {
|
if let Some(tx) = self.txs.remove(tx) {
|
||||||
if let TransactionKind::Signed(order, Signed { signer, nonce, .. }) = tx.kind() {
|
if let TransactionKind::Signed(order, Signed { signer, nonce, .. }) = tx.kind() {
|
||||||
|
let signer = signer.to_bytes();
|
||||||
|
|
||||||
let amount = *self.txs_per_signer.get(&signer).unwrap() - 1;
|
let amount = *self.txs_per_signer.get(&signer).unwrap() - 1;
|
||||||
self.txs_per_signer.insert(signer, amount);
|
self.txs_per_signer.insert(signer, amount);
|
||||||
|
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ pub(crate) fn merkle(hash_args: &[[u8; 32]]) -> [u8; 32] {
|
|||||||
let zero = [0; 32];
|
let zero = [0; 32];
|
||||||
let mut interim;
|
let mut interim;
|
||||||
while hashes.len() > 1 {
|
while hashes.len() > 1 {
|
||||||
interim = Vec::with_capacity((hashes.len() + 1) / 2);
|
interim = Vec::with_capacity(hashes.len().div_ceil(2));
|
||||||
|
|
||||||
let mut i = 0;
|
let mut i = 0;
|
||||||
while i < hashes.len() {
|
while i < hashes.len() {
|
||||||
|
|||||||
@@ -10,12 +10,10 @@ use rand_chacha::ChaCha12Rng;
|
|||||||
use transcript::{Transcript, RecommendedTranscript};
|
use transcript::{Transcript, RecommendedTranscript};
|
||||||
|
|
||||||
use ciphersuite::{
|
use ciphersuite::{
|
||||||
group::{
|
group::{ff::PrimeField, GroupEncoding},
|
||||||
GroupEncoding,
|
*,
|
||||||
ff::{Field, PrimeField},
|
|
||||||
},
|
|
||||||
Ciphersuite, Ristretto,
|
|
||||||
};
|
};
|
||||||
|
use dalek_ff_group::Ristretto;
|
||||||
use schnorr::{
|
use schnorr::{
|
||||||
SchnorrSignature,
|
SchnorrSignature,
|
||||||
aggregate::{SchnorrAggregator, SchnorrAggregate},
|
aggregate::{SchnorrAggregator, SchnorrAggregate},
|
||||||
@@ -50,24 +48,26 @@ fn challenge(
|
|||||||
key: [u8; 32],
|
key: [u8; 32],
|
||||||
nonce: &[u8],
|
nonce: &[u8],
|
||||||
msg: &[u8],
|
msg: &[u8],
|
||||||
) -> <Ristretto as Ciphersuite>::F {
|
) -> <Ristretto as WrappedGroup>::F {
|
||||||
let mut transcript = RecommendedTranscript::new(b"Tributary Chain Tendermint Message");
|
let mut transcript = RecommendedTranscript::new(b"Tributary Chain Tendermint Message");
|
||||||
transcript.append_message(b"genesis", genesis);
|
transcript.append_message(b"genesis", genesis);
|
||||||
transcript.append_message(b"key", key);
|
transcript.append_message(b"key", key);
|
||||||
transcript.append_message(b"nonce", nonce);
|
transcript.append_message(b"nonce", nonce);
|
||||||
transcript.append_message(b"message", msg);
|
transcript.append_message(b"message", msg);
|
||||||
|
|
||||||
<Ristretto as Ciphersuite>::F::from_bytes_mod_order_wide(&transcript.challenge(b"schnorr").into())
|
<Ristretto as WrappedGroup>::F::from_bytes_mod_order_wide(
|
||||||
|
&transcript.challenge(b"schnorr").into(),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||||
pub struct Signer {
|
pub struct Signer {
|
||||||
genesis: [u8; 32],
|
genesis: [u8; 32],
|
||||||
key: Zeroizing<<Ristretto as Ciphersuite>::F>,
|
key: Zeroizing<<Ristretto as WrappedGroup>::F>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Signer {
|
impl Signer {
|
||||||
pub(crate) fn new(genesis: [u8; 32], key: Zeroizing<<Ristretto as Ciphersuite>::F>) -> Signer {
|
pub(crate) fn new(genesis: [u8; 32], key: Zeroizing<<Ristretto as WrappedGroup>::F>) -> Signer {
|
||||||
Signer { genesis, key }
|
Signer { genesis, key }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -100,10 +100,10 @@ impl SignerTrait for Signer {
|
|||||||
assert_eq!(nonce_ref, [0; 64].as_ref());
|
assert_eq!(nonce_ref, [0; 64].as_ref());
|
||||||
|
|
||||||
let nonce =
|
let nonce =
|
||||||
Zeroizing::new(<Ristretto as Ciphersuite>::F::from_bytes_mod_order_wide(&nonce_arr));
|
Zeroizing::new(<Ristretto as WrappedGroup>::F::from_bytes_mod_order_wide(&nonce_arr));
|
||||||
nonce_arr.zeroize();
|
nonce_arr.zeroize();
|
||||||
|
|
||||||
assert!(!bool::from(nonce.ct_eq(&<Ristretto as Ciphersuite>::F::ZERO)));
|
assert!(!bool::from(nonce.ct_eq(&<Ristretto as WrappedGroup>::F::ZERO)));
|
||||||
|
|
||||||
let challenge = challenge(
|
let challenge = challenge(
|
||||||
self.genesis,
|
self.genesis,
|
||||||
@@ -132,7 +132,7 @@ pub struct Validators {
|
|||||||
impl Validators {
|
impl Validators {
|
||||||
pub(crate) fn new(
|
pub(crate) fn new(
|
||||||
genesis: [u8; 32],
|
genesis: [u8; 32],
|
||||||
validators: Vec<(<Ristretto as Ciphersuite>::G, u64)>,
|
validators: Vec<(<Ristretto as WrappedGroup>::G, u64)>,
|
||||||
) -> Option<Validators> {
|
) -> Option<Validators> {
|
||||||
let mut total_weight = 0;
|
let mut total_weight = 0;
|
||||||
let mut weights = HashMap::new();
|
let mut weights = HashMap::new();
|
||||||
@@ -163,7 +163,6 @@ impl SignatureScheme for Validators {
|
|||||||
type AggregateSignature = Vec<u8>;
|
type AggregateSignature = Vec<u8>;
|
||||||
type Signer = Arc<Signer>;
|
type Signer = Arc<Signer>;
|
||||||
|
|
||||||
#[must_use]
|
|
||||||
fn verify(&self, validator: Self::ValidatorId, msg: &[u8], sig: &Self::Signature) -> bool {
|
fn verify(&self, validator: Self::ValidatorId, msg: &[u8], sig: &Self::Signature) -> bool {
|
||||||
if !self.weights.contains_key(&validator) {
|
if !self.weights.contains_key(&validator) {
|
||||||
return false;
|
return false;
|
||||||
@@ -196,7 +195,6 @@ impl SignatureScheme for Validators {
|
|||||||
aggregate.serialize()
|
aggregate.serialize()
|
||||||
}
|
}
|
||||||
|
|
||||||
#[must_use]
|
|
||||||
fn verify_aggregate(
|
fn verify_aggregate(
|
||||||
&self,
|
&self,
|
||||||
signers: &[Self::ValidatorId],
|
signers: &[Self::ValidatorId],
|
||||||
@@ -221,7 +219,7 @@ impl SignatureScheme for Validators {
|
|||||||
signers
|
signers
|
||||||
.iter()
|
.iter()
|
||||||
.zip(challenges)
|
.zip(challenges)
|
||||||
.map(|(s, c)| (<Ristretto as Ciphersuite>::read_G(&mut s.as_slice()).unwrap(), c))
|
.map(|(s, c)| (<Ristretto as GroupIo>::read_G(&mut s.as_slice()).unwrap(), c))
|
||||||
.collect::<Vec<_>>()
|
.collect::<Vec<_>>()
|
||||||
.as_slice(),
|
.as_slice(),
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -4,7 +4,8 @@ use scale::{Encode, Decode, IoReader};
|
|||||||
|
|
||||||
use blake2::{Digest, Blake2s256};
|
use blake2::{Digest, Blake2s256};
|
||||||
|
|
||||||
use ciphersuite::{Ciphersuite, Ristretto};
|
use dalek_ff_group::Ristretto;
|
||||||
|
use ciphersuite::*;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
transaction::{Transaction, TransactionKind, TransactionError},
|
transaction::{Transaction, TransactionKind, TransactionError},
|
||||||
@@ -49,7 +50,7 @@ impl Transaction for TendermintTx {
|
|||||||
Blake2s256::digest(self.serialize()).into()
|
Blake2s256::digest(self.serialize()).into()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn sig_hash(&self, _genesis: [u8; 32]) -> <Ristretto as Ciphersuite>::F {
|
fn sig_hash(&self, _genesis: [u8; 32]) -> <Ristretto as WrappedGroup>::F {
|
||||||
match self {
|
match self {
|
||||||
TendermintTx::SlashEvidence(_) => panic!("sig_hash called on slash evidence transaction"),
|
TendermintTx::SlashEvidence(_) => panic!("sig_hash called on slash evidence transaction"),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,10 +1,9 @@
|
|||||||
use std::{sync::Arc, io, collections::HashMap, fmt::Debug};
|
use std::{sync::Arc, io, collections::HashMap, fmt::Debug};
|
||||||
|
|
||||||
use blake2::{Digest, Blake2s256};
|
use blake2::{Digest, Blake2s256};
|
||||||
use ciphersuite::{
|
|
||||||
group::{ff::Field, Group},
|
use dalek_ff_group::Ristretto;
|
||||||
Ciphersuite, Ristretto,
|
use ciphersuite::{group::Group, *};
|
||||||
};
|
|
||||||
use schnorr::SchnorrSignature;
|
use schnorr::SchnorrSignature;
|
||||||
|
|
||||||
use serai_db::MemDb;
|
use serai_db::MemDb;
|
||||||
@@ -30,11 +29,11 @@ impl NonceTransaction {
|
|||||||
nonce,
|
nonce,
|
||||||
distinguisher,
|
distinguisher,
|
||||||
Signed {
|
Signed {
|
||||||
signer: <Ristretto as Ciphersuite>::G::identity(),
|
signer: <Ristretto as WrappedGroup>::G::identity(),
|
||||||
nonce,
|
nonce,
|
||||||
signature: SchnorrSignature::<Ristretto> {
|
signature: SchnorrSignature::<Ristretto> {
|
||||||
R: <Ristretto as Ciphersuite>::G::identity(),
|
R: <Ristretto as WrappedGroup>::G::identity(),
|
||||||
s: <Ristretto as Ciphersuite>::F::ZERO,
|
s: <Ristretto as WrappedGroup>::F::ZERO,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -10,7 +10,8 @@ use rand::rngs::OsRng;
|
|||||||
|
|
||||||
use blake2::{Digest, Blake2s256};
|
use blake2::{Digest, Blake2s256};
|
||||||
|
|
||||||
use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
|
use dalek_ff_group::Ristretto;
|
||||||
|
use ciphersuite::*;
|
||||||
|
|
||||||
use serai_db::{DbTxn, Db, MemDb};
|
use serai_db::{DbTxn, Db, MemDb};
|
||||||
|
|
||||||
@@ -30,7 +31,7 @@ type N = TendermintNetwork<MemDb, SignedTransaction, DummyP2p>;
|
|||||||
|
|
||||||
fn new_blockchain<T: TransactionTrait>(
|
fn new_blockchain<T: TransactionTrait>(
|
||||||
genesis: [u8; 32],
|
genesis: [u8; 32],
|
||||||
participants: &[<Ristretto as Ciphersuite>::G],
|
participants: &[<Ristretto as WrappedGroup>::G],
|
||||||
) -> (MemDb, Blockchain<MemDb, T>) {
|
) -> (MemDb, Blockchain<MemDb, T>) {
|
||||||
let db = MemDb::new();
|
let db = MemDb::new();
|
||||||
let blockchain = Blockchain::new(db.clone(), genesis, participants);
|
let blockchain = Blockchain::new(db.clone(), genesis, participants);
|
||||||
@@ -81,7 +82,7 @@ fn invalid_block() {
|
|||||||
assert!(blockchain.verify_block::<N>(&block, &validators, false).is_err());
|
assert!(blockchain.verify_block::<N>(&block, &validators, false).is_err());
|
||||||
}
|
}
|
||||||
|
|
||||||
let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
|
let key = Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut OsRng));
|
||||||
let tx = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 0);
|
let tx = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 0);
|
||||||
|
|
||||||
// Not a participant
|
// Not a participant
|
||||||
@@ -133,7 +134,7 @@ fn invalid_block() {
|
|||||||
blockchain.verify_block::<N>(&block, &validators, false).unwrap();
|
blockchain.verify_block::<N>(&block, &validators, false).unwrap();
|
||||||
match &mut block.transactions[0] {
|
match &mut block.transactions[0] {
|
||||||
Transaction::Application(tx) => {
|
Transaction::Application(tx) => {
|
||||||
tx.1.signature.s += <Ristretto as Ciphersuite>::F::ONE;
|
tx.1.signature.s += <Ristretto as WrappedGroup>::F::ONE;
|
||||||
}
|
}
|
||||||
_ => panic!("non-signed tx found"),
|
_ => panic!("non-signed tx found"),
|
||||||
}
|
}
|
||||||
@@ -149,7 +150,7 @@ fn invalid_block() {
|
|||||||
fn signed_transaction() {
|
fn signed_transaction() {
|
||||||
let genesis = new_genesis();
|
let genesis = new_genesis();
|
||||||
let validators = Arc::new(Validators::new(genesis, vec![]).unwrap());
|
let validators = Arc::new(Validators::new(genesis, vec![]).unwrap());
|
||||||
let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
|
let key = Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut OsRng));
|
||||||
let tx = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 0);
|
let tx = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 0);
|
||||||
let signer = tx.1.signer;
|
let signer = tx.1.signer;
|
||||||
|
|
||||||
@@ -338,7 +339,7 @@ fn provided_transaction() {
|
|||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn tendermint_evidence_tx() {
|
async fn tendermint_evidence_tx() {
|
||||||
let genesis = new_genesis();
|
let genesis = new_genesis();
|
||||||
let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
|
let key = Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut OsRng));
|
||||||
let signer = Signer::new(genesis, key.clone());
|
let signer = Signer::new(genesis, key.clone());
|
||||||
let signer_id = Ristretto::generator() * key.deref();
|
let signer_id = Ristretto::generator() * key.deref();
|
||||||
let validators = Arc::new(Validators::new(genesis, vec![(signer_id, 1)]).unwrap());
|
let validators = Arc::new(Validators::new(genesis, vec![(signer_id, 1)]).unwrap());
|
||||||
@@ -378,7 +379,7 @@ async fn tendermint_evidence_tx() {
|
|||||||
let mut mempool: Vec<Transaction<SignedTransaction>> = vec![];
|
let mut mempool: Vec<Transaction<SignedTransaction>> = vec![];
|
||||||
let mut signers = vec![];
|
let mut signers = vec![];
|
||||||
for _ in 0 .. 5 {
|
for _ in 0 .. 5 {
|
||||||
let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
|
let key = Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut OsRng));
|
||||||
let signer = Signer::new(genesis, key.clone());
|
let signer = Signer::new(genesis, key.clone());
|
||||||
let signer_id = Ristretto::generator() * key.deref();
|
let signer_id = Ristretto::generator() * key.deref();
|
||||||
signers.push((signer_id, 1));
|
signers.push((signer_id, 1));
|
||||||
@@ -445,7 +446,7 @@ async fn block_tx_ordering() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let genesis = new_genesis();
|
let genesis = new_genesis();
|
||||||
let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
|
let key = Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut OsRng));
|
||||||
|
|
||||||
// signer
|
// signer
|
||||||
let signer = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 0).1.signer;
|
let signer = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 0).1.signer;
|
||||||
|
|||||||
@@ -3,7 +3,8 @@ use std::{sync::Arc, collections::HashMap};
|
|||||||
use zeroize::Zeroizing;
|
use zeroize::Zeroizing;
|
||||||
use rand::{RngCore, rngs::OsRng};
|
use rand::{RngCore, rngs::OsRng};
|
||||||
|
|
||||||
use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
|
use dalek_ff_group::Ristretto;
|
||||||
|
use ciphersuite::*;
|
||||||
|
|
||||||
use tendermint::ext::Commit;
|
use tendermint::ext::Commit;
|
||||||
|
|
||||||
@@ -32,7 +33,7 @@ async fn mempool_addition() {
|
|||||||
Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })
|
Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })
|
||||||
};
|
};
|
||||||
let unsigned_in_chain = |_: [u8; 32]| false;
|
let unsigned_in_chain = |_: [u8; 32]| false;
|
||||||
let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
|
let key = Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut OsRng));
|
||||||
|
|
||||||
let first_tx = signed_transaction(&mut OsRng, genesis, &key, 0);
|
let first_tx = signed_transaction(&mut OsRng, genesis, &key, 0);
|
||||||
let signer = first_tx.1.signer;
|
let signer = first_tx.1.signer;
|
||||||
@@ -124,7 +125,7 @@ async fn mempool_addition() {
|
|||||||
|
|
||||||
// If the mempool doesn't have a nonce for an account, it should successfully use the
|
// If the mempool doesn't have a nonce for an account, it should successfully use the
|
||||||
// blockchain's
|
// blockchain's
|
||||||
let second_key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
|
let second_key = Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut OsRng));
|
||||||
let tx = signed_transaction(&mut OsRng, genesis, &second_key, 2);
|
let tx = signed_transaction(&mut OsRng, genesis, &second_key, 2);
|
||||||
let second_signer = tx.1.signer;
|
let second_signer = tx.1.signer;
|
||||||
assert_eq!(mempool.next_nonce_in_mempool(&second_signer, vec![]), None);
|
assert_eq!(mempool.next_nonce_in_mempool(&second_signer, vec![]), None);
|
||||||
@@ -164,7 +165,7 @@ fn too_many_mempool() {
|
|||||||
Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })
|
Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })
|
||||||
};
|
};
|
||||||
let unsigned_in_chain = |_: [u8; 32]| false;
|
let unsigned_in_chain = |_: [u8; 32]| false;
|
||||||
let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
|
let key = Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut OsRng));
|
||||||
|
|
||||||
// We should be able to add transactions up to the limit
|
// We should be able to add transactions up to the limit
|
||||||
for i in 0 .. ACCOUNT_MEMPOOL_LIMIT {
|
for i in 0 .. ACCOUNT_MEMPOOL_LIMIT {
|
||||||
|
|||||||
@@ -6,10 +6,8 @@ use rand::{RngCore, CryptoRng, rngs::OsRng};
|
|||||||
|
|
||||||
use blake2::{Digest, Blake2s256};
|
use blake2::{Digest, Blake2s256};
|
||||||
|
|
||||||
use ciphersuite::{
|
use dalek_ff_group::Ristretto;
|
||||||
group::{ff::Field, Group},
|
use ciphersuite::*;
|
||||||
Ciphersuite, Ristretto,
|
|
||||||
};
|
|
||||||
use schnorr::SchnorrSignature;
|
use schnorr::SchnorrSignature;
|
||||||
|
|
||||||
use scale::Encode;
|
use scale::Encode;
|
||||||
@@ -33,11 +31,11 @@ mod tendermint;
|
|||||||
|
|
||||||
pub fn random_signed<R: RngCore + CryptoRng>(rng: &mut R) -> Signed {
|
pub fn random_signed<R: RngCore + CryptoRng>(rng: &mut R) -> Signed {
|
||||||
Signed {
|
Signed {
|
||||||
signer: <Ristretto as Ciphersuite>::G::random(&mut *rng),
|
signer: <Ristretto as WrappedGroup>::G::random(&mut *rng),
|
||||||
nonce: u32::try_from(rng.next_u64() >> 32 >> 1).unwrap(),
|
nonce: u32::try_from(rng.next_u64() >> 32 >> 1).unwrap(),
|
||||||
signature: SchnorrSignature::<Ristretto> {
|
signature: SchnorrSignature::<Ristretto> {
|
||||||
R: <Ristretto as Ciphersuite>::G::random(&mut *rng),
|
R: <Ristretto as WrappedGroup>::G::random(&mut *rng),
|
||||||
s: <Ristretto as Ciphersuite>::F::random(rng),
|
s: <Ristretto as WrappedGroup>::F::random(rng),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -136,18 +134,18 @@ impl Transaction for SignedTransaction {
|
|||||||
pub fn signed_transaction<R: RngCore + CryptoRng>(
|
pub fn signed_transaction<R: RngCore + CryptoRng>(
|
||||||
rng: &mut R,
|
rng: &mut R,
|
||||||
genesis: [u8; 32],
|
genesis: [u8; 32],
|
||||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
key: &Zeroizing<<Ristretto as WrappedGroup>::F>,
|
||||||
nonce: u32,
|
nonce: u32,
|
||||||
) -> SignedTransaction {
|
) -> SignedTransaction {
|
||||||
let mut data = vec![0; 512];
|
let mut data = vec![0; 512];
|
||||||
rng.fill_bytes(&mut data);
|
rng.fill_bytes(&mut data);
|
||||||
|
|
||||||
let signer = <Ristretto as Ciphersuite>::generator() * **key;
|
let signer = <Ristretto as WrappedGroup>::generator() * **key;
|
||||||
|
|
||||||
let mut tx =
|
let mut tx =
|
||||||
SignedTransaction(data, Signed { signer, nonce, signature: random_signed(rng).signature });
|
SignedTransaction(data, Signed { signer, nonce, signature: random_signed(rng).signature });
|
||||||
|
|
||||||
let sig_nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(rng));
|
let sig_nonce = Zeroizing::new(<Ristretto as WrappedGroup>::F::random(rng));
|
||||||
tx.1.signature.R = Ristretto::generator() * sig_nonce.deref();
|
tx.1.signature.R = Ristretto::generator() * sig_nonce.deref();
|
||||||
tx.1.signature = SchnorrSignature::sign(key, sig_nonce, tx.sig_hash(genesis));
|
tx.1.signature = SchnorrSignature::sign(key, sig_nonce, tx.sig_hash(genesis));
|
||||||
|
|
||||||
@@ -162,7 +160,7 @@ pub fn random_signed_transaction<R: RngCore + CryptoRng>(
|
|||||||
let mut genesis = [0; 32];
|
let mut genesis = [0; 32];
|
||||||
rng.fill_bytes(&mut genesis);
|
rng.fill_bytes(&mut genesis);
|
||||||
|
|
||||||
let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut *rng));
|
let key = Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut *rng));
|
||||||
// Shift over an additional bit to ensure it won't overflow when incremented
|
// Shift over an additional bit to ensure it won't overflow when incremented
|
||||||
let nonce = u32::try_from(rng.next_u64() >> 32 >> 1).unwrap();
|
let nonce = u32::try_from(rng.next_u64() >> 32 >> 1).unwrap();
|
||||||
|
|
||||||
@@ -179,12 +177,11 @@ pub async fn tendermint_meta() -> ([u8; 32], Signer, [u8; 32], Arc<Validators>)
|
|||||||
// signer
|
// signer
|
||||||
let genesis = new_genesis();
|
let genesis = new_genesis();
|
||||||
let signer =
|
let signer =
|
||||||
Signer::new(genesis, Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng)));
|
Signer::new(genesis, Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut OsRng)));
|
||||||
let validator_id = signer.validator_id().await.unwrap();
|
let validator_id = signer.validator_id().await.unwrap();
|
||||||
|
|
||||||
// schema
|
// schema
|
||||||
let signer_pub =
|
let signer_pub = <Ristretto as GroupIo>::read_G::<&[u8]>(&mut validator_id.as_slice()).unwrap();
|
||||||
<Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut validator_id.as_slice()).unwrap();
|
|
||||||
let validators = Arc::new(Validators::new(genesis, vec![(signer_pub, 1)]).unwrap());
|
let validators = Arc::new(Validators::new(genesis, vec![(signer_pub, 1)]).unwrap());
|
||||||
|
|
||||||
(genesis, signer, validator_id, validators)
|
(genesis, signer, validator_id, validators)
|
||||||
|
|||||||
@@ -2,7 +2,8 @@ use rand::rngs::OsRng;
|
|||||||
|
|
||||||
use blake2::{Digest, Blake2s256};
|
use blake2::{Digest, Blake2s256};
|
||||||
|
|
||||||
use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
|
use dalek_ff_group::Ristretto;
|
||||||
|
use ciphersuite::*;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
ReadWrite,
|
ReadWrite,
|
||||||
@@ -68,7 +69,7 @@ fn signed_transaction() {
|
|||||||
}
|
}
|
||||||
{
|
{
|
||||||
let mut tx = tx.clone();
|
let mut tx = tx.clone();
|
||||||
tx.1.signature.s += <Ristretto as Ciphersuite>::F::ONE;
|
tx.1.signature.s += <Ristretto as WrappedGroup>::F::ONE;
|
||||||
assert!(verify_transaction(&tx, genesis, &mut |_, _| Some(tx.1.nonce)).is_err());
|
assert!(verify_transaction(&tx, genesis, &mut |_, _| Some(tx.1.nonce)).is_err());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -3,7 +3,8 @@ use std::sync::Arc;
|
|||||||
use zeroize::Zeroizing;
|
use zeroize::Zeroizing;
|
||||||
use rand::{RngCore, rngs::OsRng};
|
use rand::{RngCore, rngs::OsRng};
|
||||||
|
|
||||||
use ciphersuite::{Ristretto, Ciphersuite, group::ff::Field};
|
use dalek_ff_group::Ristretto;
|
||||||
|
use ciphersuite::*;
|
||||||
|
|
||||||
use scale::Encode;
|
use scale::Encode;
|
||||||
|
|
||||||
@@ -260,7 +261,7 @@ async fn conflicting_msgs_evidence_tx() {
|
|||||||
let signed_1 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![0x11]))).await;
|
let signed_1 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![0x11]))).await;
|
||||||
|
|
||||||
let signer_2 =
|
let signer_2 =
|
||||||
Signer::new(genesis, Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng)));
|
Signer::new(genesis, Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut OsRng)));
|
||||||
let signed_id_2 = signer_2.validator_id().await.unwrap();
|
let signed_id_2 = signer_2.validator_id().await.unwrap();
|
||||||
let signed_2 = signed_from_data::<N>(
|
let signed_2 = signed_from_data::<N>(
|
||||||
signer_2.into(),
|
signer_2.into(),
|
||||||
@@ -277,10 +278,9 @@ async fn conflicting_msgs_evidence_tx() {
|
|||||||
));
|
));
|
||||||
|
|
||||||
// update schema so that we don't fail due to invalid signature
|
// update schema so that we don't fail due to invalid signature
|
||||||
let signer_pub =
|
let signer_pub = <Ristretto as GroupIo>::read_G::<&[u8]>(&mut signer_id.as_slice()).unwrap();
|
||||||
<Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut signer_id.as_slice()).unwrap();
|
|
||||||
let signer_pub_2 =
|
let signer_pub_2 =
|
||||||
<Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut signed_id_2.as_slice()).unwrap();
|
<Ristretto as GroupIo>::read_G::<&[u8]>(&mut signed_id_2.as_slice()).unwrap();
|
||||||
let validators =
|
let validators =
|
||||||
Arc::new(Validators::new(genesis, vec![(signer_pub, 1), (signer_pub_2, 1)]).unwrap());
|
Arc::new(Validators::new(genesis, vec![(signer_pub, 1), (signer_pub_2, 1)]).unwrap());
|
||||||
|
|
||||||
|
|||||||
@@ -8,8 +8,9 @@ use blake2::{Digest, Blake2b512};
|
|||||||
|
|
||||||
use ciphersuite::{
|
use ciphersuite::{
|
||||||
group::{Group, GroupEncoding},
|
group::{Group, GroupEncoding},
|
||||||
Ciphersuite, Ristretto,
|
*,
|
||||||
};
|
};
|
||||||
|
use dalek_ff_group::Ristretto;
|
||||||
use schnorr::SchnorrSignature;
|
use schnorr::SchnorrSignature;
|
||||||
|
|
||||||
use crate::{TRANSACTION_SIZE_LIMIT, ReadWrite};
|
use crate::{TRANSACTION_SIZE_LIMIT, ReadWrite};
|
||||||
@@ -42,7 +43,7 @@ pub enum TransactionError {
|
|||||||
/// Data for a signed transaction.
|
/// Data for a signed transaction.
|
||||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||||
pub struct Signed {
|
pub struct Signed {
|
||||||
pub signer: <Ristretto as Ciphersuite>::G,
|
pub signer: <Ristretto as WrappedGroup>::G,
|
||||||
pub nonce: u32,
|
pub nonce: u32,
|
||||||
pub signature: SchnorrSignature<Ristretto>,
|
pub signature: SchnorrSignature<Ristretto>,
|
||||||
}
|
}
|
||||||
@@ -159,10 +160,10 @@ pub trait Transaction: 'static + Send + Sync + Clone + Eq + Debug + ReadWrite {
|
|||||||
/// Do not override this unless you know what you're doing.
|
/// Do not override this unless you know what you're doing.
|
||||||
///
|
///
|
||||||
/// Panics if called on non-signed transactions.
|
/// Panics if called on non-signed transactions.
|
||||||
fn sig_hash(&self, genesis: [u8; 32]) -> <Ristretto as Ciphersuite>::F {
|
fn sig_hash(&self, genesis: [u8; 32]) -> <Ristretto as WrappedGroup>::F {
|
||||||
match self.kind() {
|
match self.kind() {
|
||||||
TransactionKind::Signed(order, Signed { signature, .. }) => {
|
TransactionKind::Signed(order, Signed { signature, .. }) => {
|
||||||
<Ristretto as Ciphersuite>::F::from_bytes_mod_order_wide(
|
<Ristretto as WrappedGroup>::F::from_bytes_mod_order_wide(
|
||||||
&Blake2b512::digest(
|
&Blake2b512::digest(
|
||||||
[
|
[
|
||||||
b"Tributary Signed Transaction",
|
b"Tributary Signed Transaction",
|
||||||
@@ -181,8 +182,8 @@ pub trait Transaction: 'static + Send + Sync + Clone + Eq + Debug + ReadWrite {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub trait GAIN: FnMut(&<Ristretto as Ciphersuite>::G, &[u8]) -> Option<u32> {}
|
pub trait GAIN: FnMut(&<Ristretto as WrappedGroup>::G, &[u8]) -> Option<u32> {}
|
||||||
impl<F: FnMut(&<Ristretto as Ciphersuite>::G, &[u8]) -> Option<u32>> GAIN for F {}
|
impl<F: FnMut(&<Ristretto as WrappedGroup>::G, &[u8]) -> Option<u32>> GAIN for F {}
|
||||||
|
|
||||||
pub(crate) fn verify_transaction<F: GAIN, T: Transaction>(
|
pub(crate) fn verify_transaction<F: GAIN, T: Transaction>(
|
||||||
tx: &T,
|
tx: &T,
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ license = "MIT"
|
|||||||
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tendermint"
|
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tendermint"
|
||||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
rust-version = "1.81"
|
rust-version = "1.75"
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
all-features = true
|
all-features = true
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user