Mirror of https://github.com/serai-dex/serai.git
Synced 2025-12-12 14:09:25 +00:00

Compare commits (343 commits)
201a444e89 ...
| SHA1 |
|---|
| ca93c82156 |
| 5b1875dae6 |
| bcd68441be |
| 4ebf9ad9c7 |
| 807572199c |
| 3cdc1536c5 |
| 9e13e5ebff |
| 9b2c254eee |
| 0883479068 |
| c5480c63be |
| 4280ee6987 |
| 91673d7ae3 |
| 927f07b62b |
| 7e774d6d2d |
| fccd06b376 |
| e3edc0a7fc |
| 9c47ef2658 |
| e1b6b638c6 |
| c24768f922 |
| 87ee879dea |
| b5603560e8 |
| 5818f1a41c |
| 1b781b4b57 |
| 94faf098b6 |
| 03e45f73cd |
| 63f7e220c0 |
| 7d49366373 |
| 55ed33d2d1 |
| 138a0e9b40 |
| 4fc7263ac3 |
| f27fd59fa6 |
| 437f0e9a93 |
| cc5d38f1ce |
| 0ce025e0c2 |
| 224cf4ea21 |
| a9b1e5293c |
| 80009ab67f |
| df9fda2971 |
| ca8afb83a1 |
| 18a9cf2535 |
| 10c126ad92 |
| 19305aebc9 |
| be68e27551 |
| d6d96fe8ff |
| 95909d83a4 |
| 3bd48974f3 |
| 29093715e3 |
| 87b4dfc8f3 |
| 4db78b1787 |
| 02a5f15535 |
| 865e351f96 |
| ea275df26c |
| 2216ade8c4 |
| 5265cc69de |
| a141deaf36 |
| 215e41fdb6 |
| 41c34d7f11 |
| 974bc82387 |
| 47ef24a7cc |
| c0e48867e1 |
| 0066b94d38 |
| 7d54c02ec6 |
| 568324f631 |
| 2a02a8dc59 |
| eaa9a0e5a6 |
| 251996c1b0 |
| 98b9cc82a7 |
| 263d75d380 |
| 030185c7fc |
| e2dc5db7aa |
| 90bc364f9f |
| a4811c9a41 |
| 12cfa6b2a5 |
| 0c71b6fc4d |
| ffe1b60a11 |
| 5526b8d439 |
| beac35c119 |
| 62bb75e09a |
| 45bd376c08 |
| da190759a9 |
| f2d399ba1e |
| 220bcbc592 |
| 85949f4b04 |
| f8adfb56ad |
| 2f833dec77 |
| e3e41324c9 |
| 6ed7c5d65e |
| 9dddfd91c8 |
| c24b694fb2 |
| 738babf7e9 |
| 33faa53b56 |
| 8c366107ae |
| 7a790f3a20 |
| a7c77f8b5f |
| da3095ed15 |
| 758d422595 |
| 9841061b49 |
| 4122a0135f |
| b63ef32864 |
| 8be03a8fc2 |
| 677a2e5749 |
| 38bda1d586 |
| 2bc2ca6906 |
| 900a6612d7 |
| 17c1d5cd6b |
| 8a1b56a928 |
| 75964cf6da |
| d407e35cee |
| c8ef044acb |
| ddbc32de4d |
| e5ccfac19e |
| 432daae1d1 |
| da3a85efe5 |
| 1e0240123d |
| f6d4d1b084 |
| 1b37dd2951 |
| f32e0609f1 |
| ca85f9ba0c |
| cfd1cb3a37 |
| f2c13a0040 |
| 961f46bc04 |
| 2c4de3bab4 |
| 95c30720d2 |
| ceede14f5c |
| 5e60ea9718 |
| 153f6f2f2f |
| 104c0d4492 |
| 7c8f13ab28 |
| cb0deadf9a |
| cb489f9cef |
| cc662cb591 |
| a8b8844e3f |
| 82b543ef75 |
| 72e80c1a3d |
| b6edc94bcd |
| cfce2b26e2 |
| e87bbcda64 |
| 9f84adf8b3 |
| 3919cf55ae |
| 38dd8cb191 |
| f2563d39cb |
| 15a9cbef40 |
| 078d6e51e5 |
| 6c33e18745 |
| b743c9a43e |
| 0c2f2979a9 |
| 971951a1a6 |
| 92d9e908cb |
| a32b97be88 |
| e3809b2ff1 |
| fd2d8b4f0a |
| bc81614894 |
| 8df5aa2e2d |
| b000740470 |
| b9f554111d |
| 354c408e3e |
| df3b60376a |
| 8d209c652e |
| 9ddad794b4 |
| b934e484cc |
| f8aee9b3c8 |
| f51d77d26a |
| 0780deb643 |
| 75c38560f4 |
| 9f1c5268a5 |
| 35b113768b |
| f2595c4939 |
| 8fcfa6d3d5 |
| 54c9d19726 |
| 25324c3cd5 |
| ecb7df85b0 |
| 68c7acdbef |
| 8b60feed92 |
| 5c895efcd0 |
| 60e55656aa |
| 9536282418 |
| 8297d0679d |
| d9f854b08a |
| 8aaf7f7dc6 |
| ce447558ac |
| fc850da30e |
| d6f6cf1965 |
| 4438b51881 |
| 6ae0d9fad7 |
| ad08b410a8 |
| ec3cfd3ab7 |
| 01eb2daa0b |
| 885000f970 |
| 4be506414b |
| 1143d84e1d |
| 336922101f |
| ffa033d978 |
| 23f986f57a |
| bb726b58af |
| 387615705c |
| c7f825a192 |
| d363b1c173 |
| d5077ae966 |
| 188fcc3cb4 |
| cbab9486c6 |
| a5f4c450c6 |
| 4f65a0b147 |
| feb18d64a7 |
| cb1e6535cb |
| 6b8cf6653a |
| b426bfcfe8 |
| 21ce50ecf7 |
| a4ceb2e756 |
| b59b1f59dd |
| cc4a65e82a |
| eab5d9e64f |
| 4e0c58464f |
| 205da3fd38 |
| f7e63d4944 |
| b5608fc3d2 |
| 33018bf6da |
| bef90b2f1a |
| 184c02714a |
| 5a7b815e2e |
| 22e411981a |
| 11d48d0685 |
| e4cc23b72d |
| 52d853c8ba |
| 9c33a711d7 |
| a275023cfc |
| 258c02ff39 |
| 3655dc723f |
| 315d4fb356 |
| 2bc880e372 |
| 19422de231 |
| fa0dadc9bd |
| f004c8726f |
| 835b5bb06f |
| 0484113254 |
| 17cc10b3f7 |
| 7e01589fba |
| f8c3acae7b |
| 0957460f27 |
| ea00ba9ff8 |
| a9625364df |
| 75c6427d7c |
| e742a6b0ec |
| 5164a710a2 |
| 27c1dc4646 |
| 3892fa30b7 |
| ed599c8ab5 |
| 29bb5e21ab |
| 604a4b2442 |
| 977dcad86d |
| cefc542744 |
| 164fe9a14f |
| f948881eba |
| 201b675031 |
| 3d44766eff |
| a63a86ba79 |
| e922264ebf |
| 7e53eff642 |
| 669b8b776b |
| 6508957cbc |
| 373e794d2c |
| c8f3a32fdf |
| f690bf831f |
| 0b30ac175e |
| 47560fa9a9 |
| 9d57c4eb4d |
| 642ba00952 |
| 3c9c12d320 |
| f6b52b3fd3 |
| 0d906363a0 |
| 8222ce78d8 |
| cb906242e7 |
| 2a19e9da93 |
| 2226dd59cc |
| be2098d2e1 |
| 6b41f32371 |
| 19b87c7f5a |
| 505f1b20a4 |
| 8b52b921f3 |
| f36bbcba25 |
| 167826aa88 |
| bea4f92b7a |
| 7312fa8d3c |
| 92a4cceeeb |
| 3357181fe2 |
| 7ce5bdad44 |
| 0de3fda921 |
| cb410cc4e0 |
| 6c145a5ec3 |
| a7fef2ba7a |
| 291ebf5e24 |
| 5e0e91c85d |
| b5a6b0693e |
| 3cc2abfedc |
| 0ce9aad9b2 |
| e35aa04afb |
| e7de5125a2 |
| 158140c3a7 |
| df9a9adaa8 |
| d854807edd |
| f501d46d44 |
| 74106b025f |
| e731b546ab |
| 77d60660d2 |
| 3c664ff05f |
| c05b0c9eba |
| 6d5049cab2 |
| 1419ba570a |
| 542bf2170a |
| 378d6b90cf |
| cbe83956aa |
| 091d485fd8 |
| 2a3eaf4d7e |
| 23122712cb |
| 47eb793ce9 |
| 9b0b5fd1e2 |
| 893a24a1cc |
| b101e2211a |
| e9c1235b76 |
| dc1b8dfccd |
| d0201cf2e5 |
| f3d20e60b3 |
| dafba81b40 |
| 91f8ec53d9 |
| fc9a4a08b8 |
| 45fadb21ac |
| 28619fbee1 |
| bbe014c3a7 |
| fb3fadb3d3 |
| f481d20773 |
| 599b2dec8f |
| 435f1d9ae1 |
| d7ecab605e |
| 805fea52ec |
| 48db06f901 |
| e9d0a5e0ed |
| 44d05518aa |
| 23b433fe6c |
| 2e57168a97 |
| 5c6160c398 |
| 9eee1d971e |
| e6300847d6 |
| e0a3e7bea6 |
| cbebaa1349 |
.github/actions/LICENSE → .github/LICENSE (2 changes, vendored)
@@ -1,6 +1,6 @@
 MIT License

-Copyright (c) 2022-2023 Luke Parker
+Copyright (c) 2022-2025 Luke Parker

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
.github/actions/bitcoin/action.yml (4 changes, vendored)
@@ -5,14 +5,14 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: "27.0"
+    default: "30.0"

 runs:
   using: "composite"
   steps:
     - name: Bitcoin Daemon Cache
       id: cache-bitcoind
-      uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
+      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
       with:
         path: bitcoin.tar.gz
         key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
.github/actions/build-dependencies/action.yml (54 changes, vendored)
@@ -7,13 +7,20 @@ runs:
     - name: Remove unused packages
       shell: bash
       run: |
-        sudo apt remove -y "*msbuild*" "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
+        # Ensure the repositories are synced
+        sudo apt update -y
+
+        # Actually perform the removals
+        sudo apt remove -y "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
         sudo apt remove -y "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*"
         sudo apt remove -y "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*"
+
+        sudo apt remove -y --allow-remove-essential -f shim-signed *python3*
+        # This removal command requires the prior removals due to unmet dependencies otherwise
         sudo apt remove -y "*qemu*" "*sql*" "*texinfo*" "*imagemagick*"
-        sudo apt autoremove -y
-        sudo apt clean
-        docker system prune -a --volumes
+
+        # Reinstall python3 as a general dependency of a functional operating system
+        sudo apt install -y python3 --fix-missing
       if: runner.os == 'Linux'

     - name: Remove unused packages
@@ -31,19 +38,48 @@ runs:
       shell: bash
       run: |
         if [ "$RUNNER_OS" == "Linux" ]; then
-          sudo apt install -y ca-certificates protobuf-compiler
+          sudo apt install -y ca-certificates protobuf-compiler libclang-dev
         elif [ "$RUNNER_OS" == "Windows" ]; then
           choco install protoc
         elif [ "$RUNNER_OS" == "macOS" ]; then
-          brew install protobuf
+          brew install protobuf llvm
+          HOMEBREW_ROOT_PATH=/opt/homebrew # Apple Silicon
+          if [ $(uname -m) = "x86_64" ]; then HOMEBREW_ROOT_PATH=/usr/local; fi # Intel
+          ls $HOMEBREW_ROOT_PATH/opt/llvm/lib | grep "libclang.dylib" # Make sure this installed `libclang`
+          echo "DYLD_LIBRARY_PATH=$HOMEBREW_ROOT_PATH/opt/llvm/lib:$DYLD_LIBRARY_PATH" >> "$GITHUB_ENV"
         fi

     - name: Install solc
       shell: bash
       run: |
-        cargo install svm-rs
-        svm install 0.8.26
-        svm use 0.8.26
+        cargo +1.91 install svm-rs --version =0.5.19
+        svm install 0.8.29
+        svm use 0.8.29

+    - name: Remove preinstalled Docker
+      shell: bash
+      run: |
+        docker system prune -a --volumes
+        sudo apt remove -y *docker*
+        # Install uidmap which will be required for the explicitly installed Docker
+        sudo apt install uidmap
+      if: runner.os == 'Linux'
+
+    - name: Update system dependencies
+      shell: bash
+      run: |
+        sudo apt update -y
+        sudo apt upgrade -y
+        sudo apt autoremove -y
+        sudo apt clean
+      if: runner.os == 'Linux'
+
+    - name: Install rootless Docker
+      uses: docker/setup-docker-action@b60f85385d03ac8acfca6d9996982511d8620a19
+      with:
+        rootless: true
+        set-host: true
+      if: runner.os == 'Linux'
+
     # - name: Cache Rust
     #   uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43
.github/actions/monero-wallet-rpc/action.yml (4 changes, vendored)
@@ -5,14 +5,14 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: v0.18.3.4
+    default: v0.18.4.3

 runs:
   using: "composite"
   steps:
     - name: Monero Wallet RPC Cache
       id: cache-monero-wallet-rpc
-      uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
+      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
       with:
         path: monero-wallet-rpc
         key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
.github/actions/monero/action.yml (4 changes, vendored)
@@ -5,14 +5,14 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: v0.18.3.4
+    default: v0.18.4.3

 runs:
   using: "composite"
   steps:
     - name: Monero Daemon Cache
       id: cache-monerod
-      uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
+      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
       with:
         path: /usr/bin/monerod
         key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
.github/actions/test-dependencies/action.yml (4 changes, vendored)
@@ -5,12 +5,12 @@ inputs:
   monero-version:
     description: "Monero version to download and run as a regtest node"
     required: false
-    default: v0.18.3.4
+    default: v0.18.4.3

   bitcoin-version:
     description: "Bitcoin version to download and run as a regtest node"
     required: false
-    default: "27.1"
+    default: "30.0"

 runs:
   using: "composite"
.github/nightly-version (2 changes, vendored)
@@ -1 +1 @@
-nightly-2024-07-01
+nightly-2025-11-11
.github/workflows/crypto-tests.yml (14 changes, vendored)
@@ -32,13 +32,17 @@ jobs:
             -p dalek-ff-group \
             -p minimal-ed448 \
             -p ciphersuite \
+            -p ciphersuite-kp256 \
             -p multiexp \
             -p schnorr-signatures \
-            -p dleq \
-            -p generalized-bulletproofs \
-            -p generalized-bulletproofs-circuit-abstraction \
-            -p ec-divisors \
-            -p generalized-bulletproofs-ec-gadgets \
+            -p prime-field \
+            -p short-weierstrass \
+            -p secq256k1 \
+            -p embedwards25519 \
             -p dkg \
+            -p dkg-recovery \
+            -p dkg-dealer \
+            -p dkg-musig \
+            -p dkg-evrf \
             -p modular-frost \
             -p frost-schnorrkel
.github/workflows/daily-deny.yml (6 changes, vendored)
@@ -12,13 +12,13 @@ jobs:
       - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

       - name: Advisory Cache
-        uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
+        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
         with:
           path: ~/.cargo/advisory-db
           key: rust-advisory-db

       - name: Install cargo deny
-        run: cargo install --locked cargo-deny
+        run: cargo +1.91 install cargo-deny --version =0.18.5

       - name: Run cargo deny
-        run: cargo deny -L error --all-features check
+        run: cargo deny -L error --all-features check --hide-inclusion-graph
.github/workflows/lint.yml (117 changes, vendored)
@@ -11,7 +11,7 @@ jobs:
   clippy:
     strategy:
       matrix:
-        os: [ubuntu-latest, macos-13, macos-14, windows-latest]
+        os: [ubuntu-latest, macos-15-intel, macos-latest, windows-latest]
     runs-on: ${{ matrix.os }}

     steps:
@@ -26,7 +26,7 @@ jobs:
         uses: ./.github/actions/build-dependencies

       - name: Install nightly rust
-        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32-unknown-unknown -c clippy
+        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c clippy

       - name: Run Clippy
         run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module
@@ -46,16 +46,16 @@ jobs:
       - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

       - name: Advisory Cache
-        uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
+        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
         with:
           path: ~/.cargo/advisory-db
           key: rust-advisory-db

       - name: Install cargo deny
-        run: cargo install --locked cargo-deny
+        run: cargo +1.91 install cargo-deny --version =0.18.5

       - name: Run cargo deny
-        run: cargo deny -L error --all-features check
+        run: cargo deny -L error --all-features check --hide-inclusion-graph

   fmt:
     runs-on: ubuntu-latest
@@ -88,19 +88,114 @@ jobs:
       - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Verify all dependencies are in use
         run: |
-          cargo install cargo-machete
-          cargo machete
+          cargo +1.91 install cargo-machete --version =0.9.1
+          cargo +1.91 machete

+  msrv:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - name: Verify claimed `rust-version`
+        shell: bash
+        run: |
+          cargo +1.91 install cargo-msrv --version =0.18.4
+
+          function check_msrv {
+            # We `cd` into the directory passed as the first argument, but will return to the
+            # directory called from.
+            return_to=$(pwd)
+            echo "Checking $1"
+            cd $1
+
+            # We then find the existing `rust-version` using `grep` (for the right line) and then a
+            # regex (to strip to just the major and minor version).
+            existing=$(cat ./Cargo.toml | grep "rust-version" | grep -Eo "[0-9]+\.[0-9]+")
+
+            # We then backup the `Cargo.toml`, allowing us to restore it after, saving time on future
+            # MSRV checks (as they'll benefit from immediately exiting if the queried version is less
+            # than the declared MSRV).
+            mv ./Cargo.toml ./Cargo.toml.bak
+
+            # We then use an inverted (`-v`) grep to remove the existing `rust-version` from the
+            # `Cargo.toml`, as required because else earlier versions of Rust won't even attempt to
+            # compile this crate.
+            cat ./Cargo.toml.bak | grep -v "rust-version" > Cargo.toml
+
+            # We then find the actual `rust-version` using `cargo-msrv` (again stripping to just the
+            # major and minor version).
+            actual=$(cargo msrv find --output-format minimal | grep -Eo "^[0-9]+\.[0-9]+")
+
+            # Finally, we compare the two.
+            echo "Declared rust-version: $existing"
+            echo "Actual rust-version: $actual"
+            [ $existing == $actual ]
+            result=$?
+
+            # Restore the original `Cargo.toml`.
+            rm Cargo.toml
+            mv ./Cargo.toml.bak ./Cargo.toml
+
+            # Return to the directory called from and return the result.
+            cd $return_to
+            return $result
+          }
+
+          # Check each member of the workspace
+          function check_workspace {
+            # Get the members array from the workspace's `Cargo.toml`
+            cargo_toml_lines=$(cat ./Cargo.toml | wc -l)
+            # Keep all lines after the start of the array, then keep all lines before the next "]"
+            members=$(cat Cargo.toml | grep "members\ \=\ \[" -m1 -A$cargo_toml_lines | grep "]" -m1 -B$cargo_toml_lines)
+
+            # Parse out any comments, whitespace, including comments post-fixed on the same line as an entry
+            # We accomplish the latter by pruning all characters after the entry's ","
+            members=$(echo "$members" | grep -Ev "^[[:space:]]*(#|$)" | awk -F',' '{print $1","}')
+            # Replace the first line, which was "members = [" and is now "members = [,", with "["
+            members=$(echo "$members" | sed "1s/.*/\[/")
+            # Correct the last line, which was malleated to "],"
+            members=$(echo "$members" | sed "$(echo "$members" | wc -l)s/\]\,/\]/")
+
+            # Don't check the following
+            # Most of these are binaries, with the exception of the Substrate runtime which has a
+            # bespoke build pipeline
+            members=$(echo "$members" | grep -v "networks/ethereum/relayer\"")
+            members=$(echo "$members" | grep -v "message-queue\"")
+            members=$(echo "$members" | grep -v "processor/bin\"")
+            members=$(echo "$members" | grep -v "processor/bitcoin\"")
+            members=$(echo "$members" | grep -v "processor/ethereum\"")
+            members=$(echo "$members" | grep -v "processor/monero\"")
+            members=$(echo "$members" | grep -v "coordinator\"")
+            members=$(echo "$members" | grep -v "substrate/runtime\"")
+            members=$(echo "$members" | grep -v "substrate/node\"")
+            members=$(echo "$members" | grep -v "orchestration\"")
+
+            # Don't check the tests
+            members=$(echo "$members" | grep -v "mini\"")
+            members=$(echo "$members" | grep -v "tests/")
+
+            # Remove the trailing comma by replacing the last line's "," with ""
+            members=$(echo "$members" | sed "$(($(echo "$members" | wc -l) - 1))s/\,//")
+
+            echo $members | jq -r ".[]" | while read -r member; do
+              check_msrv $member
+              correct=$?
+              if [ $correct -ne 0 ]; then
+                return $correct
+              fi
+            done
+          }
+          check_workspace
+
   slither:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
       - name: Slither
         run: |
-          python3 -m pip install solc-select
-          solc-select install 0.8.26
-          solc-select use 0.8.26
-
           python3 -m pip install slither-analyzer

           slither --include-paths ./networks/ethereum/schnorr/contracts/Schnorr.sol
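The member-parsing pipeline inside the new `msrv` job above is fairly dense, so here is a small, hedged illustration of what it computes. The workspace manifest below is hypothetical (written to `/tmp`, not this repository's `Cargo.toml`), and the real job additionally greps out binaries and test crates before iterating over the members:

```bash
# Hypothetical workspace manifest used only to illustrate the parsing pipeline
# from the `msrv` job in lint.yml; it is not this repository's Cargo.toml.
cat > /tmp/Cargo.toml <<'EOF'
[workspace]
members = [
  # a comment-only line, pruned by the grep -Ev below
  "common/std-shims",
  "crypto/dkg", # a trailing comment, pruned by cutting after the first ","
  "tests/docker",
]
EOF
cd /tmp

cargo_toml_lines=$(cat ./Cargo.toml | wc -l)
# Keep everything from "members = [" to the first "]"
members=$(cat Cargo.toml | grep "members\ \=\ \[" -m1 -A$cargo_toml_lines | grep "]" -m1 -B$cargo_toml_lines)
# Drop comment/blank lines, then cut each entry after its first ","
members=$(echo "$members" | grep -Ev "^[[:space:]]*(#|$)" | awk -F',' '{print $1","}')
# Turn the mangled first/last lines back into "[" and "]", and drop the final trailing comma
members=$(echo "$members" | sed "1s/.*/\[/")
members=$(echo "$members" | sed "$(echo "$members" | wc -l)s/\]\,/\]/")
members=$(echo "$members" | sed "$(($(echo "$members" | wc -l) - 1))s/\,//")

# The result is a JSON array; this prints:
#   common/std-shims
#   crypto/dkg
#   tests/docker
echo $members | jq -r ".[]"
```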
.github/workflows/monero-tests.yaml (77 changes, vendored, file deleted)
@@ -1,77 +0,0 @@
-name: Monero Tests
-
-on:
-  push:
-    branches:
-      - develop
-    paths:
-      - "networks/monero/**"
-      - "processor/**"
-
-  pull_request:
-    paths:
-      - "networks/monero/**"
-      - "processor/**"
-
-  workflow_dispatch:
-
-jobs:
-  # Only run these once since they will be consistent regardless of any node
-  unit-tests:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
-
-      - name: Test Dependencies
-        uses: ./.github/actions/test-dependencies
-
-      - name: Run Unit Tests Without Features
-        run: |
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-io --lib
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-generators --lib
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-primitives --lib
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-mlsag --lib
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-clsag --lib
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-borromean --lib
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-bulletproofs --lib
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --lib
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-rpc --lib
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --lib
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-address --lib
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --lib
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-seed --lib
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package polyseed --lib
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --lib
-
-  # Doesn't run unit tests with features as the tests workflow will
-
-  integration-tests:
-    runs-on: ubuntu-latest
-    # Test against all supported protocol versions
-    strategy:
-      matrix:
-        version: [v0.17.3.2, v0.18.3.4]
-
-    steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
-
-      - name: Test Dependencies
-        uses: ./.github/actions/test-dependencies
-        with:
-          monero-version: ${{ matrix.version }}
-
-      - name: Run Integration Tests Without Features
-        run: |
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --test '*'
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --test '*'
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --test '*'
-
-      - name: Run Integration Tests
-        # Don't run if the the tests workflow also will
-        if: ${{ matrix.version != 'v0.18.3.4' }}
-        run: |
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --all-features --test '*'
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --all-features --test '*'
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --all-features --test '*'
.github/workflows/msrv.yml (258 changes, vendored, file deleted)
@@ -1,258 +0,0 @@
-name: Weekly MSRV Check
-
-on:
-  schedule:
-    - cron: "0 0 * * 0"
-  workflow_dispatch:
-
-jobs:
-  msrv-common:
-    name: Run cargo msrv on common
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
-
-      - name: Install Build Dependencies
-        uses: ./.github/actions/build-dependencies
-
-      - name: Install cargo msrv
-        run: cargo install --locked cargo-msrv
-
-      - name: Run cargo msrv on common
-        run: |
-          cargo msrv verify --manifest-path common/zalloc/Cargo.toml
-          cargo msrv verify --manifest-path common/std-shims/Cargo.toml
-          cargo msrv verify --manifest-path common/env/Cargo.toml
-          cargo msrv verify --manifest-path common/db/Cargo.toml
-          cargo msrv verify --manifest-path common/task/Cargo.toml
-          cargo msrv verify --manifest-path common/request/Cargo.toml
-          cargo msrv verify --manifest-path common/patchable-async-sleep/Cargo.toml
-
-  msrv-crypto:
-    name: Run cargo msrv on crypto
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
-
-      - name: Install Build Dependencies
-        uses: ./.github/actions/build-dependencies
-
-      - name: Install cargo msrv
-        run: cargo install --locked cargo-msrv
-
-      - name: Run cargo msrv on crypto
-        run: |
-          cargo msrv verify --manifest-path crypto/transcript/Cargo.toml
-
-          cargo msrv verify --manifest-path crypto/ff-group-tests/Cargo.toml
-          cargo msrv verify --manifest-path crypto/dalek-ff-group/Cargo.toml
-          cargo msrv verify --manifest-path crypto/ed448/Cargo.toml
-
-          cargo msrv verify --manifest-path crypto/multiexp/Cargo.toml
-
-          cargo msrv verify --manifest-path crypto/dleq/Cargo.toml
-          cargo msrv verify --manifest-path crypto/ciphersuite/Cargo.toml
-          cargo msrv verify --manifest-path crypto/schnorr/Cargo.toml
-
-          cargo msrv verify --manifest-path crypto/evrf/generalized-bulletproofs/Cargo.toml
-          cargo msrv verify --manifest-path crypto/evrf/circuit-abstraction/Cargo.toml
-          cargo msrv verify --manifest-path crypto/evrf/divisors/Cargo.toml
-          cargo msrv verify --manifest-path crypto/evrf/ec-gadgets/Cargo.toml
-          cargo msrv verify --manifest-path crypto/evrf/embedwards25519/Cargo.toml
-          cargo msrv verify --manifest-path crypto/evrf/secq256k1/Cargo.toml
-
-          cargo msrv verify --manifest-path crypto/dkg/Cargo.toml
-          cargo msrv verify --manifest-path crypto/frost/Cargo.toml
-          cargo msrv verify --manifest-path crypto/schnorrkel/Cargo.toml
-
-  msrv-networks:
-    name: Run cargo msrv on networks
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
-
-      - name: Install Build Dependencies
-        uses: ./.github/actions/build-dependencies
-
-      - name: Install cargo msrv
-        run: cargo install --locked cargo-msrv
-
-      - name: Run cargo msrv on networks
-        run: |
-          cargo msrv verify --manifest-path networks/bitcoin/Cargo.toml
-
-          cargo msrv verify --manifest-path networks/ethereum/build-contracts/Cargo.toml
-          cargo msrv verify --manifest-path networks/ethereum/schnorr/Cargo.toml
-          cargo msrv verify --manifest-path networks/ethereum/alloy-simple-request-transport/Cargo.toml
-          cargo msrv verify --manifest-path networks/ethereum/relayer/Cargo.toml --features parity-db
-
-          cargo msrv verify --manifest-path networks/monero/io/Cargo.toml
-          cargo msrv verify --manifest-path networks/monero/generators/Cargo.toml
-          cargo msrv verify --manifest-path networks/monero/primitives/Cargo.toml
-          cargo msrv verify --manifest-path networks/monero/ringct/mlsag/Cargo.toml
-          cargo msrv verify --manifest-path networks/monero/ringct/clsag/Cargo.toml
-          cargo msrv verify --manifest-path networks/monero/ringct/borromean/Cargo.toml
-          cargo msrv verify --manifest-path networks/monero/ringct/bulletproofs/Cargo.toml
-          cargo msrv verify --manifest-path networks/monero/Cargo.toml
-          cargo msrv verify --manifest-path networks/monero/rpc/Cargo.toml
-          cargo msrv verify --manifest-path networks/monero/rpc/simple-request/Cargo.toml
-          cargo msrv verify --manifest-path networks/monero/wallet/address/Cargo.toml
-          cargo msrv verify --manifest-path networks/monero/wallet/Cargo.toml
-          cargo msrv verify --manifest-path networks/monero/verify-chain/Cargo.toml
-
-  msrv-message-queue:
-    name: Run cargo msrv on message-queue
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
-
-      - name: Install Build Dependencies
-        uses: ./.github/actions/build-dependencies
-
-      - name: Install cargo msrv
-        run: cargo install --locked cargo-msrv
-
-      - name: Run cargo msrv on message-queue
-        run: |
-          cargo msrv verify --manifest-path message-queue/Cargo.toml --features parity-db
-
-  msrv-processor:
-    name: Run cargo msrv on processor
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
-
-      - name: Install Build Dependencies
-        uses: ./.github/actions/build-dependencies
-
-      - name: Install cargo msrv
-        run: cargo install --locked cargo-msrv
-
-      - name: Run cargo msrv on processor
-        run: |
-          cargo msrv verify --manifest-path processor/view-keys/Cargo.toml
-
-          cargo msrv verify --manifest-path processor/primitives/Cargo.toml
-          cargo msrv verify --manifest-path processor/messages/Cargo.toml
-
-          cargo msrv verify --manifest-path processor/scanner/Cargo.toml
-
-          cargo msrv verify --manifest-path processor/scheduler/primitives/Cargo.toml
-          cargo msrv verify --manifest-path processor/scheduler/smart-contract/Cargo.toml
-          cargo msrv verify --manifest-path processor/scheduler/utxo/primitives/Cargo.toml
-          cargo msrv verify --manifest-path processor/scheduler/utxo/standard/Cargo.toml
-          cargo msrv verify --manifest-path processor/scheduler/utxo/transaction-chaining/Cargo.toml
-
-          cargo msrv verify --manifest-path processor/key-gen/Cargo.toml
-          cargo msrv verify --manifest-path processor/frost-attempt-manager/Cargo.toml
-          cargo msrv verify --manifest-path processor/signers/Cargo.toml
-          cargo msrv verify --manifest-path processor/bin/Cargo.toml --features parity-db
-
-          cargo msrv verify --manifest-path processor/bitcoin/Cargo.toml
-
-          cargo msrv verify --manifest-path processor/ethereum/primitives/Cargo.toml
-          cargo msrv verify --manifest-path processor/ethereum/test-primitives/Cargo.toml
-          cargo msrv verify --manifest-path processor/ethereum/erc20/Cargo.toml
-          cargo msrv verify --manifest-path processor/ethereum/deployer/Cargo.toml
-          cargo msrv verify --manifest-path processor/ethereum/router/Cargo.toml
-          cargo msrv verify --manifest-path processor/ethereum/Cargo.toml
-
-          cargo msrv verify --manifest-path processor/monero/Cargo.toml
-
-  msrv-coordinator:
-    name: Run cargo msrv on coordinator
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
-
-      - name: Install Build Dependencies
-        uses: ./.github/actions/build-dependencies
-
-      - name: Install cargo msrv
-        run: cargo install --locked cargo-msrv
-
-      - name: Run cargo msrv on coordinator
-        run: |
-          cargo msrv verify --manifest-path coordinator/tributary/tendermint/Cargo.toml
-          cargo msrv verify --manifest-path coordinator/tributary/Cargo.toml
-          cargo msrv verify --manifest-path coordinator/cosign/Cargo.toml
-          cargo msrv verify --manifest-path coordinator/substrate/Cargo.toml
-          cargo msrv verify --manifest-path coordinator/p2p/Cargo.toml
-          cargo msrv verify --manifest-path coordinator/p2p/libp2p/Cargo.toml
-          cargo msrv verify --manifest-path coordinator/Cargo.toml
-
-  msrv-substrate:
-    name: Run cargo msrv on substrate
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
-
-      - name: Install Build Dependencies
-        uses: ./.github/actions/build-dependencies
-
-      - name: Install cargo msrv
-        run: cargo install --locked cargo-msrv
-
-      - name: Run cargo msrv on substrate
-        run: |
-          cargo msrv verify --manifest-path substrate/primitives/Cargo.toml
-
-          cargo msrv verify --manifest-path substrate/coins/primitives/Cargo.toml
-          cargo msrv verify --manifest-path substrate/coins/pallet/Cargo.toml
-
-          cargo msrv verify --manifest-path substrate/dex/pallet/Cargo.toml
-
-          cargo msrv verify --manifest-path substrate/economic-security/pallet/Cargo.toml
-
-          cargo msrv verify --manifest-path substrate/genesis-liquidity/primitives/Cargo.toml
-          cargo msrv verify --manifest-path substrate/genesis-liquidity/pallet/Cargo.toml
-
-          cargo msrv verify --manifest-path substrate/in-instructions/primitives/Cargo.toml
-          cargo msrv verify --manifest-path substrate/in-instructions/pallet/Cargo.toml
-
-          cargo msrv verify --manifest-path substrate/validator-sets/pallet/Cargo.toml
-          cargo msrv verify --manifest-path substrate/validator-sets/primitives/Cargo.toml
-
-          cargo msrv verify --manifest-path substrate/emissions/primitives/Cargo.toml
-          cargo msrv verify --manifest-path substrate/emissions/pallet/Cargo.toml
-
-          cargo msrv verify --manifest-path substrate/signals/primitives/Cargo.toml
-          cargo msrv verify --manifest-path substrate/signals/pallet/Cargo.toml
-
-          cargo msrv verify --manifest-path substrate/abi/Cargo.toml
-          cargo msrv verify --manifest-path substrate/client/Cargo.toml
-
-          cargo msrv verify --manifest-path substrate/runtime/Cargo.toml
-          cargo msrv verify --manifest-path substrate/node/Cargo.toml
-
-  msrv-orchestration:
-    name: Run cargo msrv on orchestration
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
-
-      - name: Install Build Dependencies
-        uses: ./.github/actions/build-dependencies
-
-      - name: Install cargo msrv
-        run: cargo install --locked cargo-msrv
-
-      - name: Run cargo msrv on message-queue
-        run: |
-          cargo msrv verify --manifest-path orchestration/Cargo.toml
-
-  msrv-mini:
-    name: Run cargo msrv on mini
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
-
-      - name: Install Build Dependencies
-        uses: ./.github/actions/build-dependencies
-
-      - name: Install cargo msrv
-        run: cargo install --locked cargo-msrv
-
-      - name: Run cargo msrv on mini
-        run: |
-          cargo msrv verify --manifest-path mini/Cargo.toml
.github/workflows/networks-tests.yml (16 changes, vendored)
@@ -34,19 +34,3 @@ jobs:
             -p ethereum-schnorr-contract \
             -p alloy-simple-request-transport \
             -p serai-ethereum-relayer \
-            -p monero-io \
-            -p monero-generators \
-            -p monero-primitives \
-            -p monero-mlsag \
-            -p monero-clsag \
-            -p monero-borromean \
-            -p monero-bulletproofs \
-            -p monero-serai \
-            -p monero-rpc \
-            -p monero-simple-request-rpc \
-            -p monero-address \
-            -p monero-wallet \
-            -p monero-seed \
-            -p polyseed \
-            -p monero-wallet-util \
-            -p monero-serai-verify-chain
.github/workflows/no-std.yml (14 changes, vendored)
@@ -28,8 +28,18 @@ jobs:
       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies

+      - name: Get nightly version to use
+        id: nightly
+        shell: bash
+        run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT
+
       - name: Install RISC-V Toolchain
-        run: sudo apt update && sudo apt install -y gcc-riscv64-unknown-elf gcc-multilib && rustup target add riscv32imac-unknown-none-elf
+        run: |
+          sudo apt update
+          sudo apt install -y gcc-riscv64-unknown-elf gcc-multilib
+          rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal --component rust-src --target riscv32imac-unknown-none-elf

       - name: Verify no-std builds
-        run: CFLAGS=-I/usr/include cargo build --target riscv32imac-unknown-none-elf -p serai-no-std-tests
+        run: |
+          CFLAGS=-I/usr/include cargo +${{ steps.nightly.outputs.version }} build --target riscv32imac-unknown-none-elf -Z build-std=core -p serai-no-std-tests
+          CFLAGS=-I/usr/include cargo +${{ steps.nightly.outputs.version }} build --target riscv32imac-unknown-none-elf -Z build-std=core,alloc -p serai-no-std-tests --features "alloc"
.github/workflows/pages.yml (43 changes, vendored)
@@ -1,6 +1,7 @@
 # MIT License
 #
 # Copyright (c) 2022 just-the-docs
+# Copyright (c) 2022-2024 Luke Parker
 #
 # Permission is hereby granted, free of charge, to any person obtaining a copy
 # of this software and associated documentation files (the "Software"), to deal
@@ -20,31 +21,21 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.

-# This workflow uses actions that are not certified by GitHub.
-# They are provided by a third-party and are governed by
-# separate terms of service, privacy policy, and support
-# documentation.
+name: Deploy Rust docs and Jekyll site to Pages

-# Sample workflow for building and deploying a Jekyll site to GitHub Pages
-name: Deploy Jekyll site to Pages
-
 on:
   push:
     branches:
       - "develop"
-    paths:
-      - "docs/**"

-  # Allows you to run this workflow manually from the Actions tab
   workflow_dispatch:

-# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
 permissions:
   contents: read
   pages: write
   id-token: write

-# Allow one concurrent deployment
+# Only allow one concurrent deployment
 concurrency:
   group: "pages"
   cancel-in-progress: true
@@ -53,27 +44,37 @@ jobs:
   # Build job
   build:
     runs-on: ubuntu-latest
-    defaults:
-      run:
-        working-directory: docs
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Setup Ruby
-        uses: ruby/setup-ruby@v1
+        uses: ruby/setup-ruby@44511735964dcb71245e7e55f72539531f7bc0eb
        with:
          bundler-cache: true
          cache-version: 0
          working-directory: "${{ github.workspace }}/docs"
      - name: Setup Pages
        id: pages
-        uses: actions/configure-pages@v3
+        uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b
       - name: Build with Jekyll
-        run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
+        run: cd ${{ github.workspace }}/docs && bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
         env:
           JEKYLL_ENV: production

+      - name: Get nightly version to use
+        id: nightly
+        shell: bash
+        run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT
+      - name: Build Dependencies
+        uses: ./.github/actions/build-dependencies
+      - name: Buld Rust docs
+        run: |
+          rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-docs
+          RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --no-deps --all-features
+          mv target/doc docs/_site/rust
+
       - name: Upload artifact
-        uses: actions/upload-pages-artifact@v1
+        uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b
         with:
           path: "docs/_site/"
+
@@ -87,4 +88,4 @@ jobs:
     steps:
       - name: Deploy to GitHub Pages
         id: deployment
-        uses: actions/deploy-pages@v2
+        uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e
.github/workflows/tests.yml (3 changes, vendored)
@@ -60,9 +60,10 @@ jobs:
             -p serai-ethereum-processor \
             -p serai-monero-processor \
             -p tendermint-machine \
-            -p tributary-chain \
+            -p tributary-sdk \
             -p serai-cosign \
             -p serai-coordinator-substrate \
+            -p serai-coordinator-tributary \
             -p serai-coordinator-p2p \
             -p serai-coordinator-libp2p-p2p \
             -p serai-coordinator \
.gitignore (7 changes, vendored)
@@ -1,7 +1,14 @@
 target
+
+# Don't commit any `Cargo.lock` which aren't the workspace's
+Cargo.lock
+!./Cargo.lock
+
+# Don't commit any `Dockerfile`, as they're auto-generated, except the only one which isn't
 Dockerfile
 Dockerfile.fast-epoch
 !orchestration/runtime/Dockerfile

 .test-logs
+
 .vscode
Cargo.lock (7359 changes, generated)
File diff suppressed because it is too large
Cargo.toml (153 changes)
@@ -1,20 +1,6 @@
 [workspace]
 resolver = "2"
 members = [
-  # Version patches
-  "patches/parking_lot_core",
-  "patches/parking_lot",
-  "patches/zstd",
-  "patches/rocksdb",
-
-  # std patches
-  "patches/matches",
-  "patches/is-terminal",
-
-  # Rewrites/redirects
-  "patches/option-ext",
-  "patches/directories-next",
-
   "common/std-shims",
   "common/zalloc",
   "common/patchable-async-sleep",
@@ -29,19 +15,21 @@ members = [
   "crypto/dalek-ff-group",
   "crypto/ed448",
   "crypto/ciphersuite",
+  "crypto/ciphersuite/kp256",

   "crypto/multiexp",
   "crypto/schnorr",
-  "crypto/dleq",

-  "crypto/evrf/secq256k1",
-  "crypto/evrf/embedwards25519",
-  "crypto/evrf/generalized-bulletproofs",
-  "crypto/evrf/circuit-abstraction",
-  "crypto/evrf/divisors",
-  "crypto/evrf/ec-gadgets",
+  "crypto/prime-field",
+  "crypto/short-weierstrass",
+  "crypto/secq256k1",
+  "crypto/embedwards25519",

   "crypto/dkg",
+  "crypto/dkg/recovery",
+  "crypto/dkg/dealer",
+  "crypto/dkg/musig",
+  "crypto/dkg/evrf",
   "crypto/frost",
   "crypto/schnorrkel",

@@ -52,23 +40,6 @@ members = [
   "networks/ethereum/alloy-simple-request-transport",
   "networks/ethereum/relayer",

-  "networks/monero/io",
-  "networks/monero/generators",
-  "networks/monero/primitives",
-  "networks/monero/ringct/mlsag",
-  "networks/monero/ringct/clsag",
-  "networks/monero/ringct/borromean",
-  "networks/monero/ringct/bulletproofs",
-  "networks/monero",
-  "networks/monero/rpc",
-  "networks/monero/rpc/simple-request",
-  "networks/monero/wallet/address",
-  "networks/monero/wallet",
-  "networks/monero/wallet/seed",
-  "networks/monero/wallet/polyseed",
-  "networks/monero/wallet/util",
-  "networks/monero/verify-chain",
-
   "message-queue",

   "processor/messages",
@@ -96,10 +67,11 @@ members = [
   "processor/ethereum",
   "processor/monero",

-  "coordinator/tributary/tendermint",
-  "coordinator/tributary",
+  "coordinator/tributary-sdk/tendermint",
+  "coordinator/tributary-sdk",
   "coordinator/cosign",
   "coordinator/substrate",
+  "coordinator/tributary",
   "coordinator/p2p",
   "coordinator/p2p/libp2p",
   "coordinator",
@@ -143,62 +115,89 @@ members = [
+
   "tests/docker",
   "tests/message-queue",
-  "tests/processor",
-  "tests/coordinator",
-  "tests/full-stack",
+  # TODO "tests/processor",
+  # TODO "tests/coordinator",
+  # TODO "tests/full-stack",
   "tests/reproducible-runtime",
 ]

+[profile.dev.package]
 # Always compile Monero (and a variety of dependencies) with optimizations due
 # to the extensive operations required for Bulletproofs
-[profile.dev.package]
 subtle = { opt-level = 3 }

+sha3 = { opt-level = 3 }
+blake2 = { opt-level = 3 }
+
 ff = { opt-level = 3 }
 group = { opt-level = 3 }

 crypto-bigint = { opt-level = 3 }
-secp256k1 = { opt-level = 3 }
 curve25519-dalek = { opt-level = 3 }
 dalek-ff-group = { opt-level = 3 }
-minimal-ed448 = { opt-level = 3 }

 multiexp = { opt-level = 3 }

-secq256k1 = { opt-level = 3 }
-embedwards25519 = { opt-level = 3 }
-generalized-bulletproofs = { opt-level = 3 }
-generalized-bulletproofs-circuit-abstraction = { opt-level = 3 }
-ec-divisors = { opt-level = 3 }
-generalized-bulletproofs-ec-gadgets = { opt-level = 3 }
-
-dkg = { opt-level = 3 }
-
 monero-generators = { opt-level = 3 }
 monero-borromean = { opt-level = 3 }
 monero-bulletproofs = { opt-level = 3 }
 monero-mlsag = { opt-level = 3 }
 monero-clsag = { opt-level = 3 }
+monero-oxide = { opt-level = 3 }
+
+# Always compile the eVRF DKG tree with optimizations as well
+secp256k1 = { opt-level = 3 }
+secq256k1 = { opt-level = 3 }
+embedwards25519 = { opt-level = 3 }
+generalized-bulletproofs = { opt-level = 3 }
+generalized-bulletproofs-circuit-abstraction = { opt-level = 3 }
+generalized-bulletproofs-ec-gadgets = { opt-level = 3 }
+
+# revm also effectively requires being built with optimizations
+revm = { opt-level = 3 }
+revm-bytecode = { opt-level = 3 }
+revm-context = { opt-level = 3 }
+revm-context-interface = { opt-level = 3 }
+revm-database = { opt-level = 3 }
+revm-database-interface = { opt-level = 3 }
+revm-handler = { opt-level = 3 }
+revm-inspector = { opt-level = 3 }
+revm-interpreter = { opt-level = 3 }
+revm-precompile = { opt-level = 3 }
+revm-primitives = { opt-level = 3 }
+revm-state = { opt-level = 3 }

 [profile.release]
 panic = "unwind"
+overflow-checks = true

 [patch.crates-io]
+# Point to empty crates for unused crates in our tree
+ark-ff-3 = { package = "ark-ff", path = "patches/ethereum/ark-ff-0.3" }
+ark-ff-4 = { package = "ark-ff", path = "patches/ethereum/ark-ff-0.4" }
+c-kzg = { path = "patches/ethereum/c-kzg" }
+secp256k1-30 = { package = "secp256k1", path = "patches/ethereum/secp256k1-30" }
+
+# Dependencies from monero-oxide which originate from within our own tree
+std-shims = { path = "patches/std-shims" }
+simple-request = { path = "patches/simple-request" }
+multiexp = { path = "crypto/multiexp" }
+flexible-transcript = { path = "crypto/transcript" }
+ciphersuite = { path = "patches/ciphersuite" }
+dalek-ff-group = { path = "patches/dalek-ff-group" }
+minimal-ed448 = { path = "crypto/ed448" }
+modular-frost = { path = "crypto/frost" }
+
+# This has a non-deprecated `std` alternative since Rust's 2024 edition
+home = { path = "patches/home" }
+
+# Updates to the latest version
+darling = { path = "patches/darling" }
+thiserror = { path = "patches/thiserror" }

 # https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
 lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }

-parking_lot_core = { path = "patches/parking_lot_core" }
-parking_lot = { path = "patches/parking_lot" }
-# wasmtime pulls in an old version for this
-zstd = { path = "patches/zstd" }
-# Needed for WAL compression
-rocksdb = { path = "patches/rocksdb" }
-
-# is-terminal now has an std-based solution with an equivalent API
-is-terminal = { path = "patches/is-terminal" }
-# So does matches
-matches = { path = "patches/matches" }
-
 # directories-next was created because directories was unmaintained
 # directories-next is now unmaintained while directories is maintained
 # The directories author pulls in ridiculously pointless crates and prefers
@@ -207,12 +206,22 @@ matches = { path = "patches/matches" }
 option-ext = { path = "patches/option-ext" }
 directories-next = { path = "patches/directories-next" }

 # The official pasta_curves repo doesn't support Zeroize
 # Patch from a fork back to upstream
|
||||||
pasta_curves = { git = "https://github.com/kayabaNerve/pasta_curves", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616" }
|
parity-bip39 = { path = "patches/parity-bip39" }
|
||||||
|
|
||||||
|
# Patch to include `FromUniformBytes<64>` over `Scalar`
|
||||||
|
k256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" }
|
||||||
|
p256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" }
|
||||||
|
|
||||||
|
# `jemalloc` conflicts with `mimalloc`, so patch to a `rocksdb` which never uses `jemalloc`
|
||||||
|
librocksdb-sys = { path = "patches/librocksdb-sys" }
|
||||||
|
|
||||||
[workspace.lints.clippy]
|
[workspace.lints.clippy]
|
||||||
unwrap_or_default = "allow"
|
unwrap_or_default = "allow"
|
||||||
map_unwrap_or = "allow"
|
map_unwrap_or = "allow"
|
||||||
|
needless_continue = "allow"
|
||||||
|
manual_is_multiple_of = "allow"
|
||||||
|
incompatible_msrv = "allow" # Manually verified with a GitHub workflow
|
||||||
borrow_as_ptr = "deny"
|
borrow_as_ptr = "deny"
|
||||||
cast_lossless = "deny"
|
cast_lossless = "deny"
|
||||||
cast_possible_truncation = "deny"
|
cast_possible_truncation = "deny"
|
||||||
@@ -243,7 +252,6 @@ manual_string_new = "deny"
|
|||||||
match_bool = "deny"
|
match_bool = "deny"
|
||||||
match_same_arms = "deny"
|
match_same_arms = "deny"
|
||||||
missing_fields_in_debug = "deny"
|
missing_fields_in_debug = "deny"
|
||||||
needless_continue = "deny"
|
|
||||||
needless_pass_by_value = "deny"
|
needless_pass_by_value = "deny"
|
||||||
ptr_cast_constness = "deny"
|
ptr_cast_constness = "deny"
|
||||||
range_minus_one = "deny"
|
range_minus_one = "deny"
|
||||||
@@ -252,7 +260,7 @@ redundant_closure_for_method_calls = "deny"
|
|||||||
redundant_else = "deny"
|
redundant_else = "deny"
|
||||||
string_add_assign = "deny"
|
string_add_assign = "deny"
|
||||||
string_slice = "deny"
|
string_slice = "deny"
|
||||||
unchecked_duration_subtraction = "deny"
|
unchecked_time_subtraction = "deny"
|
||||||
uninlined_format_args = "deny"
|
uninlined_format_args = "deny"
|
||||||
unnecessary_box_returns = "deny"
|
unnecessary_box_returns = "deny"
|
||||||
unnecessary_join = "deny"
|
unnecessary_join = "deny"
|
||||||
@@ -261,3 +269,6 @@ unnested_or_patterns = "deny"
|
|||||||
unused_async = "deny"
|
unused_async = "deny"
|
||||||
unused_self = "deny"
|
unused_self = "deny"
|
||||||
zero_sized_map_values = "deny"
|
zero_sized_map_values = "deny"
|
||||||
|
|
||||||
|
[workspace.lints.rust]
|
||||||
|
unused = "allow" # TODO: https://github.com/rust-lang/rust/issues/147648
|
||||||
|
|||||||
2  LICENSE
@@ -5,4 +5,4 @@ a full copy of the AGPL-3.0 License is included in the root of this repository
 as a reference text. This copy should be provided with any distribution of a
 crate licensed under the AGPL-3.0, as per its terms.

-The GitHub actions (`.github/actions`) are licensed under the MIT license.
+The GitHub actions/workflows (`.github`) are licensed under the MIT license.
@@ -59,7 +59,6 @@ issued at the discretion of the Immunefi program managers.
 - [Website](https://serai.exchange/): https://serai.exchange/
 - [Immunefi](https://immunefi.com/bounty/serai/): https://immunefi.com/bounty/serai/
 - [Twitter](https://twitter.com/SeraiDEX): https://twitter.com/SeraiDEX
-- [Mastodon](https://cryptodon.lol/@serai): https://cryptodon.lol/@serai
 - [Discord](https://discord.gg/mpEUtJR3vz): https://discord.gg/mpEUtJR3vz
 - [Matrix](https://matrix.to/#/#serai:matrix.org): https://matrix.to/#/#serai:matrix.org
 - [Reddit](https://www.reddit.com/r/SeraiDEX/): https://www.reddit.com/r/SeraiDEX/
14  audits/Trail of Bits ethereum contracts April 2025/README.md  (Normal file)
@@ -0,0 +1,14 @@
+# Trail of Bits Ethereum Contracts Audit, June 2025
+
+This audit included:
+- Our Schnorr contract and associated library (/networks/ethereum/schnorr)
+- Our Ethereum primitives library (/processor/ethereum/primitives)
+- Our Deployer contract and associated library (/processor/ethereum/deployer)
+- Our ERC20 library (/processor/ethereum/erc20)
+- Our Router contract and associated library (/processor/ethereum/router)
+
+It is encompassing up to commit 4e0c58464fc4673623938335f06e2e9ea96ca8dd.
+
+Please see
+https://github.com/trailofbits/publications/blob/30c4fa3ebf39ff8e4d23ba9567344ec9691697b5/reviews/2025-04-serai-dex-security-review.pdf
+for the actual report.
50  audits/crypto/dkg/evrf/README.md  (Normal file)
@@ -0,0 +1,50 @@
+# eVRF DKG
+
+In 2024, the [eVRF paper](https://eprint.iacr.org/2024/397) was published to
+the IACR preprint server. Within it was a one-round unbiased DKG and a
+one-round unbiased threshold DKG. Unfortunately, both simply describe
+communication of the secret shares as 'Alice sends $s_b$ to Bob'. This causes,
+in practice, the need for an additional round of communication to occur where
+all participants confirm they received their secret shares.
+
+Within Serai, it was posited to use the same premises as the DDH eVRF itself to
+achieve a verifiable encryption scheme. This allows the secret shares to be
+posted to any 'bulletin board' (such as a blockchain) and for all observers to
+confirm:
+
+- A participant participated
+- The secret shares sent can be received by the intended recipient so long as
+  they can access the bulletin board
+
+Additionally, Serai desired a robust scheme (albeit with an biased key as the
+output, which is fine for our purposes). Accordingly, our implementation
+instantiates the threshold eVRF DKG from the eVRF paper, with our own proposal
+for verifiable encryption, with the caller allowed to decide the set of
+participants. They may:
+
+- Select everyone, collapsing to the non-threshold unbiased DKG from the eVRF
+  paper
+- Select a pre-determined set, collapsing to the threshold unbaised DKG from
+  the eVRF paper
+- Select a post-determined set (with any solution for the Common Subset
+  problem), allowing achieving a robust threshold biased DKG
+
+Note that the eVRF paper proposes using the eVRF to sample coefficients yet
+this is unnecessary when the resulting key will be biased. Any proof of
+knowledge for the coefficients, as necessary for their extraction within the
+security proofs, would be sufficient.
+
+MAGIC Grants contracted HashCloak to formalize Serai's proposal for a DKG and
+provide proofs for its security. This resulted in
+[this paper](<./Security Proofs.pdf>).
+
+Our implementation itself is then built on top of the audited
+[`generalized-bulletproofs`](https://github.com/kayabaNerve/monero-oxide/tree/generalized-bulletproofs/audits/crypto/generalized-bulletproofs)
+and
+[`generalized-bulletproofs-ec-gadgets`](https://github.com/monero-oxide/monero-oxide/tree/fcmp%2B%2B/audits/fcmps).
+
+Note we do not use the originally premised DDH eVRF yet the one premised on
+elliptic curve divisors, the methodology of which is commented on
+[here](https://github.com/monero-oxide/monero-oxide/tree/fcmp%2B%2B/audits/divisors).
+
+Our implementation itself is unaudited at this time however.
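The three participant-selection modes described in the eVRF DKG README above could be modeled roughly as follows. This is an illustrative sketch only; `ParticipantSelection` and its variants are hypothetical names, not the API of the `dkg` crate.

/// Hypothetical illustration of the caller's choice of participant set; not the `dkg` crate's API.
pub enum ParticipantSelection {
  /// Everyone participates, collapsing to the non-threshold unbiased DKG from the eVRF paper.
  Everyone,
  /// A pre-determined set of participant indices, collapsing to the threshold unbiased DKG from
  /// the eVRF paper.
  PreDetermined(Vec<u16>),
  /// A post-determined set (decided with any solution to the Common Subset problem), achieving a
  /// robust threshold DKG with a biased key.
  PostDetermined,
}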
BIN  audits/crypto/dkg/evrf/Security Proofs.pdf  (Normal file)
Binary file not shown.
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/db"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
-rust-version = "1.71"
+rust-version = "1.65"

 [package.metadata.docs.rs]
 all-features = true
@@ -17,8 +17,8 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true

 [dependencies]
-parity-db = { version = "0.4", default-features = false, optional = true }
+parity-db = { version = "0.5", default-features = false, optional = true }
-rocksdb = { version = "0.23", default-features = false, features = ["zstd"], optional = true }
+rocksdb = { version = "0.24", default-features = false, features = ["zstd"], optional = true }

 [features]
 parity-db = ["dep:parity-db"]
@@ -1,6 +1,6 @@
 MIT License

-Copyright (c) 2022-2023 Luke Parker
+Copyright (c) 2022-2025 Luke Parker

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
2  common/env/Cargo.toml  (vendored)
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/env"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
-rust-version = "1.71"
+rust-version = "1.64"

 [package.metadata.docs.rs]
 all-features = true
2  common/env/LICENSE  (vendored)
@@ -1,6 +1,6 @@
 AGPL-3.0-only license

-Copyright (c) 2023 Luke Parker
+Copyright (c) 2023-2025 Luke Parker

 This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU Affero General Public License Version 3 as
2  common/env/src/lib.rs  (vendored)
@@ -1,5 +1,5 @@
 #![cfg_attr(docsrs, feature(doc_cfg))]
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]

 // Obtain a variable from the Serai environment/secret store.
 pub fn var(variable: &str) -> Option<String> {
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/patchable-a
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = ["async", "sleep", "tokio", "smol", "async-std"]
 edition = "2021"
-rust-version = "1.71"
+rust-version = "1.70"

 [package.metadata.docs.rs]
 all-features = true
@@ -1,6 +1,6 @@
 MIT License

-Copyright (c) 2024 Luke Parker
+Copyright (c) 2024-2025 Luke Parker

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]

@@ -1,9 +1,9 @@
 [package]
 name = "simple-request"
-version = "0.1.0"
+version = "0.3.0"
 description = "A simple HTTP(S) request library"
 license = "MIT"
-repository = "https://github.com/serai-dex/serai/tree/develop/common/simple-request"
+repository = "https://github.com/serai-dex/serai/tree/develop/common/request"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = ["http", "https", "async", "request", "ssl"]
 edition = "2021"
@@ -19,9 +19,10 @@ workspace = true
 [dependencies]
 tower-service = { version = "0.3", default-features = false }
 hyper = { version = "1", default-features = false, features = ["http1", "client"] }
-hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy", "tokio"] }
+hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy"] }
 http-body-util = { version = "0.1", default-features = false }
-tokio = { version = "1", default-features = false }
+futures-util = { version = "0.3", default-features = false, features = ["std"] }
+tokio = { version = "1", default-features = false, features = ["sync"] }

 hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }

@@ -29,6 +30,8 @@ zeroize = { version = "1", optional = true }
 base64ct = { version = "1", features = ["alloc"], optional = true }

 [features]
-tls = ["hyper-rustls"]
+tokio = ["hyper-util/tokio"]
+tls = ["tokio", "hyper-rustls"]
+webpki-roots = ["tls", "hyper-rustls/webpki-roots"]
 basic-auth = ["zeroize", "base64ct"]
 default = ["tls"]
@@ -1,6 +1,6 @@
 MIT License

-Copyright (c) 2023 Luke Parker
+Copyright (c) 2023-2025 Luke Parker

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -1,19 +1,20 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]

+use core::{pin::Pin, future::Future};
 use std::sync::Arc;

-use tokio::sync::Mutex;
+use futures_util::FutureExt;
+use ::tokio::sync::Mutex;

 use tower_service::Service as TowerService;
+use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest, rt::Executor};
+pub use hyper;

+use hyper_util::client::legacy::{Client as HyperClient, connect::HttpConnector};

 #[cfg(feature = "tls")]
 use hyper_rustls::{HttpsConnectorBuilder, HttpsConnector};
-use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest};
-use hyper_util::{
-rt::tokio::TokioExecutor,
-client::legacy::{Client as HyperClient, connect::HttpConnector},
-};
-pub use hyper;

 mod request;
 pub use request::*;
@@ -37,52 +38,86 @@ type Connector = HttpConnector;
 type Connector = HttpsConnector<HttpConnector>;

 #[derive(Clone, Debug)]
-enum Connection {
+enum Connection<
+E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
+> {
 ConnectionPool(HyperClient<Connector, Full<Bytes>>),
 Connection {
+executor: E,
 connector: Connector,
 host: Uri,
 connection: Arc<Mutex<Option<SendRequest<Full<Bytes>>>>>,
 },
 }

+/// An HTTP client.
+///
+/// `tls` is only guaranteed to work when using the `tokio` executor. Instantiating a client when
+/// the `tls` feature is active without using the `tokio` executor will cause errors.
 #[derive(Clone, Debug)]
-pub struct Client {
+pub struct Client<
-connection: Connection,
+E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
+> {
+connection: Connection<E>,
 }

-impl Client {
+impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>>
-fn connector() -> Connector {
+Client<E>
+{
+#[allow(clippy::unnecessary_wraps)]
+fn connector() -> Result<Connector, Error> {
 let mut res = HttpConnector::new();
 res.set_keepalive(Some(core::time::Duration::from_secs(60)));
 res.set_nodelay(true);
 res.set_reuse_address(true);

+#[cfg(feature = "tls")]
+if core::any::TypeId::of::<E>() !=
+core::any::TypeId::of::<hyper_util::rt::tokio::TokioExecutor>()
+{
+Err(Error::ConnectionError(
+"`tls` feature enabled but not using the `tokio` executor".into(),
+))?;
+}

 #[cfg(feature = "tls")]
 res.enforce_http(false);
 #[cfg(feature = "tls")]
-let res = HttpsConnectorBuilder::new()
+let https = HttpsConnectorBuilder::new().with_native_roots();
-.with_native_roots()
+#[cfg(all(feature = "tls", not(feature = "webpki-roots")))]
-.expect("couldn't fetch system's SSL roots")
+let https = https.map_err(|e| {
-.https_or_http()
+Error::ConnectionError(
-.enable_http1()
+format!("couldn't load system's SSL root certificates and webpki-roots unavilable: {e:?}")
-.wrap_connector(res);
+.into(),
-res
+)
+})?;
+// Fallback to `webpki-roots` if present
+#[cfg(all(feature = "tls", feature = "webpki-roots"))]
+let https = https.unwrap_or(HttpsConnectorBuilder::new().with_webpki_roots());
+#[cfg(feature = "tls")]
+let res = https.https_or_http().enable_http1().wrap_connector(res);

+Ok(res)
 }

-pub fn with_connection_pool() -> Client {
+pub fn with_executor_and_connection_pool(executor: E) -> Result<Client<E>, Error> {
-Client {
+Ok(Client {
 connection: Connection::ConnectionPool(
-HyperClient::builder(TokioExecutor::new())
+HyperClient::builder(executor)
 .pool_idle_timeout(core::time::Duration::from_secs(60))
-.build(Self::connector()),
+.build(Self::connector()?),
 ),
-}
+})
 }

-pub fn without_connection_pool(host: &str) -> Result<Client, Error> {
+pub fn with_executor_and_without_connection_pool(
+executor: E,
+host: &str,
+) -> Result<Client<E>, Error> {
 Ok(Client {
 connection: Connection::Connection {
-connector: Self::connector(),
+executor,
+connector: Self::connector()?,
 host: {
 let uri: Uri = host.parse().map_err(|_| Error::InvalidUri)?;
 if uri.host().is_none() {
@@ -95,9 +130,9 @@ impl Client {
 })
 }

-pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_>, Error> {
+pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_, E>, Error> {
 let request: Request = request.into();
-let mut request = request.0;
+let Request { mut request, response_size_limit } = request;
 if let Some(header_host) = request.headers().get(hyper::header::HOST) {
 match &self.connection {
 Connection::ConnectionPool(_) => {}
@@ -131,7 +166,7 @@ impl Client {
 Connection::ConnectionPool(client) => {
 client.request(request).await.map_err(Error::HyperUtil)?
 }
-Connection::Connection { connector, host, connection } => {
+Connection::Connection { executor, connector, host, connection } => {
 let mut connection_lock = connection.lock().await;

 // If there's not a connection...
@@ -143,28 +178,46 @@ impl Client {
 let call_res = call_res.map_err(Error::ConnectionError);
 let (requester, connection) =
 hyper::client::conn::http1::handshake(call_res?).await.map_err(Error::Hyper)?;
-// This will die when we drop the requester, so we don't need to track an AbortHandle
+// This task will die when we drop the requester
-// for it
+executor.execute(Box::pin(connection.map(|_| ())));
-tokio::spawn(connection);
 *connection_lock = Some(requester);
 }

-let connection = connection_lock.as_mut().unwrap();
+let connection = connection_lock.as_mut().expect("lock over the connection was poisoned");
 let mut err = connection.ready().await.err();
 if err.is_none() {
 // Send the request
-let res = connection.send_request(request).await;
+let response = connection.send_request(request).await;
-if let Ok(res) = res {
+if let Ok(response) = response {
-return Ok(Response(res, self));
+return Ok(Response { response, size_limit: response_size_limit, client: self });
 }
-err = res.err();
+err = response.err();
 }
 // Since this connection has been put into an error state, drop it
 *connection_lock = None;
-Err(Error::Hyper(err.unwrap()))?
+Err(Error::Hyper(err.expect("only here if `err` is some yet no error")))?
 }
 };

-Ok(Response(response, self))
+Ok(Response { response, size_limit: response_size_limit, client: self })
 }
 }

+#[cfg(feature = "tokio")]
+mod tokio {
+use hyper_util::rt::tokio::TokioExecutor;
+use super::*;

+pub type TokioClient = Client<TokioExecutor>;
+impl Client<TokioExecutor> {
+pub fn with_connection_pool() -> Result<Self, Error> {
+Self::with_executor_and_connection_pool(TokioExecutor::new())
+}

+pub fn without_connection_pool(host: &str) -> Result<Self, Error> {
+Self::with_executor_and_without_connection_pool(TokioExecutor::new(), host)
+}
+}
+}
+#[cfg(feature = "tokio")]
+pub use tokio::TokioClient;
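A minimal usage sketch of the reworked client under the `tokio` and `tls` features, based on the items shown in this diff (`TokioClient`, `with_connection_pool`, `request`, the `Full`/`hyper` re-exports). The URL is a placeholder and `simple_request::Error` is assumed to implement `Debug`.

use simple_request::{hyper, Full, Request, TokioClient};
use hyper::body::Bytes;

#[tokio::main]
async fn main() {
  // Pooled client on the tokio executor, the only executor `tls` is guaranteed to support
  let client = TokioClient::with_connection_pool().expect("failed to build client");

  // Any `hyper::Request<Full<Bytes>>` converts into a `simple-request` Request
  let request: Request = hyper::Request::get("https://serai.exchange/")
    .body(Full::new(Bytes::new()))
    .expect("failed to build request")
    .into();

  let response = client.request(request).await.expect("request failed");
  println!("status: {}", response.status());
}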
@@ -7,11 +7,15 @@ pub use http_body_util::Full;
 use crate::Error;

 #[derive(Debug)]
-pub struct Request(pub(crate) hyper::Request<Full<Bytes>>);
+pub struct Request {
+pub(crate) request: hyper::Request<Full<Bytes>>,
+pub(crate) response_size_limit: Option<usize>,
+}

 impl Request {
 #[cfg(feature = "basic-auth")]
 fn username_password_from_uri(&self) -> Result<(String, String), Error> {
-if let Some(authority) = self.0.uri().authority() {
+if let Some(authority) = self.request.uri().authority() {
 let authority = authority.as_str();
 if authority.contains('@') {
 // Decode the username and password from the URI
@@ -36,9 +40,10 @@ impl Request {
 let mut formatted = format!("{username}:{password}");
 let mut encoded = Base64::encode_string(formatted.as_bytes());
 formatted.zeroize();
-self.0.headers_mut().insert(
+self.request.headers_mut().insert(
 hyper::header::AUTHORIZATION,
-HeaderValue::from_str(&format!("Basic {encoded}")).unwrap(),
+HeaderValue::from_str(&format!("Basic {encoded}"))
+.expect("couldn't form header from base64-encoded string"),
 );
 encoded.zeroize();
 }
@@ -59,9 +64,17 @@ impl Request {
 pub fn with_basic_auth(&mut self) {
 let _ = self.basic_auth_from_uri();
 }
-}
-impl From<hyper::Request<Full<Bytes>>> for Request {
+/// Set a size limit for the response.
-fn from(request: hyper::Request<Full<Bytes>>) -> Request {
+///
-Request(request)
+/// This may be exceeded by a single HTTP frame and accordingly isn't perfect.
+pub fn set_response_size_limit(&mut self, response_size_limit: Option<usize>) {
+self.response_size_limit = response_size_limit;
+}
+}

+impl From<hyper::Request<Full<Bytes>>> for Request {
+fn from(request: hyper::Request<Full<Bytes>>) -> Request {
+Request { request, response_size_limit: None }
 }
 }
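A sketch of building a `Request` with the new fields, using the conversion and setters shown above. The host and credentials are placeholders, and the `basic-auth` feature is assumed to be enabled.

use simple_request::{hyper, Full, Request};
use hyper::body::Bytes;

fn build_request() -> Request {
  // Credentials embedded in the URI are lifted into an `Authorization: Basic ...` header
  let mut request: Request = hyper::Request::get("http://user:pass@127.0.0.1:8332/")
    .body(Full::new(Bytes::new()))
    .expect("failed to build request")
    .into();
  request.with_basic_auth();
  // Cap the buffered response at 1 MiB; per the doc comment, a single HTTP frame may still exceed this
  request.set_response_size_limit(Some(1 << 20));
  request
}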
@@ -1,24 +1,54 @@
+use core::{pin::Pin, future::Future};
+use std::io;

 use hyper::{
 StatusCode,
 header::{HeaderValue, HeaderMap},
-body::{Buf, Incoming},
+body::Incoming,
+rt::Executor,
 };
 use http_body_util::BodyExt;

+use futures_util::{Stream, StreamExt};

 use crate::{Client, Error};

 // Borrows the client so its async task lives as long as this response exists.
 #[allow(dead_code)]
 #[derive(Debug)]
-pub struct Response<'a>(pub(crate) hyper::Response<Incoming>, pub(crate) &'a Client);
+pub struct Response<
-impl<'a> Response<'a> {
+'a,
+E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
+> {
+pub(crate) response: hyper::Response<Incoming>,
+pub(crate) size_limit: Option<usize>,
+pub(crate) client: &'a Client<E>,
+}

+impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>>
+Response<'_, E>
+{
 pub fn status(&self) -> StatusCode {
-self.0.status()
+self.response.status()
 }
 pub fn headers(&self) -> &HeaderMap<HeaderValue> {
-self.0.headers()
+self.response.headers()
 }
 pub async fn body(self) -> Result<impl std::io::Read, Error> {
-Ok(self.0.into_body().collect().await.map_err(Error::Hyper)?.aggregate().reader())
+let mut body = self.response.into_body().into_data_stream();
+let mut res: Vec<u8> = vec![];
+loop {
+if let Some(size_limit) = self.size_limit {
+let (lower, upper) = body.size_hint();
+if res.len().wrapping_add(upper.unwrap_or(lower)) > size_limit.min(usize::MAX - 1) {
+Err(Error::ConnectionError("response exceeded size limit".into()))?;
+}
+}

+let Some(part) = body.next().await else { break };
+let part = part.map_err(Error::Hyper)?;
+res.extend(part.as_ref());
+}
+Ok(io::Cursor::new(res))
 }
 }
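A sketch of draining a `Response` with the rewritten `body`, which now buffers the stream (enforcing any configured size limit) and returns an `io::Read`. `simple_request::Error` is again assumed to implement `Debug`.

use std::io::Read;

use simple_request::{hyper, Full, Request, TokioClient};
use hyper::body::Bytes;

async fn fetch_text(client: &TokioClient, uri: &str) -> Result<String, String> {
  let request: Request = hyper::Request::get(uri)
    .body(Full::new(Bytes::new()))
    .map_err(|e| e.to_string())?
    .into();
  let response = client.request(request).await.map_err(|e| format!("{e:?}"))?;

  // `body` collects the response into memory and yields a reader over the buffered bytes
  let mut body = response.body().await.map_err(|e| format!("{e:?}"))?;
  let mut text = String::new();
  body.read_to_string(&mut text).map_err(|e| e.to_string())?;
  Ok(text)
}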
@@ -1,13 +1,13 @@
 [package]
 name = "std-shims"
-version = "0.1.1"
+version = "0.1.5"
 description = "A series of std shims to make alloc more feasible"
 license = "MIT"
 repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = ["nostd", "no_std", "alloc", "io"]
 edition = "2021"
-rust-version = "1.80"
+rust-version = "1.65"

 [package.metadata.docs.rs]
 all-features = true
@@ -17,9 +17,11 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true

 [dependencies]
-spin = { version = "0.9", default-features = false, features = ["use_ticket_mutex", "lazy"] }
+rustversion = { version = "1", default-features = false }
-hashbrown = { version = "0.15", default-features = false, features = ["default-hasher", "inline-more"] }
+spin = { version = "0.10", default-features = false, features = ["use_ticket_mutex", "fair_mutex", "once", "lazy"] }
+hashbrown = { version = "0.16", default-features = false, features = ["default-hasher", "inline-more"], optional = true }

 [features]
-std = []
+alloc = ["hashbrown"]
+std = ["alloc", "spin/std"]
 default = ["std"]
@@ -1,6 +1,6 @@
 MIT License

-Copyright (c) 2023 Luke Parker
+Copyright (c) 2023-2025 Luke Parker

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -1,6 +1,28 @@
-# std shims
+# `std` shims

-A crate which passes through to std when the default `std` feature is enabled,
+`std-shims` is a Rust crate with two purposes:
-yet provides a series of shims when it isn't.
+- Expand the functionality of `core` and `alloc`
+- Polyfill functionality only available on newer version of Rust

-`HashSet` and `HashMap` are provided via `hashbrown`.
+The goal is to make supporting no-`std` environments, and older versions of
+Rust, as simple as possible. For most use cases, replacing `std::` with
+`std_shims::` and adding `use std_shims::prelude::*` is sufficient to take full
+advantage of `std-shims`.

+# API Surface

+`std-shims` only aims to have items _mutually available_ between `alloc` (with
+extra dependencies) and `std` publicly exposed. Items exclusive to `std`, with
+no shims available, will not be exported by `std-shims`.

+# Dependencies

+`HashSet` and `HashMap` are provided via `hashbrown`. Synchronization
+primitives are provided via `spin` (avoiding a requirement on
+`critical-section`). Sections of `std::io` are independently matched as
+possible. `rustversion` is used to detect when to provide polyfills.

+# Disclaimer

+No guarantee of one-to-one parity is provided. The shims provided aim to be
+sufficient for the average case. Pull requests are _welcome_.
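A sketch of the usage pattern the README describes, with `std::` swapped for `std_shims::` and the shimmed prelude imported. This assumes the `alloc` feature (which provides `HashMap` and `String`).

#![cfg_attr(not(feature = "std"), no_std)]

use std_shims::prelude::*;
use std_shims::collections::HashMap;

/// Count how often each byte appears, using the `hashbrown`-backed `HashMap` on no-`std`.
pub fn tally(bytes: &[u8]) -> HashMap<u8, usize> {
  let mut counts = HashMap::new();
  for byte in bytes {
    *counts.entry(*byte).or_insert(0) += 1;
  }
  counts
}

/// `String`/`ToString` come from the shimmed prelude when `std` isn't available.
pub fn label(count: usize) -> String {
  count.to_string()
}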
@@ -1,7 +1,7 @@
+#[cfg(all(feature = "alloc", not(feature = "std")))]
+pub use extern_alloc::collections::*;
+#[cfg(all(feature = "alloc", not(feature = "std")))]
+pub use hashbrown::{HashSet, HashMap};

 #[cfg(feature = "std")]
 pub use std::collections::*;

-#[cfg(not(feature = "std"))]
-pub use alloc::collections::*;
-#[cfg(not(feature = "std"))]
-pub use hashbrown::{HashSet, HashMap};
@@ -1,42 +1,74 @@
-#[cfg(feature = "std")]
-pub use std::io::*;

 #[cfg(not(feature = "std"))]
 mod shims {
-use core::fmt::{Debug, Formatter};
+use core::fmt::{self, Debug, Display, Formatter};
-use alloc::{boxed::Box, vec::Vec};
+#[cfg(feature = "alloc")]
+use extern_alloc::{boxed::Box, vec::Vec};
+use crate::error::Error as CoreError;

+/// The kind of error.
 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
 pub enum ErrorKind {
 UnexpectedEof,
 Other,
 }

+/// An error.
+#[derive(Debug)]
 pub struct Error {
 kind: ErrorKind,
-error: Box<dyn Send + Sync>,
+#[cfg(feature = "alloc")]
+error: Box<dyn Send + Sync + CoreError>,
 }

-impl Debug for Error {
+impl Display for Error {
-fn fmt(&self, fmt: &mut Formatter<'_>) -> core::result::Result<(), core::fmt::Error> {
+fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
-fmt.debug_struct("Error").field("kind", &self.kind).finish_non_exhaustive()
+<Self as Debug>::fmt(self, f)
 }
 }
+impl CoreError for Error {}

+#[cfg(not(feature = "alloc"))]
+pub trait IntoBoxSendSyncError {}
+#[cfg(not(feature = "alloc"))]
+impl<I> IntoBoxSendSyncError for I {}
+#[cfg(feature = "alloc")]
+pub trait IntoBoxSendSyncError: Into<Box<dyn Send + Sync + CoreError>> {}
+#[cfg(feature = "alloc")]
+impl<I: Into<Box<dyn Send + Sync + CoreError>>> IntoBoxSendSyncError for I {}

 impl Error {
-pub fn new<E: 'static + Send + Sync>(kind: ErrorKind, error: E) -> Error {
+/// Create a new error.
-Error { kind, error: Box::new(error) }
+///
+/// The error object itself is silently dropped when `alloc` is not enabled.
+#[allow(unused)]
+pub fn new<E: 'static + IntoBoxSendSyncError>(kind: ErrorKind, error: E) -> Error {
+#[cfg(not(feature = "alloc"))]
+let res = Error { kind };
+#[cfg(feature = "alloc")]
+let res = Error { kind, error: error.into() };
+res
 }

-pub fn other<E: 'static + Send + Sync>(error: E) -> Error {
+/// Create a new error with `io::ErrorKind::Other` as its kind.
-Error { kind: ErrorKind::Other, error: Box::new(error) }
+///
+/// The error object itself is silently dropped when `alloc` is not enabled.
+#[allow(unused)]
+pub fn other<E: 'static + IntoBoxSendSyncError>(error: E) -> Error {
+#[cfg(not(feature = "alloc"))]
+let res = Error { kind: ErrorKind::Other };
+#[cfg(feature = "alloc")]
+let res = Error { kind: ErrorKind::Other, error: error.into() };
+res
 }

+/// The kind of error.
 pub fn kind(&self) -> ErrorKind {
 self.kind
 }

-pub fn into_inner(self) -> Option<Box<dyn Send + Sync>> {
+/// Retrieve the inner error.
+#[cfg(feature = "alloc")]
+pub fn into_inner(self) -> Option<Box<dyn Send + Sync + CoreError>> {
 Some(self.error)
 }
 }
@@ -64,6 +96,12 @@ mod shims {
 }
 }

+impl<R: Read> Read for &mut R {
+fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
+R::read(*self, buf)
+}
+}

 pub trait BufRead: Read {
 fn fill_buf(&mut self) -> Result<&[u8]>;
 fn consume(&mut self, amt: usize);
@@ -88,6 +126,7 @@ mod shims {
 }
 }

+#[cfg(feature = "alloc")]
 impl Write for Vec<u8> {
 fn write(&mut self, buf: &[u8]) -> Result<usize> {
 self.extend(buf);
@@ -95,6 +134,8 @@ mod shims {
 }
 }
 }

 #[cfg(not(feature = "std"))]
 pub use shims::*;

+#[cfg(feature = "std")]
+pub use std::io::{ErrorKind, Error, Result, Read, BufRead, Write};
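A sketch using the shimmed `io` items above; the same code compiles against `std::io` when the `std` feature is on and against the shims (given `alloc` for `Vec`) when it isn't.

use std_shims::prelude::*;
use std_shims::io::{self, Write};

/// Serialize a `u32` to any shimmed `Write` implementor.
pub fn write_u32(value: u32, writer: &mut impl Write) -> io::Result<()> {
  writer.write(&value.to_le_bytes())?;
  Ok(())
}

/// `Vec<u8>` implements the shimmed `Write` (under `alloc`), so this buffers in memory.
pub fn write_u32_to_vec(value: u32) -> io::Result<Vec<u8>> {
  let mut buf = Vec::new();
  write_u32(value, &mut buf)?;
  Ok(buf)
}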
@@ -1,13 +1,102 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]

-pub extern crate alloc;
+#[cfg(not(feature = "alloc"))]
+pub use core::*;
+#[cfg(not(feature = "alloc"))]
+pub use core::{alloc, borrow, ffi, fmt, slice, str, task};

+#[cfg(not(feature = "std"))]
+#[rustversion::before(1.81)]
+pub mod error {
+use core::fmt::{Debug, Display};
+pub trait Error: Debug + Display {}
+}
+#[cfg(not(feature = "std"))]
+#[rustversion::since(1.81)]
+pub use core::error;

+#[cfg(feature = "alloc")]
+extern crate alloc as extern_alloc;
+#[cfg(all(feature = "alloc", not(feature = "std")))]
+pub use extern_alloc::{alloc, borrow, boxed, ffi, fmt, rc, slice, str, string, task, vec, format};
+#[cfg(feature = "std")]
+pub use std::{alloc, borrow, boxed, error, ffi, fmt, rc, slice, str, string, task, vec, format};

-pub mod sync;
 pub mod collections;
 pub mod io;
+pub mod sync;

-pub use alloc::vec;
+pub mod prelude {
-pub use alloc::str;
+// Shim the `std` prelude
-pub use alloc::string;
+#[cfg(feature = "alloc")]
+pub use extern_alloc::{
+format, vec,
+borrow::ToOwned,
+boxed::Box,
+vec::Vec,
+string::{String, ToString},
+};

+// Shim `div_ceil`
+#[rustversion::before(1.73)]
+#[doc(hidden)]
+pub trait StdShimsDivCeil {
+fn div_ceil(self, rhs: Self) -> Self;
+}
+#[rustversion::before(1.73)]
+mod impl_divceil {
+use super::StdShimsDivCeil;
+impl StdShimsDivCeil for u8 {
+fn div_ceil(self, rhs: Self) -> Self {
+(self + (rhs - 1)) / rhs
+}
+}
+impl StdShimsDivCeil for u16 {
+fn div_ceil(self, rhs: Self) -> Self {
+(self + (rhs - 1)) / rhs
+}
+}
+impl StdShimsDivCeil for u32 {
+fn div_ceil(self, rhs: Self) -> Self {
+(self + (rhs - 1)) / rhs
+}
+}
+impl StdShimsDivCeil for u64 {
+fn div_ceil(self, rhs: Self) -> Self {
+(self + (rhs - 1)) / rhs
+}
+}
+impl StdShimsDivCeil for u128 {
+fn div_ceil(self, rhs: Self) -> Self {
+(self + (rhs - 1)) / rhs
+}
+}
+impl StdShimsDivCeil for usize {
+fn div_ceil(self, rhs: Self) -> Self {
+(self + (rhs - 1)) / rhs
+}
+}
+}

+// Shim `io::Error::other`
+#[cfg(feature = "std")]
+#[rustversion::before(1.74)]
+#[doc(hidden)]
+pub trait StdShimsIoErrorOther {
+fn other<E>(error: E) -> Self
+where
+E: Into<Box<dyn std::error::Error + Send + Sync>>;
+}
+#[cfg(feature = "std")]
+#[rustversion::before(1.74)]
+impl StdShimsIoErrorOther for std::io::Error {
+fn other<E>(error: E) -> Self
+where
+E: Into<Box<dyn std::error::Error + Send + Sync>>,
+{
+std::io::Error::new(std::io::ErrorKind::Other, error)
+}
+}
+}
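A sketch of the polyfills the new prelude provides on older compilers. On Rust >= 1.73/1.74 the same calls resolve to the inherent `div_ceil` and `io::Error::other`, since the version-gated traits above simply don't exist there. Assumes the `std` feature for the second function.

use std_shims::prelude::*;

/// How many fixed-size blocks are needed to hold `len` bytes.
pub fn blocks_needed(len: usize, block_size: usize) -> usize {
  len.div_ceil(block_size)
}

/// Reject overlong inputs with an `io::ErrorKind::Other` error.
pub fn check_len(len: usize, limit: usize) -> std::io::Result<()> {
  if len > limit {
    return Err(std::io::Error::other("length exceeded limit"));
  }
  Ok(())
}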
@@ -1,19 +1,28 @@
-pub use core::sync::*;
+pub use core::sync::atomic;
-pub use alloc::sync::*;
+#[cfg(all(feature = "alloc", not(feature = "std")))]
+pub use extern_alloc::sync::{Arc, Weak};
+#[cfg(feature = "std")]
+pub use std::sync::{Arc, Weak};

 mod mutex_shim {
-#[cfg(feature = "std")]
-pub use std::sync::*;
 #[cfg(not(feature = "std"))]
-pub use spin::*;
+pub use spin::{Mutex, MutexGuard};
+#[cfg(feature = "std")]
+pub use std::sync::{Mutex, MutexGuard};

+/// A shimmed `Mutex` with an API mutual to `spin` and `std`.
 #[derive(Default, Debug)]
 pub struct ShimMutex<T>(Mutex<T>);
 impl<T> ShimMutex<T> {
+/// Construct a new `Mutex`.
 pub const fn new(value: T) -> Self {
 Self(Mutex::new(value))
 }

+/// Acquire a lock on the contents of the `Mutex`.
+///
+/// On no-`std` environments, this may spin until the lock is acquired. On `std` environments,
+/// this may panic if the `Mutex` was poisoned.
 pub fn lock(&self) -> MutexGuard<'_, T> {
 #[cfg(feature = "std")]
 let res = self.0.lock().unwrap();
@@ -25,7 +34,12 @@ mod mutex_shim {
 }
 pub use mutex_shim::{ShimMutex as Mutex, MutexGuard};

-#[cfg(feature = "std")]
+#[rustversion::before(1.80)]
-pub use std::sync::LazyLock;
+pub use spin::Lazy as LazyLock;

+#[rustversion::since(1.80)]
 #[cfg(not(feature = "std"))]
 pub use spin::Lazy as LazyLock;
+#[rustversion::since(1.80)]
+#[cfg(feature = "std")]
+pub use std::sync::LazyLock;
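A sketch using the shimmed synchronization primitives above; the same code runs on `std` (where `lock` may panic on poison) and on no-`std` (where it may spin).

use std_shims::sync::{LazyLock, Mutex};

// `LazyLock` is `std::sync::LazyLock` on Rust >= 1.80 with `std`, and `spin::Lazy` otherwise
static COUNTER: LazyLock<Mutex<u64>> = LazyLock::new(|| Mutex::new(0));

/// Increment the global counter, returning the new value.
pub fn increment() -> u64 {
  // The shimmed `lock` returns the guard directly, not a `Result`
  let mut counter = COUNTER.lock();
  *counter += 1;
  *counter
}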
@@ -1,6 +1,6 @@
 AGPL-3.0-only license

-Copyright (c) 2022-2024 Luke Parker
+Copyright (c) 2022-2025 Luke Parker

 This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU Affero General Public License Version 3 as
@@ -1,11 +1,17 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]

-use core::{future::Future, time::Duration};
+use core::{
+fmt::{self, Debug},
+future::Future,
+time::Duration,
+};

 use tokio::sync::mpsc;

+mod type_name;

 /// A handle for a task.
 ///
 /// The task will only stop running once all handles for it are dropped.
@@ -45,8 +51,6 @@ impl Task {

 impl TaskHandle {
 /// Tell the task to run now (and not whenever its next iteration on a timer is).
-///
-/// Panics if the task has been dropped.
 pub fn run_now(&self) {
 #[allow(clippy::match_same_arms)]
 match self.run_now.try_send(()) {
@@ -54,12 +58,22 @@ impl TaskHandle {
 // NOP on full, as this task will already be ran as soon as possible
 Err(mpsc::error::TrySendError::Full(())) => {}
 Err(mpsc::error::TrySendError::Closed(())) => {
+// The task should only be closed if all handles are dropped, and this one hasn't been
 panic!("task was unexpectedly closed when calling run_now")
 }
 }
 }
 }

+/// An enum which can't be constructed, representing that the task does not error.
+pub enum DoesNotError {}
+impl Debug for DoesNotError {
+fn fmt(&self, _: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+// This type can't be constructed so we'll never have a `&self` to call this fn with
+unreachable!()
+}
+}

 /// A task to be continually ran.
 pub trait ContinuallyRan: Sized + Send {
 /// The amount of seconds before this task should be polled again.
@@ -69,11 +83,14 @@ pub trait ContinuallyRan: Sized + Send {
 /// Upon error, the amount of time waited will be linearly increased until this limit.
 const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 120;

+/// The error potentially yielded upon running an iteration of this task.
+type Error: Debug;

 /// Run an iteration of the task.
 ///
 /// If this returns `true`, all dependents of the task will immediately have a new iteration ran
 /// (without waiting for whatever timer they were already on).
-fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>>;
+fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>>;

 /// Continually run the task.
 fn continually_run(
@@ -115,12 +132,20 @@ pub trait ContinuallyRan: Sized + Send {
 }
 }
 Err(e) => {
-log::warn!("{}", e);
+// Get the type name
+let type_name = type_name::strip_type_name(core::any::type_name::<Self>());
+// Print the error as a warning, prefixed by the task's type
+log::warn!("{type_name}: {e:?}");
 increase_sleep_before_next_task(&mut current_sleep_before_next_task);
 }
 }

 // Don't run the task again for another few seconds UNLESS told to run now
+/*
+We could replace tokio::mpsc with async_channel, tokio::time::sleep with
+patchable_async_sleep::sleep, and tokio::select with futures_lite::future::or
+It isn't worth the effort when patchable_async_sleep::sleep will still resolve to tokio
+*/
 tokio::select! {
 () = tokio::time::sleep(Duration::from_secs(current_sleep_before_next_task)) => {},
 msg = task.run_now.recv() => {

common/task/src/type_name.rs (new file, 31 lines)
@@ -0,0 +1,31 @@
+/// Strip the modules from a type name.
+// This may be of the form `a::b::C`, in which case we only want `C`
+pub(crate) fn strip_type_name(full_type_name: &'static str) -> String {
+  // It also may be `a::b::C<d::e::F>`, in which case, we only attempt to strip `a::b`
+  let mut by_generics = full_type_name.split('<');
+
+  // Strip to just `C`
+  let full_outer_object_name = by_generics.next().unwrap();
+  let mut outer_object_name_parts = full_outer_object_name.split("::");
+  let mut last_part_in_outer_object_name = outer_object_name_parts.next().unwrap();
+  for part in outer_object_name_parts {
+    last_part_in_outer_object_name = part;
+  }
+
+  // Push back on the generic terms
+  let mut type_name = last_part_in_outer_object_name.to_string();
+  for generic in by_generics {
+    type_name.push('<');
+    type_name.push_str(generic);
+  }
+  type_name
+}
+
+#[test]
+fn test_strip_type_name() {
+  assert_eq!(strip_type_name("core::option::Option"), "Option");
+  assert_eq!(
+    strip_type_name("core::option::Option<alloc::string::String>"),
+    "Option<alloc::string::String>"
+  );
+}
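
Read together, the hunks above change how tasks report failures: errors are now typed via an associated `Error: Debug` and logged prefixed with the task's stripped type name. As a point of reference, here is a minimal sketch of an implementor under the revised trait; the `HeartbeatTask` type is hypothetical, while `ContinuallyRan`, `DoesNotError`, `type Error`, and `run_iteration` are taken from the diff.

use core::future::Future;

use serai_task::{DoesNotError, ContinuallyRan};

// Hypothetical task, used only to illustrate the new associated error type
struct HeartbeatTask;

impl ContinuallyRan for HeartbeatTask {
  // A task which cannot fail now says so in the type system, instead of returning
  // `Result<_, String>`; a fallible task would use any `Debug` error type here
  type Error = DoesNotError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      // Returning `true` immediately wakes this task's dependents; `false` leaves
      // them on their existing timers
      Ok(true)
    }
  }
}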

@@ -7,7 +7,9 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/zalloc"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
-rust-version = "1.77"
+# This must be specified with the patch version, else Rust believes `1.77` < `1.77.0` and will
+# refuse to compile due to relying on versions introduced with `1.77.0`
+rust-version = "1.77.0"
 
 [package.metadata.docs.rs]
 all-features = true

@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2022-2023 Luke Parker
+Copyright (c) 2022-2025 Luke Parker
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
 #![cfg_attr(docsrs, feature(doc_cfg))]
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))]
 
 //! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation.

@@ -8,7 +8,6 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
 publish = false
-rust-version = "1.81"
 
 [package.metadata.docs.rs]
 all-features = true
@@ -22,16 +21,18 @@ zeroize = { version = "^1.5", default-features = false, features = ["std"] }
 bitvec = { version = "1", default-features = false, features = ["std"] }
 rand_core = { version = "0.6", default-features = false, features = ["std"] }
 
-blake2 = { version = "0.10", default-features = false, features = ["std"] }
+blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }
 schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
 
-transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] }
+dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"] }
 ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
-schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std"] }
+dkg = { package = "dkg-musig", path = "../crypto/dkg/musig", default-features = false, features = ["std"] }
 frost = { package = "modular-frost", path = "../crypto/frost" }
 frost-schnorrkel = { path = "../crypto/schnorrkel" }
 
-scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
+hex = { version = "0.4", default-features = false, features = ["std"] }
+scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
+borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
 
 zalloc = { path = "../common/zalloc" }
 serai-db = { path = "../common/db" }
@@ -40,28 +41,22 @@ serai-task = { path = "../common/task", version = "0.1" }
 
 messages = { package = "serai-processor-messages", path = "../processor/messages" }
 message-queue = { package = "serai-message-queue", path = "../message-queue" }
-tributary = { package = "tributary-chain", path = "./tributary" }
+tributary-sdk = { path = "./tributary-sdk" }
 
-sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
 serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }
 
-hex = { version = "0.4", default-features = false, features = ["std"] }
-borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
-
 log = { version = "0.4", default-features = false, features = ["std"] }
 env_logger = { version = "0.10", default-features = false, features = ["humantime"] }
 
+tokio = { version = "1", default-features = false, features = ["time", "sync", "macros", "rt-multi-thread"] }
+
 serai-cosign = { path = "./cosign" }
 serai-coordinator-substrate = { path = "./substrate" }
+serai-coordinator-tributary = { path = "./tributary" }
 serai-coordinator-p2p = { path = "./p2p" }
 serai-coordinator-libp2p-p2p = { path = "./p2p/libp2p" }
 
-[dev-dependencies]
-tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }
-sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
-sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
-
 [features]
-longer-reattempts = []
+longer-reattempts = ["serai-coordinator-tributary/longer-reattempts"]
 parity-db = ["serai-db/parity-db"]
 rocksdb = ["serai-db/rocksdb"]

@@ -1,19 +1,29 @@
 # Coordinator
 
-- [`tendermint`](/tributary/tendermint) is an implementation of the Tendermint BFT algorithm.
+- [`tendermint`](/tributary/tendermint) is an implementation of the Tendermint
+  BFT algorithm.
 
-- [`tributary`](./tributary) is a micro-blockchain framework. Instead of a producing a blockchain
-  daemon like the Polkadot SDK or Cosmos SDK intend to, `tributary` is solely intended to be an
-  embedded asynchronous task within an application.
+- [`tributary-sdk`](./tributary-sdk) is a micro-blockchain framework. Instead
+  of a producing a blockchain daemon like the Polkadot SDK or Cosmos SDK intend
+  to, `tributary` is solely intended to be an embedded asynchronous task within
+  an application.
 
-  The Serai coordinator spawns a tributary for each validator set it's coordinating. This allows
-  the participating validators to communicate in a byzantine-fault-tolerant manner (relying on
-  Tendermint for consensus).
+  The Serai coordinator spawns a tributary for each validator set it's
+  coordinating. This allows the participating validators to communicate in a
+  byzantine-fault-tolerant manner (relying on Tendermint for consensus).
 
-- [`cosign`](./cosign) contains a library to decide which Substrate blocks should be cosigned and
-  to evaluate cosigns.
+- [`cosign`](./cosign) contains a library to decide which Substrate blocks
+  should be cosigned and to evaluate cosigns.
 
-- [`substrate`](./substrate) contains a library to index the Substrate blockchain and handle its
-  events.
+- [`substrate`](./substrate) contains a library to index the Substrate
+  blockchain and handle its events.
+
+- [`tributary`](./tributary) is our instantiation of the Tributary SDK for the
+  Serai processor. It includes the `Transaction` definition and deferred
+  execution logic.
+
+- [`p2p`](./p2p) is our abstract P2P API to service the Coordinator.
+
+- [`libp2p`](./p2p/libp2p) is our libp2p-backed implementation of the P2P API.
 
 - [`src`](./src) contains the source code for the Coordinator binary itself.

@@ -8,7 +8,7 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
 publish = false
-rust-version = "1.81"
+rust-version = "1.85"
 
 [package.metadata.docs.rs]
 all-features = true
@@ -18,7 +18,7 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true
 
 [dependencies]
-blake2 = { version = "0.10", default-features = false, features = ["std"] }
+blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }
 schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
 
 scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }

@@ -1,6 +1,6 @@
 AGPL-3.0-only license
 
-Copyright (c) 2023-2024 Luke Parker
+Copyright (c) 2023-2025 Luke Parker
 
 This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU Affero General Public License Version 3 as

@@ -2,7 +2,7 @@ use core::future::Future;
 use std::time::{Duration, SystemTime};
 
 use serai_db::*;
-use serai_task::ContinuallyRan;
+use serai_task::{DoesNotError, ContinuallyRan};
 
 use crate::evaluator::CosignedBlocks;
 
@@ -25,7 +25,9 @@ pub(crate) struct CosignDelayTask<D: Db> {
 }
 
 impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
-  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
+  type Error = DoesNotError;
+
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
       let mut made_progress = false;
       loop {

@@ -1,5 +1,5 @@
 use core::future::Future;
-use std::time::{Duration, SystemTime};
+use std::time::{Duration, Instant, SystemTime};
 
 use serai_db::*;
 use serai_task::ContinuallyRan;
@@ -77,10 +77,22 @@ pub(crate) fn currently_evaluated_global_session(getter: &impl Get) -> Option<[u
 pub(crate) struct CosignEvaluatorTask<D: Db, R: RequestNotableCosigns> {
   pub(crate) db: D,
   pub(crate) request: R,
+  pub(crate) last_request_for_cosigns: Instant,
 }
 
 impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D, R> {
-  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
+  type Error = String;
+
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
+    let should_request_cosigns = |last_request_for_cosigns: &mut Instant| {
+      const REQUEST_COSIGNS_SPACING: Duration = Duration::from_secs(60);
+      if Instant::now() < (*last_request_for_cosigns + REQUEST_COSIGNS_SPACING) {
+        return false;
+      }
+      *last_request_for_cosigns = Instant::now();
+      true
+    };
+
     async move {
       let mut known_cosign = None;
       let mut made_progress = false;
@@ -116,12 +128,13 @@ impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D,
           // Check if the sum weight doesn't cross the required threshold
           if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) {
             // Request the necessary cosigns over the network
-            // TODO: Add a timer to ensure this isn't called too often
-            self
-              .request
-              .request_notable_cosigns(global_session)
-              .await
-              .map_err(|e| format!("{e:?}"))?;
+            if should_request_cosigns(&mut self.last_request_for_cosigns) {
+              self
+                .request
+                .request_notable_cosigns(global_session)
+                .await
+                .map_err(|e| format!("{e:?}"))?;
+            }
             // We return an error so the delay before this task is run again increases
             return Err(format!(
               "notable block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
@@ -178,11 +191,13 @@ impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D,
             // If this session hasn't yet produced notable cosigns, then we presume we'll see
             // the desired non-notable cosigns as part of normal operations, without needing to
             // explicitly request them
-            self
-              .request
-              .request_notable_cosigns(global_session)
-              .await
-              .map_err(|e| format!("{e:?}"))?;
+            if should_request_cosigns(&mut self.last_request_for_cosigns) {
+              self
+                .request
+                .request_notable_cosigns(global_session)
+                .await
+                .map_err(|e| format!("{e:?}"))?;
+            }
             // We return an error so the delay before this task is run again increases
             return Err(format!(
              "block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
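
For clarity, the stake-weight condition used twice in the hunks above requires strictly more than 83% of the global session's total stake before a block counts as cosigned. A hypothetical helper restating that integer arithmetic, not part of the diff:

// Mirrors `weight_cosigned < (((total_stake * 83) / 100) + 1)` from the task above
fn sufficient_cosign_weight(weight_cosigned: u64, total_stake: u64) -> bool {
  // `(total_stake * 83) / 100` rounds down, so the `+ 1` makes the bound strict:
  // exactly 83% of the stake is not enough
  weight_cosigned >= ((total_stake * 83) / 100) + 1
}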

@@ -1,9 +1,9 @@
 use core::future::Future;
-use std::collections::HashMap;
+use std::{sync::Arc, collections::HashMap};
 
 use serai_client::{
   primitives::{SeraiAddress, Amount},
-  validator_sets::primitives::ValidatorSet,
+  validator_sets::primitives::ExternalValidatorSet,
   Serai,
 };
 
@@ -28,7 +28,7 @@ db_channel! {
   CosignIntendChannels {
     GlobalSessionsChannel: () -> ([u8; 32], GlobalSession),
     BlockEvents: () -> BlockEventData,
-    IntendedCosigns: (set: ValidatorSet) -> CosignIntent,
+    IntendedCosigns: (set: ExternalValidatorSet) -> CosignIntent,
   }
 }
 
@@ -57,11 +57,13 @@ async fn block_has_events_justifying_a_cosign(
 /// A task to determine which blocks we should intend to cosign.
 pub(crate) struct CosignIntendTask<D: Db> {
   pub(crate) db: D,
-  pub(crate) serai: Serai,
+  pub(crate) serai: Arc<Serai>,
 }
 
 impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
-  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
+  type Error = String;
+
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
       let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1);
       let latest_block_number =
@@ -78,7 +80,7 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
         // Check we are indexing a linear chain
         if (block_number > 1) &&
           (<[u8; 32]>::from(block.header.parent_hash) !=
-            SubstrateBlocks::get(&txn, block_number - 1)
+            SubstrateBlockHash::get(&txn, block_number - 1)
              .expect("indexing a block but haven't indexed its parent"))
         {
          Err(format!(
@@ -86,14 +88,15 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
            block_number - 1
          ))?;
        }
-        SubstrateBlocks::set(&mut txn, block_number, &block.hash());
+        let block_hash = block.hash();
+        SubstrateBlockHash::set(&mut txn, block_number, &block_hash);
 
        let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn);
 
        // If this is notable, it creates a new global session, which we index into the database
        // now
        if has_events == HasEvents::Notable {
-          let serai = self.serai.as_of(block.hash());
+          let serai = self.serai.as_of(block_hash);
          let sets_and_keys = cosigning_sets(&serai).await?;
          let global_session =
            GlobalSession::id(sets_and_keys.iter().map(|(set, _key)| *set).collect());
@@ -107,7 +110,7 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
            keys.insert(set.network, SeraiAddress::from(*key));
            let stake = serai
              .validator_sets()
-              .total_allocated_stake(set.network)
+              .total_allocated_stake(set.network.into())
              .await
              .map_err(|e| format!("{e:?}"))?
              .unwrap_or(Amount(0))
@@ -152,14 +155,14 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
 
          // Tell each set of their expectation to cosign this block
          for set in global_session_info.sets {
-            log::debug!("{:?} will be cosigning block #{block_number}", set);
+            log::debug!("{set:?} will be cosigning block #{block_number}");
            IntendedCosigns::send(
              &mut txn,
              set,
              &CosignIntent {
                global_session: global_session_for_this_block,
                block_number,
-                block_hash: block.hash(),
+                block_hash,
                notable: has_events == HasEvents::Notable,
              },
            );
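
The linearity check in the hunk above only accepts a block whose parent hash matches the hash already indexed for the prior height. A standalone sketch of that rule, with a hypothetical parameter standing in for the database lookup:

// `indexed_parent_hash` stands in for `SubstrateBlockHash::get(&txn, block_number - 1)`
fn extends_indexed_chain(
  block_number: u64,
  parent_hash: [u8; 32],
  indexed_parent_hash: Option<[u8; 32]>,
) -> bool {
  // Block #1 has no indexed parent to check against
  if block_number <= 1 {
    return true;
  }
  indexed_parent_hash == Some(parent_hash)
}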

@@ -1,9 +1,9 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
 
 use core::{fmt::Debug, future::Future};
-use std::collections::HashMap;
+use std::{sync::Arc, collections::HashMap, time::Instant};
 
 use blake2::{Digest, Blake2s256};
 
@@ -11,8 +11,8 @@ use scale::{Encode, Decode};
 use borsh::{BorshSerialize, BorshDeserialize};
 
 use serai_client::{
-  primitives::{NetworkId, SeraiAddress},
-  validator_sets::primitives::{Session, ValidatorSet, KeyPair},
+  primitives::{ExternalNetworkId, SeraiAddress},
+  validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair},
   Public, Block, Serai, TemporalSerai,
 };
 
@@ -52,13 +52,13 @@ pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";
 #[derive(Debug, BorshSerialize, BorshDeserialize)]
 pub(crate) struct GlobalSession {
   pub(crate) start_block_number: u64,
-  pub(crate) sets: Vec<ValidatorSet>,
-  pub(crate) keys: HashMap<NetworkId, SeraiAddress>,
-  pub(crate) stakes: HashMap<NetworkId, u64>,
+  pub(crate) sets: Vec<ExternalValidatorSet>,
+  pub(crate) keys: HashMap<ExternalNetworkId, SeraiAddress>,
+  pub(crate) stakes: HashMap<ExternalNetworkId, u64>,
   pub(crate) total_stake: u64,
 }
 impl GlobalSession {
-  fn id(mut cosigners: Vec<ValidatorSet>) -> [u8; 32] {
+  fn id(mut cosigners: Vec<ExternalValidatorSet>) -> [u8; 32] {
     cosigners.sort_by_key(|a| borsh::to_vec(a).unwrap());
     Blake2s256::digest(borsh::to_vec(&cosigners).unwrap()).into()
   }
@@ -82,13 +82,13 @@ enum HasEvents {
 #[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
 pub struct CosignIntent {
   /// The global session this cosign is being performed under.
-  global_session: [u8; 32],
+  pub global_session: [u8; 32],
   /// The number of the block to cosign.
-  block_number: u64,
+  pub block_number: u64,
   /// The hash of the block to cosign.
-  block_hash: [u8; 32],
+  pub block_hash: [u8; 32],
   /// If this cosign must be handled before further cosigns are.
-  notable: bool,
+  pub notable: bool,
 }
 
 /// A cosign.
@@ -101,7 +101,25 @@ pub struct Cosign {
   /// The hash of the block to cosign.
   pub block_hash: [u8; 32],
   /// The actual cosigner.
-  pub cosigner: NetworkId,
+  pub cosigner: ExternalNetworkId,
+}
+
+impl CosignIntent {
+  /// Convert this into a `Cosign`.
+  pub fn into_cosign(self, cosigner: ExternalNetworkId) -> Cosign {
+    let CosignIntent { global_session, block_number, block_hash, notable: _ } = self;
+    Cosign { global_session, block_number, block_hash, cosigner }
+  }
+}
+
+impl Cosign {
+  /// The message to sign to sign this cosign.
+  ///
+  /// This must be signed with schnorrkel, the context set to `COSIGN_CONTEXT`.
+  pub fn signature_message(&self) -> Vec<u8> {
+    // We use a schnorrkel context to domain-separate this
+    self.encode()
+  }
 }
 
 /// A signed cosign.
@@ -118,7 +136,7 @@ impl SignedCosign {
     let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
     let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };
 
-    signer.verify_simple(COSIGN_CONTEXT, &self.cosign.encode(), &signature).is_ok()
+    signer.verify_simple(COSIGN_CONTEXT, &self.cosign.signature_message(), &signature).is_ok()
   }
 }
 
@@ -127,7 +145,7 @@ create_db! {
    // The following are populated by the intend task and used throughout the library
 
    // An index of Substrate blocks
-    SubstrateBlocks: (block_number: u64) -> [u8; 32],
+    SubstrateBlockHash: (block_number: u64) -> [u8; 32],
    // A mapping from a global session's ID to its relevant information.
    GlobalSessions: (global_session: [u8; 32]) -> GlobalSession,
    // The last block to be cosigned by a global session.
@@ -148,7 +166,10 @@ create_db! {
    // one notable block. All validator sets will explicitly produce a cosign for their notable
    // block, causing the latest cosigned block for a global session to either be the global
    // session's notable cosigns or the network's latest cosigns.
-    NetworksLatestCosignedBlock: (global_session: [u8; 32], network: NetworkId) -> SignedCosign,
+    NetworksLatestCosignedBlock: (
+      global_session: [u8; 32],
+      network: ExternalNetworkId
+    ) -> SignedCosign,
    // Cosigns received for blocks not locally recognized as finalized.
    Faults: (global_session: [u8; 32]) -> Vec<SignedCosign>,
    // The global session which faulted.
@@ -159,15 +180,10 @@ create_db! {
 /// Fetch the keys used for cosigning by a specific network.
 async fn keys_for_network(
   serai: &TemporalSerai<'_>,
-  network: NetworkId,
+  network: ExternalNetworkId,
 ) -> Result<Option<(Session, KeyPair)>, String> {
-  // The Serai network never cosigns so it has no keys for cosigning
-  if network == NetworkId::Serai {
-    return Ok(None);
-  }
-
   let Some(latest_session) =
-    serai.validator_sets().session(network).await.map_err(|e| format!("{e:?}"))?
+    serai.validator_sets().session(network.into()).await.map_err(|e| format!("{e:?}"))?
   else {
    // If this network hasn't had a session declared, move on
    return Ok(None);
@@ -176,7 +192,7 @@ async fn keys_for_network(
   // Get the keys for the latest session
   if let Some(keys) = serai
     .validator_sets()
-    .keys(ValidatorSet { network, session: latest_session })
+    .keys(ExternalValidatorSet { network, session: latest_session })
     .await
     .map_err(|e| format!("{e:?}"))?
   {
@@ -187,7 +203,7 @@ async fn keys_for_network(
   if let Some(prior_session) = latest_session.0.checked_sub(1).map(Session) {
     if let Some(keys) = serai
       .validator_sets()
-      .keys(ValidatorSet { network, session: prior_session })
+      .keys(ExternalValidatorSet { network, session: prior_session })
       .await
       .map_err(|e| format!("{e:?}"))?
     {
@@ -198,16 +214,19 @@ async fn keys_for_network(
   Ok(None)
 }
 
-/// Fetch the `ValidatorSet`s, and their associated keys, used for cosigning as of this block.
-async fn cosigning_sets(serai: &TemporalSerai<'_>) -> Result<Vec<(ValidatorSet, Public)>, String> {
-  let mut sets = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
-  for network in serai_client::primitives::NETWORKS {
+/// Fetch the `ExternalValidatorSet`s, and their associated keys, used for cosigning as of this
+/// block.
+async fn cosigning_sets(
+  serai: &TemporalSerai<'_>,
+) -> Result<Vec<(ExternalValidatorSet, Public)>, String> {
+  let mut sets = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
+  for network in serai_client::primitives::EXTERNAL_NETWORKS {
     let Some((session, keys)) = keys_for_network(serai, network).await? else {
      // If this network doesn't have usable keys, move on
      continue;
    };
 
-    sets.push((ValidatorSet { network, session }, keys.0));
+    sets.push((ExternalValidatorSet { network, session }, keys.0));
   }
   Ok(sets)
 }
@@ -228,6 +247,43 @@ pub trait RequestNotableCosigns: 'static + Send {
 #[derive(Debug)]
 pub struct Faulted;
 
+/// An error incurred while intaking a cosign.
+#[derive(Debug)]
+pub enum IntakeCosignError {
+  /// Cosign is for a not-yet-indexed block
+  NotYetIndexedBlock,
+  /// A later cosign for this cosigner has already been handled
+  StaleCosign,
+  /// The cosign's global session isn't recognized
+  UnrecognizedGlobalSession,
+  /// The cosign is for a block before its global session starts
+  BeforeGlobalSessionStart,
+  /// The cosign is for a block after its global session ends
+  AfterGlobalSessionEnd,
+  /// The cosign's signing network wasn't a participant in this global session
+  NonParticipatingNetwork,
+  /// The cosign had an invalid signature
+  InvalidSignature,
+  /// The cosign is for a global session which has yet to have its declaration block cosigned
+  FutureGlobalSession,
+}
+
+impl IntakeCosignError {
+  /// If this error is temporal to the local view
+  pub fn temporal(&self) -> bool {
+    match self {
+      IntakeCosignError::NotYetIndexedBlock |
+      IntakeCosignError::StaleCosign |
+      IntakeCosignError::UnrecognizedGlobalSession |
+      IntakeCosignError::FutureGlobalSession => true,
+      IntakeCosignError::BeforeGlobalSessionStart |
+      IntakeCosignError::AfterGlobalSessionEnd |
+      IntakeCosignError::NonParticipatingNetwork |
+      IntakeCosignError::InvalidSignature => false,
+    }
+  }
+}
+
 /// The interface to manage cosigning with.
 pub struct Cosigning<D: Db> {
   db: D,
@@ -239,7 +295,7 @@ impl<D: Db> Cosigning<D> {
   /// only used once at any given time.
   pub fn spawn<R: RequestNotableCosigns>(
     db: D,
-    serai: Serai,
+    serai: Arc<Serai>,
     request: R,
     tasks_to_run_upon_cosigning: Vec<TaskHandle>,
   ) -> Self {
@@ -251,8 +307,12 @@ impl<D: Db> Cosigning<D> {
        .continually_run(intend_task, vec![evaluator_task_handle]),
    );
    tokio::spawn(
-      (evaluator::CosignEvaluatorTask { db: db.clone(), request })
-        .continually_run(evaluator_task, vec![delay_task_handle]),
+      (evaluator::CosignEvaluatorTask {
+        db: db.clone(),
+        request,
+        last_request_for_cosigns: Instant::now(),
+      })
+      .continually_run(evaluator_task, vec![delay_task_handle]),
    );
    tokio::spawn(
      (delay::CosignDelayTask { db: db.clone() })
@@ -270,14 +330,14 @@ impl<D: Db> Cosigning<D> {
    Ok(LatestCosignedBlockNumber::get(getter).unwrap_or(0))
  }
 
-  /// Fetch an cosigned Substrate block by its block number.
+  /// Fetch a cosigned Substrate block's hash by its block number.
  pub fn cosigned_block(getter: &impl Get, block_number: u64) -> Result<Option<[u8; 32]>, Faulted> {
    if block_number > Self::latest_cosigned_block_number(getter)? {
      return Ok(None);
    }
 
    Ok(Some(
-      SubstrateBlocks::get(getter, block_number).expect("cosigned block but didn't index it"),
+      SubstrateBlockHash::get(getter, block_number).expect("cosigned block but didn't index it"),
    ))
  }
 
@@ -286,8 +346,8 @@ impl<D: Db> Cosigning<D> {
  /// If this global session hasn't produced any notable cosigns, this will return the latest
  /// cosigns for this session.
  pub fn notable_cosigns(getter: &impl Get, global_session: [u8; 32]) -> Vec<SignedCosign> {
-    let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
-    for network in serai_client::primitives::NETWORKS {
+    let mut cosigns = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
+    for network in serai_client::primitives::EXTERNAL_NETWORKS {
      if let Some(cosign) = NetworksLatestCosignedBlock::get(getter, global_session, network) {
        cosigns.push(cosign);
      }
@@ -304,7 +364,7 @@ impl<D: Db> Cosigning<D> {
      let mut cosigns = Faults::get(&self.db, faulted).expect("faulted with no faults");
      // Also include all of our recognized-as-honest cosigns in an attempt to induce fault
      // identification in those who see the faulty cosigns as honest
-      for network in serai_client::primitives::NETWORKS {
+      for network in serai_client::primitives::EXTERNAL_NETWORKS {
        if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, faulted, network) {
          if cosign.cosign.global_session == faulted {
            cosigns.push(cosign);
@@ -316,8 +376,8 @@ impl<D: Db> Cosigning<D> {
      let Some(global_session) = evaluator::currently_evaluated_global_session(&self.db) else {
        return vec![];
      };
-      let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
-      for network in serai_client::primitives::NETWORKS {
+      let mut cosigns = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
+      for network in serai_client::primitives::EXTERNAL_NETWORKS {
        if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, global_session, network) {
          cosigns.push(cosign);
        }
@@ -326,27 +386,16 @@ impl<D: Db> Cosigning<D> {
    }
  }
 
-  /// Intake a cosign from the Serai network.
-  ///
-  /// - Returns Err(_) if there was an error trying to validate the cosign and it should be retired
-  ///   later.
-  /// - Returns Ok(true) if the cosign was successfully handled or could not be handled at this
-  ///   time.
-  /// - Returns Ok(false) if the cosign was invalid.
-  //
-  // We collapse a cosign which shouldn't be handled yet into a valid cosign (`Ok(true)`) as we
-  // assume we'll either explicitly request it if we need it or we'll naturally see it (or a later,
-  // more relevant, cosign) again.
+  /// Intake a cosign.
  //
  // Takes `&mut self` as this should only be called once at any given moment.
-  // TODO: Don't overload bool here
-  pub fn intake_cosign(&mut self, signed_cosign: &SignedCosign) -> Result<bool, String> {
+  pub fn intake_cosign(&mut self, signed_cosign: &SignedCosign) -> Result<(), IntakeCosignError> {
    let cosign = &signed_cosign.cosign;
    let network = cosign.cosigner;
 
    // Check our indexed blockchain includes a block with this block number
-    let Some(our_block_hash) = SubstrateBlocks::get(&self.db, cosign.block_number) else {
-      return Ok(true);
+    let Some(our_block_hash) = SubstrateBlockHash::get(&self.db, cosign.block_number) else {
+      Err(IntakeCosignError::NotYetIndexedBlock)?
    };
    let faulty = cosign.block_hash != our_block_hash;
 
@@ -356,20 +405,19 @@ impl<D: Db> Cosigning<D> {
        NetworksLatestCosignedBlock::get(&self.db, cosign.global_session, network)
      {
        if existing.cosign.block_number >= cosign.block_number {
-          return Ok(true);
+          Err(IntakeCosignError::StaleCosign)?;
        }
      }
    }
 
    let Some(global_session) = GlobalSessions::get(&self.db, cosign.global_session) else {
-      // Unrecognized global session
-      return Ok(true);
+      Err(IntakeCosignError::UnrecognizedGlobalSession)?
    };
 
    // Check the cosigned block number is in range to the global session
    if cosign.block_number < global_session.start_block_number {
      // Cosign is for a block predating the global session
-      return Ok(false);
+      Err(IntakeCosignError::BeforeGlobalSessionStart)?;
    }
    if !faulty {
      // This prevents a malicious validator set, on the same chain, from producing a cosign after
@@ -377,7 +425,7 @@ impl<D: Db> Cosigning<D> {
      if let Some(last_block) = GlobalSessionsLastBlock::get(&self.db, cosign.global_session) {
        if cosign.block_number > last_block {
          // Cosign is for a block after the last block this global session should have signed
-          return Ok(false);
+          Err(IntakeCosignError::AfterGlobalSessionEnd)?;
        }
      }
    }
@@ -386,13 +434,13 @@ impl<D: Db> Cosigning<D> {
    {
      let key = Public::from({
        let Some(key) = global_session.keys.get(&network) else {
-          return Ok(false);
+          Err(IntakeCosignError::NonParticipatingNetwork)?
        };
        *key
      });
 
      if !signed_cosign.verify_signature(key) {
-        return Ok(false);
+        Err(IntakeCosignError::InvalidSignature)?;
      }
    }
 
@@ -408,7 +456,7 @@ impl<D: Db> Cosigning<D> {
      // block declaring it was cosigned
      if (global_session.start_block_number - 1) > latest_cosigned_block_number {
        drop(txn);
-        return Ok(true);
+        return Err(IntakeCosignError::FutureGlobalSession);
      }
 
      // This is safe as it's in-range and newer, as prior checked since it isn't faulty
@@ -422,9 +470,10 @@ impl<D: Db> Cosigning<D> {
 
      let mut weight_cosigned = 0;
      for fault in &faults {
-        let Some(stake) = global_session.stakes.get(&fault.cosign.cosigner) else {
-          Err("cosigner with recognized key didn't have a stake entry saved".to_string())?
-        };
+        let stake = global_session
+          .stakes
+          .get(&fault.cosign.cosigner)
+          .expect("cosigner with recognized key didn't have a stake entry saved");
        weight_cosigned += stake;
      }
 
@@ -436,15 +485,15 @@ impl<D: Db> Cosigning<D> {
      }
 
      txn.commit();
-      Ok(true)
+      Ok(())
    }
 
-  /// Receive intended cosigns to produce for this ValidatorSet.
+  /// Receive intended cosigns to produce for this ExternalValidatorSet.
  ///
  /// All cosigns intended, up to and including the next notable cosign, are returned.
  ///
  /// This will drain the internal channel and not re-yield these intentions again.
-  pub fn intended_cosigns(txn: &mut impl DbTxn, set: ValidatorSet) -> Vec<CosignIntent> {
+  pub fn intended_cosigns(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec<CosignIntent> {
    let mut res: Vec<CosignIntent> = vec![];
    // While we have yet to find a notable cosign...
    while !res.last().map(|cosign| cosign.notable).unwrap_or(false) {
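
A hedged sketch of how a caller might use the new `IntakeCosignError` and its `temporal` helper; the wrapper function and its bindings are hypothetical, while `Cosigning::intake_cosign` and `IntakeCosignError::temporal` are as introduced above.

use serai_db::Db;
use serai_cosign::{Cosigning, SignedCosign, IntakeCosignError};

// Hypothetical wrapper: decide whether a rejected cosign should simply be retried later
fn handle_incoming_cosign<D: Db>(
  cosigning: &mut Cosigning<D>,
  signed_cosign: &SignedCosign,
) -> Result<(), IntakeCosignError> {
  match cosigning.intake_cosign(signed_cosign) {
    Ok(()) => Ok(()),
    // Temporal errors (not-yet-indexed block, stale cosign, unrecognized or future
    // global session) only reflect our local view lagging and may be retried later
    Err(e) if e.temporal() => Ok(()),
    // Non-temporal errors (out-of-range block, non-participant, bad signature) are final
    Err(e) => Err(e),
  }
}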

@@ -8,7 +8,7 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
 publish = false
-rust-version = "1.81"
+rust-version = "1.85"
 
 [package.metadata.docs.rs]
 all-features = true
@@ -24,10 +24,10 @@ serai-db = { path = "../../common/db", version = "0.1" }
 
 serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
 serai-cosign = { path = "../cosign" }
-tributary = { package = "tributary-chain", path = "../tributary" }
+tributary-sdk = { path = "../tributary-sdk" }
 
-async-channel = { version = "2", default-features = false, features = ["std"] }
 futures-lite = { version = "2", default-features = false, features = ["std"] }
+tokio = { version = "1", default-features = false, features = ["sync", "macros"] }
 
 log = { version = "0.4", default-features = false, features = ["std"] }
 serai-task = { path = "../../common/task", version = "0.1" }

@@ -1,3 +1,3 @@
 # Serai Coordinator P2P
 
-The P2P abstraction used by Serai's coordinator.
+The P2P abstraction used by Serai's coordinator, and tasks over it.

@@ -8,7 +8,7 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
 publish = false
-rust-version = "1.81"
+rust-version = "1.87"
 
 [package.metadata.docs.rs]
 all-features = true
@@ -23,7 +23,7 @@ async-trait = { version = "0.1", default-features = false }
 rand_core = { version = "0.6", default-features = false, features = ["std"] }
 
 zeroize = { version = "^1.5", default-features = false, features = ["std"] }
-blake2 = { version = "0.10", default-features = false, features = ["std"] }
+blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }
 schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
 
 hex = { version = "0.4", default-features = false, features = ["std"] }
@@ -31,12 +31,11 @@ borsh = { version = "1", default-features = false, features = ["std", "derive",
 
 serai-client = { path = "../../../substrate/client", default-features = false, features = ["serai", "borsh"] }
 serai-cosign = { path = "../../cosign" }
-tributary = { package = "tributary-chain", path = "../../tributary" }
+tributary-sdk = { path = "../../tributary-sdk" }
 
-void = { version = "1", default-features = false }
 futures-util = { version = "0.3", default-features = false, features = ["std"] }
 tokio = { version = "1", default-features = false, features = ["sync"] }
-libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] }
+libp2p = { version = "0.56", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] }
 
 log = { version = "0.4", default-features = false, features = ["std"] }
 serai-task = { path = "../../../common/task", version = "0.1" }

@@ -11,8 +11,7 @@ use serai_client::primitives::PublicKey as Public;
 
 use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
 use libp2p::{
-  core::UpgradeInfo,
-  InboundUpgrade, OutboundUpgrade,
+  core::upgrade::{UpgradeInfo, InboundConnectionUpgrade, OutboundConnectionUpgrade},
   identity::{self, PeerId},
   noise,
 };
@@ -119,12 +118,18 @@ impl UpgradeInfo for OnlyValidators {
   }
 }
 
-impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> InboundUpgrade<S> for OnlyValidators {
+impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> InboundConnectionUpgrade<S>
+  for OnlyValidators
+{
   type Output = (PeerId, noise::Output<S>);
   type Error = io::Error;
   type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;
 
-  fn upgrade_inbound(self, socket: S, info: Self::Info) -> Self::Future {
+  fn upgrade_inbound(
+    self,
+    socket: S,
+    info: <Self as UpgradeInfo>::Info,
+  ) -> <Self as InboundConnectionUpgrade<S>>::Future {
     Box::pin(async move {
       let (dialer_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
         .unwrap()
@@ -147,12 +152,18 @@ impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> InboundUpgrade<S> for O
   }
 }
 
-impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> OutboundUpgrade<S> for OnlyValidators {
+impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> OutboundConnectionUpgrade<S>
+  for OnlyValidators
+{
   type Output = (PeerId, noise::Output<S>);
   type Error = io::Error;
   type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;
 
-  fn upgrade_outbound(self, socket: S, info: Self::Info) -> Self::Future {
+  fn upgrade_outbound(
+    self,
+    socket: S,
+    info: <Self as UpgradeInfo>::Info,
+  ) -> <Self as OutboundConnectionUpgrade<S>>::Future {
     Box::pin(async move {
       let (listener_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
         .unwrap()
@@ -1,11 +1,11 @@
 use core::future::Future;
-use std::collections::HashSet;
+use std::{sync::Arc, collections::HashSet};

 use rand_core::{RngCore, OsRng};

 use tokio::sync::mpsc;

-use serai_client::Serai;
+use serai_client::{SeraiError, Serai};

 use libp2p::{
   core::multiaddr::{Protocol, Multiaddr},

@@ -29,14 +29,18 @@ const TARGET_PEERS_PER_NETWORK: usize = 5;
 // TODO const TARGET_DIALED_PEERS_PER_NETWORK: usize = 3;

 pub(crate) struct DialTask {
-  serai: Serai,
+  serai: Arc<Serai>,
   validators: Validators,
   peers: Peers,
   to_dial: mpsc::UnboundedSender<DialOpts>,
 }

 impl DialTask {
-  pub(crate) fn new(serai: Serai, peers: Peers, to_dial: mpsc::UnboundedSender<DialOpts>) -> Self {
+  pub(crate) fn new(
+    serai: Arc<Serai>,
+    peers: Peers,
+    to_dial: mpsc::UnboundedSender<DialOpts>,
+  ) -> Self {
     DialTask { serai: serai.clone(), validators: Validators::new(serai).0, peers, to_dial }
   }
 }

@@ -46,7 +50,9 @@ impl ContinuallyRan for DialTask {
   const DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
   const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 10 * 60;

-  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
+  type Error = SeraiError;
+
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
       self.validators.update().await?;

@@ -79,8 +85,7 @@ impl ContinuallyRan for DialTask {
             .unwrap_or(0)
             .saturating_sub(1))
         {
-          let mut potential_peers =
-            self.serai.p2p_validators(network).await.map_err(|e| format!("{e:?}"))?;
+          let mut potential_peers = self.serai.p2p_validators(network).await?;
           for _ in 0 .. (TARGET_PEERS_PER_NETWORK - peer_count) {
             if potential_peers.is_empty() {
               break;
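Note: several tasks in these commits swap `Result<bool, String>` for `Result<bool, Self::Error>` with an associated `type Error` (here `SeraiError`), so callers stop round-tripping errors through `format!`. A rough sketch of that trait shape and an implementation — the trait below is a simplified stand-in, not the actual `serai_task::ContinuallyRan` definition, and `futures_lite::future::block_on` is only used to keep the example runnable:

```rust
use core::{fmt::Debug, future::Future};

// Simplified stand-in for a continually-ran task trait with a typed error.
trait ContinuallyRanLike {
  // Assumed semantics: seconds to wait between iterations, with a backoff ceiling.
  const DELAY_BETWEEN_ITERATIONS: u64 = 5;
  const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 60;

  // A typed error instead of `String`, so callers can match on it or log it structurally.
  type Error: Debug;

  // Returns Ok(true) if the iteration did work and the task should run again promptly.
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>>;
}

#[derive(Debug)]
enum FetchError {
  Disconnected,
}

struct Fetcher {
  attempts: u32,
}

impl ContinuallyRanLike for Fetcher {
  type Error = FetchError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      self.attempts += 1;
      // Every third attempt fails, to show a typed error propagating out.
      if self.attempts % 3 == 0 {
        Err(FetchError::Disconnected)
      } else {
        Ok(true)
      }
    }
  }
}

fn main() {
  // A real runner would loop with backoff on Err; one iteration suffices here.
  let mut fetcher = Fetcher { attempts: 0 };
  let result = futures_lite::future::block_on(fetcher.run_iteration());
  println!("{result:?}");
}
```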
@@ -13,7 +13,7 @@ pub use libp2p::gossipsub::Event;
 use serai_cosign::SignedCosign;

 // Block size limit + 16 KB of space for signatures/metadata
-pub(crate) const MAX_LIBP2P_GOSSIP_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMIT + 16384;
+pub(crate) const MAX_LIBP2P_GOSSIP_MESSAGE_SIZE: usize = tributary_sdk::BLOCK_SIZE_LIMIT + 16384;

 const LIBP2P_PROTOCOL: &str = "/serai/coordinator/gossip/1.0.0";
 const BASE_TOPIC: &str = "/";

@@ -42,9 +42,10 @@ pub(crate) type Behavior = Behaviour<IdentityTransform, AllowAllSubscriptionFilt
 pub(crate) fn new_behavior() -> Behavior {
   // The latency used by the Tendermint protocol, used here as the gossip epoch duration
   // libp2p-rs defaults to 1 second, whereas ours will be ~2
-  let heartbeat_interval = tributary::tendermint::LATENCY_TIME;
+  let heartbeat_interval = tributary_sdk::tendermint::LATENCY_TIME;
   // The amount of heartbeats which will occur within a single Tributary block
-  let heartbeats_per_block = tributary::tendermint::TARGET_BLOCK_TIME.div_ceil(heartbeat_interval);
+  let heartbeats_per_block =
+    tributary_sdk::tendermint::TARGET_BLOCK_TIME.div_ceil(heartbeat_interval);
   // libp2p-rs defaults to 5, whereas ours will be ~8
   let heartbeats_to_keep = 2 * heartbeats_per_block;
   // libp2p-rs defaults to 3 whereas ours will be ~4
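Note: these values feed a gossipsub configuration whose heartbeat and history windows are sized from the Tendermint latency/block time instead of libp2p's defaults. A rough sketch of how such values could be plugged into `libp2p::gossipsub::ConfigBuilder`; the millisecond figures are placeholders picked to line up with the "~2", "~8", and "~4" comments above (the real `LATENCY_TIME`/`TARGET_BLOCK_TIME` constants aren't shown in this diff), and the exact builder calls this crate uses are not visible here:

```rust
use core::time::Duration;
use libp2p::gossipsub::ConfigBuilder;

fn example_gossip_config() -> libp2p::gossipsub::Config {
  // Placeholders: assume ~2s Tendermint latency and ~8s target block time.
  let latency_time_ms: u32 = 2000;
  let target_block_time_ms: u32 = 8000;

  // Gossip epoch duration, reused as the gossipsub heartbeat.
  let heartbeat_interval = latency_time_ms;
  // Heartbeats per Tributary block, rounded up (4 with these placeholders).
  let heartbeats_per_block = target_block_time_ms.div_ceil(heartbeat_interval);

  ConfigBuilder::default()
    .heartbeat_interval(Duration::from_millis(heartbeat_interval.into()))
    // Keep two blocks' worth of messages in the message cache (~8 heartbeats).
    .history_length((2 * heartbeats_per_block).try_into().unwrap())
    // Gossip about roughly one block's worth of history (~4 heartbeats).
    .history_gossip(heartbeats_per_block.try_into().unwrap())
    .max_transmit_size(1024 * 1024)
    .build()
    .unwrap()
}

fn main() {
  let _config = example_gossip_config();
}
```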
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]

@@ -14,12 +14,12 @@ use zeroize::Zeroizing;
 use schnorrkel::Keypair;

 use serai_client::{
-  primitives::{NetworkId, PublicKey},
-  validator_sets::primitives::ValidatorSet,
+  primitives::{ExternalNetworkId, PublicKey},
+  validator_sets::primitives::ExternalValidatorSet,
   Serai,
 };

-use tokio::sync::{mpsc, Mutex, RwLock};
+use tokio::sync::{mpsc, oneshot, Mutex, RwLock};

 use serai_task::{Task, ContinuallyRan};

@@ -35,7 +35,7 @@ use libp2p::{
   SwarmBuilder,
 };

-use serai_coordinator_p2p::{oneshot, Heartbeat, TributaryBlockWithCommit};
+use serai_coordinator_p2p::{Heartbeat, TributaryBlockWithCommit};

 /// A struct to sync the validators from the Serai node in order to keep track of them.
 mod validators;

@@ -50,7 +50,7 @@ mod ping;

 /// The request-response messages and behavior
 mod reqres;
-use reqres::{RequestId, Request, Response};
+use reqres::{InboundRequestId, Request, Response};

 /// The gossip messages and behavior
 mod gossip;

@@ -66,14 +66,6 @@ use dial::DialTask;

 const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o')

-// usize::max, manually implemented, as max isn't a const fn
-const MAX_LIBP2P_MESSAGE_SIZE: usize =
-  if gossip::MAX_LIBP2P_GOSSIP_MESSAGE_SIZE > reqres::MAX_LIBP2P_REQRES_MESSAGE_SIZE {
-    gossip::MAX_LIBP2P_GOSSIP_MESSAGE_SIZE
-  } else {
-    reqres::MAX_LIBP2P_REQRES_MESSAGE_SIZE
-  };
-
 fn peer_id_from_public(public: PublicKey) -> PeerId {
   // 0 represents the identity Multihash, that no hash was performed
   // It's an internal constant so we can't refer to the constant inside libp2p
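Note: the body of `peer_id_from_public` is cut off in this diff, but the comments describe wrapping the raw public key in an identity multihash (code `0`, meaning "no hash performed"). A hedged sketch of just that multihash mechanic, assuming a `multihash` crate version compatible with the one libp2p uses; the repository's actual function may encode the key differently:

```rust
use libp2p::PeerId;
use multihash::Multihash;

// Code 0x00 is the identity multihash: the "digest" is the input itself, unhashed.
const IDENTITY_MULTIHASH_CODE: u64 = 0x00;

fn peer_id_from_bytes(public_key_bytes: &[u8]) -> PeerId {
  let multihash = Multihash::<64>::wrap(IDENTITY_MULTIHASH_CODE, public_key_bytes)
    .expect("public key exceeded the 64-byte multihash digest limit");
  PeerId::from_multihash(multihash).expect("identity multihash wasn't a valid PeerId")
}

fn main() {
  // 32 hypothetical public-key bytes, standing in for a Serai validator key.
  let peer_id = peer_id_from_bytes(&[0u8; 32]);
  println!("{peer_id}");
}
```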
@@ -112,7 +104,7 @@ impl serai_coordinator_p2p::Peer<'_> for Peer<'_> {

 #[derive(Clone)]
 struct Peers {
-  peers: Arc<RwLock<HashMap<NetworkId, HashSet<PeerId>>>>,
+  peers: Arc<RwLock<HashMap<ExternalNetworkId, HashSet<PeerId>>>>,
 }

 // Consider adding identify/kad/autonat/rendevous/(relay + dcutr). While we currently use the Serai

@@ -131,33 +123,36 @@ struct Behavior {
   gossip: gossip::Behavior,
 }

-/// The libp2p-backed P2P implementation.
-///
-/// The P2p trait implementation does not support backpressure and is expected to be fully
-/// utilized. Failure to poll the entire API will cause unbounded memory growth.
 #[allow(clippy::type_complexity)]
-#[derive(Clone)]
-pub struct Libp2p {
+struct Libp2pInner {
   peers: Peers,

   gossip: mpsc::UnboundedSender<Message>,
   outbound_requests: mpsc::UnboundedSender<(PeerId, Request, oneshot::Sender<Response>)>,

-  tributary_gossip: Arc<Mutex<mpsc::UnboundedReceiver<([u8; 32], Vec<u8>)>>>,
+  tributary_gossip: Mutex<mpsc::UnboundedReceiver<([u8; 32], Vec<u8>)>>,

-  signed_cosigns: Arc<Mutex<mpsc::UnboundedReceiver<SignedCosign>>>,
+  signed_cosigns: Mutex<mpsc::UnboundedReceiver<SignedCosign>>,
   signed_cosigns_send: mpsc::UnboundedSender<SignedCosign>,

-  heartbeat_requests: Arc<Mutex<mpsc::UnboundedReceiver<(RequestId, ValidatorSet, [u8; 32])>>>,
-  notable_cosign_requests: Arc<Mutex<mpsc::UnboundedReceiver<(RequestId, [u8; 32])>>>,
-  inbound_request_responses: mpsc::UnboundedSender<(RequestId, Response)>,
+  heartbeat_requests:
+    Mutex<mpsc::UnboundedReceiver<(InboundRequestId, ExternalValidatorSet, [u8; 32])>>,
+  notable_cosign_requests: Mutex<mpsc::UnboundedReceiver<(InboundRequestId, [u8; 32])>>,
+  inbound_request_responses: mpsc::UnboundedSender<(InboundRequestId, Response)>,
 }

+/// The libp2p-backed P2P implementation.
+///
+/// The P2p trait implementation does not support backpressure and is expected to be fully
+/// utilized. Failure to poll the entire API will cause unbounded memory growth.
+#[derive(Clone)]
+pub struct Libp2p(Arc<Libp2pInner>);
+
 impl Libp2p {
   /// Create a new libp2p-backed P2P instance.
   ///
   /// This will spawn all of the internal tasks necessary for functioning.
-  pub fn new(serai_key: &Zeroizing<Keypair>, serai: Serai) -> Libp2p {
+  pub fn new(serai_key: &Zeroizing<Keypair>, serai: Arc<Serai>) -> Libp2p {
     // Define the object we track peers with
     let peers = Peers { peers: Arc::new(RwLock::new(HashMap::new())) };

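Note: this hunk collapses a struct of many individually `Arc`-wrapped fields into a single `Libp2p(Arc<Libp2pInner>)` newtype, so one reference count covers the whole handle and `Clone` stays a single refcount bump. A generic illustration of that pattern (the names below are invented for the example):

```rust
use std::sync::{Arc, Mutex};

// All shared state lives in one inner struct...
struct HandleInner {
  counter: Mutex<u64>,
  label: String,
}

// ...and the public handle is a thin Arc newtype, cheap to clone and hand to tasks.
#[derive(Clone)]
pub struct Handle(Arc<HandleInner>);

impl Handle {
  pub fn new(label: &str) -> Handle {
    Handle(Arc::new(HandleInner { counter: Mutex::new(0), label: label.to_string() }))
  }

  pub fn bump(&self) -> u64 {
    // Methods reach the shared state through `self.0`, mirroring the `.0` accesses
    // threaded through the hunks below.
    let mut counter = self.0.counter.lock().unwrap();
    *counter += 1;
    *counter
  }

  pub fn label(&self) -> &str {
    &self.0.label
  }
}

fn main() {
  let a = Handle::new("p2p");
  let b = a.clone();
  a.bump();
  assert_eq!(b.bump(), 2);
  assert_eq!(b.label(), "p2p");
}
```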
@@ -174,19 +169,9 @@ impl Libp2p {
       Ok(OnlyValidators { serai_key: serai_key.clone(), noise_keypair: noise_keypair.clone() })
     };

-    let new_yamux = || {
-      let mut config = yamux::Config::default();
-      // 1 MiB default + max message size
-      config.set_max_buffer_size((1024 * 1024) + MAX_LIBP2P_MESSAGE_SIZE);
-      // 256 KiB default + max message size
-      config
-        .set_receive_window_size(((256 * 1024) + MAX_LIBP2P_MESSAGE_SIZE).try_into().unwrap());
-      config
-    };
-
     let mut swarm = SwarmBuilder::with_existing_identity(identity::Keypair::generate_ed25519())
       .with_tokio()
-      .with_tcp(TcpConfig::default().nodelay(false), new_only_validators, new_yamux)
+      .with_tcp(TcpConfig::default().nodelay(true), new_only_validators, yamux::Config::default)
       .unwrap()
       .with_behaviour(|_| Behavior {
         allow_list: allow_block_list::Behaviour::default(),

@@ -239,28 +224,29 @@ impl Libp2p {
       inbound_request_responses_recv,
     );

-    Libp2p {
+    Libp2p(Arc::new(Libp2pInner {
       peers,

       gossip: gossip_send,
       outbound_requests: outbound_requests_send,

-      tributary_gossip: Arc::new(Mutex::new(tributary_gossip_recv)),
+      tributary_gossip: Mutex::new(tributary_gossip_recv),

-      signed_cosigns: Arc::new(Mutex::new(signed_cosigns_recv)),
+      signed_cosigns: Mutex::new(signed_cosigns_recv),
       signed_cosigns_send,

-      heartbeat_requests: Arc::new(Mutex::new(heartbeat_requests_recv)),
-      notable_cosign_requests: Arc::new(Mutex::new(notable_cosign_requests_recv)),
+      heartbeat_requests: Mutex::new(heartbeat_requests_recv),
+      notable_cosign_requests: Mutex::new(notable_cosign_requests_recv),
       inbound_request_responses: inbound_request_responses_send,
-    }
+    }))
   }
 }

-impl tributary::P2p for Libp2p {
+impl tributary_sdk::P2p for Libp2p {
   fn broadcast(&self, tributary: [u8; 32], message: Vec<u8>) -> impl Send + Future<Output = ()> {
     async move {
       self
+        .0
         .gossip
         .send(Message::Tributary { tributary, message })
         .expect("gossip recv channel was dropped?");

@@ -281,7 +267,7 @@ impl serai_cosign::RequestNotableCosigns for Libp2p {

       let request = Request::NotableCosigns { global_session };

-      let peers = self.peers.peers.read().await.clone();
+      let peers = self.0.peers.peers.read().await.clone();
       // HashSet of all peers
       let peers = peers.into_values().flat_map(<_>::into_iter).collect::<HashSet<_>>();
       // Vec of all peers

@@ -297,6 +283,7 @@ impl serai_cosign::RequestNotableCosigns for Libp2p {

         let (sender, receiver) = oneshot::channel();
         self
+          .0
           .outbound_requests
           .send((peer, request, sender))
           .expect("outbound requests recv channel was dropped?");

@@ -310,6 +297,7 @@ impl serai_cosign::RequestNotableCosigns for Libp2p {
     {
       for cosign in cosigns {
         self
+          .0
           .signed_cosigns_send
           .send(cosign)
           .expect("signed_cosigns recv in this object was dropped?");

@@ -325,24 +313,31 @@ impl serai_cosign::RequestNotableCosigns for Libp2p {
 impl serai_coordinator_p2p::P2p for Libp2p {
   type Peer<'a> = Peer<'a>;

-  fn peers(&self, network: NetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>> {
+  fn peers(&self, network: ExternalNetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>> {
     async move {
-      let Some(peer_ids) = self.peers.peers.read().await.get(&network).cloned() else {
+      let Some(peer_ids) = self.0.peers.peers.read().await.get(&network).cloned() else {
         return vec![];
       };
       let mut res = vec![];
       for id in peer_ids {
-        res.push(Peer { outbound_requests: &self.outbound_requests, id });
+        res.push(Peer { outbound_requests: &self.0.outbound_requests, id });
       }
       res
     }
   }

+  fn publish_cosign(&self, cosign: SignedCosign) -> impl Send + Future<Output = ()> {
+    async move {
+      self.0.gossip.send(Message::Cosign(cosign)).expect("gossip recv channel was dropped?");
+    }
+  }
+
   fn heartbeat(
     &self,
   ) -> impl Send + Future<Output = (Heartbeat, oneshot::Sender<Vec<TributaryBlockWithCommit>>)> {
     async move {
       let (request_id, set, latest_block_hash) = self
+        .0
         .heartbeat_requests
         .lock()
         .await

@@ -351,7 +346,7 @@ impl serai_coordinator_p2p::P2p for Libp2p {
         .expect("heartbeat_requests_send was dropped?");
       let (sender, receiver) = oneshot::channel();
       tokio::spawn({
-        let respond = self.inbound_request_responses.clone();
+        let respond = self.0.inbound_request_responses.clone();
         async move {
           // The swarm task expects us to respond to every request. If the caller drops this
           // channel, we'll receive `Err` and respond with `vec![]`, safely satisfying that bound

@@ -375,6 +370,7 @@ impl serai_coordinator_p2p::P2p for Libp2p {
   ) -> impl Send + Future<Output = ([u8; 32], oneshot::Sender<Vec<SignedCosign>>)> {
     async move {
       let (request_id, global_session) = self
+        .0
         .notable_cosign_requests
         .lock()
         .await

@@ -383,7 +379,7 @@ impl serai_coordinator_p2p::P2p for Libp2p {
         .expect("notable_cosign_requests_send was dropped?");
       let (sender, receiver) = oneshot::channel();
       tokio::spawn({
-        let respond = self.inbound_request_responses.clone();
+        let respond = self.0.inbound_request_responses.clone();
         async move {
           let response = if let Ok(notable_cosigns) = receiver.await {
             Response::NotableCosigns(notable_cosigns)

@@ -401,13 +397,14 @@ impl serai_coordinator_p2p::P2p for Libp2p {

   fn tributary_message(&self) -> impl Send + Future<Output = ([u8; 32], Vec<u8>)> {
     async move {
-      self.tributary_gossip.lock().await.recv().await.expect("tributary_gossip send was dropped?")
+      self.0.tributary_gossip.lock().await.recv().await.expect("tributary_gossip send was dropped?")
     }
   }

   fn cosign(&self) -> impl Send + Future<Output = SignedCosign> {
     async move {
       self
+        .0
         .signed_cosigns
         .lock()
         .await
@@ -1,6 +1,6 @@
 use core::time::Duration;

-use tributary::tendermint::LATENCY_TIME;
+use tributary_sdk::tendermint::LATENCY_TIME;

 use libp2p::ping::{self, Config, Behaviour};
 pub use ping::Event;

@@ -10,7 +10,7 @@ use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
 use libp2p::request_response::{
   self, Codec as CodecTrait, Event as GenericEvent, Config, Behaviour, ProtocolSupport,
 };
-pub use request_response::{RequestId, Message};
+pub use request_response::{InboundRequestId, Message};

 use serai_cosign::SignedCosign;

@@ -19,7 +19,7 @@ use serai_coordinator_p2p::{Heartbeat, TributaryBlockWithCommit};
 /// The maximum message size for the request-response protocol
 // This is derived from the heartbeat message size as it's our largest message
 pub(crate) const MAX_LIBP2P_REQRES_MESSAGE_SIZE: usize =
-  (tributary::BLOCK_SIZE_LIMIT * serai_coordinator_p2p::heartbeat::BLOCKS_PER_BATCH) + 1024;
+  1024 + serai_coordinator_p2p::heartbeat::BATCH_SIZE_LIMIT;

 const PROTOCOL: &str = "/serai/coordinator/reqres/1.0.0";

@@ -129,7 +129,6 @@ pub(crate) type Event = GenericEvent<Request, Response>;

 pub(crate) type Behavior = Behaviour<Codec>;
 pub(crate) fn new_behavior() -> Behavior {
-  let mut config = Config::default();
-  config.set_request_timeout(Duration::from_secs(5));
+  let config = Config::default().with_request_timeout(Duration::from_secs(5));
   Behavior::new([(PROTOCOL, ProtocolSupport::Full)], config)
 }
@@ -6,9 +6,9 @@ use std::{

 use borsh::BorshDeserialize;

-use serai_client::validator_sets::primitives::ValidatorSet;
+use serai_client::validator_sets::primitives::ExternalValidatorSet;

-use tokio::sync::{mpsc, RwLock};
+use tokio::sync::{mpsc, oneshot, RwLock};

 use serai_task::TaskHandle;

@@ -17,11 +17,11 @@ use serai_cosign::SignedCosign;
 use futures_util::StreamExt;
 use libp2p::{
   identity::PeerId,
-  request_response::{RequestId, ResponseChannel},
+  request_response::{InboundRequestId, OutboundRequestId, ResponseChannel},
   swarm::{dial_opts::DialOpts, SwarmEvent, Swarm},
 };

-use serai_coordinator_p2p::{oneshot, Heartbeat};
+use serai_coordinator_p2p::Heartbeat;

 use crate::{
   Peers, BehaviorEvent, Behavior,

@@ -65,17 +65,12 @@ pub(crate) struct SwarmTask {
   tributary_gossip: mpsc::UnboundedSender<([u8; 32], Vec<u8>)>,

   outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,
-  outbound_request_responses: HashMap<RequestId, oneshot::Sender<Response>>,
+  outbound_request_responses: HashMap<OutboundRequestId, oneshot::Sender<Response>>,

-  inbound_request_response_channels: HashMap<RequestId, ResponseChannel<Response>>,
-  heartbeat_requests: mpsc::UnboundedSender<(RequestId, ValidatorSet, [u8; 32])>,
-  /* TODO
-    let cosigns = Cosigning::<D>::notable_cosigns(&self.db, global_session);
-    let res = reqres::Response::NotableCosigns(cosigns);
-    let _: Result<_, _> = self.swarm.behaviour_mut().reqres.send_response(channel, res);
-  */
-  notable_cosign_requests: mpsc::UnboundedSender<(RequestId, [u8; 32])>,
-  inbound_request_responses: mpsc::UnboundedReceiver<(RequestId, Response)>,
+  inbound_request_response_channels: HashMap<InboundRequestId, ResponseChannel<Response>>,
+  heartbeat_requests: mpsc::UnboundedSender<(InboundRequestId, ExternalValidatorSet, [u8; 32])>,
+  notable_cosign_requests: mpsc::UnboundedSender<(InboundRequestId, [u8; 32])>,
+  inbound_request_responses: mpsc::UnboundedReceiver<(InboundRequestId, Response)>,
 }

 impl SwarmTask {

@@ -97,7 +92,8 @@ impl SwarmTask {
         }
       }
       gossip::Event::Subscribed { .. } | gossip::Event::Unsubscribed { .. } => {}
-      gossip::Event::GossipsubNotSupported { peer_id } => {
+      gossip::Event::GossipsubNotSupported { peer_id } |
+      gossip::Event::SlowPeer { peer_id, .. } => {
        let _: Result<_, _> = self.swarm.disconnect_peer_id(peer_id);
      }
    }

@@ -227,25 +223,21 @@ impl SwarmTask {
           }
         }

-        SwarmEvent::Behaviour(
-          BehaviorEvent::AllowList(event) | BehaviorEvent::ConnectionLimits(event)
-        ) => {
-          // Ensure these are unreachable cases, not actual events
-          let _: void::Void = event;
+        SwarmEvent::Behaviour(event) => {
+          match event {
+            BehaviorEvent::AllowList(event) | BehaviorEvent::ConnectionLimits(event) => {
+              // This *is* an exhaustive match as these events are empty enums
+              match event {}
+            }
+            BehaviorEvent::Ping(ping::Event { peer: _, connection, result, }) => {
+              if result.is_err() {
+                self.swarm.close_connection(connection);
+              }
+            }
+            BehaviorEvent::Reqres(event) => self.handle_reqres(event),
+            BehaviorEvent::Gossip(event) => self.handle_gossip(event),
             }
-        SwarmEvent::Behaviour(
-          BehaviorEvent::Ping(ping::Event { peer: _, connection, result, })
-        ) => {
-          if result.is_err() {
-            self.swarm.close_connection(connection);
           }
         }
-        SwarmEvent::Behaviour(BehaviorEvent::Reqres(event)) => {
-          self.handle_reqres(event)
-        }
-        SwarmEvent::Behaviour(BehaviorEvent::Gossip(event)) => {
-          self.handle_gossip(event)
-        }

         // We don't handle any of these
         SwarmEvent::IncomingConnection { .. } |

@@ -255,7 +247,14 @@ impl SwarmTask {
         SwarmEvent::ExpiredListenAddr { .. } |
         SwarmEvent::ListenerClosed { .. } |
         SwarmEvent::ListenerError { .. } |
-        SwarmEvent::Dialing { .. } => {}
+        SwarmEvent::Dialing { .. } |
+        SwarmEvent::NewExternalAddrCandidate { .. } |
+        SwarmEvent::ExternalAddrConfirmed { .. } |
+        SwarmEvent::ExternalAddrExpired { .. } |
+        SwarmEvent::NewExternalAddrOfPeer { .. } => {}
+
+        // Requires as SwarmEvent is non-exhaustive
+        _ => log::warn!("unhandled SwarmEvent: {event:?}"),
       }
     }

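Note: two idioms in the hunk above are worth spelling out. `match event {}` statically proves an uninhabited event type can never occur (replacing the old `let _: void::Void = event;`), and the trailing `_ => log::warn!(...)` arm is needed because `SwarmEvent` is marked `#[non_exhaustive]`. A self-contained illustration with stand-in enums (not the libp2p types):

```rust
// An uninhabited type: it has no variants, so no value of it can ever exist.
enum Never {}

// Matching on it with zero arms is exhaustive and doubles as a compile-time proof
// that this code path is unreachable.
fn absurd(never: Never) -> ! {
  match never {}
}

// A library enum marked #[non_exhaustive] may grow variants in minor releases,
// so matches in downstream crates must carry a catch-all arm to keep compiling.
#[non_exhaustive]
#[derive(Debug)]
enum LibEvent {
  Connected,
  Disconnected,
}

fn handle(event: LibEvent) {
  match event {
    LibEvent::Connected => println!("connected"),
    LibEvent::Disconnected => println!("disconnected"),
    // Required in other crates because the enum is non-exhaustive; allowed here
    // since this example defines the enum itself.
    #[allow(unreachable_patterns)]
    _ => println!("unhandled event: {event:?}"),
  }
}

fn main() {
  handle(LibEvent::Connected);
  let _ = absurd; // `absurd` can never actually be called with a value.
}
```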
@@ -326,9 +325,9 @@ impl SwarmTask {

     outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,

-    heartbeat_requests: mpsc::UnboundedSender<(RequestId, ValidatorSet, [u8; 32])>,
-    notable_cosign_requests: mpsc::UnboundedSender<(RequestId, [u8; 32])>,
-    inbound_request_responses: mpsc::UnboundedReceiver<(RequestId, Response)>,
+    heartbeat_requests: mpsc::UnboundedSender<(InboundRequestId, ExternalValidatorSet, [u8; 32])>,
+    notable_cosign_requests: mpsc::UnboundedSender<(InboundRequestId, [u8; 32])>,
+    inbound_request_responses: mpsc::UnboundedReceiver<(InboundRequestId, Response)>,
   ) {
     tokio::spawn(
       SwarmTask {
@@ -4,7 +4,9 @@ use std::{
   collections::{HashSet, HashMap},
 };

-use serai_client::{primitives::NetworkId, validator_sets::primitives::Session, Serai};
+use serai_client::{
+  primitives::ExternalNetworkId, validator_sets::primitives::Session, SeraiError, Serai,
+};

 use serai_task::{Task, ContinuallyRan};

@@ -21,21 +23,21 @@ pub(crate) struct Changes {
 }

 pub(crate) struct Validators {
-  serai: Serai,
+  serai: Arc<Serai>,

   // A cache for which session we're populated with the validators of
-  sessions: HashMap<NetworkId, Session>,
+  sessions: HashMap<ExternalNetworkId, Session>,
   // The validators by network
-  by_network: HashMap<NetworkId, HashSet<PeerId>>,
+  by_network: HashMap<ExternalNetworkId, HashSet<PeerId>>,
   // The validators and their networks
-  validators: HashMap<PeerId, HashSet<NetworkId>>,
+  validators: HashMap<PeerId, HashSet<ExternalNetworkId>>,

   // The channel to send the changes down
   changes: mpsc::UnboundedSender<Changes>,
 }

 impl Validators {
-  pub(crate) fn new(serai: Serai) -> (Self, mpsc::UnboundedReceiver<Changes>) {
+  pub(crate) fn new(serai: Arc<Serai>) -> (Self, mpsc::UnboundedReceiver<Changes>) {
     let (send, recv) = mpsc::unbounded_channel();
     let validators = Validators {
       serai,

@@ -49,10 +51,17 @@ impl Validators {

   async fn session_changes(
     serai: impl Borrow<Serai>,
-    sessions: impl Borrow<HashMap<NetworkId, Session>>,
-  ) -> Result<Vec<(NetworkId, Session, HashSet<PeerId>)>, String> {
-    let temporal_serai =
-      serai.borrow().as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
+    sessions: impl Borrow<HashMap<ExternalNetworkId, Session>>,
+  ) -> Result<Vec<(ExternalNetworkId, Session, HashSet<PeerId>)>, SeraiError> {
+    /*
+      This uses the latest finalized block, not the latest cosigned block, which should be fine as
+      in the worst case, we'd connect to unexpected validators. They still shouldn't be able to
+      bypass the cosign protocol unless a historical global session was malicious, in which case
+      the cosign protocol already breaks.
+
+      Besides, we can't connect to historical validators, only the current validators.
+    */
+    let temporal_serai = serai.borrow().as_of_latest_finalized_block().await?;
     let temporal_serai = temporal_serai.validator_sets();

     let mut session_changes = vec![];

@@ -60,28 +69,25 @@ impl Validators {
     // FuturesUnordered can be bad practice as it'll cause timeouts if infrequently polled, but
     // we poll it till it yields all futures with the most minimal processing possible
     let mut futures = FuturesUnordered::new();
-    for network in serai_client::primitives::NETWORKS {
-      if network == NetworkId::Serai {
-        continue;
-      }
+    for network in serai_client::primitives::EXTERNAL_NETWORKS {
       let sessions = sessions.borrow();
       futures.push(async move {
-        let session = match temporal_serai.session(network).await {
+        let session = match temporal_serai.session(network.into()).await {
           Ok(Some(session)) => session,
           Ok(None) => return Ok(None),
-          Err(e) => return Err(format!("{e:?}")),
+          Err(e) => return Err(e),
         };

         if sessions.get(&network) == Some(&session) {
           Ok(None)
         } else {
-          match temporal_serai.active_network_validators(network).await {
+          match temporal_serai.active_network_validators(network.into()).await {
             Ok(validators) => Ok(Some((
               network,
               session,
               validators.into_iter().map(peer_id_from_public).collect(),
             ))),
-            Err(e) => Err(format!("{e:?}")),
+            Err(e) => Err(e),
           }
         }
       });
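Note: the comment above stresses that `FuturesUnordered` is only safe here because it gets drained to completion with minimal per-item work. A small self-contained sketch of that drain pattern, using `futures_util` (already a dependency of this code) and `futures_lite::future::block_on` purely to make the example executable:

```rust
use futures_util::{stream::FuturesUnordered, StreamExt};

// Stand-in for a per-network query (e.g. fetching a session).
async fn fetch(id: u32) -> Result<u32, String> {
  Ok(id * 10)
}

async fn drain() -> Result<Vec<u32>, String> {
  let mut futures = FuturesUnordered::new();
  for id in 0 .. 3 {
    futures.push(fetch(id));
  }

  // Poll until every future has yielded; results arrive in completion order,
  // and the `?` bails out on the first typed error instead of a formatted string.
  let mut results = vec![];
  while let Some(result) = futures.next().await {
    results.push(result?);
  }
  Ok(results)
}

fn main() {
  let results = futures_lite::future::block_on(drain()).unwrap();
  assert_eq!(results.len(), 3);
}
```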
@@ -98,7 +104,7 @@ impl Validators {

   fn incorporate_session_changes(
     &mut self,
-    session_changes: Vec<(NetworkId, Session, HashSet<PeerId>)>,
+    session_changes: Vec<(ExternalNetworkId, Session, HashSet<PeerId>)>,
   ) {
     let mut removed = HashSet::new();
     let mut added = HashSet::new();

@@ -147,17 +153,17 @@ impl Validators {
   }

   /// Update the view of the validators.
-  pub(crate) async fn update(&mut self) -> Result<(), String> {
-    let session_changes = Self::session_changes(&self.serai, &self.sessions).await?;
+  pub(crate) async fn update(&mut self) -> Result<(), SeraiError> {
+    let session_changes = Self::session_changes(&*self.serai, &self.sessions).await?;
     self.incorporate_session_changes(session_changes);
     Ok(())
   }

-  pub(crate) fn by_network(&self) -> &HashMap<NetworkId, HashSet<PeerId>> {
+  pub(crate) fn by_network(&self) -> &HashMap<ExternalNetworkId, HashSet<PeerId>> {
     &self.by_network
   }

-  pub(crate) fn networks(&self, peer_id: &PeerId) -> Option<&HashSet<NetworkId>> {
+  pub(crate) fn networks(&self, peer_id: &PeerId) -> Option<&HashSet<ExternalNetworkId>> {
     self.validators.get(peer_id)
   }
 }

@@ -174,7 +180,9 @@ impl UpdateValidatorsTask {
   /// Spawn a new instance of the UpdateValidatorsTask.
   ///
   /// This returns a reference to the Validators it updates after spawning itself.
-  pub(crate) fn spawn(serai: Serai) -> (Arc<RwLock<Validators>>, mpsc::UnboundedReceiver<Changes>) {
+  pub(crate) fn spawn(
+    serai: Arc<Serai>,
+  ) -> (Arc<RwLock<Validators>>, mpsc::UnboundedReceiver<Changes>) {
     // The validators which will be updated
     let (validators, changes) = Validators::new(serai);
     let validators = Arc::new(RwLock::new(validators));

@@ -198,13 +206,13 @@ impl ContinuallyRan for UpdateValidatorsTask {
   const DELAY_BETWEEN_ITERATIONS: u64 = 60;
   const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;

-  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
+  type Error = SeraiError;
+
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
       let session_changes = {
         let validators = self.validators.read().await;
-        Validators::session_changes(validators.serai.clone(), validators.sessions.clone())
-          .await
-          .map_err(|e| format!("{e:?}"))?
+        Validators::session_changes(validators.serai.clone(), validators.sessions.clone()).await?
       };
       self.validators.write().await.incorporate_session_changes(session_changes);
       Ok(true)
@@ -1,11 +1,11 @@
 use core::future::Future;
 use std::time::{Duration, SystemTime};

-use serai_client::validator_sets::primitives::ValidatorSet;
+use serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet};

 use futures_lite::FutureExt;

-use tributary::{ReadWrite, TransactionTrait, Block, Tributary, TributaryReader};
+use tributary_sdk::{ReadWrite, TransactionTrait, Block, Tributary, TributaryReader};

 use serai_db::*;
 use serai_task::ContinuallyRan;

@@ -13,25 +13,41 @@ use serai_task::ContinuallyRan;
 use crate::{Heartbeat, Peer, P2p};

 // Amount of blocks in a minute
-const BLOCKS_PER_MINUTE: usize = (60 / (tributary::tendermint::TARGET_BLOCK_TIME / 1000)) as usize;
+const BLOCKS_PER_MINUTE: usize =
+  (60 / (tributary_sdk::tendermint::TARGET_BLOCK_TIME / 1000)) as usize;

-/// The maximum amount of blocks to include/included within a batch.
-pub const BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;
+/// The minimum amount of blocks to include/included within a batch, assuming there's blocks to
+/// include in the batch.
+///
+/// This decides the size limit of the Batch (the Block size limit multiplied by the minimum amount
+/// of blocks we'll send). The actual amount of blocks sent will be the amount which fits within
+/// the size limit.
+pub const MIN_BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;
+
+/// The size limit for a batch of blocks sent in response to a Heartbeat.
+///
+/// This estimates the size of a commit as `32 + (MAX_VALIDATORS * 128)`. At the time of writing, a
+/// commit is `8 + (validators * 32) + (32 + (validators * 32))` (for the time, list of validators,
+/// and aggregate signature). Accordingly, this should be a safe over-estimate.
+pub const BATCH_SIZE_LIMIT: usize = MIN_BLOCKS_PER_BATCH *
+  (tributary_sdk::BLOCK_SIZE_LIMIT + 32 + ((MAX_KEY_SHARES_PER_SET as usize) * 128));

 /// Sends a heartbeat to other validators on regular intervals informing them of our Tributary's
 /// tip.
 ///
 /// If the other validator has more blocks then we do, they're expected to inform us. This forms
 /// the sync protocol for our Tributaries.
-pub struct HeartbeatTask<TD: Db, Tx: TransactionTrait, P: P2p> {
-  set: ValidatorSet,
-  tributary: Tributary<TD, Tx, P>,
-  reader: TributaryReader<TD, Tx>,
-  p2p: P,
+pub(crate) struct HeartbeatTask<TD: Db, Tx: TransactionTrait, P: P2p> {
+  pub(crate) set: ExternalValidatorSet,
+  pub(crate) tributary: Tributary<TD, Tx, P>,
+  pub(crate) reader: TributaryReader<TD, Tx>,
+  pub(crate) p2p: P,
 }

 impl<TD: Db, Tx: TransactionTrait, P: P2p> ContinuallyRan for HeartbeatTask<TD, Tx, P> {
-  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
+  type Error = String;
+
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
       // If our blockchain hasn't had a block in the past minute, trigger the heartbeat protocol
       const TIME_TO_TRIGGER_SYNCING: Duration = Duration::from_secs(60);
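Note: to make the new batch constants concrete, here is the same arithmetic with stand-in numbers. The real `TARGET_BLOCK_TIME`, `BLOCK_SIZE_LIMIT`, and `MAX_KEY_SHARES_PER_SET` values are not shown in this diff, so the figures below are purely illustrative placeholders:

```rust
fn main() {
  // Illustrative placeholders, not the real constants.
  let target_block_time_ms: u64 = 6_000;
  let block_size_limit: usize = 350_000;
  let max_key_shares_per_set: usize = 600;

  // Amount of blocks in a minute.
  let blocks_per_minute = (60 / (target_block_time_ms / 1000)) as usize; // 10
  // The minimum amount of blocks to include within a batch.
  let min_blocks_per_batch = blocks_per_minute + 1; // 11

  // Each block may be accompanied by a commit, over-estimated as 32 + (shares * 128) bytes.
  let commit_estimate = 32 + (max_key_shares_per_set * 128); // 76_832
  let batch_size_limit = min_blocks_per_batch * (block_size_limit + commit_estimate);

  // With these placeholders the batch limit lands around 4.7 MB.
  assert_eq!(batch_size_limit, 11 * (350_000 + 76_832));
  println!("BATCH_SIZE_LIMIT ≈ {batch_size_limit} bytes");
}
```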
@@ -80,7 +96,7 @@ impl<TD: Db, Tx: TransactionTrait, P: P2p> ContinuallyRan for HeartbeatTask<TD,

       // This is the final batch if it has less than the maximum amount of blocks
       // (signifying there weren't more blocks after this to fill the batch with)
-      let final_batch = blocks.len() < BLOCKS_PER_BATCH;
+      let final_batch = blocks.len() < MIN_BLOCKS_PER_BATCH;

       // Sync each block
       for block_with_commit in blocks {
@@ -1,26 +1,31 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]

 use core::future::Future;
+use std::collections::HashMap;

 use borsh::{BorshSerialize, BorshDeserialize};

-use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};
+use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet};

-use serai_cosign::SignedCosign;
+use serai_db::Db;
+use tributary_sdk::{ReadWrite, TransactionTrait, Tributary, TributaryReader};
+use serai_cosign::{SignedCosign, Cosigning};

-/// A oneshot channel.
-pub mod oneshot;
+use tokio::sync::{mpsc, oneshot};
+
+use serai_task::{Task, ContinuallyRan};

 /// The heartbeat task, effecting sync of Tributaries
 pub mod heartbeat;
+use crate::heartbeat::HeartbeatTask;

 /// A heartbeat for a Tributary.
 #[derive(Clone, Copy, BorshSerialize, BorshDeserialize, Debug)]
 pub struct Heartbeat {
   /// The Tributary this is the heartbeat of.
-  pub set: ValidatorSet,
+  pub set: ExternalValidatorSet,
   /// The hash of the latest block added to the Tributary.
   pub latest_block_hash: [u8; 32],
 }

@@ -44,17 +49,23 @@ pub trait Peer<'a>: Send {
 }

 /// The representation of the P2P network.
-pub trait P2p: Send + Sync + Clone + tributary::P2p + serai_cosign::RequestNotableCosigns {
+pub trait P2p:
+  Send + Sync + Clone + tributary_sdk::P2p + serai_cosign::RequestNotableCosigns
+{
   /// The representation of a peer.
   type Peer<'a>: Peer<'a>;

   /// Fetch the peers for this network.
-  fn peers(&self, network: NetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>>;
+  fn peers(&self, network: ExternalNetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>>;

+  /// Broadcast a cosign.
+  fn publish_cosign(&self, cosign: SignedCosign) -> impl Send + Future<Output = ()>;
+
   /// A cancel-safe future for the next heartbeat received over the P2P network.
   ///
   /// Yields the validator set its for, the latest block hash observed, and a channel to return the
-  /// descending blocks.
+  /// descending blocks. This channel MUST NOT and will not have its receiver dropped before a
+  /// message is sent.
   fn heartbeat(
     &self,
   ) -> impl Send + Future<Output = (Heartbeat, oneshot::Sender<Vec<TributaryBlockWithCommit>>)>;

@@ -62,6 +73,7 @@ pub trait P2p: Send + Sync + Clone + tributary::P2p + serai_cosign::RequestNotab
   /// A cancel-safe future for the next request for the notable cosigns of a gloabl session.
   ///
   /// Yields the global session the request is for and a channel to return the notable cosigns.
+  /// This channel MUST NOT and will not have its receiver dropped before a message is sent.
   fn notable_cosigns_request(
     &self,
   ) -> impl Send + Future<Output = ([u8; 32], oneshot::Sender<Vec<SignedCosign>>)>;

@@ -74,3 +86,119 @@ pub trait P2p: Send + Sync + Clone + tributary::P2p + serai_cosign::RequestNotab
   /// A cancel-safe future for the next cosign received.
   fn cosign(&self) -> impl Send + Future<Output = SignedCosign>;
 }
+
+fn handle_notable_cosigns_request<D: Db>(
+  db: &D,
+  global_session: [u8; 32],
+  channel: oneshot::Sender<Vec<SignedCosign>>,
+) {
+  let cosigns = Cosigning::<D>::notable_cosigns(db, global_session);
+  channel.send(cosigns).expect("channel listening for cosign oneshot response was dropped?");
+}
+
+fn handle_heartbeat<D: Db, T: TransactionTrait>(
+  reader: &TributaryReader<D, T>,
+  mut latest_block_hash: [u8; 32],
+  channel: oneshot::Sender<Vec<TributaryBlockWithCommit>>,
+) {
+  let mut res_size = 8;
+  let mut res = vec![];
+  // This former case should be covered by this latter case
+  while (res.len() < heartbeat::MIN_BLOCKS_PER_BATCH) || (res_size < heartbeat::BATCH_SIZE_LIMIT) {
+    let Some(block_after) = reader.block_after(&latest_block_hash) else { break };
+
+    // These `break` conditions should only occur under edge cases, such as if we're actively
+    // deleting this Tributary due to being done with it
+    let Some(block) = reader.block(&block_after) else { break };
+    let block = block.serialize();
+    let Some(commit) = reader.commit(&block_after) else { break };
+    res_size += 8 + block.len() + 8 + commit.len();
+    res.push(TributaryBlockWithCommit { block, commit });

+    latest_block_hash = block_after;
+  }
+  channel
+    .send(res)
+    .map_err(|_| ())
+    .expect("channel listening for heartbeat oneshot response was dropped?");
+}
+
+/// Run the P2P instance.
+///
+/// `add_tributary`'s and `retire_tributary's senders, along with `send_cosigns`'s receiver, must
+/// never be dropped. `retire_tributary` is not required to only be instructed with added
+/// Tributaries.
+pub async fn run<TD: Db, Tx: TransactionTrait, P: P2p>(
+  db: impl Db,
+  p2p: P,
+  mut add_tributary: mpsc::UnboundedReceiver<(ExternalValidatorSet, Tributary<TD, Tx, P>)>,
+  mut retire_tributary: mpsc::UnboundedReceiver<ExternalValidatorSet>,
+  send_cosigns: mpsc::UnboundedSender<SignedCosign>,
+) {
+  let mut readers = HashMap::<ExternalValidatorSet, TributaryReader<TD, Tx>>::new();
+  let mut tributaries = HashMap::<[u8; 32], mpsc::UnboundedSender<Vec<u8>>>::new();
+  let mut heartbeat_tasks = HashMap::<ExternalValidatorSet, _>::new();
+
+  loop {
+    tokio::select! {
+      tributary = add_tributary.recv() => {
+        let (set, tributary) = tributary.expect("add_tributary send was dropped");
+        let reader = tributary.reader();
+        readers.insert(set, reader.clone());
+
+        let (heartbeat_task_def, heartbeat_task) = Task::new();
+        tokio::spawn(
+          (HeartbeatTask {
+            set,
+            tributary: tributary.clone(),
+            reader: reader.clone(),
+            p2p: p2p.clone(),
+          }).continually_run(heartbeat_task_def, vec![])
+        );
+        heartbeat_tasks.insert(set, heartbeat_task);
+
+        let (tributary_message_send, mut tributary_message_recv) = mpsc::unbounded_channel();
+        tributaries.insert(tributary.genesis(), tributary_message_send);
+        // For as long as this sender exists, handle the messages from it on a dedicated task
+        tokio::spawn(async move {
+          while let Some(message) = tributary_message_recv.recv().await {
+            tributary.handle_message(&message).await;
+          }
+        });
+      }
+      set = retire_tributary.recv() => {
+        let set = set.expect("retire_tributary send was dropped");
+        let Some(reader) = readers.remove(&set) else { continue };
+        tributaries.remove(&reader.genesis()).expect("tributary reader but no tributary");
+        heartbeat_tasks.remove(&set).expect("tributary but no heartbeat task");
+      }
+
+      (heartbeat, channel) = p2p.heartbeat() => {
+        if let Some(reader) = readers.get(&heartbeat.set) {
+          let reader = reader.clone(); // This is a cheap clone
+          // We spawn this on a task due to the DB reads needed
+          tokio::spawn(async move {
+            handle_heartbeat(&reader, heartbeat.latest_block_hash, channel)
+          });
+        }
+      }
+      (global_session, channel) = p2p.notable_cosigns_request() => {
+        tokio::spawn({
+          let db = db.clone();
+          async move { handle_notable_cosigns_request(&db, global_session, channel) }
+        });
+      }
+      (tributary, message) = p2p.tributary_message() => {
+        if let Some(tributary) = tributaries.get(&tributary) {
+          tributary.send(message).expect("tributary message recv was dropped?");
+        }
+      }
+      cosign = p2p.cosign() => {
+        // We don't call `Cosigning::intake_cosign` here as that can only be called from a single
+        // location. We also need to intake the cosigns we produce, which means we need to merge
+        // these streams (signing, network) somehow. That's done with this mpsc channel
+        send_cosigns.send(cosign).expect("channel receiving cosigns was dropped");
+      }
+    }
+  }
+}
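Note: the `run` loop above depends on every `P2p` future being cancel-safe, as the trait docs call out: in each pass, `tokio::select!` drops whichever futures didn't complete, so a non-cancel-safe future could lose a message it had already dequeued. A small sketch of that property using `mpsc::UnboundedReceiver::recv`, which tokio documents as cancel-safe (this example assumes tokio's `macros`, `rt`, `sync`, and `time` features):

```rust
use tokio::{sync::mpsc, time::{sleep, Duration}};

#[tokio::main]
async fn main() {
  let (send, mut recv) = mpsc::unbounded_channel::<u32>();
  send.send(7).unwrap();

  loop {
    tokio::select! {
      // Cancel-safe: if the other branch wins this iteration, no queued message is
      // lost, because `recv` only removes an item when it actually completes.
      msg = recv.recv() => {
        println!("got {msg:?}");
        break;
      }
      // A competing branch; whichever branch loses is simply dropped this iteration.
      () = sleep(Duration::from_millis(1)) => {
        println!("tick");
      }
    }
  }
}
```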
@@ -1,35 +0,0 @@
-use core::{
-  pin::Pin,
-  task::{Poll, Context},
-  future::Future,
-};
-
-pub use async_channel::{SendError, RecvError};
-
-/// The sender for a oneshot channel.
-pub struct Sender<T: Send>(async_channel::Sender<T>);
-impl<T: Send> Sender<T> {
-  /// Send a value down the channel.
-  ///
-  /// Returns an error if the channel's receiver was dropped.
-  pub fn send(self, msg: T) -> Result<(), SendError<T>> {
-    self.0.send_blocking(msg)
-  }
-}
-
-/// The receiver for a oneshot channel.
-pub struct Receiver<T: Send>(async_channel::Receiver<T>);
-impl<T: Send> Future for Receiver<T> {
-  type Output = Result<T, RecvError>;
-  fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
-    let recv = self.0.recv();
-    futures_lite::pin!(recv);
-    recv.poll(cx)
-  }
-}
-
-/// Create a new oneshot channel.
-pub fn channel<T: Send>() -> (Sender<T>, Receiver<T>) {
-  let (send, recv) = async_channel::bounded(1);
-  (Sender(send), Receiver(recv))
-}
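Note: the hand-rolled oneshot module deleted above (built on `async_channel::bounded(1)`) is replaced by `tokio::sync::oneshot`, which the other hunks now import. A minimal usage sketch of the replacement (assuming tokio's `macros`, `rt`, and `sync` features):

```rust
use tokio::sync::oneshot;

#[tokio::main]
async fn main() {
  let (sender, receiver) = oneshot::channel::<Vec<u8>>();

  // The responder consumes the sender exactly once.
  tokio::spawn(async move {
    // `send` returns Err(value) if the receiver was already dropped.
    let _ = sender.send(vec![1, 2, 3]);
  });

  // The receiver resolves once, yielding Err(RecvError) if the sender was dropped first.
  match receiver.await {
    Ok(blocks) => println!("received {} bytes", blocks.len()),
    Err(_) => println!("responder went away"),
  }
}
```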
149
coordinator/src/db.rs
Normal file
149
coordinator/src/db.rs
Normal file
@@ -0,0 +1,149 @@
use std::{path::Path, fs};

pub(crate) use serai_db::{Get, DbTxn, Db as DbTrait};
use serai_db::{create_db, db_channel};

use dkg::Participant;

use serai_client::{
  primitives::ExternalNetworkId,
  validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair},
};

use serai_cosign::SignedCosign;
use serai_coordinator_substrate::NewSetInformation;
use serai_coordinator_tributary::Transaction;

#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
pub(crate) type Db = std::sync::Arc<serai_db::ParityDb>;
#[cfg(feature = "rocksdb")]
pub(crate) type Db = serai_db::RocksDB;

#[allow(unused_variables, unreachable_code)]
fn db(path: &str) -> Db {
  {
    let path: &Path = path.as_ref();
    // This may error if this path already exists, which we shouldn't propagate/panic on. If this
    // is a problem (such as we don't have the necessary permissions to write to this path), we
    // expect the following DB opening to error.
    let _: Result<_, _> = fs::create_dir_all(path.parent().unwrap());
  }

  #[cfg(all(feature = "parity-db", feature = "rocksdb"))]
  panic!("built with parity-db and rocksdb");
  #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
  let db = serai_db::new_parity_db(path);
  #[cfg(feature = "rocksdb")]
  let db = serai_db::new_rocksdb(path);
  db
}

pub(crate) fn coordinator_db() -> Db {
  let root_path = serai_env::var("DB_PATH").expect("path to DB wasn't specified");
  db(&format!("{root_path}/coordinator/db"))
}

fn tributary_db_folder(set: ExternalValidatorSet) -> String {
  let root_path = serai_env::var("DB_PATH").expect("path to DB wasn't specified");
  let network = match set.network {
    ExternalNetworkId::Bitcoin => "Bitcoin",
    ExternalNetworkId::Ethereum => "Ethereum",
    ExternalNetworkId::Monero => "Monero",
  };
  format!("{root_path}/tributary-{network}-{}", set.session.0)
}

pub(crate) fn tributary_db(set: ExternalValidatorSet) -> Db {
  db(&format!("{}/db", tributary_db_folder(set)))
}

pub(crate) fn prune_tributary_db(set: ExternalValidatorSet) {
  log::info!("pruning data directory for tributary {set:?}");
  let db = tributary_db_folder(set);
  if fs::exists(&db).expect("couldn't check if tributary DB exists") {
    fs::remove_dir_all(db).unwrap();
  }
}

create_db! {
  Coordinator {
    // The currently active Tributaries
    ActiveTributaries: () -> Vec<NewSetInformation>,
    // The latest Tributary to have been retired for a network
    // Since Tributaries are retired sequentially, this is informative as to whether any Tributary
    // has been retired
    RetiredTributary: (network: ExternalNetworkId) -> Session,
    // The last handled message from a Processor
    LastProcessorMessage: (network: ExternalNetworkId) -> u64,
    // Cosigns we produced and tried to intake yet incurred an error while doing so
    ErroneousCosigns: () -> Vec<SignedCosign>,
    // The keys to confirm and set on the Serai network
    KeysToConfirm: (set: ExternalValidatorSet) -> KeyPair,
    // The key was set on the Serai network
    KeySet: (set: ExternalValidatorSet) -> (),
  }
}

db_channel! {
  Coordinator {
    // Cosigns we produced
    SignedCosigns: () -> SignedCosign,
    // Tributaries to clean up upon reboot
    TributaryCleanup: () -> ExternalValidatorSet,
  }
}

mod _internal_db {
  use super::*;

  db_channel! {
    Coordinator {
      // Tributary transactions to publish from the Processor messages
      TributaryTransactionsFromProcessorMessages: (set: ExternalValidatorSet) -> Transaction,
      // Tributary transactions to publish from the DKG confirmation task
      TributaryTransactionsFromDkgConfirmation: (set: ExternalValidatorSet) -> Transaction,
      // Participants to remove
      RemoveParticipant: (set: ExternalValidatorSet) -> u16,
    }
  }
}

pub(crate) struct TributaryTransactionsFromProcessorMessages;
impl TributaryTransactionsFromProcessorMessages {
  pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, tx: &Transaction) {
    // If this set has yet to be retired, send this transaction
    if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
      _internal_db::TributaryTransactionsFromProcessorMessages::send(txn, set, tx);
    }
  }
  pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Transaction> {
    _internal_db::TributaryTransactionsFromProcessorMessages::try_recv(txn, set)
  }
}

pub(crate) struct TributaryTransactionsFromDkgConfirmation;
impl TributaryTransactionsFromDkgConfirmation {
  pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, tx: &Transaction) {
    // If this set has yet to be retired, send this transaction
    if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
      _internal_db::TributaryTransactionsFromDkgConfirmation::send(txn, set, tx);
    }
  }
  pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Transaction> {
    _internal_db::TributaryTransactionsFromDkgConfirmation::try_recv(txn, set)
  }
}

pub(crate) struct RemoveParticipant;
impl RemoveParticipant {
  pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, participant: Participant) {
    // If this set has yet to be retired, send this transaction
    if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
      _internal_db::RemoveParticipant::send(txn, set, &u16::from(participant));
    }
  }
  pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Participant> {
    _internal_db::RemoveParticipant::try_recv(txn, set)
      .map(|i| Participant::new(i).expect("sent invalid participant index for removal"))
  }
}
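The `send` wrappers above gate on `RetiredTributary::get(..).map(|session| session.0) < Some(set.session.0)`. A standalone sketch (plain std, illustrative names) of why that comparison behaves as intended, relying on `Option`'s derived ordering where `None < Some(_)`:

fn still_active(retired: Option<u32>, session: u32) -> bool {
  // `None < Some(_)` holds, so a network with no retired session yet always accepts transactions,
  // while sessions at or below the retired one are silently dropped
  retired < Some(session)
}

fn main() {
  assert!(still_active(None, 0));      // nothing retired yet
  assert!(still_active(Some(2), 3));   // newer session than the retired one
  assert!(!still_active(Some(3), 3));  // this session was already retired
  assert!(!still_active(Some(4), 3));  // older session than the retired one
}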
coordinator/src/dkg_confirmation.rs (new file, 439 lines)
@@ -0,0 +1,439 @@
use core::{ops::Deref, future::Future};
use std::{boxed::Box, collections::HashMap};

use zeroize::Zeroizing;
use rand_core::OsRng;
use ciphersuite::{group::GroupEncoding, *};
use dkg::{Participant, musig};
use frost_schnorrkel::{
  frost::{curve::Ristretto, FrostError, sign::*},
  Schnorrkel,
};

use serai_db::{DbTxn, Db as DbTrait};

use serai_client::{
  primitives::SeraiAddress,
  validator_sets::primitives::{ExternalValidatorSet, musig_context, set_keys_message},
};

use serai_task::{DoesNotError, ContinuallyRan};

use serai_coordinator_substrate::{NewSetInformation, Keys};
use serai_coordinator_tributary::{Transaction, DkgConfirmationMessages};

use crate::{KeysToConfirm, KeySet, TributaryTransactionsFromDkgConfirmation};

fn schnorrkel() -> Schnorrkel {
  Schnorrkel::new(b"substrate") // TODO: Pull the constant for this
}

fn our_i(
  set: &NewSetInformation,
  key: &Zeroizing<<Ristretto as WrappedGroup>::F>,
  data: &HashMap<Participant, Vec<u8>>,
) -> Participant {
  let public = SeraiAddress((Ristretto::generator() * key.deref()).to_bytes());

  let mut our_i = None;
  for participant in data.keys() {
    let validator_index = usize::from(u16::from(*participant) - 1);
    let (validator, _weight) = set.validators[validator_index];
    if validator == public {
      our_i = Some(*participant);
    }
  }
  our_i.unwrap()
}

// Take a HashMap of participations with non-contiguous Participants and convert them to a
// contiguous sequence.
//
// The input data is expected to not include our own data, which also won't be in the output data.
//
// Returns the mapping from the contiguous Participants to the original Participants.
fn make_contiguous<T>(
  our_i: Participant,
  mut data: HashMap<Participant, Vec<u8>>,
  transform: impl Fn(Vec<u8>) -> std::io::Result<T>,
) -> Result<HashMap<Participant, T>, Participant> {
  assert!(!data.contains_key(&our_i));

  let mut ordered_participants = data.keys().copied().collect::<Vec<_>>();
  ordered_participants.sort_by_key(|participant| u16::from(*participant));

  let mut our_i = Some(our_i);
  let mut contiguous = HashMap::new();
  let mut i = 1;
  for participant in ordered_participants {
    // If this is the first participant after our own index, increment to account for our index
    if let Some(our_i_value) = our_i {
      if u16::from(participant) > u16::from(our_i_value) {
        i += 1;
        our_i = None;
      }
    }

    let contiguous_index = Participant::new(i).unwrap();
    let data = match transform(data.remove(&participant).unwrap()) {
      Ok(data) => data,
      Err(_) => Err(participant)?,
    };
    contiguous.insert(contiguous_index, data);
    i += 1;
  }
  Ok(contiguous)
}

fn handle_frost_error<T>(result: Result<T, FrostError>) -> Result<T, Participant> {
  match &result {
    Ok(_) => Ok(result.unwrap()),
    Err(FrostError::InvalidPreprocess(participant) | FrostError::InvalidShare(participant)) => {
      Err(*participant)
    }
    // All of these should be unreachable
    Err(
      FrostError::InternalError(_) |
      FrostError::InvalidParticipant(_, _) |
      FrostError::InvalidSigningSet(_) |
      FrostError::InvalidParticipantQuantity(_, _) |
      FrostError::DuplicatedParticipant(_) |
      FrostError::MissingParticipant(_),
    ) => {
      result.unwrap();
      unreachable!("continued execution after unwrapping Result::Err");
    }
  }
}

#[rustfmt::skip]
enum Signer {
  Preprocess { attempt: u32, seed: CachedPreprocess, preprocess: [u8; 64] },
  Share {
    attempt: u32,
    musig_validators: Vec<SeraiAddress>,
    share: [u8; 32],
    machine: Box<AlgorithmSignatureMachine<Ristretto, Schnorrkel>>,
  },
}

/// Performs the DKG Confirmation protocol.
pub(crate) struct ConfirmDkgTask<CD: DbTrait, TD: DbTrait> {
  db: CD,

  set: NewSetInformation,
  tributary_db: TD,

  key: Zeroizing<<Ristretto as WrappedGroup>::F>,
  signer: Option<Signer>,
}

impl<CD: DbTrait, TD: DbTrait> ConfirmDkgTask<CD, TD> {
  pub(crate) fn new(
    db: CD,
    set: NewSetInformation,
    tributary_db: TD,
    key: Zeroizing<<Ristretto as WrappedGroup>::F>,
  ) -> Self {
    Self { db, set, tributary_db, key, signer: None }
  }

  fn slash(db: &mut CD, set: ExternalValidatorSet, validator: SeraiAddress) {
    let mut txn = db.txn();
    TributaryTransactionsFromDkgConfirmation::send(
      &mut txn,
      set,
      &Transaction::RemoveParticipant { participant: validator, signed: Default::default() },
    );
    txn.commit();
  }

  fn preprocess(
    db: &mut CD,
    set: ExternalValidatorSet,
    attempt: u32,
    key: Zeroizing<<Ristretto as WrappedGroup>::F>,
    signer: &mut Option<Signer>,
  ) {
    // Perform the preprocess
    let public_key = Ristretto::generator() * key.deref();
    let (machine, preprocess) = AlgorithmMachine::new(
      schnorrkel(),
      // We use a 1-of-1 Musig here as we don't know who will actually be in this Musig yet
      musig(musig_context(set.into()), key, &[public_key]).unwrap(),
    )
    .preprocess(&mut OsRng);
    // We take the preprocess so we can use it in a distinct machine with the actual Musig
    // parameters
    let seed = machine.cache();

    let mut preprocess_bytes = [0u8; 64];
    preprocess_bytes.copy_from_slice(&preprocess.serialize());
    let preprocess = preprocess_bytes;

    let mut txn = db.txn();
    // If this attempt has already been preprocessed for, the Tributary will de-duplicate it
    // This may mean the Tributary preprocess is distinct from ours, but we check for that later
    TributaryTransactionsFromDkgConfirmation::send(
      &mut txn,
      set,
      &Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed: Default::default() },
    );
    txn.commit();

    *signer = Some(Signer::Preprocess { attempt, seed, preprocess });
  }
}

impl<CD: DbTrait, TD: DbTrait> ContinuallyRan for ConfirmDkgTask<CD, TD> {
  type Error = DoesNotError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;

      // If we were sent a key to set, create the signer for it
      if self.signer.is_none() && KeysToConfirm::get(&self.db, self.set.set).is_some() {
        // Create and publish the initial preprocess
        Self::preprocess(&mut self.db, self.set.set, 0, self.key.clone(), &mut self.signer);

        made_progress = true;
      }

      // If we have keys to confirm, handle all messages from the tributary
      if let Some(key_pair) = KeysToConfirm::get(&self.db, self.set.set) {
        // Handle all messages from the Tributary
        loop {
          let mut tributary_txn = self.tributary_db.txn();
          let Some(msg) = DkgConfirmationMessages::try_recv(&mut tributary_txn, self.set.set)
          else {
            break;
          };

          match msg {
            messages::sign::CoordinatorMessage::Reattempt {
              id: messages::sign::SignId { attempt, .. },
            } => {
              // Create and publish the preprocess for the specified attempt
              Self::preprocess(
                &mut self.db,
                self.set.set,
                attempt,
                self.key.clone(),
                &mut self.signer,
              );
            }
            messages::sign::CoordinatorMessage::Preprocesses {
              id: messages::sign::SignId { attempt, .. },
              mut preprocesses,
            } => {
              // Confirm the preprocess we're expected to sign with is the one we locally have
              // It may be different if we rebooted and made a second preprocess for this attempt
              let Some(Signer::Preprocess { attempt: our_attempt, seed, preprocess }) =
                self.signer.take()
              else {
                // If this message is not expected, commit the txn to drop it and move on
                // At some point, we'll get a Reattempt and reset
                tributary_txn.commit();
                break;
              };

              // Determine the MuSig key signed with
              let musig_validators = {
                let mut ordered_participants = preprocesses.keys().copied().collect::<Vec<_>>();
                ordered_participants.sort_by_key(|participant| u16::from(*participant));

                let mut res = vec![];
                for participant in ordered_participants {
                  let (validator, _weight) =
                    self.set.validators[usize::from(u16::from(participant) - 1)];
                  res.push(validator);
                }
                res
              };

              let musig_public_keys = musig_validators
                .iter()
                .map(|key| {
                  Ristretto::read_G(&mut key.0.as_slice())
                    .expect("Serai validator had invalid public key")
                })
                .collect::<Vec<_>>();

              let keys =
                musig(musig_context(self.set.set.into()), self.key.clone(), &musig_public_keys)
                  .unwrap();

              // Rebuild the machine
              let (machine, preprocess_from_cache) =
                AlgorithmSignMachine::from_cache(schnorrkel(), keys, seed);
              assert_eq!(preprocess.as_slice(), preprocess_from_cache.serialize().as_slice());

              // Ensure this is a consistent signing session
              let our_i = our_i(&self.set, &self.key, &preprocesses);
              let consistent = (attempt == our_attempt) &&
                (preprocesses.remove(&our_i).unwrap().as_slice() == preprocess.as_slice());
              if !consistent {
                tributary_txn.commit();
                break;
              }

              // Reformat the preprocesses into the expected format for Musig
              let preprocesses = match make_contiguous(our_i, preprocesses, |preprocess| {
                machine.read_preprocess(&mut preprocess.as_slice())
              }) {
                Ok(preprocesses) => preprocesses,
                // This yields the *original participant index*
                Err(participant) => {
                  Self::slash(
                    &mut self.db,
                    self.set.set,
                    self.set.validators[usize::from(u16::from(participant) - 1)].0,
                  );
                  tributary_txn.commit();
                  break;
                }
              };

              // Calculate our share
              let (machine, share) = match handle_frost_error(
                machine.sign(preprocesses, &set_keys_message(&self.set.set, &key_pair)),
              ) {
                Ok((machine, share)) => (machine, share),
                // This yields the *musig participant index*
                Err(participant) => {
                  Self::slash(
                    &mut self.db,
                    self.set.set,
                    musig_validators[usize::from(u16::from(participant) - 1)],
                  );
                  tributary_txn.commit();
                  break;
                }
              };

              // Send our share
              let share = <[u8; 32]>::try_from(share.serialize()).unwrap();
              let mut txn = self.db.txn();
              TributaryTransactionsFromDkgConfirmation::send(
                &mut txn,
                self.set.set,
                &Transaction::DkgConfirmationShare { attempt, share, signed: Default::default() },
              );
              txn.commit();

              self.signer = Some(Signer::Share {
                attempt,
                musig_validators,
                share,
                machine: Box::new(machine),
              });
            }
            messages::sign::CoordinatorMessage::Shares {
              id: messages::sign::SignId { attempt, .. },
              mut shares,
            } => {
              let Some(Signer::Share { attempt: our_attempt, musig_validators, share, machine }) =
                self.signer.take()
              else {
                tributary_txn.commit();
                break;
              };

              // Ensure this is a consistent signing session
              let our_i = our_i(&self.set, &self.key, &shares);
              let consistent = (attempt == our_attempt) &&
                (shares.remove(&our_i).unwrap().as_slice() == share.as_slice());
              if !consistent {
                tributary_txn.commit();
                break;
              }

              // Reformat the shares into the expected format for Musig
              let shares = match make_contiguous(our_i, shares, |share| {
                machine.read_share(&mut share.as_slice())
              }) {
                Ok(shares) => shares,
                // This yields the *original participant index*
                Err(participant) => {
                  Self::slash(
                    &mut self.db,
                    self.set.set,
                    self.set.validators[usize::from(u16::from(participant) - 1)].0,
                  );
                  tributary_txn.commit();
                  break;
                }
              };

              match handle_frost_error(machine.complete(shares)) {
                Ok(signature) => {
                  // Create the bitvec of the participants
                  let mut signature_participants;
                  {
                    use bitvec::prelude::*;
                    signature_participants = bitvec![u8, Lsb0; 0; 0];
                    let mut i = 0;
                    for (validator, _) in &self.set.validators {
                      if Some(validator) == musig_validators.get(i) {
                        signature_participants.push(true);
                        i += 1;
                      } else {
                        signature_participants.push(false);
                      }
                    }
                  }

                  // This is safe to call multiple times as it'll just change which *valid*
                  // signature to publish
                  let mut txn = self.db.txn();
                  Keys::set(
                    &mut txn,
                    self.set.set,
                    key_pair.clone(),
                    signature_participants,
                    signature.into(),
                  );
                  txn.commit();
                }
                // This yields the *musig participant index*
                Err(participant) => {
                  Self::slash(
                    &mut self.db,
                    self.set.set,
                    musig_validators[usize::from(u16::from(participant) - 1)],
                  );
                  tributary_txn.commit();
                  break;
                }
              }
            }
          }

          // Because we successfully handled this message, note we made progress
          made_progress = true;
          tributary_txn.commit();
        }
      }

      // Check if the key has been set on Serai
      if KeysToConfirm::get(&self.db, self.set.set).is_some() &&
        KeySet::get(&self.db, self.set.set).is_some()
      {
        // Take the keys to confirm so we never instantiate the signer again
        let mut txn = self.db.txn();
        KeysToConfirm::take(&mut txn, self.set.set);
        KeySet::take(&mut txn, self.set.set);
        txn.commit();

        // Drop our own signer
        // The task won't die until the Tributary does, but now it'll never do anything again
        self.signer = None;

        made_progress = true;
      }

      Ok(made_progress)
    }
  }
}
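A standalone model (simplified, u16 indices instead of `Participant`) of the remapping the `make_contiguous` helper above performs: the other signers keep their sorted order while one slot is reserved for our own index, yielding the contiguous 1..=n indices the MuSig machine expects.

use std::collections::BTreeMap;

fn contiguous_indices(our_i: u16, mut others: Vec<u16>) -> BTreeMap<u16, u16> {
  others.sort_unstable();
  let mut map = BTreeMap::new();
  let mut our_i = Some(our_i);
  let mut i = 1;
  for participant in others {
    // Skip one slot for our own index the first time we pass it
    if let Some(ours) = our_i {
      if participant > ours {
        i += 1;
        our_i = None;
      }
    }
    map.insert(participant, i);
    i += 1;
  }
  map
}

fn main() {
  // Original participants {1, 4, 5}, with us at 2: they become 1, 3, 4 and we occupy 2
  let map = contiguous_indices(2, vec![5, 1, 4]);
  assert_eq!(map[&1], 1);
  assert_eq!(map[&4], 3);
  assert_eq!(map[&5], 4);
}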
@@ -1,10 +1,510 @@
use core::{ops::Deref, time::Duration};
use std::{sync::Arc, collections::HashMap, time::Instant};

use zeroize::{Zeroize, Zeroizing};
use rand_core::{RngCore, OsRng};

use dalek_ff_group::Ristretto;
use ciphersuite::{
  group::{ff::PrimeField, GroupEncoding},
  *,
};

use borsh::BorshDeserialize;

use tokio::sync::mpsc;

use serai_client::{
  primitives::{ExternalNetworkId, PublicKey, SeraiAddress, Signature},
  validator_sets::primitives::{ExternalValidatorSet, KeyPair},
  Serai,
};
use message_queue::{Service, client::MessageQueue};

use serai_task::{Task, TaskHandle, ContinuallyRan};

use serai_cosign::{Faulted, SignedCosign, Cosigning};
use serai_coordinator_substrate::{
  CanonicalEventStream, EphemeralEventStream, SignSlashReport, SetKeysTask, SignedBatches,
  PublishBatchTask, SlashReports, PublishSlashReportTask,
};
use serai_coordinator_tributary::{SigningProtocolRound, Signed, Transaction, SubstrateBlockPlans};

mod db;
use db::*;

mod tributary;
mod dkg_confirmation;

mod substrate;
use substrate::SubstrateTask;

mod p2p {
-  use serai_coordinator_p2p::*;
  pub use serai_coordinator_p2p::*;
  pub use serai_coordinator_libp2p_p2p::Libp2p;
}

-fn main() {
-  todo!("TODO")
-}
// Use a zeroizing allocator for this entire application
// While secrets should already be zeroized, the presence of secret keys in a networked application
// (at increased risk of OOB reads) justifies the performance hit in case any secrets weren't
// already
#[global_allocator]
static ALLOCATOR: zalloc::ZeroizingAlloc<std::alloc::System> =
  zalloc::ZeroizingAlloc(std::alloc::System);

async fn serai() -> Arc<Serai> {
  const SERAI_CONNECTION_DELAY: Duration = Duration::from_secs(10);
  const MAX_SERAI_CONNECTION_DELAY: Duration = Duration::from_secs(300);

  let mut delay = SERAI_CONNECTION_DELAY;
  loop {
    let Ok(serai) = Serai::new(format!(
      "http://{}:9944",
      serai_env::var("SERAI_HOSTNAME").expect("Serai hostname wasn't provided")
    ))
    .await
    else {
      log::error!("couldn't connect to the Serai node");
      tokio::time::sleep(delay).await;
      delay = (delay + SERAI_CONNECTION_DELAY).min(MAX_SERAI_CONNECTION_DELAY);
      continue;
    };
    log::info!("made initial connection to Serai node");
    return Arc::new(serai);
  }
}

fn spawn_cosigning<D: serai_db::Db>(
  mut db: D,
  serai: Arc<Serai>,
  p2p: impl p2p::P2p,
  tasks_to_run_upon_cosigning: Vec<TaskHandle>,
  mut p2p_cosigns: mpsc::UnboundedReceiver<SignedCosign>,
) {
  let mut cosigning = Cosigning::spawn(db.clone(), serai, p2p.clone(), tasks_to_run_upon_cosigning);
  tokio::spawn(async move {
    const COSIGN_LOOP_INTERVAL: Duration = Duration::from_secs(5);

    let last_cosign_rebroadcast = Instant::now();
    loop {
      // Intake our own cosigns
      match Cosigning::<D>::latest_cosigned_block_number(&db) {
        Ok(latest_cosigned_block_number) => {
          let mut txn = db.txn();
          // The cosigns we previously tried to intake yet failed to
          let mut cosigns = ErroneousCosigns::get(&txn).unwrap_or(vec![]);
          // The cosigns we have yet to intake
          while let Some(cosign) = SignedCosigns::try_recv(&mut txn) {
            cosigns.push(cosign);
          }

          let mut erroneous = vec![];
          for cosign in cosigns {
            // If this cosign is stale, move on
            if cosign.cosign.block_number <= latest_cosigned_block_number {
              continue;
            }

            match cosigning.intake_cosign(&cosign) {
              // Publish this cosign
              Ok(()) => p2p.publish_cosign(cosign).await,
              Err(e) => {
                assert!(e.temporal(), "signed an invalid cosign: {e:?}");
                // Since this had a temporal error, queue it to try again later
                erroneous.push(cosign);
              }
            };
          }

          // Save the cosigns with temporal errors to the database
          ErroneousCosigns::set(&mut txn, &erroneous);

          txn.commit();
        }
        Err(Faulted) => {
          // We don't panic here as the following code rebroadcasts our cosigns which is
          // necessary to inform other coordinators of the faulty cosigns
          log::error!("cosigning faulted");
        }
      }

      let time_till_cosign_rebroadcast = (last_cosign_rebroadcast +
        serai_cosign::BROADCAST_FREQUENCY)
        .saturating_duration_since(Instant::now());
      tokio::select! {
        () = tokio::time::sleep(time_till_cosign_rebroadcast) => {
          for cosign in cosigning.cosigns_to_rebroadcast() {
            p2p.publish_cosign(cosign).await;
          }
        }
        cosign = p2p_cosigns.recv() => {
          let cosign = cosign.expect("p2p cosigns channel was dropped?");
          if cosigning.intake_cosign(&cosign).is_ok() {
            p2p.publish_cosign(cosign).await;
          }
        }
        // Make sure this loop runs at least this often
        () = tokio::time::sleep(COSIGN_LOOP_INTERVAL) => {}
      }
    }
  });
}

async fn handle_network(
  mut db: impl serai_db::Db,
  message_queue: Arc<MessageQueue>,
  serai: Arc<Serai>,
  network: ExternalNetworkId,
) {
  // Spawn the task to publish batches for this network
  {
    let (publish_batch_task_def, publish_batch_task) = Task::new();
    tokio::spawn(
      PublishBatchTask::new(db.clone(), serai.clone(), network)
        .continually_run(publish_batch_task_def, vec![]),
    );
    // Forget its handle so it always runs in the background
    core::mem::forget(publish_batch_task);
  }

  // Handle Processor messages
  loop {
    let (msg_id, msg) = {
      let msg = message_queue.next(Service::Processor(network)).await;
      // Check this message's sender is as expected
      assert_eq!(msg.from, Service::Processor(network));

      // Check this message's ID is as expected
      let last = LastProcessorMessage::get(&db, network);
      let next = last.map(|id| id + 1).unwrap_or(0);
      // This should either be the last message's ID, if we committed but didn't send our ACK, or
      // the expected next message's ID
      assert!((Some(msg.id) == last) || (msg.id == next));

      // TODO: Check msg.sig

      // If this is the message we already handled, and just failed to ACK, ACK it now and move on
      if Some(msg.id) == last {
        message_queue.ack(Service::Processor(network), msg.id).await;
        continue;
      }

      (msg.id, messages::ProcessorMessage::deserialize(&mut msg.msg.as_slice()).unwrap())
    };

    let mut txn = db.txn();

    match msg {
      messages::ProcessorMessage::KeyGen(msg) => match msg {
        messages::key_gen::ProcessorMessage::Participation { session, participation } => {
          let set = ExternalValidatorSet { network, session };
          TributaryTransactionsFromProcessorMessages::send(
            &mut txn,
            set,
            &Transaction::DkgParticipation { participation, signed: Signed::default() },
          );
        }
        messages::key_gen::ProcessorMessage::GeneratedKeyPair {
          session,
          substrate_key,
          network_key,
        } => {
          KeysToConfirm::set(
            &mut txn,
            ExternalValidatorSet { network, session },
            &KeyPair(
              PublicKey::from_raw(substrate_key),
              network_key
                .try_into()
                .expect("generated a network key which exceeds the maximum key length"),
            ),
          );
        }
        messages::key_gen::ProcessorMessage::Blame { session, participant } => {
          RemoveParticipant::send(&mut txn, ExternalValidatorSet { network, session }, participant);
        }
      },
      messages::ProcessorMessage::Sign(msg) => match msg {
        messages::sign::ProcessorMessage::InvalidParticipant { session, participant } => {
          RemoveParticipant::send(&mut txn, ExternalValidatorSet { network, session }, participant);
        }
        messages::sign::ProcessorMessage::Preprocesses { id, preprocesses } => {
          let set = ExternalValidatorSet { network, session: id.session };
          if id.attempt == 0 {
            // Batches are declared by their intent to be signed
            if let messages::sign::VariantSignId::Batch(hash) = id.id {
              TributaryTransactionsFromProcessorMessages::send(
                &mut txn,
                set,
                &Transaction::Batch { hash },
              );
            }
          }

          TributaryTransactionsFromProcessorMessages::send(
            &mut txn,
            set,
            &Transaction::Sign {
              id: id.id,
              attempt: id.attempt,
              round: SigningProtocolRound::Preprocess,
              data: preprocesses,
              signed: Signed::default(),
            },
          );
        }
        messages::sign::ProcessorMessage::Shares { id, shares } => {
          let set = ExternalValidatorSet { network, session: id.session };
          TributaryTransactionsFromProcessorMessages::send(
            &mut txn,
            set,
            &Transaction::Sign {
              id: id.id,
              attempt: id.attempt,
              round: SigningProtocolRound::Share,
              data: shares,
              signed: Signed::default(),
            },
          );
        }
      },
      messages::ProcessorMessage::Coordinator(msg) => match msg {
        messages::coordinator::ProcessorMessage::CosignedBlock { cosign } => {
          SignedCosigns::send(&mut txn, &cosign);
        }
        messages::coordinator::ProcessorMessage::SignedBatch { batch } => {
          SignedBatches::send(&mut txn, &batch);
        }
        messages::coordinator::ProcessorMessage::SignedSlashReport {
          session,
          slash_report,
          signature,
        } => {
          SlashReports::set(
            &mut txn,
            ExternalValidatorSet { network, session },
            slash_report,
            Signature::from(signature),
          );
        }
      },
      messages::ProcessorMessage::Substrate(msg) => match msg {
        messages::substrate::ProcessorMessage::SubstrateBlockAck { block, plans } => {
          let mut by_session = HashMap::new();
          for plan in plans {
            by_session
              .entry(plan.session)
              .or_insert_with(|| Vec::with_capacity(1))
              .push(plan.transaction_plan_id);
          }
          for (session, plans) in by_session {
            let set = ExternalValidatorSet { network, session };
            SubstrateBlockPlans::set(&mut txn, set, block, &plans);
            TributaryTransactionsFromProcessorMessages::send(
              &mut txn,
              set,
              &Transaction::SubstrateBlock { hash: block },
            );
          }
        }
      },
    }

    // Mark this as the last handled message
    LastProcessorMessage::set(&mut txn, network, &msg_id);
    // Commit the txn
    txn.commit();
    // Now that we won't handle this message again, acknowledge it so we won't see it again
    message_queue.ack(Service::Processor(network), msg_id).await;
  }
}

#[tokio::main]
async fn main() {
  // Override the panic handler with one which will panic if any tokio task panics
  {
    let existing = std::panic::take_hook();
    std::panic::set_hook(Box::new(move |panic| {
      existing(panic);
      const MSG: &str = "exiting the process due to a task panicking";
      println!("{MSG}");
      log::error!("{MSG}");
      std::process::exit(1);
    }));
  }

  // Initialize the logger
  if std::env::var("RUST_LOG").is_err() {
    std::env::set_var("RUST_LOG", serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_string()));
  }
  env_logger::init();
  log::info!("starting coordinator service...");

  // Read the Serai key from the env
  let serai_key = {
    let mut key_hex = serai_env::var("SERAI_KEY").expect("Serai key wasn't provided");
    let mut key_vec = hex::decode(&key_hex).map_err(|_| ()).expect("Serai key wasn't hex-encoded");
    key_hex.zeroize();
    if key_vec.len() != 32 {
      key_vec.zeroize();
      panic!("Serai key had an invalid length");
    }
    let mut key_bytes = [0; 32];
    key_bytes.copy_from_slice(&key_vec);
    key_vec.zeroize();
    let key = Zeroizing::new(<Ristretto as WrappedGroup>::F::from_repr(key_bytes).unwrap());
    key_bytes.zeroize();
    key
  };

  // Open the database
  let mut db = coordinator_db();

  let existing_tributaries_at_boot = {
    let mut txn = db.txn();

    // Cleanup all historic Tributaries
    while let Some(to_cleanup) = TributaryCleanup::try_recv(&mut txn) {
      prune_tributary_db(to_cleanup);
      // Remove the keys to confirm for this network
      KeysToConfirm::take(&mut txn, to_cleanup);
      KeySet::take(&mut txn, to_cleanup);
      // Drain the cosign intents created for this set
      while !Cosigning::<Db>::intended_cosigns(&mut txn, to_cleanup).is_empty() {}
      // Drain the transactions to publish for this set
      while TributaryTransactionsFromProcessorMessages::try_recv(&mut txn, to_cleanup).is_some() {}
      while TributaryTransactionsFromDkgConfirmation::try_recv(&mut txn, to_cleanup).is_some() {}
      // Drain the participants to remove for this set
      while RemoveParticipant::try_recv(&mut txn, to_cleanup).is_some() {}
      // Remove the SignSlashReport notification
      SignSlashReport::try_recv(&mut txn, to_cleanup);
    }

    // Remove retired Tributaries from ActiveTributaries
    let mut active_tributaries = ActiveTributaries::get(&txn).unwrap_or(vec![]);
    active_tributaries.retain(|tributary| {
      RetiredTributary::get(&txn, tributary.set.network).map(|session| session.0) <
        Some(tributary.set.session.0)
    });
    ActiveTributaries::set(&mut txn, &active_tributaries);

    txn.commit();

    active_tributaries
  };

  // Connect to the message-queue
  let message_queue = Arc::new(MessageQueue::from_env(Service::Coordinator));

  // Connect to the Serai node
  let serai = serai().await;

  let (p2p_add_tributary_send, p2p_add_tributary_recv) = mpsc::unbounded_channel();
  let (p2p_retire_tributary_send, p2p_retire_tributary_recv) = mpsc::unbounded_channel();
  let (p2p_cosigns_send, p2p_cosigns_recv) = mpsc::unbounded_channel();

  // Spawn the P2P network
  let p2p = {
    let serai_keypair = {
      let mut key_bytes = serai_key.to_bytes();
      // Schnorrkel SecretKey is the key followed by 32 bytes of entropy for nonces
      let mut expanded_key = Zeroizing::new([0; 64]);
      expanded_key.as_mut_slice()[.. 32].copy_from_slice(&key_bytes);
      OsRng.fill_bytes(&mut expanded_key.as_mut_slice()[32 ..]);
      key_bytes.zeroize();
      Zeroizing::new(
        schnorrkel::SecretKey::from_bytes(expanded_key.as_slice()).unwrap().to_keypair(),
      )
    };
    let p2p = p2p::Libp2p::new(&serai_keypair, serai.clone());
    tokio::spawn(p2p::run::<Db, Transaction, _>(
      db.clone(),
      p2p.clone(),
      p2p_add_tributary_recv,
      p2p_retire_tributary_recv,
      p2p_cosigns_send,
    ));
    p2p
  };

  // Spawn the Substrate scanners
  let (substrate_task_def, substrate_task) = Task::new();
  let (substrate_canonical_task_def, substrate_canonical_task) = Task::new();
  tokio::spawn(
    CanonicalEventStream::new(db.clone(), serai.clone())
      .continually_run(substrate_canonical_task_def, vec![substrate_task.clone()]),
  );
  let (substrate_ephemeral_task_def, substrate_ephemeral_task) = Task::new();
  tokio::spawn(
    EphemeralEventStream::new(
      db.clone(),
      serai.clone(),
      SeraiAddress((<Ristretto as WrappedGroup>::generator() * serai_key.deref()).to_bytes()),
    )
    .continually_run(substrate_ephemeral_task_def, vec![substrate_task]),
  );

  // Spawn the cosign handler
  spawn_cosigning(
    db.clone(),
    serai.clone(),
    p2p.clone(),
    // Run the Substrate scanners once we cosign new blocks
    vec![substrate_canonical_task, substrate_ephemeral_task],
    p2p_cosigns_recv,
  );

  // Spawn all Tributaries on-disk
  for tributary in existing_tributaries_at_boot {
    crate::tributary::spawn_tributary(
      db.clone(),
      message_queue.clone(),
      p2p.clone(),
      &p2p_add_tributary_send,
      tributary,
      serai_key.clone(),
    )
    .await;
  }

  // Handle the events from the Substrate scanner
  tokio::spawn(
    (SubstrateTask {
      serai_key: serai_key.clone(),
      db: db.clone(),
      message_queue: message_queue.clone(),
      p2p: p2p.clone(),
      p2p_add_tributary: p2p_add_tributary_send.clone(),
      p2p_retire_tributary: p2p_retire_tributary_send.clone(),
    })
    .continually_run(substrate_task_def, vec![]),
  );

  // Handle each of the networks
  for network in serai_client::primitives::EXTERNAL_NETWORKS {
    tokio::spawn(handle_network(db.clone(), message_queue.clone(), serai.clone(), network));
  }

  // Spawn the task to set keys
  {
    let (set_keys_task_def, set_keys_task) = Task::new();
    tokio::spawn(
      SetKeysTask::new(db.clone(), serai.clone()).continually_run(set_keys_task_def, vec![]),
    );
    // Forget its handle so it always runs in the background
    core::mem::forget(set_keys_task);
  }

  // Spawn the task to publish slash reports
  {
    let (publish_slash_report_task_def, publish_slash_report_task) = Task::new();
    tokio::spawn(
      PublishSlashReportTask::new(db, serai).continually_run(publish_slash_report_task_def, vec![]),
    );
    // Always have this run in the background
    core::mem::forget(publish_slash_report_task);
  }

  // Run the spawned tasks ad-infinitum
  core::future::pending().await
}
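The message-queue loop above only acknowledges a Processor message after committing the transaction which records it as handled, and tolerates a redelivery of that same ID. A standalone sketch (illustrative function, plain std) of the accepted ID window:

fn should_process(last_handled: Option<u64>, incoming: u64) -> Result<bool, &'static str> {
  let next = last_handled.map(|id| id + 1).unwrap_or(0);
  if Some(incoming) == last_handled {
    // Already handled before a reboot or dropped ACK; just (re-)acknowledge it
    Ok(false)
  } else if incoming == next {
    Ok(true)
  } else {
    Err("message ID is neither the last handled nor the next expected")
  }
}

fn main() {
  assert_eq!(should_process(None, 0), Ok(true));
  assert_eq!(should_process(Some(4), 5), Ok(true));
  assert_eq!(should_process(Some(5), 5), Ok(false));
  assert!(should_process(Some(5), 7).is_err());
}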
coordinator/src/substrate.rs (new file, 164 lines)
@@ -0,0 +1,164 @@
use core::future::Future;
use std::sync::Arc;

use zeroize::Zeroizing;

use ciphersuite::*;
use dalek_ff_group::Ristretto;

use tokio::sync::mpsc;

use serai_db::{DbTxn, Db as DbTrait};

use serai_client::validator_sets::primitives::{Session, ExternalValidatorSet};
use message_queue::{Service, Metadata, client::MessageQueue};

use tributary_sdk::Tributary;

use serai_task::ContinuallyRan;

use serai_coordinator_tributary::Transaction;
use serai_coordinator_p2p::P2p;

use crate::{Db, KeySet};

pub(crate) struct SubstrateTask<P: P2p> {
  pub(crate) serai_key: Zeroizing<<Ristretto as WrappedGroup>::F>,
  pub(crate) db: Db,
  pub(crate) message_queue: Arc<MessageQueue>,
  pub(crate) p2p: P,
  pub(crate) p2p_add_tributary:
    mpsc::UnboundedSender<(ExternalValidatorSet, Tributary<Db, Transaction, P>)>,
  pub(crate) p2p_retire_tributary: mpsc::UnboundedSender<ExternalValidatorSet>,
}

impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
  type Error = String; // TODO
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;

      // Handle the Canonical events
      for network in serai_client::primitives::EXTERNAL_NETWORKS {
        loop {
          let mut txn = self.db.txn();
          let Some(msg) = serai_coordinator_substrate::Canonical::try_recv(&mut txn, network)
          else {
            break;
          };

          match msg {
            messages::substrate::CoordinatorMessage::SetKeys { session, .. } => {
              KeySet::set(&mut txn, ExternalValidatorSet { network, session }, &());
            }
            messages::substrate::CoordinatorMessage::SlashesReported { session } => {
              let prior_retired = crate::db::RetiredTributary::get(&txn, network);
              let next_to_be_retired =
                prior_retired.map(|session| Session(session.0 + 1)).unwrap_or(Session(0));
              assert_eq!(session, next_to_be_retired);
              crate::db::RetiredTributary::set(&mut txn, network, &session);
              self
                .p2p_retire_tributary
                .send(ExternalValidatorSet { network, session })
                .expect("p2p retire_tributary channel dropped?");
            }
            messages::substrate::CoordinatorMessage::Block { .. } => {}
          }

          let msg = messages::CoordinatorMessage::from(msg);
          let metadata = Metadata {
            from: Service::Coordinator,
            to: Service::Processor(network),
            intent: msg.intent(),
          };
          let msg = borsh::to_vec(&msg).unwrap();
          self.message_queue.queue(metadata, msg).await?;
          txn.commit();
          made_progress = true;
        }
      }

      // Handle the NewSet events
      loop {
        let mut txn = self.db.txn();
        let Some(new_set) = serai_coordinator_substrate::NewSet::try_recv(&mut txn) else { break };

        if let Some(historic_session) = new_set.set.session.0.checked_sub(2) {
          // We should have retired this session if we're here
          if crate::db::RetiredTributary::get(&txn, new_set.set.network).map(|session| session.0) <
            Some(historic_session)
          {
            /*
              If we haven't, it's because we're processing the NewSet event before the retirement
              event from the Canonical event stream. This happens if the Canonical event, and
              then the NewSet event, is fired while we're already iterating over NewSet events.

              We break, dropping the txn, restoring this NewSet to the database, so we'll only
              handle it once a future iteration of this loop handles the retirement event.
            */
            break;
          }

          /*
            Queue this historical Tributary for deletion.

            We explicitly don't queue this upon Tributary retire, instead here, to give time to
            investigate retired Tributaries if questions are raised post-retirement. This gives a
            week (the duration of the following session) after the Tributary has been retired to
            make a backup of the data directory for any investigations.
          */
          crate::db::TributaryCleanup::send(
            &mut txn,
            &ExternalValidatorSet {
              network: new_set.set.network,
              session: Session(historic_session),
            },
          );
        }

        // Save this Tributary as active to the database
        {
          let mut active_tributaries =
            crate::db::ActiveTributaries::get(&txn).unwrap_or(Vec::with_capacity(1));
          active_tributaries.push(new_set.clone());
          crate::db::ActiveTributaries::set(&mut txn, &active_tributaries);
        }

        // Send GenerateKey to the processor
        let msg = messages::key_gen::CoordinatorMessage::GenerateKey {
          session: new_set.set.session,
          threshold: new_set.threshold,
          evrf_public_keys: new_set.evrf_public_keys.clone(),
        };
        let msg = messages::CoordinatorMessage::from(msg);
        let metadata = Metadata {
          from: Service::Coordinator,
          to: Service::Processor(new_set.set.network),
          intent: msg.intent(),
        };
        let msg = borsh::to_vec(&msg).unwrap();
        self.message_queue.queue(metadata, msg).await?;

        // Commit the transaction for all of this
        txn.commit();

        // Now spawn the Tributary
        // If we reboot after committing the txn, but before this is called, this will be called
        // on boot
        crate::tributary::spawn_tributary(
          self.db.clone(),
          self.message_queue.clone(),
          self.p2p.clone(),
          &self.p2p_add_tributary,
          new_set,
          self.serai_key.clone(),
        )
        .await;

        made_progress = true;
      }

      Ok(made_progress)
    }
  }
}
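The `SlashesReported` arm above asserts that sessions retire strictly in order. A standalone sketch (local `Session` newtype, not the `serai_client` one) of the expected sequencing:

#[derive(Clone, Copy, Debug, PartialEq)]
struct Session(u32);

// The session reported as slashed must be exactly one past the previously retired session,
// or session 0 if nothing was retired yet
fn next_to_be_retired(prior_retired: Option<Session>) -> Session {
  prior_retired.map(|session| Session(session.0 + 1)).unwrap_or(Session(0))
}

fn main() {
  assert_eq!(next_to_be_retired(None), Session(0));
  assert_eq!(next_to_be_retired(Some(Session(0))), Session(1));
  assert_eq!(next_to_be_retired(Some(Session(6))), Session(7));
}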
coordinator/src/tributary.rs (new file, 595 lines)
@@ -0,0 +1,595 @@
|
|||||||
|
use core::{future::Future, time::Duration};
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use zeroize::Zeroizing;
|
||||||
|
use rand_core::OsRng;
|
||||||
|
use blake2::{digest::typenum::U32, Digest, Blake2s};
|
||||||
|
use ciphersuite::*;
|
||||||
|
use dalek_ff_group::Ristretto;
|
||||||
|
|
||||||
|
use tokio::sync::mpsc;
|
||||||
|
|
||||||
|
use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel};
|
||||||
|
|
||||||
|
use scale::Encode;
|
||||||
|
use serai_client::validator_sets::primitives::ExternalValidatorSet;
|
||||||
|
|
||||||
|
use tributary_sdk::{TransactionKind, TransactionError, ProvidedError, TransactionTrait, Tributary};
|
||||||
|
|
||||||
|
use serai_task::{Task, TaskHandle, DoesNotError, ContinuallyRan};
|
||||||
|
|
||||||
|
use message_queue::{Service, Metadata, client::MessageQueue};
|
||||||
|
|
||||||
|
use serai_cosign::{Faulted, CosignIntent, Cosigning};
|
||||||
|
use serai_coordinator_substrate::{NewSetInformation, SignSlashReport};
|
||||||
|
use serai_coordinator_tributary::{
|
||||||
|
Topic, Transaction, ProcessorMessages, CosignIntents, RecognizedTopics, ScanTributaryTask,
|
||||||
|
};
|
||||||
|
use serai_coordinator_p2p::P2p;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
Db, TributaryTransactionsFromProcessorMessages, TributaryTransactionsFromDkgConfirmation,
|
||||||
|
RemoveParticipant, dkg_confirmation::ConfirmDkgTask,
|
||||||
|
};
|
||||||
|
|
||||||
|
create_db! {
|
||||||
|
Coordinator {
|
||||||
|
PublishOnRecognition: (set: ExternalValidatorSet, topic: Topic) -> Transaction,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
db_channel! {
|
||||||
|
Coordinator {
|
||||||
|
PendingCosigns: (set: ExternalValidatorSet) -> CosignIntent,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Provide a Provided Transaction to the Tributary.
|
||||||
|
///
|
||||||
|
/// This is not a well-designed function. This is specific to the context in which its called,
|
||||||
|
/// within this file. It should only be considered an internal helper for this domain alone.
|
||||||
|
async fn provide_transaction<TD: DbTrait, P: P2p>(
|
||||||
|
set: ExternalValidatorSet,
|
||||||
|
tributary: &Tributary<TD, Transaction, P>,
|
||||||
|
tx: Transaction,
|
||||||
|
) {
|
||||||
|
match tributary.provide_transaction(tx.clone()).await {
|
||||||
|
// The Tributary uses its own DB, so we may provide this multiple times if we reboot before
|
||||||
|
// committing the txn which provoked this
|
||||||
|
Ok(()) | Err(ProvidedError::AlreadyProvided) => {}
|
||||||
|
Err(ProvidedError::NotProvided) => {
|
||||||
|
panic!("providing a Transaction which wasn't a Provided transaction: {tx:?}");
|
||||||
|
}
|
||||||
|
Err(ProvidedError::InvalidProvided(e)) => {
|
||||||
|
panic!("providing an invalid Provided transaction, tx: {tx:?}, error: {e:?}")
|
||||||
|
}
|
||||||
|
// The Tributary's scan task won't advance if we don't have the Provided transactions
|
||||||
|
// present on-chain, and this enters an infinite loop to block the calling task from
|
||||||
|
// advancing
|
||||||
|
Err(ProvidedError::LocalMismatchesOnChain) => loop {
|
||||||
|
log::error!(
|
||||||
|
"Tributary {set:?} was supposed to provide {tx:?} but peers disagree, halting Tributary",
|
||||||
|
);
|
||||||
|
// Print this every five minutes as this does need to be handled
|
||||||
|
tokio::time::sleep(Duration::from_secs(5 * 60)).await;
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Provides Cosign/Cosigned Transactions onto the Tributary.
|
||||||
|
pub(crate) struct ProvideCosignCosignedTransactionsTask<CD: DbTrait, TD: DbTrait, P: P2p> {
|
||||||
|
db: CD,
|
||||||
|
tributary_db: TD,
|
||||||
|
set: NewSetInformation,
|
||||||
|
tributary: Tributary<TD, Transaction, P>,
|
||||||
|
}
|
||||||
|
impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan
|
||||||
|
for ProvideCosignCosignedTransactionsTask<CD, TD, P>
|
||||||
|
{
|
||||||
|
type Error = String;
|
||||||
|
|
||||||
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
|
async move {
|
||||||
|
let mut made_progress = false;
|
||||||
|
|
||||||
|
// Check if we produced any cosigns we were supposed to
|
||||||
|
let mut pending_notable_cosign = false;
|
||||||
|
loop {
|
||||||
|
let mut txn = self.db.txn();
|
||||||
|
|
||||||
|
// Fetch the next cosign this tributary should handle
|
||||||
|
let Some(cosign) = PendingCosigns::try_recv(&mut txn, self.set.set) else { break };
|
||||||
|
pending_notable_cosign = cosign.notable;
|
||||||
|
|
||||||
|
// If we (Serai) haven't cosigned this block, break as this is still pending
|
||||||
|
let latest = match Cosigning::<CD>::latest_cosigned_block_number(&txn) {
|
||||||
|
Ok(latest) => latest,
|
||||||
|
Err(Faulted) => {
|
||||||
|
log::error!("cosigning faulted");
|
||||||
|
Err("cosigning faulted")?
|
||||||
|
}
|
||||||
|
};
|
||||||
|
if latest < cosign.block_number {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Because we've cosigned it, provide the TX for that
|
||||||
|
{
|
||||||
|
let mut txn = self.tributary_db.txn();
|
||||||
|
CosignIntents::provide(&mut txn, self.set.set, &cosign);
|
||||||
|
txn.commit();
|
||||||
|
}
|
||||||
|
provide_transaction(
|
||||||
|
self.set.set,
|
||||||
|
&self.tributary,
|
||||||
|
Transaction::Cosigned { substrate_block_hash: cosign.block_hash },
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
// Clear pending_notable_cosign since this cosign isn't pending
|
||||||
|
pending_notable_cosign = false;
|
||||||
|
|
||||||
|
// Commit the txn to clear this from PendingCosigns
|
||||||
|
txn.commit();
|
||||||
|
made_progress = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we don't have any notable cosigns pending, provide the next set of cosign intents
|
||||||
|
if !pending_notable_cosign {
|
||||||
|
let mut txn = self.db.txn();
|
||||||
|
// intended_cosigns will only yield up to and including the next notable cosign
|
||||||
|
for cosign in Cosigning::<CD>::intended_cosigns(&mut txn, self.set.set) {
|
||||||
|
// Flag this cosign as pending
|
||||||
|
PendingCosigns::send(&mut txn, self.set.set, &cosign);
|
||||||
|
// Provide the transaction to queue it for work
|
||||||
|
provide_transaction(
|
||||||
|
self.set.set,
|
||||||
|
&self.tributary,
|
||||||
|
Transaction::Cosign { substrate_block_hash: cosign.block_hash },
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
txn.commit();
|
||||||
|
made_progress = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(made_progress)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[must_use]
async fn add_signed_unsigned_transaction<TD: DbTrait, P: P2p>(
  tributary: &Tributary<TD, Transaction, P>,
  key: &Zeroizing<<Ristretto as WrappedGroup>::F>,
  mut tx: Transaction,
) -> bool {
  // If this is a signed transaction, sign it
  if matches!(tx.kind(), TransactionKind::Signed(_, _)) {
    tx.sign(&mut OsRng, tributary.genesis(), key);
  }

  let res = tributary.add_transaction(tx.clone()).await;
  match &res {
    // Fresh publication, already published
    Ok(true | false) => {}
    Err(
      TransactionError::TooLargeTransaction |
      TransactionError::InvalidSigner |
      TransactionError::InvalidSignature |
      TransactionError::InvalidContent,
    ) => {
      panic!("created an invalid transaction, tx: {tx:?}, err: {res:?}");
    }
    // InvalidNonce may be out-of-order TXs, not invalid ones, but we only create nonce #n+1 after
    // on-chain inclusion of the TX with nonce #n, so it is invalid within our context unless the
    // issue is this transaction was already included on-chain
    Err(TransactionError::InvalidNonce) => {
      let TransactionKind::Signed(order, signed) = tx.kind() else {
        panic!("non-Signed transaction had InvalidNonce");
      };
      let next_nonce = tributary
        .next_nonce(&signed.signer, &order)
        .await
        .expect("signer who is a present validator didn't have a nonce");
      assert!(next_nonce != signed.nonce);
      // We're publishing an old transaction
      if next_nonce > signed.nonce {
        return true;
      }
      panic!("nonce in transaction wasn't contiguous with nonce on-chain");
    }
    // We've published too many transactions recently
    Err(TransactionError::TooManyInMempool) => {
      return false;
    }
    // This isn't a Provided transaction so this should never be hit
    Err(TransactionError::ProvidedAddedToMempool) => unreachable!(),
  }

  true
}

async fn add_with_recognition_check<TD: DbTrait, P: P2p>(
  set: ExternalValidatorSet,
  tributary_db: &mut TD,
  tributary: &Tributary<TD, Transaction, P>,
  key: &Zeroizing<<Ristretto as WrappedGroup>::F>,
  tx: Transaction,
) -> bool {
  let kind = tx.kind();
  match kind {
    TransactionKind::Provided(_) => provide_transaction(set, tributary, tx).await,
    TransactionKind::Unsigned | TransactionKind::Signed(_, _) => {
      // If this is a transaction with signing data, check the topic is recognized before
      // publishing
      let topic = tx.topic();
      let still_requires_recognition = if let Some(topic) = topic {
        (topic.requires_recognition() && (!RecognizedTopics::recognized(tributary_db, set, topic)))
          .then_some(topic)
      } else {
        None
      };
      if let Some(topic) = still_requires_recognition {
        // Queue the transaction until the topic is recognized
        // We use the Tributary DB for this so it's cleaned up when the Tributary DB is
        let mut tributary_txn = tributary_db.txn();
        PublishOnRecognition::set(&mut tributary_txn, set, topic, &tx);
        tributary_txn.commit();
      } else {
        // Actually add the transaction
        if !add_signed_unsigned_transaction(tributary, key, tx).await {
          return false;
        }
      }
    }
  }
  true
}

/// Adds all of the transactions sent via `TributaryTransactionsFromProcessorMessages`.
pub(crate) struct AddTributaryTransactionsTask<CD: DbTrait, TD: DbTrait, P: P2p> {
  db: CD,
  tributary_db: TD,
  tributary: Tributary<TD, Transaction, P>,
  set: NewSetInformation,
  key: Zeroizing<<Ristretto as WrappedGroup>::F>,
}
impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for AddTributaryTransactionsTask<CD, TD, P> {
  type Error = DoesNotError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;

      // Provide/add all transactions sent our way
      loop {
        let mut txn = self.db.txn();
        let Some(tx) = TributaryTransactionsFromDkgConfirmation::try_recv(&mut txn, self.set.set)
        else {
          break;
        };

        if !add_with_recognition_check(
          self.set.set,
          &mut self.tributary_db,
          &self.tributary,
          &self.key,
          tx,
        )
        .await
        {
          break;
        }

        made_progress = true;
        txn.commit();
      }

      loop {
        let mut txn = self.db.txn();
        let Some(tx) = TributaryTransactionsFromProcessorMessages::try_recv(&mut txn, self.set.set)
        else {
          break;
        };

        if !add_with_recognition_check(
          self.set.set,
          &mut self.tributary_db,
          &self.tributary,
          &self.key,
          tx,
        )
        .await
        {
          break;
        }

        made_progress = true;
        txn.commit();
      }

      // Provide/add all transactions due to newly recognized topics
      loop {
        let mut tributary_txn = self.tributary_db.txn();
        let Some(topic) =
          RecognizedTopics::try_recv_topic_requiring_recognition(&mut tributary_txn, self.set.set)
        else {
          break;
        };
        if let Some(tx) = PublishOnRecognition::take(&mut tributary_txn, self.set.set, topic) {
          if !add_signed_unsigned_transaction(&self.tributary, &self.key, tx).await {
            break;
          }
        }

        made_progress = true;
        tributary_txn.commit();
      }

      // Publish any participant removals
      loop {
        let mut txn = self.db.txn();
        let Some(participant) = RemoveParticipant::try_recv(&mut txn, self.set.set) else { break };
        let tx = Transaction::RemoveParticipant {
          participant: self.set.participant_indexes_reverse_lookup[&participant],
          signed: Default::default(),
        };
        if !add_signed_unsigned_transaction(&self.tributary, &self.key, tx).await {
          break;
        }
        made_progress = true;
        txn.commit();
      }

      Ok(made_progress)
    }
  }
}

/// Takes the messages from ScanTributaryTask and publishes them to the message-queue.
pub(crate) struct TributaryProcessorMessagesTask<TD: DbTrait> {
  tributary_db: TD,
  set: ExternalValidatorSet,
  message_queue: Arc<MessageQueue>,
}
impl<TD: DbTrait> ContinuallyRan for TributaryProcessorMessagesTask<TD> {
  type Error = String; // TODO

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;
      loop {
        let mut txn = self.tributary_db.txn();
        let Some(msg) = ProcessorMessages::try_recv(&mut txn, self.set) else { break };
        let metadata = Metadata {
          from: Service::Coordinator,
          to: Service::Processor(self.set.network),
          intent: msg.intent(),
        };
        let msg = borsh::to_vec(&msg).unwrap();
        self.message_queue.queue(metadata, msg).await?;
        txn.commit();
        made_progress = true;
      }
      Ok(made_progress)
    }
  }
}

/// Checks for the notification to sign a slash report and does so if present.
pub(crate) struct SignSlashReportTask<CD: DbTrait, TD: DbTrait, P: P2p> {
  db: CD,
  tributary_db: TD,
  tributary: Tributary<TD, Transaction, P>,
  set: NewSetInformation,
  key: Zeroizing<<Ristretto as WrappedGroup>::F>,
}
impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for SignSlashReportTask<CD, TD, P> {
  type Error = DoesNotError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut txn = self.db.txn();
      let Some(()) = SignSlashReport::try_recv(&mut txn, self.set.set) else { return Ok(false) };

      // Fetch the slash report for this Tributary
      let mut tx =
        serai_coordinator_tributary::slash_report_transaction(&self.tributary_db, &self.set);
      tx.sign(&mut OsRng, self.tributary.genesis(), &self.key);

      let res = self.tributary.add_transaction(tx.clone()).await;
      match &res {
        // Fresh publication, already published
        Ok(true | false) => {}
        Err(
          TransactionError::TooLargeTransaction |
          TransactionError::InvalidSigner |
          TransactionError::InvalidNonce |
          TransactionError::InvalidSignature |
          TransactionError::InvalidContent,
        ) => {
          panic!("created an invalid SlashReport transaction, tx: {tx:?}, err: {res:?}");
        }
        // We've published too many transactions recently
        // Drop this txn to try to publish it again later on a future iteration
        Err(TransactionError::TooManyInMempool) => {
          drop(txn);
          return Ok(false);
        }
        // This isn't a Provided transaction so this should never be hit
        Err(TransactionError::ProvidedAddedToMempool) => unreachable!(),
      }

      txn.commit();
      Ok(true)
    }
  }
}

/// Run the scan task whenever the Tributary adds a new block.
async fn scan_on_new_block<CD: DbTrait, TD: DbTrait, P: P2p>(
  db: CD,
  set: ExternalValidatorSet,
  tributary: Tributary<TD, Transaction, P>,
  scan_tributary_task: TaskHandle,
  tasks_to_keep_alive: Vec<TaskHandle>,
) {
  loop {
    // Break once this Tributary is retired
    if crate::RetiredTributary::get(&db, set.network).map(|session| session.0) >=
      Some(set.session.0)
    {
      drop(tasks_to_keep_alive);
      break;
    }

    // Have the tributary scanner run as soon as there's a new block
    match tributary.next_block_notification().await.await {
      Ok(()) => scan_tributary_task.run_now(),
      // unreachable since this owns the tributary object and doesn't drop it
      Err(_) => panic!("tributary was dropped causing notification to error"),
    }
  }
}

/// Spawn a Tributary.
///
/// This will:
/// - Spawn the Tributary
/// - Inform the P2P network of the Tributary
/// - Spawn the ScanTributaryTask
/// - Spawn the ProvideCosignCosignedTransactionsTask
/// - Spawn the TributaryProcessorMessagesTask
/// - Spawn the AddTributaryTransactionsTask
/// - Spawn the ConfirmDkgTask
/// - Spawn the SignSlashReportTask
/// - Iterate the scan task whenever a new block occurs (not just on the standard interval)
pub(crate) async fn spawn_tributary<P: P2p>(
  db: Db,
  message_queue: Arc<MessageQueue>,
  p2p: P,
  p2p_add_tributary: &mpsc::UnboundedSender<(ExternalValidatorSet, Tributary<Db, Transaction, P>)>,
  set: NewSetInformation,
  serai_key: Zeroizing<<Ristretto as WrappedGroup>::F>,
) {
  // Don't spawn retired Tributaries
  if crate::db::RetiredTributary::get(&db, set.set.network).map(|session| session.0) >=
    Some(set.set.session.0)
  {
    return;
  }

  let genesis = <[u8; 32]>::from(Blake2s::<U32>::digest((set.serai_block, set.set).encode()));

  // Since the Serai block will be finalized, then cosigned, before we handle this, this time will
  // be a couple of minutes stale. While the Tributary will still function with a start time in the
  // past, the Tributary will immediately incur round timeouts. We reduce these by adding a
  // constant delay of a couple of minutes.
  const TRIBUTARY_START_TIME_DELAY: u64 = 120;
  let start_time = set.declaration_time + TRIBUTARY_START_TIME_DELAY;

  let mut tributary_validators = Vec::with_capacity(set.validators.len());
  for (validator, weight) in set.validators.iter().copied() {
    let validator_key = <Ristretto as GroupIo>::read_G(&mut validator.0.as_slice())
      .expect("Serai validator had an invalid public key");
    let weight = u64::from(weight);
    tributary_validators.push((validator_key, weight));
  }

  // Spawn the Tributary
  let tributary_db = crate::db::tributary_db(set.set);
  let tributary = Tributary::new(
    tributary_db.clone(),
    genesis,
    start_time,
    serai_key.clone(),
    tributary_validators,
    p2p,
  )
  .await
  .unwrap();
  let reader = tributary.reader();

  // Inform the P2P network
  p2p_add_tributary
    .send((set.set, tributary.clone()))
    .expect("p2p's add_tributary channel was closed?");

  // Spawn the task to provide Cosign/Cosigned transactions onto the Tributary
  let (provide_cosign_cosigned_transactions_task_def, provide_cosign_cosigned_transactions_task) =
    Task::new();
  tokio::spawn(
    (ProvideCosignCosignedTransactionsTask {
      db: db.clone(),
      tributary_db: tributary_db.clone(),
      set: set.clone(),
      tributary: tributary.clone(),
    })
    .continually_run(provide_cosign_cosigned_transactions_task_def, vec![]),
  );

  // Spawn the task to send all messages from the Tributary scanner to the message-queue
  let (scan_tributary_messages_task_def, scan_tributary_messages_task) = Task::new();
  tokio::spawn(
    (TributaryProcessorMessagesTask {
      tributary_db: tributary_db.clone(),
      set: set.set,
      message_queue,
    })
    .continually_run(scan_tributary_messages_task_def, vec![]),
  );

  // Spawn the scan task
  let (scan_tributary_task_def, scan_tributary_task) = Task::new();
  tokio::spawn(
    ScanTributaryTask::<_, P>::new(tributary_db.clone(), set.clone(), reader)
      // This is the only handle for this TributaryProcessorMessagesTask, so when this task is
      // dropped, it will be too
      .continually_run(scan_tributary_task_def, vec![scan_tributary_messages_task]),
  );

  // Spawn the add transactions task
  let (add_tributary_transactions_task_def, add_tributary_transactions_task) = Task::new();
  tokio::spawn(
    (AddTributaryTransactionsTask {
      db: db.clone(),
      tributary_db: tributary_db.clone(),
      tributary: tributary.clone(),
      set: set.clone(),
      key: serai_key.clone(),
    })
    .continually_run(add_tributary_transactions_task_def, vec![]),
  );

  // Spawn the task to confirm the DKG result
  let (confirm_dkg_task_def, confirm_dkg_task) = Task::new();
  tokio::spawn(
    ConfirmDkgTask::new(db.clone(), set.clone(), tributary_db.clone(), serai_key.clone())
      .continually_run(confirm_dkg_task_def, vec![add_tributary_transactions_task]),
  );

  // Spawn the sign slash report task
  let (sign_slash_report_task_def, sign_slash_report_task) = Task::new();
  tokio::spawn(
    (SignSlashReportTask {
      db: db.clone(),
      tributary_db,
      tributary: tributary.clone(),
      set: set.clone(),
      key: serai_key,
    })
    .continually_run(sign_slash_report_task_def, vec![]),
  );

  // Whenever a new block occurs, immediately run the scan task
  // This function also preserves the ProvideCosignCosignedTransactionsTask handle until the
  // Tributary is retired, ensuring it isn't dropped prematurely and that the tasks don't run ad
  // infinitum
  tokio::spawn(scan_on_new_block(
    db,
    set.set,
    tributary,
    scan_tributary_task,
    vec![provide_cosign_cosigned_transactions_task, confirm_dkg_task, sign_slash_report_task],
  ));
}

@@ -1,6 +0,0 @@
mod transaction;
pub use transaction::Transaction;

mod db;

mod scan;

@@ -1,408 +0,0 @@
use core::future::Future;
use std::collections::HashMap;

use ciphersuite::group::GroupEncoding;

use serai_client::{
  primitives::SeraiAddress,
  validator_sets::primitives::{ValidatorSet, Slash},
};

use tributary::{
  Signed as TributarySigned, TransactionKind, TransactionTrait,
  Transaction as TributaryTransaction, Block, TributaryReader,
  tendermint::{
    tx::{TendermintTx, Evidence, decode_signed_message},
    TendermintNetwork,
  },
};

use serai_db::*;
use serai_task::ContinuallyRan;

use messages::sign::VariantSignId;

use crate::tributary::{
  db::*,
  transaction::{SigningProtocolRound, Signed, Transaction},
};

struct ScanBlock<'a, D: DbTxn, TD: Db> {
  txn: &'a mut D,
  set: ValidatorSet,
  validators: &'a [SeraiAddress],
  total_weight: u64,
  validator_weights: &'a HashMap<SeraiAddress, u64>,
  tributary: &'a TributaryReader<TD, Transaction>,
}
impl<'a, D: DbTxn, TD: Db> ScanBlock<'a, D, TD> {
  fn potentially_start_cosign(&mut self) {
    // Don't start a new cosigning instance if we're actively running one
    if TributaryDb::actively_cosigning(self.txn, self.set) {
      return;
    }

    // Start cosigning the latest intended-to-be-cosigned block
    let Some(latest_substrate_block_to_cosign) =
      TributaryDb::latest_substrate_block_to_cosign(self.txn, self.set)
    else {
      return;
    };

    let substrate_block_number = todo!("TODO");

    // Mark us as actively cosigning
    TributaryDb::start_cosigning(self.txn, self.set, substrate_block_number);
    // Send the message for the processor to start signing
    TributaryDb::send_message(
      self.txn,
      self.set,
      messages::coordinator::CoordinatorMessage::CosignSubstrateBlock {
        session: self.set.session,
        block_number: substrate_block_number,
        block: latest_substrate_block_to_cosign,
      },
    );
  }
  fn handle_application_tx(&mut self, block_number: u64, tx: Transaction) {
    let signer = |signed: Signed| SeraiAddress(signed.signer.to_bytes());

    if let TransactionKind::Signed(_, TributarySigned { signer, .. }) = tx.kind() {
      // Don't handle transactions from those fatally slashed
      // TODO: The fact they can publish these TXs makes this a notable spam vector
      if TributaryDb::is_fatally_slashed(self.txn, self.set, SeraiAddress(signer.to_bytes())) {
        return;
      }
    }

    match tx {
      // Accumulate this vote and fatally slash the participant if past the threshold
      Transaction::RemoveParticipant { participant, signed } => {
        let signer = signer(signed);

        // Check the participant voted to be removed actually exists
        if !self.validators.iter().any(|validator| *validator == participant) {
          TributaryDb::fatal_slash(
            self.txn,
            self.set,
            signer,
            "voted to remove non-existent participant",
          );
          return;
        }

        match TributaryDb::accumulate(
          self.txn,
          self.set,
          self.validators,
          self.total_weight,
          block_number,
          Topic::RemoveParticipant { participant },
          signer,
          self.validator_weights[&signer],
          &(),
        ) {
          DataSet::None => {}
          DataSet::Participating(_) => {
            TributaryDb::fatal_slash(self.txn, self.set, participant, "voted to remove");
          }
        };
      }

      // Send the participation to the processor
      Transaction::DkgParticipation { participation, signed } => {
        TributaryDb::send_message(
          self.txn,
          self.set,
          messages::key_gen::CoordinatorMessage::Participation {
            session: self.set.session,
            participant: todo!("TODO"),
            participation,
          },
        );
      }
      Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed } => {
        // Accumulate the preprocesses into our own FROST attempt manager
        todo!("TODO")
      }
      Transaction::DkgConfirmationShare { attempt, share, signed } => {
        // Accumulate the shares into our own FROST attempt manager
        todo!("TODO")
      }

      Transaction::Cosign { substrate_block_hash } => {
        // Update the latest intended-to-be-cosigned Substrate block
        TributaryDb::set_latest_substrate_block_to_cosign(self.txn, self.set, substrate_block_hash);
        // Start a new cosign if we weren't already working on one
        self.potentially_start_cosign();
      }
      Transaction::Cosigned { substrate_block_hash } => {
        TributaryDb::finish_cosigning(self.txn, self.set);

        // Fetch the latest intended-to-be-cosigned block
        let Some(latest_substrate_block_to_cosign) =
          TributaryDb::latest_substrate_block_to_cosign(self.txn, self.set)
        else {
          return;
        };
        // If this is the block we just cosigned, return, preventing us from signing it again
        if latest_substrate_block_to_cosign == substrate_block_hash {
          return;
        }

        // Since we do have a new cosign to work on, start it
        self.potentially_start_cosign();
      }
      Transaction::SubstrateBlock { hash } => {
        // Whitelist all of the IDs this Substrate block causes to be signed
        todo!("TODO")
      }
      Transaction::Batch { hash } => {
        // Whitelist the signing of this batch, publishing our own preprocess
        todo!("TODO")
      }

      Transaction::SlashReport { slash_points, signed } => {
        let signer = signer(signed);

        if slash_points.len() != self.validators.len() {
          TributaryDb::fatal_slash(
            self.txn,
            self.set,
            signer,
            "slash report was for a distinct amount of signers",
          );
          return;
        }

        // Accumulate, and if past the threshold, calculate *the* slash report and start signing it
        match TributaryDb::accumulate(
          self.txn,
          self.set,
          self.validators,
          self.total_weight,
          block_number,
          Topic::SlashReport,
          signer,
          self.validator_weights[&signer],
          &slash_points,
        ) {
          DataSet::None => {}
          DataSet::Participating(data_set) => {
            // Find the median reported slashes for this validator
            // TODO: This lets 34% perform a fatal slash. Should that be allowed?
            let mut median_slash_report = Vec::with_capacity(self.validators.len());
            for i in 0 .. self.validators.len() {
              let mut this_validator =
                data_set.values().map(|report| report[i]).collect::<Vec<_>>();
              this_validator.sort_unstable();
              // Choose the median, where if there are two median values, the lower one is chosen
              let median_index = if (this_validator.len() % 2) == 1 {
                this_validator.len() / 2
              } else {
                (this_validator.len() / 2) - 1
              };
              median_slash_report.push(this_validator[median_index]);
            }

            // We only publish slashes for the `f` worst performers to:
            // 1) Effect amnesty if there were network disruptions which affected everyone
            // 2) Ensure the signing threshold doesn't have a disincentive to do their job

            // Find the worst performer within the signing threshold's slash points
            let f = (self.validators.len() - 1) / 3;
            let worst_validator_in_supermajority_slash_points = {
              let mut sorted_slash_points = median_slash_report.clone();
              sorted_slash_points.sort_unstable();
              // This won't be a valid index if `f == 0`, which means we don't have any validators
              // to slash
              let index_of_first_validator_to_slash = self.validators.len() - f;
              let index_of_worst_validator_in_supermajority = index_of_first_validator_to_slash - 1;
              sorted_slash_points[index_of_worst_validator_in_supermajority]
            };

            // Perform the amortization
            for slash_points in &mut median_slash_report {
              *slash_points =
                slash_points.saturating_sub(worst_validator_in_supermajority_slash_points)
            }
            let amortized_slash_report = median_slash_report;
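            // Illustrative example with hypothetical numbers (not taken from this repository):
            // with four validators whose median slash points are [0, 2, 3, 10], f = (4 - 1) / 3 =
            // 1, so the worst performer still within the supermajority has 3 points. Amortizing
            // by 3 yields [0, 0, 0, 7], leaving only the single validator with 7 points remaining
            // (at most `f` validators) to appear in the published slash report below.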

            // Create the resulting slash report
            let mut slash_report = vec![];
            for (validator, points) in self.validators.iter().copied().zip(amortized_slash_report) {
              if points != 0 {
                slash_report.push(Slash { key: validator.into(), points });
              }
            }
            assert!(slash_report.len() <= f);

            // Recognize the topic for signing the slash report
            TributaryDb::recognize_topic(
              self.txn,
              self.set,
              Topic::Sign {
                id: VariantSignId::SlashReport,
                attempt: 0,
                round: SigningProtocolRound::Preprocess,
              },
            );
            // Send the message for the processor to start signing
            TributaryDb::send_message(
              self.txn,
              self.set,
              messages::coordinator::CoordinatorMessage::SignSlashReport {
                session: self.set.session,
                report: slash_report,
              },
            );
          }
        };
      }

      Transaction::Sign { id, attempt, round, data, signed } => {
        let topic = Topic::Sign { id, attempt, round };
        let signer = signer(signed);

        if u64::try_from(data.len()).unwrap() != self.validator_weights[&signer] {
          TributaryDb::fatal_slash(
            self.txn,
            self.set,
            signer,
            "signer signed with a distinct amount of key shares than they had key shares",
          );
          return;
        }

        match TributaryDb::accumulate(
          self.txn,
          self.set,
          self.validators,
          self.total_weight,
          block_number,
          topic,
          signer,
          self.validator_weights[&signer],
          &data,
        ) {
          DataSet::None => {}
          DataSet::Participating(data_set) => {
            let id = topic.sign_id(self.set).expect("Topic::Sign didn't have SignId");
            let flatten_data_set = |data_set| todo!("TODO");
            let data_set = flatten_data_set(data_set);
            TributaryDb::send_message(
              self.txn,
              self.set,
              match round {
                SigningProtocolRound::Preprocess => {
                  messages::sign::CoordinatorMessage::Preprocesses { id, preprocesses: data_set }
                }
                SigningProtocolRound::Share => {
                  messages::sign::CoordinatorMessage::Shares { id, shares: data_set }
                }
              },
            )
          }
        };
      }
    }
  }

  fn handle_block(mut self, block_number: u64, block: Block<Transaction>) {
    TributaryDb::start_of_block(self.txn, self.set, block_number);

    for tx in block.transactions {
      match tx {
        TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(ev)) => {
          // Since the evidence is on the chain, it will have already been validated
          // We can just punish the signer
          let data = match ev {
            Evidence::ConflictingMessages(first, second) => (first, Some(second)),
            Evidence::InvalidPrecommit(first) | Evidence::InvalidValidRound(first) => (first, None),
          };
          /* TODO
          let msgs = (
            decode_signed_message::<TendermintNetwork<D, Transaction, P>>(&data.0).unwrap(),
            if data.1.is_some() {
              Some(
                decode_signed_message::<TendermintNetwork<D, Transaction, P>>(&data.1.unwrap())
                  .unwrap(),
              )
            } else {
              None
            },
          );

          // Since anything with evidence is fundamentally faulty behavior, not just temporal
          // errors, mark the node as fatally slashed
          TributaryDb::fatal_slash(
            self.txn, msgs.0.msg.sender, &format!("invalid tendermint messages: {msgs:?}"));
          */
          todo!("TODO")
        }
        TributaryTransaction::Application(tx) => {
          self.handle_application_tx(block_number, tx);
        }
      }
    }
  }
}

struct ScanTributaryTask<D: Db, TD: Db> {
  db: D,
  set: ValidatorSet,
  validators: Vec<SeraiAddress>,
  total_weight: u64,
  validator_weights: HashMap<SeraiAddress, u64>,
  tributary: TributaryReader<TD, Transaction>,
}
impl<D: Db, TD: Db> ContinuallyRan for ScanTributaryTask<D, TD> {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
    async move {
      let (mut last_block_number, mut last_block_hash) =
        TributaryDb::last_handled_tributary_block(&self.db, self.set)
          .unwrap_or((0, self.tributary.genesis()));

      let mut made_progess = false;
      while let Some(next) = self.tributary.block_after(&last_block_hash) {
        let block = self.tributary.block(&next).unwrap();
        let block_number = last_block_number + 1;
        let block_hash = block.hash();

        // Make sure we have all of the provided transactions for this block
        for tx in &block.transactions {
          let TransactionKind::Provided(order) = tx.kind() else {
            continue;
          };

          // make sure we have all the provided txs in this block locally
          if !self.tributary.locally_provided_txs_in_block(&block_hash, order) {
            return Err(format!(
              "didn't have the provided Transactions on-chain for set (ephemeral error): {:?}",
              self.set
            ));
          }
        }

        let mut txn = self.db.txn();
        (ScanBlock {
          txn: &mut txn,
          set: self.set,
          validators: &self.validators,
          total_weight: self.total_weight,
          validator_weights: &self.validator_weights,
          tributary: &self.tributary,
        })
        .handle_block(block_number, block);
        TributaryDb::set_last_handled_tributary_block(&mut txn, self.set, block_number, block_hash);
        last_block_number = block_number;
        last_block_hash = block_hash;
        txn.commit();

        made_progess = true;
      }

      Ok(made_progess)
    }
  }
}

@@ -1,338 +0,0 @@
use core::{ops::Deref, fmt::Debug};
use std::io;

use zeroize::Zeroizing;
use rand_core::{RngCore, CryptoRng};

use blake2::{digest::typenum::U32, Digest, Blake2b};
use ciphersuite::{
  group::{ff::Field, GroupEncoding},
  Ciphersuite, Ristretto,
};
use schnorr::SchnorrSignature;

use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};

use serai_client::{primitives::SeraiAddress, validator_sets::primitives::MAX_KEY_SHARES_PER_SET};

use messages::sign::VariantSignId;

use tributary::{
  ReadWrite,
  transaction::{
    Signed as TributarySigned, TransactionError, TransactionKind, Transaction as TransactionTrait,
  },
};

/// The round this data is for, within a signing protocol.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
pub enum SigningProtocolRound {
  /// A preprocess.
  Preprocess,
  /// A signature share.
  Share,
}

impl SigningProtocolRound {
  fn nonce(&self) -> u32 {
    match self {
      SigningProtocolRound::Preprocess => 0,
      SigningProtocolRound::Share => 1,
    }
  }
}

/// `tributary::Signed` but without the nonce.
///
/// All of our nonces are deterministic to the type of transaction and fields within.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Signed {
  /// The signer.
  pub signer: <Ristretto as Ciphersuite>::G,
  /// The signature.
  pub signature: SchnorrSignature<Ristretto>,
}

impl BorshSerialize for Signed {
  fn serialize<W: io::Write>(&self, writer: &mut W) -> Result<(), io::Error> {
    writer.write_all(self.signer.to_bytes().as_ref())?;
    self.signature.write(writer)
  }
}
impl BorshDeserialize for Signed {
  fn deserialize_reader<R: io::Read>(reader: &mut R) -> Result<Self, io::Error> {
    let signer = Ristretto::read_G(reader)?;
    let signature = SchnorrSignature::read(reader)?;
    Ok(Self { signer, signature })
  }
}

impl Signed {
  /// Provide a nonce to convert a `Signed` into a `tributary::Signed`.
  fn nonce(&self, nonce: u32) -> TributarySigned {
    TributarySigned { signer: self.signer, nonce, signature: self.signature }
  }
}

/// The Tributary transaction definition used by Serai
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub enum Transaction {
  /// A vote to remove a participant for invalid behavior
  RemoveParticipant {
    /// The participant to remove
    participant: SeraiAddress,
    /// The transaction's signer and signature
    signed: Signed,
  },

  /// A participation in the DKG
  DkgParticipation {
    participation: Vec<u8>,
    /// The transaction's signer and signature
    signed: Signed,
  },
  /// The preprocess to confirm the DKG results on-chain
  DkgConfirmationPreprocess {
    /// The attempt number of this signing protocol
    attempt: u32,
    // The preprocess
    preprocess: [u8; 64],
    /// The transaction's signer and signature
    signed: Signed,
  },
  /// The signature share to confirm the DKG results on-chain
  DkgConfirmationShare {
    /// The attempt number of this signing protocol
    attempt: u32,
    // The signature share
    share: [u8; 32],
    /// The transaction's signer and signature
    signed: Signed,
  },

  /// Intend to co-sign a finalized Substrate block
  ///
  /// When the time comes to start a new co-signing protocol, the most recent Substrate block will
  /// be the one selected to be cosigned.
  Cosign {
    /// The hash of the Substrate block to sign
    substrate_block_hash: [u8; 32],
  },

  /// The cosign for a Substrate block
  ///
  /// After producing this cosign, we need to start work on the latest intended-to-be cosigned
  /// block. That requires agreement on when this cosign was produced, which we solve by embedding
  /// this cosign on chain.
  ///
  /// We ideally don't have this transaction at all. The coordinator, without access to any of the
  /// key shares, could observe the FROST signing session and determine a successful completion.
  /// Unfortunately, that functionality is not present in modular-frost, so we do need to support
  /// *some* asynchronous flow (where the processor or P2P network informs us of the successful
  /// completion).
  ///
  /// If we use a `Provided` transaction, that requires everyone observe this cosign.
  ///
  /// If we use an `Unsigned` transaction, we can't verify the cosign signature inside
  /// `Transaction::verify` unless we embedded the full `SignedCosign` on-chain. The issue is since
  /// a Tributary is stateless with regards to the on-chain logic, including `Transaction::verify`,
  /// we can't verify the signature against the group's public key unless we also include that (but
  /// then we open a DoS where arbitrary group keys are specified to cause inclusion of arbitrary
  /// blobs on chain).
  ///
  /// If we use a `Signed` transaction, we mitigate the DoS risk by having someone to fatally
  /// slash. We have horrible performance though as for 100 validators, all 100 will publish this
  /// transaction.
  ///
  /// We could use a signed `Unsigned` transaction, where it includes a signer and signature but
  /// isn't technically a Signed transaction. This lets us de-duplicate the transaction premised on
  /// its contents.
  ///
  /// The optimal choice is likely to use a `Provided` transaction. We don't actually need to
  /// observe the produced cosign (which is ephemeral). As long as it's agreed the cosign in
  /// question no longer needs to be produced, which would mean the cosigning protocol at-large
  /// cosigning the block in question, it'd be safe to provide this and move on to the next cosign.
  Cosigned { substrate_block_hash: [u8; 32] },

  /// Acknowledge a Substrate block
  ///
  /// This is provided after the block has been cosigned.
  ///
  /// With the acknowledgement of a Substrate block, we can whitelist all the `VariantSignId`s
  /// resulting from its handling.
  SubstrateBlock {
    /// The hash of the Substrate block
    hash: [u8; 32],
  },

  /// Acknowledge a Batch
  ///
  /// Once everyone has acknowledged the Batch, we can begin signing it.
  Batch {
    /// The hash of the Batch's serialization.
    ///
    /// Generally, we refer to a Batch by its ID/the hash of its instructions. Here, we want to
    /// ensure consensus on the Batch, and achieving consensus on its hash is the most effective
    /// way to do that.
    hash: [u8; 32],
  },

  /// Data from a signing protocol.
  Sign {
    /// The ID of the object being signed
    id: VariantSignId,
    /// The attempt number of this signing protocol
    attempt: u32,
    /// The round this data is for, within the signing protocol
    round: SigningProtocolRound,
    /// The data itself
    ///
    /// There will be `n` blobs of data where `n` is the amount of key shares the validator sending
    /// this transaction has.
    data: Vec<Vec<u8>>,
    /// The transaction's signer and signature
    signed: Signed,
  },

  /// The local view of slashes observed by the transaction's sender
  SlashReport {
    /// The slash points accrued by each validator
    slash_points: Vec<u32>,
    /// The transaction's signer and signature
    signed: Signed,
  },
}

impl ReadWrite for Transaction {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    borsh::from_reader(reader)
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    borsh::to_writer(writer, self)
  }
}

impl TransactionTrait for Transaction {
  fn kind(&self) -> TransactionKind {
    match self {
      Transaction::RemoveParticipant { participant, signed } => {
        TransactionKind::Signed((b"RemoveParticipant", participant).encode(), signed.nonce(0))
      }

      Transaction::DkgParticipation { signed, .. } => {
        TransactionKind::Signed(b"DkgParticipation".encode(), signed.nonce(0))
      }
      Transaction::DkgConfirmationPreprocess { attempt, signed, .. } => {
        TransactionKind::Signed((b"DkgConfirmation", attempt).encode(), signed.nonce(0))
      }
      Transaction::DkgConfirmationShare { attempt, signed, .. } => {
        TransactionKind::Signed((b"DkgConfirmation", attempt).encode(), signed.nonce(1))
      }

      Transaction::Cosign { .. } => TransactionKind::Provided("CosignSubstrateBlock"),
      Transaction::Cosigned { .. } => TransactionKind::Provided("Cosigned"),
      Transaction::SubstrateBlock { .. } => TransactionKind::Provided("SubstrateBlock"),
      Transaction::Batch { .. } => TransactionKind::Provided("Batch"),

      Transaction::Sign { id, attempt, round, signed, .. } => {
        TransactionKind::Signed((b"Sign", id, attempt).encode(), signed.nonce(round.nonce()))
      }

      Transaction::SlashReport { signed, .. } => {
        TransactionKind::Signed(b"SlashReport".encode(), signed.nonce(0))
      }
    }
  }

  fn hash(&self) -> [u8; 32] {
    let mut tx = ReadWrite::serialize(self);
    if let TransactionKind::Signed(_, signed) = self.kind() {
      // Make sure the part we're cutting off is the signature
      assert_eq!(tx.drain((tx.len() - 64) ..).collect::<Vec<_>>(), signed.signature.serialize());
    }
    Blake2b::<U32>::digest(&tx).into()
  }

  // This is a stateless verification which we use to enforce some size limits.
  fn verify(&self) -> Result<(), TransactionError> {
    #[allow(clippy::match_same_arms)]
    match self {
      // Fixed-length TX
      Transaction::RemoveParticipant { .. } => {}

      // TODO: MAX_DKG_PARTICIPATION_LEN
      Transaction::DkgParticipation { .. } => {}
      // These are fixed-length TXs
      Transaction::DkgConfirmationPreprocess { .. } | Transaction::DkgConfirmationShare { .. } => {}

      // Provided TXs
      Transaction::Cosign { .. } |
      Transaction::Cosigned { .. } |
      Transaction::SubstrateBlock { .. } |
      Transaction::Batch { .. } => {}

      Transaction::Sign { data, .. } => {
        if data.len() > usize::try_from(MAX_KEY_SHARES_PER_SET).unwrap() {
          Err(TransactionError::InvalidContent)?
        }
        // TODO: MAX_SIGN_LEN
      }

      Transaction::SlashReport { slash_points, .. } => {
        if slash_points.len() > usize::try_from(MAX_KEY_SHARES_PER_SET).unwrap() {
          Err(TransactionError::InvalidContent)?
        }
      }
    };
    Ok(())
  }
}

impl Transaction {
  // Sign a transaction
  //
  // Panics if signing a transaction type which isn't `TransactionKind::Signed`
  pub fn sign<R: RngCore + CryptoRng>(
    &mut self,
    rng: &mut R,
    genesis: [u8; 32],
    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  ) {
    fn signed(tx: &mut Transaction) -> &mut Signed {
      #[allow(clippy::match_same_arms)] // This doesn't make semantic sense here
      match tx {
        Transaction::RemoveParticipant { ref mut signed, .. } |
        Transaction::DkgParticipation { ref mut signed, .. } |
        Transaction::DkgConfirmationPreprocess { ref mut signed, .. } => signed,
        Transaction::DkgConfirmationShare { ref mut signed, .. } => signed,

        Transaction::Cosign { .. } => panic!("signing CosignSubstrateBlock"),
        Transaction::Cosigned { .. } => panic!("signing Cosigned"),
        Transaction::SubstrateBlock { .. } => panic!("signing SubstrateBlock"),
        Transaction::Batch { .. } => panic!("signing Batch"),

        Transaction::Sign { ref mut signed, .. } => signed,

        Transaction::SlashReport { ref mut signed, .. } => signed,
      }
    }

    // Decide the nonce to sign with
    let sig_nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(rng));

    {
      // Set the signer and the nonce
      let signed = signed(self);
      signed.signer = Ristretto::generator() * key.deref();
      signed.signature.R = <Ristretto as Ciphersuite>::generator() * sig_nonce.deref();
    }

    // Get the signature hash (which now includes `R || A` making it valid as the challenge)
    let sig_hash = self.sig_hash(genesis);

    // Sign the signature
    signed(self).signature = SchnorrSignature::<Ristretto>::sign(key, sig_nonce, sig_hash);
  }
}

@@ -8,7 +8,7 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
 publish = false
-rust-version = "1.81"
+rust-version = "1.85"

 [package.metadata.docs.rs]
 all-features = true
@@ -18,8 +18,13 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true

 [dependencies]
-scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
+bitvec = { version = "1", default-features = false, features = ["std"] }
+
+scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
 borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }

+dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] }
+
 serai-client = { path = "../../substrate/client", version = "0.1", default-features = false, features = ["serai", "borsh"] }

 log = { version = "0.4", default-features = false, features = ["std"] }
@@ -1,6 +1,6 @@
 AGPL-3.0-only license

-Copyright (c) 2023-2024 Luke Parker
+Copyright (c) 2023-2025 Luke Parker

 This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU Affero General Public License Version 3 as
@@ -1,6 +1,6 @@
-# Serai Coordinate Substrate Scanner
+# Serai Coordinator Substrate

-This is the scanner of the Serai blockchain for the purposes of Serai's coordinator.
+This crate manages the Serai coordinator's interactions with Serai's Substrate blockchain.

 Two event streams are defined:

@@ -12,3 +12,9 @@ Two event streams are defined:
 The canonical event stream is available without provision of a validator's public key. The ephemeral
 event stream requires provision of a validator's public key. Both are ordered within themselves, yet
 there are no ordering guarantees across the two.
+
+Additionally, a collection of tasks are defined to publish data onto Serai:
+
+- `SetKeysTask`, which sets the keys generated via DKGs onto Serai.
+- `PublishBatchTask`, which publishes `Batch`s onto Serai.
+- `PublishSlashReportTask`, which publishes `SlashReport`s onto Serai.
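
The following is an illustrative sketch of wiring the two event streams together, not code from
this crate: the crate path, the use of separate database handles, and the surrounding function are
assumptions, and only the `CanonicalEventStream::new` and `EphemeralEventStream::new` signatures
shown in the hunks above are taken as given.

```rust
use std::sync::Arc;

use serai_db::Db;
use serai_client::{primitives::SeraiAddress, Serai};

// Assumed crate path for this crate's exports
use serai_coordinator_substrate::{CanonicalEventStream, EphemeralEventStream};

// Builds both event streams. Each stream documents that only one instance may exist over the
// provided database, so this sketch takes distinct handles to sidestep that question.
fn build_event_streams<D: Db>(
  canonical_db: D,
  ephemeral_db: D,
  serai: Arc<Serai>,
  validator: SeraiAddress,
) -> (CanonicalEventStream<D>, EphemeralEventStream<D>) {
  // The canonical stream requires no validator key
  let canonical = CanonicalEventStream::new(canonical_db, serai.clone());
  // The ephemeral stream is scoped to the provided validator's key
  let ephemeral = EphemeralEventStream::new(ephemeral_db, serai, validator);
  // Both implement `ContinuallyRan`, so each would then be driven by the coordinator's task
  // machinery (`continually_run`) rather than polled manually
  (canonical, ephemeral)
}
```
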
@@ -1,8 +1,9 @@
-use std::future::Future;
+use core::future::Future;
+use std::sync::Arc;

 use futures::stream::{StreamExt, FuturesOrdered};

-use serai_client::Serai;
+use serai_client::{validator_sets::primitives::ExternalValidatorSet, Serai};

 use messages::substrate::{InInstructionResult, ExecutedBatch, CoordinatorMessage};

@@ -20,20 +21,22 @@ create_db!(
 /// The event stream for canonical events.
 pub struct CanonicalEventStream<D: Db> {
   db: D,
-  serai: Serai,
+  serai: Arc<Serai>,
 }

 impl<D: Db> CanonicalEventStream<D> {
   /// Create a new canonical event stream.
   ///
   /// Only one of these may exist over the provided database.
-  pub fn new(db: D, serai: Serai) -> Self {
+  pub fn new(db: D, serai: Arc<Serai>) -> Self {
     Self { db, serai }
   }
 }

 impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
-  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
+  type Error = String;
+
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
       let next_block = NextBlock::get(&self.db).unwrap_or(0);
       let latest_finalized_block =
@@ -149,6 +152,7 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
         else {
           panic!("SetRetired event wasn't a SetRetired event: {set_retired:?}");
         };
+        let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
         crate::Canonical::send(
           &mut txn,
           set.network,
@@ -156,7 +160,7 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
         );
       }

-      for network in serai_client::primitives::NETWORKS {
+      for network in serai_client::primitives::EXTERNAL_NETWORKS {
        let mut batch = None;
        for this_batch in &block.batch_events {
          let serai_client::in_instructions::InInstructionsEvent::Batch {
@@ -177,7 +181,7 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
          batch = Some(ExecutedBatch {
            id: *id,
            publisher: *publishing_session,
-            external_network_block_hash: *external_network_block_hash,
+            external_network_block_hash: external_network_block_hash.0,
            in_instructions_hash: *in_instructions_hash,
            in_instruction_results: in_instruction_results
              .iter()
@@ -198,7 +202,7 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
          let serai_client::coins::CoinsEvent::BurnWithInstruction { from: _, instruction } =
            &burn
          else {
-            panic!("Burn event wasn't a Burn.in event: {burn:?}");
+            panic!("BurnWithInstruction event wasn't a BurnWithInstruction event: {burn:?}");
          };
          if instruction.balance.coin.network() == network {
            burns.push(instruction.clone());
@@ -1,10 +1,11 @@
-use std::future::Future;
+use core::future::Future;
+use std::sync::Arc;

 use futures::stream::{StreamExt, FuturesOrdered};

 use serai_client::{
-  primitives::{PublicKey, NetworkId, EmbeddedEllipticCurve},
-  validator_sets::primitives::MAX_KEY_SHARES_PER_SET,
+  primitives::{SeraiAddress, EmbeddedEllipticCurve},
+  validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet},
   Serai,
 };

@@ -24,21 +25,23 @@ create_db!(
 /// The event stream for ephemeral events.
 pub struct EphemeralEventStream<D: Db> {
   db: D,
-  serai: Serai,
-  validator: PublicKey,
+  serai: Arc<Serai>,
+  validator: SeraiAddress,
 }

 impl<D: Db> EphemeralEventStream<D> {
   /// Create a new ephemeral event stream.
   ///
   /// Only one of these may exist over the provided database.
-  pub fn new(db: D, serai: Serai, validator: PublicKey) -> Self {
+  pub fn new(db: D, serai: Arc<Serai>, validator: SeraiAddress) -> Self {
     Self { db, serai, validator }
   }
 }

 impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
-  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
+  type Error = String;
+
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
       let next_block = NextBlock::get(&self.db).unwrap_or(0);
       let latest_finalized_block =
@@ -127,21 +130,22 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
        let serai_client::validator_sets::ValidatorSetsEvent::NewSet { set } = &new_set else {
          panic!("NewSet event wasn't a NewSet event: {new_set:?}");
        };

        // We only coordinate over external networks
-        if set.network == NetworkId::Serai {
-          continue;
-        }
+        let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };

        let serai = self.serai.as_of(block.block_hash);
        let serai = serai.validator_sets();
        let Some(validators) =
-          serai.participants(set.network).await.map_err(|e| format!("{e:?}"))?
+          serai.participants(set.network.into()).await.map_err(|e| format!("{e:?}"))?
        else {
          Err(format!(
            "block #{block_number} declared a new set but didn't have the participants"
          ))?
        };
+        let validators = validators
+          .into_iter()
+          .map(|(validator, weight)| (SeraiAddress::from(validator), weight))
+          .collect::<Vec<_>>();
        let in_set = validators.iter().any(|(validator, _)| *validator == self.validator);
        if in_set {
          if u16::try_from(validators.len()).is_err() {
@@ -156,8 +160,9 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
            Err("validator's weight exceeded u16::MAX".to_string())?
          };

+          // Do the summation in u32 so we don't risk a u16 overflow
          let total_weight = validators.iter().map(|(_, weight)| u32::from(*weight)).sum::<u32>();
-          if total_weight > MAX_KEY_SHARES_PER_SET {
+          if total_weight > u32::from(MAX_KEY_SHARES_PER_SET) {
            Err(format!(
              "{set:?} has {total_weight} key shares when the max is {MAX_KEY_SHARES_PER_SET}"
            ))?;
@@ -173,14 +178,16 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
|
|||||||
embedded_elliptic_curve_keys.push_back(async move {
|
embedded_elliptic_curve_keys.push_back(async move {
|
||||||
tokio::try_join!(
|
tokio::try_join!(
|
||||||
// One future to fetch the substrate embedded key
|
// One future to fetch the substrate embedded key
|
||||||
serai
|
serai.embedded_elliptic_curve_key(
|
||||||
.embedded_elliptic_curve_key(validator, EmbeddedEllipticCurve::Embedwards25519),
|
validator.into(),
|
||||||
|
EmbeddedEllipticCurve::Embedwards25519
|
||||||
|
),
|
||||||
// One future to fetch the external embedded key, if there is a distinct curve
|
// One future to fetch the external embedded key, if there is a distinct curve
|
||||||
async {
|
async {
|
||||||
// `embedded_elliptic_curves` is documented to have the second entry be the
|
// `embedded_elliptic_curves` is documented to have the second entry be the
|
||||||
// network-specific curve (if it exists and is distinct from Embedwards25519)
|
// network-specific curve (if it exists and is distinct from Embedwards25519)
|
||||||
if let Some(curve) = set.network.embedded_elliptic_curves().get(1) {
|
if let Some(curve) = set.network.embedded_elliptic_curves().get(1) {
|
||||||
serai.embedded_elliptic_curve_key(validator, *curve).await.map(Some)
|
serai.embedded_elliptic_curve_key(validator.into(), *curve).await.map(Some)
|
||||||
} else {
|
} else {
|
||||||
Ok(None)
|
Ok(None)
|
||||||
}
|
}
|
||||||
@@ -211,19 +218,22 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
crate::NewSet::send(
|
let mut new_set = NewSetInformation {
|
||||||
&mut txn,
|
set,
|
||||||
&NewSetInformation {
|
serai_block: block.block_hash,
|
||||||
set: *set,
|
declaration_time: block.time,
|
||||||
serai_block: block.block_hash,
|
// TODO: This should be inlined into the Processor's key gen code
|
||||||
start_time: block.time,
|
// It's legacy from when we removed participants from the key gen
|
||||||
// TODO: Why do we have this as an explicit field here?
|
threshold: ((total_weight * 2) / 3) + 1,
|
||||||
// Shouldn't thiis be inlined into the Processor's key gen code, where it's used?
|
validators,
|
||||||
threshold: ((total_weight * 2) / 3) + 1,
|
evrf_public_keys,
|
||||||
validators,
|
participant_indexes: Default::default(),
|
||||||
evrf_public_keys,
|
participant_indexes_reverse_lookup: Default::default(),
|
||||||
},
|
};
|
||||||
);
|
// These aren't serialized, and we immediately serialize and drop this, so this isn't
|
||||||
|
// necessary. It's just good practice not have this be dirty
|
||||||
|
new_set.init_participant_indexes();
|
||||||
|
crate::NewSet::send(&mut txn, &new_set);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -233,6 +243,7 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
|
|||||||
else {
|
else {
|
||||||
panic!("AcceptedHandover event wasn't a AcceptedHandover event: {accepted_handover:?}");
|
panic!("AcceptedHandover event wasn't a AcceptedHandover event: {accepted_handover:?}");
|
||||||
};
|
};
|
||||||
|
let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
|
||||||
crate::SignSlashReport::send(&mut txn, set);
|
crate::SignSlashReport::send(&mut txn, set);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
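The `threshold: ((total_weight * 2) / 3) + 1` field retained above is the usual BFT threshold over key shares: strictly more than two-thirds of the total weight. A standalone check of that arithmetic (an illustrative sketch, not part of the diff):

fn threshold(total_weight: u32) -> u32 {
  ((total_weight * 2) / 3) + 1
}

fn main() {
  assert_eq!(threshold(3), 3);
  assert_eq!(threshold(4), 3);
  assert_eq!(threshold(100), 67);
  // The threshold always exceeds two-thirds of the total weight
  for w in 1 ..= 150 {
    assert!(3 * threshold(w) > 2 * w);
  }
}
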
@@ -1,65 +1,105 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]

+use std::collections::HashMap;
+
 use scale::{Encode, Decode};
-use borsh::{io, BorshSerialize, BorshDeserialize};
+use borsh::{BorshSerialize, BorshDeserialize};
+
+use dkg::Participant;

 use serai_client::{
-  primitives::{PublicKey, NetworkId},
-  validator_sets::primitives::ValidatorSet,
+  primitives::{ExternalNetworkId, SeraiAddress, Signature},
+  validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair, SlashReport},
+  in_instructions::primitives::SignedBatch,
+  Transaction,
 };

 use serai_db::*;

 mod canonical;
+pub use canonical::CanonicalEventStream;
 mod ephemeral;
+pub use ephemeral::EphemeralEventStream;

-fn borsh_serialize_validators<W: io::Write>(
-  validators: &Vec<(PublicKey, u16)>,
-  writer: &mut W,
-) -> Result<(), io::Error> {
-  // This doesn't use `encode_to` as `encode_to` panics if the writer returns an error
-  writer.write_all(&validators.encode())
-}
-
-fn borsh_deserialize_validators<R: io::Read>(
-  reader: &mut R,
-) -> Result<Vec<(PublicKey, u16)>, io::Error> {
-  Decode::decode(&mut scale::IoReader(reader)).map_err(io::Error::other)
-}
+mod set_keys;
+pub use set_keys::SetKeysTask;
+mod publish_batch;
+pub use publish_batch::PublishBatchTask;
+mod publish_slash_report;
+pub use publish_slash_report::PublishSlashReportTask;

 /// The information for a new set.
-#[derive(Debug, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
+#[borsh(init = init_participant_indexes)]
 pub struct NewSetInformation {
-  set: ValidatorSet,
-  serai_block: [u8; 32],
-  start_time: u64,
-  threshold: u16,
-  #[borsh(
-    serialize_with = "borsh_serialize_validators",
-    deserialize_with = "borsh_deserialize_validators"
-  )]
-  validators: Vec<(PublicKey, u16)>,
-  evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
+  /// The set.
+  pub set: ExternalValidatorSet,
+  /// The Serai block which declared it.
+  pub serai_block: [u8; 32],
+  /// The time of the block which declared it, in seconds.
+  pub declaration_time: u64,
+  /// The threshold to use.
+  pub threshold: u16,
+  /// The validators, with the amount of key shares they have.
+  pub validators: Vec<(SeraiAddress, u16)>,
+  /// The eVRF public keys.
+  ///
+  /// This will have the necessary copies of the keys proper for each validator's weight,
+  /// accordingly syncing up with `participant_indexes`.
+  pub evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
+  /// The participant indexes, indexed by their validator.
+  #[borsh(skip)]
+  pub participant_indexes: HashMap<SeraiAddress, Vec<Participant>>,
+  /// The validators, indexed by their participant indexes.
+  #[borsh(skip)]
+  pub participant_indexes_reverse_lookup: HashMap<Participant, SeraiAddress>,
+}
+
+impl NewSetInformation {
+  fn init_participant_indexes(&mut self) {
+    let mut next_i = 1;
+    self.participant_indexes = HashMap::with_capacity(self.validators.len());
+    self.participant_indexes_reverse_lookup = HashMap::with_capacity(self.validators.len());
+    for (validator, weight) in &self.validators {
+      let mut these_is = Vec::with_capacity((*weight).into());
+      for _ in 0 .. *weight {
+        let this_i = Participant::new(next_i).unwrap();
+        next_i += 1;

+        these_is.push(this_i);
+        self.participant_indexes_reverse_lookup.insert(this_i, *validator);
+      }
+      self.participant_indexes.insert(*validator, these_is);
+    }
+  }
 }

 mod _public_db {
-  use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};
+  use super::*;

-  use serai_db::*;
-
-  use crate::NewSetInformation;
-
   db_channel!(
     CoordinatorSubstrate {
       // Canonical messages to send to the processor
-      Canonical: (network: NetworkId) -> messages::substrate::CoordinatorMessage,
+      Canonical: (network: ExternalNetworkId) -> messages::substrate::CoordinatorMessage,

       // Relevant new set, from an ephemeral event stream
       NewSet: () -> NewSetInformation,
-      // Relevant sign slash report, from an ephemeral event stream
-      SignSlashReport: () -> ValidatorSet,
+      // Potentially relevant sign slash report, from an ephemeral event stream
+      SignSlashReport: (set: ExternalValidatorSet) -> (),
+
+      // Signed batches to publish onto the Serai network
+      SignedBatches: (network: ExternalNetworkId) -> SignedBatch,
+    }
+  );
+
+  create_db!(
+    CoordinatorSubstrate {
+      // Keys to set on the Serai network
+      Keys: (network: ExternalNetworkId) -> (Session, Vec<u8>),
+      // Slash reports to publish onto the Serai network
+      SlashReports: (network: ExternalNetworkId) -> (Session, Vec<u8>),
     }
   );
 }
@@ -69,7 +109,7 @@ pub struct Canonical;
 impl Canonical {
   pub(crate) fn send(
     txn: &mut impl DbTxn,
-    network: NetworkId,
+    network: ExternalNetworkId,
     msg: &messages::substrate::CoordinatorMessage,
   ) {
     _public_db::Canonical::send(txn, network, msg);
@@ -77,7 +117,7 @@ impl Canonical {
   /// Try to receive a canonical event, returning `None` if there is none to receive.
   pub fn try_recv(
     txn: &mut impl DbTxn,
-    network: NetworkId,
+    network: ExternalNetworkId,
   ) -> Option<messages::substrate::CoordinatorMessage> {
     _public_db::Canonical::try_recv(txn, network)
   }
@@ -101,12 +141,98 @@ impl NewSet {
 /// notifications for all relevant validator sets will be included.
 pub struct SignSlashReport;
 impl SignSlashReport {
-  pub(crate) fn send(txn: &mut impl DbTxn, set: &ValidatorSet) {
-    _public_db::SignSlashReport::send(txn, set);
+  pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet) {
+    _public_db::SignSlashReport::send(txn, set, &());
   }
   /// Try to receive a notification to sign a slash report, returning `None` if there is none to
   /// receive.
-  pub fn try_recv(txn: &mut impl DbTxn) -> Option<ValidatorSet> {
-    _public_db::SignSlashReport::try_recv(txn)
+  pub fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<()> {
+    _public_db::SignSlashReport::try_recv(txn, set)
+  }
+}
+
+/// The keys to set on Serai.
+pub struct Keys;
+impl Keys {
+  /// Set the keys to report for a validator set.
+  ///
+  /// This only saves the most recent keys as only a single session is eligible to have its keys
+  /// reported at once.
+  pub fn set(
+    txn: &mut impl DbTxn,
+    set: ExternalValidatorSet,
+    key_pair: KeyPair,
+    signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
+    signature: Signature,
+  ) {
+    // If we have a more recent pair of keys, don't write this historic one
+    if let Some((existing_session, _)) = _public_db::Keys::get(txn, set.network) {
+      if existing_session.0 >= set.session.0 {
+        return;
+      }
+    }
+
+    let tx = serai_client::validator_sets::SeraiValidatorSets::set_keys(
+      set.network,
+      key_pair,
+      signature_participants,
+      signature,
+    );
+    _public_db::Keys::set(txn, set.network, &(set.session, tx.encode()));
+  }
+  pub(crate) fn take(
+    txn: &mut impl DbTxn,
+    network: ExternalNetworkId,
+  ) -> Option<(Session, Transaction)> {
+    let (session, tx) = _public_db::Keys::take(txn, network)?;
+    Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
+  }
+}
+
+/// The signed batches to publish onto Serai.
+pub struct SignedBatches;
+impl SignedBatches {
+  /// Send a `SignedBatch` to publish onto Serai.
+  pub fn send(txn: &mut impl DbTxn, batch: &SignedBatch) {
+    _public_db::SignedBatches::send(txn, batch.batch.network, batch);
+  }
+  pub(crate) fn try_recv(txn: &mut impl DbTxn, network: ExternalNetworkId) -> Option<SignedBatch> {
+    _public_db::SignedBatches::try_recv(txn, network)
+  }
+}
+
+/// The slash reports to publish onto Serai.
+pub struct SlashReports;
+impl SlashReports {
+  /// Set the slashes to report for a validator set.
+  ///
+  /// This only saves the most recent slashes as only a single session is eligible to have its
+  /// slashes reported at once.
+  pub fn set(
+    txn: &mut impl DbTxn,
+    set: ExternalValidatorSet,
+    slash_report: SlashReport,
+    signature: Signature,
+  ) {
+    // If we have a more recent slash report, don't write this historic one
+    if let Some((existing_session, _)) = _public_db::SlashReports::get(txn, set.network) {
+      if existing_session.0 >= set.session.0 {
+        return;
+      }
+    }
+
+    let tx = serai_client::validator_sets::SeraiValidatorSets::report_slashes(
+      set.network,
+      slash_report,
+      signature,
+    );
+    _public_db::SlashReports::set(txn, set.network, &(set.session, tx.encode()));
+  }
+  pub(crate) fn take(
+    txn: &mut impl DbTxn,
+    network: ExternalNetworkId,
+  ) -> Option<(Session, Transaction)> {
+    let (session, tx) = _public_db::SlashReports::take(txn, network)?;
+    Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
   }
 }
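`init_participant_indexes` above hands each validator one contiguous, 1-based participant index per key share, plus a reverse lookup from index to validator. A standalone sketch of that assignment, with plain `u16` indexes and string labels standing in for the crate's `Participant` and `SeraiAddress` types:

use std::collections::HashMap;

fn main() {
  let validators = [("alice", 2u16), ("bob", 1), ("carol", 3)];

  let mut next_i = 1u16;
  let mut indexes: HashMap<&str, Vec<u16>> = HashMap::new();
  let mut reverse: HashMap<u16, &str> = HashMap::new();
  for (validator, weight) in validators {
    let mut these = Vec::with_capacity(weight.into());
    for _ in 0 .. weight {
      these.push(next_i);
      reverse.insert(next_i, validator);
      next_i += 1;
    }
    indexes.insert(validator, these);
  }

  // Each key share gets its own contiguous, 1-based index
  assert_eq!(indexes["alice"], vec![1, 2]);
  assert_eq!(indexes["bob"], vec![3]);
  assert_eq!(indexes["carol"], vec![4, 5, 6]);
  assert_eq!(reverse[&5], "carol");
}
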
coordinator/substrate/src/publish_batch.rs (new file, 87 lines)

use core::future::Future;
use std::sync::Arc;

#[rustfmt::skip]
use serai_client::{primitives::ExternalNetworkId, in_instructions::primitives::SignedBatch, SeraiError, Serai};

use serai_db::{Get, DbTxn, Db, create_db};
use serai_task::ContinuallyRan;

use crate::SignedBatches;

create_db!(
  CoordinatorSubstrate {
    LastPublishedBatch: (network: ExternalNetworkId) -> u32,
    BatchesToPublish: (network: ExternalNetworkId, batch: u32) -> SignedBatch,
  }
);

/// Publish `SignedBatch`s from `SignedBatches` onto Serai.
pub struct PublishBatchTask<D: Db> {
  db: D,
  serai: Arc<Serai>,
  network: ExternalNetworkId,
}

impl<D: Db> PublishBatchTask<D> {
  /// Create a task to publish `SignedBatch`s onto Serai.
  pub fn new(db: D, serai: Arc<Serai>, network: ExternalNetworkId) -> Self {
    Self { db, serai, network }
  }
}

impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
  type Error = SeraiError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      // Read from SignedBatches, which is sequential, into our own mapping
      loop {
        let mut txn = self.db.txn();
        let Some(batch) = SignedBatches::try_recv(&mut txn, self.network) else {
          break;
        };

        // If this is a Batch not yet published, save it into our unordered mapping
        if LastPublishedBatch::get(&txn, self.network) < Some(batch.batch.id) {
          BatchesToPublish::set(&mut txn, self.network, batch.batch.id, &batch);
        }

        txn.commit();
      }

      // Synchronize our last published batch with the Serai network's
      let next_to_publish = {
        // This uses the latest finalized block, not the latest cosigned block, which should be
        // fine as in the worst case, the only impact is no longer attempting TX publication
        let serai = self.serai.as_of_latest_finalized_block().await?;
        let last_batch = serai.in_instructions().last_batch_for_network(self.network).await?;

        let mut txn = self.db.txn();
        let mut our_last_batch = LastPublishedBatch::get(&txn, self.network);
        while our_last_batch < last_batch {
          let next_batch = our_last_batch.map(|batch| batch + 1).unwrap_or(0);
          // Clean up the Batch to publish since it's already been published
          BatchesToPublish::take(&mut txn, self.network, next_batch);
          our_last_batch = Some(next_batch);
        }
        if let Some(last_batch) = our_last_batch {
          LastPublishedBatch::set(&mut txn, self.network, &last_batch);
        }
        last_batch.map(|batch| batch + 1).unwrap_or(0)
      };

      let made_progress =
        if let Some(batch) = BatchesToPublish::get(&self.db, self.network, next_to_publish) {
          self
            .serai
            .publish(&serai_client::in_instructions::SeraiInInstructions::execute_batch(batch))
            .await?;
          true
        } else {
          false
        };
      Ok(made_progress)
    }
  }
}
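`PublishBatchTask` reports `Ok(true)` when it published a batch and `Ok(false)` when there was nothing to do, so its runner can decide whether to re-run immediately or back off. A minimal sketch of that polling pattern, using a stand-in trait with the same `run_iteration` shape (the real `serai_task` runner and its backoff policy may differ; assumes tokio with the `macros` and `time` features):

use core::future::Future;
use std::time::Duration;

trait Task {
  type Error: core::fmt::Debug;
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>>;
}

struct Counter(u32);
impl Task for Counter {
  type Error = String;
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      self.0 += 1;
      // Report progress for the first three iterations only
      Ok(self.0 <= 3)
    }
  }
}

#[tokio::main]
async fn main() {
  let mut task = Counter(0);
  loop {
    match task.run_iteration().await {
      // Made progress: run again immediately
      Ok(true) => {}
      // No progress or an error: back off before retrying
      Ok(false) | Err(_) => {
        if task.0 >= 5 {
          break;
        }
        tokio::time::sleep(Duration::from_millis(10)).await;
      }
    }
  }
}
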
coordinator/substrate/src/publish_slash_report.rs (new file, 101 lines)

use core::future::Future;
use std::sync::Arc;

use serai_db::{DbTxn, Db};

use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::Session, Serai};

use serai_task::ContinuallyRan;

use crate::SlashReports;

/// Publish slash reports from `SlashReports` onto Serai.
pub struct PublishSlashReportTask<D: Db> {
  db: D,
  serai: Arc<Serai>,
}

impl<D: Db> PublishSlashReportTask<D> {
  /// Create a task to publish slash reports onto Serai.
  pub fn new(db: D, serai: Arc<Serai>) -> Self {
    Self { db, serai }
  }
}

impl<D: Db> PublishSlashReportTask<D> {
  // Returns if a slash report was successfully published
  async fn publish(&mut self, network: ExternalNetworkId) -> Result<bool, String> {
    let mut txn = self.db.txn();
    let Some((session, slash_report)) = SlashReports::take(&mut txn, network) else {
      // No slash report to publish
      return Ok(false);
    };

    // This uses the latest finalized block, not the latest cosigned block, which should be
    // fine as in the worst case, the only impact is no longer attempting TX publication
    let serai = self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
    let serai = serai.validator_sets();
    let session_after_slash_report = Session(session.0 + 1);
    let current_session = serai.session(network.into()).await.map_err(|e| format!("{e:?}"))?;
    let current_session = current_session.map(|session| session.0);
    // Only attempt to publish the slash report for session #n while session #n+1 is still
    // active
    let session_after_slash_report_retired = current_session > Some(session_after_slash_report.0);
    if session_after_slash_report_retired {
      // Commit the txn to drain this slash report from the database and not try it again later
      txn.commit();
      return Ok(false);
    }

    if Some(session_after_slash_report.0) != current_session {
      // We already checked the current session wasn't greater, and they're not equal
      assert!(current_session < Some(session_after_slash_report.0));
      // This would mean the Serai node is resyncing and is behind where it prior was
      Err("have a slash report for a session Serai has yet to retire".to_string())?;
    }

    // If this session which should publish a slash report already has, move on
    let key_pending_slash_report =
      serai.key_pending_slash_report(network).await.map_err(|e| format!("{e:?}"))?;
    if key_pending_slash_report.is_none() {
      txn.commit();
      return Ok(false);
    };

    match self.serai.publish(&slash_report).await {
      Ok(()) => {
        txn.commit();
        Ok(true)
      }
      // This could be specific to this TX (such as an already in mempool error) and it may be
      // worthwhile to continue iteration with the other pending slash reports. We assume this
      // error ephemeral and that the latency incurred for this ephemeral error to resolve is
      // miniscule compared to the window available to publish the slash report. That makes
      // this a non-issue.
      Err(e) => Err(format!("couldn't publish slash report transaction: {e:?}")),
    }
  }
}

impl<D: Db> ContinuallyRan for PublishSlashReportTask<D> {
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;
      let mut error = None;
      for network in serai_client::primitives::EXTERNAL_NETWORKS {
        let network_res = self.publish(network).await;
        // We made progress if any network successfully published their slash report
        made_progress |= network_res == Ok(true);
        // We want to yield the first error *after* attempting for every network
        error = error.or(network_res.err());
      }
      // Yield the error
      if let Some(error) = error {
        Err(error)?
      }
      Ok(made_progress)
    }
  }
}
coordinator/substrate/src/set_keys.rs (new file, 86 lines)

use core::future::Future;
use std::sync::Arc;

use serai_db::{DbTxn, Db};

use serai_client::{validator_sets::primitives::ExternalValidatorSet, Serai};

use serai_task::ContinuallyRan;

use crate::Keys;

/// Set keys from `Keys` on Serai.
pub struct SetKeysTask<D: Db> {
  db: D,
  serai: Arc<Serai>,
}

impl<D: Db> SetKeysTask<D> {
  /// Create a task to publish slash reports onto Serai.
  pub fn new(db: D, serai: Arc<Serai>) -> Self {
    Self { db, serai }
  }
}

impl<D: Db> ContinuallyRan for SetKeysTask<D> {
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;
      for network in serai_client::primitives::EXTERNAL_NETWORKS {
        let mut txn = self.db.txn();
        let Some((session, keys)) = Keys::take(&mut txn, network) else {
          // No keys to set
          continue;
        };

        // This uses the latest finalized block, not the latest cosigned block, which should be
        // fine as in the worst case, the only impact is no longer attempting TX publication
        let serai =
          self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
        let serai = serai.validator_sets();
        let current_session = serai.session(network.into()).await.map_err(|e| format!("{e:?}"))?;
        let current_session = current_session.map(|session| session.0);
        // Only attempt to set these keys if this isn't a retired session
        if Some(session.0) < current_session {
          // Commit the txn to take these keys from the database and not try it again later
          txn.commit();
          continue;
        }

        if Some(session.0) != current_session {
          // We already checked the current session wasn't greater, and they're not equal
          assert!(current_session < Some(session.0));
          // This would mean the Serai node is resyncing and is behind where it prior was
          Err("have a keys for a session Serai has yet to start".to_string())?;
        }

        // If this session already has had its keys set, move on
        if serai
          .keys(ExternalValidatorSet { network, session })
          .await
          .map_err(|e| format!("{e:?}"))?
          .is_some()
        {
          txn.commit();
          continue;
        };

        match self.serai.publish(&keys).await {
          Ok(()) => {
            txn.commit();
            made_progress = true;
          }
          // This could be specific to this TX (such as an already in mempool error) and it may be
          // worthwhile to continue iteration with the other pending slash reports. We assume this
          // error ephemeral and that the latency incurred for this ephemeral error to resolve is
          // miniscule compared to the window reasonable to set the keys. That makes this a
          // non-issue.
          Err(e) => Err(format!("couldn't publish set keys transaction: {e:?}"))?,
        }
      }
      Ok(made_progress)
    }
  }
}
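The session gating in `SetKeysTask` (and in `PublishSlashReportTask` above) compares `Option<u32>` values directly, relying on `Option`'s derived ordering in which `None` sorts before any `Some`. A quick standalone check of that behaviour:

fn main() {
  assert!(None < Some(0u32));
  assert!(Some(1u32) < Some(2u32));
  // `Some(session.0) < current_session` is false while the chain is still on an
  // earlier (or no) session, so the keys aren't discarded prematurely
  let session = 2u32;
  let current_session: Option<u32> = Some(1);
  assert!(!(Some(session) < current_session));
}
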
coordinator/tributary-sdk/Cargo.toml (new file, 50 lines)

[package]
name = "tributary-sdk"
version = "0.1.0"
description = "A micro-blockchain to provide consensus and ordering to P2P communication"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tributary-sdk"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.85"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
thiserror = { version = "2", default-features = false, features = ["std"] }

subtle = { version = "^2", default-features = false, features = ["std"] }
zeroize = { version = "^1.5", default-features = false, features = ["std"] }

rand = { version = "0.8", default-features = false, features = ["std"] }
rand_chacha = { version = "0.3", default-features = false, features = ["std"] }

blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["std", "recommended"] }

ciphersuite = { path = "../../crypto/ciphersuite", version = "0.4", default-features = false, features = ["std"] }
dalek-ff-group = { path = "../../crypto/dalek-ff-group", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", version = "0.5", default-features = false, features = ["std", "aggregate"] }

hex = { version = "0.4", default-features = false, features = ["std"] }
log = { version = "0.4", default-features = false, features = ["std"] }

serai-db = { path = "../../common/db", version = "0.1" }

scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
futures-util = { version = "0.3", default-features = false, features = ["std", "sink", "channel"] }
futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }
tendermint = { package = "tendermint-machine", path = "./tendermint", version = "0.2" }

tokio = { version = "1", default-features = false, features = ["sync", "time", "rt"] }

[dev-dependencies]
tokio = { version = "1", features = ["macros"] }

[features]
tests = []
coordinator/tributary-sdk/LICENSE (new file, 15 lines)

AGPL-3.0-only license

Copyright (c) 2023-2025 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
coordinator/tributary-sdk/README.md (new file, 3 lines)

# Tributary

A verifiable, ordered broadcast layer implemented as a BFT micro-blockchain.
@@ -1,6 +1,7 @@
 use std::collections::{VecDeque, HashSet};

-use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
+use dalek_ff_group::Ristretto;
+use ciphersuite::{group::GroupEncoding, *};

 use serai_db::{Get, DbTxn, Db};

@@ -20,7 +21,7 @@ pub(crate) struct Blockchain<D: Db, T: TransactionTrait> {

   block_number: u64,
   tip: [u8; 32],
-  participants: HashSet<<Ristretto as Ciphersuite>::G>,
+  participants: HashSet<[u8; 32]>,

   provided: ProvidedTransactions<D, T>,
   mempool: Mempool<D, T>,
@@ -55,7 +56,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
   }
   fn next_nonce_key(
     genesis: &[u8; 32],
-    signer: &<Ristretto as Ciphersuite>::G,
+    signer: &<Ristretto as WrappedGroup>::G,
     order: &[u8],
   ) -> Vec<u8> {
     D::key(
@@ -68,12 +69,15 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
   pub(crate) fn new(
     db: D,
     genesis: [u8; 32],
-    participants: &[<Ristretto as Ciphersuite>::G],
+    participants: &[<Ristretto as WrappedGroup>::G],
   ) -> Self {
     let mut res = Self {
       db: Some(db.clone()),
       genesis,
-      participants: participants.iter().copied().collect(),
+      participants: participants
+        .iter()
+        .map(<<Ristretto as WrappedGroup>::G as GroupEncoding>::to_bytes)
+        .collect(),

       block_number: 0,
       tip: genesis,
@@ -172,7 +176,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {

     self.mempool.add::<N, _>(
       |signer, order| {
-        if self.participants.contains(&signer) {
+        if self.participants.contains(&signer.to_bytes()) {
           Some(
             db.get(Self::next_nonce_key(&self.genesis, &signer, &order))
               .map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())),
@@ -195,13 +199,13 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {

   pub(crate) fn next_nonce(
     &self,
-    signer: &<Ristretto as Ciphersuite>::G,
+    signer: &<Ristretto as WrappedGroup>::G,
     order: &[u8],
   ) -> Option<u32> {
     if let Some(next_nonce) = self.mempool.next_nonce_in_mempool(signer, order.to_vec()) {
       return Some(next_nonce);
     }
-    if self.participants.contains(signer) {
+    if self.participants.contains(&signer.to_bytes()) {
       Some(
         self
           .db
@@ -250,7 +254,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
       self.tip,
       self.provided.transactions.clone(),
       &mut |signer, order| {
-        if self.participants.contains(signer) {
+        if self.participants.contains(&signer.to_bytes()) {
           let key = Self::next_nonce_key(&self.genesis, signer, order);
           let next = txn
             .get(&key)
coordinator/tributary-sdk/src/lib.rs (new file, 389 lines)

use core::{marker::PhantomData, fmt::Debug, future::Future};
use std::{sync::Arc, io};

use zeroize::Zeroizing;

use ciphersuite::*;
use dalek_ff_group::Ristretto;

use scale::Decode;
use futures_channel::mpsc::UnboundedReceiver;
use futures_util::{StreamExt, SinkExt};
use ::tendermint::{
  ext::{BlockNumber, Commit, Block as BlockTrait, Network},
  SignedMessageFor, SyncedBlock, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender,
  TendermintMachine, TendermintHandle,
};

pub use ::tendermint::Evidence;

use serai_db::Db;

use tokio::sync::RwLock;

mod merkle;
pub(crate) use merkle::*;

pub mod transaction;
pub use transaction::{TransactionError, Signed, TransactionKind, Transaction as TransactionTrait};

use crate::tendermint::tx::TendermintTx;

mod provided;
pub(crate) use provided::*;
pub use provided::ProvidedError;

mod block;
pub use block::*;

mod blockchain;
pub(crate) use blockchain::*;

mod mempool;
pub(crate) use mempool::*;

pub mod tendermint;
pub(crate) use crate::tendermint::*;

#[cfg(any(test, feature = "tests"))]
pub mod tests;

/// Size limit for an individual transaction.
// This needs to be big enough to participate in a 101-of-150 eVRF DKG with each element taking
// `MAX_KEY_LEN`. This also needs to be big enough to pariticpate in signing 520 Bitcoin inputs
// with 49 key shares, and signing 120 Monero inputs with 49 key shares.
// TODO: Add a test for these properties
pub const TRANSACTION_SIZE_LIMIT: usize = 2_000_000;
/// Amount of transactions a single account may have in the mempool.
pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
/// Block size limit.
// This targets a growth limit of roughly 30 GB a day, under load, in order to prevent a malicious
// participant from flooding disks and causing out of space errors in order processes.
pub const BLOCK_SIZE_LIMIT: usize = 2_001_000;

pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
pub(crate) const TRANSACTION_MESSAGE: u8 = 1;

#[allow(clippy::large_enum_variant)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Transaction<T: TransactionTrait> {
  Tendermint(TendermintTx),
  Application(T),
}

impl<T: TransactionTrait> ReadWrite for Transaction<T> {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0];
    reader.read_exact(&mut kind)?;
    match kind[0] {
      0 => {
        let tx = TendermintTx::read(reader)?;
        Ok(Transaction::Tendermint(tx))
      }
      1 => {
        let tx = T::read(reader)?;
        Ok(Transaction::Application(tx))
      }
      _ => Err(io::Error::other("invalid transaction type")),
    }
  }
  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      Transaction::Tendermint(tx) => {
        writer.write_all(&[0])?;
        tx.write(writer)
      }
      Transaction::Application(tx) => {
        writer.write_all(&[1])?;
        tx.write(writer)
      }
    }
  }
}

impl<T: TransactionTrait> Transaction<T> {
  pub fn hash(&self) -> [u8; 32] {
    match self {
      Transaction::Tendermint(tx) => tx.hash(),
      Transaction::Application(tx) => tx.hash(),
    }
  }

  pub fn kind(&self) -> TransactionKind {
    match self {
      Transaction::Tendermint(tx) => tx.kind(),
      Transaction::Application(tx) => tx.kind(),
    }
  }
}

/// An item which can be read and written.
pub trait ReadWrite: Sized {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self>;
  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;

  fn serialize(&self) -> Vec<u8> {
    // BlockHeader is 64 bytes and likely the smallest item in this system
    let mut buf = Vec::with_capacity(64);
    self.write(&mut buf).unwrap();
    buf
  }
}

pub trait P2p: 'static + Send + Sync + Clone {
  /// Broadcast a message to all other members of the Tributary with the specified genesis.
  ///
  /// The Tributary will re-broadcast consensus messages on a fixed interval to ensure they aren't
  /// prematurely dropped from the P2P layer. THe P2P layer SHOULD perform content-based
  /// deduplication to ensure a sane amount of load.
  fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) -> impl Send + Future<Output = ()>;
}

impl<P: P2p> P2p for Arc<P> {
  fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) -> impl Send + Future<Output = ()> {
    P::broadcast(self, genesis, msg)
  }
}

#[derive(Clone)]
pub struct Tributary<D: Db, T: TransactionTrait, P: P2p> {
  db: D,

  genesis: [u8; 32],
  network: TendermintNetwork<D, T, P>,

  synced_block: Arc<RwLock<SyncedBlockSender<TendermintNetwork<D, T, P>>>>,
  synced_block_result: Arc<RwLock<SyncedBlockResultReceiver>>,
  messages: Arc<RwLock<MessageSender<TendermintNetwork<D, T, P>>>>,
}

impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
  pub async fn new(
    db: D,
    genesis: [u8; 32],
    start_time: u64,
    key: Zeroizing<<Ristretto as WrappedGroup>::F>,
    validators: Vec<(<Ristretto as WrappedGroup>::G, u64)>,
    p2p: P,
  ) -> Option<Self> {
    log::info!("new Tributary with genesis {}", hex::encode(genesis));

    let validators_vec = validators.iter().map(|validator| validator.0).collect::<Vec<_>>();

    let signer = Arc::new(Signer::new(genesis, key));
    let validators = Arc::new(Validators::new(genesis, validators)?);

    let mut blockchain = Blockchain::new(db.clone(), genesis, &validators_vec);
    let block_number = BlockNumber(blockchain.block_number());

    let start_time = if let Some(commit) = blockchain.commit(&blockchain.tip()) {
      Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time
    } else {
      start_time
    };
    let proposal = TendermintBlock(
      blockchain.build_block::<TendermintNetwork<D, T, P>>(&validators).serialize(),
    );
    let blockchain = Arc::new(RwLock::new(blockchain));

    let network = TendermintNetwork { genesis, signer, validators, blockchain, p2p };

    let TendermintHandle { synced_block, synced_block_result, messages, machine } =
      TendermintMachine::new(
        db.clone(),
        network.clone(),
        genesis,
        block_number,
        start_time,
        proposal,
      )
      .await;
    tokio::spawn(machine.run());

    Some(Self {
      db,
      genesis,
      network,
      synced_block: Arc::new(RwLock::new(synced_block)),
      synced_block_result: Arc::new(RwLock::new(synced_block_result)),
      messages: Arc::new(RwLock::new(messages)),
    })
  }

  pub fn block_time() -> u32 {
    TendermintNetwork::<D, T, P>::block_time()
  }

  pub fn genesis(&self) -> [u8; 32] {
    self.genesis
  }

  pub async fn block_number(&self) -> u64 {
    self.network.blockchain.read().await.block_number()
  }
  pub async fn tip(&self) -> [u8; 32] {
    self.network.blockchain.read().await.tip()
  }

  pub fn reader(&self) -> TributaryReader<D, T> {
    TributaryReader(self.db.clone(), self.genesis, PhantomData)
  }

  pub async fn provide_transaction(&self, tx: T) -> Result<(), ProvidedError> {
    self.network.blockchain.write().await.provide_transaction(tx)
  }

  pub async fn next_nonce(
    &self,
    signer: &<Ristretto as WrappedGroup>::G,
    order: &[u8],
  ) -> Option<u32> {
    self.network.blockchain.read().await.next_nonce(signer, order)
  }

  // Returns Ok(true) if new, Ok(false) if an already present unsigned, or the error.
  // Safe to be &self since the only meaningful usage of self is self.network.blockchain which
  // successfully acquires its own write lock
  pub async fn add_transaction(&self, tx: T) -> Result<bool, TransactionError> {
    let tx = Transaction::Application(tx);
    let mut to_broadcast = vec![TRANSACTION_MESSAGE];
    tx.write(&mut to_broadcast).unwrap();
    let res = self.network.blockchain.write().await.add_transaction::<TendermintNetwork<D, T, P>>(
      true,
      tx,
      &self.network.signature_scheme(),
    );
    if res == Ok(true) {
      self.network.p2p.broadcast(self.genesis, to_broadcast).await;
    }
    res
  }

  async fn sync_block_internal(
    &self,
    block: Block<T>,
    commit: Vec<u8>,
    result: &mut UnboundedReceiver<bool>,
  ) -> bool {
    let (tip, block_number) = {
      let blockchain = self.network.blockchain.read().await;
      (blockchain.tip(), blockchain.block_number())
    };

    if block.header.parent != tip {
      log::debug!("told to sync a block whose parent wasn't our tip");
      return false;
    }

    let block = TendermintBlock(block.serialize());
    let mut commit_ref = commit.as_ref();
    let Ok(commit) = Commit::<Arc<Validators>>::decode(&mut commit_ref) else {
      log::error!("sent an invalidly serialized commit");
      return false;
    };
    // Storage DoS vector. We *could* truncate to solely the relevant portion, trying to save this,
    // yet then we'd have to test the truncation was performed correctly.
    if !commit_ref.is_empty() {
      log::error!("sent an commit with additional data after it");
      return false;
    }
    if !self.network.verify_commit(block.id(), &commit) {
      log::error!("sent an invalid commit");
      return false;
    }

    let number = BlockNumber(block_number + 1);
    self.synced_block.write().await.send(SyncedBlock { number, block, commit }).await.unwrap();
    result.next().await.unwrap()
  }

  // Sync a block.
  // TODO: Since we have a static validator set, we should only need the tail commit?
  pub async fn sync_block(&self, block: Block<T>, commit: Vec<u8>) -> bool {
    let mut result = self.synced_block_result.write().await;
    self.sync_block_internal(block, commit, &mut result).await
  }

  // Return true if the message should be rebroadcasted.
  pub async fn handle_message(&self, msg: &[u8]) -> bool {
    match msg.first() {
      Some(&TRANSACTION_MESSAGE) => {
        let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else {
          log::error!("received invalid transaction message");
          return false;
        };

        // TODO: Sync mempools with fellow peers
        // Can we just rebroadcast transactions not included for at least two blocks?
        let res =
          self.network.blockchain.write().await.add_transaction::<TendermintNetwork<D, T, P>>(
            false,
            tx,
            &self.network.signature_scheme(),
          );
        log::debug!("received transaction message. valid new transaction: {res:?}");
        res == Ok(true)
      }

      Some(&TENDERMINT_MESSAGE) => {
        let Ok(msg) =
          SignedMessageFor::<TendermintNetwork<D, T, P>>::decode::<&[u8]>(&mut &msg[1 ..])
        else {
          log::error!("received invalid tendermint message");
          return false;
        };

        self.messages.write().await.send(msg).await.unwrap();
        false
      }

      _ => false,
    }
  }

  /// Get a Future which will resolve once the next block has been added.
  pub async fn next_block_notification(
    &self,
  ) -> impl Send + Sync + core::future::Future<Output = Result<(), impl Send + Sync>> {
    let (tx, rx) = tokio::sync::oneshot::channel();
    self.network.blockchain.write().await.next_block_notifications.push_back(tx);
    rx
  }
}

#[derive(Clone)]
pub struct TributaryReader<D: Db, T: TransactionTrait>(D, [u8; 32], PhantomData<T>);
impl<D: Db, T: TransactionTrait> TributaryReader<D, T> {
  pub fn genesis(&self) -> [u8; 32] {
    self.1
  }

  // Since these values are static once set, they can be safely read from the database without lock
  // acquisition
  pub fn block(&self, hash: &[u8; 32]) -> Option<Block<T>> {
    Blockchain::<D, T>::block_from_db(&self.0, self.1, hash)
  }
  pub fn commit(&self, hash: &[u8; 32]) -> Option<Vec<u8>> {
    Blockchain::<D, T>::commit_from_db(&self.0, self.1, hash)
  }
  pub fn parsed_commit(&self, hash: &[u8; 32]) -> Option<Commit<Validators>> {
    self.commit(hash).map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap())
  }
  pub fn block_after(&self, hash: &[u8; 32]) -> Option<[u8; 32]> {
    Blockchain::<D, T>::block_after(&self.0, self.1, hash)
  }
  pub fn time_of_block(&self, hash: &[u8; 32]) -> Option<u64> {
    self
      .commit(hash)
      .map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time)
  }

  pub fn locally_provided_txs_in_block(&self, hash: &[u8; 32], order: &str) -> bool {
    Blockchain::<D, T>::locally_provided_txs_in_block(&self.0, &self.1, hash, order)
  }

  // This isn't static, yet can be read with only minor discrepancy risks
  pub fn tip(&self) -> [u8; 32] {
    Blockchain::<D, T>::tip_from_db(&self.0, self.1)
  }
}
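The `P2p` trait above only requires a fire-and-forget `broadcast`. A trivial, illustrative implementation over tokio channels, with the trait re-declared locally so the sketch is self-contained (the real deployment supplies its own networking layer; assumes tokio with the `macros` and `sync` features):

use core::future::Future;
use tokio::sync::mpsc;

trait P2p: 'static + Send + Sync + Clone {
  fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) -> impl Send + Future<Output = ()>;
}

#[derive(Clone)]
struct LocalP2p(Vec<mpsc::UnboundedSender<([u8; 32], Vec<u8>)>>);

impl P2p for LocalP2p {
  fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) -> impl Send + Future<Output = ()> {
    async move {
      for peer in &self.0 {
        // Drop the message if the peer hung up; broadcast is best-effort
        let _ = peer.send((genesis, msg.clone()));
      }
    }
  }
}

#[tokio::main]
async fn main() {
  let (tx, mut rx) = mpsc::unbounded_channel();
  let p2p = LocalP2p(vec![tx]);
  p2p.broadcast([0; 32], b"hello".to_vec()).await;
  assert_eq!(rx.recv().await.unwrap().1, b"hello".to_vec());
}
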
@@ -1,6 +1,7 @@
 use std::collections::HashMap;

-use ciphersuite::{Ciphersuite, Ristretto};
+use dalek_ff_group::Ristretto;
+use ciphersuite::{group::GroupEncoding, *};

 use serai_db::{DbTxn, Db};

@@ -20,9 +21,9 @@ pub(crate) struct Mempool<D: Db, T: TransactionTrait> {
   db: D,
   genesis: [u8; 32],

-  last_nonce_in_mempool: HashMap<(<Ristretto as Ciphersuite>::G, Vec<u8>), u32>,
+  last_nonce_in_mempool: HashMap<([u8; 32], Vec<u8>), u32>,
   txs: HashMap<[u8; 32], Transaction<T>>,
-  txs_per_signer: HashMap<<Ristretto as Ciphersuite>::G, u32>,
+  txs_per_signer: HashMap<[u8; 32], u32>,
 }

 impl<D: Db, T: TransactionTrait> Mempool<D, T> {
@@ -81,6 +82,7 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
       }
       Transaction::Application(tx) => match tx.kind() {
         TransactionKind::Signed(order, Signed { signer, nonce, .. }) => {
+          let signer = signer.to_bytes();
           let amount = *res.txs_per_signer.get(&signer).unwrap_or(&0) + 1;
           res.txs_per_signer.insert(signer, amount);

@@ -106,7 +108,7 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
   // Returns Ok(true) if new, Ok(false) if an already present unsigned, or the error.
   pub(crate) fn add<
     N: Network,
-    F: FnOnce(<Ristretto as Ciphersuite>::G, Vec<u8>) -> Option<u32>,
+    F: FnOnce(<Ristretto as WrappedGroup>::G, Vec<u8>) -> Option<u32>,
   >(
     &mut self,
     blockchain_next_nonce: F,
@@ -139,6 +141,8 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
         };
         let mut next_nonce = blockchain_next_nonce;

+        let signer = signer.to_bytes();
+
         if let Some(mempool_last_nonce) =
           self.last_nonce_in_mempool.get(&(signer, order.clone()))
         {
@@ -178,10 +182,10 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
   // Returns None if the mempool doesn't have a nonce tracked.
   pub(crate) fn next_nonce_in_mempool(
     &self,
-    signer: &<Ristretto as Ciphersuite>::G,
+    signer: &<Ristretto as WrappedGroup>::G,
     order: Vec<u8>,
   ) -> Option<u32> {
-    self.last_nonce_in_mempool.get(&(*signer, order)).copied().map(|nonce| nonce + 1)
+    self.last_nonce_in_mempool.get(&(signer.to_bytes(), order)).copied().map(|nonce| nonce + 1)
   }

   /// Get transactions to include in a block.
@@ -242,6 +246,8 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {

     if let Some(tx) = self.txs.remove(tx) {
       if let TransactionKind::Signed(order, Signed { signer, nonce, .. }) = tx.kind() {
+        let signer = signer.to_bytes();
+
         let amount = *self.txs_per_signer.get(&signer).unwrap() - 1;
         self.txs_per_signer.insert(signer, amount);

@@ -9,7 +9,7 @@ pub(crate) fn merkle(hash_args: &[[u8; 32]]) -> [u8; 32] {
   let zero = [0; 32];
   let mut interim;
   while hashes.len() > 1 {
-    interim = Vec::with_capacity((hashes.len() + 1) / 2);
+    interim = Vec::with_capacity(hashes.len().div_ceil(2));

     let mut i = 0;
     while i < hashes.len() {
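The merkle change swaps the hand-rolled `(hashes.len() + 1) / 2` for `div_ceil(2)`; the two are equivalent for dividing by two, which a quick loop confirms:

fn main() {
  for len in 0usize ..= 1_000 {
    // Both compute the ceiling of len / 2
    assert_eq!((len + 1) / 2, len.div_ceil(2));
  }
}
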
Some files were not shown because too many files have changed in this diff.