Mirror of https://github.com/serai-dex/serai.git (synced 2025-12-10 13:09:24 +00:00)

Compare commits: 3541197aa5...next-polka (173 commits)
Commits:

2fbe925c4d 6aad496d86 d3464cfcb3 8dbea8452d f94b7ca50e 5e39f9bc1e c98d757c0f 6603100c7e
f70fee65b8 0849d60f28 3a792f9ce5 50959fa0e3 2fb90ebe55 b24adcbd14 b791256648 36ac9c56a4
57bf4984f8 87750407de 3ce90c55d9 ff95c58341 98044f93b1 eb04f873d5 af74c318aa d711d8915f
3d549564a8 9a75f92864 30ea9d9a06 c45c973ca1 6e37ac030d e7c759c468 8ec0582237 8d8e8a7a77
028ec3cce0 c49215805f 2ffdd2a01d e1e6e67d4a 6b19780c7b 6100c3ca90 fa0ed4b180 0ea16f9e01
7a314baa9f 9891ccade8 f1f166c168 df4aee2d59 302a43653f d219b77bd0 fce26eaee1 3cfbd9add7
609cf06393 46b1f1b7ec 09113201e7 556d294157 82ca889ed3 cde0f753c2 6ff0ef7aa6 f9e3d1b142
a793aa18ef 5662beeb8a 509bd58f4e 367a5769e8 cb6eb6430a 4f82e5912c ac7af40f2e 264bdd46ca
c52f7634de 21eaa5793d c744a80d80 a34f9f6164 353683cfd2 d4f77159c4 191bf4bdea 06a4824aba
e65a37e639 4653ef4a61 ce08fad931 1866bb7ae3 aff2065c31 7300700108 31874ceeae 012b8fddae
d2f58232c8 49794b6a75 973287d0a1 1b499edfe1 642848bd24 f7fb78bdd6 9c47ef2658 e1b6b638c6
c24768f922 65613750e1 87ee879dea b5603560e8 5818f1a41c 1b781b4b57 94faf098b6 03e45f73cd
63f7e220c0 7d49366373 56f6ba2dac 55ed33d2d1 138a0e9b40 4fc7263ac3 f27fd59fa6 08f6af8bb9
3512b3832d 1164f92ea1 0a3ead0e19 437f0e9a93 cc5d38f1ce 0ce025e0c2 ea66cd0d1a 8b32fba458
e63acf3f67 d373d2a4c9 cbf998ff30 ef07253a27 ffae6753ec a04215bc13 28aea8a442 7b46477ca0
e62b62ddfb a2d8d0fd13 b2b36b17c4 9de8394efa 3cb9432daa 3f5150b3fa d74b00b9e4 224cf4ea21
3955f92cc2 a9b1e5293c 80009ab67f df9fda2971 ca8afb83a1 18a9cf2535 10c126ad92 19305aebc9
be68e27551 d6d96fe8ff 95909d83a4 3bd48974f3 29093715e3 87b4dfc8f3 4db78b1787 02a5f15535
a1ef18a039 bec806230a 8bafeab5b3 3722df7326 ddb8e1398e 2be69b23b1 a82ccadbb0 1ff2934927
cd4ffa862f c0a4d85ae6 55e845fe12 5ea087d177 dd7dc0c1dc c83fbb3e44 befbbbfb84 d0f497dc68
1b755a5d48 e5efcd56ba 5d60b3c2ae ae923b24ff d304cd97e1 2b56dcdf3f 865e351f96 ea275df26c
90804c4c30 46caca2f51 2077e485bb 28dbef8a1c 2216ade8c4
.github/actions/bitcoin/action.yml (vendored) | 4 changed lines

@@ -5,14 +5,14 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: "27.0"
+    default: "30.0"

 runs:
   using: "composite"
   steps:
     - name: Bitcoin Daemon Cache
       id: cache-bitcoind
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
+      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # 4.2.4
       with:
         path: bitcoin.tar.gz
         key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
.github/actions/build-dependencies/action.yml (vendored) | 28 changed lines

@@ -7,6 +7,10 @@ runs:
     - name: Remove unused packages
       shell: bash
       run: |
+        # Ensure the repositories are synced
+        sudo apt update -y
+
+        # Actually perform the removals
         sudo apt remove -y "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
         sudo apt remove -y "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*"
         sudo apt remove -y "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*"
@@ -14,8 +18,9 @@ runs:
         sudo apt remove -y --allow-remove-essential -f shim-signed *python3*
         # This removal command requires the prior removals due to unmet dependencies otherwise
         sudo apt remove -y "*qemu*" "*sql*" "*texinfo*" "*imagemagick*"

         # Reinstall python3 as a general dependency of a functional operating system
-        sudo apt install python3
+        sudo apt install -y python3 --fix-missing
       if: runner.os == 'Linux'

     - name: Remove unused packages
@@ -33,19 +38,23 @@ runs:
       shell: bash
       run: |
         if [ "$RUNNER_OS" == "Linux" ]; then
-          sudo apt install -y ca-certificates protobuf-compiler
+          sudo apt install -y ca-certificates protobuf-compiler libclang-dev
         elif [ "$RUNNER_OS" == "Windows" ]; then
           choco install protoc
         elif [ "$RUNNER_OS" == "macOS" ]; then
-          brew install protobuf
+          brew install protobuf llvm
+          HOMEBREW_ROOT_PATH=/opt/homebrew # Apple Silicon
+          if [ $(uname -m) = "x86_64" ]; then HOMEBREW_ROOT_PATH=/usr/local; fi # Intel
+          ls $HOMEBREW_ROOT_PATH/opt/llvm/lib | grep "libclang.dylib" # Make sure this installed `libclang`
+          echo "DYLD_LIBRARY_PATH=$HOMEBREW_ROOT_PATH/opt/llvm/lib:$DYLD_LIBRARY_PATH" >> "$GITHUB_ENV"
         fi

     - name: Install solc
       shell: bash
       run: |
-        cargo +1.89 install svm-rs --version =0.5.18
-        svm install 0.8.26
-        svm use 0.8.26
+        cargo +1.91.1 install svm-rs --version =0.5.22
+        svm install 0.8.29
+        svm use 0.8.29

     - name: Remove preinstalled Docker
       shell: bash
@@ -53,7 +62,7 @@ runs:
         docker system prune -a --volumes
         sudo apt remove -y *docker*
         # Install uidmap which will be required for the explicitly installed Docker
-        sudo apt install uidmap
+        sudo apt install -y uidmap
       if: runner.os == 'Linux'

     - name: Update system dependencies
@@ -66,11 +75,8 @@ runs:
       if: runner.os == 'Linux'

     - name: Install rootless Docker
-      uses: docker/setup-docker-action@b60f85385d03ac8acfca6d9996982511d8620a19
+      uses: docker/setup-docker-action@e61617a16c407a86262fb923c35a616ddbe070b3 # 4.6.0
       with:
         rootless: true
         set-host: true
       if: runner.os == 'Linux'

     # - name: Cache Rust
     #   uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43
.github/actions/monero-wallet-rpc/action.yml (vendored) | 4 changed lines

@@ -5,14 +5,14 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: v0.18.3.4
+    default: v0.18.4.4

 runs:
   using: "composite"
   steps:
     - name: Monero Wallet RPC Cache
       id: cache-monero-wallet-rpc
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
+      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # 4.2.4
       with:
         path: monero-wallet-rpc
         key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
.github/actions/monero/action.yml (vendored) | 33 changed lines

@@ -5,39 +5,46 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: v0.18.3.4
+    default: v0.18.4.4

 runs:
   using: "composite"
   steps:
     - name: Monero Daemon Cache
       id: cache-monerod
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
+      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # 4.2.4
       with:
         path: /usr/bin/monerod
         key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

     - name: Download the Monero Daemon
       if: steps.cache-monerod.outputs.cache-hit != 'true'
-      # Calculates OS/ARCH to demonstrate it, yet then locks to linux-x64 due
-      # to the contained folder not following the same naming scheme and
-      # requiring further expansion not worth doing right now
       shell: bash
       run: |
-        RUNNER_OS=${{ runner.os }}
-        RUNNER_ARCH=${{ runner.arch }}
+        OS=${{ runner.os }}
+        ARCH=${{ runner.arch }}

-        RUNNER_OS=${RUNNER_OS,,}
-        RUNNER_ARCH=${RUNNER_ARCH,,}
+        OS=$(echo "$OS" | tr "[:upper:]" "[:lower:]")
+        ARCH=$(echo "$ARCH" | tr "[:upper:]" "[:lower:]")

-        RUNNER_OS=linux
-        RUNNER_ARCH=x64
+        if [ "$OS" = "windows" ]; then
+          OS=win
+          echo "Windows is unsupported at this time"
+          exit 1
+        fi
+        if [ "$OS" = "macos" ]; then
+          OS=mac
+        fi
+        if [ "$ARCH" = "arm64" ]; then
+          ARCH=armv8
+        fi

-        FILE=monero-$RUNNER_OS-$RUNNER_ARCH-${{ inputs.version }}.tar.bz2
+        FILE=monero-$OS-$ARCH-${{ inputs.version }}.tar.bz2
         wget https://downloads.getmonero.org/cli/$FILE
         tar -xvf $FILE
         rm $FILE

-        sudo mv monero-x86_64-linux-gnu-${{ inputs.version }}/monerod /usr/bin/monerod
-        sudo chmod 777 /usr/bin/monerod
+        sudo mv $(find . -name monerod) /usr/bin/monerod
+        sudo chmod +x /usr/bin/monerod
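For reference, the release-name mapping this step now implements, restated as a minimal Rust sketch. This is illustrative only: the `monero-$OS-$ARCH` naming scheme is taken from the shell above, and only the OS/arch combinations the action actually exercises are covered (`monero_release_components` is a hypothetical helper, not code from this repository):

```rust
/// Map a GitHub runner's OS/arch to the Monero CLI release-name components,
/// mirroring the shell logic in the action above. Windows is rejected, as there.
fn monero_release_components(runner_os: &str, runner_arch: &str) -> Result<(String, String), String> {
  let os = match runner_os.to_lowercase().as_str() {
    "macos" => "mac".to_string(),
    "windows" => return Err("Windows is unsupported at this time".to_string()),
    other => other.to_string(), // e.g. "linux"
  };
  let arch = match runner_arch.to_lowercase().as_str() {
    "arm64" => "armv8".to_string(),
    other => other.to_string(), // e.g. "x64"
  };
  Ok((os, arch))
}

fn main() {
  // Produces "monero-linux-x64-v0.18.4.4.tar.bz2", matching the `FILE` built above
  let (os, arch) = monero_release_components("Linux", "X64").unwrap();
  println!("monero-{os}-{arch}-v0.18.4.4.tar.bz2");
}
```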
.github/actions/test-dependencies/action.yml (vendored) | 8 changed lines

@@ -5,12 +5,12 @@ inputs:
   monero-version:
     description: "Monero version to download and run as a regtest node"
     required: false
-    default: v0.18.3.4
+    default: v0.18.4.4

   bitcoin-version:
     description: "Bitcoin version to download and run as a regtest node"
     required: false
-    default: "27.1"
+    default: "30.0"

 runs:
   using: "composite"
@@ -19,9 +19,9 @@ runs:
       uses: ./.github/actions/build-dependencies

     - name: Install Foundry
-      uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
+      uses: foundry-rs/foundry-toolchain@50d5a8956f2e319df19e6b57539d7e2acb9f8c1e # 1.5.0
       with:
-        version: nightly-f625d0fa7c51e65b4bf1e8f7931cd1c6e2e285e9
+        version: v1.5.0
         cache: false

     - name: Run a Monero Regtest Node
.github/nightly-version (vendored) | 2 changed lines

@@ -1 +1 @@
-nightly-2025-09-01
+nightly-2025-12-01
.github/workflows/common-tests.yml (vendored) | 2 changed lines

@@ -17,7 +17,7 @@ jobs:
   test-common:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies
.github/workflows/coordinator-tests.yml (vendored) | 2 changed lines

@@ -31,7 +31,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies
.github/workflows/crypto-tests.yml (vendored) | 2 changed lines

@@ -19,7 +19,7 @@ jobs:
   test-crypto:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies
.github/workflows/daily-deny.yml (vendored) | 10 changed lines

@@ -9,16 +9,10 @@ jobs:
     name: Run cargo deny
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
-
-      - name: Advisory Cache
-        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
-        with:
-          path: ~/.cargo/advisory-db
-          key: rust-advisory-db
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Install cargo deny
-        run: cargo +1.89 install cargo-deny --version =0.18.3
+        run: cargo +1.91.1 install cargo-deny --version =0.18.9

       - name: Run cargo deny
         run: cargo deny -L error --all-features check --hide-inclusion-graph
.github/workflows/full-stack-tests.yml (vendored) | 2 changed lines

@@ -13,7 +13,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies
.github/workflows/lint.yml (vendored) | 79 changed lines

@@ -11,11 +11,11 @@ jobs:
   clippy:
     strategy:
       matrix:
-        os: [ubuntu-latest, macos-13, macos-14, windows-latest]
+        os: [ubuntu-latest, macos-15-intel, macos-latest, windows-latest]
     runs-on: ${{ matrix.os }}

     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Get nightly version to use
         id: nightly
@@ -26,7 +26,7 @@ jobs:
         uses: ./.github/actions/build-dependencies

       - name: Install nightly rust
-        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-src -c clippy
+        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c clippy

       - name: Run Clippy
         run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module
@@ -43,16 +43,10 @@ jobs:
   deny:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
-
-      - name: Advisory Cache
-        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
-        with:
-          path: ~/.cargo/advisory-db
-          key: rust-advisory-db
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Install cargo deny
-        run: cargo +1.89 install cargo-deny --version =0.18.3
+        run: cargo +1.91.1 install cargo-deny --version =0.18.9

       - name: Run cargo deny
         run: cargo deny -L error --all-features check --hide-inclusion-graph
@@ -60,7 +54,7 @@ jobs:
   fmt:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Get nightly version to use
         id: nightly
@@ -73,32 +67,32 @@ jobs:
       - name: Run rustfmt
         run: cargo +${{ steps.nightly.outputs.version }} fmt -- --check

-      - name: Install foundry
-        uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
+      - name: Install Foundry
+        uses: foundry-rs/foundry-toolchain@50d5a8956f2e319df19e6b57539d7e2acb9f8c1e # 1.5.0
         with:
-          version: nightly-41d4e5437107f6f42c7711123890147bc736a609
+          version: v1.5.0
           cache: false

       - name: Run forge fmt
-        run: FOUNDRY_FMT_SORT_INPUTS=false FOUNDRY_FMT_LINE_LENGTH=100 FOUNDRY_FMT_TAB_WIDTH=2 FOUNDRY_FMT_BRACKET_SPACING=true FOUNDRY_FMT_INT_TYPES=preserve forge fmt --check $(find . -iname "*.sol")
+        run: FOUNDRY_FMT_SORT_INPUTS=false FOUNDRY_FMT_LINE_LENGTH=100 FOUNDRY_FMT_TAB_WIDTH=2 FOUNDRY_FMT_BRACKET_SPACING=true FOUNDRY_FMT_INT_TYPES=preserve forge fmt --check $(find . -name "*.sol")

   machete:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
       - name: Verify all dependencies are in use
         run: |
-          cargo +1.89 install cargo-machete --version =0.8.0
-          cargo +1.89 machete
+          cargo +1.91.1 install cargo-machete --version =0.9.1
+          cargo +1.91.1 machete

   msrv:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
       - name: Verify claimed `rust-version`
         shell: bash
         run: |
-          cargo +1.89 install cargo-msrv --version =0.18.4
+          cargo +1.91.1 install cargo-msrv --version =0.18.4

           function check_msrv {
             # We `cd` into the directory passed as the first argument, but will return to the
@@ -144,18 +138,17 @@
           function check_workspace {
             # Get the members array from the workspace's `Cargo.toml`
             cargo_toml_lines=$(cat ./Cargo.toml | wc -l)
             # Keep all lines after the start of the array, then keep all lines before the next "]"
             members=$(cat Cargo.toml | grep "members\ \=\ \[" -m1 -A$cargo_toml_lines | grep "]" -m1 -B$cargo_toml_lines)
-            # Parse out any comments, including comments post-fixed on the same line as an entry
-            members=$(echo "$members" | grep -Ev "^[[:space:]]+#" | grep -Ev "^[[:space:]]?$" | awk -F',' '{print $1","}')
-            # Prune `members = [` to `[` by replacing the first line with just `[`
+
+            # Parse out any comments, whitespace, including comments post-fixed on the same line as an entry
+            # We accomplish the latter by pruning all characters after the entry's ","
+            members=$(echo "$members" | grep -Ev "^[[:space:]]*(#|$)" | awk -F',' '{print $1","}')
+            # Replace the first line, which was "members = [" and is now "members = [,", with "["
             members=$(echo "$members" | sed "1s/.*/\[/")
-            # Remove the trailing comma by replacing the last line's "," with ""
-            members=$(echo "$members" | sed "$(($(echo "$members" | wc -l) - 1))s/\,//")
-            # Correct the last line, which was malleated to "]," when pruning comments
+            # Correct the last line, which was malleated to "],"
             members=$(echo "$members" | sed "$(echo "$members" | wc -l)s/\]\,/\]/")

             # Don't check the patches
             members=$(echo "$members" | grep -v "patches")
             # Don't check the following
             # Most of these are binaries, with the exception of the Substrate runtime which has a
             # bespoke build pipeline
@@ -174,6 +167,9 @@
             members=$(echo "$members" | grep -v "mini\"")
             members=$(echo "$members" | grep -v "tests/")

+            # Remove the trailing comma by replacing the last line's "," with ""
+            members=$(echo "$members" | sed "$(($(echo "$members" | wc -l) - 1))s/\,//")
+
             echo $members | jq -r ".[]" | while read -r member; do
               check_msrv $member
               correct=$?
@@ -187,16 +183,16 @@
   slither:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies

       - name: Slither
         run: |
-          python3 -m pip install solc-select
-          solc-select install 0.8.26
-          solc-select use 0.8.26
-          python3 -m pip install slither-analyzer==0.11.3
+          python3 -m pip install slither-analyzer

-          slither --include-paths ./networks/ethereum/schnorr/contracts/Schnorr.sol
+          slither ./networks/ethereum/schnorr/contracts/Schnorr.sol
           slither --include-paths ./networks/ethereum/schnorr/contracts ./networks/ethereum/schnorr/contracts/tests/Schnorr.sol
           slither processor/ethereum/deployer/contracts/Deployer.sol
           slither processor/ethereum/erc20/contracts/IERC20.sol
@@ -205,3 +201,14 @@
           cp processor/ethereum/erc20/contracts/IERC20.sol processor/ethereum/router/contracts/
           cd processor/ethereum/router/contracts
           slither Router.sol
+
+  shellcheck:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - name: shellcheck
+        run: |
+          sudo apt install -y shellcheck
+          find . -name "*.sh" | while read -r script; do
+            shellcheck --enable=all --shell=sh --severity=info $script
+          done
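The `check_workspace` grep/sed/awk pipeline above is dense; as a point of comparison, here is the same idea restated as a minimal Rust sketch. It is illustrative only (the `workspace_members` helper is hypothetical, not code from this repository) and shares the shell version's simplifying assumptions: one entry per line, and no nested brackets inside the `members` array:

```rust
/// Extract workspace members from `Cargo.toml` text, mirroring the shell pipeline:
/// keep the lines between `members = [` and the closing `]`, drop comment-only and
/// blank lines, and trim each entry down to the quoted path before its ",".
fn workspace_members(cargo_toml: &str) -> Vec<String> {
  let mut members = Vec::new();
  let mut in_members = false;
  for line in cargo_toml.lines() {
    let line = line.trim();
    if !in_members {
      if line.starts_with("members = [") { in_members = true; }
      continue;
    }
    if line.starts_with(']') { break; }
    // Skip blank lines and comment-only lines
    if line.is_empty() || line.starts_with('#') { continue; }
    // Prune everything after the entry's ",", including post-fixed comments
    let entry = line.split(',').next().unwrap().trim().trim_matches('"');
    if !entry.is_empty() { members.push(entry.to_string()); }
  }
  members
}

fn main() {
  let toml = r#"
[workspace]
members = [
  # Version patches
  "patches/parking_lot",
  "common/std-shims", # a post-fixed comment
]
"#;
  assert_eq!(workspace_members(toml), ["patches/parking_lot", "common/std-shims"]);
}
```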
.github/workflows/message-queue-tests.yml (vendored) | 2 changed lines

@@ -27,7 +27,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies
.github/workflows/mini-tests.yml (vendored) | 2 changed lines

@@ -17,7 +17,7 @@ jobs:
   test-common:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies
.github/workflows/monthly-nightly-update.yml (vendored) | 2 changed lines

@@ -9,7 +9,7 @@ jobs:
     name: Update nightly
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
        with:
          submodules: "recursive"
.github/workflows/networks-tests.yml (vendored) | 2 changed lines

@@ -21,7 +21,7 @@ jobs:
   test-networks:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Test Dependencies
         uses: ./.github/actions/test-dependencies
.github/workflows/no-std.yml (vendored) | 2 changed lines

@@ -23,7 +23,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies
.github/workflows/pages.yml (vendored) | 14 changed lines

@@ -46,16 +46,16 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
       - name: Setup Ruby
-        uses: ruby/setup-ruby@44511735964dcb71245e7e55f72539531f7bc0eb
+        uses: ruby/setup-ruby@8aeb6ff8030dd539317f8e1769a044873b56ea71 # 1.268.0
         with:
           bundler-cache: true
           cache-version: 0
           working-directory: "${{ github.workspace }}/docs"
       - name: Setup Pages
         id: pages
-        uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b
+        uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b # 5.0.0
       - name: Build with Jekyll
         run: cd ${{ github.workspace }}/docs && bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
         env:
@@ -69,12 +69,12 @@ jobs:
         uses: ./.github/actions/build-dependencies
       - name: Buld Rust docs
         run: |
-          rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-docs -c rust-src
-          RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --all-features
+          rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-docs
+          RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --no-deps --all-features
           mv target/doc docs/_site/rust

       - name: Upload artifact
-        uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b
+        uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b # 4.0.0
         with:
           path: "docs/_site/"

@@ -88,4 +88,4 @@ jobs:
     steps:
       - name: Deploy to GitHub Pages
         id: deployment
-        uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e
+        uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # 4.0.5
.github/workflows/processor-tests.yml (vendored) | 2 changed lines

@@ -31,7 +31,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies
.github/workflows/reproducible-runtime.yml (vendored) | 4 changed lines

@@ -27,10 +27,10 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies

       - name: Run Reproducible Runtime tests
-        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-reproducible-runtime-tests
+        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-reproducible-runtime-tests -- --nocapture
.github/workflows/stack-size.yml (vendored, new file) | 166 lines

@@ -0,0 +1,166 @@
name: Check Update Default Stack Size

on:
  push:
    paths:
      - "orchestration/increase_default_stack_size.sh"
  pull_request:
    paths:
      - "orchestration/increase_default_stack_size.sh"
  workflow_dispatch:
  # Also run weekly to ensure this doesn't inadvertently decay
  schedule:
    - cron: "0 0 * * 1"

jobs:
  stack_size:
    strategy:
      matrix:
        os: [ubuntu-latest, ubuntu-24.04, ubuntu-22.04, macos-15-intel, macos-latest]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

      - name: Install Go
        uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # 6.1.0
        with:
          go-version: stable

      - name: Monero Daemon Cache
        id: cache-monerod
        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # 4.2.4
        with:
          path: monerod
          key: stack-size-monerod

      - name: Download the Monero Daemon
        if: steps.cache-monerod.outputs.cache-hit != 'true'
        run: |
          # We explicitly download the Linux binary as this script executes over an ELF binary
          wget https://downloads.getmonero.org/cli/monero-linux-x64-v0.18.4.4.tar.bz2
          tar -xvf monero-linux-x64-v0.18.4.4.tar.bz2
          mv $(find . -name monerod) .

      - name: Verify expected behavior
        shell: bash
        run: |
          STACK=$((8 * 1024 * 1024))

          OS=${{ runner.os }}
          if [ "$OS" = "Linux" ]; then
            sudo apt update -y
            sudo apt install -y ksh bash dash zsh busybox posh mksh yash
            sudo ln -s "$(which busybox)" /usr/bin/ash
            sudo ln -s "$(which busybox)" /usr/bin/hush
            wget http://ftp.us.debian.org/debian/pool/main/g/gash/gash_0.3.1-1_amd64.deb
            sudo apt install ./gash_0.3.1-1_amd64.deb
            SHELLS="sh ksh bash dash zsh ash hush posh mksh lksh gash yash"
          fi
          if [ "$OS" = "macOS" ]; then
            brew install binutils # `readelf`

            # `binutils` is not placed within the path, so find its
            # `readelf` bin and manually move it into our path
            HOMEBREW_ROOT_PATH=/opt/homebrew # Apple Silicon
            if [ $(uname -m) = "x86_64" ]; then HOMEBREW_ROOT_PATH=/usr/local; fi # Intel
            sudo cp $(find "$HOMEBREW_ROOT_PATH" -name readelf) /usr/local/bin/

            # macOS has the benefit of packaging `oksh`, `osh`, and having distinct core tools
            # TODO: `posh` is packaged but doesn't work: https://github.com/serai-dex/serai/issues/703
            brew install ksh93 bash dash-shell zsh mksh oksh yash oils-for-unix
            SHELLS="sh ksh bash dash zsh mksh oksh yash osh"

            # macOS also has the benefit of packaging (via MacPorts) `mrsh`,
            # which explicitly attempts to be exactly POSIX, without any extensions.
            # We first have to install MacPorts, the easiest method being via source.
            curl -O https://distfiles.macports.org/MacPorts/MacPorts-2.11.6.tar.bz2
            tar xf MacPorts-2.11.6.tar.bz2
            cd MacPorts-2.11.6
            ./configure
            make
            sudo make install
            cd ..
            PATH=$PATH:/opt/local/bin
            sudo port -v selfupdate

            # Now, we install `mrsh`
            # TODO: https://github.com/serai-dex/serai/issues/704
            # sudo port install mrsh
            # SHELLS="$SHELLS mrsh"
          fi

          # Install shells available via `cargo`
          cargo install brush-shell
          SHELLS="$SHELLS brush"
          # We would also test with `nsh` here if not for https://github.com/nuta/nsh/issues/49
          # cargo install nsh
          # SHELLS="$SHELLS nsh"

          # Install shells available via `go`
          # TODO: https://github.com/u-root/u-root/issues/3474
          # GOBIN=/usr/local/bin go install github.com/u-root/u-root/cmds/core/gosh@latest
          # SHELLS="$SHELLS gosh"

          # Patch with `muslstack`
          cp monerod monerod-muslstack
          GOBIN=$(pwd) go install github.com/yaegashi/muslstack@d19cc5866abce3ca59dfc1666df7cc97097d0933
          ./muslstack -s "$STACK" ./monerod-muslstack

          # Patch with `chelf`, which only works on a Linux host (due to requiring `elf.h`)
          # TODO: Install the header on macOS so `chelf` may be used as the source of truth
          if [ "$OS" = "Linux" ]; then
            cp monerod monerod-chelf
            git clone https://github.com/Gottox/chelf
            cd chelf
            git checkout b2994186cea7b7d61a588fd06c1cc1ae75bcc21a
            make
            ./chelf -s "$STACK" ../monerod-chelf
            cd ..
          fi

          # Run our script with all installed shells
          for shell in $SHELLS; do
            echo "Executing \`$shell\`"
            cp monerod monerod-idss-$shell
            ln -s "$(which $shell)" sh
            ./sh ./orchestration/increase_default_stack_size.sh monerod-idss-$shell
            rm ./sh
          done

          # Verify they all had the same result
          sha256() {
            sha256sum "$1" | cut -d' ' -f1
          }
          CHELF=$(sha256 monerod-muslstack)
          find . -name "monerod-*" | while read -r bin; do
            BIN=$(sha256 "$bin")
            if [ ! "$CHELF" = "$BIN" ]; then
              echo "Different artifact between \`monerod-muslstack\` ($CHELF) and \`$bin\` ($BIN)"
              exit 1
            fi
          done

          # Verify the integrity of the result
          read_stack() {
            STACK_INFO=$(readelf "$1" -l | grep STACK -A1)
            MEMSZ=$(printf "%s\n" "$STACK_INFO" | tail -n1 | sed -E s/^[[:space:]]*//g | cut -f2 -d' ')
            printf "%i" $((MEMSZ))
          }
          INITIAL_STACK=$(read_stack monerod)
          if [ "$INITIAL_STACK" -ne "0" ]; then
            echo "Initial \`PT_GNU_STACK\` wasn't 0"
            exit 2
          fi

          UPDATED_STACK=$(read_stack monerod-muslstack)
          if [ "$UPDATED_STACK" -ne "$STACK" ]; then
            echo "Updated \`PT_GNU_STACK\` ($UPDATED_STACK) wasn't 8 MB ($STACK)"
            exit 3
          fi

          # Only one byte should be different due to the bit pattern of 8 MB
          BYTES_DIFFERENT=$(cmp -l monerod monerod-muslstack | wc -l || true)
          if [ "$BYTES_DIFFERENT" -ne 1 ]; then
            echo "More than one byte was different between the two binaries"
            exit 4
          fi
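The workflow's `read_stack` helper shells out to `readelf` and scrapes its output. For clarity, here is a minimal Rust sketch of the underlying read: the `p_memsz` of the `PT_GNU_STACK` program header in an ELF64 little-endian binary. The field offsets come from the ELF64 specification; this is an illustration, not the repository's tooling, and error handling is pared down:

```rust
use std::{env, fs};

/// `p_type` value identifying the PT_GNU_STACK program header.
const PT_GNU_STACK: u32 = 0x6474_e551;

fn u16_at(b: &[u8], o: usize) -> u16 { u16::from_le_bytes([b[o], b[o + 1]]) }
fn u32_at(b: &[u8], o: usize) -> u32 { u32::from_le_bytes(b[o .. o + 4].try_into().unwrap()) }
fn u64_at(b: &[u8], o: usize) -> u64 { u64::from_le_bytes(b[o .. o + 8].try_into().unwrap()) }

fn main() {
  let path = env::args().nth(1).expect("usage: read-stack <elf>");
  let elf = fs::read(path).expect("couldn't read file");
  // ELF magic, then class 2 (64-bit) and data encoding 1 (little-endian)
  assert_eq!(&elf[.. 4], b"\x7fELF");
  assert_eq!((elf[4], elf[5]), (2, 1), "only ELF64 little-endian is handled here");

  // Program-header table location: e_phoff @ 0x20, e_phentsize @ 0x36, e_phnum @ 0x38
  let phoff = u64_at(&elf, 0x20) as usize;
  let phentsize = u16_at(&elf, 0x36) as usize;
  let phnum = u16_at(&elf, 0x38) as usize;

  for i in 0 .. phnum {
    let ph = &elf[phoff + (i * phentsize) ..][.. phentsize];
    // ELF64 program header layout: p_type @ 0, p_memsz @ 40
    if u32_at(ph, 0) == PT_GNU_STACK {
      // `muslstack`/`chelf` patch exactly this value, hence the one-byte diff
      // the workflow asserts when setting it to 8 MiB
      println!("PT_GNU_STACK p_memsz = {}", u64_at(ph, 40));
      return;
    }
  }
  println!("no PT_GNU_STACK header");
}
```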
.github/workflows/tests.yml (vendored) | 38 changed lines

@@ -29,7 +29,7 @@ jobs:
   test-infra:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies
@@ -74,7 +74,7 @@ jobs:
   test-substrate:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies
@@ -83,31 +83,33 @@ jobs:
         run: |
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
             -p serai-primitives \
             -p serai-coins-primitives \
-            -p serai-coins-pallet \
-            -p serai-dex-pallet \
             -p serai-validator-sets-primitives \
-            -p serai-validator-sets-pallet \
             -p serai-genesis-liquidity-primitives \
-            -p serai-genesis-liquidity-pallet \
             -p serai-emissions-primitives \
-            -p serai-emissions-pallet \
-            -p serai-economic-security-pallet \
             -p serai-in-instructions-primitives \
-            -p serai-in-instructions-pallet \
             -p serai-signals-primitives \
-            -p serai-signals-pallet \
             -p serai-abi \
+            -p substrate-median \
+            -p serai-core-pallet \
+            -p serai-coins-pallet \
+            -p serai-validator-sets-pallet \
+            -p serai-signals-pallet \
+            -p serai-dex-pallet \
+            -p serai-genesis-liquidity-pallet \
+            -p serai-economic-security-pallet \
+            -p serai-emissions-pallet \
+            -p serai-in-instructions-pallet \
             -p serai-runtime \
-            -p serai-node
+            -p serai-substrate-tests

   test-serai-client:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies

       - name: Run Tests
-        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client
+        run: |
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-serai
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-bitcoin
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-ethereum
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-monero
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client
.gitignore (vendored) | 8 changed lines

@@ -1,7 +1,13 @@
 target

+# Don't commit any `Cargo.lock` which aren't the workspace's
+Cargo.lock
+!/Cargo.lock
+
+# Don't commit any `Dockerfile`, as they're auto-generated, except the only one which isn't
 Dockerfile
 Dockerfile.fast-epoch
+!orchestration/runtime/Dockerfile

 .test-logs

 .vscode
Cargo.lock (generated) | 5515 changed lines (diff suppressed because it is too large)
Cargo.toml | 91 changed lines

@@ -1,21 +1,6 @@
 [workspace]
 resolver = "2"
 members = [
-  # Version patches
-  "patches/parking_lot",
-  "patches/rocksdb",
-
-  # Rewrites/redirects
-  "patches/option-ext",
-  "patches/directories-next",
-
-  # monero-oxide expects `ciphersuite`, yet the `ciphersuite` in-tree here has breaking changes
-  # This re-exports the in-tree `ciphersuite` _without_ changes breaking to monero-oxide
-  # Not included in workspace to prevent having two crates with the same name (an error)
-  # "patches/ciphersuite",
-  # Same for `dalek-ff-group`
-  # "patches/dalek-ff-group",
-
   "common/std-shims",
   "common/zalloc",
   "common/patchable-async-sleep",
@@ -77,8 +62,8 @@ members = [
   "processor/ethereum/primitives",
   "processor/ethereum/test-primitives",
   "processor/ethereum/deployer",
-  "processor/ethereum/router",
   "processor/ethereum/erc20",
+  "processor/ethereum/router",
   "processor/ethereum",
   "processor/monero",
@@ -95,6 +80,9 @@ members = [
   "substrate/primitives",
   "substrate/abi",

+  "substrate/median",
+
+  "substrate/core",
   "substrate/coins",
   "substrate/validator-sets",
   "substrate/signals",
@@ -107,6 +95,10 @@ members = [
   "substrate/runtime",
   "substrate/node",

+  "substrate/client/serai",
+  "substrate/client/bitcoin",
+  "substrate/client/ethereum",
+  "substrate/client/monero",
   "substrate/client",

   "orchestration",
@@ -119,10 +111,24 @@ members = [
   "tests/message-queue",
   # TODO "tests/processor",
   # TODO "tests/coordinator",
+  "tests/substrate",
   # TODO "tests/full-stack",
   "tests/reproducible-runtime",
 ]

+[profile.dev]
+panic = "abort"
+overflow-checks = true
+[profile.release]
+panic = "abort"
+overflow-checks = true
+# These do not respect the `panic` configuration value, so we don't provide them
+[profile.test]
+# panic = "abort" # https://github.com/rust-lang/issues/67650
+overflow-checks = true
+[profile.bench]
+overflow-checks = true

 [profile.dev.package]
 # Always compile Monero (and a variety of dependencies) with optimizations due
 # to the extensive operations required for Bulletproofs
@@ -140,11 +146,14 @@ dalek-ff-group = { opt-level = 3 }
 multiexp = { opt-level = 3 }

-monero-generators = { opt-level = 3 }
-monero-borromean = { opt-level = 3 }
-monero-bulletproofs = { opt-level = 3 }
+monero-io = { opt-level = 3 }
+monero-primitives = { opt-level = 3 }
+monero-ed25519 = { opt-level = 3 }
+monero-mlsag = { opt-level = 3 }
+monero-clsag = { opt-level = 3 }
+monero-borromean = { opt-level = 3 }
+monero-bulletproofs-generators = { opt-level = 3 }
+monero-bulletproofs = {opt-level = 3 }
+monero-oxide = { opt-level = 3 }

 # Always compile the eVRF DKG tree with optimizations as well
@@ -169,14 +178,21 @@ revm-precompile = { opt-level = 3 }
 revm-primitives = { opt-level = 3 }
 revm-state = { opt-level = 3 }

-[profile.release]
-panic = "unwind"
-overflow-checks = true
-
 [patch.crates-io]
-# Dependencies from monero-oxide which originate from within our own tree
-std-shims = { path = "common/std-shims" }
-simple-request = { path = "common/request" }
+# Point to empty crates for crates unused within in our tree
+alloy-eip2124 = { path = "patches/ethereum/alloy-eip2124" }
+ark-ff-3 = { package = "ark-ff", path = "patches/ethereum/ark-ff-0.3" }
+ark-ff-4 = { package = "ark-ff", path = "patches/ethereum/ark-ff-0.4" }
+c-kzg = { path = "patches/ethereum/c-kzg" }
+fastrlp-3 = { package = "fastrlp", path = "patches/ethereum/fastrlp-0.3" }
+fastrlp-4 = { package = "fastrlp", path = "patches/ethereum/fastrlp-0.4" }
+primitive-types-12 = { package = "primitive-types", path = "patches/ethereum/primitive-types-0.12" }
+rlp = { path = "patches/ethereum/rlp" }
+secp256k1-30 = { package = "secp256k1", path = "patches/ethereum/secp256k1-0.30" }
+
+# Dependencies from monero-oxide which originate from within our own tree, potentially shimmed to account for deviations since publishing
+std-shims = { path = "patches/std-shims" }
+simple-request = { path = "patches/simple-request" }
 multiexp = { path = "crypto/multiexp" }
 flexible-transcript = { path = "crypto/transcript" }
 ciphersuite = { path = "patches/ciphersuite" }
@@ -184,13 +200,18 @@ dalek-ff-group = { path = "patches/dalek-ff-group" }
 minimal-ed448 = { path = "crypto/ed448" }
 modular-frost = { path = "crypto/frost" }

-# Patch due to `std` now including the required functionality
-is_terminal_polyfill = { path = "./patches/is_terminal_polyfill" }
+# This has a non-deprecated `std` alternative since Rust's 2024 edition
+home = { path = "patches/home" }

+# Updates to the latest version
+darling = { path = "patches/darling" }
+thiserror = { path = "patches/thiserror" }
+
 # https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
 lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }

 parking_lot = { path = "patches/parking_lot" }
 # Needed for WAL compression
 rocksdb = { path = "patches/rocksdb" }
@@ -199,7 +220,10 @@ rocksdb = { path = "patches/rocksdb" }
 option-ext = { path = "patches/option-ext" }
 directories-next = { path = "patches/directories-next" }

-# Patch to include `FromUniformBytes<64>` over Scalar
+# Patch from a fork back to upstream
+parity-bip39 = { path = "patches/parity-bip39" }
+
+# Patch to include `FromUniformBytes<64>` over `Scalar`
 k256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" }
 p256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" }
@@ -247,7 +271,7 @@ redundant_closure_for_method_calls = "deny"
 redundant_else = "deny"
 string_add_assign = "deny"
 string_slice = "deny"
-unchecked_duration_subtraction = "deny"
+unchecked_time_subtraction = "deny"
 uninlined_format_args = "deny"
 unnecessary_box_returns = "deny"
 unnecessary_join = "deny"
@@ -256,3 +280,6 @@ unnested_or_patterns = "deny"
 unused_async = "deny"
 unused_self = "deny"
 zero_sized_map_values = "deny"
+
+[workspace.lints.rust]
+unused = "allow" # TODO: https://github.com/rust-lang/rust/issues/147648
audits/crypto/dkg/evrf/README.md (new file) | 50 lines

@@ -0,0 +1,50 @@
# eVRF DKG

In 2024, the [eVRF paper](https://eprint.iacr.org/2024/397) was published to
the IACR preprint server. Within it was a one-round unbiased DKG and a
one-round unbiased threshold DKG. Unfortunately, both simply describe
communication of the secret shares as 'Alice sends $s_b$ to Bob'. In practice,
this necessitates an additional round of communication in which all
participants confirm they received their secret shares.

Within Serai, it was posited to use the same premises as the DDH eVRF itself to
achieve a verifiable encryption scheme. This allows the secret shares to be
posted to any 'bulletin board' (such as a blockchain) and for all observers to
confirm (see the sketch after this list):

- A participant participated
- The secret shares sent can be received by the intended recipient, so long as
  they can access the bulletin board
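One plausible shape for such a DDH-based verifiable encryption, as an illustrative sketch only — the symbols below are ours, and the precise scheme is the one formalized in the HashCloak paper linked further down, not this reconstruction:

$$
\begin{aligned}
&\text{Bob holds a keypair } (k_b,\ P_b = k_b G). \\
&\text{Alice, sending share } s_b\text{, samples an ephemeral } r \text{ and publishes} \\
&\qquad R = rG, \qquad c = s_b + \mathcal{H}(r P_b), \\
&\text{alongside an eVRF-style proof that } \mathcal{H}(r P_b) \text{ was derived from the DH of } (R, P_b). \\
&\text{Any observer can verify the proof against } (R, P_b, c)\text{, so the share is guaranteed} \\
&\text{decryptable; Bob recovers } s_b = c - \mathcal{H}(k_b R).
\end{aligned}
$$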
Additionally, Serai desired a robust scheme (albeit with a biased key as the
output, which is fine for our purposes). Accordingly, our implementation
instantiates the threshold eVRF DKG from the eVRF paper, with our own proposal
for verifiable encryption, with the caller allowed to decide the set of
participants. They may:

- Select everyone, collapsing to the non-threshold unbiased DKG from the eVRF
  paper
- Select a pre-determined set, collapsing to the threshold unbiased DKG from
  the eVRF paper
- Select a post-determined set (with any solution for the Common Subset
  problem), achieving a robust threshold biased DKG

Note that the eVRF paper proposes using the eVRF to sample coefficients, yet
this is unnecessary when the resulting key will be biased. Any proof of
knowledge for the coefficients, as necessary for their extraction within the
security proofs, would be sufficient.

MAGIC Grants contracted HashCloak to formalize Serai's proposal for a DKG and
provide proofs for its security. This resulted in
[this paper](<./Security Proofs.pdf>).

Our implementation itself is then built on top of the audited
[`generalized-bulletproofs`](https://github.com/kayabaNerve/monero-oxide/tree/generalized-bulletproofs/audits/crypto/generalized-bulletproofs)
and
[`generalized-bulletproofs-ec-gadgets`](https://github.com/monero-oxide/monero-oxide/tree/fcmp%2B%2B/audits/fcmps).

Note we do not use the originally premised DDH eVRF but the one premised on
elliptic curve divisors, the methodology of which is commented on
[here](https://github.com/monero-oxide/monero-oxide/tree/fcmp%2B%2B/audits/divisors).

Our implementation itself is unaudited at this time, however.
audits/crypto/dkg/evrf/Security Proofs.pdf (new file, binary) | not shown
common/db/Cargo.toml | (changed lines below)

@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/db"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
-rust-version = "1.65"
+rust-version = "1.77"

 [package.metadata.docs.rs]
 all-features = true
@@ -17,7 +17,7 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true

 [dependencies]
-parity-db = { version = "0.4", default-features = false, optional = true }
+parity-db = { version = "0.5", default-features = false, features = ["arc"], optional = true }
 rocksdb = { version = "0.24", default-features = false, features = ["zstd"], optional = true }

 [features]
common/env/src/lib.rs (vendored) | 2 changed lines

@@ -1,5 +1,5 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![cfg_attr(docsrs, feature(doc_cfg))]

 // Obtain a variable from the Serai environment/secret store.
 pub fn var(variable: &str) -> Option<String> {
(another crate's src/lib.rs; the file header did not survive mirroring)

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
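For context on what these attributes do, a minimal sketch (the `tls` feature name here is illustrative, not taken from this diff): `doc_auto_cfg` makes rustdoc automatically annotate feature-gated items when building with `--cfg docsrs`, while `doc_cfg` allows spelling the annotation out by hand:

```rust
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]

/// With `doc_auto_cfg`, rustdoc infers and displays "Available on crate feature
/// `tls` only" for this item without any explicit annotation.
#[cfg(feature = "tls")]
pub fn only_with_tls() {}

/// With `doc_cfg`, the same badge can be attached explicitly, which also works
/// when the `cfg` an item is gated on differs from what should be documented.
#[cfg(feature = "tls")]
#[cfg_attr(docsrs, doc(cfg(feature = "tls")))]
pub fn explicitly_annotated() {}
```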
common/request/Cargo.toml | (changed lines below)

@@ -1,9 +1,9 @@
 [package]
 name = "simple-request"
-version = "0.1.0"
+version = "0.3.0"
 description = "A simple HTTP(S) request library"
 license = "MIT"
-repository = "https://github.com/serai-dex/serai/tree/develop/common/simple-request"
+repository = "https://github.com/serai-dex/serai/tree/develop/common/request"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = ["http", "https", "async", "request", "ssl"]
 edition = "2021"
@@ -19,9 +19,10 @@ workspace = true
 [dependencies]
 tower-service = { version = "0.3", default-features = false }
 hyper = { version = "1", default-features = false, features = ["http1", "client"] }
-hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy", "tokio"] }
+hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy"] }
 http-body-util = { version = "0.1", default-features = false }
-tokio = { version = "1", default-features = false }
+futures-util = { version = "0.3", default-features = false, features = ["std"] }
+tokio = { version = "1", default-features = false, features = ["sync"] }

 hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }

@@ -29,6 +30,8 @@ zeroize = { version = "1", optional = true }
 base64ct = { version = "1", features = ["alloc"], optional = true }

 [features]
-tls = ["hyper-rustls"]
+tokio = ["hyper-util/tokio"]
+tls = ["tokio", "hyper-rustls"]
 webpki-roots = ["tls", "hyper-rustls/webpki-roots"]
 basic-auth = ["zeroize", "base64ct"]
 default = ["tls"]
common/request/src/lib.rs | (changed lines below)

@@ -1,19 +1,20 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]

+use core::{pin::Pin, future::Future};
 use std::sync::Arc;

-use tokio::sync::Mutex;
+use futures_util::FutureExt;
+use ::tokio::sync::Mutex;

 use tower_service::Service as TowerService;
-use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest};
-use hyper_util::{
-  rt::tokio::TokioExecutor,
-  client::legacy::{Client as HyperClient, connect::HttpConnector},
-};
+use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest, rt::Executor};
 pub use hyper;

+use hyper_util::client::legacy::{Client as HyperClient, connect::HttpConnector};
+
 #[cfg(feature = "tls")]
 use hyper_rustls::{HttpsConnectorBuilder, HttpsConnector};

 mod request;
 pub use request::*;
@@ -37,52 +38,86 @@ type Connector = HttpConnector;
 type Connector = HttpsConnector<HttpConnector>;

 #[derive(Clone, Debug)]
-enum Connection {
+enum Connection<
+  E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
+> {
   ConnectionPool(HyperClient<Connector, Full<Bytes>>),
   Connection {
+    executor: E,
     connector: Connector,
     host: Uri,
     connection: Arc<Mutex<Option<SendRequest<Full<Bytes>>>>>,
   },
 }

 /// An HTTP client.
+///
+/// `tls` is only guaranteed to work when using the `tokio` executor. Instantiating a client when
+/// the `tls` feature is active without using the `tokio` executor will cause errors.
 #[derive(Clone, Debug)]
-pub struct Client {
-  connection: Connection,
+pub struct Client<
+  E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
+> {
+  connection: Connection<E>,
 }

-impl Client {
-  fn connector() -> Connector {
+impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>>
+  Client<E>
+{
+  #[allow(clippy::unnecessary_wraps)]
+  fn connector() -> Result<Connector, Error> {
     let mut res = HttpConnector::new();
     res.set_keepalive(Some(core::time::Duration::from_secs(60)));
     res.set_nodelay(true);
     res.set_reuse_address(true);
+
+    #[cfg(feature = "tls")]
+    if core::any::TypeId::of::<E>() !=
+      core::any::TypeId::of::<hyper_util::rt::tokio::TokioExecutor>()
+    {
+      Err(Error::ConnectionError(
+        "`tls` feature enabled but not using the `tokio` executor".into(),
+      ))?;
+    }
+
     #[cfg(feature = "tls")]
     res.enforce_http(false);
     #[cfg(feature = "tls")]
-    let res = HttpsConnectorBuilder::new()
-      .with_native_roots()
-      .expect("couldn't fetch system's SSL roots")
-      .https_or_http()
-      .enable_http1()
-      .wrap_connector(res);
-    res
+    let https = HttpsConnectorBuilder::new().with_native_roots();
+    #[cfg(all(feature = "tls", not(feature = "webpki-roots")))]
+    let https = https.map_err(|e| {
+      Error::ConnectionError(
+        format!("couldn't load system's SSL root certificates and webpki-roots unavilable: {e:?}")
+          .into(),
+      )
+    })?;
+    // Fallback to `webpki-roots` if present
+    #[cfg(all(feature = "tls", feature = "webpki-roots"))]
+    let https = https.unwrap_or(HttpsConnectorBuilder::new().with_webpki_roots());
+    #[cfg(feature = "tls")]
+    let res = https.https_or_http().enable_http1().wrap_connector(res);

+    Ok(res)
   }

-  pub fn with_connection_pool() -> Client {
-    Client {
+  pub fn with_executor_and_connection_pool(executor: E) -> Result<Client<E>, Error> {
+    Ok(Client {
       connection: Connection::ConnectionPool(
-        HyperClient::builder(TokioExecutor::new())
+        HyperClient::builder(executor)
           .pool_idle_timeout(core::time::Duration::from_secs(60))
-          .build(Self::connector()),
+          .build(Self::connector()?),
       ),
-    }
+    })
   }

-  pub fn without_connection_pool(host: &str) -> Result<Client, Error> {
+  pub fn with_executor_and_without_connection_pool(
+    executor: E,
+    host: &str,
+  ) -> Result<Client<E>, Error> {
     Ok(Client {
       connection: Connection::Connection {
-        connector: Self::connector(),
+        executor,
+        connector: Self::connector()?,
         host: {
           let uri: Uri = host.parse().map_err(|_| Error::InvalidUri)?;
           if uri.host().is_none() {
@@ -95,9 +130,9 @@ impl Client {
     })
   }

-  pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_>, Error> {
+  pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_, E>, Error> {
     let request: Request = request.into();
-    let mut request = request.0;
+    let Request { mut request, response_size_limit } = request;
     if let Some(header_host) = request.headers().get(hyper::header::HOST) {
       match &self.connection {
         Connection::ConnectionPool(_) => {}
@@ -131,7 +166,7 @@ impl Client {
       Connection::ConnectionPool(client) => {
         client.request(request).await.map_err(Error::HyperUtil)?
       }
-      Connection::Connection { connector, host, connection } => {
+      Connection::Connection { executor, connector, host, connection } => {
        let mut connection_lock = connection.lock().await;

        // If there's not a connection...
@@ -143,28 +178,46 @@ impl Client {
        let call_res = call_res.map_err(Error::ConnectionError);
        let (requester, connection) =
          hyper::client::conn::http1::handshake(call_res?).await.map_err(Error::Hyper)?;
-        // This will die when we drop the requester, so we don't need to track an AbortHandle
-        // for it
-        tokio::spawn(connection);
+        // This task will die when we drop the requester
+        executor.execute(Box::pin(connection.map(|_| ())));
        *connection_lock = Some(requester);
      }

-      let connection = connection_lock.as_mut().unwrap();
+      let connection = connection_lock.as_mut().expect("lock over the connection was poisoned");
      let mut err = connection.ready().await.err();
      if err.is_none() {
        // Send the request
-        let res = connection.send_request(request).await;
-        if let Ok(res) = res {
-          return Ok(Response(res, self));
+        let response = connection.send_request(request).await;
+        if let Ok(response) = response {
+          return Ok(Response { response, size_limit: response_size_limit, client: self });
        }
-        err = res.err();
+        err = response.err();
      }
      // Since this connection has been put into an error state, drop it
      *connection_lock = None;
-      Err(Error::Hyper(err.unwrap()))?
+      Err(Error::Hyper(err.expect("only here if `err` is some yet no error")))?
    }
  };

-  Ok(Response(response, self))
+  Ok(Response { response, size_limit: response_size_limit, client: self })
 }
}

+#[cfg(feature = "tokio")]
+mod tokio {
+  use hyper_util::rt::tokio::TokioExecutor;
+  use super::*;
+
+  pub type TokioClient = Client<TokioExecutor>;
+  impl Client<TokioExecutor> {
+    pub fn with_connection_pool() -> Result<Self, Error> {
+      Self::with_executor_and_connection_pool(TokioExecutor::new())
+    }
+
+    pub fn without_connection_pool(host: &str) -> Result<Self, Error> {
+      Self::with_executor_and_without_connection_pool(TokioExecutor::new(), host)
+    }
+  }
+}
+#[cfg(feature = "tokio")]
+pub use tokio::TokioClient;
|
||||
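The `E` parameter threaded through `Client` above is hyper's `rt::Executor` over boxed futures, so any runtime with a spawner can drive connections. A minimal sketch of satisfying that bound outside of tokio follows; `MyExecutor` and `my_runtime::spawn` are hypothetical stand-ins, not part of this patch.

```rust
use core::{pin::Pin, future::Future};

// Any `'static + Send + Sync + Clone` spawner satisfying hyper's `Executor` works
#[derive(Clone)]
struct MyExecutor;

impl hyper::rt::Executor<Pin<Box<dyn Send + Future<Output = ()>>>> for MyExecutor {
  fn execute(&self, fut: Pin<Box<dyn Send + Future<Output = ()>>>) {
    // Assumption: the runtime in use exposes some `spawn` for boxed futures
    my_runtime::spawn(fut);
  }
}

// Then: `Client::with_executor_and_connection_pool(MyExecutor)?`
```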
@@ -7,11 +7,15 @@ pub use http_body_util::Full;
use crate::Error;

#[derive(Debug)]
pub struct Request(pub(crate) hyper::Request<Full<Bytes>>);
pub struct Request {
pub(crate) request: hyper::Request<Full<Bytes>>,
pub(crate) response_size_limit: Option<usize>,
}

impl Request {
#[cfg(feature = "basic-auth")]
fn username_password_from_uri(&self) -> Result<(String, String), Error> {
if let Some(authority) = self.0.uri().authority() {
if let Some(authority) = self.request.uri().authority() {
let authority = authority.as_str();
if authority.contains('@') {
// Decode the username and password from the URI
@@ -36,9 +40,10 @@ impl Request {
let mut formatted = format!("{username}:{password}");
let mut encoded = Base64::encode_string(formatted.as_bytes());
formatted.zeroize();
self.0.headers_mut().insert(
self.request.headers_mut().insert(
hyper::header::AUTHORIZATION,
HeaderValue::from_str(&format!("Basic {encoded}")).unwrap(),
HeaderValue::from_str(&format!("Basic {encoded}"))
.expect("couldn't form header from base64-encoded string"),
);
encoded.zeroize();
}
@@ -59,9 +64,17 @@ impl Request {
pub fn with_basic_auth(&mut self) {
let _ = self.basic_auth_from_uri();
}
}
impl From<hyper::Request<Full<Bytes>>> for Request {
fn from(request: hyper::Request<Full<Bytes>>) -> Request {
Request(request)

/// Set a size limit for the response.
///
/// This may be exceeded by a single HTTP frame and accordingly isn't perfect.
pub fn set_response_size_limit(&mut self, response_size_limit: Option<usize>) {
self.response_size_limit = response_size_limit;
}
}

impl From<hyper::Request<Full<Bytes>>> for Request {
fn from(request: hyper::Request<Full<Bytes>>) -> Request {
Request { request, response_size_limit: None }
}
}

@@ -1,24 +1,54 @@
use core::{pin::Pin, future::Future};
use std::io;

use hyper::{
StatusCode,
header::{HeaderValue, HeaderMap},
body::{Buf, Incoming},
body::Incoming,
rt::Executor,
};
use http_body_util::BodyExt;

use futures_util::{Stream, StreamExt};

use crate::{Client, Error};

// Borrows the client so its async task lives as long as this response exists.
#[allow(dead_code)]
#[derive(Debug)]
pub struct Response<'a>(pub(crate) hyper::Response<Incoming>, pub(crate) &'a Client);
impl Response<'_> {
pub struct Response<
'a,
E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
> {
pub(crate) response: hyper::Response<Incoming>,
pub(crate) size_limit: Option<usize>,
pub(crate) client: &'a Client<E>,
}

impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>>
Response<'_, E>
{
pub fn status(&self) -> StatusCode {
self.0.status()
self.response.status()
}
pub fn headers(&self) -> &HeaderMap<HeaderValue> {
self.0.headers()
self.response.headers()
}
pub async fn body(self) -> Result<impl std::io::Read, Error> {
Ok(self.0.into_body().collect().await.map_err(Error::Hyper)?.aggregate().reader())
let mut body = self.response.into_body().into_data_stream();
let mut res: Vec<u8> = vec![];
loop {
if let Some(size_limit) = self.size_limit {
let (lower, upper) = body.size_hint();
if res.len().wrapping_add(upper.unwrap_or(lower)) > size_limit.min(usize::MAX - 1) {
Err(Error::ConnectionError("response exceeded size limit".into()))?;
}
}

let Some(part) = body.next().await else { break };
let part = part.map_err(Error::Hyper)?;
res.extend(part.as_ref());
}
Ok(io::Cursor::new(res))
}
}

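Putting the pieces above together, a hedged end-to-end sketch; the crate name `simple_request` and the exact re-exports are assumptions here, while the API itself is as shown in this diff.

```rust
use simple_request::{TokioClient, Request, Error};

async fn fetch(url: &str) -> Result<Vec<u8>, Error> {
  let client = TokioClient::with_connection_pool()?;
  let hyper_request = hyper::Request::get(url)
    .body(http_body_util::Full::new(hyper::body::Bytes::new()))
    .expect("couldn't build a statically-valid request");
  let mut request = Request::from(hyper_request);
  // Bound the response body; per the docs above, a single frame may still exceed this
  request.set_response_size_limit(Some(1 << 20));
  let response = client.request(request).await?;
  let mut body = Vec::new();
  // `body()` buffers into an in-memory reader, so this read can't fail
  std::io::Read::read_to_end(&mut response.body().await?, &mut body)
    .expect("reading from an in-memory cursor");
  Ok(body)
}
```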
@@ -1,6 +1,6 @@
[package]
name = "std-shims"
version = "0.1.4"
version = "0.1.5"
description = "A series of std shims to make alloc more feasible"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims"
@@ -18,9 +18,10 @@ workspace = true

[dependencies]
rustversion = { version = "1", default-features = false }
spin = { version = "0.10", default-features = false, features = ["use_ticket_mutex", "once", "lazy"] }
hashbrown = { version = "0.16", default-features = false, features = ["default-hasher", "inline-more"] }
spin = { version = "0.10", default-features = false, features = ["use_ticket_mutex", "fair_mutex", "once", "lazy"] }
hashbrown = { version = "0.16", default-features = false, features = ["default-hasher", "inline-more"], optional = true }

[features]
std = []
alloc = ["hashbrown"]
std = ["alloc", "spin/std"]
default = ["std"]

@@ -1,11 +1,28 @@
# std shims
# `std` shims

A crate which passes through to std when the default `std` feature is enabled,
yet provides a series of shims when it isn't.
`std-shims` is a Rust crate with two purposes:
- Expand the functionality of `core` and `alloc`
- Polyfill functionality only available on newer versions of Rust

No guarantee of one-to-one parity is provided. The shims provided aim to be sufficient for the
average case.
The goal is to make supporting no-`std` environments, and older versions of
Rust, as simple as possible. For most use cases, replacing `std::` with
`std_shims::` and adding `use std_shims::prelude::*` is sufficient to take full
advantage of `std-shims`.

`HashSet` and `HashMap` are provided via `hashbrown`. Synchronization primitives are provided via
`spin` (avoiding a requirement on `critical-section`).
# API Surface

`std-shims` only aims to have items _mutually available_ between `alloc` (with
extra dependencies) and `std` publicly exposed. Items exclusive to `std`, with
no shims available, will not be exported by `std-shims`.

# Dependencies

`HashSet` and `HashMap` are provided via `hashbrown`. Synchronization
primitives are provided via `spin` (avoiding a requirement on
`critical-section`). Sections of `std::io` are independently matched as
closely as possible. `rustversion` is used to detect when to provide polyfills.

# Disclaimer

No guarantee of one-to-one parity is provided. The shims provided aim to be
sufficient for the average case. Pull requests are _welcome_.

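Since the README above describes a drop-in use case, a short sketch may help; `histogram` is an invented example, and the imports assume the module layout shown in this diff.

```rust
#![cfg_attr(not(feature = "std"), no_std)]

use std_shims::prelude::*;
use std_shims::collections::HashMap;

// Compiles identically with `std` enabled or under `alloc` alone
fn histogram(bytes: &[u8]) -> HashMap<u8, u64> {
  // `div_ceil` resolves to `std` on new toolchains, to the prelude shim on old ones
  let mut counts = HashMap::with_capacity(bytes.len().div_ceil(2));
  for byte in bytes {
    *counts.entry(*byte).or_insert(0) += 1;
  }
  counts
}
```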
@@ -1,7 +1,7 @@
#[cfg(all(feature = "alloc", not(feature = "std")))]
pub use extern_alloc::collections::*;
#[cfg(all(feature = "alloc", not(feature = "std")))]
pub use hashbrown::{HashSet, HashMap};

#[cfg(feature = "std")]
pub use std::collections::*;

#[cfg(not(feature = "std"))]
pub use alloc::collections::*;
#[cfg(not(feature = "std"))]
pub use hashbrown::{HashSet, HashMap};

@@ -1,42 +1,74 @@
#[cfg(feature = "std")]
pub use std::io::*;

#[cfg(not(feature = "std"))]
mod shims {
use core::fmt::{Debug, Formatter};
use alloc::{boxed::Box, vec::Vec};
use core::fmt::{self, Debug, Display, Formatter};
#[cfg(feature = "alloc")]
use extern_alloc::{boxed::Box, vec::Vec};
use crate::error::Error as CoreError;

/// The kind of error.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum ErrorKind {
UnexpectedEof,
Other,
}

/// An error.
#[derive(Debug)]
pub struct Error {
kind: ErrorKind,
error: Box<dyn Send + Sync>,
#[cfg(feature = "alloc")]
error: Box<dyn Send + Sync + CoreError>,
}

impl Debug for Error {
fn fmt(&self, fmt: &mut Formatter<'_>) -> core::result::Result<(), core::fmt::Error> {
fmt.debug_struct("Error").field("kind", &self.kind).finish_non_exhaustive()
impl Display for Error {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
<Self as Debug>::fmt(self, f)
}
}
impl CoreError for Error {}

#[cfg(not(feature = "alloc"))]
pub trait IntoBoxSendSyncError {}
#[cfg(not(feature = "alloc"))]
impl<I> IntoBoxSendSyncError for I {}
#[cfg(feature = "alloc")]
pub trait IntoBoxSendSyncError: Into<Box<dyn Send + Sync + CoreError>> {}
#[cfg(feature = "alloc")]
impl<I: Into<Box<dyn Send + Sync + CoreError>>> IntoBoxSendSyncError for I {}

impl Error {
pub fn new<E: 'static + Send + Sync>(kind: ErrorKind, error: E) -> Error {
Error { kind, error: Box::new(error) }
/// Create a new error.
///
/// The error object itself is silently dropped when `alloc` is not enabled.
#[allow(unused)]
pub fn new<E: 'static + IntoBoxSendSyncError>(kind: ErrorKind, error: E) -> Error {
#[cfg(not(feature = "alloc"))]
let res = Error { kind };
#[cfg(feature = "alloc")]
let res = Error { kind, error: error.into() };
res
}

pub fn other<E: 'static + Send + Sync>(error: E) -> Error {
Error { kind: ErrorKind::Other, error: Box::new(error) }
/// Create a new error with `io::ErrorKind::Other` as its kind.
///
/// The error object itself is silently dropped when `alloc` is not enabled.
#[allow(unused)]
pub fn other<E: 'static + IntoBoxSendSyncError>(error: E) -> Error {
#[cfg(not(feature = "alloc"))]
let res = Error { kind: ErrorKind::Other };
#[cfg(feature = "alloc")]
let res = Error { kind: ErrorKind::Other, error: error.into() };
res
}

/// The kind of error.
pub fn kind(&self) -> ErrorKind {
self.kind
}

pub fn into_inner(self) -> Option<Box<dyn Send + Sync>> {
/// Retrieve the inner error.
#[cfg(feature = "alloc")]
pub fn into_inner(self) -> Option<Box<dyn Send + Sync + CoreError>> {
Some(self.error)
}
}
@@ -64,6 +96,12 @@ mod shims {
}
}

impl<R: Read> Read for &mut R {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
R::read(*self, buf)
}
}

pub trait BufRead: Read {
fn fill_buf(&mut self) -> Result<&[u8]>;
fn consume(&mut self, amt: usize);
@@ -88,6 +126,7 @@ mod shims {
}
}

#[cfg(feature = "alloc")]
impl Write for Vec<u8> {
fn write(&mut self, buf: &[u8]) -> Result<usize> {
self.extend(buf);
@@ -95,6 +134,8 @@ mod shims {
}
}
}

#[cfg(not(feature = "std"))]
pub use shims::*;

#[cfg(feature = "std")]
pub use std::io::{ErrorKind, Error, Result, Read, BufRead, Write};

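Code written against this shimmed surface stays portable; a sketch follows. Whether a `&str` converts into the boxed error on pre-1.81 no-`std` toolchains is an assumption here, as that conversion is only guaranteed by `std`/`core::error`.

```rust
use std_shims::io::{self, Read};

// Read a little-endian `u32`, using only the `Read::read` method shimmed above
fn read_u32(reader: &mut impl Read) -> io::Result<u32> {
  let mut buf = [0; 4];
  let mut filled = 0;
  while filled < buf.len() {
    let read = reader.read(&mut buf[filled ..])?;
    if read == 0 {
      Err(io::Error::new(io::ErrorKind::UnexpectedEof, "short read"))?;
    }
    filled += read;
  }
  Ok(u32::from_le_bytes(buf))
}
```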
@@ -1,18 +1,45 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]

pub extern crate alloc;
#[cfg(not(feature = "alloc"))]
pub use core::*;
#[cfg(not(feature = "alloc"))]
pub use core::{alloc, borrow, ffi, fmt, slice, str, task};

#[cfg(not(feature = "std"))]
#[rustversion::before(1.81)]
pub mod error {
use core::fmt::{Debug, Display};
pub trait Error: Debug + Display {}
}
#[cfg(not(feature = "std"))]
#[rustversion::since(1.81)]
pub use core::error;

#[cfg(feature = "alloc")]
extern crate alloc as extern_alloc;
#[cfg(all(feature = "alloc", not(feature = "std")))]
pub use extern_alloc::{alloc, borrow, boxed, ffi, fmt, rc, slice, str, string, task, vec, format};
#[cfg(feature = "std")]
pub use std::{alloc, borrow, boxed, error, ffi, fmt, rc, slice, str, string, task, vec, format};

pub mod sync;
pub mod collections;
pub mod io;

pub use alloc::vec;
pub use alloc::str;
pub use alloc::string;
pub mod sync;

pub mod prelude {
// Shim the `std` prelude
#[cfg(feature = "alloc")]
pub use extern_alloc::{
format, vec,
borrow::ToOwned,
boxed::Box,
vec::Vec,
string::{String, ToString},
};

// Shim `div_ceil`
#[rustversion::before(1.73)]
#[doc(hidden)]
pub trait StdShimsDivCeil {
@@ -53,6 +80,7 @@ pub mod prelude {
}
}

// Shim `io::Error::other`
#[cfg(feature = "std")]
#[rustversion::before(1.74)]
#[doc(hidden)]

@@ -1,19 +1,80 @@
pub use core::sync::*;
pub use alloc::sync::*;
pub use core::sync::atomic;
#[cfg(all(feature = "alloc", not(feature = "std")))]
pub use extern_alloc::sync::{Arc, Weak};
#[cfg(feature = "std")]
pub use std::sync::{Arc, Weak};

mod mutex_shim {
#[cfg(feature = "std")]
pub use std::sync::*;
#[cfg(not(feature = "std"))]
pub use spin::*;
mod spin_mutex {
use core::ops::{Deref, DerefMut};

#[derive(Default, Debug)]
// We wrap this in an `Option` so we can consider `None` as poisoned
pub(super) struct Mutex<T>(spin::Mutex<Option<T>>);

/// An acquired view of a `Mutex`.
pub struct MutexGuard<'mutex, T> {
mutex: spin::MutexGuard<'mutex, Option<T>>,
// This is `Some` for the lifetime of this guard, and is only represented as an `Option` due
// to needing to move it on `Drop` (which solely gives us a mutable reference to `self`)
value: Option<T>,
}

impl<T> Mutex<T> {
pub(super) const fn new(value: T) -> Self {
Self(spin::Mutex::new(Some(value)))
}

pub(super) fn lock(&self) -> MutexGuard<'_, T> {
let mut mutex = self.0.lock();
// Take from the `Mutex` so future acquisitions will see `None` unless this is restored
let value = mutex.take();
// Check the prior acquisition did in fact restore the value
if value.is_none() {
panic!("locking a `spin::Mutex` held by a thread which panicked");
}
MutexGuard { mutex, value }
}
}

impl<T> Deref for MutexGuard<'_, T> {
type Target = T;
fn deref(&self) -> &T {
self.value.as_ref().expect("no value yet checked upon lock acquisition")
}
}
impl<T> DerefMut for MutexGuard<'_, T> {
fn deref_mut(&mut self) -> &mut T {
self.value.as_mut().expect("no value yet checked upon lock acquisition")
}
}

impl<'mutex, T> Drop for MutexGuard<'mutex, T> {
fn drop(&mut self) {
// Restore the value
*self.mutex = self.value.take();
}
}
}
#[cfg(not(feature = "std"))]
pub use spin_mutex::*;

#[cfg(feature = "std")]
pub use std::sync::{Mutex, MutexGuard};

/// A shimmed `Mutex` with an API mutual to `spin` and `std`.
pub struct ShimMutex<T>(Mutex<T>);
impl<T> ShimMutex<T> {
/// Construct a new `Mutex`.
pub const fn new(value: T) -> Self {
Self(Mutex::new(value))
}

/// Acquire a lock on the contents of the `Mutex`.
///
/// This will panic if the `Mutex` was poisoned.
///
/// On no-`std` environments, the implementation presumably defers to that of a spin lock.
pub fn lock(&self) -> MutexGuard<'_, T> {
#[cfg(feature = "std")]
let res = self.0.lock().unwrap();
@@ -25,10 +86,11 @@ mod mutex_shim {
}
pub use mutex_shim::{ShimMutex as Mutex, MutexGuard};

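A sketch of the poison emulation described above: the value is taken out of the inner `spin::Mutex` for the guard's lifetime and restored on drop, so a panic while holding the guard leaves `None` behind and the next `lock()` panics rather than observing torn state.

```rust
use std_shims::sync::Mutex;

// `new` is `const` under both the `std` and `spin` backends
static COUNTER: Mutex<u64> = Mutex::new(0);

fn increment() -> u64 {
  let mut guard = COUNTER.lock();
  *guard += 1;
  *guard
} // dropping the guard restores the value for the next caller
```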
#[cfg(not(feature = "std"))]
pub use spin::Lazy as LazyLock;
#[rustversion::before(1.80)]
#[cfg(feature = "std")]
pub use spin::Lazy as LazyLock;

#[rustversion::since(1.80)]
#[cfg(not(feature = "std"))]
pub use spin::Lazy as LazyLock;
#[rustversion::since(1.80)]
#[cfg(feature = "std")]

@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

@@ -1,5 +1,5 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))]

//! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation.

@@ -42,7 +42,7 @@ messages = { package = "serai-processor-messages", path = "../processor/messages
message-queue = { package = "serai-message-queue", path = "../message-queue" }
tributary-sdk = { path = "./tributary-sdk" }

serai-client = { path = "../substrate/client", default-features = false, features = ["serai"] }
serai-client-serai = { path = "../substrate/client/serai", default-features = false }

log = { version = "0.4", default-features = false, features = ["std"] }
env_logger = { version = "0.10", default-features = false, features = ["humantime"] }

@@ -19,10 +19,9 @@ workspace = true

[dependencies]
blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }

borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
serai-client = { path = "../../substrate/client", default-features = false, features = ["serai"] }
serai-client-serai = { path = "../../substrate/client/serai", default-features = false }

log = { version = "0.4", default-features = false, features = ["std"] }

@@ -1,10 +1,21 @@
use core::future::Future;
use std::{sync::Arc, collections::HashMap};

use serai_client::{
primitives::{SeraiAddress, Amount},
validator_sets::primitives::ExternalValidatorSet,
Serai,
use blake2::{Digest, Blake2b256};

use serai_client_serai::{
abi::{
primitives::{
network_id::{ExternalNetworkId, NetworkId},
balance::Amount,
crypto::Public,
validator_sets::{Session, ExternalValidatorSet},
address::SeraiAddress,
merkle::IncrementalUnbalancedMerkleTree,
},
validator_sets::Event,
},
Serai, Events,
};

use serai_db::*;
@@ -12,9 +23,20 @@ use serai_task::ContinuallyRan;

use crate::*;

#[derive(BorshSerialize, BorshDeserialize)]
struct Set {
session: Session,
key: Public,
stake: Amount,
}

create_db!(
CosignIntend {
ScanCosignFrom: () -> u64,
BuildsUpon: () -> IncrementalUnbalancedMerkleTree,
Stakes: (network: ExternalNetworkId, validator: SeraiAddress) -> Amount,
Validators: (set: ExternalValidatorSet) -> Vec<SeraiAddress>,
LatestSet: (network: ExternalNetworkId) -> Set,
}
);

@@ -35,23 +57,38 @@ db_channel! {
async fn block_has_events_justifying_a_cosign(
serai: &Serai,
block_number: u64,
) -> Result<(Block, HasEvents), String> {
) -> Result<(Block, Events, HasEvents), String> {
let block = serai
.finalized_block_by_number(block_number)
.block_by_number(block_number)
.await
.map_err(|e| format!("{e:?}"))?
.ok_or_else(|| "couldn't get block which should've been finalized".to_string())?;
let serai = serai.as_of(block.hash());
let events = serai.events(block.header.hash()).await.map_err(|e| format!("{e:?}"))?;

if !serai.validator_sets().key_gen_events().await.map_err(|e| format!("{e:?}"))?.is_empty() {
return Ok((block, HasEvents::Notable));
if events.validator_sets().set_keys_events().next().is_some() {
return Ok((block, events, HasEvents::Notable));
}

if !serai.coins().burn_with_instruction_events().await.map_err(|e| format!("{e:?}"))?.is_empty() {
return Ok((block, HasEvents::NonNotable));
if events.coins().burn_with_instruction_events().next().is_some() {
return Ok((block, events, HasEvents::NonNotable));
}

Ok((block, HasEvents::No))
Ok((block, events, HasEvents::No))
}

// Fetch the `ExternalValidatorSet`s, and their associated keys, used for cosigning as of this
// block.
fn cosigning_sets(getter: &impl Get) -> Vec<(ExternalValidatorSet, Public, Amount)> {
let mut sets = vec![];
for network in ExternalNetworkId::all() {
let Some(Set { session, key, stake }) = LatestSet::get(getter, network) else {
// If this network doesn't have usable keys, move on
continue;
};

sets.push((ExternalValidatorSet { network, session }, key, stake));
}
sets
}

/// A task to determine which blocks we should intend to cosign.
@@ -67,56 +104,108 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
async move {
let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1);
let latest_block_number =
self.serai.latest_finalized_block().await.map_err(|e| format!("{e:?}"))?.number();
self.serai.latest_finalized_block_number().await.map_err(|e| format!("{e:?}"))?;

for block_number in start_block_number ..= latest_block_number {
let mut txn = self.db.txn();

let (block, mut has_events) =
let (block, events, mut has_events) =
block_has_events_justifying_a_cosign(&self.serai, block_number)
.await
.map_err(|e| format!("{e:?}"))?;

let mut builds_upon =
BuildsUpon::get(&txn).unwrap_or(IncrementalUnbalancedMerkleTree::new());

// Check we are indexing a linear chain
if (block_number > 1) &&
(<[u8; 32]>::from(block.header.parent_hash) !=
SubstrateBlockHash::get(&txn, block_number - 1)
.expect("indexing a block but haven't indexed its parent"))
if block.header.builds_upon() !=
builds_upon.clone().calculate(serai_client_serai::abi::BLOCK_HEADER_BRANCH_TAG)
{
Err(format!(
"node's block #{block_number} doesn't build upon the block #{} prior indexed",
block_number - 1
))?;
}
let block_hash = block.hash();
let block_hash = block.header.hash();
SubstrateBlockHash::set(&mut txn, block_number, &block_hash);
builds_upon.append(
serai_client_serai::abi::BLOCK_HEADER_BRANCH_TAG,
Blake2b256::new_with_prefix([serai_client_serai::abi::BLOCK_HEADER_LEAF_TAG])
.chain_update(block_hash.0)
.finalize()
.into(),
);
BuildsUpon::set(&mut txn, &builds_upon);

// Update the stakes
for event in events.validator_sets().allocation_events() {
let Event::Allocation { validator, network, amount } = event else {
panic!("event from `allocation_events` wasn't `Event::Allocation`")
};
let Ok(network) = ExternalNetworkId::try_from(*network) else { continue };
let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0));
Stakes::set(&mut txn, network, *validator, &Amount(existing.0 + amount.0));
}
for event in events.validator_sets().deallocation_events() {
let Event::Deallocation { validator, network, amount, timeline: _ } = event else {
panic!("event from `deallocation_events` wasn't `Event::Deallocation`")
};
let Ok(network) = ExternalNetworkId::try_from(*network) else { continue };
let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0));
Stakes::set(&mut txn, network, *validator, &Amount(existing.0 - amount.0));
}

// Handle decided sets
for event in events.validator_sets().set_decided_events() {
let Event::SetDecided { set, validators } = event else {
panic!("event from `set_decided_events` wasn't `Event::SetDecided`")
};

let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
Validators::set(
&mut txn,
set,
&validators.iter().map(|(validator, _key_shares)| *validator).collect(),
);
}

// Handle declarations of the latest set
for event in events.validator_sets().set_keys_events() {
let Event::SetKeys { set, key_pair } = event else {
panic!("event from `set_keys_events` wasn't `Event::SetKeys`")
};
let mut stake = 0;
for validator in
Validators::take(&mut txn, *set).expect("set which wasn't decided set keys")
{
stake += Stakes::get(&txn, set.network, validator).unwrap_or(Amount(0)).0;
}
LatestSet::set(
&mut txn,
set.network,
&Set { session: set.session, key: key_pair.0, stake: Amount(stake) },
);
}

let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn);

// If this is notable, it creates a new global session, which we index into the database
// now
if has_events == HasEvents::Notable {
let serai = self.serai.as_of(block_hash);
let sets_and_keys = cosigning_sets(&serai).await?;
let global_session =
GlobalSession::id(sets_and_keys.iter().map(|(set, _key)| *set).collect());
let sets_and_keys_and_stakes = cosigning_sets(&txn);
let global_session = GlobalSession::id(
sets_and_keys_and_stakes.iter().map(|(set, _key, _stake)| *set).collect(),
);

let mut sets = Vec::with_capacity(sets_and_keys.len());
let mut keys = HashMap::with_capacity(sets_and_keys.len());
let mut stakes = HashMap::with_capacity(sets_and_keys.len());
let mut sets = Vec::with_capacity(sets_and_keys_and_stakes.len());
let mut keys = HashMap::with_capacity(sets_and_keys_and_stakes.len());
let mut stakes = HashMap::with_capacity(sets_and_keys_and_stakes.len());
let mut total_stake = 0;
for (set, key) in &sets_and_keys {
sets.push(*set);
keys.insert(set.network, SeraiAddress::from(*key));
let stake = serai
.validator_sets()
.total_allocated_stake(set.network.into())
.await
.map_err(|e| format!("{e:?}"))?
.unwrap_or(Amount(0))
.0;
stakes.insert(set.network, stake);
total_stake += stake;
for (set, key, stake) in sets_and_keys_and_stakes {
sets.push(set);
keys.insert(set.network, key);
stakes.insert(set.network, stake.0);
total_stake += stake.0;
}
if total_stake == 0 {
Err(format!("cosigning sets for block #{block_number} had 0 stake in total"))?;

@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

@@ -9,16 +9,24 @@ use blake2::{Digest, Blake2s256};

use borsh::{BorshSerialize, BorshDeserialize};

use serai_client::{
primitives::{ExternalNetworkId, SeraiAddress},
validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair},
Public, Block, Serai, TemporalSerai,
use serai_client_serai::{
abi::{
primitives::{
BlockHash,
crypto::{Public, KeyPair},
network_id::ExternalNetworkId,
validator_sets::{Session, ExternalValidatorSet},
address::SeraiAddress,
},
Block,
},
Serai, State,
};

use serai_db::*;
use serai_task::*;

use serai_cosign_types::*;
pub use serai_cosign_types::*;

/// The cosigns which are intended to be performed.
mod intend;
@@ -29,9 +37,6 @@ mod delay;
pub use delay::BROADCAST_FREQUENCY;
use delay::LatestCosignedBlockNumber;

/// The schnorrkel context to use when signing a cosign.
pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";

/// A 'global session', defined as all validator sets used for cosigning at a given moment.
///
/// We evaluate cosign faults within a global session. This ensures even if cosigners cosign
@@ -54,7 +59,7 @@ pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";
pub(crate) struct GlobalSession {
pub(crate) start_block_number: u64,
pub(crate) sets: Vec<ExternalValidatorSet>,
pub(crate) keys: HashMap<ExternalNetworkId, SeraiAddress>,
pub(crate) keys: HashMap<ExternalNetworkId, Public>,
pub(crate) stakes: HashMap<ExternalNetworkId, u64>,
pub(crate) total_stake: u64,
}
@@ -84,7 +89,7 @@ create_db! {
// The following are populated by the intend task and used throughout the library

// An index of Substrate blocks
SubstrateBlockHash: (block_number: u64) -> [u8; 32],
SubstrateBlockHash: (block_number: u64) -> BlockHash,
// A mapping from a global session's ID to its relevant information.
GlobalSessions: (global_session: [u8; 32]) -> GlobalSession,
// The last block to be cosigned by a global session.
@@ -116,60 +121,6 @@ create_db! {
}
}

/// Fetch the keys used for cosigning by a specific network.
async fn keys_for_network(
serai: &TemporalSerai<'_>,
network: ExternalNetworkId,
) -> Result<Option<(Session, KeyPair)>, String> {
let Some(latest_session) =
serai.validator_sets().session(network.into()).await.map_err(|e| format!("{e:?}"))?
else {
// If this network hasn't had a session declared, move on
return Ok(None);
};

// Get the keys for the latest session
if let Some(keys) = serai
.validator_sets()
.keys(ExternalValidatorSet { network, session: latest_session })
.await
.map_err(|e| format!("{e:?}"))?
{
return Ok(Some((latest_session, keys)));
}

// If the latest session has yet to set keys, use the prior session
if let Some(prior_session) = latest_session.0.checked_sub(1).map(Session) {
if let Some(keys) = serai
.validator_sets()
.keys(ExternalValidatorSet { network, session: prior_session })
.await
.map_err(|e| format!("{e:?}"))?
{
return Ok(Some((prior_session, keys)));
}
}

Ok(None)
}

/// Fetch the `ExternalValidatorSet`s, and their associated keys, used for cosigning as of this
/// block.
async fn cosigning_sets(
serai: &TemporalSerai<'_>,
) -> Result<Vec<(ExternalValidatorSet, Public)>, String> {
let mut sets = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
for network in serai_client::primitives::EXTERNAL_NETWORKS {
let Some((session, keys)) = keys_for_network(serai, network).await? else {
// If this network doesn't have usable keys, move on
continue;
};

sets.push((ExternalValidatorSet { network, session }, keys.0));
}
Ok(sets)
}

/// An object usable to request notable cosigns for a block.
pub trait RequestNotableCosigns: 'static + Send {
/// The error type which may be encountered when requesting notable cosigns.
@@ -270,7 +221,10 @@ impl<D: Db> Cosigning<D> {
}

/// Fetch a cosigned Substrate block's hash by its block number.
pub fn cosigned_block(getter: &impl Get, block_number: u64) -> Result<Option<[u8; 32]>, Faulted> {
pub fn cosigned_block(
getter: &impl Get,
block_number: u64,
) -> Result<Option<BlockHash>, Faulted> {
if block_number > Self::latest_cosigned_block_number(getter)? {
return Ok(None);
}
@@ -285,8 +239,8 @@ impl<D: Db> Cosigning<D> {
/// If this global session hasn't produced any notable cosigns, this will return the latest
/// cosigns for this session.
pub fn notable_cosigns(getter: &impl Get, global_session: [u8; 32]) -> Vec<SignedCosign> {
let mut cosigns = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
for network in serai_client::primitives::EXTERNAL_NETWORKS {
let mut cosigns = vec![];
for network in ExternalNetworkId::all() {
if let Some(cosign) = NetworksLatestCosignedBlock::get(getter, global_session, network) {
cosigns.push(cosign);
}
@@ -303,7 +257,7 @@ impl<D: Db> Cosigning<D> {
let mut cosigns = Faults::get(&self.db, faulted).expect("faulted with no faults");
// Also include all of our recognized-as-honest cosigns in an attempt to induce fault
// identification in those who see the faulty cosigns as honest
for network in serai_client::primitives::EXTERNAL_NETWORKS {
for network in ExternalNetworkId::all() {
if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, faulted, network) {
if cosign.cosign.global_session == faulted {
cosigns.push(cosign);
@@ -315,8 +269,8 @@ impl<D: Db> Cosigning<D> {
let Some(global_session) = evaluator::currently_evaluated_global_session(&self.db) else {
return vec![];
};
let mut cosigns = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
for network in serai_client::primitives::EXTERNAL_NETWORKS {
let mut cosigns = vec![];
for network in ExternalNetworkId::all() {
if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, global_session, network) {
cosigns.push(cosign);
}
@@ -371,13 +325,8 @@ impl<D: Db> Cosigning<D> {

// Check the cosign's signature
{
let key = Public::from({
let Some(key) = global_session.keys.get(&network) else {
Err(IntakeCosignError::NonParticipatingNetwork)?
};
*key
});

let key =
*global_session.keys.get(&network).ok_or(IntakeCosignError::NonParticipatingNetwork)?;
if !signed_cosign.verify_signature(key) {
Err(IntakeCosignError::InvalidSignature)?;
}

@@ -1,9 +1,9 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(missing_docs)]
//! Types used when cosigning Serai. For more info, please see `serai-cosign`.
use borsh::{BorshSerialize, BorshDeserialize};

use serai_primitives::{crypto::Public, network_id::ExternalNetworkId};
use serai_primitives::{BlockHash, crypto::Public, network_id::ExternalNetworkId};

/// The schnorrkel context to use when signing a cosign.
pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";
@@ -16,7 +16,7 @@ pub struct CosignIntent {
/// The number of the block to cosign.
pub block_number: u64,
/// The hash of the block to cosign.
pub block_hash: [u8; 32],
pub block_hash: BlockHash,
/// If this cosign must be handled before further cosigns are.
pub notable: bool,
}
@@ -29,7 +29,7 @@ pub struct Cosign {
/// The number of the block to cosign.
pub block_number: u64,
/// The hash of the block to cosign.
pub block_hash: [u8; 32],
pub block_hash: BlockHash,
/// The actual cosigner.
pub cosigner: ExternalNetworkId,
}

@@ -29,13 +29,13 @@ schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
hex = { version = "0.4", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }

serai-client = { path = "../../../substrate/client", default-features = false, features = ["serai"] }
serai-client-serai = { path = "../../../substrate/client/serai", default-features = false }
serai-cosign = { path = "../../cosign" }
tributary-sdk = { path = "../../tributary-sdk" }

futures-util = { version = "0.3", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["sync"] }
libp2p = { version = "0.54", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] }
libp2p = { version = "0.56", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] }

log = { version = "0.4", default-features = false, features = ["std"] }
serai-task = { path = "../../../common/task", version = "0.1" }

@@ -7,7 +7,7 @@ use rand_core::{RngCore, OsRng};
use blake2::{Digest, Blake2s256};
use schnorrkel::{Keypair, PublicKey, Signature};

use serai_client::primitives::PublicKey as Public;
use serai_client_serai::abi::primitives::crypto::Public;

use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use libp2p::{
@@ -104,7 +104,7 @@ impl OnlyValidators {
.verify_simple(PROTOCOL.as_bytes(), &msg, &sig)
.map_err(|_| io::Error::other("invalid signature"))?;

Ok(peer_id_from_public(Public::from_raw(public_key.to_bytes())))
Ok(peer_id_from_public(Public(public_key.to_bytes())))
}
}

@@ -1,11 +1,11 @@
use core::future::Future;
use core::{future::Future, str::FromStr};
use std::{sync::Arc, collections::HashSet};

use rand_core::{RngCore, OsRng};

use tokio::sync::mpsc;

use serai_client::{SeraiError, Serai};
use serai_client_serai::{RpcError, Serai};

use libp2p::{
core::multiaddr::{Protocol, Multiaddr},
@@ -50,7 +50,7 @@ impl ContinuallyRan for DialTask {
const DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 10 * 60;

type Error = SeraiError;
type Error = RpcError;

fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {
@@ -94,6 +94,13 @@ impl ContinuallyRan for DialTask {
usize::try_from(OsRng.next_u64() % u64::try_from(potential_peers.len()).unwrap())
.unwrap();
let randomly_selected_peer = potential_peers.swap_remove(index_to_dial);
let Ok(randomly_selected_peer) = libp2p::Multiaddr::from_str(&randomly_selected_peer)
else {
log::error!(
"peer from substrate wasn't a valid `Multiaddr`: {randomly_selected_peer}"
);
continue;
};

log::info!("found peer from substrate: {randomly_selected_peer}");

@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

@@ -13,9 +13,10 @@ use rand_core::{RngCore, OsRng};
use zeroize::Zeroizing;
use schnorrkel::Keypair;

use serai_client::{
primitives::{ExternalNetworkId, PublicKey},
validator_sets::primitives::ExternalValidatorSet,
use serai_client_serai::{
abi::primitives::{
crypto::Public, network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet,
},
Serai,
};

@@ -66,7 +67,7 @@ use dial::DialTask;

const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o')

fn peer_id_from_public(public: PublicKey) -> PeerId {
fn peer_id_from_public(public: Public) -> PeerId {
// 0 represents the identity Multihash, i.e. that no hash was performed
// It's an internal constant so we can't refer to the constant inside libp2p
PeerId::from_multihash(Multihash::wrap(0, &public.0).unwrap()).unwrap()

@@ -6,7 +6,7 @@ use std::{

use borsh::BorshDeserialize;

use serai_client::validator_sets::primitives::ExternalValidatorSet;
use serai_client_serai::abi::primitives::validator_sets::ExternalValidatorSet;

use tokio::sync::{mpsc, oneshot, RwLock};

@@ -92,7 +92,8 @@ impl SwarmTask {
}
}
gossip::Event::Subscribed { .. } | gossip::Event::Unsubscribed { .. } => {}
gossip::Event::GossipsubNotSupported { peer_id } => {
gossip::Event::GossipsubNotSupported { peer_id } |
gossip::Event::SlowPeer { peer_id, .. } => {
let _: Result<_, _> = self.swarm.disconnect_peer_id(peer_id);
}
}

@@ -4,9 +4,8 @@ use std::{
collections::{HashSet, HashMap},
};

use serai_client::{
primitives::ExternalNetworkId, validator_sets::primitives::Session, SeraiError, Serai,
};
use serai_client_serai::abi::primitives::{network_id::ExternalNetworkId, validator_sets::Session};
use serai_client_serai::{RpcError, Serai};

use serai_task::{Task, ContinuallyRan};

@@ -52,7 +51,7 @@ impl Validators {
async fn session_changes(
serai: impl Borrow<Serai>,
sessions: impl Borrow<HashMap<ExternalNetworkId, Session>>,
) -> Result<Vec<(ExternalNetworkId, Session, HashSet<PeerId>)>, SeraiError> {
) -> Result<Vec<(ExternalNetworkId, Session, HashSet<PeerId>)>, RpcError> {
/*
This uses the latest finalized block, not the latest cosigned block, which should be fine as
in the worst case, we'd connect to unexpected validators. They still shouldn't be able to
@@ -61,18 +60,18 @@ impl Validators {

Besides, we can't connect to historical validators, only the current validators.
*/
let temporal_serai = serai.borrow().as_of_latest_finalized_block().await?;
let temporal_serai = temporal_serai.validator_sets();
let serai = serai.borrow().state().await?;

let mut session_changes = vec![];
{
// FuturesUnordered can be bad practice as it'll cause timeouts if infrequently polled, but
// we poll it till it yields all futures with the most minimal processing possible
let mut futures = FuturesUnordered::new();
for network in serai_client::primitives::EXTERNAL_NETWORKS {
for network in ExternalNetworkId::all() {
let sessions = sessions.borrow();
let serai = serai.borrow();
futures.push(async move {
let session = match temporal_serai.session(network.into()).await {
let session = match serai.current_session(network.into()).await {
Ok(Some(session)) => session,
Ok(None) => return Ok(None),
Err(e) => return Err(e),
@@ -81,12 +80,16 @@ impl Validators {
if sessions.get(&network) == Some(&session) {
Ok(None)
} else {
match temporal_serai.active_network_validators(network.into()).await {
Ok(validators) => Ok(Some((
match serai.current_validators(network.into()).await {
Ok(Some(validators)) => Ok(Some((
network,
session,
validators.into_iter().map(peer_id_from_public).collect(),
validators
.into_iter()
.map(|validator| peer_id_from_public(validator.into()))
.collect(),
))),
Ok(None) => panic!("network has session yet no validators"),
Err(e) => Err(e),
}
}
@@ -153,7 +156,7 @@ impl Validators {
}

/// Update the view of the validators.
pub(crate) async fn update(&mut self) -> Result<(), SeraiError> {
pub(crate) async fn update(&mut self) -> Result<(), RpcError> {
let session_changes = Self::session_changes(&*self.serai, &self.sessions).await?;
self.incorporate_session_changes(session_changes);
Ok(())
@@ -206,7 +209,7 @@ impl ContinuallyRan for UpdateValidatorsTask {
const DELAY_BETWEEN_ITERATIONS: u64 = 60;
const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;

type Error = SeraiError;
type Error = RpcError;

fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {

@@ -1,7 +1,7 @@
use core::future::Future;
use std::time::{Duration, SystemTime};

use serai_primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet};
use serai_primitives::validator_sets::{ExternalValidatorSet, KeyShares};

use futures_lite::FutureExt;

@@ -30,7 +30,7 @@ pub const MIN_BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;
/// commit is `8 + (validators * 32) + (32 + (validators * 32))` (for the time, list of validators,
/// and aggregate signature). Accordingly, this should be a safe over-estimate.
pub const BATCH_SIZE_LIMIT: usize = MIN_BLOCKS_PER_BATCH *
(tributary_sdk::BLOCK_SIZE_LIMIT + 32 + ((MAX_KEY_SHARES_PER_SET as usize) * 128));
(tributary_sdk::BLOCK_SIZE_LIMIT + 32 + ((KeyShares::MAX_PER_SET as usize) * 128));

/// Sends a heartbeat to other validators on regular intervals informing them of our Tributary's
/// tip.

@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

@@ -5,9 +5,10 @@ use serai_db::{create_db, db_channel};

use dkg::Participant;

use serai_client::{
primitives::ExternalNetworkId,
validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair},
use serai_client_serai::abi::primitives::{
crypto::KeyPair,
network_id::ExternalNetworkId,
validator_sets::{Session, ExternalValidatorSet},
};

use serai_cosign::SignedCosign;
@@ -103,7 +104,7 @@ mod _internal_db {
// Tributary transactions to publish from the DKG confirmation task
TributaryTransactionsFromDkgConfirmation: (set: ExternalValidatorSet) -> Transaction,
// Participants to remove
RemoveParticipant: (set: ExternalValidatorSet) -> Participant,
RemoveParticipant: (set: ExternalValidatorSet) -> u16,
}
}
}
@@ -139,10 +140,11 @@ impl RemoveParticipant {
pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, participant: Participant) {
// If this set has yet to be retired, send this transaction
if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
_internal_db::RemoveParticipant::send(txn, set, &participant);
_internal_db::RemoveParticipant::send(txn, set, &u16::from(participant));
}
}
pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Participant> {
_internal_db::RemoveParticipant::try_recv(txn, set)
.map(|i| Participant::new(i).expect("sent invalid participant index for removal"))
}
}

@@ -12,10 +12,8 @@ use frost_schnorrkel::{

use serai_db::{DbTxn, Db as DbTrait};

use serai_client::{
primitives::SeraiAddress,
validator_sets::primitives::{ExternalValidatorSet, musig_context, set_keys_message},
};
#[rustfmt::skip]
use serai_client_serai::abi::primitives::{validator_sets::ExternalValidatorSet, address::SeraiAddress};

use serai_task::{DoesNotError, ContinuallyRan};

@@ -160,7 +158,7 @@ impl<CD: DbTrait, TD: DbTrait> ConfirmDkgTask<CD, TD> {
let (machine, preprocess) = AlgorithmMachine::new(
schnorrkel(),
// We use a 1-of-1 Musig here as we don't know who will actually be in this Musig yet
musig(musig_context(set.into()), key, &[public_key]).unwrap(),
musig(ExternalValidatorSet::musig_context(&set), key, &[public_key]).unwrap(),
)
.preprocess(&mut OsRng);
// We take the preprocess so we can use it in a distinct machine with the actual Musig
@@ -260,9 +258,12 @@ impl<CD: DbTrait, TD: DbTrait> ContinuallyRan for ConfirmDkgTask<CD, TD> {
})
.collect::<Vec<_>>();

let keys =
musig(musig_context(self.set.set.into()), self.key.clone(), &musig_public_keys)
.unwrap();
let keys = musig(
ExternalValidatorSet::musig_context(&self.set.set),
self.key.clone(),
&musig_public_keys,
)
.unwrap();

// Rebuild the machine
let (machine, preprocess_from_cache) =
@@ -296,9 +297,10 @@ impl<CD: DbTrait, TD: DbTrait> ContinuallyRan for ConfirmDkgTask<CD, TD> {
};

// Calculate our share
let (machine, share) = match handle_frost_error(
machine.sign(preprocesses, &set_keys_message(&self.set.set, &key_pair)),
) {
let (machine, share) = match handle_frost_error(machine.sign(
preprocesses,
&ExternalValidatorSet::set_keys_message(&self.set.set, &key_pair),
)) {
Ok((machine, share)) => (machine, share),
// This yields the *musig participant index*
Err(participant) => {

@@ -14,9 +14,14 @@ use borsh::BorshDeserialize;

use tokio::sync::mpsc;

use serai_client::{
primitives::{ExternalNetworkId, PublicKey, SeraiAddress, Signature},
validator_sets::primitives::{ExternalValidatorSet, KeyPair},
use serai_client_serai::{
abi::primitives::{
BlockHash,
crypto::{Public, Signature, ExternalKey, KeyPair},
network_id::ExternalNetworkId,
validator_sets::ExternalValidatorSet,
address::SeraiAddress,
},
Serai,
};
use message_queue::{Service, client::MessageQueue};
@@ -61,9 +66,7 @@ async fn serai() -> Arc<Serai> {
let Ok(serai) = Serai::new(format!(
"http://{}:9944",
serai_env::var("SERAI_HOSTNAME").expect("Serai hostname wasn't provided")
))
.await
else {
)) else {
log::error!("couldn't connect to the Serai node");
tokio::time::sleep(delay).await;
delay = (delay + SERAI_CONNECTION_DELAY).min(MAX_SERAI_CONNECTION_DELAY);
@@ -213,10 +216,12 @@ async fn handle_network(
&mut txn,
ExternalValidatorSet { network, session },
&KeyPair(
PublicKey::from_raw(substrate_key),
network_key
.try_into()
.expect("generated a network key which exceeds the maximum key length"),
Public(substrate_key),
ExternalKey(
network_key
.try_into()
.expect("generated a network key which exceeds the maximum key length"),
),
),
);
}
@@ -290,6 +295,7 @@ async fn handle_network(
},
messages::ProcessorMessage::Substrate(msg) => match msg {
messages::substrate::ProcessorMessage::SubstrateBlockAck { block, plans } => {
let block = BlockHash(block);
let mut by_session = HashMap::new();
for plan in plans {
by_session
@@ -481,7 +487,7 @@ async fn main() {
);

// Handle each of the networks
for network in serai_client::primitives::EXTERNAL_NETWORKS {
for network in ExternalNetworkId::all() {
tokio::spawn(handle_network(db.clone(), message_queue.clone(), serai.clone(), network));
}

@@ -10,7 +10,10 @@ use tokio::sync::mpsc;

use serai_db::{DbTxn, Db as DbTrait};

use serai_client::validator_sets::primitives::{Session, ExternalValidatorSet};
use serai_client_serai::abi::primitives::{
network_id::ExternalNetworkId,
validator_sets::{Session, ExternalValidatorSet},
};
use message_queue::{Service, Metadata, client::MessageQueue};

use tributary_sdk::Tributary;
@@ -39,7 +42,7 @@ impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
let mut made_progress = false;

// Handle the Canonical events
for network in serai_client::primitives::EXTERNAL_NETWORKS {
for network in ExternalNetworkId::all() {
loop {
let mut txn = self.db.txn();
let Some(msg) = serai_coordinator_substrate::Canonical::try_recv(&mut txn, network)

@@ -11,7 +11,7 @@ use tokio::sync::mpsc;

use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel};

use serai_client::validator_sets::primitives::ExternalValidatorSet;
use serai_client_serai::abi::primitives::validator_sets::ExternalValidatorSet;

use tributary_sdk::{TransactionKind, TransactionError, ProvidedError, TransactionTrait, Tributary};

@@ -24,12 +24,11 @@ borsh = { version = "1", default-features = false, features = ["std", "derive",

dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] }

serai-client = { path = "../../substrate/client", version = "0.1", default-features = false, features = ["serai"] }
serai-client-serai = { path = "../../substrate/client/serai", default-features = false }

log = { version = "0.4", default-features = false, features = ["std"] }

futures = { version = "0.3", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false }

serai-db = { path = "../../common/db", version = "0.1.1" }
serai-task = { path = "../../common/task", version = "0.1" }

@@ -3,7 +3,13 @@ use std::sync::Arc;

use futures::stream::{StreamExt, FuturesOrdered};

use serai_client::{validator_sets::primitives::ExternalValidatorSet, Serai};
use serai_client_serai::{
abi::{
self,
primitives::{network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet},
},
Serai,
};

use messages::substrate::{InInstructionResult, ExecutedBatch, CoordinatorMessage};

@@ -15,6 +21,7 @@ use serai_cosign::Cosigning;
create_db!(
CoordinatorSubstrateCanonical {
NextBlock: () -> u64,
LastIndexedBatchId: (network: ExternalNetworkId) -> u32,
}
);

@@ -45,10 +52,10 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
// These are all the events which generate canonical messages
struct CanonicalEvents {
time: u64,
key_gen_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
set_retired_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
batch_events: Vec<serai_client::in_instructions::InInstructionsEvent>,
burn_events: Vec<serai_client::coins::CoinsEvent>,
set_keys_events: Vec<abi::validator_sets::Event>,
slash_report_events: Vec<abi::validator_sets::Event>,
batch_events: Vec<abi::in_instructions::Event>,
burn_events: Vec<abi::coins::Event>,
}

// For a cosigned block, fetch all relevant events
@@ -66,40 +73,24 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
}
Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()),
};
let temporal_serai = serai.as_of(block_hash);
let temporal_serai_validators = temporal_serai.validator_sets();
let temporal_serai_instructions = temporal_serai.in_instructions();
let temporal_serai_coins = temporal_serai.coins();

let (block, key_gen_events, set_retired_events, batch_events, burn_events) =
tokio::try_join!(
serai.block(block_hash),
temporal_serai_validators.key_gen_events(),
temporal_serai_validators.set_retired_events(),
temporal_serai_instructions.batch_events(),
temporal_serai_coins.burn_with_instruction_events(),
)
.map_err(|e| format!("{e:?}"))?;
let Some(block) = block else {
let events = serai.events(block_hash).await.map_err(|e| format!("{e}"))?;
let set_keys_events = events.validator_sets().set_keys_events().cloned().collect();
let slash_report_events =
events.validator_sets().slash_report_events().cloned().collect();
let batch_events = events.in_instructions().batch_events().cloned().collect();
let burn_events = events.coins().burn_with_instruction_events().cloned().collect();
let Some(block) = serai.block(block_hash).await.map_err(|e| format!("{e:?}"))? else {
Err(format!("Serai node didn't have cosigned block #{block_number}"))?
};

let time = if block_number == 0 {
block.time().unwrap_or(0)
} else {
// Serai's block time is in milliseconds
block
.time()
.ok_or_else(|| "non-genesis Serai block didn't have a time".to_string())? /
1000
};

// We use time in seconds, not milliseconds, here
let time = block.header.unix_time_in_millis() / 1000;
Ok((
block_number,
CanonicalEvents {
time,
key_gen_events,
set_retired_events,
set_keys_events,
slash_report_events,
batch_events,
burn_events,
},
@@ -131,10 +122,9 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D>
|
||||
|
||||
let mut txn = self.db.txn();
|
||||
|
||||
for key_gen in block.key_gen_events {
|
||||
let serai_client::validator_sets::ValidatorSetsEvent::KeyGen { set, key_pair } = &key_gen
|
||||
else {
|
||||
panic!("KeyGen event wasn't a KeyGen event: {key_gen:?}");
|
||||
for set_keys in block.set_keys_events {
|
||||
let abi::validator_sets::Event::SetKeys { set, key_pair } = &set_keys else {
|
||||
panic!("`SetKeys` event wasn't a `SetKeys` event: {set_keys:?}");
|
||||
};
|
||||
crate::Canonical::send(
|
||||
&mut txn,
|
||||
@@ -147,12 +137,10 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
|
||||
);
|
||||
}
|
||||
|
||||
for set_retired in block.set_retired_events {
|
||||
let serai_client::validator_sets::ValidatorSetsEvent::SetRetired { set } = &set_retired
|
||||
else {
|
||||
panic!("SetRetired event wasn't a SetRetired event: {set_retired:?}");
|
||||
for slash_report in block.slash_report_events {
|
||||
let abi::validator_sets::Event::SlashReport { set } = &slash_report else {
|
||||
panic!("`SlashReport` event wasn't a `SlashReport` event: {slash_report:?}");
|
||||
};
|
||||
let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
|
||||
crate::Canonical::send(
|
||||
&mut txn,
|
||||
set.network,
|
||||
@@ -160,10 +148,12 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
|
||||
);
|
||||
}
|
||||
|
||||
for network in serai_client::primitives::EXTERNAL_NETWORKS {
|
||||
for network in ExternalNetworkId::all() {
|
||||
let mut batch = None;
|
||||
for this_batch in &block.batch_events {
|
||||
let serai_client::in_instructions::InInstructionsEvent::Batch {
|
||||
// Only irrefutable as this is the only member of the enum at this time
|
||||
#[expect(irrefutable_let_patterns)]
|
||||
let abi::in_instructions::Event::Batch {
|
||||
network: batch_network,
|
||||
publishing_session,
|
||||
id,
|
||||
@@ -194,14 +184,19 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
|
||||
})
|
||||
.collect(),
|
||||
});
|
||||
|
||||
if LastIndexedBatchId::get(&txn, network) != id.checked_sub(1) {
|
||||
panic!(
|
||||
"next batch from Serai's ID was not an increment of the last indexed batch's ID"
|
||||
);
|
||||
}
|
||||
LastIndexedBatchId::set(&mut txn, network, id);
|
||||
}
|
||||
}
|
||||
|
||||
let mut burns = vec![];
|
||||
for burn in &block.burn_events {
|
||||
let serai_client::coins::CoinsEvent::BurnWithInstruction { from: _, instruction } =
|
||||
&burn
|
||||
else {
|
||||
let abi::coins::Event::BurnWithInstruction { from: _, instruction } = &burn else {
|
||||
panic!("BurnWithInstruction event wasn't a BurnWithInstruction event: {burn:?}");
|
||||
};
|
||||
if instruction.balance.coin.network() == network {
|
||||
@@ -223,3 +218,7 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn last_indexed_batch_id(txn: &impl DbTxn, network: ExternalNetworkId) -> Option<u32> {
|
||||
LastIndexedBatchId::get(txn, network)
|
||||
}
|
||||
|
||||
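The new `LastIndexedBatchId` entry enforces that batch IDs arrive as a gapless sequence. The `Option` comparison covers the genesis case for free: `0u32.checked_sub(1)` is `None`, which is exactly what an untouched DB entry returns. The invariant in isolation, with the DB reduced to an `Option`:

```rust
fn check_continuity(last_indexed: Option<u32>, next_id: u32) {
  // For next_id == 0, checked_sub yields None, matching a DB with no batches indexed;
  // for anything later, the immediately prior ID must already be recorded.
  if last_indexed != next_id.checked_sub(1) {
    panic!("next batch from Serai's ID was not an increment of the last indexed batch's ID");
  }
}

fn main() {
  check_continuity(None, 0); // the first batch ever
  check_continuity(Some(0), 1); // a strict increment
  // check_continuity(None, 1) would panic: batch 0 was never indexed
}
```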
@@ -3,9 +3,14 @@ use std::sync::Arc;

use futures::stream::{StreamExt, FuturesOrdered};

use serai_client::{
primitives::{SeraiAddress, EmbeddedEllipticCurve},
validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet},
use serai_client_serai::{
abi::primitives::{
BlockHash,
crypto::EmbeddedEllipticCurveKeys as EmbeddedEllipticCurveKeysStruct,
network_id::ExternalNetworkId,
validator_sets::{KeyShares, ExternalValidatorSet},
address::SeraiAddress,
},
Serai,
};

@@ -19,6 +24,10 @@ use crate::NewSetInformation;
create_db!(
CoordinatorSubstrateEphemeral {
NextBlock: () -> u64,
EmbeddedEllipticCurveKeys: (
network: ExternalNetworkId,
validator: SeraiAddress
) -> EmbeddedEllipticCurveKeysStruct,
}
);

@@ -49,10 +58,11 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {

// These are all the events which generate canonical messages
struct EphemeralEvents {
block_hash: [u8; 32],
block_hash: BlockHash,
time: u64,
new_set_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
accepted_handover_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
embedded_elliptic_curve_keys_events: Vec<serai_client_serai::abi::validator_sets::Event>,
set_decided_events: Vec<serai_client_serai::abi::validator_sets::Event>,
accepted_handover_events: Vec<serai_client_serai::abi::validator_sets::Event>,
}

// For a cosigned block, fetch all relevant events
@@ -71,31 +81,31 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()),
};

let temporal_serai = serai.as_of(block_hash);
let temporal_serai_validators = temporal_serai.validator_sets();
let (block, new_set_events, accepted_handover_events) = tokio::try_join!(
serai.block(block_hash),
temporal_serai_validators.new_set_events(),
temporal_serai_validators.accepted_handover_events(),
)
.map_err(|e| format!("{e:?}"))?;
let Some(block) = block else {
let events = serai.events(block_hash).await.map_err(|e| format!("{e}"))?;
let embedded_elliptic_curve_keys_events = events
.validator_sets()
.set_embedded_elliptic_curve_keys_events()
.cloned()
.collect::<Vec<_>>();
let set_decided_events =
events.validator_sets().set_decided_events().cloned().collect::<Vec<_>>();
let accepted_handover_events =
events.validator_sets().accepted_handover_events().cloned().collect::<Vec<_>>();
let Some(block) = serai.block(block_hash).await.map_err(|e| format!("{e:?}"))? else {
Err(format!("Serai node didn't have cosigned block #{block_number}"))?
};

let time = if block_number == 0 {
block.time().unwrap_or(0)
} else {
// Serai's block time is in milliseconds
block
.time()
.ok_or_else(|| "non-genesis Serai block didn't have a time".to_string())? /
1000
};

// We use time in seconds, not milliseconds, here
let time = block.header.unix_time_in_millis() / 1000;
Ok((
block_number,
EphemeralEvents { block_hash, time, new_set_events, accepted_handover_events },
EphemeralEvents {
block_hash,
time,
embedded_elliptic_curve_keys_events,
set_decided_events,
accepted_handover_events,
},
))
}
}
@@ -126,105 +136,82 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {

let mut txn = self.db.txn();

for new_set in block.new_set_events {
let serai_client::validator_sets::ValidatorSetsEvent::NewSet { set } = &new_set else {
panic!("NewSet event wasn't a NewSet event: {new_set:?}");
for event in block.embedded_elliptic_curve_keys_events {
let serai_client_serai::abi::validator_sets::Event::SetEmbeddedEllipticCurveKeys {
validator,
keys,
} = &event
else {
panic!(
"{}: {event:?}",
"`SetEmbeddedEllipticCurveKeys` event wasn't a `SetEmbeddedEllipticCurveKeys` event"
);
};

EmbeddedEllipticCurveKeys::set(&mut txn, keys.network(), *validator, keys);
}

for set_decided in block.set_decided_events {
let serai_client_serai::abi::validator_sets::Event::SetDecided { set, validators } =
&set_decided
else {
panic!("`SetDecided` event wasn't a `SetDecided` event: {set_decided:?}");
};

// We only coordinate over external networks
let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
let validators =
validators.iter().map(|(validator, weight)| (*validator, weight.0)).collect::<Vec<_>>();

let serai = self.serai.as_of(block.block_hash);
let serai = serai.validator_sets();
let Some(validators) =
serai.participants(set.network.into()).await.map_err(|e| format!("{e:?}"))?
else {
Err(format!(
"block #{block_number} declared a new set but didn't have the participants"
))?
};
let validators = validators
.into_iter()
.map(|(validator, weight)| (SeraiAddress::from(validator), weight))
.collect::<Vec<_>>();
let in_set = validators.iter().any(|(validator, _)| *validator == self.validator);
if in_set {
if u16::try_from(validators.len()).is_err() {
Err("more than u16::MAX validators sent")?;
}

let Ok(validators) = validators
.into_iter()
.map(|(validator, weight)| u16::try_from(weight).map(|weight| (validator, weight)))
.collect::<Result<Vec<_>, _>>()
else {
Err("validator's weight exceeded u16::MAX".to_string())?
};

// Do the summation in u32 so we don't risk a u16 overflow
let total_weight = validators.iter().map(|(_, weight)| u32::from(*weight)).sum::<u32>();
if total_weight > u32::from(MAX_KEY_SHARES_PER_SET) {
if total_weight > u32::from(KeyShares::MAX_PER_SET) {
Err(format!(
"{set:?} has {total_weight} key shares when the max is {MAX_KEY_SHARES_PER_SET}"
"{set:?} has {total_weight} key shares when the max is {}",
KeyShares::MAX_PER_SET
))?;
}
let total_weight = u16::try_from(total_weight).unwrap();
let total_weight = u16::try_from(total_weight)
.expect("value smaller than `u16` constant but doesn't fit in `u16`");

// Fetch all of the validators' embedded elliptic curve keys
let mut embedded_elliptic_curve_keys = FuturesOrdered::new();
for (validator, _) in &validators {
let validator = *validator;
// try_join doesn't return a future so we need to wrap it in this additional async
// block
embedded_elliptic_curve_keys.push_back(async move {
tokio::try_join!(
// One future to fetch the substrate embedded key
serai.embedded_elliptic_curve_key(
validator.into(),
EmbeddedEllipticCurve::Embedwards25519
),
// One future to fetch the external embedded key, if there is a distinct curve
async {
// `embedded_elliptic_curves` is documented to have the second entry be the
// network-specific curve (if it exists and is distinct from Embedwards25519)
if let Some(curve) = set.network.embedded_elliptic_curves().get(1) {
serai.embedded_elliptic_curve_key(validator.into(), *curve).await.map(Some)
} else {
Ok(None)
}
}
)
.map(|(substrate_embedded_key, external_embedded_key)| {
(validator, substrate_embedded_key, external_embedded_key)
})
});
}

let mut evrf_public_keys = Vec::with_capacity(usize::from(total_weight));
for (validator, weight) in &validators {
let (future_validator, substrate_embedded_key, external_embedded_key) =
embedded_elliptic_curve_keys.next().await.unwrap().map_err(|e| format!("{e:?}"))?;
assert_eq!(*validator, future_validator);
let external_embedded_key =
external_embedded_key.unwrap_or(substrate_embedded_key.clone());
match (substrate_embedded_key, external_embedded_key) {
(Some(substrate_embedded_key), Some(external_embedded_key)) => {
let substrate_embedded_key = <[u8; 32]>::try_from(substrate_embedded_key)
.map_err(|_| "Embedwards25519 key wasn't 32 bytes".to_string())?;
for _ in 0 .. *weight {
evrf_public_keys.push((substrate_embedded_key, external_embedded_key.clone()));
}
let keys = match EmbeddedEllipticCurveKeys::get(&txn, set.network, *validator)
.expect("selected validator lacked embedded elliptic curve keys")
{
EmbeddedEllipticCurveKeysStruct::Bitcoin(substrate, external) => {
assert_eq!(set.network, ExternalNetworkId::Bitcoin);
(substrate, external.to_vec())
}
_ => Err("NewSet with validator missing an embedded key".to_string())?,
EmbeddedEllipticCurveKeysStruct::Ethereum(substrate, external) => {
assert_eq!(set.network, ExternalNetworkId::Ethereum);
(substrate, external.to_vec())
}
EmbeddedEllipticCurveKeysStruct::Monero(substrate) => {
assert_eq!(set.network, ExternalNetworkId::Monero);
(substrate, substrate.to_vec())
}
};
for _ in 0 .. *weight {
evrf_public_keys.push(keys.clone());
}
}

let mut new_set = NewSetInformation {
set,
serai_block: block.block_hash,
serai_block: block.block_hash.0,
declaration_time: block.time,
// TODO: This should be inlined into the Processor's key gen code
// It's legacy from when we removed participants from the key gen
threshold: ((total_weight * 2) / 3) + 1,
// TODO: Why are `validators` and `evrf_public_keys` two separate fields?
validators,
evrf_public_keys,
participant_indexes: Default::default(),
@@ -238,7 +225,7 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
}

for accepted_handover in block.accepted_handover_events {
let serai_client::validator_sets::ValidatorSetsEvent::AcceptedHandover { set } =
let serai_client_serai::abi::validator_sets::Event::AcceptedHandover { set } =
&accepted_handover
else {
panic!("AcceptedHandover event wasn't a AcceptedHandover event: {accepted_handover:?}");
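Two details of the weight handling above are worth spelling out: weights are summed in `u32` (per-validator weights are `u16`, and with at most `u16::MAX` validators the sum still fits in `u32`), and only after checking against the `u16` bound is the total narrowed back down. A standalone sketch, with a stand-in constant for `KeyShares::MAX_PER_SET`:

```rust
// Illustrative bound; the real limit is `KeyShares::MAX_PER_SET`
const MAX_KEY_SHARES_PER_SET: u16 = 600;

fn total_weight(weights: &[u16]) -> Result<u16, String> {
  // Sum in u32: even u16::MAX validators of weight u16::MAX stays below u32::MAX
  let total = weights.iter().map(|weight| u32::from(*weight)).sum::<u32>();
  if total > u32::from(MAX_KEY_SHARES_PER_SET) {
    return Err(format!("{total} key shares when the max is {MAX_KEY_SHARES_PER_SET}"));
  }
  // Infallible in practice: the value was just bounded by a u16 constant
  Ok(u16::try_from(total).expect("value smaller than `u16` constant but doesn't fit in `u16`"))
}

fn main() {
  assert_eq!(total_weight(&[2, 3]), Ok(5));
  assert!(total_weight(&[u16::MAX, u16::MAX]).is_err());
}
```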
@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

@@ -8,10 +8,14 @@ use borsh::{BorshSerialize, BorshDeserialize};

use dkg::Participant;

use serai_client::{
primitives::{ExternalNetworkId, SeraiAddress, Signature},
validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair, SlashReport},
in_instructions::primitives::SignedBatch,
use serai_client_serai::abi::{
primitives::{
network_id::ExternalNetworkId,
validator_sets::{Session, ExternalValidatorSet, SlashReport},
crypto::{Signature, KeyPair},
address::SeraiAddress,
instructions::SignedBatch,
},
Transaction,
};

@@ -19,6 +23,7 @@ use serai_db::*;

mod canonical;
pub use canonical::CanonicalEventStream;
use canonical::last_indexed_batch_id;
mod ephemeral;
pub use ephemeral::EphemeralEventStream;

@@ -37,7 +42,7 @@ pub struct NewSetInformation {
pub set: ExternalValidatorSet,
/// The Serai block which declared it.
pub serai_block: [u8; 32],
/// The time of the block which declared it, in seconds.
/// The time of the block which declared it, in seconds since the epoch.
pub declaration_time: u64,
/// The threshold to use.
pub threshold: u16,
@@ -96,9 +101,9 @@ mod _public_db {
create_db!(
CoordinatorSubstrate {
// Keys to set on the Serai network
Keys: (network: ExternalNetworkId) -> (Session, Vec<u8>),
Keys: (network: ExternalNetworkId) -> (Session, Transaction),
// Slash reports to publish onto the Serai network
SlashReports: (network: ExternalNetworkId) -> (Session, Vec<u8>),
SlashReports: (network: ExternalNetworkId) -> (Session, Transaction),
}
);
}
@@ -171,7 +176,7 @@ impl Keys {
}
}

let tx = serai_client::validator_sets::SeraiValidatorSets::set_keys(
let tx = serai_client_serai::ValidatorSets::set_keys(
set.network,
key_pair,
signature_participants,
@@ -192,7 +197,7 @@ pub struct SignedBatches;
impl SignedBatches {
/// Send a `SignedBatch` to publish onto Serai.
pub fn send(txn: &mut impl DbTxn, batch: &SignedBatch) {
_public_db::SignedBatches::send(txn, batch.batch.network, batch);
_public_db::SignedBatches::send(txn, batch.batch.network(), batch);
}
pub(crate) fn try_recv(txn: &mut impl DbTxn, network: ExternalNetworkId) -> Option<SignedBatch> {
_public_db::SignedBatches::try_recv(txn, network)
@@ -219,11 +224,8 @@ impl SlashReports {
}
}

let tx = serai_client::validator_sets::SeraiValidatorSets::report_slashes(
set.network,
slash_report,
signature,
);
let tx =
serai_client_serai::ValidatorSets::report_slashes(set.network, slash_report, signature);
_public_db::SlashReports::set(txn, set.network, &(set.session, tx));
}
pub(crate) fn take(

@@ -1,8 +1,10 @@
use core::future::Future;
use std::sync::Arc;

#[rustfmt::skip]
use serai_client::{primitives::ExternalNetworkId, in_instructions::primitives::SignedBatch, SeraiError, Serai};
use serai_client_serai::{
abi::primitives::{network_id::ExternalNetworkId, instructions::SignedBatch},
RpcError, Serai,
};

use serai_db::{Get, DbTxn, Db, create_db};
use serai_task::ContinuallyRan;
@@ -31,7 +33,7 @@ impl<D: Db> PublishBatchTask<D> {
}

impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
type Error = SeraiError;
type Error = RpcError;

fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {
@@ -43,8 +45,8 @@ impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
};

// If this is a Batch not yet published, save it into our unordered mapping
if LastPublishedBatch::get(&txn, self.network) < Some(batch.batch.id) {
BatchesToPublish::set(&mut txn, self.network, batch.batch.id, &batch);
if LastPublishedBatch::get(&txn, self.network) < Some(batch.batch.id()) {
BatchesToPublish::set(&mut txn, self.network, batch.batch.id(), &batch);
}

txn.commit();
@@ -52,12 +54,8 @@ impl<D: Db> ContinuallyRan for PublishBatchTask<D> {

// Synchronize our last published batch with the Serai network's
let next_to_publish = {
// This uses the latest finalized block, not the latest cosigned block, which should be
// fine as in the worst case, the only impact is no longer attempting TX publication
let serai = self.serai.as_of_latest_finalized_block().await?;
let last_batch = serai.in_instructions().last_batch_for_network(self.network).await?;

let mut txn = self.db.txn();
let last_batch = crate::last_indexed_batch_id(&txn, self.network);
let mut our_last_batch = LastPublishedBatch::get(&txn, self.network);
while our_last_batch < last_batch {
let next_batch = our_last_batch.map(|batch| batch + 1).unwrap_or(0);
@@ -68,6 +66,7 @@ impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
if let Some(last_batch) = our_last_batch {
LastPublishedBatch::set(&mut txn, self.network, &last_batch);
}
txn.commit();
last_batch.map(|batch| batch + 1).unwrap_or(0)
};

@@ -75,7 +74,7 @@ impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
if let Some(batch) = BatchesToPublish::get(&self.db, self.network, next_to_publish) {
self
.serai
.publish(&serai_client::in_instructions::SeraiInInstructions::execute_batch(batch))
.publish_transaction(&serai_client_serai::InInstructions::execute_batch(batch))
.await?;
true
} else {
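`PublishBatchTask` now learns which batches Serai has accepted from its own indexing (`last_indexed_batch_id`, written by the canonical event stream) instead of an extra RPC query. Stripped of the DB plumbing, the ordering logic reduces to the following sketch:

```rust
fn next_to_publish(last_indexed: Option<u32>, mut our_last_published: Option<u32>) -> u32 {
  // Everything Serai has indexed no longer needs publication (the real task also
  // drops the corresponding `BatchesToPublish` entries as it advances)
  while our_last_published < last_indexed {
    let next = our_last_published.map(|batch| batch + 1).unwrap_or(0);
    our_last_published = Some(next);
  }
  // The first batch past the indexed ones is the publication candidate
  last_indexed.map(|batch| batch + 1).unwrap_or(0)
}

fn main() {
  assert_eq!(next_to_publish(None, None), 0); // nothing indexed: start at batch 0
  assert_eq!(next_to_publish(Some(4), Some(2)), 5); // catch up past 3 and 4, then publish 5
}
```

`Option<u32>` ordering does the right thing here, since `None` sorts before every `Some`.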
@@ -3,7 +3,10 @@ use std::sync::Arc;

use serai_db::{DbTxn, Db};

use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::Session, Serai};
use serai_client_serai::{
abi::primitives::{network_id::ExternalNetworkId, validator_sets::Session},
Serai,
};

use serai_task::ContinuallyRan;

@@ -33,10 +36,10 @@ impl<D: Db> PublishSlashReportTask<D> {

// This uses the latest finalized block, not the latest cosigned block, which should be
// fine as in the worst case, the only impact is no longer attempting TX publication
let serai = self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
let serai = serai.validator_sets();
let serai = self.serai.state().await.map_err(|e| format!("{e:?}"))?;
let session_after_slash_report = Session(session.0 + 1);
let current_session = serai.session(network.into()).await.map_err(|e| format!("{e:?}"))?;
let current_session =
serai.current_session(network.into()).await.map_err(|e| format!("{e:?}"))?;
let current_session = current_session.map(|session| session.0);
// Only attempt to publish the slash report for session #n while session #n+1 is still
// active
@@ -55,14 +58,13 @@ impl<D: Db> PublishSlashReportTask<D> {
}

// If this session which should publish a slash report already has, move on
let key_pending_slash_report =
serai.key_pending_slash_report(network).await.map_err(|e| format!("{e:?}"))?;
if key_pending_slash_report.is_none() {
if !serai.pending_slash_report(network).await.map_err(|e| format!("{e:?}"))? {
txn.commit();
return Ok(false);
};

match self.serai.publish(&slash_report).await {
// Since this slash report is still pending, publish it
match self.serai.publish_transaction(&slash_report).await {
Ok(()) => {
txn.commit();
Ok(true)
@@ -84,7 +86,7 @@ impl<D: Db> ContinuallyRan for PublishSlashReportTask<D> {
async move {
let mut made_progress = false;
let mut error = None;
for network in serai_client::primitives::EXTERNAL_NETWORKS {
for network in ExternalNetworkId::all() {
let network_res = self.publish(network).await;
// We made progress if any network successfully published their slash report
made_progress |= network_res == Ok(true);
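The slash-report flow also has a timing rule baked in: session #n's report is only published while session #n+1 is active, and the old `key_pending_slash_report` query becomes a plain `pending_slash_report` boolean. A sketch of just the session guard (a hypothetical helper; the task itself handles more early-return cases than shown here):

```rust
/// Whether session #n's slash report is still worth publishing.
fn slash_report_still_relevant(session: u32, current_session: Option<u32>) -> bool {
  let session_after_slash_report = session + 1;
  // Published during session #n+1; once #n+2 begins, the report is stale
  current_session == Some(session_after_slash_report)
}

fn main() {
  assert!(slash_report_still_relevant(3, Some(4)));
  assert!(!slash_report_still_relevant(3, Some(5))); // too late
  assert!(!slash_report_still_relevant(3, None)); // no session active yet
}
```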
@@ -3,7 +3,10 @@ use std::sync::Arc;

use serai_db::{DbTxn, Db};

use serai_client::{validator_sets::primitives::ExternalValidatorSet, Serai};
use serai_client_serai::{
abi::primitives::{network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet},
Serai,
};

use serai_task::ContinuallyRan;

@@ -28,7 +31,7 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {
let mut made_progress = false;
for network in serai_client::primitives::EXTERNAL_NETWORKS {
for network in ExternalNetworkId::all() {
let mut txn = self.db.txn();
let Some((session, keys)) = Keys::take(&mut txn, network) else {
// No keys to set
@@ -37,10 +40,9 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {

// This uses the latest finalized block, not the latest cosigned block, which should be
// fine as in the worst case, the only impact is no longer attempting TX publication
let serai =
self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
let serai = serai.validator_sets();
let current_session = serai.session(network.into()).await.map_err(|e| format!("{e:?}"))?;
let serai = self.serai.state().await.map_err(|e| format!("{e:?}"))?;
let current_session =
serai.current_session(network.into()).await.map_err(|e| format!("{e:?}"))?;
let current_session = current_session.map(|session| session.0);
// Only attempt to set these keys if this isn't a retired session
if Some(session.0) < current_session {
@@ -67,7 +69,7 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {
continue;
};

match self.serai.publish(&keys).await {
match self.serai.publish_transaction(&keys).await {
Ok(()) => {
txn.commit();
made_progress = true;

@@ -21,7 +21,7 @@ pub(crate) struct Blockchain<D: Db, T: TransactionTrait> {

block_number: u64,
tip: [u8; 32],
participants: HashSet<<Ristretto as WrappedGroup>::G>,
participants: HashSet<[u8; 32]>,

provided: ProvidedTransactions<D, T>,
mempool: Mempool<D, T>,
@@ -74,7 +74,10 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
let mut res = Self {
db: Some(db.clone()),
genesis,
participants: participants.iter().copied().collect(),
participants: participants
.iter()
.map(<<Ristretto as WrappedGroup>::G as GroupEncoding>::to_bytes)
.collect(),

block_number: 0,
tip: genesis,
@@ -173,7 +176,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {

self.mempool.add::<N, _>(
|signer, order| {
if self.participants.contains(&signer) {
if self.participants.contains(&signer.to_bytes()) {
Some(
db.get(Self::next_nonce_key(&self.genesis, &signer, &order))
.map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())),
@@ -202,7 +205,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
if let Some(next_nonce) = self.mempool.next_nonce_in_mempool(signer, order.to_vec()) {
return Some(next_nonce);
}
if self.participants.contains(signer) {
if self.participants.contains(&signer.to_bytes()) {
Some(
self
.db
@@ -251,7 +254,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
self.tip,
self.provided.transactions.clone(),
&mut |signer, order| {
if self.participants.contains(signer) {
if self.participants.contains(&signer.to_bytes()) {
let key = Self::next_nonce_key(&self.genesis, signer, order);
let next = txn
.get(&key)

@@ -1,7 +1,7 @@
use std::collections::HashMap;

use dalek_ff_group::Ristretto;
use ciphersuite::*;
use ciphersuite::{group::GroupEncoding, *};

use serai_db::{DbTxn, Db};

@@ -21,9 +21,9 @@ pub(crate) struct Mempool<D: Db, T: TransactionTrait> {
db: D,
genesis: [u8; 32],

last_nonce_in_mempool: HashMap<(<Ristretto as WrappedGroup>::G, Vec<u8>), u32>,
last_nonce_in_mempool: HashMap<([u8; 32], Vec<u8>), u32>,
txs: HashMap<[u8; 32], Transaction<T>>,
txs_per_signer: HashMap<<Ristretto as WrappedGroup>::G, u32>,
txs_per_signer: HashMap<[u8; 32], u32>,
}

impl<D: Db, T: TransactionTrait> Mempool<D, T> {
@@ -82,6 +82,7 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
}
Transaction::Application(tx) => match tx.kind() {
TransactionKind::Signed(order, Signed { signer, nonce, .. }) => {
let signer = signer.to_bytes();
let amount = *res.txs_per_signer.get(&signer).unwrap_or(&0) + 1;
res.txs_per_signer.insert(signer, amount);

@@ -140,6 +141,8 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
};
let mut next_nonce = blockchain_next_nonce;

let signer = signer.to_bytes();

if let Some(mempool_last_nonce) =
self.last_nonce_in_mempool.get(&(signer, order.clone()))
{
@@ -182,7 +185,7 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
signer: &<Ristretto as WrappedGroup>::G,
order: Vec<u8>,
) -> Option<u32> {
self.last_nonce_in_mempool.get(&(*signer, order)).copied().map(|nonce| nonce + 1)
self.last_nonce_in_mempool.get(&(signer.to_bytes(), order)).copied().map(|nonce| nonce + 1)
}

/// Get transactions to include in a block.
@@ -243,6 +246,8 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {

if let Some(tx) = self.txs.remove(tx) {
if let TransactionKind::Signed(order, Signed { signer, nonce, .. }) = tx.kind() {
let signer = signer.to_bytes();

let amount = *self.txs_per_signer.get(&signer).unwrap() - 1;
self.txs_per_signer.insert(signer, amount);
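The `Blockchain`/`Mempool` hunks all follow from one change: collections are now keyed by a point's 32-byte encoding rather than the point itself, since the re-exported `RistrettoPoint` no longer carries the macro's manual `Hash` impl (removed later in this diff). The pattern, using `curve25519-dalek` directly (`compress().to_bytes()` here plays the role of the `GroupEncoding::to_bytes` the diff calls):

```rust
use std::collections::HashSet;

use curve25519_dalek::{constants::RISTRETTO_BASEPOINT_POINT, ristretto::RistrettoPoint};

fn main() {
  // Key the set by the canonical 32-byte encoding, not the point
  let mut participants = HashSet::<[u8; 32]>::new();

  let signer: RistrettoPoint = RISTRETTO_BASEPOINT_POINT;
  participants.insert(signer.compress().to_bytes());

  // Lookups encode at the call site, mirroring the `contains(&signer.to_bytes())` edits
  assert!(participants.contains(&signer.compress().to_bytes()));
}
```

Encoding once per lookup trades a little work at each call site for dropping the `Hash` impl on a group element, where hashing consistent with equality is easy to get subtly wrong.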
@@ -7,7 +7,7 @@ use rand::{RngCore, CryptoRng, rngs::OsRng};
use blake2::{Digest, Blake2s256};

use dalek_ff_group::Ristretto;
use ciphersuite::{group::Group, *};
use ciphersuite::*;
use schnorr::SchnorrSignature;

use ::tendermint::{

@@ -6,7 +6,7 @@ license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tendermint"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.75"
rust-version = "1.77"

[package.metadata.docs.rs]
all-features = true

@@ -36,7 +36,7 @@ serai-task = { path = "../../common/task", version = "0.1" }

tributary-sdk = { path = "../tributary-sdk" }

serai-cosign = { path = "../cosign" }
serai-cosign-types = { path = "../cosign/types" }
serai-coordinator-substrate = { path = "../substrate" }

messages = { package = "serai-processor-messages", path = "../../processor/messages" }

@@ -2,13 +2,13 @@ use std::collections::HashMap;

use borsh::{BorshSerialize, BorshDeserialize};

use serai_primitives::{address::SeraiAddress, validator_sets::primitives::ExternalValidatorSet};
use serai_primitives::{BlockHash, validator_sets::ExternalValidatorSet, address::SeraiAddress};

use messages::sign::{VariantSignId, SignId};

use serai_db::*;

use serai_cosign::CosignIntent;
use serai_cosign_types::CosignIntent;

use crate::transaction::SigningProtocolRound;

@@ -122,7 +122,7 @@ impl Topic {
Topic::DkgConfirmation { attempt, round: _ } => Some({
let id = {
let mut id = [0; 32];
let encoded_set = borsh::to_vec(set).unwrap();
let encoded_set = borsh::to_vec(&set).unwrap();
id[.. encoded_set.len()].copy_from_slice(&encoded_set);
VariantSignId::Batch(id)
};
@@ -232,18 +232,18 @@ create_db!(
SlashPoints: (set: ExternalValidatorSet, validator: SeraiAddress) -> u32,

// The cosign intent for a Substrate block
CosignIntents: (set: ExternalValidatorSet, substrate_block_hash: [u8; 32]) -> CosignIntent,
CosignIntents: (set: ExternalValidatorSet, substrate_block_hash: BlockHash) -> CosignIntent,
// The latest Substrate block to cosign.
LatestSubstrateBlockToCosign: (set: ExternalValidatorSet) -> [u8; 32],
LatestSubstrateBlockToCosign: (set: ExternalValidatorSet) -> BlockHash,
// The hash of the block we're actively cosigning.
ActivelyCosigning: (set: ExternalValidatorSet) -> [u8; 32],
ActivelyCosigning: (set: ExternalValidatorSet) -> BlockHash,
// If this block has already been cosigned.
Cosigned: (set: ExternalValidatorSet, substrate_block_hash: [u8; 32]) -> (),
Cosigned: (set: ExternalValidatorSet, substrate_block_hash: BlockHash) -> (),

// The plans to recognize upon a `Transaction::SubstrateBlock` being included on-chain.
SubstrateBlockPlans: (
set: ExternalValidatorSet,
substrate_block_hash: [u8; 32]
substrate_block_hash: BlockHash
) -> Vec<[u8; 32]>,

// The weight accumulated for a topic.
@@ -291,26 +291,26 @@ impl TributaryDb {
pub(crate) fn latest_substrate_block_to_cosign(
getter: &impl Get,
set: ExternalValidatorSet,
) -> Option<[u8; 32]> {
) -> Option<BlockHash> {
LatestSubstrateBlockToCosign::get(getter, set)
}
pub(crate) fn set_latest_substrate_block_to_cosign(
txn: &mut impl DbTxn,
set: ExternalValidatorSet,
substrate_block_hash: [u8; 32],
substrate_block_hash: BlockHash,
) {
LatestSubstrateBlockToCosign::set(txn, set, &substrate_block_hash);
}
pub(crate) fn actively_cosigning(
txn: &mut impl DbTxn,
set: ExternalValidatorSet,
) -> Option<[u8; 32]> {
) -> Option<BlockHash> {
ActivelyCosigning::get(txn, set)
}
pub(crate) fn start_cosigning(
txn: &mut impl DbTxn,
set: ExternalValidatorSet,
substrate_block_hash: [u8; 32],
substrate_block_hash: BlockHash,
substrate_block_number: u64,
) {
assert!(
@@ -335,14 +335,14 @@ impl TributaryDb {
pub(crate) fn mark_cosigned(
txn: &mut impl DbTxn,
set: ExternalValidatorSet,
substrate_block_hash: [u8; 32],
substrate_block_hash: BlockHash,
) {
Cosigned::set(txn, set, substrate_block_hash, &());
}
pub(crate) fn cosigned(
txn: &mut impl DbTxn,
set: ExternalValidatorSet,
substrate_block_hash: [u8; 32],
substrate_block_hash: BlockHash,
) -> bool {
Cosigned::get(txn, set, substrate_block_hash).is_some()
}

@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

@@ -9,8 +9,9 @@ use ciphersuite::group::GroupEncoding;
use dkg::Participant;

use serai_primitives::{
address::SeraiAddress,
BlockHash,
validator_sets::{ExternalValidatorSet, Slash},
address::SeraiAddress,
};

use serai_db::*;
@@ -25,7 +26,7 @@ use tributary_sdk::{
Transaction as TributaryTransaction, Block, TributaryReader, P2p,
};

use serai_cosign::CosignIntent;
use serai_cosign_types::CosignIntent;
use serai_coordinator_substrate::NewSetInformation;

use messages::sign::{VariantSignId, SignId};
@@ -79,7 +80,7 @@ impl CosignIntents {
fn take(
txn: &mut impl DbTxn,
set: ExternalValidatorSet,
substrate_block_hash: [u8; 32],
substrate_block_hash: BlockHash,
) -> Option<CosignIntent> {
db::CosignIntents::take(txn, set, substrate_block_hash)
}
@@ -113,7 +114,7 @@ impl SubstrateBlockPlans {
pub fn set(
txn: &mut impl DbTxn,
set: ExternalValidatorSet,
substrate_block_hash: [u8; 32],
substrate_block_hash: BlockHash,
plans: &Vec<[u8; 32]>,
) {
db::SubstrateBlockPlans::set(txn, set, substrate_block_hash, plans);
@@ -121,7 +122,7 @@ impl SubstrateBlockPlans {
fn take(
txn: &mut impl DbTxn,
set: ExternalValidatorSet,
substrate_block_hash: [u8; 32],
substrate_block_hash: BlockHash,
) -> Option<Vec<[u8; 32]>> {
db::SubstrateBlockPlans::take(txn, set, substrate_block_hash)
}
@@ -574,14 +575,9 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
};
let msgs = (
decode_signed_message::<TendermintNetwork<TD, Transaction, P>>(&data.0).unwrap(),
if data.1.is_some() {
Some(
decode_signed_message::<TendermintNetwork<TD, Transaction, P>>(&data.1.unwrap())
.unwrap(),
)
} else {
None
},
data.1.as_ref().map(|data| {
decode_signed_message::<TendermintNetwork<TD, Transaction, P>>(data).unwrap()
}),
);

// Since anything with evidence is fundamentally faulty behavior, not just temporal
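The evidence-decoding cleanup just above is a pure idiom swap: an `if is_some() { Some(...unwrap()) } else { None }` ladder becomes a single `as_ref().map(..)`. In miniature:

```rust
fn decode(bytes: &[u8]) -> usize {
  bytes.len() // stand-in for decode_signed_message
}

fn main() {
  let data: (Vec<u8>, Option<Vec<u8>>) = (vec![1], Some(vec![2, 3]));

  // Before: explicit branching plus an unwrap
  let old = if data.1.is_some() { Some(decode(data.1.as_ref().unwrap())) } else { None };

  // After: borrow and map; no branch, no unwrap
  let new = data.1.as_ref().map(|data| decode(data));

  assert_eq!(old, new);
}
```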
@@ -14,7 +14,7 @@ use schnorr::SchnorrSignature;

use borsh::{BorshSerialize, BorshDeserialize};

use serai_primitives::{address::SeraiAddress, validator_sets::MAX_KEY_SHARES_PER_SET};
use serai_primitives::{BlockHash, validator_sets::KeyShares, address::SeraiAddress};

use messages::sign::VariantSignId;

@@ -137,7 +137,7 @@ pub enum Transaction {
/// be the one selected to be cosigned.
Cosign {
/// The hash of the Substrate block to cosign
substrate_block_hash: [u8; 32],
substrate_block_hash: BlockHash,
},

/// Note an intended-to-be-cosigned Substrate block as cosigned
@@ -175,7 +175,7 @@ pub enum Transaction {
/// cosigning the block in question, it'd be safe to provide this and move on to the next cosign.
Cosigned {
/// The hash of the Substrate block which was cosigned
substrate_block_hash: [u8; 32],
substrate_block_hash: BlockHash,
},

/// Acknowledge a Substrate block
@@ -186,7 +186,7 @@ pub enum Transaction {
/// resulting from its handling.
SubstrateBlock {
/// The hash of the Substrate block
hash: [u8; 32],
hash: BlockHash,
},

/// Acknowledge a Batch
@@ -250,11 +250,11 @@ impl TransactionTrait for Transaction {
signed.to_tributary_signed(0),
),
Transaction::DkgConfirmationPreprocess { attempt, signed, .. } => TransactionKind::Signed(
borsh::to_vec(b"DkgConfirmation".as_slice(), attempt).unwrap(),
borsh::to_vec(&(b"DkgConfirmation".as_slice(), attempt)).unwrap(),
signed.to_tributary_signed(0),
),
Transaction::DkgConfirmationShare { attempt, signed, .. } => TransactionKind::Signed(
borsh::to_vec(b"DkgConfirmation".as_slice(), attempt).unwrap(),
borsh::to_vec(&(b"DkgConfirmation".as_slice(), attempt)).unwrap(),
signed.to_tributary_signed(1),
),

@@ -264,7 +264,7 @@ impl TransactionTrait for Transaction {
Transaction::Batch { .. } => TransactionKind::Provided("Batch"),

Transaction::Sign { id, attempt, round, signed, .. } => TransactionKind::Signed(
borsh::to_vec(b"Sign".as_slice(), id, attempt).unwrap(),
borsh::to_vec(&(b"Sign".as_slice(), id, attempt)).unwrap(),
signed.to_tributary_signed(round.nonce()),
),

@@ -303,14 +303,14 @@ impl TransactionTrait for Transaction {
Transaction::Batch { .. } => {}

Transaction::Sign { data, .. } => {
if data.len() > usize::from(MAX_KEY_SHARES_PER_SET) {
if data.len() > usize::from(KeyShares::MAX_PER_SET) {
Err(TransactionError::InvalidContent)?
}
// TODO: MAX_SIGN_LEN
}

Transaction::SlashReport { slash_points, .. } => {
if slash_points.len() > usize::from(MAX_KEY_SHARES_PER_SET) {
if slash_points.len() > usize::from(KeyShares::MAX_PER_SET) {
Err(TransactionError::InvalidContent)?
}
}
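The `borsh::to_vec` fixes above are worth a note: `to_vec` serializes exactly one value, so the old multi-argument calls couldn't have compiled. Bundling the domain-separation label with the remaining fields as a tuple restores a single value while keeping orders for distinct protocols distinct:

```rust
use borsh::to_vec;

fn main() {
  let attempt: u32 = 2;

  // One serializable value: a (label, fields...) tuple
  let order = to_vec(&(b"DkgConfirmation".as_slice(), attempt)).unwrap();

  // The label keeps signing orders from colliding across protocols
  assert_ne!(order, to_vec(&(b"Sign".as_slice(), attempt)).unwrap());
}
```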
@@ -17,7 +17,7 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true

[dependencies]
std-shims = { path = "../../common/std-shims", version = "0.1.4", default-features = false, optional = true }
std-shims = { path = "../../common/std-shims", version = "0.1.4", default-features = false }

zeroize = { version = "^1.5", default-features = false, features = ["derive"] }
subtle = { version = "^2.4", default-features = false }
@@ -33,7 +33,7 @@ hex = { version = "0.4", default-features = false, features = ["std"] }
ff-group-tests = { version = "0.13", path = "../ff-group-tests" }

[features]
alloc = ["std-shims", "zeroize/alloc", "digest/alloc", "ff/alloc"]
alloc = ["zeroize/alloc", "digest/alloc", "ff/alloc"]
std = [
"alloc",


@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]

use zeroize::Zeroize;

@@ -1,12 +1,10 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("lib.md")]
#![cfg_attr(not(feature = "std"), no_std)]

use core::fmt::Debug;
#[cfg(feature = "alloc")]
#[allow(unused_imports)]
use std_shims::prelude::*;
#[cfg(feature = "alloc")]
use std_shims::io::{self, Read};

use subtle::{CtOption, ConstantTimeEq, ConditionallySelectable};
@@ -112,7 +110,6 @@ pub trait GroupCanonicalEncoding: WrappedGroup {
}

/// `std::io` extensions for `GroupCanonicalEncoding.`
#[cfg(feature = "alloc")]
#[allow(non_snake_case)]
pub trait GroupIo: GroupCanonicalEncoding {
/// Read a canonical field element from something implementing `std::io::Read`.
@@ -129,8 +126,6 @@ pub trait GroupIo: GroupCanonicalEncoding {
}

/// Read a canonical point from something implementing `std::io::Read`.
#[cfg(feature = "alloc")]
#[allow(non_snake_case)]
fn read_G<R: Read>(reader: &mut R) -> io::Result<Self::G> {
let mut bytes = <Self::G as GroupEncoding>::Repr::default();
reader.read_exact(bytes.as_mut())?;
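`read_G` shows the general recipe for pulling a point off a `std::io::Read`: fill the group's fixed-size `Repr`, then let `from_bytes` reject bytes which don't decode. A generic sketch against the published `group` crate API (full canonicity remains the group implementation's concern, which is why the crate layers `GroupCanonicalEncoding` on top):

```rust
use std::io::{self, Read};

use group::GroupEncoding;

/// Read a point in its fixed-size encoding, erroring on bytes `from_bytes` rejects.
fn read_point<G: GroupEncoding, R: Read>(reader: &mut R) -> io::Result<G> {
  // `Repr` is `Default + AsMut<[u8]>`, so it doubles as the read buffer
  let mut repr = G::Repr::default();
  reader.read_exact(repr.as_mut())?;
  // `from_bytes` returns a `CtOption`; `None` means the encoding didn't decode
  Option::<G>::from(G::from_bytes(&repr)).ok_or_else(|| io::Error::other("invalid point"))
}
```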
@@ -1,5 +1,5 @@
#![allow(deprecated)]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![no_std] // Prevents writing new code, in what should be a simple wrapper, which requires std
#![doc = include_str!("../README.md")]
#![allow(clippy::redundant_closure_call)]
@@ -8,7 +8,6 @@ use core::{
borrow::Borrow,
ops::{Deref, Add, AddAssign, Sub, SubAssign, Neg, Mul, MulAssign},
iter::{Iterator, Sum},
hash::{Hash, Hasher},
};

use zeroize::Zeroize;
@@ -20,9 +19,8 @@ use subtle::{Choice, CtOption};

use curve25519_dalek::{
edwards::{EdwardsPoint as DEdwardsPoint, CompressedEdwardsY},
ristretto::{RistrettoPoint as DRistrettoPoint, CompressedRistretto},
};
pub use curve25519_dalek::Scalar;
pub use curve25519_dalek::{Scalar, ristretto::RistrettoPoint};

use ::ciphersuite::group::{Group, GroupEncoding, prime::PrimeGroup};

@@ -259,17 +257,6 @@ macro_rules! dalek_group {
}

impl PrimeGroup for $Point {}

// Support being used as a key in a table
// While it is expensive as a key, due to the field operations required, there's frequently
// use cases for public key -> value lookups
#[allow(unknown_lints, renamed_and_removed_lints)]
#[allow(clippy::derived_hash_with_manual_eq, clippy::derive_hash_xor_eq)]
impl Hash for $Point {
fn hash<H: Hasher>(&self, state: &mut H) {
self.to_bytes().hash(state);
}
}
};
}

@@ -281,14 +268,6 @@ dalek_group!(
CompressedEdwardsY,
);

dalek_group!(
RistrettoPoint,
DRistrettoPoint,
|_| true,
RistrettoBasepointTable,
CompressedRistretto,
);

#[test]
fn test_ed25519_group() {
ff_group_tests::group::test_prime_group_bits::<_, EdwardsPoint>(&mut rand_core::OsRng);

@@ -21,21 +21,14 @@ zeroize = { version = "^1.5", default-features = false, features = ["zeroize_der

thiserror = { version = "2", default-features = false }

std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false }

borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true }
std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false, features = ["alloc"] }

ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false, features = ["alloc"] }

[features]
std = [
"thiserror/std",

"std-shims/std",

"borsh?/std",

"ciphersuite/std",
]
borsh = ["dep:borsh"]
default = ["std"]

@@ -20,7 +20,7 @@ workspace = true
zeroize = { version = "^1.5", default-features = false }
rand_core = { version = "0.6", default-features = false }

std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false }
std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false, features = ["alloc"] }

ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false }
dkg = { path = "../", version = "0.6", default-features = false }

@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![no_std]


@@ -23,7 +23,7 @@ rand_core = { version = "0.6", default-features = false, features = ["alloc"] }

zeroize = { version = "^1.5", default-features = false, features = ["alloc", "zeroize_derive"] }

std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false }
std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false, features = ["alloc"] }

transcript = { package = "flexible-transcript", path = "../../transcript", version = "^0.3.2", default-features = false, features = ["recommended"] }

@@ -34,10 +34,10 @@ generic-array = { version = "1", default-features = false, features = ["alloc"]
blake2 = { version = "0.11.0-rc.2", default-features = false }
rand_chacha = { version = "0.3", default-features = false }

generalized-bulletproofs = { git = "https://github.com/monero-oxide/monero-oxide", rev = "7216a2e84c7671c167c3d81eafe0d2b1f418f102", default-features = false }
ec-divisors = { git = "https://github.com/monero-oxide/monero-oxide", rev = "7216a2e84c7671c167c3d81eafe0d2b1f418f102", default-features = false }
generalized-bulletproofs-circuit-abstraction = { git = "https://github.com/monero-oxide/monero-oxide", rev = "7216a2e84c7671c167c3d81eafe0d2b1f418f102", default-features = false }
generalized-bulletproofs-ec-gadgets = { git = "https://github.com/monero-oxide/monero-oxide", rev = "7216a2e84c7671c167c3d81eafe0d2b1f418f102", default-features = false }
generalized-bulletproofs = { git = "https://github.com/monero-oxide/monero-oxide", rev = "dc1b3dbe436aae61ec363505052d4715d38ce1df", default-features = false }
ec-divisors = { git = "https://github.com/monero-oxide/monero-oxide", rev = "dc1b3dbe436aae61ec363505052d4715d38ce1df", default-features = false }
generalized-bulletproofs-circuit-abstraction = { git = "https://github.com/monero-oxide/monero-oxide", rev = "dc1b3dbe436aae61ec363505052d4715d38ce1df", default-features = false }
generalized-bulletproofs-ec-gadgets = { git = "https://github.com/monero-oxide/monero-oxide", rev = "dc1b3dbe436aae61ec363505052d4715d38ce1df", default-features = false }

dkg = { path = "..", default-features = false }

@@ -52,7 +52,7 @@ rand = { version = "0.8", default-features = false, features = ["std"] }
ciphersuite = { path = "../../ciphersuite", default-features = false, features = ["std"] }
embedwards25519 = { path = "../../embedwards25519", default-features = false, features = ["std"] }
dalek-ff-group = { path = "../../dalek-ff-group", default-features = false, features = ["std"] }
generalized-bulletproofs = { git = "https://github.com/monero-oxide/monero-oxide", rev = "7216a2e84c7671c167c3d81eafe0d2b1f418f102", features = ["tests"] }
generalized-bulletproofs = { git = "https://github.com/monero-oxide/monero-oxide", rev = "dc1b3dbe436aae61ec363505052d4715d38ce1df", features = ["tests"] }
dkg-recovery = { path = "../recovery" }

[features]

@@ -26,21 +26,9 @@ presented in section 4.2 is extended, with the following changes:
just one round.

For a gist of the verifiable encryption scheme, please see
https://gist.github.com/kayabaNerve/cfbde74b0660dfdf8dd55326d6ec33d7. Security
proofs are currently being worked on.

---

This library relies on an implementation of Bulletproofs and various
zero-knowledge gadgets. This library uses
[`generalized-bulletproofs`](https://docs.rs/generalized-bulletproofs),
[`generalized-bulletproofs-circuit-abstraction`](https://docs.rs/generalized-bulletproofs-circuit-abstraction),
and
[`generalized-bulletproofs-ec-gadgets`](https://docs.rs/generalized-bulletproofs-ec-gadgets)
from the Monero project's FCMP++ codebase. These libraries have received the
following audits in the past:
- https://github.com/kayabaNerve/monero-oxide/tree/fcmp++/audits/generalized-bulletproofs
- https://github.com/kayabaNerve/monero-oxide/tree/fcmp++/audits/fcmps
https://gist.github.com/kayabaNerve/cfbde74b0660dfdf8dd55326d6ec33d7. For
security proofs and audit information, please see
[here](../../../audits/crypto/dkg/evrf).

---


@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]


@@ -23,7 +23,7 @@ rand_core = { version = "0.6", default-features = false }

zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }

std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false }
std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false, features = ["alloc"] }

multiexp = { path = "../../multiexp", version = "0.4", default-features = false }
ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false }

@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]


@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![no_std]


@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]

@@ -22,7 +22,6 @@ use ciphersuite::{

/// The ID of a participant, defined as a non-zero u16.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Zeroize)]
#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize))]
pub struct Participant(u16);
impl Participant {
/// Create a new Participant identifier from a u16.
@@ -129,18 +128,8 @@ pub enum DkgError {
NotParticipating,
}

// Manually implements BorshDeserialize so we can enforce it's a valid index
#[cfg(feature = "borsh")]
impl borsh::BorshDeserialize for Participant {
fn deserialize_reader<R: io::Read>(reader: &mut R) -> io::Result<Self> {
Participant::new(u16::deserialize_reader(reader)?)
.ok_or_else(|| io::Error::other("invalid participant"))
}
}

/// Parameters for a multisig.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize))]
pub struct ThresholdParams {
/// Participants needed to sign on behalf of the group.
t: u16,
@@ -210,16 +199,6 @@ impl ThresholdParams {
}
}

#[cfg(feature = "borsh")]
impl borsh::BorshDeserialize for ThresholdParams {
fn deserialize_reader<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let t = u16::deserialize_reader(reader)?;
let n = u16::deserialize_reader(reader)?;
let i = Participant::deserialize_reader(reader)?;
ThresholdParams::new(t, n, i).map_err(|e| io::Error::other(format!("{e:?}")))
}
}

/// A method of interpolation.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub enum Interpolation<F: Zeroize + PrimeField> {
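`Participant` and `ThresholdParams` only derive `BorshSerialize`; deserialization is hand-written so every decoded value re-passes the constructors' checks (a non-zero index, coherent `t`/`n`/`i`). The shape of the pattern, mirroring the `Participant` impl above (requires borsh's `derive` feature):

```rust
use borsh::{BorshSerialize, BorshDeserialize, io};

#[derive(Clone, Copy, Debug, BorshSerialize)]
pub struct Participant(u16);

impl Participant {
  /// Participant indexes are non-zero, so `0` is rejected at construction.
  pub fn new(i: u16) -> Option<Participant> {
    (i != 0).then_some(Participant(i))
  }
}

// Route deserialization through the constructor: malformed bytes can't
// smuggle in an index the rest of the code assumes is valid
impl BorshDeserialize for Participant {
  fn deserialize_reader<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    Participant::new(u16::deserialize_reader(reader)?)
      .ok_or_else(|| io::Error::other("invalid participant"))
  }
}

fn main() {
  let bytes = borsh::to_vec(&Participant::new(1).unwrap()).unwrap();
  assert!(Participant::try_from_slice(&bytes).is_ok());
  // A zero index parses at the wire level but fails validation
  assert!(Participant::try_from_slice(&0u16.to_le_bytes()).is_err());
}
```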
@@ -33,6 +33,6 @@ rand_core = { version = "0.6", default-features = false, features = ["std"] }
 ff-group-tests = { path = "../ff-group-tests" }

 [features]
-alloc = ["zeroize/alloc", "sha3/alloc", "crypto-bigint/alloc", "prime-field/alloc", "ciphersuite/alloc"]
+alloc = ["zeroize/alloc", "sha3/alloc", "prime-field/alloc", "ciphersuite/alloc"]
 std = ["alloc", "zeroize/std", "prime-field/std", "ciphersuite/std"]
 default = ["std"]
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![no_std]
@@ -16,7 +16,7 @@ rustdoc-args = ["--cfg", "docsrs"]
 [dependencies]
 hex-literal = { version = "1", default-features = false }

-std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false, optional = true }
+std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false }

 zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
@@ -29,7 +29,7 @@ curve25519-dalek = { version = "4", default-features = false, features = ["legac
 blake2 = { version = "0.11.0-rc.2", default-features = false }
 ciphersuite = { path = "../ciphersuite", version = "0.4", default-features = false }

-generalized-bulletproofs-ec-gadgets = { git = "https://github.com/monero-oxide/monero-oxide", rev = "7216a2e84c7671c167c3d81eafe0d2b1f418f102", default-features = false, optional = true }
+generalized-bulletproofs-ec-gadgets = { git = "https://github.com/monero-oxide/monero-oxide", rev = "dc1b3dbe436aae61ec363505052d4715d38ce1df", default-features = false, optional = true }

 [dev-dependencies]
 hex = "0.4"
@@ -39,6 +39,6 @@ rand_core = { version = "0.6", features = ["std"] }
 ff-group-tests = { path = "../ff-group-tests" }

 [features]
-alloc = ["std-shims", "zeroize/alloc", "prime-field/alloc", "short-weierstrass/alloc", "curve25519-dalek/alloc", "blake2/alloc", "ciphersuite/alloc", "generalized-bulletproofs-ec-gadgets"]
+alloc = ["zeroize/alloc", "prime-field/alloc", "short-weierstrass/alloc", "curve25519-dalek/alloc", "blake2/alloc", "ciphersuite/alloc", "generalized-bulletproofs-ec-gadgets"]
 std = ["alloc", "std-shims/std", "zeroize/std", "prime-field/std", "short-weierstrass/std", "ciphersuite/std", "generalized-bulletproofs-ec-gadgets/std"]
 default = ["std"]
@@ -1,8 +1,7 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]

-#[cfg(feature = "alloc")]
 #[allow(unused_imports)]
 use std_shims::prelude::*;
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]

 /// Tests for the Field trait.
@@ -17,33 +17,35 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true

 [dependencies]
-thiserror = { version = "2", default-features = false, features = ["std"] }
+std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false, features = ["alloc"] }

-rand_core = { version = "0.6", default-features = false, features = ["std"] }
-rand_chacha = { version = "0.3", default-features = false, features = ["std"] }
+thiserror = { version = "2", default-features = false }

-zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] }
-subtle = { version = "^2.4", default-features = false, features = ["std"] }
+rand_core = { version = "0.6", default-features = false, features = ["alloc"] }
+rand_chacha = { version = "0.3", default-features = false }

-hex = { version = "0.4", default-features = false, features = ["std"], optional = true }
+zeroize = { version = "^1.5", default-features = false, features = ["alloc", "zeroize_derive"] }
+subtle = { version = "^2.4", default-features = false }

-transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false, features = ["std", "recommended"] }
+hex = { version = "0.4", default-features = false, features = ["alloc"], optional = true }

-dalek-ff-group = { path = "../dalek-ff-group", version = "0.5", default-features = false, features = ["std"], optional = true }
-minimal-ed448 = { path = "../ed448", version = "0.4", default-features = false, features = ["std"], optional = true }
+transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false, features = ["recommended"] }

-ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false, features = ["std"] }
+dalek-ff-group = { path = "../dalek-ff-group", version = "0.5", default-features = false, features = ["alloc"], optional = true }
+minimal-ed448 = { path = "../ed448", version = "0.4", default-features = false, features = ["alloc"], optional = true }

+ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false, features = ["alloc"] }
 sha2 = { version = "0.10.0", default-features = false, optional = true }
 elliptic-curve = { version = "0.13", default-features = false, features = ["hash2curve"], optional = true }
-ciphersuite-kp256 = { path = "../ciphersuite/kp256", version = "0.4", default-features = false, features = ["std"], optional = true }
+ciphersuite-kp256 = { path = "../ciphersuite/kp256", version = "0.4", default-features = false, features = ["alloc"], optional = true }

-multiexp = { path = "../multiexp", version = "0.4", default-features = false, features = ["std", "batch"] }
+multiexp = { path = "../multiexp", version = "0.4", default-features = false, features = ["alloc", "batch"] }

-schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false, features = ["std"] }
+schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false, features = ["alloc"] }

-dkg = { path = "../dkg", version = "0.6.1", default-features = false, features = ["std"] }
-dkg-recovery = { path = "../dkg/recovery", version = "0.6", default-features = false, features = ["std"], optional = true }
-dkg-dealer = { path = "../dkg/dealer", version = "0.6", default-features = false, features = ["std"], optional = true }
+dkg = { path = "../dkg", version = "0.6.1", default-features = false }
+dkg-recovery = { path = "../dkg/recovery", version = "0.6", default-features = false, optional = true }
+dkg-dealer = { path = "../dkg/dealer", version = "0.6", default-features = false, optional = true }

 [dev-dependencies]
 hex = "0.4"
@@ -54,6 +56,38 @@ dkg-recovery = { path = "../dkg/recovery", default-features = false, features =
 dkg-dealer = { path = "../dkg/dealer", default-features = false, features = ["std"] }

 [features]
+std = [
+  "std-shims/std",
+
+  "thiserror/std",
+
+  "rand_core/std",
+  "rand_chacha/std",
+
+  "zeroize/std",
+  "subtle/std",
+
+  "hex?/std",
+
+  "transcript/std",
+
+  "dalek-ff-group?/std",
+  "minimal-ed448?/std",
+
+  "ciphersuite/std",
+  "sha2?/std",
+  "elliptic-curve?/std",
+  "ciphersuite-kp256?/std",
+
+  "multiexp/std",
+
+  "schnorr/std",
+
+  "dkg/std",
+  "dkg-recovery?/std",
+  "dkg-dealer?/std",
+]
+
 ed25519 = ["dalek-ff-group"]
 ristretto = ["dalek-ff-group"]
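The `?` in entries such as `"hex?/std"` is Cargo's weak dependency feature syntax: `std` forwards to `hex/std` only if the optional `hex` dependency is already enabled (here, via the `tests` feature below), without itself activating `hex`. Downstream, gating on the optional dependency looks like this (hypothetical helper):

```rust
// Only compiled when the optional `hex` dependency is enabled
#[cfg(feature = "hex")]
fn share_to_hex(share: &[u8]) -> String {
  hex::encode(share)
}
```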
@@ -63,3 +97,5 @@ p256 = ["sha2", "elliptic-curve", "ciphersuite-kp256"]
 ed448 = ["minimal-ed448"]

 tests = ["hex", "rand_core/getrandom", "dkg-dealer", "dkg-recovery"]
+
+default = ["std"]
@@ -1,5 +1,7 @@
 use core::{marker::PhantomData, fmt::Debug};
-use std::io::{self, Read, Write};
+#[allow(unused_imports)]
+use std_shims::prelude::*;
+use std_shims::io::{self, Read, Write};

 use zeroize::Zeroizing;
 use rand_core::{RngCore, CryptoRng};
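This import swap is the crate's no_std strategy: `std-shims` re-exports `std`'s types when `std` is enabled and `alloc`/`core` equivalents otherwise. A rough sketch of how such a shim could be laid out (hypothetical modules, not std-shims' actual source):

```rust
// Hypothetical shim layout illustrating the pattern (not std-shims' real contents).
extern crate alloc;

#[cfg(feature = "std")]
pub mod io {
  // With std available, simply re-export the real types
  pub use std::io::{Error, Read, Write};
}

#[cfg(not(feature = "std"))]
pub mod io {
  // Without std, supply minimal Read/Write/Error replacements (elided here)
}

pub mod prelude {
  // What `use std_shims::prelude::*;` would pull in for no_std builds
  pub use alloc::{borrow::ToOwned, boxed::Box, string::String, vec::Vec};
}
```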
@@ -26,8 +28,10 @@ impl<A: Send + Sync + Clone + PartialEq + Debug + WriteAddendum> Addendum for A

 /// Algorithm trait usable by the FROST signing machine to produce signatures.
 pub trait Algorithm<C: Curve>: Send + Sync {
-  /// The transcript format this algorithm uses. This likely should NOT be the IETF-compatible
-  /// transcript included in this crate.
+  /// The transcript format this algorithm uses.
+  ///
+  /// This MUST NOT be the IETF-compatible transcript included in this crate UNLESS this is an
+  /// IETF-specified ciphersuite.
   type Transcript: Sync + Clone + Debug + Transcript;
   /// Serializable addendum, used in algorithms requiring more data than just the nonces.
   type Addendum: Addendum;
@@ -67,8 +71,10 @@ pub trait Algorithm<C: Curve>: Send + Sync {
   ) -> Result<(), FrostError>;

   /// Sign a share with the given secret/nonce.
+  ///
   /// The secret will already have had its Lagrange coefficient applied, so it is the necessary
   /// key share.
+  ///
   /// The nonce will already have been processed into the combined form d + (e * p).
   fn sign_share(
     &mut self,
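For reference, the combined form mentioned in that doc comment is FROST's binding of its two nonces: with nonces d and e and a per-signer binding factor p, the effective nonce is d + (e * p). A generic one-liner over any field, with names taken from the doc comment (this is not the crate's actual nonce code):

```rust
use ff::Field;

// Combined nonce: d + (e * p), binding the second nonce via the binding factor p
fn combined_nonce<F: Field>(d: F, e: F, p: F) -> F {
  d + (e * p)
}
```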
@@ -83,6 +89,7 @@ pub trait Algorithm<C: Curve>: Send + Sync {
   fn verify(&self, group_key: C::G, nonces: &[Vec<C::G>], sum: C::F) -> Option<Self::Signature>;

   /// Verify a specific share given as a response.
+  ///
   /// This function should return a series of pairs whose products should sum to zero for a valid
   /// share. Any error raised is treated as the share being invalid.
   #[allow(clippy::type_complexity, clippy::result_unit_err)]
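A sketch of the zero-sum check this describes, under the assumption that each pair is a scalar and a group element whose products are summed (the crate batch-verifies via multiexp; this unbatched version is for illustration only):

```rust
use group::Group;

// A share is valid if the scalar-point products sum to the identity
fn share_is_valid<G: Group>(pairs: &[(G::Scalar, G)]) -> bool {
  let mut sum = G::identity();
  for (scalar, point) in pairs {
    sum += *point * *scalar;
  }
  sum == G::identity()
}
```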
@@ -97,8 +104,10 @@
 mod sealed {
   pub use super::*;

-  /// IETF-compliant transcript. This is incredibly naive and should not be used within larger
-  /// protocols.
+  /// IETF-compliant transcript.
+  ///
+  /// This is incredibly naive and MUST NOT be used within larger protocols. No guarantees are made
+  /// about its safety EXCEPT as used with the IETF-specified FROST ciphersuites.
   #[derive(Clone, Debug)]
   pub struct IetfTranscript(pub(crate) Vec<u8>);
   impl Transcript for IetfTranscript {
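A guess at the substance behind that warning: a transcript backed by a bare `Vec<u8>` (as `IetfTranscript` is) simply concatenates appended messages, with no labels or length framing, so distinct message sequences can collapse to identical transcripts. An illustrative toy, not the crate's impl:

```rust
struct NaiveTranscript(Vec<u8>);

impl NaiveTranscript {
  fn append_message(&mut self, message: &[u8]) {
    // No domain separation and no length prefix: appending "ab" then "c"
    // yields the same bytes as appending "a" then "bc"
    self.0.extend_from_slice(message);
  }
}
```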
@@ -129,6 +138,7 @@ pub(crate) use sealed::IetfTranscript;
 /// HRAm usable by the included Schnorr signature algorithm to generate challenges.
 pub trait Hram<C: Curve>: Send + Sync + Clone {
   /// HRAm function to generate a challenge.
+  ///
   /// H2 from the IETF draft, despite having a different argument set (not being pre-formatted).
   #[allow(non_snake_case)]
   fn hram(R: &C::G, A: &C::G, m: &[u8]) -> C::F;
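A hedged sketch of what an `Hram` implementation computes: hash the nonce commitment R, the group key A, and the message m into a scalar challenge, per H2 of the IETF draft. The `hash_to_scalar` helper is a stand-in, as each ciphersuite specifies its own hash-to-field:

```rust
use ff::PrimeField;
use group::GroupEncoding;

// Stand-in for a ciphersuite-specific hash-to-field (assumption, not this crate's API)
fn hash_to_scalar<F: PrimeField>(_inputs: &[&[u8]]) -> F {
  unimplemented!("ciphersuite-specific")
}

#[allow(non_snake_case)]
fn hram<G: GroupEncoding, F: PrimeField>(R: &G, A: &G, m: &[u8]) -> F {
  // Challenge = H2(R || A || m)
  let r_bytes = R.to_bytes();
  let a_bytes = A.to_bytes();
  hash_to_scalar(&[r_bytes.as_ref(), a_bytes.as_ref(), m])
}
```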
@@ -1,5 +1,7 @@
 use core::{ops::Deref, convert::AsRef};
-use std::io::{self, Read};
+#[allow(unused_imports)]
+use std_shims::prelude::*;
+use std_shims::io::{self, Read};

 use rand_core::{RngCore, CryptoRng};
Some files were not shown because too many files have changed in this diff.