Mirror of https://github.com/serai-dex/serai.git (synced 2025-12-11 13:39:25 +00:00)

Compare commits: 08f6af8bb9...next-polka (109 commits)
Commits (SHA1):
ee9b9778b5  5a3cf1f2be  2fbe925c4d  6aad496d86  d3464cfcb3  8dbea8452d
f94b7ca50e  5e39f9bc1e  c98d757c0f  6603100c7e  f70fee65b8  0849d60f28
3a792f9ce5  50959fa0e3  2fb90ebe55  b24adcbd14  b791256648  36ac9c56a4
57bf4984f8  87750407de  3ce90c55d9  ff95c58341  98044f93b1  eb04f873d5
af74c318aa  d711d8915f  3d549564a8  9a75f92864  30ea9d9a06  c45c973ca1
6e37ac030d  e7c759c468  8ec0582237  8d8e8a7a77  028ec3cce0  c49215805f
2ffdd2a01d  e1e6e67d4a  6b19780c7b  6100c3ca90  fa0ed4b180  0ea16f9e01
7a314baa9f  9891ccade8  f1f166c168  df4aee2d59  302a43653f  d219b77bd0
fce26eaee1  3cfbd9add7  609cf06393  46b1f1b7ec  09113201e7  556d294157
82ca889ed3  cde0f753c2  6ff0ef7aa6  f9e3d1b142  a793aa18ef  5662beeb8a
509bd58f4e  367a5769e8  cb6eb6430a  4f82e5912c  ac7af40f2e  264bdd46ca
c52f7634de  21eaa5793d  c744a80d80  a34f9f6164  353683cfd2  d4f77159c4
191bf4bdea  06a4824aba  e65a37e639  4653ef4a61  ce08fad931  1866bb7ae3
aff2065c31  7300700108  31874ceeae  012b8fddae  d2f58232c8  49794b6a75
973287d0a1  1b499edfe1  642848bd24  f7fb78bdd6  9c47ef2658  e1b6b638c6
c24768f922  65613750e1  87ee879dea  b5603560e8  5818f1a41c  1b781b4b57
94faf098b6  03e45f73cd  63f7e220c0  7d49366373  56f6ba2dac  55ed33d2d1
138a0e9b40  4fc7263ac3  f27fd59fa6  437f0e9a93  cc5d38f1ce  0ce025e0c2
224cf4ea21
.github/actions/bitcoin/action.yml (4 changed lines)
@@ -5,14 +5,14 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: "29.1"
+    default: "30.0"

 runs:
   using: "composite"
   steps:
     - name: Bitcoin Daemon Cache
       id: cache-bitcoind
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
+      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # 4.2.4
       with:
         path: bitcoin.tar.gz
         key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
.github/actions/build-dependencies/action.yml (21 changed lines)
@@ -38,19 +38,23 @@ runs:
       shell: bash
       run: |
         if [ "$RUNNER_OS" == "Linux" ]; then
-          sudo apt install -y ca-certificates protobuf-compiler
+          sudo apt install -y ca-certificates protobuf-compiler libclang-dev
         elif [ "$RUNNER_OS" == "Windows" ]; then
           choco install protoc
         elif [ "$RUNNER_OS" == "macOS" ]; then
-          brew install protobuf
+          brew install protobuf llvm
+          HOMEBREW_ROOT_PATH=/opt/homebrew # Apple Silicon
+          if [ $(uname -m) = "x86_64" ]; then HOMEBREW_ROOT_PATH=/usr/local; fi # Intel
+          ls $HOMEBREW_ROOT_PATH/opt/llvm/lib | grep "libclang.dylib" # Make sure this installed `libclang`
+          echo "DYLD_LIBRARY_PATH=$HOMEBREW_ROOT_PATH/opt/llvm/lib:$DYLD_LIBRARY_PATH" >> "$GITHUB_ENV"
         fi

     - name: Install solc
       shell: bash
       run: |
-        cargo +1.90 install svm-rs --version =0.5.19
-        svm install 0.8.26
-        svm use 0.8.26
+        cargo +1.91.1 install svm-rs --version =0.5.22
+        svm install 0.8.29
+        svm use 0.8.29

     - name: Remove preinstalled Docker
       shell: bash
@@ -58,7 +62,7 @@ runs:
         docker system prune -a --volumes
         sudo apt remove -y *docker*
         # Install uidmap which will be required for the explicitly installed Docker
-        sudo apt install uidmap
+        sudo apt install -y uidmap
       if: runner.os == 'Linux'

     - name: Update system dependencies
@@ -71,11 +75,8 @@ runs:
       if: runner.os == 'Linux'

     - name: Install rootless Docker
-      uses: docker/setup-docker-action@b60f85385d03ac8acfca6d9996982511d8620a19
+      uses: docker/setup-docker-action@e61617a16c407a86262fb923c35a616ddbe070b3 # 4.6.0
       with:
         rootless: true
         set-host: true
       if: runner.os == 'Linux'

-    # - name: Cache Rust
-    #   uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43
.github/actions/monero-wallet-rpc/action.yml (4 changed lines)
@@ -5,14 +5,14 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: v0.18.3.4
+    default: v0.18.4.4

 runs:
   using: "composite"
   steps:
     - name: Monero Wallet RPC Cache
       id: cache-monero-wallet-rpc
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
+      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # 4.2.4
       with:
         path: monero-wallet-rpc
         key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
.github/actions/monero/action.yml (33 changed lines)
@@ -5,39 +5,46 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: v0.18.3.4
+    default: v0.18.4.4

 runs:
   using: "composite"
   steps:
     - name: Monero Daemon Cache
       id: cache-monerod
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
+      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # 4.2.4
       with:
         path: /usr/bin/monerod
         key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

     - name: Download the Monero Daemon
       if: steps.cache-monerod.outputs.cache-hit != 'true'
-      # Calculates OS/ARCH to demonstrate it, yet then locks to linux-x64 due
-      # to the contained folder not following the same naming scheme and
-      # requiring further expansion not worth doing right now
       shell: bash
       run: |
-        RUNNER_OS=${{ runner.os }}
-        RUNNER_ARCH=${{ runner.arch }}
+        OS=${{ runner.os }}
+        ARCH=${{ runner.arch }}

-        RUNNER_OS=${RUNNER_OS,,}
-        RUNNER_ARCH=${RUNNER_ARCH,,}
+        OS=$(echo "$OS" | tr "[:upper:]" "[:lower:]")
+        ARCH=$(echo "$ARCH" | tr "[:upper:]" "[:lower:]")

-        RUNNER_OS=linux
-        RUNNER_ARCH=x64
+        if [ "$OS" = "windows" ]; then
+          OS=win
+          echo "Windows is unsupported at this time"
+          exit 1
+        fi
+        if [ "$OS" = "macos" ]; then
+          OS=mac
+        fi
+        if [ "$ARCH" = "arm64" ]; then
+          ARCH=armv8
+        fi

-        FILE=monero-$RUNNER_OS-$RUNNER_ARCH-${{ inputs.version }}.tar.bz2
+        FILE=monero-$OS-$ARCH-${{ inputs.version }}.tar.bz2
         wget https://downloads.getmonero.org/cli/$FILE
         tar -xvf $FILE
+        rm $FILE

-        sudo mv monero-x86_64-linux-gnu-${{ inputs.version }}/monerod /usr/bin/monerod
+        sudo mv $(find . -name monerod) /usr/bin/monerod
         sudo chmod 777 /usr/bin/monerod
         sudo chmod +x /usr/bin/monerod
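As a worked example of the OS/ARCH mapping in the action above (values are illustrative, not part of the diff): a macOS arm64 runner lowercases `macOS`/`ARM64` to `macos`/`arm64`, which the new logic rewrites to `mac`/`armv8`, so with the default version the download resolves to:

# assumed inputs: runner.os=macOS, runner.arch=ARM64, inputs.version=v0.18.4.4
FILE=monero-mac-armv8-v0.18.4.4.tar.bz2
wget https://downloads.getmonero.org/cli/$FILE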
.github/actions/test-dependencies/action.yml (8 changed lines)
@@ -5,12 +5,12 @@ inputs:
   monero-version:
     description: "Monero version to download and run as a regtest node"
     required: false
-    default: v0.18.3.4
+    default: v0.18.4.4

   bitcoin-version:
     description: "Bitcoin version to download and run as a regtest node"
     required: false
-    default: "29.1"
+    default: "30.0"

 runs:
   using: "composite"
@@ -19,9 +19,9 @@ runs:
       uses: ./.github/actions/build-dependencies

     - name: Install Foundry
-      uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
+      uses: foundry-rs/foundry-toolchain@50d5a8956f2e319df19e6b57539d7e2acb9f8c1e # 1.5.0
       with:
-        version: nightly-f625d0fa7c51e65b4bf1e8f7931cd1c6e2e285e9
+        version: v1.5.0
         cache: false

     - name: Run a Monero Regtest Node
.github/nightly-version (2 changed lines)
@@ -1 +1 @@
-nightly-2025-09-01
+nightly-2025-12-01
.github/workflows/common-tests.yml (2 changed lines)
@@ -17,7 +17,7 @@ jobs:
   test-common:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies
.github/workflows/coordinator-tests.yml (2 changed lines)
@@ -31,7 +31,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies
.github/workflows/crypto-tests.yml (2 changed lines)
@@ -19,7 +19,7 @@ jobs:
   test-crypto:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies
.github/workflows/daily-deny.yml (10 changed lines)
@@ -9,16 +9,10 @@ jobs:
     name: Run cargo deny
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

-      - name: Advisory Cache
-        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
-        with:
-          path: ~/.cargo/advisory-db
-          key: rust-advisory-db
-
       - name: Install cargo deny
-        run: cargo +1.90 install cargo-deny --version =0.18.4
+        run: cargo +1.91.1 install cargo-deny --version =0.18.9

       - name: Run cargo deny
         run: cargo deny -L error --all-features check --hide-inclusion-graph
.github/workflows/full-stack-tests.yml (2 changed lines)
@@ -13,7 +13,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies
.github/workflows/lint.yml (61 changed lines)
@@ -11,11 +11,11 @@ jobs:
   clippy:
     strategy:
       matrix:
-        os: [ubuntu-latest, macos-13, macos-14, windows-latest]
+        os: [ubuntu-latest, macos-15-intel, macos-latest, windows-latest]
     runs-on: ${{ matrix.os }}

     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Get nightly version to use
         id: nightly
@@ -26,7 +26,7 @@ jobs:
         uses: ./.github/actions/build-dependencies

       - name: Install nightly rust
-        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-src -c clippy
+        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c clippy

       - name: Run Clippy
         run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module
@@ -43,16 +43,10 @@ jobs:
   deny:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

-      - name: Advisory Cache
-        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
-        with:
-          path: ~/.cargo/advisory-db
-          key: rust-advisory-db
-
       - name: Install cargo deny
-        run: cargo +1.90 install cargo-deny --version =0.18.4
+        run: cargo +1.91.1 install cargo-deny --version =0.18.9

       - name: Run cargo deny
         run: cargo deny -L error --all-features check --hide-inclusion-graph
@@ -60,7 +54,7 @@ jobs:
   fmt:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Get nightly version to use
         id: nightly
@@ -73,32 +67,32 @@ jobs:
       - name: Run rustfmt
         run: cargo +${{ steps.nightly.outputs.version }} fmt -- --check

-      - name: Install foundry
-        uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
+      - name: Install Foundry
+        uses: foundry-rs/foundry-toolchain@50d5a8956f2e319df19e6b57539d7e2acb9f8c1e # 1.5.0
         with:
-          version: nightly-41d4e5437107f6f42c7711123890147bc736a609
+          version: v1.5.0
           cache: false

       - name: Run forge fmt
-        run: FOUNDRY_FMT_SORT_INPUTS=false FOUNDRY_FMT_LINE_LENGTH=100 FOUNDRY_FMT_TAB_WIDTH=2 FOUNDRY_FMT_BRACKET_SPACING=true FOUNDRY_FMT_INT_TYPES=preserve forge fmt --check $(find . -iname "*.sol")
+        run: FOUNDRY_FMT_SORT_INPUTS=false FOUNDRY_FMT_LINE_LENGTH=100 FOUNDRY_FMT_TAB_WIDTH=2 FOUNDRY_FMT_BRACKET_SPACING=true FOUNDRY_FMT_INT_TYPES=preserve forge fmt --check $(find . -name "*.sol")

   machete:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
       - name: Verify all dependencies are in use
         run: |
-          cargo +1.90 install cargo-machete --version =0.9.1
-          cargo +1.90 machete
+          cargo +1.91.1 install cargo-machete --version =0.9.1
+          cargo +1.91.1 machete

   msrv:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
       - name: Verify claimed `rust-version`
         shell: bash
         run: |
-          cargo +1.90 install cargo-msrv --version =0.18.4
+          cargo +1.91.1 install cargo-msrv --version =0.18.4

           function check_msrv {
             # We `cd` into the directory passed as the first argument, but will return to the
@@ -189,16 +183,16 @@ jobs:
   slither:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

+      - name: Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
       - name: Slither
         run: |
-          python3 -m pip install solc-select
-          solc-select install 0.8.26
-          solc-select use 0.8.26
-
-          python3 -m pip install slither-analyzer
-
-          slither --include-paths ./networks/ethereum/schnorr/contracts/Schnorr.sol
+          python3 -m pip install slither-analyzer==0.11.3
+          slither ./networks/ethereum/schnorr/contracts/Schnorr.sol
           slither --include-paths ./networks/ethereum/schnorr/contracts ./networks/ethereum/schnorr/contracts/tests/Schnorr.sol
           slither processor/ethereum/deployer/contracts/Deployer.sol
           slither processor/ethereum/erc20/contracts/IERC20.sol
@@ -207,3 +201,14 @@ jobs:
           cp processor/ethereum/erc20/contracts/IERC20.sol processor/ethereum/router/contracts/
           cd processor/ethereum/router/contracts
           slither Router.sol
+
+  shellcheck:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - name: shellcheck
+        run: |
+          sudo apt install -y shellcheck
+          find . -name "*.sh" | while read -r script; do
+            shellcheck --enable=all --shell=sh --severity=info $script
+          done
.github/workflows/message-queue-tests.yml (2 changed lines)
@@ -27,7 +27,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies
.github/workflows/mini-tests.yml (2 changed lines)
@@ -17,7 +17,7 @@ jobs:
   test-common:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies
.github/workflows/monthly-nightly-update.yml (2 changed lines)
@@ -9,7 +9,7 @@ jobs:
     name: Update nightly
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
        with:
          submodules: "recursive"
.github/workflows/networks-tests.yml (2 changed lines)
@@ -21,7 +21,7 @@ jobs:
   test-networks:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Test Dependencies
         uses: ./.github/actions/test-dependencies
.github/workflows/no-std.yml (2 changed lines)
@@ -23,7 +23,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies
.github/workflows/pages.yml (14 changed lines)
@@ -46,16 +46,16 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
       - name: Setup Ruby
-        uses: ruby/setup-ruby@44511735964dcb71245e7e55f72539531f7bc0eb
+        uses: ruby/setup-ruby@8aeb6ff8030dd539317f8e1769a044873b56ea71 # 1.268.0
         with:
           bundler-cache: true
           cache-version: 0
           working-directory: "${{ github.workspace }}/docs"
       - name: Setup Pages
         id: pages
-        uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b
+        uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b # 5.0.0
       - name: Build with Jekyll
         run: cd ${{ github.workspace }}/docs && bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
         env:
@@ -69,12 +69,12 @@ jobs:
         uses: ./.github/actions/build-dependencies
       - name: Buld Rust docs
         run: |
-          rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-docs -c rust-src
-          RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --all-features
+          rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-docs
+          RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --no-deps --all-features
           mv target/doc docs/_site/rust

       - name: Upload artifact
-        uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b
+        uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b # 4.0.0
         with:
           path: "docs/_site/"

@@ -88,4 +88,4 @@ jobs:
     steps:
       - name: Deploy to GitHub Pages
         id: deployment
-        uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e
+        uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # 4.0.5
.github/workflows/processor-tests.yml (2 changed lines)
@@ -31,7 +31,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies
.github/workflows/reproducible-runtime.yml (4 changed lines)
@@ -27,10 +27,10 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies

       - name: Run Reproducible Runtime tests
-        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-reproducible-runtime-tests
+        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-reproducible-runtime-tests -- --nocapture
.github/workflows/stack-size.yml (new file, 166 lines)
name: Check Update Default Stack Size

on:
  push:
    paths:
      - "orchestration/increase_default_stack_size.sh"
  pull_request:
    paths:
      - "orchestration/increase_default_stack_size.sh"
  workflow_dispatch:
  # Also run weekly to ensure this doesn't inadvertently decay
  schedule:
    - cron: "0 0 * * 1"

jobs:
  stack_size:
    strategy:
      matrix:
        os: [ubuntu-latest, ubuntu-24.04, ubuntu-22.04, macos-15-intel, macos-latest]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

      - name: Install Go
        uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # 6.1.0
        with:
          go-version: stable

      - name: Monero Daemon Cache
        id: cache-monerod
        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # 4.2.4
        with:
          path: monerod
          key: stack-size-monerod

      - name: Download the Monero Daemon
        if: steps.cache-monerod.outputs.cache-hit != 'true'
        run: |
          # We explicitly download the Linux binary as this script executes over an ELF binary
          wget https://downloads.getmonero.org/cli/monero-linux-x64-v0.18.4.4.tar.bz2
          tar -xvf monero-linux-x64-v0.18.4.4.tar.bz2
          mv $(find . -name monerod) .

      - name: Verify expected behavior
        shell: bash
        run: |
          STACK=$((8 * 1024 * 1024))

          OS=${{ runner.os }}
          if [ "$OS" = "Linux" ]; then
            sudo apt update -y
            sudo apt install -y ksh bash dash zsh busybox posh mksh yash
            sudo ln -s "$(which busybox)" /usr/bin/ash
            sudo ln -s "$(which busybox)" /usr/bin/hush
            wget http://ftp.us.debian.org/debian/pool/main/g/gash/gash_0.3.1-1_amd64.deb
            sudo apt install ./gash_0.3.1-1_amd64.deb
            SHELLS="sh ksh bash dash zsh ash hush posh mksh lksh gash yash"
          fi
          if [ "$OS" = "macOS" ]; then
            brew install binutils # `readelf`

            # `binutils` is not placed within the path, so find its
            # `readelf` bin and manually move it into our path
            HOMEBREW_ROOT_PATH=/opt/homebrew # Apple Silicon
            if [ $(uname -m) = "x86_64" ]; then HOMEBREW_ROOT_PATH=/usr/local; fi # Intel
            sudo cp $(find "$HOMEBREW_ROOT_PATH" -name readelf) /usr/local/bin/

            # macOS has the benefit of packaging `oksh`, `osh`, and having distinct core tools
            # TODO: `posh` is packaged but doesn't work: https://github.com/serai-dex/serai/issues/703
            brew install ksh93 bash dash-shell zsh mksh oksh yash oils-for-unix
            SHELLS="sh ksh bash dash zsh mksh oksh yash osh"

            # macOS also has the benefit of packaging (via MacPorts) `mrsh`,
            # which explicitly attempts to be be exactly POSIX, without any extensions.
            # We first have to install MacPorts, the easiest method being via source.
            curl -O https://distfiles.macports.org/MacPorts/MacPorts-2.11.6.tar.bz2
            tar xf MacPorts-2.11.6.tar.bz2
            cd MacPorts-2.11.6
            ./configure
            make
            sudo make install
            cd ..
            PATH=$PATH:/opt/local/bin
            sudo port -v selfupdate

            # Now, we install `mrsh`
            # TODO: https://github.com/serai-dex/serai/issues/704
            # sudo port install mrsh
            # SHELLS="$SHELLS mrsh"
          fi

          # Install shells available via `cargo`
          cargo install brush-shell
          SHELLS="$SHELLS brush"
          # We would also test with `nsh` here if not for https://github.com/nuta/nsh/issues/49
          # cargo install nsh
          # SHELLS="$SHELLS nsh"

          # Install shells available via `go`
          # TODO: https://github.com/u-root/u-root/issues/3474
          # GOBIN=/usr/local/bin go install github.com/u-root/u-root/cmds/core/gosh@latest
          # SHELLS="$SHELLS gosh"

          # Patch with `muslstack`
          cp monerod monerod-muslstack
          GOBIN=$(pwd) go install github.com/yaegashi/muslstack@d19cc5866abce3ca59dfc1666df7cc97097d0933
          ./muslstack -s "$STACK" ./monerod-muslstack

          # Patch with `chelf`, which only works on a Linux host (due to requiring `elf.h`)
          # TODO: Install the header on macOS so `chelf` may be used as the source of truth
          if [ "$OS" = "Linux" ]; then
            cp monerod monerod-chelf
            git clone https://github.com/Gottox/chelf
            cd chelf
            git checkout b2994186cea7b7d61a588fd06c1cc1ae75bcc21a
            make
            ./chelf -s "$STACK" ../monerod-chelf
            cd ..
          fi

          # Run our script with all installed shells
          for shell in $SHELLS; do
            echo "Executing \`$shell\`"
            cp monerod monerod-idss-$shell
            ln -s "$(which $shell)" sh
            ./sh ./orchestration/increase_default_stack_size.sh monerod-idss-$shell
            rm ./sh
          done

          # Verify they all had the same result
          sha256() {
            sha256sum "$1" | cut -d' ' -f1
          }
          CHELF=$(sha256 monerod-muslstack)
          find . -name "monerod-*" | while read -r bin; do
            BIN=$(sha256 "$bin")
            if [ ! "$CHELF" = "$BIN" ]; then
              echo "Different artifact between \`monerod-muslstack\` ($CHELF) and \`$bin\` ($BIN)"
              exit 1
            fi
          done

          # Verify the integrity of the result
          read_stack() {
            STACK_INFO=$(readelf "$1" -l | grep STACK -A1)
            MEMSZ=$(printf "%s\n" "$STACK_INFO" | tail -n1 | sed -E s/^[[:space:]]*//g | cut -f2 -d' ')
            printf "%i" $((MEMSZ))
          }
          INITIAL_STACK=$(read_stack monerod)
          if [ "$INITIAL_STACK" -ne "0" ]; then
            echo "Initial \`PT_GNU_STACK\` wasn't 0"
            exit 2
          fi

          UPDATED_STACK=$(read_stack monerod-muslstack)
          if [ "$UPDATED_STACK" -ne "$STACK" ]; then
            echo "Updated \`PT_GNU_STACK\` ($UPDATED_STACK) wasn't 8 MB ($STACK)"
            exit 3
          fi

          # Only one byte should be different due to the bit pattern of 8 MB
          BYTES_DIFFERENT=$(cmp -l monerod monerod-muslstack | wc -l || true)
          if [ "$BYTES_DIFFERENT" -ne 1 ]; then
            echo "More than one byte was different between the two binaries"
            exit 4
          fi
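A note on the final check in the workflow above: 8 MiB is 0x00800000, and the unpatched binary's `PT_GNU_STACK` memsz is 0, so writing that value should flip exactly one byte (the one that becomes 0x80). A minimal shell sketch of the arithmetic, for reference only:

STACK=$((8 * 1024 * 1024))
printf '%016x\n' "$STACK"   # 0000000000800000: a single non-zero byte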
.github/workflows/tests.yml (15 changed lines)
@@ -29,7 +29,7 @@ jobs:
   test-infra:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies
@@ -74,7 +74,7 @@ jobs:
   test-substrate:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies
@@ -84,6 +84,7 @@ jobs:
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
             -p serai-primitives \
             -p serai-abi \
+            -p substrate-median \
             -p serai-core-pallet \
             -p serai-coins-pallet \
             -p serai-validator-sets-pallet \
@@ -95,14 +96,20 @@ jobs:
             -p serai-in-instructions-pallet \
             -p serai-runtime \
-            -p serai-node
+            -p serai-node \
+            -p serai-substrate-tests

   test-serai-client:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0

       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies

       - name: Run Tests
-        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client
+        run: |
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-serai
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-bitcoin
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-ethereum
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-monero
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client
.gitignore (8 changed lines)
@@ -1,7 +1,13 @@
 target
+
+# Don't commit any `Cargo.lock` which aren't the workspace's
+Cargo.lock
+!/Cargo.lock
+
+# Don't commit any `Dockerfile`, as they're auto-generated, except the only one which isn't
 Dockerfile
-Dockerfile.fast-epoch
 !orchestration/runtime/Dockerfile

 .test-logs

 .vscode
Cargo.lock (generated, 2856 changed lines): diff suppressed because it is too large.
Cargo.toml (87 changed lines)
@@ -62,8 +62,8 @@ members = [
   "processor/ethereum/primitives",
   "processor/ethereum/test-primitives",
   "processor/ethereum/deployer",
-  "processor/ethereum/router",
   "processor/ethereum/erc20",
+  "processor/ethereum/router",
   "processor/ethereum",
   "processor/monero",

@@ -80,6 +80,8 @@ members = [
   "substrate/primitives",
   "substrate/abi",

+  "substrate/median",
+
   "substrate/core",
   "substrate/coins",
   "substrate/validator-sets",
@@ -93,6 +95,10 @@ members = [
   "substrate/runtime",
   "substrate/node",

+  "substrate/client/serai",
+  "substrate/client/bitcoin",
+  "substrate/client/ethereum",
+  "substrate/client/monero",
   "substrate/client",

   "orchestration",
@@ -105,10 +111,24 @@ members = [
   "tests/message-queue",
   # TODO "tests/processor",
   # TODO "tests/coordinator",
+  "tests/substrate",
   # TODO "tests/full-stack",
   "tests/reproducible-runtime",
 ]

+[profile.dev]
+panic = "abort"
+overflow-checks = true
+[profile.release]
+panic = "abort"
+overflow-checks = true
+# These do not respect the `panic` configuration value, so we don't provide them
+[profile.test]
+# panic = "abort" # https://github.com/rust-lang/issues/67650
+overflow-checks = true
+[profile.bench]
+overflow-checks = true
+
 [profile.dev.package]
 # Always compile Monero (and a variety of dependencies) with optimizations due
 # to the extensive operations required for Bulletproofs
@@ -126,11 +146,14 @@ dalek-ff-group = { opt-level = 3 }

 multiexp = { opt-level = 3 }

-monero-generators = { opt-level = 3 }
-monero-borromean = { opt-level = 3 }
-monero-bulletproofs = { opt-level = 3 }
+monero-io = { opt-level = 3 }
+monero-primitives = { opt-level = 3 }
+monero-ed25519 = { opt-level = 3 }
 monero-mlsag = { opt-level = 3 }
 monero-clsag = { opt-level = 3 }
+monero-borromean = { opt-level = 3 }
+monero-bulletproofs-generators = { opt-level = 3 }
+monero-bulletproofs = {opt-level = 3 }
 monero-oxide = { opt-level = 3 }

 # Always compile the eVRF DKG tree with optimizations as well
@@ -155,37 +178,38 @@ revm-precompile = { opt-level = 3 }
 revm-primitives = { opt-level = 3 }
 revm-state = { opt-level = 3 }

-[profile.release]
-panic = "unwind"
-overflow-checks = true
-
 [patch.crates-io]
-# Point to empty crates for unused crates in our tree
+# Point to empty crates for crates unused within in our tree
+alloy-eip2124 = { path = "patches/ethereum/alloy-eip2124" }
 ark-ff-3 = { package = "ark-ff", path = "patches/ethereum/ark-ff-0.3" }
 ark-ff-4 = { package = "ark-ff", path = "patches/ethereum/ark-ff-0.4" }
 c-kzg = { path = "patches/ethereum/c-kzg" }
-libsecp256k1 = { path = "patches/ethereum/libsecp256k1" }
-rug = { path = "patches/ethereum/rug" }
-secp256k1-30 = { package = "secp256k1", path = "patches/ethereum/secp256k1-30" }
-secp256k1-31 = { package = "secp256k1", path = "patches/ethereum/secp256k1-31" }
+fastrlp-3 = { package = "fastrlp", path = "patches/ethereum/fastrlp-0.3" }
+fastrlp-4 = { package = "fastrlp", path = "patches/ethereum/fastrlp-0.4" }
+primitive-types-12 = { package = "primitive-types", path = "patches/ethereum/primitive-types-0.12" }
+rlp = { path = "patches/ethereum/rlp" }
+secp256k1-30 = { package = "secp256k1", path = "patches/ethereum/secp256k1-0.30" }
+
+# Dependencies from monero-oxide which originate from within our own tree, potentially shimmed to account for deviations since publishing
+std-shims = { path = "patches/std-shims" }
+simple-request = { path = "patches/simple-request" }
+multiexp = { path = "crypto/multiexp" }
+flexible-transcript = { path = "crypto/transcript" }
+ciphersuite = { path = "patches/ciphersuite" }
+dalek-ff-group = { path = "patches/dalek-ff-group" }
+minimal-ed448 = { path = "crypto/ed448" }
+modular-frost = { path = "crypto/frost" }
+
+# Patches due to `std` now including the required functionality
+is_terminal_polyfill = { path = "patches/is_terminal_polyfill" }
+lazy_static = { path = "patches/lazy_static" }
+# This has a non-deprecated `std` alternative since Rust's 2024 edition
+home = { path = "patches/home" }

 # Updates to the latest version
 darling = { path = "patches/darling" }
 thiserror = { path = "patches/thiserror" }

-# Dependencies from monero-oxide which originate from within our own tree
-std-shims = { path = "patches/std-shims" }
-simple-request = { path = "common/request" }
-multiexp = { path = "crypto/multiexp" }
-flexible-transcript = { path = "crypto/transcript" }
-ciphersuite = { path = "patches/ciphersuite" }
-dalek-ff-group = { path = "crypto/dalek-ff-group" }
-minimal-ed448 = { path = "crypto/ed448" }
-modular-frost = { path = "crypto/frost" }
-
-# https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
-lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }
-
 # directories-next was created because directories was unmaintained
 # directories-next is now unmaintained while directories is maintained
 # The directories author pulls in ridiculously pointless crates and prefers
@@ -194,13 +218,13 @@ lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev
 option-ext = { path = "patches/option-ext" }
 directories-next = { path = "patches/directories-next" }

+# Patch from a fork back to upstream
+parity-bip39 = { path = "patches/parity-bip39" }
+
 # Patch to include `FromUniformBytes<64>` over `Scalar`
 k256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" }
 p256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" }

-# Patch due to `std` now including the required functionality
-is_terminal_polyfill = { path = "./patches/is_terminal_polyfill" }
-
 [workspace.lints.clippy]
 incompatible_msrv = "allow" # Manually verified with a GitHub workflow
 manual_is_multiple_of = "allow"
@@ -245,7 +269,7 @@ redundant_closure_for_method_calls = "deny"
 redundant_else = "deny"
 string_add_assign = "deny"
 string_slice = "deny"
-unchecked_duration_subtraction = "deny"
+unchecked_time_subtraction = "deny"
 uninlined_format_args = "deny"
 unnecessary_box_returns = "deny"
 unnecessary_join = "deny"
@@ -254,3 +278,6 @@ unnested_or_patterns = "deny"
 unused_async = "deny"
 unused_self = "deny"
 zero_sized_map_values = "deny"
+
+[workspace.lints.rust]
+unused = "allow" # TODO: https://github.com/rust-lang/rust/issues/147648
audits/crypto/dkg/evrf/README.md (new file, 50 lines)
# eVRF DKG

In 2024, the [eVRF paper](https://eprint.iacr.org/2024/397) was published to
the IACR preprint server. Within it was a one-round unbiased DKG and a
one-round unbiased threshold DKG. Unfortunately, both simply describe
communication of the secret shares as 'Alice sends $s_b$ to Bob'. This causes,
in practice, the need for an additional round of communication to occur where
all participants confirm they received their secret shares.

Within Serai, it was posited to use the same premises as the DDH eVRF itself to
achieve a verifiable encryption scheme. This allows the secret shares to be
posted to any 'bulletin board' (such as a blockchain) and for all observers to
confirm:

- A participant participated
- The secret shares sent can be received by the intended recipient so long as
  they can access the bulletin board

Additionally, Serai desired a robust scheme (albeit with an biased key as the
output, which is fine for our purposes). Accordingly, our implementation
instantiates the threshold eVRF DKG from the eVRF paper, with our own proposal
for verifiable encryption, with the caller allowed to decide the set of
participants. They may:

- Select everyone, collapsing to the non-threshold unbiased DKG from the eVRF
  paper
- Select a pre-determined set, collapsing to the threshold unbaised DKG from
  the eVRF paper
- Select a post-determined set (with any solution for the Common Subset
  problem), allowing achieving a robust threshold biased DKG

Note that the eVRF paper proposes using the eVRF to sample coefficients yet
this is unnecessary when the resulting key will be biased. Any proof of
knowledge for the coefficients, as necessary for their extraction within the
security proofs, would be sufficient.

MAGIC Grants contracted HashCloak to formalize Serai's proposal for a DKG and
provide proofs for its security. This resulted in
[this paper](<./Security Proofs.pdf>).

Our implementation itself is then built on top of the audited
[`generalized-bulletproofs`](https://github.com/kayabaNerve/monero-oxide/tree/generalized-bulletproofs/audits/crypto/generalized-bulletproofs)
and
[`generalized-bulletproofs-ec-gadgets`](https://github.com/monero-oxide/monero-oxide/tree/fcmp%2B%2B/audits/fcmps).

Note we do not use the originally premised DDH eVRF yet the one premised on
elliptic curve divisors, the methodology of which is commented on
[here](https://github.com/monero-oxide/monero-oxide/tree/fcmp%2B%2B/audits/divisors).

Our implementation itself is unaudited at this time however.
audits/crypto/dkg/evrf/Security Proofs.pdf (new binary file, not shown)
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/db"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
-rust-version = "1.65"
+rust-version = "1.77"

 [package.metadata.docs.rs]
 all-features = true
common/env/src/lib.rs (2 changed lines)
@@ -1,5 +1,5 @@
 #![cfg_attr(docsrs, feature(doc_cfg))]
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]

 // Obtain a variable from the Serai environment/secret store.
 pub fn var(variable: &str) -> Option<String> {
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
@@ -1,6 +1,6 @@
 [package]
 name = "simple-request"
-version = "0.2.0"
+version = "0.3.0"
 description = "A simple HTTP(S) request library"
 license = "MIT"
 repository = "https://github.com/serai-dex/serai/tree/develop/common/request"
@@ -19,10 +19,10 @@ workspace = true
 [dependencies]
 tower-service = { version = "0.3", default-features = false }
 hyper = { version = "1", default-features = false, features = ["http1", "client"] }
-hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy", "tokio"] }
+hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy"] }
 http-body-util = { version = "0.1", default-features = false }
 futures-util = { version = "0.3", default-features = false, features = ["std"] }
-tokio = { version = "1", default-features = false }
+tokio = { version = "1", default-features = false, features = ["sync"] }

 hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }

@@ -30,7 +30,8 @@ zeroize = { version = "1", optional = true }
 base64ct = { version = "1", features = ["alloc"], optional = true }

 [features]
-tls = ["hyper-rustls"]
+tokio = ["hyper-util/tokio"]
+tls = ["tokio", "hyper-rustls"]
 webpki-roots = ["tls", "hyper-rustls/webpki-roots"]
 basic-auth = ["zeroize", "base64ct"]
 default = ["tls"]
@@ -1,19 +1,20 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 
+use core::{pin::Pin, future::Future};
 use std::sync::Arc;
 
-use tokio::sync::Mutex;
+use futures_util::FutureExt;
+use ::tokio::sync::Mutex;
 
 use tower_service::Service as TowerService;
 
+use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest, rt::Executor};
+pub use hyper;
+
+use hyper_util::client::legacy::{Client as HyperClient, connect::HttpConnector};
+
 #[cfg(feature = "tls")]
 use hyper_rustls::{HttpsConnectorBuilder, HttpsConnector};
-use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest};
-use hyper_util::{
-  rt::tokio::TokioExecutor,
-  client::legacy::{Client as HyperClient, connect::HttpConnector},
-};
-pub use hyper;
 
 mod request;
 pub use request::*;
@@ -37,21 +38,32 @@ type Connector = HttpConnector;
 type Connector = HttpsConnector<HttpConnector>;
 
 #[derive(Clone, Debug)]
-enum Connection {
+enum Connection<
+  E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
+> {
   ConnectionPool(HyperClient<Connector, Full<Bytes>>),
   Connection {
+    executor: E,
     connector: Connector,
     host: Uri,
     connection: Arc<Mutex<Option<SendRequest<Full<Bytes>>>>>,
   },
 }
 
+/// An HTTP client.
+///
+/// `tls` is only guaranteed to work when using the `tokio` executor. Instantiating a client when
+/// the `tls` feature is active without using the `tokio` executor will cause errors.
 #[derive(Clone, Debug)]
-pub struct Client {
-  connection: Connection,
+pub struct Client<
+  E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
+> {
+  connection: Connection<E>,
 }
 
-impl Client {
+impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>>
+  Client<E>
+{
   #[allow(clippy::unnecessary_wraps)]
   fn connector() -> Result<Connector, Error> {
     let mut res = HttpConnector::new();
@@ -59,6 +71,15 @@ impl Client {
     res.set_nodelay(true);
     res.set_reuse_address(true);
 
+    #[cfg(feature = "tls")]
+    if core::any::TypeId::of::<E>() !=
+      core::any::TypeId::of::<hyper_util::rt::tokio::TokioExecutor>()
+    {
+      Err(Error::ConnectionError(
+        "`tls` feature enabled but not using the `tokio` executor".into(),
+      ))?;
+    }
+
     #[cfg(feature = "tls")]
     res.enforce_http(false);
     #[cfg(feature = "tls")]
@@ -79,19 +100,23 @@ impl Client {
     Ok(res)
   }
 
-  pub fn with_connection_pool() -> Result<Client, Error> {
+  pub fn with_executor_and_connection_pool(executor: E) -> Result<Client<E>, Error> {
     Ok(Client {
       connection: Connection::ConnectionPool(
-        HyperClient::builder(TokioExecutor::new())
+        HyperClient::builder(executor)
           .pool_idle_timeout(core::time::Duration::from_secs(60))
           .build(Self::connector()?),
       ),
     })
   }
 
-  pub fn without_connection_pool(host: &str) -> Result<Client, Error> {
+  pub fn with_executor_and_without_connection_pool(
+    executor: E,
+    host: &str,
+  ) -> Result<Client<E>, Error> {
     Ok(Client {
       connection: Connection::Connection {
+        executor,
         connector: Self::connector()?,
         host: {
           let uri: Uri = host.parse().map_err(|_| Error::InvalidUri)?;
@@ -105,7 +130,7 @@ impl Client {
     })
   }
 
-  pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_>, Error> {
+  pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_, E>, Error> {
     let request: Request = request.into();
     let Request { mut request, response_size_limit } = request;
     if let Some(header_host) = request.headers().get(hyper::header::HOST) {
@@ -141,7 +166,7 @@ impl Client {
       Connection::ConnectionPool(client) => {
        client.request(request).await.map_err(Error::HyperUtil)?
       }
-      Connection::Connection { connector, host, connection } => {
+      Connection::Connection { executor, connector, host, connection } => {
        let mut connection_lock = connection.lock().await;
 
        // If there's not a connection...
@@ -153,9 +178,8 @@ impl Client {
        let call_res = call_res.map_err(Error::ConnectionError);
        let (requester, connection) =
          hyper::client::conn::http1::handshake(call_res?).await.map_err(Error::Hyper)?;
-        // This will die when we drop the requester, so we don't need to track an AbortHandle
-        // for it
-        tokio::spawn(connection);
+        // This task will die when we drop the requester
+        executor.execute(Box::pin(connection.map(|_| ())));
        *connection_lock = Some(requester);
       }
 
@@ -178,3 +202,22 @@ impl Client {
     Ok(Response { response, size_limit: response_size_limit, client: self })
   }
 }
+
+#[cfg(feature = "tokio")]
+mod tokio {
+  use hyper_util::rt::tokio::TokioExecutor;
+  use super::*;
+
+  pub type TokioClient = Client<TokioExecutor>;
+  impl Client<TokioExecutor> {
+    pub fn with_connection_pool() -> Result<Self, Error> {
+      Self::with_executor_and_connection_pool(TokioExecutor::new())
+    }
+
+    pub fn without_connection_pool(host: &str) -> Result<Self, Error> {
+      Self::with_executor_and_without_connection_pool(TokioExecutor::new(), host)
+    }
+  }
+}
+#[cfg(feature = "tokio")]
+pub use tokio::TokioClient;
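A minimal usage sketch of the constructors added above, assuming the crate is pulled in with its `tokio` (and default `tls`) features; the `simple_request` crate name comes from the Cargo.toml hunk, `Error` being re-exported at the crate root is an assumption carried over from the prior version, and everything else mirrors the `TokioClient` wrapper introduced in this diff:

use hyper_util::rt::tokio::TokioExecutor;
use simple_request::{Client, TokioClient, Error};

fn build_clients() -> Result<(), Error> {
  // Convenience constructors, only available for the tokio executor.
  let _pooled: TokioClient = TokioClient::with_connection_pool()?;
  let _single: TokioClient = TokioClient::without_connection_pool("https://example.com")?;

  // The executor-generic constructor these wrap; with `tls` enabled, passing a
  // non-tokio executor is rejected when the connector is built.
  let _explicit: Client<TokioExecutor> =
    Client::with_executor_and_connection_pool(TokioExecutor::new())?;
  Ok(())
}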
@@ -1,9 +1,11 @@
+use core::{pin::Pin, future::Future};
 use std::io;
 
 use hyper::{
   StatusCode,
   header::{HeaderValue, HeaderMap},
   body::Incoming,
+  rt::Executor,
 };
 use http_body_util::BodyExt;
 
@@ -14,13 +16,18 @@ use crate::{Client, Error};
 // Borrows the client so its async task lives as long as this response exists.
 #[allow(dead_code)]
 #[derive(Debug)]
-pub struct Response<'a> {
+pub struct Response<
+  'a,
+  E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
+> {
   pub(crate) response: hyper::Response<Incoming>,
   pub(crate) size_limit: Option<usize>,
-  pub(crate) client: &'a Client,
+  pub(crate) client: &'a Client<E>,
 }
 
-impl Response<'_> {
+impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>>
+  Response<'_, E>
+{
   pub fn status(&self) -> StatusCode {
     self.response.status()
   }
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]
 
@@ -6,12 +6,63 @@ pub use std::sync::{Arc, Weak};
 
 mod mutex_shim {
   #[cfg(not(feature = "std"))]
-  pub use spin::{Mutex, MutexGuard};
+  mod spin_mutex {
+    use core::ops::{Deref, DerefMut};
+
+    // We wrap this in an `Option` so we can consider `None` as poisoned
+    pub(super) struct Mutex<T>(spin::Mutex<Option<T>>);
+
+    /// An acquired view of a `Mutex`.
+    pub struct MutexGuard<'mutex, T> {
+      mutex: spin::MutexGuard<'mutex, Option<T>>,
+      // This is `Some` for the lifetime of this guard, and is only represented as an `Option` due
+      // to needing to move it on `Drop` (which solely gives us a mutable reference to `self`)
+      value: Option<T>,
+    }
+
+    impl<T> Mutex<T> {
+      pub(super) const fn new(value: T) -> Self {
+        Self(spin::Mutex::new(Some(value)))
+      }
+
+      pub(super) fn lock(&self) -> MutexGuard<'_, T> {
+        let mut mutex = self.0.lock();
+        // Take from the `Mutex` so future acquisitions will see `None` unless this is restored
+        let value = mutex.take();
+        // Check the prior acquisition did in fact restore the value
+        if value.is_none() {
+          panic!("locking a `spin::Mutex` held by a thread which panicked");
+        }
+        MutexGuard { mutex, value }
+      }
+    }
+
+    impl<T> Deref for MutexGuard<'_, T> {
+      type Target = T;
+      fn deref(&self) -> &T {
+        self.value.as_ref().expect("no value yet checked upon lock acquisition")
+      }
+    }
+    impl<T> DerefMut for MutexGuard<'_, T> {
+      fn deref_mut(&mut self) -> &mut T {
+        self.value.as_mut().expect("no value yet checked upon lock acquisition")
+      }
+    }
+
+    impl<'mutex, T> Drop for MutexGuard<'mutex, T> {
+      fn drop(&mut self) {
+        // Restore the value
+        *self.mutex = self.value.take();
+      }
+    }
+  }
+  #[cfg(not(feature = "std"))]
+  pub use spin_mutex::*;
+
   #[cfg(feature = "std")]
   pub use std::sync::{Mutex, MutexGuard};
 
   /// A shimmed `Mutex` with an API mutual to `spin` and `std`.
-  #[derive(Default, Debug)]
   pub struct ShimMutex<T>(Mutex<T>);
   impl<T> ShimMutex<T> {
     /// Construct a new `Mutex`.
@@ -21,8 +72,9 @@ mod mutex_shim {
 
     /// Acquire a lock on the contents of the `Mutex`.
     ///
-    /// On no-`std` environments, this may spin until the lock is acquired. On `std` environments,
-    /// this may panic if the `Mutex` was poisoned.
+    /// This will panic if the `Mutex` was poisoned.
+    ///
+    /// On no-`std` environments, the implementation presumably defers to that of a spin lock.
     pub fn lock(&self) -> MutexGuard<'_, T> {
       #[cfg(feature = "std")]
       let res = self.0.lock().unwrap();
@@ -35,6 +87,9 @@ mod mutex_shim {
 pub use mutex_shim::{ShimMutex as Mutex, MutexGuard};
 
 #[rustversion::before(1.80)]
+pub use spin::Lazy as LazyLock;
+
+#[rustversion::since(1.80)]
 #[cfg(not(feature = "std"))]
 pub use spin::Lazy as LazyLock;
 #[rustversion::since(1.80)]
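A small sketch of how the shimmed `Mutex` above is meant to be used; the `std_shims::sync` path follows the `ShimMutex as Mutex` re-export shown in this hunk, and is an assumption rather than something stated in the diff:

use std_shims::sync::Mutex;

// `lock()` returns the guard directly (no `Result`); if a prior holder panicked,
// the call itself panics, on both the `std` path and the spin-backed no-`std` path.
fn increment(counter: &Mutex<u64>) -> u64 {
  let mut guard = counter.lock();
  *guard += 1;
  *guard
}

fn example() -> u64 {
  let counter = Mutex::new(0u64);
  increment(&counter);
  increment(&counter)
}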
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
 
@@ -1,5 +1,5 @@
 #![cfg_attr(docsrs, feature(doc_cfg))]
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))]
 
 //! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation.
@@ -42,7 +42,7 @@ messages = { package = "serai-processor-messages", path = "../processor/messages
 message-queue = { package = "serai-message-queue", path = "../message-queue" }
 tributary-sdk = { path = "./tributary-sdk" }
 
-serai-client = { path = "../substrate/client", default-features = false, features = ["serai"] }
+serai-client-serai = { path = "../substrate/client/serai", default-features = false }
 
 log = { version = "0.4", default-features = false, features = ["std"] }
 env_logger = { version = "0.10", default-features = false, features = ["humantime"] }
@@ -21,7 +21,7 @@ workspace = true
 blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }
 
 borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
-serai-client = { path = "../../substrate/client", default-features = false, features = ["serai"] }
+serai-client-serai = { path = "../../substrate/client/serai", default-features = false }
 
 log = { version = "0.4", default-features = false, features = ["std"] }
 
@@ -1,10 +1,21 @@
 use core::future::Future;
 use std::{sync::Arc, collections::HashMap};
 
-use serai_client::{
-  primitives::{SeraiAddress, Amount},
-  validator_sets::primitives::ExternalValidatorSet,
-  Serai,
+use blake2::{Digest, Blake2b256};
+
+use serai_client_serai::{
+  abi::{
+    primitives::{
+      network_id::{ExternalNetworkId, NetworkId},
+      balance::Amount,
+      crypto::Public,
+      validator_sets::{Session, ExternalValidatorSet},
+      address::SeraiAddress,
+      merkle::IncrementalUnbalancedMerkleTree,
+    },
+    validator_sets::Event,
+  },
+  Serai, Events,
 };
 
 use serai_db::*;
@@ -12,9 +23,20 @@ use serai_task::ContinuallyRan;
 
 use crate::*;
 
+#[derive(BorshSerialize, BorshDeserialize)]
+struct Set {
+  session: Session,
+  key: Public,
+  stake: Amount,
+}
+
 create_db!(
   CosignIntend {
     ScanCosignFrom: () -> u64,
+    BuildsUpon: () -> IncrementalUnbalancedMerkleTree,
+    Stakes: (network: ExternalNetworkId, validator: SeraiAddress) -> Amount,
+    Validators: (set: ExternalValidatorSet) -> Vec<SeraiAddress>,
+    LatestSet: (network: ExternalNetworkId) -> Set,
   }
 );
 
@@ -35,23 +57,38 @@ db_channel! {
 async fn block_has_events_justifying_a_cosign(
   serai: &Serai,
   block_number: u64,
-) -> Result<(Block, HasEvents), String> {
+) -> Result<(Block, Events, HasEvents), String> {
   let block = serai
-    .finalized_block_by_number(block_number)
+    .block_by_number(block_number)
     .await
     .map_err(|e| format!("{e:?}"))?
     .ok_or_else(|| "couldn't get block which should've been finalized".to_string())?;
-  let serai = serai.as_of(block.hash());
+  let events = serai.events(block.header.hash()).await.map_err(|e| format!("{e:?}"))?;
 
-  if !serai.validator_sets().key_gen_events().await.map_err(|e| format!("{e:?}"))?.is_empty() {
-    return Ok((block, HasEvents::Notable));
+  if events.validator_sets().set_keys_events().next().is_some() {
+    return Ok((block, events, HasEvents::Notable));
   }
 
-  if !serai.coins().burn_with_instruction_events().await.map_err(|e| format!("{e:?}"))?.is_empty() {
-    return Ok((block, HasEvents::NonNotable));
+  if events.coins().burn_with_instruction_events().next().is_some() {
+    return Ok((block, events, HasEvents::NonNotable));
   }
 
-  Ok((block, HasEvents::No))
+  Ok((block, events, HasEvents::No))
+}
+
+// Fetch the `ExternalValidatorSet`s, and their associated keys, used for cosigning as of this
+// block.
+fn cosigning_sets(getter: &impl Get) -> Vec<(ExternalValidatorSet, Public, Amount)> {
+  let mut sets = vec![];
+  for network in ExternalNetworkId::all() {
+    let Some(Set { session, key, stake }) = LatestSet::get(getter, network) else {
+      // If this network doesn't have usable keys, move on
+      continue;
+    };
+
+    sets.push((ExternalValidatorSet { network, session }, key, stake));
+  }
+  sets
 }
 
 /// A task to determine which blocks we should intend to cosign.
@@ -67,56 +104,108 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
     async move {
       let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1);
       let latest_block_number =
-        self.serai.latest_finalized_block().await.map_err(|e| format!("{e:?}"))?.number();
+        self.serai.latest_finalized_block_number().await.map_err(|e| format!("{e:?}"))?;
 
       for block_number in start_block_number ..= latest_block_number {
         let mut txn = self.db.txn();
 
-        let (block, mut has_events) =
+        let (block, events, mut has_events) =
          block_has_events_justifying_a_cosign(&self.serai, block_number)
            .await
            .map_err(|e| format!("{e:?}"))?;
 
+        let mut builds_upon =
+          BuildsUpon::get(&txn).unwrap_or(IncrementalUnbalancedMerkleTree::new());
+
        // Check we are indexing a linear chain
-        if (block_number > 1) &&
-          (<[u8; 32]>::from(block.header.parent_hash) !=
-            SubstrateBlockHash::get(&txn, block_number - 1)
-              .expect("indexing a block but haven't indexed its parent"))
+        if block.header.builds_upon() !=
+          builds_upon.clone().calculate(serai_client_serai::abi::BLOCK_HEADER_BRANCH_TAG)
        {
          Err(format!(
            "node's block #{block_number} doesn't build upon the block #{} prior indexed",
            block_number - 1
          ))?;
        }
-        let block_hash = block.hash();
+        let block_hash = block.header.hash();
        SubstrateBlockHash::set(&mut txn, block_number, &block_hash);
+        builds_upon.append(
+          serai_client_serai::abi::BLOCK_HEADER_BRANCH_TAG,
+          Blake2b256::new_with_prefix([serai_client_serai::abi::BLOCK_HEADER_LEAF_TAG])
+            .chain_update(block_hash.0)
+            .finalize()
+            .into(),
+        );
+        BuildsUpon::set(&mut txn, &builds_upon);
+
+        // Update the stakes
+        for event in events.validator_sets().allocation_events() {
+          let Event::Allocation { validator, network, amount } = event else {
+            panic!("event from `allocation_events` wasn't `Event::Allocation`")
+          };
+          let Ok(network) = ExternalNetworkId::try_from(*network) else { continue };
+          let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0));
+          Stakes::set(&mut txn, network, *validator, &Amount(existing.0 + amount.0));
+        }
+        for event in events.validator_sets().deallocation_events() {
+          let Event::Deallocation { validator, network, amount, timeline: _ } = event else {
+            panic!("event from `deallocation_events` wasn't `Event::Deallocation`")
+          };
+          let Ok(network) = ExternalNetworkId::try_from(*network) else { continue };
+          let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0));
+          Stakes::set(&mut txn, network, *validator, &Amount(existing.0 - amount.0));
+        }
+
+        // Handle decided sets
+        for event in events.validator_sets().set_decided_events() {
+          let Event::SetDecided { set, validators } = event else {
+            panic!("event from `set_decided_events` wasn't `Event::SetDecided`")
+          };
+
+          let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
+          Validators::set(
+            &mut txn,
+            set,
+            &validators.iter().map(|(validator, _key_shares)| *validator).collect(),
+          );
+        }
+
+        // Handle declarations of the latest set
+        for event in events.validator_sets().set_keys_events() {
+          let Event::SetKeys { set, key_pair } = event else {
+            panic!("event from `set_keys_events` wasn't `Event::SetKeys`")
+          };
+          let mut stake = 0;
+          for validator in
+            Validators::take(&mut txn, *set).expect("set which wasn't decided set keys")
+          {
+            stake += Stakes::get(&txn, set.network, validator).unwrap_or(Amount(0)).0;
+          }
+          LatestSet::set(
+            &mut txn,
+            set.network,
+            &Set { session: set.session, key: key_pair.0, stake: Amount(stake) },
+          );
+        }
+
        let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn);
 
        // If this is notable, it creates a new global session, which we index into the database
        // now
        if has_events == HasEvents::Notable {
-          let serai = self.serai.as_of(block_hash);
-          let sets_and_keys = cosigning_sets(&serai).await?;
-          let global_session =
-            GlobalSession::id(sets_and_keys.iter().map(|(set, _key)| *set).collect());
+          let sets_and_keys_and_stakes = cosigning_sets(&txn);
+          let global_session = GlobalSession::id(
+            sets_and_keys_and_stakes.iter().map(|(set, _key, _stake)| *set).collect(),
+          );
 
-          let mut sets = Vec::with_capacity(sets_and_keys.len());
-          let mut keys = HashMap::with_capacity(sets_and_keys.len());
-          let mut stakes = HashMap::with_capacity(sets_and_keys.len());
+          let mut sets = Vec::with_capacity(sets_and_keys_and_stakes.len());
+          let mut keys = HashMap::with_capacity(sets_and_keys_and_stakes.len());
+          let mut stakes = HashMap::with_capacity(sets_and_keys_and_stakes.len());
          let mut total_stake = 0;
-          for (set, key) in &sets_and_keys {
-            sets.push(*set);
-            keys.insert(set.network, SeraiAddress::from(*key));
-            let stake = serai
-              .validator_sets()
-              .total_allocated_stake(set.network.into())
-              .await
-              .map_err(|e| format!("{e:?}"))?
-              .unwrap_or(Amount(0))
-              .0;
-            stakes.insert(set.network, stake);
-            total_stake += stake;
+          for (set, key, stake) in sets_and_keys_and_stakes {
+            sets.push(set);
+            keys.insert(set.network, key);
+            stakes.insert(set.network, stake.0);
+            total_stake += stake.0;
          }
          if total_stake == 0 {
            Err(format!("cosigning sets for block #{block_number} had 0 stake in total"))?;
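The stake bookkeeping above is a plain additive ledger keyed by (network, validator): `Allocation` events add to the entry and `Deallocation` events subtract from it. A self-contained sketch of the same idea, using a HashMap in place of the database tables and simplified stand-in types rather than the serai-client-serai ones:

use std::collections::HashMap;

// Simplified stand-ins for the on-chain events handled above.
enum StakeEvent {
  Allocation { network: u8, validator: [u8; 32], amount: u64 },
  Deallocation { network: u8, validator: [u8; 32], amount: u64 },
}

// Apply an event to the per-(network, validator) ledger, mirroring the Stakes table updates.
fn apply(stakes: &mut HashMap<(u8, [u8; 32]), u64>, event: &StakeEvent) {
  match event {
    StakeEvent::Allocation { network, validator, amount } => {
      *stakes.entry((*network, *validator)).or_insert(0) += *amount;
    }
    StakeEvent::Deallocation { network, validator, amount } => {
      *stakes.entry((*network, *validator)).or_insert(0) -= *amount;
    }
  }
}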
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
 
@@ -9,16 +9,24 @@ use blake2::{Digest, Blake2s256};
 
 use borsh::{BorshSerialize, BorshDeserialize};
 
-use serai_client::{
-  primitives::{ExternalNetworkId, SeraiAddress},
-  validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair},
-  Public, Block, Serai, TemporalSerai,
+use serai_client_serai::{
+  abi::{
+    primitives::{
+      BlockHash,
+      crypto::{Public, KeyPair},
+      network_id::ExternalNetworkId,
+      validator_sets::{Session, ExternalValidatorSet},
+      address::SeraiAddress,
+    },
+    Block,
+  },
+  Serai, State,
 };
 
 use serai_db::*;
 use serai_task::*;
 
-use serai_cosign_types::*;
+pub use serai_cosign_types::*;
 
 /// The cosigns which are intended to be performed.
 mod intend;
@@ -51,7 +59,7 @@ use delay::LatestCosignedBlockNumber;
 pub(crate) struct GlobalSession {
   pub(crate) start_block_number: u64,
   pub(crate) sets: Vec<ExternalValidatorSet>,
-  pub(crate) keys: HashMap<ExternalNetworkId, SeraiAddress>,
+  pub(crate) keys: HashMap<ExternalNetworkId, Public>,
   pub(crate) stakes: HashMap<ExternalNetworkId, u64>,
   pub(crate) total_stake: u64,
 }
@@ -81,7 +89,7 @@ create_db! {
     // The following are populated by the intend task and used throughout the library
 
     // An index of Substrate blocks
-    SubstrateBlockHash: (block_number: u64) -> [u8; 32],
+    SubstrateBlockHash: (block_number: u64) -> BlockHash,
     // A mapping from a global session's ID to its relevant information.
     GlobalSessions: (global_session: [u8; 32]) -> GlobalSession,
     // The last block to be cosigned by a global session.
@@ -113,60 +121,6 @@ create_db! {
   }
 }
 
-/// Fetch the keys used for cosigning by a specific network.
-async fn keys_for_network(
-  serai: &TemporalSerai<'_>,
-  network: ExternalNetworkId,
-) -> Result<Option<(Session, KeyPair)>, String> {
-  let Some(latest_session) =
-    serai.validator_sets().session(network.into()).await.map_err(|e| format!("{e:?}"))?
-  else {
-    // If this network hasn't had a session declared, move on
-    return Ok(None);
-  };
-
-  // Get the keys for the latest session
-  if let Some(keys) = serai
-    .validator_sets()
-    .keys(ExternalValidatorSet { network, session: latest_session })
-    .await
-    .map_err(|e| format!("{e:?}"))?
-  {
-    return Ok(Some((latest_session, keys)));
-  }
-
-  // If the latest session has yet to set keys, use the prior session
-  if let Some(prior_session) = latest_session.0.checked_sub(1).map(Session) {
-    if let Some(keys) = serai
-      .validator_sets()
-      .keys(ExternalValidatorSet { network, session: prior_session })
-      .await
-      .map_err(|e| format!("{e:?}"))?
-    {
-      return Ok(Some((prior_session, keys)));
-    }
-  }
-
-  Ok(None)
-}
-
-/// Fetch the `ExternalValidatorSet`s, and their associated keys, used for cosigning as of this
-/// block.
-async fn cosigning_sets(
-  serai: &TemporalSerai<'_>,
-) -> Result<Vec<(ExternalValidatorSet, Public)>, String> {
-  let mut sets = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
-  for network in serai_client::primitives::EXTERNAL_NETWORKS {
-    let Some((session, keys)) = keys_for_network(serai, network).await? else {
-      // If this network doesn't have usable keys, move on
-      continue;
-    };
-
-    sets.push((ExternalValidatorSet { network, session }, keys.0));
-  }
-  Ok(sets)
-}
-
 /// An object usable to request notable cosigns for a block.
 pub trait RequestNotableCosigns: 'static + Send {
   /// The error type which may be encountered when requesting notable cosigns.
@@ -267,7 +221,10 @@ impl<D: Db> Cosigning<D> {
   }
 
   /// Fetch a cosigned Substrate block's hash by its block number.
-  pub fn cosigned_block(getter: &impl Get, block_number: u64) -> Result<Option<[u8; 32]>, Faulted> {
+  pub fn cosigned_block(
+    getter: &impl Get,
+    block_number: u64,
+  ) -> Result<Option<BlockHash>, Faulted> {
     if block_number > Self::latest_cosigned_block_number(getter)? {
       return Ok(None);
     }
@@ -282,8 +239,8 @@ impl<D: Db> Cosigning<D> {
   /// If this global session hasn't produced any notable cosigns, this will return the latest
   /// cosigns for this session.
   pub fn notable_cosigns(getter: &impl Get, global_session: [u8; 32]) -> Vec<SignedCosign> {
-    let mut cosigns = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
-    for network in serai_client::primitives::EXTERNAL_NETWORKS {
+    let mut cosigns = vec![];
+    for network in ExternalNetworkId::all() {
      if let Some(cosign) = NetworksLatestCosignedBlock::get(getter, global_session, network) {
        cosigns.push(cosign);
      }
@@ -300,7 +257,7 @@ impl<D: Db> Cosigning<D> {
     let mut cosigns = Faults::get(&self.db, faulted).expect("faulted with no faults");
     // Also include all of our recognized-as-honest cosigns in an attempt to induce fault
     // identification in those who see the faulty cosigns as honest
-    for network in serai_client::primitives::EXTERNAL_NETWORKS {
+    for network in ExternalNetworkId::all() {
      if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, faulted, network) {
        if cosign.cosign.global_session == faulted {
          cosigns.push(cosign);
@@ -312,8 +269,8 @@ impl<D: Db> Cosigning<D> {
     let Some(global_session) = evaluator::currently_evaluated_global_session(&self.db) else {
       return vec![];
     };
-    let mut cosigns = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
-    for network in serai_client::primitives::EXTERNAL_NETWORKS {
+    let mut cosigns = vec![];
+    for network in ExternalNetworkId::all() {
      if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, global_session, network) {
        cosigns.push(cosign);
      }
@@ -368,13 +325,8 @@ impl<D: Db> Cosigning<D> {
 
     // Check the cosign's signature
     {
-      let key = Public::from({
-        let Some(key) = global_session.keys.get(&network) else {
-          Err(IntakeCosignError::NonParticipatingNetwork)?
-        };
-        *key
-      });
-
+      let key =
+        *global_session.keys.get(&network).ok_or(IntakeCosignError::NonParticipatingNetwork)?;
      if !signed_cosign.verify_signature(key) {
        Err(IntakeCosignError::InvalidSignature)?;
      }
@@ -1,9 +1,9 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![deny(missing_docs)]
 //! Types used when cosigning Serai. For more info, please see `serai-cosign`.
 use borsh::{BorshSerialize, BorshDeserialize};
 
-use serai_primitives::{crypto::Public, network_id::ExternalNetworkId};
+use serai_primitives::{BlockHash, crypto::Public, network_id::ExternalNetworkId};
 
 /// The schnorrkel context to used when signing a cosign.
 pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";
@@ -16,7 +16,7 @@ pub struct CosignIntent {
   /// The number of the block to cosign.
   pub block_number: u64,
   /// The hash of the block to cosign.
-  pub block_hash: [u8; 32],
+  pub block_hash: BlockHash,
   /// If this cosign must be handled before further cosigns are.
   pub notable: bool,
 }
@@ -29,7 +29,7 @@ pub struct Cosign {
   /// The number of the block to cosign.
   pub block_number: u64,
   /// The hash of the block to cosign.
-  pub block_hash: [u8; 32],
+  pub block_hash: BlockHash,
   /// The actual cosigner.
   pub cosigner: ExternalNetworkId,
 }
@@ -29,7 +29,7 @@ schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
 hex = { version = "0.4", default-features = false, features = ["std"] }
 borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
 
-serai-client = { path = "../../../substrate/client", default-features = false, features = ["serai"] }
+serai-client-serai = { path = "../../../substrate/client/serai", default-features = false }
 serai-cosign = { path = "../../cosign" }
 tributary-sdk = { path = "../../tributary-sdk" }
 
@@ -7,7 +7,7 @@ use rand_core::{RngCore, OsRng};
 use blake2::{Digest, Blake2s256};
 use schnorrkel::{Keypair, PublicKey, Signature};
 
-use serai_client::primitives::PublicKey as Public;
+use serai_client_serai::abi::primitives::crypto::Public;
 
 use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
 use libp2p::{
@@ -104,7 +104,7 @@ impl OnlyValidators {
       .verify_simple(PROTOCOL.as_bytes(), &msg, &sig)
       .map_err(|_| io::Error::other("invalid signature"))?;
 
-    Ok(peer_id_from_public(Public::from_raw(public_key.to_bytes())))
+    Ok(peer_id_from_public(Public(public_key.to_bytes())))
   }
 }
 
@@ -1,11 +1,11 @@
-use core::future::Future;
+use core::{future::Future, str::FromStr};
 use std::{sync::Arc, collections::HashSet};
 
 use rand_core::{RngCore, OsRng};
 
 use tokio::sync::mpsc;
 
-use serai_client::{SeraiError, Serai};
+use serai_client_serai::{RpcError, Serai};
 
 use libp2p::{
   core::multiaddr::{Protocol, Multiaddr},
@@ -50,7 +50,7 @@ impl ContinuallyRan for DialTask {
   const DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
   const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 10 * 60;
 
-  type Error = SeraiError;
+  type Error = RpcError;
 
   fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
@@ -94,6 +94,13 @@ impl ContinuallyRan for DialTask {
           usize::try_from(OsRng.next_u64() % u64::try_from(potential_peers.len()).unwrap())
             .unwrap();
         let randomly_selected_peer = potential_peers.swap_remove(index_to_dial);
+        let Ok(randomly_selected_peer) = libp2p::Multiaddr::from_str(&randomly_selected_peer)
+        else {
+          log::error!(
+            "peer from substrate wasn't a valid `Multiaddr`: {randomly_selected_peer}"
+          );
+          continue;
+        };
 
         log::info!("found peer from substrate: {randomly_selected_peer}");
 
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
 
@@ -13,9 +13,10 @@ use rand_core::{RngCore, OsRng};
 use zeroize::Zeroizing;
 use schnorrkel::Keypair;
 
-use serai_client::{
-  primitives::{ExternalNetworkId, PublicKey},
-  validator_sets::primitives::ExternalValidatorSet,
+use serai_client_serai::{
+  abi::primitives::{
+    crypto::Public, network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet,
+  },
   Serai,
 };
 
@@ -66,7 +67,7 @@ use dial::DialTask;
 
 const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o')
 
-fn peer_id_from_public(public: PublicKey) -> PeerId {
+fn peer_id_from_public(public: Public) -> PeerId {
   // 0 represents the identity Multihash, that no hash was performed
   // It's an internal constant so we can't refer to the constant inside libp2p
   PeerId::from_multihash(Multihash::wrap(0, &public.0).unwrap()).unwrap()
@@ -6,7 +6,7 @@ use std::{
 
 use borsh::BorshDeserialize;
 
-use serai_client::validator_sets::primitives::ExternalValidatorSet;
+use serai_client_serai::abi::primitives::validator_sets::ExternalValidatorSet;
 
 use tokio::sync::{mpsc, oneshot, RwLock};
 
@@ -92,7 +92,8 @@ impl SwarmTask {
           }
         }
         gossip::Event::Subscribed { .. } | gossip::Event::Unsubscribed { .. } => {}
-        gossip::Event::GossipsubNotSupported { peer_id } => {
+        gossip::Event::GossipsubNotSupported { peer_id } |
+        gossip::Event::SlowPeer { peer_id, .. } => {
          let _: Result<_, _> = self.swarm.disconnect_peer_id(peer_id);
        }
       }
@@ -4,9 +4,8 @@ use std::{
   collections::{HashSet, HashMap},
 };
 
-use serai_client::{
-  primitives::ExternalNetworkId, validator_sets::primitives::Session, SeraiError, Serai,
-};
+use serai_client_serai::abi::primitives::{network_id::ExternalNetworkId, validator_sets::Session};
+use serai_client_serai::{RpcError, Serai};
 
 use serai_task::{Task, ContinuallyRan};
 
@@ -52,7 +51,7 @@ impl Validators {
   async fn session_changes(
     serai: impl Borrow<Serai>,
     sessions: impl Borrow<HashMap<ExternalNetworkId, Session>>,
-  ) -> Result<Vec<(ExternalNetworkId, Session, HashSet<PeerId>)>, SeraiError> {
+  ) -> Result<Vec<(ExternalNetworkId, Session, HashSet<PeerId>)>, RpcError> {
     /*
       This uses the latest finalized block, not the latest cosigned block, which should be fine as
      in the worst case, we'd connect to unexpected validators. They still shouldn't be able to
@@ -61,18 +60,18 @@ impl Validators {
 
       Besides, we can't connect to historical validators, only the current validators.
     */
-    let temporal_serai = serai.borrow().as_of_latest_finalized_block().await?;
-    let temporal_serai = temporal_serai.validator_sets();
+    let serai = serai.borrow().state().await?;
 
     let mut session_changes = vec![];
     {
       // FuturesUnordered can be bad practice as it'll cause timeouts if infrequently polled, but
       // we poll it till it yields all futures with the most minimal processing possible
       let mut futures = FuturesUnordered::new();
-      for network in serai_client::primitives::EXTERNAL_NETWORKS {
+      for network in ExternalNetworkId::all() {
        let sessions = sessions.borrow();
+        let serai = serai.borrow();
        futures.push(async move {
-          let session = match temporal_serai.session(network.into()).await {
+          let session = match serai.current_session(network.into()).await {
            Ok(Some(session)) => session,
            Ok(None) => return Ok(None),
            Err(e) => return Err(e),
@@ -81,12 +80,16 @@ impl Validators {
           if sessions.get(&network) == Some(&session) {
             Ok(None)
           } else {
-            match temporal_serai.active_network_validators(network.into()).await {
-              Ok(validators) => Ok(Some((
+            match serai.current_validators(network.into()).await {
+              Ok(Some(validators)) => Ok(Some((
                network,
                session,
-                validators.into_iter().map(peer_id_from_public).collect(),
+                validators
+                  .into_iter()
+                  .map(|validator| peer_id_from_public(validator.into()))
+                  .collect(),
              ))),
+              Ok(None) => panic!("network has session yet no validators"),
              Err(e) => Err(e),
            }
          }
@@ -153,7 +156,7 @@ impl Validators {
   }
 
   /// Update the view of the validators.
-  pub(crate) async fn update(&mut self) -> Result<(), SeraiError> {
+  pub(crate) async fn update(&mut self) -> Result<(), RpcError> {
     let session_changes = Self::session_changes(&*self.serai, &self.sessions).await?;
     self.incorporate_session_changes(session_changes);
     Ok(())
@@ -206,7 +209,7 @@ impl ContinuallyRan for UpdateValidatorsTask {
   const DELAY_BETWEEN_ITERATIONS: u64 = 60;
   const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
 
-  type Error = SeraiError;
+  type Error = RpcError;
 
   fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
@@ -1,7 +1,7 @@
 use core::future::Future;
 use std::time::{Duration, SystemTime};
 
-use serai_primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet};
+use serai_primitives::validator_sets::{ExternalValidatorSet, KeyShares};
 
 use futures_lite::FutureExt;
 
@@ -30,7 +30,7 @@ pub const MIN_BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;
 /// commit is `8 + (validators * 32) + (32 + (validators * 32))` (for the time, list of validators,
 /// and aggregate signature). Accordingly, this should be a safe over-estimate.
 pub const BATCH_SIZE_LIMIT: usize = MIN_BLOCKS_PER_BATCH *
-  (tributary_sdk::BLOCK_SIZE_LIMIT + 32 + ((MAX_KEY_SHARES_PER_SET as usize) * 128));
+  (tributary_sdk::BLOCK_SIZE_LIMIT + 32 + ((KeyShares::MAX_PER_SET as usize) * 128));
 
 /// Sends a heartbeat to other validators on regular intervals informing them of our Tributary's
 /// tip.
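The limit above is per-block arithmetic scaled by the batch size; a sketch of the same computation with the constants made explicit parameters (the real `BLOCK_SIZE_LIMIT` and key-share cap live in `tributary-sdk` and `serai-primitives`, so the names and any concrete values here are placeholders):

// Budget for each block itself plus its commit: a 32-byte hash plus roughly 128
// bytes per key share (time, validator list, aggregate signature), then scale by
// the number of blocks a batch may contain. Placeholder arguments, not the real constants.
const fn batch_size_limit(
  blocks_per_batch: usize,
  block_size_limit: usize,
  max_key_shares_per_set: usize,
) -> usize {
  blocks_per_batch * (block_size_limit + 32 + (max_key_shares_per_set * 128))
}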
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
 
@@ -5,9 +5,10 @@ use serai_db::{create_db, db_channel};
 
 use dkg::Participant;
 
-use serai_client::{
-  primitives::ExternalNetworkId,
-  validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair},
+use serai_client_serai::abi::primitives::{
+  crypto::KeyPair,
+  network_id::ExternalNetworkId,
+  validator_sets::{Session, ExternalValidatorSet},
 };
 
 use serai_cosign::SignedCosign;
@@ -103,7 +104,7 @@ mod _internal_db {
     // Tributary transactions to publish from the DKG confirmation task
     TributaryTransactionsFromDkgConfirmation: (set: ExternalValidatorSet) -> Transaction,
     // Participants to remove
-    RemoveParticipant: (set: ExternalValidatorSet) -> Participant,
+    RemoveParticipant: (set: ExternalValidatorSet) -> u16,
    }
  }
 }
@@ -139,10 +140,11 @@ impl RemoveParticipant {
   pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, participant: Participant) {
     // If this set has yet to be retired, send this transaction
     if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
-      _internal_db::RemoveParticipant::send(txn, set, &participant);
+      _internal_db::RemoveParticipant::send(txn, set, &u16::from(participant));
     }
   }
   pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Participant> {
     _internal_db::RemoveParticipant::try_recv(txn, set)
+      .map(|i| Participant::new(i).expect("sent invalid participant index for removal"))
   }
 }
@@ -12,10 +12,8 @@ use frost_schnorrkel::{
 
 use serai_db::{DbTxn, Db as DbTrait};
 
-use serai_client::{
-  primitives::SeraiAddress,
-  validator_sets::primitives::{ExternalValidatorSet, musig_context, set_keys_message},
-};
+#[rustfmt::skip]
+use serai_client_serai::abi::primitives::{validator_sets::ExternalValidatorSet, address::SeraiAddress};
 
 use serai_task::{DoesNotError, ContinuallyRan};
 
@@ -160,7 +158,7 @@ impl<CD: DbTrait, TD: DbTrait> ConfirmDkgTask<CD, TD> {
       let (machine, preprocess) = AlgorithmMachine::new(
         schnorrkel(),
         // We use a 1-of-1 Musig here as we don't know who will actually be in this Musig yet
-        musig(musig_context(set.into()), key, &[public_key]).unwrap(),
+        musig(ExternalValidatorSet::musig_context(&set), key, &[public_key]).unwrap(),
       )
       .preprocess(&mut OsRng);
       // We take the preprocess so we can use it in a distinct machine with the actual Musig
@@ -260,9 +258,12 @@ impl<CD: DbTrait, TD: DbTrait> ContinuallyRan for ConfirmDkgTask<CD, TD> {
|
|||||||
})
|
})
|
||||||
.collect::<Vec<_>>();
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
let keys =
|
let keys = musig(
|
||||||
musig(musig_context(self.set.set.into()), self.key.clone(), &musig_public_keys)
|
ExternalValidatorSet::musig_context(&self.set.set),
|
||||||
.unwrap();
|
self.key.clone(),
|
||||||
|
&musig_public_keys,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
// Rebuild the machine
|
// Rebuild the machine
|
||||||
let (machine, preprocess_from_cache) =
|
let (machine, preprocess_from_cache) =
|
||||||
@@ -296,9 +297,10 @@ impl<CD: DbTrait, TD: DbTrait> ContinuallyRan for ConfirmDkgTask<CD, TD> {
|
|||||||
};
|
};
|
||||||
|
|
||||||
// Calculate our share
|
// Calculate our share
|
||||||
let (machine, share) = match handle_frost_error(
|
let (machine, share) = match handle_frost_error(machine.sign(
|
||||||
machine.sign(preprocesses, &set_keys_message(&self.set.set, &key_pair)),
|
preprocesses,
|
||||||
) {
|
&ExternalValidatorSet::set_keys_message(&self.set.set, &key_pair),
|
||||||
|
)) {
|
||||||
Ok((machine, share)) => (machine, share),
|
Ok((machine, share)) => (machine, share),
|
||||||
// This yields the *musig participant index*
|
// This yields the *musig participant index*
|
||||||
Err(participant) => {
|
Err(participant) => {
|
||||||
|
|||||||
@@ -14,9 +14,14 @@ use borsh::BorshDeserialize;
|
|||||||
|
|
||||||
use tokio::sync::mpsc;
|
use tokio::sync::mpsc;
|
||||||
|
|
||||||
use serai_client::{
|
use serai_client_serai::{
|
||||||
primitives::{ExternalNetworkId, PublicKey, SeraiAddress, Signature},
|
abi::primitives::{
|
||||||
validator_sets::primitives::{ExternalValidatorSet, KeyPair},
|
BlockHash,
|
||||||
|
crypto::{Public, Signature, ExternalKey, KeyPair},
|
||||||
|
network_id::ExternalNetworkId,
|
||||||
|
validator_sets::ExternalValidatorSet,
|
||||||
|
address::SeraiAddress,
|
||||||
|
},
|
||||||
Serai,
|
Serai,
|
||||||
};
|
};
|
||||||
use message_queue::{Service, client::MessageQueue};
|
use message_queue::{Service, client::MessageQueue};
|
||||||
@@ -61,9 +66,7 @@ async fn serai() -> Arc<Serai> {
|
|||||||
let Ok(serai) = Serai::new(format!(
|
let Ok(serai) = Serai::new(format!(
|
||||||
"http://{}:9944",
|
"http://{}:9944",
|
||||||
serai_env::var("SERAI_HOSTNAME").expect("Serai hostname wasn't provided")
|
serai_env::var("SERAI_HOSTNAME").expect("Serai hostname wasn't provided")
|
||||||
))
|
)) else {
|
||||||
.await
|
|
||||||
else {
|
|
||||||
log::error!("couldn't connect to the Serai node");
|
log::error!("couldn't connect to the Serai node");
|
||||||
tokio::time::sleep(delay).await;
|
tokio::time::sleep(delay).await;
|
||||||
delay = (delay + SERAI_CONNECTION_DELAY).min(MAX_SERAI_CONNECTION_DELAY);
|
delay = (delay + SERAI_CONNECTION_DELAY).min(MAX_SERAI_CONNECTION_DELAY);
|
||||||
@@ -213,10 +216,12 @@ async fn handle_network(
|
|||||||
&mut txn,
|
&mut txn,
|
||||||
ExternalValidatorSet { network, session },
|
ExternalValidatorSet { network, session },
|
||||||
&KeyPair(
|
&KeyPair(
|
||||||
PublicKey::from_raw(substrate_key),
|
Public(substrate_key),
|
||||||
network_key
|
ExternalKey(
|
||||||
.try_into()
|
network_key
|
||||||
.expect("generated a network key which exceeds the maximum key length"),
|
.try_into()
|
||||||
|
.expect("generated a network key which exceeds the maximum key length"),
|
||||||
|
),
|
||||||
),
|
),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
@@ -290,6 +295,7 @@ async fn handle_network(
|
|||||||
},
|
},
|
||||||
messages::ProcessorMessage::Substrate(msg) => match msg {
|
messages::ProcessorMessage::Substrate(msg) => match msg {
|
||||||
messages::substrate::ProcessorMessage::SubstrateBlockAck { block, plans } => {
|
messages::substrate::ProcessorMessage::SubstrateBlockAck { block, plans } => {
|
||||||
|
let block = BlockHash(block);
|
||||||
let mut by_session = HashMap::new();
|
let mut by_session = HashMap::new();
|
||||||
for plan in plans {
|
for plan in plans {
|
||||||
by_session
|
by_session
|
||||||
@@ -481,7 +487,7 @@ async fn main() {
|
|||||||
);
|
);
|
||||||
|
|
||||||
// Handle each of the networks
|
// Handle each of the networks
|
||||||
for network in serai_client::primitives::EXTERNAL_NETWORKS {
|
for network in ExternalNetworkId::all() {
|
||||||
tokio::spawn(handle_network(db.clone(), message_queue.clone(), serai.clone(), network));
|
tokio::spawn(handle_network(db.clone(), message_queue.clone(), serai.clone(), network));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -10,7 +10,10 @@ use tokio::sync::mpsc;
|
|||||||
|
|
||||||
use serai_db::{DbTxn, Db as DbTrait};
|
use serai_db::{DbTxn, Db as DbTrait};
|
||||||
|
|
||||||
use serai_client::validator_sets::primitives::{Session, ExternalValidatorSet};
|
use serai_client_serai::abi::primitives::{
|
||||||
|
network_id::ExternalNetworkId,
|
||||||
|
validator_sets::{Session, ExternalValidatorSet},
|
||||||
|
};
|
||||||
use message_queue::{Service, Metadata, client::MessageQueue};
|
use message_queue::{Service, Metadata, client::MessageQueue};
|
||||||
|
|
||||||
use tributary_sdk::Tributary;
|
use tributary_sdk::Tributary;
|
||||||
@@ -39,7 +42,7 @@ impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
|
|||||||
let mut made_progress = false;
|
let mut made_progress = false;
|
||||||
|
|
||||||
// Handle the Canonical events
|
// Handle the Canonical events
|
||||||
for network in serai_client::primitives::EXTERNAL_NETWORKS {
|
for network in ExternalNetworkId::all() {
|
||||||
loop {
|
loop {
|
||||||
let mut txn = self.db.txn();
|
let mut txn = self.db.txn();
|
||||||
let Some(msg) = serai_coordinator_substrate::Canonical::try_recv(&mut txn, network)
|
let Some(msg) = serai_coordinator_substrate::Canonical::try_recv(&mut txn, network)
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ use tokio::sync::mpsc;
|
|||||||
|
|
||||||
use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel};
|
use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel};
|
||||||
|
|
||||||
use serai_client::validator_sets::primitives::ExternalValidatorSet;
|
use serai_client_serai::abi::primitives::validator_sets::ExternalValidatorSet;
|
||||||
|
|
||||||
use tributary_sdk::{TransactionKind, TransactionError, ProvidedError, TransactionTrait, Tributary};
|
use tributary_sdk::{TransactionKind, TransactionError, ProvidedError, TransactionTrait, Tributary};
|
||||||
|
|
||||||
|
|||||||
@@ -24,12 +24,11 @@ borsh = { version = "1", default-features = false, features = ["std", "derive",
|
|||||||
|
|
||||||
dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] }
|
dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] }
|
||||||
|
|
||||||
serai-client = { path = "../../substrate/client", version = "0.1", default-features = false, features = ["serai"] }
|
serai-client-serai = { path = "../../substrate/client/serai", default-features = false }
|
||||||
|
|
||||||
log = { version = "0.4", default-features = false, features = ["std"] }
|
log = { version = "0.4", default-features = false, features = ["std"] }
|
||||||
|
|
||||||
futures = { version = "0.3", default-features = false, features = ["std"] }
|
futures = { version = "0.3", default-features = false, features = ["std"] }
|
||||||
tokio = { version = "1", default-features = false }
|
|
||||||
|
|
||||||
serai-db = { path = "../../common/db", version = "0.1.1" }
|
serai-db = { path = "../../common/db", version = "0.1.1" }
|
||||||
serai-task = { path = "../../common/task", version = "0.1" }
|
serai-task = { path = "../../common/task", version = "0.1" }
|
||||||
|
|||||||
@@ -3,7 +3,13 @@ use std::sync::Arc;
|
|||||||
|
|
||||||
use futures::stream::{StreamExt, FuturesOrdered};
|
use futures::stream::{StreamExt, FuturesOrdered};
|
||||||
|
|
||||||
use serai_client::{validator_sets::primitives::ExternalValidatorSet, Serai};
|
use serai_client_serai::{
|
||||||
|
abi::{
|
||||||
|
self,
|
||||||
|
primitives::{network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet},
|
||||||
|
},
|
||||||
|
Serai,
|
||||||
|
};
|
||||||
|
|
||||||
use messages::substrate::{InInstructionResult, ExecutedBatch, CoordinatorMessage};
|
use messages::substrate::{InInstructionResult, ExecutedBatch, CoordinatorMessage};
|
||||||
|
|
||||||
@@ -15,6 +21,7 @@ use serai_cosign::Cosigning;
|
|||||||
create_db!(
|
create_db!(
|
||||||
CoordinatorSubstrateCanonical {
|
CoordinatorSubstrateCanonical {
|
||||||
NextBlock: () -> u64,
|
NextBlock: () -> u64,
|
||||||
|
LastIndexedBatchId: (network: ExternalNetworkId) -> u32,
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
|
|
||||||
@@ -45,10 +52,10 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
|
|||||||
// These are all the events which generate canonical messages
|
// These are all the events which generate canonical messages
|
||||||
struct CanonicalEvents {
|
struct CanonicalEvents {
|
||||||
time: u64,
|
time: u64,
|
||||||
key_gen_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
|
set_keys_events: Vec<abi::validator_sets::Event>,
|
||||||
set_retired_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
|
slash_report_events: Vec<abi::validator_sets::Event>,
|
||||||
batch_events: Vec<serai_client::in_instructions::InInstructionsEvent>,
|
batch_events: Vec<abi::in_instructions::Event>,
|
||||||
burn_events: Vec<serai_client::coins::CoinsEvent>,
|
burn_events: Vec<abi::coins::Event>,
|
||||||
}
|
}
|
||||||
|
|
||||||
// For a cosigned block, fetch all relevant events
|
// For a cosigned block, fetch all relevant events
|
||||||
@@ -66,40 +73,24 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
|
|||||||
}
|
}
|
||||||
Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()),
|
Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()),
|
||||||
};
|
};
|
||||||
let temporal_serai = serai.as_of(block_hash);
|
let events = serai.events(block_hash).await.map_err(|e| format!("{e}"))?;
|
||||||
let temporal_serai_validators = temporal_serai.validator_sets();
|
let set_keys_events = events.validator_sets().set_keys_events().cloned().collect();
|
||||||
let temporal_serai_instructions = temporal_serai.in_instructions();
|
let slash_report_events =
|
||||||
let temporal_serai_coins = temporal_serai.coins();
|
events.validator_sets().slash_report_events().cloned().collect();
|
||||||
|
let batch_events = events.in_instructions().batch_events().cloned().collect();
|
||||||
let (block, key_gen_events, set_retired_events, batch_events, burn_events) =
|
let burn_events = events.coins().burn_with_instruction_events().cloned().collect();
|
||||||
tokio::try_join!(
|
let Some(block) = serai.block(block_hash).await.map_err(|e| format!("{e:?}"))? else {
|
||||||
serai.block(block_hash),
|
|
||||||
temporal_serai_validators.key_gen_events(),
|
|
||||||
temporal_serai_validators.set_retired_events(),
|
|
||||||
temporal_serai_instructions.batch_events(),
|
|
||||||
temporal_serai_coins.burn_with_instruction_events(),
|
|
||||||
)
|
|
||||||
.map_err(|e| format!("{e:?}"))?;
|
|
||||||
let Some(block) = block else {
|
|
||||||
Err(format!("Serai node didn't have cosigned block #{block_number}"))?
|
Err(format!("Serai node didn't have cosigned block #{block_number}"))?
|
||||||
};
|
};
|
||||||
|
|
||||||
let time = if block_number == 0 {
|
// We use time in seconds, not milliseconds, here
|
||||||
block.time().unwrap_or(0)
|
let time = block.header.unix_time_in_millis() / 1000;
|
||||||
} else {
|
|
||||||
// Serai's block time is in milliseconds
|
|
||||||
block
|
|
||||||
.time()
|
|
||||||
.ok_or_else(|| "non-genesis Serai block didn't have a time".to_string())? /
|
|
||||||
1000
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok((
|
Ok((
|
||||||
block_number,
|
block_number,
|
||||||
CanonicalEvents {
|
CanonicalEvents {
|
||||||
time,
|
time,
|
||||||
key_gen_events,
|
set_keys_events,
|
||||||
set_retired_events,
|
slash_report_events,
|
||||||
batch_events,
|
batch_events,
|
||||||
burn_events,
|
burn_events,
|
||||||
},
|
},
|
||||||
@@ -131,10 +122,9 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
|
|||||||
|
|
||||||
let mut txn = self.db.txn();
|
let mut txn = self.db.txn();
|
||||||
|
|
||||||
for key_gen in block.key_gen_events {
|
for set_keys in block.set_keys_events {
|
||||||
let serai_client::validator_sets::ValidatorSetsEvent::KeyGen { set, key_pair } = &key_gen
|
let abi::validator_sets::Event::SetKeys { set, key_pair } = &set_keys else {
|
||||||
else {
|
panic!("`SetKeys` event wasn't a `SetKeys` event: {set_keys:?}");
|
||||||
panic!("KeyGen event wasn't a KeyGen event: {key_gen:?}");
|
|
||||||
};
|
};
|
||||||
crate::Canonical::send(
|
crate::Canonical::send(
|
||||||
&mut txn,
|
&mut txn,
|
||||||
@@ -147,12 +137,10 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
for set_retired in block.set_retired_events {
|
for slash_report in block.slash_report_events {
|
||||||
let serai_client::validator_sets::ValidatorSetsEvent::SetRetired { set } = &set_retired
|
let abi::validator_sets::Event::SlashReport { set } = &slash_report else {
|
||||||
else {
|
panic!("`SlashReport` event wasn't a `SlashReport` event: {slash_report:?}");
|
||||||
panic!("SetRetired event wasn't a SetRetired event: {set_retired:?}");
|
|
||||||
};
|
};
|
||||||
let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
|
|
||||||
crate::Canonical::send(
|
crate::Canonical::send(
|
||||||
&mut txn,
|
&mut txn,
|
||||||
set.network,
|
set.network,
|
||||||
@@ -160,10 +148,12 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
for network in serai_client::primitives::EXTERNAL_NETWORKS {
|
for network in ExternalNetworkId::all() {
|
||||||
let mut batch = None;
|
let mut batch = None;
|
||||||
for this_batch in &block.batch_events {
|
for this_batch in &block.batch_events {
|
||||||
let serai_client::in_instructions::InInstructionsEvent::Batch {
|
// Only irrefutable as this is the only member of the enum at this time
|
||||||
|
#[expect(irrefutable_let_patterns)]
|
||||||
|
let abi::in_instructions::Event::Batch {
|
||||||
network: batch_network,
|
network: batch_network,
|
||||||
publishing_session,
|
publishing_session,
|
||||||
id,
|
id,
|
||||||
@@ -194,14 +184,19 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
|
|||||||
})
|
})
|
||||||
.collect(),
|
.collect(),
|
||||||
});
|
});
|
||||||
|
|
||||||
|
if LastIndexedBatchId::get(&txn, network) != id.checked_sub(1) {
|
||||||
|
panic!(
|
||||||
|
"next batch from Serai's ID was not an increment of the last indexed batch's ID"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
LastIndexedBatchId::set(&mut txn, network, id);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut burns = vec![];
|
let mut burns = vec![];
|
||||||
for burn in &block.burn_events {
|
for burn in &block.burn_events {
|
||||||
let serai_client::coins::CoinsEvent::BurnWithInstruction { from: _, instruction } =
|
let abi::coins::Event::BurnWithInstruction { from: _, instruction } = &burn else {
|
||||||
&burn
|
|
||||||
else {
|
|
||||||
panic!("BurnWithInstruction event wasn't a BurnWithInstruction event: {burn:?}");
|
panic!("BurnWithInstruction event wasn't a BurnWithInstruction event: {burn:?}");
|
||||||
};
|
};
|
||||||
if instruction.balance.coin.network() == network {
|
if instruction.balance.coin.network() == network {
|
||||||
@@ -223,3 +218,7 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) fn last_indexed_batch_id(txn: &impl DbTxn, network: ExternalNetworkId) -> Option<u32> {
|
||||||
|
LastIndexedBatchId::get(txn, network)
|
||||||
|
}
|
||||||
|
|||||||
@@ -3,9 +3,14 @@ use std::sync::Arc;
|
|||||||
|
|
||||||
use futures::stream::{StreamExt, FuturesOrdered};
|
use futures::stream::{StreamExt, FuturesOrdered};
|
||||||
|
|
||||||
use serai_client::{
|
use serai_client_serai::{
|
||||||
primitives::{SeraiAddress, EmbeddedEllipticCurve},
|
abi::primitives::{
|
||||||
validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet},
|
BlockHash,
|
||||||
|
crypto::EmbeddedEllipticCurveKeys as EmbeddedEllipticCurveKeysStruct,
|
||||||
|
network_id::ExternalNetworkId,
|
||||||
|
validator_sets::{KeyShares, ExternalValidatorSet},
|
||||||
|
address::SeraiAddress,
|
||||||
|
},
|
||||||
Serai,
|
Serai,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -19,6 +24,10 @@ use crate::NewSetInformation;
|
|||||||
create_db!(
|
create_db!(
|
||||||
CoordinatorSubstrateEphemeral {
|
CoordinatorSubstrateEphemeral {
|
||||||
NextBlock: () -> u64,
|
NextBlock: () -> u64,
|
||||||
|
EmbeddedEllipticCurveKeys: (
|
||||||
|
network: ExternalNetworkId,
|
||||||
|
validator: SeraiAddress
|
||||||
|
) -> EmbeddedEllipticCurveKeysStruct,
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
|
|
||||||
@@ -49,10 +58,11 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
|
|||||||
|
|
||||||
// These are all the events which generate canonical messages
|
// These are all the events which generate canonical messages
|
||||||
struct EphemeralEvents {
|
struct EphemeralEvents {
|
||||||
block_hash: [u8; 32],
|
block_hash: BlockHash,
|
||||||
time: u64,
|
time: u64,
|
||||||
new_set_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
|
embedded_elliptic_curve_keys_events: Vec<serai_client_serai::abi::validator_sets::Event>,
|
||||||
accepted_handover_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
|
set_decided_events: Vec<serai_client_serai::abi::validator_sets::Event>,
|
||||||
|
accepted_handover_events: Vec<serai_client_serai::abi::validator_sets::Event>,
|
||||||
}
|
}
|
||||||
|
|
||||||
// For a cosigned block, fetch all relevant events
|
// For a cosigned block, fetch all relevant events
|
||||||
@@ -71,31 +81,31 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
|
|||||||
Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()),
|
Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()),
|
||||||
};
|
};
|
||||||
|
|
||||||
let temporal_serai = serai.as_of(block_hash);
|
let events = serai.events(block_hash).await.map_err(|e| format!("{e}"))?;
|
||||||
let temporal_serai_validators = temporal_serai.validator_sets();
|
let embedded_elliptic_curve_keys_events = events
|
||||||
let (block, new_set_events, accepted_handover_events) = tokio::try_join!(
|
.validator_sets()
|
||||||
serai.block(block_hash),
|
.set_embedded_elliptic_curve_keys_events()
|
||||||
temporal_serai_validators.new_set_events(),
|
.cloned()
|
||||||
temporal_serai_validators.accepted_handover_events(),
|
.collect::<Vec<_>>();
|
||||||
)
|
let set_decided_events =
|
||||||
.map_err(|e| format!("{e:?}"))?;
|
events.validator_sets().set_decided_events().cloned().collect::<Vec<_>>();
|
||||||
let Some(block) = block else {
|
let accepted_handover_events =
|
||||||
|
events.validator_sets().accepted_handover_events().cloned().collect::<Vec<_>>();
|
||||||
|
let Some(block) = serai.block(block_hash).await.map_err(|e| format!("{e:?}"))? else {
|
||||||
Err(format!("Serai node didn't have cosigned block #{block_number}"))?
|
Err(format!("Serai node didn't have cosigned block #{block_number}"))?
|
||||||
};
|
};
|
||||||
|
|
||||||
let time = if block_number == 0 {
|
// We use time in seconds, not milliseconds, here
|
||||||
block.time().unwrap_or(0)
|
let time = block.header.unix_time_in_millis() / 1000;
|
||||||
} else {
|
|
||||||
// Serai's block time is in milliseconds
|
|
||||||
block
|
|
||||||
.time()
|
|
||||||
.ok_or_else(|| "non-genesis Serai block didn't have a time".to_string())? /
|
|
||||||
1000
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok((
|
Ok((
|
||||||
block_number,
|
block_number,
|
||||||
EphemeralEvents { block_hash, time, new_set_events, accepted_handover_events },
|
EphemeralEvents {
|
||||||
|
block_hash,
|
||||||
|
time,
|
||||||
|
embedded_elliptic_curve_keys_events,
|
||||||
|
set_decided_events,
|
||||||
|
accepted_handover_events,
|
||||||
|
},
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -126,105 +136,82 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
|
|||||||
|
|
||||||
let mut txn = self.db.txn();
|
let mut txn = self.db.txn();
|
||||||
|
|
||||||
for new_set in block.new_set_events {
|
for event in block.embedded_elliptic_curve_keys_events {
|
||||||
let serai_client::validator_sets::ValidatorSetsEvent::NewSet { set } = &new_set else {
|
let serai_client_serai::abi::validator_sets::Event::SetEmbeddedEllipticCurveKeys {
|
||||||
panic!("NewSet event wasn't a NewSet event: {new_set:?}");
|
validator,
|
||||||
|
keys,
|
||||||
|
} = &event
|
||||||
|
else {
|
||||||
|
panic!(
|
||||||
|
"{}: {event:?}",
|
||||||
|
"`SetEmbeddedEllipticCurveKeys` event wasn't a `SetEmbeddedEllipticCurveKeys` event"
|
||||||
|
);
|
||||||
};
|
};
|
||||||
|
|
||||||
|
EmbeddedEllipticCurveKeys::set(&mut txn, keys.network(), *validator, keys);
|
||||||
|
}
|
||||||
|
|
||||||
|
for set_decided in block.set_decided_events {
|
||||||
|
let serai_client_serai::abi::validator_sets::Event::SetDecided { set, validators } =
|
||||||
|
&set_decided
|
||||||
|
else {
|
||||||
|
panic!("`SetDecided` event wasn't a `SetDecided` event: {set_decided:?}");
|
||||||
|
};
|
||||||
|
|
||||||
// We only coordinate over external networks
|
// We only coordinate over external networks
|
||||||
let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
|
let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
|
||||||
|
let validators =
|
||||||
|
validators.iter().map(|(validator, weight)| (*validator, weight.0)).collect::<Vec<_>>();
|
||||||
|
|
||||||
let serai = self.serai.as_of(block.block_hash);
|
|
||||||
let serai = serai.validator_sets();
|
|
||||||
let Some(validators) =
|
|
||||||
serai.participants(set.network.into()).await.map_err(|e| format!("{e:?}"))?
|
|
||||||
else {
|
|
||||||
Err(format!(
|
|
||||||
"block #{block_number} declared a new set but didn't have the participants"
|
|
||||||
))?
|
|
||||||
};
|
|
||||||
let validators = validators
|
|
||||||
.into_iter()
|
|
||||||
.map(|(validator, weight)| (SeraiAddress::from(validator), weight))
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
let in_set = validators.iter().any(|(validator, _)| *validator == self.validator);
|
let in_set = validators.iter().any(|(validator, _)| *validator == self.validator);
|
||||||
if in_set {
|
if in_set {
|
||||||
if u16::try_from(validators.len()).is_err() {
|
if u16::try_from(validators.len()).is_err() {
|
||||||
Err("more than u16::MAX validators sent")?;
|
Err("more than u16::MAX validators sent")?;
|
||||||
}
|
}
|
||||||
|
|
||||||
let Ok(validators) = validators
|
|
||||||
.into_iter()
|
|
||||||
.map(|(validator, weight)| u16::try_from(weight).map(|weight| (validator, weight)))
|
|
||||||
.collect::<Result<Vec<_>, _>>()
|
|
||||||
else {
|
|
||||||
Err("validator's weight exceeded u16::MAX".to_string())?
|
|
||||||
};
|
|
||||||
|
|
||||||
// Do the summation in u32 so we don't risk a u16 overflow
|
// Do the summation in u32 so we don't risk a u16 overflow
|
||||||
let total_weight = validators.iter().map(|(_, weight)| u32::from(*weight)).sum::<u32>();
|
let total_weight = validators.iter().map(|(_, weight)| u32::from(*weight)).sum::<u32>();
|
||||||
if total_weight > u32::from(MAX_KEY_SHARES_PER_SET) {
|
if total_weight > u32::from(KeyShares::MAX_PER_SET) {
|
||||||
Err(format!(
|
Err(format!(
|
||||||
"{set:?} has {total_weight} key shares when the max is {MAX_KEY_SHARES_PER_SET}"
|
"{set:?} has {total_weight} key shares when the max is {}",
|
||||||
|
KeyShares::MAX_PER_SET
|
||||||
))?;
|
))?;
|
||||||
}
|
}
|
||||||
let total_weight = u16::try_from(total_weight).unwrap();
|
let total_weight = u16::try_from(total_weight)
|
||||||
|
.expect("value smaller than `u16` constant but doesn't fit in `u16`");
|
||||||
|
|
||||||
// Fetch all of the validators' embedded elliptic curve keys
|
// Fetch all of the validators' embedded elliptic curve keys
|
||||||
let mut embedded_elliptic_curve_keys = FuturesOrdered::new();
|
|
||||||
for (validator, _) in &validators {
|
|
||||||
let validator = *validator;
|
|
||||||
// try_join doesn't return a future so we need to wrap it in this additional async
|
|
||||||
// block
|
|
||||||
embedded_elliptic_curve_keys.push_back(async move {
|
|
||||||
tokio::try_join!(
|
|
||||||
// One future to fetch the substrate embedded key
|
|
||||||
serai.embedded_elliptic_curve_key(
|
|
||||||
validator.into(),
|
|
||||||
EmbeddedEllipticCurve::Embedwards25519
|
|
||||||
),
|
|
||||||
// One future to fetch the external embedded key, if there is a distinct curve
|
|
||||||
async {
|
|
||||||
// `embedded_elliptic_curves` is documented to have the second entry be the
|
|
||||||
// network-specific curve (if it exists and is distinct from Embedwards25519)
|
|
||||||
if let Some(curve) = set.network.embedded_elliptic_curves().get(1) {
|
|
||||||
serai.embedded_elliptic_curve_key(validator.into(), *curve).await.map(Some)
|
|
||||||
} else {
|
|
||||||
Ok(None)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
)
|
|
||||||
.map(|(substrate_embedded_key, external_embedded_key)| {
|
|
||||||
(validator, substrate_embedded_key, external_embedded_key)
|
|
||||||
})
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut evrf_public_keys = Vec::with_capacity(usize::from(total_weight));
|
let mut evrf_public_keys = Vec::with_capacity(usize::from(total_weight));
|
||||||
for (validator, weight) in &validators {
|
for (validator, weight) in &validators {
|
||||||
let (future_validator, substrate_embedded_key, external_embedded_key) =
|
let keys = match EmbeddedEllipticCurveKeys::get(&txn, set.network, *validator)
|
||||||
embedded_elliptic_curve_keys.next().await.unwrap().map_err(|e| format!("{e:?}"))?;
|
.expect("selected validator lacked embedded elliptic curve keys")
|
||||||
assert_eq!(*validator, future_validator);
|
{
|
||||||
let external_embedded_key =
|
EmbeddedEllipticCurveKeysStruct::Bitcoin(substrate, external) => {
|
||||||
external_embedded_key.unwrap_or(substrate_embedded_key.clone());
|
assert_eq!(set.network, ExternalNetworkId::Bitcoin);
|
||||||
match (substrate_embedded_key, external_embedded_key) {
|
(substrate, external.to_vec())
|
||||||
(Some(substrate_embedded_key), Some(external_embedded_key)) => {
|
|
||||||
let substrate_embedded_key = <[u8; 32]>::try_from(substrate_embedded_key)
|
|
||||||
.map_err(|_| "Embedwards25519 key wasn't 32 bytes".to_string())?;
|
|
||||||
for _ in 0 .. *weight {
|
|
||||||
evrf_public_keys.push((substrate_embedded_key, external_embedded_key.clone()));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
_ => Err("NewSet with validator missing an embedded key".to_string())?,
|
EmbeddedEllipticCurveKeysStruct::Ethereum(substrate, external) => {
|
||||||
|
assert_eq!(set.network, ExternalNetworkId::Ethereum);
|
||||||
|
(substrate, external.to_vec())
|
||||||
|
}
|
||||||
|
EmbeddedEllipticCurveKeysStruct::Monero(substrate) => {
|
||||||
|
assert_eq!(set.network, ExternalNetworkId::Monero);
|
||||||
|
(substrate, substrate.to_vec())
|
||||||
|
}
|
||||||
|
};
|
||||||
|
for _ in 0 .. *weight {
|
||||||
|
evrf_public_keys.push(keys.clone());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut new_set = NewSetInformation {
|
let mut new_set = NewSetInformation {
|
||||||
set,
|
set,
|
||||||
serai_block: block.block_hash,
|
serai_block: block.block_hash.0,
|
||||||
declaration_time: block.time,
|
declaration_time: block.time,
|
||||||
// TODO: This should be inlined into the Processor's key gen code
|
// TODO: This should be inlined into the Processor's key gen code
|
||||||
// It's legacy from when we removed participants from the key gen
|
// It's legacy from when we removed participants from the key gen
|
||||||
threshold: ((total_weight * 2) / 3) + 1,
|
threshold: ((total_weight * 2) / 3) + 1,
|
||||||
|
// TODO: Why are `validators` and `evrf_public_keys` two separate fields?
|
||||||
validators,
|
validators,
|
||||||
evrf_public_keys,
|
evrf_public_keys,
|
||||||
participant_indexes: Default::default(),
|
participant_indexes: Default::default(),
|
||||||
@@ -238,7 +225,7 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for accepted_handover in block.accepted_handover_events {
|
for accepted_handover in block.accepted_handover_events {
|
||||||
let serai_client::validator_sets::ValidatorSetsEvent::AcceptedHandover { set } =
|
let serai_client_serai::abi::validator_sets::Event::AcceptedHandover { set } =
|
||||||
&accepted_handover
|
&accepted_handover
|
||||||
else {
|
else {
|
||||||
panic!("AcceptedHandover event wasn't a AcceptedHandover event: {accepted_handover:?}");
|
panic!("AcceptedHandover event wasn't a AcceptedHandover event: {accepted_handover:?}");
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||||
#![doc = include_str!("../README.md")]
|
#![doc = include_str!("../README.md")]
|
||||||
#![deny(missing_docs)]
|
#![deny(missing_docs)]
|
||||||
|
|
||||||
@@ -8,10 +8,14 @@ use borsh::{BorshSerialize, BorshDeserialize};
|
|||||||
|
|
||||||
use dkg::Participant;
|
use dkg::Participant;
|
||||||
|
|
||||||
use serai_client::{
|
use serai_client_serai::abi::{
|
||||||
primitives::{ExternalNetworkId, SeraiAddress, Signature},
|
primitives::{
|
||||||
validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair, SlashReport},
|
network_id::ExternalNetworkId,
|
||||||
in_instructions::primitives::SignedBatch,
|
validator_sets::{Session, ExternalValidatorSet, SlashReport},
|
||||||
|
crypto::{Signature, KeyPair},
|
||||||
|
address::SeraiAddress,
|
||||||
|
instructions::SignedBatch,
|
||||||
|
},
|
||||||
Transaction,
|
Transaction,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -19,6 +23,7 @@ use serai_db::*;
|
|||||||
|
|
||||||
mod canonical;
|
mod canonical;
|
||||||
pub use canonical::CanonicalEventStream;
|
pub use canonical::CanonicalEventStream;
|
||||||
|
use canonical::last_indexed_batch_id;
|
||||||
mod ephemeral;
|
mod ephemeral;
|
||||||
pub use ephemeral::EphemeralEventStream;
|
pub use ephemeral::EphemeralEventStream;
|
||||||
|
|
||||||
@@ -37,7 +42,7 @@ pub struct NewSetInformation {
|
|||||||
pub set: ExternalValidatorSet,
|
pub set: ExternalValidatorSet,
|
||||||
/// The Serai block which declared it.
|
/// The Serai block which declared it.
|
||||||
pub serai_block: [u8; 32],
|
pub serai_block: [u8; 32],
|
||||||
/// The time of the block which declared it, in seconds.
|
/// The time of the block which declared it, in seconds since the epoch.
|
||||||
pub declaration_time: u64,
|
pub declaration_time: u64,
|
||||||
/// The threshold to use.
|
/// The threshold to use.
|
||||||
pub threshold: u16,
|
pub threshold: u16,
|
||||||
@@ -96,9 +101,9 @@ mod _public_db {
|
|||||||
create_db!(
|
create_db!(
|
||||||
CoordinatorSubstrate {
|
CoordinatorSubstrate {
|
||||||
// Keys to set on the Serai network
|
// Keys to set on the Serai network
|
||||||
Keys: (network: ExternalNetworkId) -> (Session, Vec<u8>),
|
Keys: (network: ExternalNetworkId) -> (Session, Transaction),
|
||||||
// Slash reports to publish onto the Serai network
|
// Slash reports to publish onto the Serai network
|
||||||
SlashReports: (network: ExternalNetworkId) -> (Session, Vec<u8>),
|
SlashReports: (network: ExternalNetworkId) -> (Session, Transaction),
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
@@ -171,7 +176,7 @@ impl Keys {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let tx = serai_client::validator_sets::SeraiValidatorSets::set_keys(
|
let tx = serai_client_serai::ValidatorSets::set_keys(
|
||||||
set.network,
|
set.network,
|
||||||
key_pair,
|
key_pair,
|
||||||
signature_participants,
|
signature_participants,
|
||||||
@@ -192,7 +197,7 @@ pub struct SignedBatches;
|
|||||||
impl SignedBatches {
|
impl SignedBatches {
|
||||||
/// Send a `SignedBatch` to publish onto Serai.
|
/// Send a `SignedBatch` to publish onto Serai.
|
||||||
pub fn send(txn: &mut impl DbTxn, batch: &SignedBatch) {
|
pub fn send(txn: &mut impl DbTxn, batch: &SignedBatch) {
|
||||||
_public_db::SignedBatches::send(txn, batch.batch.network, batch);
|
_public_db::SignedBatches::send(txn, batch.batch.network(), batch);
|
||||||
}
|
}
|
||||||
pub(crate) fn try_recv(txn: &mut impl DbTxn, network: ExternalNetworkId) -> Option<SignedBatch> {
|
pub(crate) fn try_recv(txn: &mut impl DbTxn, network: ExternalNetworkId) -> Option<SignedBatch> {
|
||||||
_public_db::SignedBatches::try_recv(txn, network)
|
_public_db::SignedBatches::try_recv(txn, network)
|
||||||
@@ -219,11 +224,8 @@ impl SlashReports {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let tx = serai_client::validator_sets::SeraiValidatorSets::report_slashes(
|
let tx =
|
||||||
set.network,
|
serai_client_serai::ValidatorSets::report_slashes(set.network, slash_report, signature);
|
||||||
slash_report,
|
|
||||||
signature,
|
|
||||||
);
|
|
||||||
_public_db::SlashReports::set(txn, set.network, &(set.session, tx));
|
_public_db::SlashReports::set(txn, set.network, &(set.session, tx));
|
||||||
}
|
}
|
||||||
pub(crate) fn take(
|
pub(crate) fn take(
|
||||||
|
|||||||
@@ -1,8 +1,10 @@
|
|||||||
use core::future::Future;
|
use core::future::Future;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
#[rustfmt::skip]
|
use serai_client_serai::{
|
||||||
use serai_client::{primitives::ExternalNetworkId, in_instructions::primitives::SignedBatch, SeraiError, Serai};
|
abi::primitives::{network_id::ExternalNetworkId, instructions::SignedBatch},
|
||||||
|
RpcError, Serai,
|
||||||
|
};
|
||||||
|
|
||||||
use serai_db::{Get, DbTxn, Db, create_db};
|
use serai_db::{Get, DbTxn, Db, create_db};
|
||||||
use serai_task::ContinuallyRan;
|
use serai_task::ContinuallyRan;
|
||||||
@@ -31,7 +33,7 @@ impl<D: Db> PublishBatchTask<D> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
|
impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
|
||||||
type Error = SeraiError;
|
type Error = RpcError;
|
||||||
|
|
||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
@@ -43,8 +45,8 @@ impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
|
|||||||
};
|
};
|
||||||
|
|
||||||
// If this is a Batch not yet published, save it into our unordered mapping
|
// If this is a Batch not yet published, save it into our unordered mapping
|
||||||
if LastPublishedBatch::get(&txn, self.network) < Some(batch.batch.id) {
|
if LastPublishedBatch::get(&txn, self.network) < Some(batch.batch.id()) {
|
||||||
BatchesToPublish::set(&mut txn, self.network, batch.batch.id, &batch);
|
BatchesToPublish::set(&mut txn, self.network, batch.batch.id(), &batch);
|
||||||
}
|
}
|
||||||
|
|
||||||
txn.commit();
|
txn.commit();
|
||||||
@@ -52,12 +54,8 @@ impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
|
|||||||
|
|
||||||
// Synchronize our last published batch with the Serai network's
|
// Synchronize our last published batch with the Serai network's
|
||||||
let next_to_publish = {
|
let next_to_publish = {
|
||||||
// This uses the latest finalized block, not the latest cosigned block, which should be
|
|
||||||
// fine as in the worst case, the only impact is no longer attempting TX publication
|
|
||||||
let serai = self.serai.as_of_latest_finalized_block().await?;
|
|
||||||
let last_batch = serai.in_instructions().last_batch_for_network(self.network).await?;
|
|
||||||
|
|
||||||
let mut txn = self.db.txn();
|
let mut txn = self.db.txn();
|
||||||
|
let last_batch = crate::last_indexed_batch_id(&txn, self.network);
|
||||||
let mut our_last_batch = LastPublishedBatch::get(&txn, self.network);
|
let mut our_last_batch = LastPublishedBatch::get(&txn, self.network);
|
||||||
while our_last_batch < last_batch {
|
while our_last_batch < last_batch {
|
||||||
let next_batch = our_last_batch.map(|batch| batch + 1).unwrap_or(0);
|
let next_batch = our_last_batch.map(|batch| batch + 1).unwrap_or(0);
|
||||||
@@ -68,6 +66,7 @@ impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
|
|||||||
if let Some(last_batch) = our_last_batch {
|
if let Some(last_batch) = our_last_batch {
|
||||||
LastPublishedBatch::set(&mut txn, self.network, &last_batch);
|
LastPublishedBatch::set(&mut txn, self.network, &last_batch);
|
||||||
}
|
}
|
||||||
|
txn.commit();
|
||||||
last_batch.map(|batch| batch + 1).unwrap_or(0)
|
last_batch.map(|batch| batch + 1).unwrap_or(0)
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -75,7 +74,7 @@ impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
|
|||||||
if let Some(batch) = BatchesToPublish::get(&self.db, self.network, next_to_publish) {
|
if let Some(batch) = BatchesToPublish::get(&self.db, self.network, next_to_publish) {
|
||||||
self
|
self
|
||||||
.serai
|
.serai
|
||||||
.publish(&serai_client::in_instructions::SeraiInInstructions::execute_batch(batch))
|
.publish_transaction(&serai_client_serai::InInstructions::execute_batch(batch))
|
||||||
.await?;
|
.await?;
|
||||||
true
|
true
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
@@ -3,7 +3,10 @@ use std::sync::Arc;
|
|||||||
|
|
||||||
use serai_db::{DbTxn, Db};
|
use serai_db::{DbTxn, Db};
|
||||||
|
|
||||||
use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::Session, Serai};
|
use serai_client_serai::{
|
||||||
|
abi::primitives::{network_id::ExternalNetworkId, validator_sets::Session},
|
||||||
|
Serai,
|
||||||
|
};
|
||||||
|
|
||||||
use serai_task::ContinuallyRan;
|
use serai_task::ContinuallyRan;
|
||||||
|
|
||||||
@@ -33,10 +36,10 @@ impl<D: Db> PublishSlashReportTask<D> {
|
|||||||
|
|
||||||
// This uses the latest finalized block, not the latest cosigned block, which should be
|
// This uses the latest finalized block, not the latest cosigned block, which should be
|
||||||
// fine as in the worst case, the only impact is no longer attempting TX publication
|
// fine as in the worst case, the only impact is no longer attempting TX publication
|
||||||
let serai = self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
|
let serai = self.serai.state().await.map_err(|e| format!("{e:?}"))?;
|
||||||
let serai = serai.validator_sets();
|
|
||||||
let session_after_slash_report = Session(session.0 + 1);
|
let session_after_slash_report = Session(session.0 + 1);
|
||||||
let current_session = serai.session(network.into()).await.map_err(|e| format!("{e:?}"))?;
|
let current_session =
|
||||||
|
serai.current_session(network.into()).await.map_err(|e| format!("{e:?}"))?;
|
||||||
let current_session = current_session.map(|session| session.0);
|
let current_session = current_session.map(|session| session.0);
|
||||||
// Only attempt to publish the slash report for session #n while session #n+1 is still
|
// Only attempt to publish the slash report for session #n while session #n+1 is still
|
||||||
// active
|
// active
|
||||||
@@ -55,14 +58,13 @@ impl<D: Db> PublishSlashReportTask<D> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// If this session which should publish a slash report already has, move on
|
// If this session which should publish a slash report already has, move on
|
||||||
let key_pending_slash_report =
|
if !serai.pending_slash_report(network).await.map_err(|e| format!("{e:?}"))? {
|
||||||
serai.key_pending_slash_report(network).await.map_err(|e| format!("{e:?}"))?;
|
|
||||||
if key_pending_slash_report.is_none() {
|
|
||||||
txn.commit();
|
txn.commit();
|
||||||
return Ok(false);
|
return Ok(false);
|
||||||
};
|
};
|
||||||
|
|
||||||
match self.serai.publish(&slash_report).await {
|
// Since this slash report is still pending, publish it
|
||||||
|
match self.serai.publish_transaction(&slash_report).await {
|
||||||
Ok(()) => {
|
Ok(()) => {
|
||||||
txn.commit();
|
txn.commit();
|
||||||
Ok(true)
|
Ok(true)
|
||||||
@@ -84,7 +86,7 @@ impl<D: Db> ContinuallyRan for PublishSlashReportTask<D> {
|
|||||||
async move {
|
async move {
|
||||||
let mut made_progress = false;
|
let mut made_progress = false;
|
||||||
let mut error = None;
|
let mut error = None;
|
||||||
for network in serai_client::primitives::EXTERNAL_NETWORKS {
|
for network in ExternalNetworkId::all() {
|
||||||
let network_res = self.publish(network).await;
|
let network_res = self.publish(network).await;
|
||||||
// We made progress if any network successfully published their slash report
|
// We made progress if any network successfully published their slash report
|
||||||
made_progress |= network_res == Ok(true);
|
made_progress |= network_res == Ok(true);
|
||||||
|
|||||||
@@ -3,7 +3,10 @@ use std::sync::Arc;
|
|||||||
|
|
||||||
use serai_db::{DbTxn, Db};
|
use serai_db::{DbTxn, Db};
|
||||||
|
|
||||||
use serai_client::{validator_sets::primitives::ExternalValidatorSet, Serai};
|
use serai_client_serai::{
|
||||||
|
abi::primitives::{network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet},
|
||||||
|
Serai,
|
||||||
|
};
|
||||||
|
|
||||||
use serai_task::ContinuallyRan;
|
use serai_task::ContinuallyRan;
|
||||||
|
|
||||||
@@ -28,7 +31,7 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {
|
|||||||
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
||||||
async move {
|
async move {
|
||||||
let mut made_progress = false;
|
let mut made_progress = false;
|
||||||
for network in serai_client::primitives::EXTERNAL_NETWORKS {
|
for network in ExternalNetworkId::all() {
|
||||||
let mut txn = self.db.txn();
|
let mut txn = self.db.txn();
|
||||||
let Some((session, keys)) = Keys::take(&mut txn, network) else {
|
let Some((session, keys)) = Keys::take(&mut txn, network) else {
|
||||||
// No keys to set
|
// No keys to set
|
||||||
@@ -37,10 +40,9 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {
|
|||||||
|
|
||||||
// This uses the latest finalized block, not the latest cosigned block, which should be
|
// This uses the latest finalized block, not the latest cosigned block, which should be
|
||||||
// fine as in the worst case, the only impact is no longer attempting TX publication
|
// fine as in the worst case, the only impact is no longer attempting TX publication
|
||||||
let serai =
|
let serai = self.serai.state().await.map_err(|e| format!("{e:?}"))?;
|
||||||
self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
|
let current_session =
|
||||||
let serai = serai.validator_sets();
|
serai.current_session(network.into()).await.map_err(|e| format!("{e:?}"))?;
|
||||||
let current_session = serai.session(network.into()).await.map_err(|e| format!("{e:?}"))?;
|
|
||||||
let current_session = current_session.map(|session| session.0);
|
let current_session = current_session.map(|session| session.0);
|
||||||
// Only attempt to set these keys if this isn't a retired session
|
// Only attempt to set these keys if this isn't a retired session
|
||||||
if Some(session.0) < current_session {
|
if Some(session.0) < current_session {
|
||||||
@@ -67,7 +69,7 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {
|
|||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
match self.serai.publish(&keys).await {
|
match self.serai.publish_transaction(&keys).await {
|
||||||
Ok(()) => {
|
Ok(()) => {
|
||||||
txn.commit();
|
txn.commit();
|
||||||
made_progress = true;
|
made_progress = true;
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ license = "MIT"
|
|||||||
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tendermint"
|
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tendermint"
|
||||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
rust-version = "1.75"
|
rust-version = "1.77"
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
all-features = true
|
all-features = true
|
||||||
|
|||||||
@@ -36,7 +36,7 @@ serai-task = { path = "../../common/task", version = "0.1" }
|
|||||||
|
|
||||||
tributary-sdk = { path = "../tributary-sdk" }
|
tributary-sdk = { path = "../tributary-sdk" }
|
||||||
|
|
||||||
serai-cosign = { path = "../cosign" }
|
serai-cosign-types = { path = "../cosign/types" }
|
||||||
serai-coordinator-substrate = { path = "../substrate" }
|
serai-coordinator-substrate = { path = "../substrate" }
|
||||||
|
|
||||||
messages = { package = "serai-processor-messages", path = "../../processor/messages" }
|
messages = { package = "serai-processor-messages", path = "../../processor/messages" }
|
||||||
|
|||||||
@@ -2,13 +2,13 @@ use std::collections::HashMap;
|
|||||||
|
|
||||||
use borsh::{BorshSerialize, BorshDeserialize};
|
use borsh::{BorshSerialize, BorshDeserialize};
|
||||||
|
|
||||||
use serai_primitives::{address::SeraiAddress, validator_sets::primitives::ExternalValidatorSet};
|
use serai_primitives::{BlockHash, validator_sets::ExternalValidatorSet, address::SeraiAddress};
|
||||||
|
|
||||||
use messages::sign::{VariantSignId, SignId};
|
use messages::sign::{VariantSignId, SignId};
|
||||||
|
|
||||||
use serai_db::*;
|
use serai_db::*;
|
||||||
|
|
||||||
use serai_cosign::CosignIntent;
|
use serai_cosign_types::CosignIntent;
|
||||||
|
|
||||||
use crate::transaction::SigningProtocolRound;
|
use crate::transaction::SigningProtocolRound;
|
||||||
|
|
||||||
@@ -122,7 +122,7 @@ impl Topic {
|
|||||||
Topic::DkgConfirmation { attempt, round: _ } => Some({
|
Topic::DkgConfirmation { attempt, round: _ } => Some({
|
||||||
let id = {
|
let id = {
|
||||||
let mut id = [0; 32];
|
let mut id = [0; 32];
|
||||||
let encoded_set = borsh::to_vec(set).unwrap();
|
let encoded_set = borsh::to_vec(&set).unwrap();
|
||||||
id[.. encoded_set.len()].copy_from_slice(&encoded_set);
|
id[.. encoded_set.len()].copy_from_slice(&encoded_set);
|
||||||
VariantSignId::Batch(id)
|
VariantSignId::Batch(id)
|
||||||
};
|
};
|
||||||
@@ -232,18 +232,18 @@ create_db!(
|
|||||||
SlashPoints: (set: ExternalValidatorSet, validator: SeraiAddress) -> u32,
|
SlashPoints: (set: ExternalValidatorSet, validator: SeraiAddress) -> u32,
|
||||||
|
|
||||||
// The cosign intent for a Substrate block
|
// The cosign intent for a Substrate block
|
||||||
CosignIntents: (set: ExternalValidatorSet, substrate_block_hash: [u8; 32]) -> CosignIntent,
|
CosignIntents: (set: ExternalValidatorSet, substrate_block_hash: BlockHash) -> CosignIntent,
|
||||||
// The latest Substrate block to cosign.
|
// The latest Substrate block to cosign.
|
||||||
LatestSubstrateBlockToCosign: (set: ExternalValidatorSet) -> [u8; 32],
|
LatestSubstrateBlockToCosign: (set: ExternalValidatorSet) -> BlockHash,
|
||||||
// The hash of the block we're actively cosigning.
|
// The hash of the block we're actively cosigning.
|
||||||
ActivelyCosigning: (set: ExternalValidatorSet) -> [u8; 32],
|
ActivelyCosigning: (set: ExternalValidatorSet) -> BlockHash,
|
||||||
// If this block has already been cosigned.
|
// If this block has already been cosigned.
|
||||||
Cosigned: (set: ExternalValidatorSet, substrate_block_hash: [u8; 32]) -> (),
|
Cosigned: (set: ExternalValidatorSet, substrate_block_hash: BlockHash) -> (),
|
||||||
|
|
||||||
// The plans to recognize upon a `Transaction::SubstrateBlock` being included on-chain.
|
// The plans to recognize upon a `Transaction::SubstrateBlock` being included on-chain.
|
||||||
SubstrateBlockPlans: (
|
SubstrateBlockPlans: (
|
||||||
set: ExternalValidatorSet,
|
set: ExternalValidatorSet,
|
||||||
substrate_block_hash: [u8; 32]
|
substrate_block_hash: BlockHash
|
||||||
) -> Vec<[u8; 32]>,
|
) -> Vec<[u8; 32]>,
|
||||||
|
|
||||||
// The weight accumulated for a topic.
|
// The weight accumulated for a topic.
|
||||||
@@ -291,26 +291,26 @@ impl TributaryDb {
|
|||||||
pub(crate) fn latest_substrate_block_to_cosign(
|
pub(crate) fn latest_substrate_block_to_cosign(
|
||||||
getter: &impl Get,
|
getter: &impl Get,
|
||||||
set: ExternalValidatorSet,
|
set: ExternalValidatorSet,
|
||||||
) -> Option<[u8; 32]> {
|
) -> Option<BlockHash> {
|
||||||
LatestSubstrateBlockToCosign::get(getter, set)
|
LatestSubstrateBlockToCosign::get(getter, set)
|
||||||
}
|
}
|
||||||
pub(crate) fn set_latest_substrate_block_to_cosign(
|
pub(crate) fn set_latest_substrate_block_to_cosign(
|
||||||
txn: &mut impl DbTxn,
|
txn: &mut impl DbTxn,
|
||||||
set: ExternalValidatorSet,
|
set: ExternalValidatorSet,
|
||||||
substrate_block_hash: [u8; 32],
|
substrate_block_hash: BlockHash,
|
||||||
) {
|
) {
|
||||||
LatestSubstrateBlockToCosign::set(txn, set, &substrate_block_hash);
|
LatestSubstrateBlockToCosign::set(txn, set, &substrate_block_hash);
|
||||||
}
|
}
|
||||||
pub(crate) fn actively_cosigning(
|
pub(crate) fn actively_cosigning(
|
||||||
txn: &mut impl DbTxn,
|
txn: &mut impl DbTxn,
|
||||||
set: ExternalValidatorSet,
|
set: ExternalValidatorSet,
|
||||||
) -> Option<[u8; 32]> {
|
) -> Option<BlockHash> {
|
||||||
ActivelyCosigning::get(txn, set)
|
ActivelyCosigning::get(txn, set)
|
||||||
}
|
}
|
||||||
pub(crate) fn start_cosigning(
|
pub(crate) fn start_cosigning(
|
||||||
txn: &mut impl DbTxn,
|
txn: &mut impl DbTxn,
|
||||||
set: ExternalValidatorSet,
|
set: ExternalValidatorSet,
|
||||||
substrate_block_hash: [u8; 32],
|
substrate_block_hash: BlockHash,
|
||||||
substrate_block_number: u64,
|
substrate_block_number: u64,
|
||||||
) {
|
) {
|
||||||
assert!(
|
assert!(
|
||||||
@@ -335,14 +335,14 @@ impl TributaryDb {
|
|||||||
pub(crate) fn mark_cosigned(
|
pub(crate) fn mark_cosigned(
|
||||||
txn: &mut impl DbTxn,
|
txn: &mut impl DbTxn,
|
||||||
set: ExternalValidatorSet,
|
set: ExternalValidatorSet,
|
||||||
substrate_block_hash: [u8; 32],
|
substrate_block_hash: BlockHash,
|
||||||
) {
|
) {
|
||||||
Cosigned::set(txn, set, substrate_block_hash, &());
|
Cosigned::set(txn, set, substrate_block_hash, &());
|
||||||
}
|
}
|
||||||
pub(crate) fn cosigned(
|
pub(crate) fn cosigned(
|
||||||
txn: &mut impl DbTxn,
|
txn: &mut impl DbTxn,
|
||||||
set: ExternalValidatorSet,
|
set: ExternalValidatorSet,
|
||||||
substrate_block_hash: [u8; 32],
|
substrate_block_hash: BlockHash,
|
||||||
) -> bool {
|
) -> bool {
|
||||||
Cosigned::get(txn, set, substrate_block_hash).is_some()
|
Cosigned::get(txn, set, substrate_block_hash).is_some()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||||
#![doc = include_str!("../README.md")]
|
#![doc = include_str!("../README.md")]
|
||||||
#![deny(missing_docs)]
|
#![deny(missing_docs)]
|
||||||
|
|
||||||
@@ -9,8 +9,9 @@ use ciphersuite::group::GroupEncoding;
|
|||||||
use dkg::Participant;
|
use dkg::Participant;
|
||||||
|
|
||||||
use serai_primitives::{
|
use serai_primitives::{
|
||||||
address::SeraiAddress,
|
BlockHash,
|
||||||
validator_sets::{ExternalValidatorSet, Slash},
|
validator_sets::{ExternalValidatorSet, Slash},
|
||||||
|
address::SeraiAddress,
|
||||||
};
|
};
|
||||||
|
|
||||||
use serai_db::*;
|
use serai_db::*;
|
||||||
@@ -25,7 +26,7 @@ use tributary_sdk::{
|
|||||||
Transaction as TributaryTransaction, Block, TributaryReader, P2p,
|
Transaction as TributaryTransaction, Block, TributaryReader, P2p,
|
||||||
};
|
};
|
||||||
|
|
||||||
use serai_cosign::CosignIntent;
|
use serai_cosign_types::CosignIntent;
|
||||||
use serai_coordinator_substrate::NewSetInformation;
|
use serai_coordinator_substrate::NewSetInformation;
|
||||||
|
|
||||||
use messages::sign::{VariantSignId, SignId};
|
use messages::sign::{VariantSignId, SignId};
|
||||||
@@ -79,7 +80,7 @@ impl CosignIntents {
|
|||||||
fn take(
|
fn take(
|
||||||
txn: &mut impl DbTxn,
|
txn: &mut impl DbTxn,
|
||||||
set: ExternalValidatorSet,
|
set: ExternalValidatorSet,
|
||||||
substrate_block_hash: [u8; 32],
|
substrate_block_hash: BlockHash,
|
||||||
) -> Option<CosignIntent> {
|
) -> Option<CosignIntent> {
|
||||||
db::CosignIntents::take(txn, set, substrate_block_hash)
|
db::CosignIntents::take(txn, set, substrate_block_hash)
|
||||||
}
|
}
|
||||||
@@ -113,7 +114,7 @@ impl SubstrateBlockPlans {
|
|||||||
pub fn set(
|
pub fn set(
|
||||||
txn: &mut impl DbTxn,
|
txn: &mut impl DbTxn,
|
||||||
set: ExternalValidatorSet,
|
set: ExternalValidatorSet,
|
||||||
substrate_block_hash: [u8; 32],
|
substrate_block_hash: BlockHash,
|
||||||
plans: &Vec<[u8; 32]>,
|
plans: &Vec<[u8; 32]>,
|
||||||
) {
|
) {
|
||||||
db::SubstrateBlockPlans::set(txn, set, substrate_block_hash, plans);
|
db::SubstrateBlockPlans::set(txn, set, substrate_block_hash, plans);
|
||||||
@@ -121,7 +122,7 @@ impl SubstrateBlockPlans {
|
|||||||
fn take(
|
fn take(
|
||||||
txn: &mut impl DbTxn,
|
txn: &mut impl DbTxn,
|
||||||
set: ExternalValidatorSet,
|
set: ExternalValidatorSet,
|
||||||
substrate_block_hash: [u8; 32],
|
substrate_block_hash: BlockHash,
|
||||||
) -> Option<Vec<[u8; 32]>> {
|
) -> Option<Vec<[u8; 32]>> {
|
||||||
db::SubstrateBlockPlans::take(txn, set, substrate_block_hash)
|
db::SubstrateBlockPlans::take(txn, set, substrate_block_hash)
|
||||||
}
|
}
|
||||||
@@ -574,14 +575,9 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
|
|||||||
};
|
};
|
||||||
let msgs = (
|
let msgs = (
|
||||||
decode_signed_message::<TendermintNetwork<TD, Transaction, P>>(&data.0).unwrap(),
|
decode_signed_message::<TendermintNetwork<TD, Transaction, P>>(&data.0).unwrap(),
|
||||||
if data.1.is_some() {
|
data.1.as_ref().map(|data| {
|
||||||
Some(
|
decode_signed_message::<TendermintNetwork<TD, Transaction, P>>(data).unwrap()
|
||||||
decode_signed_message::<TendermintNetwork<TD, Transaction, P>>(&data.1.unwrap())
|
}),
|
||||||
.unwrap(),
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
},
|
|
||||||
);
|
);
|
||||||
|
|
||||||
// Since anything with evidence is fundamentally faulty behavior, not just temporal
|
// Since anything with evidence is fundamentally faulty behavior, not just temporal
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ use schnorr::SchnorrSignature;

 use borsh::{BorshSerialize, BorshDeserialize};

-use serai_primitives::{address::SeraiAddress, validator_sets::MAX_KEY_SHARES_PER_SET};
+use serai_primitives::{BlockHash, validator_sets::KeyShares, address::SeraiAddress};

 use messages::sign::VariantSignId;

@@ -137,7 +137,7 @@ pub enum Transaction {
   /// be the one selected to be cosigned.
   Cosign {
     /// The hash of the Substrate block to cosign
-    substrate_block_hash: [u8; 32],
+    substrate_block_hash: BlockHash,
   },

   /// Note an intended-to-be-cosigned Substrate block as cosigned
@@ -175,7 +175,7 @@ pub enum Transaction {
   /// cosigning the block in question, it'd be safe to provide this and move on to the next cosign.
   Cosigned {
     /// The hash of the Substrate block which was cosigned
-    substrate_block_hash: [u8; 32],
+    substrate_block_hash: BlockHash,
   },

   /// Acknowledge a Substrate block
@@ -186,7 +186,7 @@ pub enum Transaction {
   /// resulting from its handling.
   SubstrateBlock {
     /// The hash of the Substrate block
-    hash: [u8; 32],
+    hash: BlockHash,
   },

   /// Acknowledge a Batch
@@ -250,11 +250,11 @@ impl TransactionTrait for Transaction {
         signed.to_tributary_signed(0),
       ),
       Transaction::DkgConfirmationPreprocess { attempt, signed, .. } => TransactionKind::Signed(
-        borsh::to_vec(b"DkgConfirmation".as_slice(), attempt).unwrap(),
+        borsh::to_vec(&(b"DkgConfirmation".as_slice(), attempt)).unwrap(),
         signed.to_tributary_signed(0),
       ),
      Transaction::DkgConfirmationShare { attempt, signed, .. } => TransactionKind::Signed(
-        borsh::to_vec(b"DkgConfirmation".as_slice(), attempt).unwrap(),
+        borsh::to_vec(&(b"DkgConfirmation".as_slice(), attempt)).unwrap(),
         signed.to_tributary_signed(1),
       ),

@@ -264,7 +264,7 @@ impl TransactionTrait for Transaction {
       Transaction::Batch { .. } => TransactionKind::Provided("Batch"),

       Transaction::Sign { id, attempt, round, signed, .. } => TransactionKind::Signed(
-        borsh::to_vec(b"Sign".as_slice(), id, attempt).unwrap(),
+        borsh::to_vec(&(b"Sign".as_slice(), id, attempt)).unwrap(),
         signed.to_tributary_signed(round.nonce()),
       ),

@@ -303,14 +303,14 @@ impl TransactionTrait for Transaction {
       Transaction::Batch { .. } => {}

       Transaction::Sign { data, .. } => {
-        if data.len() > usize::from(MAX_KEY_SHARES_PER_SET) {
+        if data.len() > usize::from(KeyShares::MAX_PER_SET) {
           Err(TransactionError::InvalidContent)?
         }
         // TODO: MAX_SIGN_LEN
       }

       Transaction::SlashReport { slash_points, .. } => {
-        if slash_points.len() > usize::from(MAX_KEY_SHARES_PER_SET) {
+        if slash_points.len() > usize::from(KeyShares::MAX_PER_SET) {
           Err(TransactionError::InvalidContent)?
         }
       }
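The borsh::to_vec call sites above are corrected to pass a single serializable value: the domain-separation label and the other fields are wrapped in one tuple rather than passed as separate arguments. A minimal sketch of that pattern, assuming only the `borsh` crate (the function name is hypothetical, not the coordinator's API):

// `borsh::to_vec` takes one serializable value, so a label and an attempt counter
// are combined into a tuple to produce a single, domain-separated byte string.
fn signed_kind_id(label: &[u8], attempt: u32) -> Vec<u8> {
  borsh::to_vec(&(label, attempt)).unwrap()
}

fn main() {
  assert_ne!(signed_kind_id(b"DkgConfirmation", 0), signed_kind_id(b"Sign", 0));
}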
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![cfg_attr(not(feature = "std"), no_std)]

 use zeroize::Zeroize;
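This same `doc_auto_cfg` to `doc_cfg` swap recurs across the crates below. As background (not taken from this diff): `doc_auto_cfg` lets rustdoc infer feature banners automatically, while `doc_cfg` only applies banners that are written out by hand, roughly as in this hedged sketch:

#![cfg_attr(docsrs, feature(doc_cfg))]

// With `doc_cfg`, the "available on feature `std` only" banner must be spelled out explicitly.
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub fn std_only() {}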
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("lib.md")]
 #![cfg_attr(not(feature = "std"), no_std)]

@@ -1,5 +1,5 @@
 #![allow(deprecated)]
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![no_std] // Prevents writing new code, in what should be a simple wrapper, which requires std
 #![doc = include_str!("../README.md")]
 #![allow(clippy::redundant_closure_call)]

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![no_std]

@@ -26,21 +26,9 @@ presented in section 4.2 is extended, with the following changes:
 just one round.

 For a gist of the verifiable encryption scheme, please see
-https://gist.github.com/kayabaNerve/cfbde74b0660dfdf8dd55326d6ec33d7. Security
-proofs are currently being worked on.
-
----
-
-This library relies on an implementation of Bulletproofs and various
-zero-knowledge gadgets. This library uses
-[`generalized-bulletproofs`](https://docs.rs/generalized-bulletproofs),
-[`generalized-bulletproofs-circuit-abstraction`](https://docs.rs/generalized-bulletproofs-circuit-abstraction),
-and
-[`generalized-bulletproofs-ec-gadgets`](https://docs.rs/generalized-bulletproofs-ec-gadgets)
-from the Monero project's FCMP++ codebase. These libraries have received the
-following audits in the past:
-- https://github.com/kayabaNerve/monero-oxide/tree/fcmp++/audits/generalized-bulletproofs
-- https://github.com/kayabaNerve/monero-oxide/tree/fcmp++/audits/fcmps
+https://gist.github.com/kayabaNerve/cfbde74b0660dfdf8dd55326d6ec33d7. For
+security proofs and audit information, please see
+[here](../../../audits/crypto/dkg/evrf).

 ---

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![no_std]

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]

@@ -33,6 +33,6 @@ rand_core = { version = "0.6", default-features = false, features = ["std"] }
 ff-group-tests = { path = "../ff-group-tests" }

 [features]
-alloc = ["zeroize/alloc", "sha3/alloc", "crypto-bigint/alloc", "prime-field/alloc", "ciphersuite/alloc"]
+alloc = ["zeroize/alloc", "sha3/alloc", "prime-field/alloc", "ciphersuite/alloc"]
 std = ["alloc", "zeroize/std", "prime-field/std", "ciphersuite/std"]
 default = ["std"]

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![no_std]

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]

 /// Tests for the Field trait.
@@ -28,8 +28,10 @@ impl<A: Send + Sync + Clone + PartialEq + Debug + WriteAddendum> Addendum for A

 /// Algorithm trait usable by the FROST signing machine to produce signatures..
 pub trait Algorithm<C: Curve>: Send + Sync {
-  /// The transcript format this algorithm uses. This likely should NOT be the IETF-compatible
-  /// transcript included in this crate.
+  /// The transcript format this algorithm uses.
+  ///
+  /// This MUST NOT be the IETF-compatible transcript included in this crate UNLESS this is an
+  /// IETF-specified ciphersuite.
   type Transcript: Sync + Clone + Debug + Transcript;
   /// Serializable addendum, used in algorithms requiring more data than just the nonces.
   type Addendum: Addendum;
@@ -69,8 +71,10 @@ pub trait Algorithm<C: Curve>: Send + Sync {
   ) -> Result<(), FrostError>;

   /// Sign a share with the given secret/nonce.
+  ///
   /// The secret will already have been its lagrange coefficient applied so it is the necessary
   /// key share.
+  ///
   /// The nonce will already have been processed into the combined form d + (e * p).
   fn sign_share(
     &mut self,
@@ -85,6 +89,7 @@ pub trait Algorithm<C: Curve>: Send + Sync {
   fn verify(&self, group_key: C::G, nonces: &[Vec<C::G>], sum: C::F) -> Option<Self::Signature>;

   /// Verify a specific share given as a response.
+  ///
   /// This function should return a series of pairs whose products should sum to zero for a valid
   /// share. Any error raised is treated as the share being invalid.
   #[allow(clippy::type_complexity, clippy::result_unit_err)]
@@ -99,8 +104,10 @@ pub trait Algorithm<C: Curve>: Send + Sync {
 mod sealed {
   pub use super::*;

-  /// IETF-compliant transcript. This is incredibly naive and should not be used within larger
-  /// protocols.
+  /// IETF-compliant transcript.
+  ///
+  /// This is incredibly naive and MUST NOT be used within larger protocols. No guarantees are made
+  /// about its safety EXCEPT as used with the IETF-specified FROST ciphersuites.
   #[derive(Clone, Debug)]
   pub struct IetfTranscript(pub(crate) Vec<u8>);
   impl Transcript for IetfTranscript {
@@ -131,6 +138,7 @@ pub(crate) use sealed::IetfTranscript;
 /// HRAm usable by the included Schnorr signature algorithm to generate challenges.
 pub trait Hram<C: Curve>: Send + Sync + Clone {
   /// HRAm function to generate a challenge.
+  ///
   /// H2 from the IETF draft, despite having a different argument set (not being pre-formatted).
   #[allow(non_snake_case)]
   fn hram(R: &C::G, A: &C::G, m: &[u8]) -> C::F;
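The strengthened warnings on IetfTranscript reflect that it simply appends bytes with no framing. A hedged, standalone illustration of why unframed concatenation is unsafe for domain separation in a larger protocol (the names below are illustrative, not the crate's API):

// Two different input sequences serialize to the same bytes when a transcript
// just concatenates its inputs without labels or length prefixes.
fn naive_transcript(parts: &[&[u8]]) -> Vec<u8> {
  let mut out = Vec::new();
  for part in parts {
    out.extend_from_slice(part);
  }
  out
}

fn main() {
  let a = naive_transcript(&[b"ab".as_slice(), b"c".as_slice()]);
  let b = naive_transcript(&[b"a".as_slice(), b"bc".as_slice()]);
  assert_eq!(a, b);
}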
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]

@@ -102,6 +102,7 @@ pub trait PreprocessMachine: Send {
   type SignMachine: SignMachine<Self::Signature, Preprocess = Self::Preprocess>;

   /// Perform the preprocessing round required in order to sign.
+  ///
   /// Returns a preprocess message to be broadcast to all participants, over an authenticated
   /// channel.
   fn preprocess<R: RngCore + CryptoRng>(self, rng: &mut R)
@@ -235,6 +236,8 @@ pub trait SignMachine<S>: Send + Sync + Sized {
   /// Takes in the participants' preprocess messages. Returns the signature share to be broadcast
   /// to all participants, over an authenticated channel. The parties who participate here will
   /// become the signing set for this session.
+  ///
+  /// The caller MUST only use preprocesses obtained via this machine's `read_preprocess` function.
   fn sign(
     self,
     commitments: HashMap<Participant, Self::Preprocess>,
@@ -421,7 +424,10 @@ pub trait SignatureMachine<S>: Send + Sync {
   fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::SignatureShare>;

   /// Complete signing.
+  ///
   /// Takes in everyone elses' shares. Returns the signature.
+  ///
+  /// The caller MUST only use shares obtained via this machine's `read_shares` function.
   fn complete(self, shares: HashMap<Participant, Self::SignatureShare>) -> Result<S, FrostError>;
 }

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]

@@ -26,6 +26,6 @@ ff = { version = "0.13", default-features = false, features = ["bits"] }
 ff-group-tests = { version = "0.13", path = "../ff-group-tests", optional = true }

 [features]
-alloc = ["zeroize/alloc", "crypto-bigint/alloc", "ff/alloc"]
+alloc = ["zeroize/alloc", "ff/alloc"]
 std = ["alloc", "zeroize/std", "subtle/std", "rand_core/std", "ff/std", "ff-group-tests"]
 default = ["std"]

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![no_std]

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![no_std]
 #![allow(non_snake_case)]

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 #![no_std]
18
deny.toml
@@ -7,10 +7,8 @@ db-urls = ["https://github.com/rustsec/advisory-db"]
 yanked = "deny"

 ignore = [
-  "RUSTSEC-2022-0061", # https://github.com/serai-dex/serai/227
-  "RUSTSEC-2024-0370", # proc-macro-error is unmaintained
+  "RUSTSEC-2024-0370", # `proc-macro-error` is unmaintained, in-tree due to Substrate/`litep2p`
   "RUSTSEC-2024-0436", # paste is unmaintained
-  "RUSTSEC-2025-0057", # https://github.com/bytecodealliance/wasmtime/pull/11634
 ]

 [licenses]
@@ -80,7 +78,7 @@ exceptions = [
   { allow = ["AGPL-3.0-only"], name = "serai-coordinator-libp2p-p2p" },
   { allow = ["AGPL-3.0-only"], name = "serai-coordinator" },

-  { allow = ["AGPL-3.0-only"], name = "pallet-session" },
+  { allow = ["AGPL-3.0-only"], name = "substrate-median" },

   { allow = ["AGPL-3.0-only"], name = "serai-core-pallet" },
   { allow = ["AGPL-3.0-only"], name = "serai-coins-pallet" },
@@ -108,6 +106,7 @@ exceptions = [
   { allow = ["AGPL-3.0-only"], name = "serai-message-queue-tests" },
   { allow = ["AGPL-3.0-only"], name = "serai-processor-tests" },
   { allow = ["AGPL-3.0-only"], name = "serai-coordinator-tests" },
+  { allow = ["AGPL-3.0-only"], name = "serai-substrate-tests" },
   { allow = ["AGPL-3.0-only"], name = "serai-full-stack-tests" },
   { allow = ["AGPL-3.0-only"], name = "serai-reproducible-runtime-tests" },
 ]
@@ -125,12 +124,22 @@ multiple-versions = "warn"
 wildcards = "warn"
 highlight = "all"
 deny = [
+  # Contains a non-reproducible binary blob
+  # https://github.com/serde-rs/serde/pull/2514
+  # https://github.com/serde-rs/serde/issues/2575
   { name = "serde_derive", version = ">=1.0.172, <1.0.185" },
+  # Introduced an insecure implementation of `borsh` removed with `0.15.1`
+  # https://github.com/rust-lang/hashbrown/issues/576
   { name = "hashbrown", version = "=0.15.0" },

   # Legacy which _no one_ should use anymore
   { name = "is-terminal", version = "*" },
   # Stop introduction into the tree without realizing it
   { name = "once_cell_polyfill", version = "*" },

+  # Conflicts with our usage of mimalloc
+  # https://github.com/serai-dex/serai/issues/690
+  { name = "tikv-jemalloc-sys", version = "*" },
 ]

 [sources]
@@ -138,7 +147,6 @@ unknown-registry = "deny"
 unknown-git = "deny"
 allow-registry = ["https://github.com/rust-lang/crates.io-index"]
 allow-git = [
-  "https://github.com/rust-lang-nursery/lazy-static.rs",
   "https://github.com/kayabaNerve/elliptic-curves",
   "https://github.com/monero-oxide/monero-oxide",
   "https://github.com/serai-dex/patch-polkadot-sdk",
@@ -1 +1 @@
-3.3.4
+3.3.10

@@ -1,4 +1,4 @@
 source 'https://rubygems.org'

-gem "jekyll", "~> 4.3.3"
-gem "just-the-docs", "0.8.2"
+gem "jekyll", "~> 4.4"
+gem "just-the-docs", "0.10.1"

@@ -1,34 +1,39 @@
 GEM
   remote: https://rubygems.org/
   specs:
-    addressable (2.8.7)
-      public_suffix (>= 2.0.2, < 7.0)
-    bigdecimal (3.1.8)
+    addressable (2.8.8)
+      public_suffix (>= 2.0.2, < 8.0)
+    base64 (0.3.0)
+    bigdecimal (3.3.1)
     colorator (1.1.0)
-    concurrent-ruby (1.3.4)
+    concurrent-ruby (1.3.5)
+    csv (3.3.5)
     em-websocket (0.5.3)
       eventmachine (>= 0.12.9)
       http_parser.rb (~> 0)
     eventmachine (1.2.7)
-    ffi (1.17.0-x86_64-linux-gnu)
+    ffi (1.17.2-x86_64-linux-gnu)
     forwardable-extended (2.6.0)
-    google-protobuf (4.28.2-x86_64-linux)
+    google-protobuf (4.33.1-x86_64-linux-gnu)
       bigdecimal
       rake (>= 13)
     http_parser.rb (0.8.0)
-    i18n (1.14.6)
+    i18n (1.14.7)
       concurrent-ruby (~> 1.0)
-    jekyll (4.3.4)
+    jekyll (4.4.1)
       addressable (~> 2.4)
+      base64 (~> 0.2)
       colorator (~> 1.0)
+      csv (~> 3.0)
       em-websocket (~> 0.5)
       i18n (~> 1.0)
       jekyll-sass-converter (>= 2.0, < 4.0)
       jekyll-watch (~> 2.0)
+      json (~> 2.6)
       kramdown (~> 2.3, >= 2.3.1)
       kramdown-parser-gfm (~> 1.0)
       liquid (~> 4.0)
-      mercenary (>= 0.3.6, < 0.5)
+      mercenary (~> 0.3, >= 0.3.6)
       pathutil (~> 0.9)
       rouge (>= 3.0, < 5.0)
       safe_yaml (~> 1.0)
@@ -36,19 +41,20 @@ GEM
       webrick (~> 1.7)
     jekyll-include-cache (0.2.1)
       jekyll (>= 3.7, < 5.0)
-    jekyll-sass-converter (3.0.0)
-      sass-embedded (~> 1.54)
+    jekyll-sass-converter (3.1.0)
+      sass-embedded (~> 1.75)
     jekyll-seo-tag (2.8.0)
       jekyll (>= 3.8, < 5.0)
     jekyll-watch (2.2.1)
       listen (~> 3.0)
-    just-the-docs (0.8.2)
+    json (2.16.0)
+    just-the-docs (0.10.1)
       jekyll (>= 3.8.5)
       jekyll-include-cache
       jekyll-seo-tag (>= 2.0)
       rake (>= 12.3.1)
-    kramdown (2.4.0)
-      rexml
+    kramdown (2.5.1)
+      rexml (>= 3.3.9)
     kramdown-parser-gfm (1.1.0)
       kramdown (~> 2.0)
     liquid (4.0.4)
@@ -58,27 +64,27 @@ GEM
     mercenary (0.4.0)
     pathutil (0.16.2)
       forwardable-extended (~> 2.6)
-    public_suffix (6.0.1)
-    rake (13.2.1)
+    public_suffix (7.0.0)
+    rake (13.3.1)
     rb-fsevent (0.11.2)
     rb-inotify (0.11.1)
       ffi (~> 1.0)
-    rexml (3.3.7)
-    rouge (4.4.0)
+    rexml (3.4.4)
+    rouge (4.6.1)
     safe_yaml (1.0.5)
-    sass-embedded (1.79.3-x86_64-linux-gnu)
-      google-protobuf (~> 4.27)
+    sass-embedded (1.94.2-x86_64-linux-gnu)
+      google-protobuf (~> 4.31)
     terminal-table (3.0.2)
       unicode-display_width (>= 1.1.1, < 3)
     unicode-display_width (2.6.0)
-    webrick (1.8.2)
+    webrick (1.9.2)

 PLATFORMS
   x86_64-linux

 DEPENDENCIES
-  jekyll (~> 4.3.3)
-  just-the-docs (= 0.8.2)
+  jekyll (~> 4.4)
+  just-the-docs (= 0.10.1)

 BUNDLED WITH
-   2.5.11
+   2.5.22
@@ -219,7 +219,6 @@ async fn main() {
       ExternalNetworkId::Bitcoin => "BITCOIN_KEY",
       ExternalNetworkId::Ethereum => "ETHEREUM_KEY",
       ExternalNetworkId::Monero => "MONERO_KEY",
-      _ => panic!("unrecognized network"),
     }) else {
       continue;
     };
@@ -239,8 +238,7 @@ async fn main() {
     // TODO: Add a magic value with a key at the start of the connection to make this authed
     let mut db = db.clone();
     tokio::spawn(async move {
-      loop {
-        let Ok(msg_len) = socket.read_u32_le().await else { break };
+      while let Ok(msg_len) = socket.read_u32_le().await {
         let mut buf = vec![0; usize::try_from(msg_len).unwrap()];
         let Ok(_) = socket.read_exact(&mut buf).await else { break };
         let msg = borsh::from_slice(&buf).unwrap();
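The second hunk folds the manual break-on-read-failure into the loop condition. A minimal, hedged sketch of the same length-prefixed read loop, assuming tokio with its io-util feature enabled (the message handling is a placeholder, not the message-queue's logic):

use tokio::io::AsyncReadExt;

// Reads u32-length-prefixed frames until the peer closes the connection or errors.
async fn read_frames(mut socket: tokio::net::TcpStream) {
  while let Ok(msg_len) = socket.read_u32_le().await {
    let mut buf = vec![0; usize::try_from(msg_len).unwrap()];
    let Ok(_) = socket.read_exact(&mut buf).await else { break };
    // Placeholder: a real caller would deserialize (e.g. with borsh) and handle the message.
    println!("received {} bytes", buf.len());
  }
}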
@@ -6,7 +6,7 @@ license = "MIT"
 repository = "https://github.com/serai-dex/serai/tree/develop/networks/bitcoin"
 authors = ["Luke Parker <lukeparker5132@gmail.com>", "Vrx <vrx00@proton.me>"]
 edition = "2021"
-rust-version = "1.85"
+rust-version = "1.89"

 [package.metadata.docs.rs]
 all-features = true
@@ -30,9 +30,9 @@ k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic"
 frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.11", default-features = false, features = ["secp256k1"] }

 hex = { version = "0.4", default-features = false, optional = true }
-serde = { version = "1", default-features = false, features = ["derive"], optional = true }
-serde_json = { version = "1", default-features = false, optional = true }
-simple-request = { path = "../../common/request", version = "0.2", default-features = false, features = ["tls", "basic-auth"], optional = true }
+core-json-traits = { version = "0.4", default-features = false, features = ["alloc"], optional = true }
+core-json-derive = { version = "0.4", default-features = false, optional = true }
+simple-request = { path = "../../common/request", version = "0.3", default-features = false, features = ["tokio", "tls", "basic-auth"], optional = true }

 [dev-dependencies]
 secp256k1 = { version = "0.29", default-features = false, features = ["std"] }
@@ -52,15 +52,16 @@ std = [
   "rand_core/std",

   "bitcoin/std",
-  "bitcoin/serde",

   "k256/std",
   "frost/std",
-
+]
+rpc = [
+  "std",
   "hex/std",
-  "serde/std",
-  "serde_json/std",
+  "core-json-traits",
+  "core-json-derive",
   "simple-request",
 ]
 hazmat = []
-default = ["std"]
+default = ["std", "rpc"]
Some files were not shown because too many files have changed in this diff.