18 Commits

Author SHA1 Message Date
Luke Parker
0b377f3c4e Always generate a new key for the P2P network 2025-09-22 06:41:30 -04:00
Luke Parker
3655ae68df Restore chain_getBlockBin to the RPC 2025-09-22 06:41:07 -04:00
Luke Parker
3f2c1bf303 Move from Debian bookworm to trixie 2025-09-22 03:41:25 -04:00
Luke Parker
c12c471602 Restore clang as a dependency in the Debian Dockerfile, as we require a C++ compiler 2025-09-22 02:57:50 -04:00
Luke Parker
77ceb86f9a Update to a version of substrate without wasmtimer
Turns out `wasmtimer` is WASM only. This should restore the node's functioning
on non-WASM environments.
2025-09-22 02:20:57 -04:00
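`wasmtimer` mirrors tokio's timer API but only compiles for WASM targets, so timer usage must be gated on the target family. A minimal sketch of such a gate, assuming a tokio fallback on native targets; the actual structure in the tree may differ:

  use core::time::Duration;

  // On WASM, take the timer from `wasmtimer`; everywhere else, from tokio.
  #[cfg(target_family = "wasm")]
  use wasmtimer::tokio::sleep;
  #[cfg(not(target_family = "wasm"))]
  use tokio::time::sleep;

  async fn wait_one_second() {
    sleep(Duration::from_secs(1)).await;
  }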
Luke Parker
486409c9ef Set the CODE storage slot 2025-09-22 01:04:03 -04:00
Luke Parker
de460e368a Add libclang-dev as dependency to the Debian Dockerfile 2025-09-22 00:58:33 -04:00
Luke Parker
c904167920 Replace hard-coded path with the intended env variable to fix macOS 13 2025-09-22 00:41:16 -04:00
Luke Parker
d20a66891c Use sw_vers instead of uname on macOS
Yields the macOS version instead of the kernel's version.
2025-09-22 00:14:14 -04:00
Luke Parker
18ce7efc19 Account for macOS 13 and 14 having different homebrew paths 2025-09-22 00:08:20 -04:00
Luke Parker
fabe22e99f Correct llvm/lib path on macOS 2025-09-21 23:54:58 -04:00
Luke Parker
feb6166190 Correct Windows build dependencies 2025-09-21 23:42:50 -04:00
Luke Parker
87920d8685 Use libp2p 0.56 in serai-node 2025-09-21 23:34:33 -04:00
Luke Parker
5087b32cc8 Attempt to use LD_LIBRARY_PATH in macOS GitHub CI 2025-09-21 23:32:33 -04:00
Luke Parker
9fa7a08c1a Add /usr/local/opt/llvm/lib to paths on macOS hosts 2025-09-21 23:17:11 -04:00
Luke Parker
9a08ffde09 CI fixes 2025-09-21 22:53:22 -04:00
Luke Parker
41bb2c7b2a Update develop to patch-polkadot-sdk
Allows us to finally remove the old `serai-dex/substrate` repository _and_
should have CI pass without issue on `develop` again.

The changes made here should be trivial and maintain all prior
behavior/functionality. The most notable are to `chain_spec.rs`, in order to
still use a SCALE-encoded `GenesisConfig` (avoiding `serde_json`).
2025-09-21 22:36:43 -04:00
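As a minimal sketch of the `chain_spec.rs` idea mentioned above, SCALE-encoding a genesis configuration directly rather than round-tripping it through `serde_json`; the struct is a hypothetical stand-in, not the runtime's actual `GenesisConfig`:

  use parity_scale_codec::Encode; // requires the crate's "derive" feature

  // Hypothetical, simplified stand-in for the runtime's genesis configuration
  #[derive(Encode)]
  struct GenesisConfig {
    validators: Vec<[u8; 32]>,
  }

  fn genesis_blob(validators: Vec<[u8; 32]>) -> Vec<u8> {
    // SCALE-encode the config directly, avoiding `serde_json`
    GenesisConfig { validators }.encode()
  }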
Luke Parker
52d15b789c Update build-dependencies CI action 2025-09-21 15:41:09 -04:00
883 changed files with 48612 additions and 53279 deletions

.github/LICENSE

@@ -1,6 +1,6 @@
 MIT License
-Copyright (c) 2022-2025 Luke Parker
+Copyright (c) 2022-2023 Luke Parker
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal


@@ -5,14 +5,14 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: "30.0"
+    default: "27.0"
 runs:
   using: "composite"
   steps:
     - name: Bitcoin Daemon Cache
       id: cache-bitcoind
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # 4.2.4
+      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
       with:
         path: bitcoin.tar.gz
         key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

@@ -37,4 +37,4 @@ runs:
     - name: Bitcoin Regtest Daemon
       shell: bash
-      run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -txindex -daemon
+      run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -daemon


@@ -43,8 +43,8 @@ runs:
         choco install protoc
       elif [ "$RUNNER_OS" == "macOS" ]; then
         brew install protobuf llvm
-        HOMEBREW_ROOT_PATH=/opt/homebrew # Apple Silicon
-        if [ $(uname -m) = "x86_64" ]; then HOMEBREW_ROOT_PATH=/usr/local; fi # Intel
+        HOMEBREW_ROOT_PATH=/opt/homebrew # macOS 14+
+        if [ $(sw_vers -productVersion | awk -F "." '{ print $1 }') = "13" ]; then HOMEBREW_ROOT_PATH=/usr/local; fi # macOS 13
         ls $HOMEBREW_ROOT_PATH/opt/llvm/lib | grep "libclang.dylib" # Make sure this installed `libclang`
         echo "DYLD_LIBRARY_PATH=$HOMEBREW_ROOT_PATH/opt/llvm/lib:$DYLD_LIBRARY_PATH" >> "$GITHUB_ENV"
       fi

@@ -52,9 +52,9 @@ runs:
   - name: Install solc
     shell: bash
     run: |
-      cargo +1.91.1 install svm-rs --version =0.5.22
-      svm install 0.8.29
-      svm use 0.8.29
+      cargo +1.89 install svm-rs --version =0.5.18
+      svm install 0.8.26
+      svm use 0.8.26
   - name: Remove preinstalled Docker
     shell: bash

@@ -62,7 +62,7 @@ runs:
       docker system prune -a --volumes
       sudo apt remove -y *docker*
       # Install uidmap which will be required for the explicitly installed Docker
-      sudo apt install -y uidmap
+      sudo apt install uidmap
     if: runner.os == 'Linux'
   - name: Update system dependencies

@@ -75,8 +75,11 @@ runs:
     if: runner.os == 'Linux'
   - name: Install rootless Docker
-    uses: docker/setup-docker-action@e61617a16c407a86262fb923c35a616ddbe070b3 # 4.6.0
+    uses: docker/setup-docker-action@b60f85385d03ac8acfca6d9996982511d8620a19
     with:
       rootless: true
       set-host: true
     if: runner.os == 'Linux'
+  # - name: Cache Rust
+  #   uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43


@@ -5,14 +5,14 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: v0.18.4.4
+    default: v0.18.3.4
 runs:
   using: "composite"
   steps:
     - name: Monero Wallet RPC Cache
       id: cache-monero-wallet-rpc
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # 4.2.4
+      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
       with:
         path: monero-wallet-rpc
         key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}


@@ -5,46 +5,39 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: v0.18.4.4
+    default: v0.18.3.4
 runs:
   using: "composite"
   steps:
     - name: Monero Daemon Cache
       id: cache-monerod
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # 4.2.4
+      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
       with:
         path: /usr/bin/monerod
         key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}
     - name: Download the Monero Daemon
       if: steps.cache-monerod.outputs.cache-hit != 'true'
+      # Calculates OS/ARCH to demonstrate it, yet then locks to linux-x64 due
+      # to the contained folder not following the same naming scheme and
+      # requiring further expansion not worth doing right now
       shell: bash
       run: |
-        OS=${{ runner.os }}
-        ARCH=${{ runner.arch }}
-        OS=$(echo "$OS" | tr "[:upper:]" "[:lower:]")
-        ARCH=$(echo "$ARCH" | tr "[:upper:]" "[:lower:]")
-        if [ "$OS" = "windows" ]; then
-          OS=win
-          echo "Windows is unsupported at this time"
-          exit 1
-        fi
-        if [ "$OS" = "macos" ]; then
-          OS=mac
-        fi
-        if [ "$ARCH" = "arm64" ]; then
-          ARCH=armv8
-        fi
-        FILE=monero-$OS-$ARCH-${{ inputs.version }}.tar.bz2
+        RUNNER_OS=${{ runner.os }}
+        RUNNER_ARCH=${{ runner.arch }}
+        RUNNER_OS=${RUNNER_OS,,}
+        RUNNER_ARCH=${RUNNER_ARCH,,}
+        RUNNER_OS=linux
+        RUNNER_ARCH=x64
+        FILE=monero-$RUNNER_OS-$RUNNER_ARCH-${{ inputs.version }}.tar.bz2
         wget https://downloads.getmonero.org/cli/$FILE
         tar -xvf $FILE
-        rm $FILE
-        sudo mv $(find . -name monerod) /usr/bin/monerod
+        sudo mv monero-x86_64-linux-gnu-${{ inputs.version }}/monerod /usr/bin/monerod
         sudo chmod 777 /usr/bin/monerod
         sudo chmod +x /usr/bin/monerod


@@ -5,12 +5,12 @@ inputs:
   monero-version:
     description: "Monero version to download and run as a regtest node"
     required: false
-    default: v0.18.4.4
+    default: v0.18.3.4
   bitcoin-version:
     description: "Bitcoin version to download and run as a regtest node"
     required: false
-    default: "30.0"
+    default: "27.1"
 runs:
   using: "composite"

@@ -19,9 +19,9 @@ runs:
     uses: ./.github/actions/build-dependencies
   - name: Install Foundry
-    uses: foundry-rs/foundry-toolchain@50d5a8956f2e319df19e6b57539d7e2acb9f8c1e # 1.5.0
+    uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
     with:
-      version: v1.5.0
+      version: nightly-f625d0fa7c51e65b4bf1e8f7931cd1c6e2e285e9
       cache: false
   - name: Run a Monero Regtest Node


@@ -1 +1 @@
-nightly-2025-12-01
+nightly-2025-09-01


@@ -17,7 +17,7 @@ jobs:
   test-common:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies

@@ -30,5 +30,4 @@ jobs:
           -p patchable-async-sleep \
           -p serai-db \
           -p serai-env \
-          -p serai-task \
           -p simple-request


@@ -31,7 +31,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies


@@ -19,7 +19,7 @@ jobs:
   test-crypto:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies

@@ -35,14 +35,12 @@ jobs:
           -p ciphersuite-kp256 \
           -p multiexp \
           -p schnorr-signatures \
-          -p prime-field \
-          -p short-weierstrass \
-          -p secq256k1 \
-          -p embedwards25519 \
+          -p dleq \
           -p dkg \
           -p dkg-recovery \
           -p dkg-dealer \
-          -p dkg-promote \
           -p dkg-musig \
-          -p dkg-evrf \
+          -p dkg-pedpop \
           -p modular-frost \
           -p frost-schnorrkel


@@ -9,10 +9,16 @@ jobs:
   name: Run cargo deny
   runs-on: ubuntu-latest
   steps:
-    - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+    - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+    - name: Advisory Cache
+      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
+      with:
+        path: ~/.cargo/advisory-db
+        key: rust-advisory-db
     - name: Install cargo deny
-      run: cargo +1.91.1 install cargo-deny --version =0.18.9
+      run: cargo +1.89 install cargo-deny --version =0.18.3
     - name: Run cargo deny
       run: cargo deny -L error --all-features check --hide-inclusion-graph


@@ -13,7 +13,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies


@@ -11,11 +11,11 @@ jobs:
   clippy:
     strategy:
       matrix:
-        os: [ubuntu-latest, macos-15-intel, macos-latest, windows-latest]
+        os: [ubuntu-latest, macos-13, macos-14, windows-latest]
     runs-on: ${{ matrix.os }}
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Get nightly version to use
         id: nightly

@@ -26,7 +26,7 @@ jobs:
         uses: ./.github/actions/build-dependencies
       - name: Install nightly rust
-        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c clippy
+        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-src -c clippy
       - name: Run Clippy
         run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module

@@ -43,10 +43,16 @@ jobs:
   deny:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - name: Advisory Cache
+        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
+        with:
+          path: ~/.cargo/advisory-db
+          key: rust-advisory-db
       - name: Install cargo deny
-        run: cargo +1.91.1 install cargo-deny --version =0.18.9
+        run: cargo +1.89 install cargo-deny --version =0.18.4
       - name: Run cargo deny
         run: cargo deny -L error --all-features check --hide-inclusion-graph

@@ -54,7 +60,7 @@ jobs:
   fmt:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Get nightly version to use
         id: nightly

@@ -67,148 +73,11 @@ jobs:
       - name: Run rustfmt
         run: cargo +${{ steps.nightly.outputs.version }} fmt -- --check
-      - name: Install Foundry
-        uses: foundry-rs/foundry-toolchain@50d5a8956f2e319df19e6b57539d7e2acb9f8c1e # 1.5.0
-        with:
-          version: v1.5.0
-          cache: false
-      - name: Run forge fmt
-        run: FOUNDRY_FMT_SORT_INPUTS=false FOUNDRY_FMT_LINE_LENGTH=100 FOUNDRY_FMT_TAB_WIDTH=2 FOUNDRY_FMT_BRACKET_SPACING=true FOUNDRY_FMT_INT_TYPES=preserve forge fmt --check $(find . -name "*.sol")
   machete:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Verify all dependencies are in use
         run: |
-          cargo +1.91.1 install cargo-machete --version =0.9.1
-          cargo +1.91.1 machete
+          cargo +1.89 install cargo-machete --version =0.8.0
+          cargo +1.89 machete

The remainder of this hunk removes the following jobs in their entirety:
msrv:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- name: Verify claimed `rust-version`
shell: bash
run: |
cargo +1.91.1 install cargo-msrv --version =0.18.4
function check_msrv {
# We `cd` into the directory passed as the first argument, but will return to the
# directory called from.
return_to=$(pwd)
echo "Checking $1"
cd $1
# We then find the existing `rust-version` using `grep` (for the right line) and then a
# regex (to strip to just the major and minor version).
existing=$(cat ./Cargo.toml | grep "rust-version" | grep -Eo "[0-9]+\.[0-9]+")
# We then backup the `Cargo.toml`, allowing us to restore it after, saving time on future
# MSRV checks (as they'll benefit from immediately exiting if the queried version is less
# than the declared MSRV).
mv ./Cargo.toml ./Cargo.toml.bak
# We then use an inverted (`-v`) grep to remove the existing `rust-version` from the
# `Cargo.toml`, as required because else earlier versions of Rust won't even attempt to
# compile this crate.
cat ./Cargo.toml.bak | grep -v "rust-version" > Cargo.toml
# We then find the actual `rust-version` using `cargo-msrv` (again stripping to just the
# major and minor version).
actual=$(cargo msrv find --output-format minimal | grep -Eo "^[0-9]+\.[0-9]+")
# Finally, we compare the two.
echo "Declared rust-version: $existing"
echo "Actual rust-version: $actual"
[ $existing == $actual ]
result=$?
# Restore the original `Cargo.toml`.
rm Cargo.toml
mv ./Cargo.toml.bak ./Cargo.toml
# Return to the directory called from and return the result.
cd $return_to
return $result
}
# Check each member of the workspace
function check_workspace {
# Get the members array from the workspace's `Cargo.toml`
cargo_toml_lines=$(cat ./Cargo.toml | wc -l)
# Keep all lines after the start of the array, then keep all lines before the next "]"
members=$(cat Cargo.toml | grep "members\ \=\ \[" -m1 -A$cargo_toml_lines | grep "]" -m1 -B$cargo_toml_lines)
# Parse out any comments, whitespace, including comments post-fixed on the same line as an entry
# We accomplish the latter by pruning all characters after the entry's ","
members=$(echo "$members" | grep -Ev "^[[:space:]]*(#|$)" | awk -F',' '{print $1","}')
# Replace the first line, which was "members = [" and is now "members = [,", with "["
members=$(echo "$members" | sed "1s/.*/\[/")
# Correct the last line, which was malleated to "],"
members=$(echo "$members" | sed "$(echo "$members" | wc -l)s/\]\,/\]/")
# Don't check the following
# Most of these are binaries, with the exception of the Substrate runtime which has a
# bespoke build pipeline
members=$(echo "$members" | grep -v "networks/ethereum/relayer\"")
members=$(echo "$members" | grep -v "message-queue\"")
members=$(echo "$members" | grep -v "processor/bin\"")
members=$(echo "$members" | grep -v "processor/bitcoin\"")
members=$(echo "$members" | grep -v "processor/ethereum\"")
members=$(echo "$members" | grep -v "processor/monero\"")
members=$(echo "$members" | grep -v "coordinator\"")
members=$(echo "$members" | grep -v "substrate/runtime\"")
members=$(echo "$members" | grep -v "substrate/node\"")
members=$(echo "$members" | grep -v "orchestration\"")
# Don't check the tests
members=$(echo "$members" | grep -v "mini\"")
members=$(echo "$members" | grep -v "tests/")
# Remove the trailing comma by replacing the last line's "," with ""
members=$(echo "$members" | sed "$(($(echo "$members" | wc -l) - 1))s/\,//")
echo $members | jq -r ".[]" | while read -r member; do
check_msrv $member
correct=$?
if [ $correct -ne 0 ]; then
return $correct
fi
done
}
check_workspace
slither:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- name: Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Slither
run: |
python3 -m pip install slither-analyzer==0.11.3
slither ./networks/ethereum/schnorr/contracts/Schnorr.sol
slither --include-paths ./networks/ethereum/schnorr/contracts ./networks/ethereum/schnorr/contracts/tests/Schnorr.sol
slither processor/ethereum/deployer/contracts/Deployer.sol
slither processor/ethereum/erc20/contracts/IERC20.sol
cp networks/ethereum/schnorr/contracts/Schnorr.sol processor/ethereum/router/contracts/
cp processor/ethereum/erc20/contracts/IERC20.sol processor/ethereum/router/contracts/
cd processor/ethereum/router/contracts
slither Router.sol
shellcheck:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- name: shellcheck
run: |
sudo apt install -y shellcheck
find . -name "*.sh" | while read -r script; do
shellcheck --enable=all --shell=sh --severity=info $script
done


@@ -27,7 +27,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies


@@ -17,7 +17,7 @@ jobs:
   test-common:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies


@@ -9,7 +9,7 @@ jobs:
   name: Update nightly
   runs-on: ubuntu-latest
   steps:
-    - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+    - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
      with:
        submodules: "recursive"


@@ -21,7 +21,7 @@ jobs:
   test-networks:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Test Dependencies
         uses: ./.github/actions/test-dependencies

@@ -30,7 +30,6 @@ jobs:
       run: |
         GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
           -p bitcoin-serai \
-          -p build-solidity-contracts \
-          -p ethereum-schnorr-contract \
           -p alloy-simple-request-transport \
+          -p ethereum-serai \
           -p serai-ethereum-relayer \


@@ -23,23 +23,13 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies
-      - name: Get nightly version to use
-        id: nightly
-        shell: bash
-        run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT
       - name: Install RISC-V Toolchain
-        run: |
-          sudo apt update
-          sudo apt install -y gcc-riscv64-unknown-elf gcc-multilib
-          rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal --component rust-src --target riscv32imac-unknown-none-elf
+        run: sudo apt update && sudo apt install -y gcc-riscv64-unknown-elf gcc-multilib && rustup target add riscv32imac-unknown-none-elf
       - name: Verify no-std builds
-        run: |
-          CFLAGS=-I/usr/include cargo +${{ steps.nightly.outputs.version }} build --target riscv32imac-unknown-none-elf -Z build-std=core -p serai-no-std-tests
-          CFLAGS=-I/usr/include cargo +${{ steps.nightly.outputs.version }} build --target riscv32imac-unknown-none-elf -Z build-std=core,alloc -p serai-no-std-tests --features "alloc"
+        run: CFLAGS=-I/usr/include cargo build --target riscv32imac-unknown-none-elf -p serai-no-std-tests


@@ -46,16 +46,16 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+        uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Setup Ruby
-        uses: ruby/setup-ruby@8aeb6ff8030dd539317f8e1769a044873b56ea71 # 1.268.0
+        uses: ruby/setup-ruby@44511735964dcb71245e7e55f72539531f7bc0eb
         with:
           bundler-cache: true
           cache-version: 0
           working-directory: "${{ github.workspace }}/docs"
       - name: Setup Pages
         id: pages
-        uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b # 5.0.0
+        uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b
       - name: Build with Jekyll
         run: cd ${{ github.workspace }}/docs && bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
         env:

@@ -70,11 +70,11 @@ jobs:
       - name: Buld Rust docs
         run: |
           rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-docs
-          RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --no-deps --all-features
+          RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --all-features
           mv target/doc docs/_site/rust
       - name: Upload artifact
-        uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b # 4.0.0
+        uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b
         with:
           path: "docs/_site/"

@@ -88,4 +88,4 @@ jobs:
     steps:
       - name: Deploy to GitHub Pages
         id: deployment
-        uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # 4.0.5
+        uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e


@@ -31,7 +31,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies


@@ -27,10 +27,10 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies
       - name: Run Reproducible Runtime tests
-        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-reproducible-runtime-tests -- --nocapture
+        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-reproducible-runtime-tests


@@ -1,166 +0,0 @@
name: Check Update Default Stack Size
on:
push:
paths:
- "orchestration/increase_default_stack_size.sh"
pull_request:
paths:
- "orchestration/increase_default_stack_size.sh"
workflow_dispatch:
# Also run weekly to ensure this doesn't inadvertently decay
schedule:
- cron: "0 0 * * 1"
jobs:
stack_size:
strategy:
matrix:
os: [ubuntu-latest, ubuntu-24.04, ubuntu-22.04, macos-15-intel, macos-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- name: Install Go
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # 6.1.0
with:
go-version: stable
- name: Monero Daemon Cache
id: cache-monerod
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # 4.2.4
with:
path: monerod
key: stack-size-monerod
- name: Download the Monero Daemon
if: steps.cache-monerod.outputs.cache-hit != 'true'
run: |
# We explicitly download the Linux binary as this script executes over an ELF binary
wget https://downloads.getmonero.org/cli/monero-linux-x64-v0.18.4.4.tar.bz2
tar -xvf monero-linux-x64-v0.18.4.4.tar.bz2
mv $(find . -name monerod) .
- name: Verify expected behavior
shell: bash
run: |
STACK=$((8 * 1024 * 1024))
OS=${{ runner.os }}
if [ "$OS" = "Linux" ]; then
sudo apt update -y
sudo apt install -y ksh bash dash zsh busybox posh mksh yash
sudo ln -s "$(which busybox)" /usr/bin/ash
sudo ln -s "$(which busybox)" /usr/bin/hush
wget http://ftp.us.debian.org/debian/pool/main/g/gash/gash_0.3.1-1_amd64.deb
sudo apt install ./gash_0.3.1-1_amd64.deb
SHELLS="sh ksh bash dash zsh ash hush posh mksh lksh gash yash"
fi
if [ "$OS" = "macOS" ]; then
brew install binutils # `readelf`
# `binutils` is not placed within the path, so find its
# `readelf` bin and manually move it into our path
HOMEBREW_ROOT_PATH=/opt/homebrew # Apple Silicon
if [ $(uname -m) = "x86_64" ]; then HOMEBREW_ROOT_PATH=/usr/local; fi # Intel
sudo cp $(find "$HOMEBREW_ROOT_PATH" -name readelf) /usr/local/bin/
# macOS has the benefit of packaging `oksh`, `osh`, and having distinct core tools
# TODO: `posh` is packaged but doesn't work: https://github.com/serai-dex/serai/issues/703
brew install ksh93 bash dash-shell zsh mksh oksh yash oils-for-unix
SHELLS="sh ksh bash dash zsh mksh oksh yash osh"
# macOS also has the benefit of packaging (via MacPorts) `mrsh`,
# which explicitly attempts to be exactly POSIX, without any extensions.
# We first have to install MacPorts, the easiest method being via source.
curl -O https://distfiles.macports.org/MacPorts/MacPorts-2.11.6.tar.bz2
tar xf MacPorts-2.11.6.tar.bz2
cd MacPorts-2.11.6
./configure
make
sudo make install
cd ..
PATH=$PATH:/opt/local/bin
sudo port -v selfupdate
# Now, we install `mrsh`
# TODO: https://github.com/serai-dex/serai/issues/704
# sudo port install mrsh
# SHELLS="$SHELLS mrsh"
fi
# Install shells available via `cargo`
cargo install brush-shell
SHELLS="$SHELLS brush"
# We would also test with `nsh` here if not for https://github.com/nuta/nsh/issues/49
# cargo install nsh
# SHELLS="$SHELLS nsh"
# Install shells available via `go`
# TODO: https://github.com/u-root/u-root/issues/3474
# GOBIN=/usr/local/bin go install github.com/u-root/u-root/cmds/core/gosh@latest
# SHELLS="$SHELLS gosh"
# Patch with `muslstack`
cp monerod monerod-muslstack
GOBIN=$(pwd) go install github.com/yaegashi/muslstack@d19cc5866abce3ca59dfc1666df7cc97097d0933
./muslstack -s "$STACK" ./monerod-muslstack
# Patch with `chelf`, which only works on a Linux host (due to requiring `elf.h`)
# TODO: Install the header on macOS so `chelf` may be used as the source of truth
if [ "$OS" = "Linux" ]; then
cp monerod monerod-chelf
git clone https://github.com/Gottox/chelf
cd chelf
git checkout b2994186cea7b7d61a588fd06c1cc1ae75bcc21a
make
./chelf -s "$STACK" ../monerod-chelf
cd ..
fi
# Run our script with all installed shells
for shell in $SHELLS; do
echo "Executing \`$shell\`"
cp monerod monerod-idss-$shell
ln -s "$(which $shell)" sh
./sh ./orchestration/increase_default_stack_size.sh monerod-idss-$shell
rm ./sh
done
# Verify they all had the same result
sha256() {
sha256sum "$1" | cut -d' ' -f1
}
CHELF=$(sha256 monerod-muslstack)
find . -name "monerod-*" | while read -r bin; do
BIN=$(sha256 "$bin")
if [ ! "$CHELF" = "$BIN" ]; then
echo "Different artifact between \`monerod-muslstack\` ($CHELF) and \`$bin\` ($BIN)"
exit 1
fi
done
# Verify the integrity of the result
read_stack() {
STACK_INFO=$(readelf "$1" -l | grep STACK -A1)
MEMSZ=$(printf "%s\n" "$STACK_INFO" | tail -n1 | sed -E s/^[[:space:]]*//g | cut -f2 -d' ')
printf "%i" $((MEMSZ))
}
INITIAL_STACK=$(read_stack monerod)
if [ "$INITIAL_STACK" -ne "0" ]; then
echo "Initial \`PT_GNU_STACK\` wasn't 0"
exit 2
fi
UPDATED_STACK=$(read_stack monerod-muslstack)
if [ "$UPDATED_STACK" -ne "$STACK" ]; then
echo "Updated \`PT_GNU_STACK\` ($UPDATED_STACK) wasn't 8 MB ($STACK)"
exit 3
fi
# Only one byte should be different due to the bit pattern of 8 MB
BYTES_DIFFERENT=$(cmp -l monerod monerod-muslstack | wc -l || true)
if [ "$BYTES_DIFFERENT" -ne 1 ]; then
echo "More than one byte was different between the two binaries"
exit 4
fi
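
The `read_stack` helper above recovers `PT_GNU_STACK`'s `p_memsz` by parsing `readelf` output. The same check can be made directly against the program headers of a little-endian ELF64 binary; a standalone sketch (offsets per the ELF64 spec, not code from this repository):

  use std::{env, fs};

  // PT_GNU_STACK's p_type value, per the GNU ELF extensions
  const PT_GNU_STACK: u32 = 0x6474_e551;

  fn u16_at(b: &[u8], o: usize) -> u16 { u16::from_le_bytes([b[o], b[o + 1]]) }
  fn u32_at(b: &[u8], o: usize) -> u32 { u32::from_le_bytes(b[o .. o + 4].try_into().unwrap()) }
  fn u64_at(b: &[u8], o: usize) -> u64 { u64::from_le_bytes(b[o .. o + 8].try_into().unwrap()) }

  fn main() {
    let elf = fs::read(env::args().nth(1).expect("path to an ELF64 binary")).unwrap();
    assert_eq!(elf[.. 4], *b"\x7fELF", "not an ELF file");
    assert_eq!(elf[4], 2, "not a 64-bit ELF");
    // Locate the program headers (offsets per the ELF64 file header, little-endian)
    let phoff = u64_at(&elf, 0x20) as usize;
    let phentsize = usize::from(u16_at(&elf, 0x36));
    let phnum = usize::from(u16_at(&elf, 0x38));
    for i in 0 .. phnum {
      let ph = &elf[phoff + (i * phentsize) ..];
      if u32_at(ph, 0) == PT_GNU_STACK {
        // p_memsz sits at offset 0x28 within an ELF64 program header
        println!("PT_GNU_STACK p_memsz = {}", u64_at(ph, 0x28));
      }
    }
  }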


@@ -29,7 +29,7 @@ jobs:
   test-infra:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies

@@ -39,34 +39,9 @@ jobs:
         GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
           -p serai-message-queue \
           -p serai-processor-messages \
-          -p serai-processor-key-gen \
-          -p serai-processor-view-keys \
-          -p serai-processor-frost-attempt-manager \
-          -p serai-processor-primitives \
-          -p serai-processor-scanner \
-          -p serai-processor-scheduler-primitives \
-          -p serai-processor-utxo-scheduler-primitives \
-          -p serai-processor-utxo-scheduler \
-          -p serai-processor-transaction-chaining-scheduler \
-          -p serai-processor-smart-contract-scheduler \
-          -p serai-processor-signers \
-          -p serai-processor-bin \
-          -p serai-bitcoin-processor \
-          -p serai-processor-ethereum-primitives \
-          -p serai-processor-ethereum-test-primitives \
-          -p serai-processor-ethereum-deployer \
-          -p serai-processor-ethereum-router \
-          -p serai-processor-ethereum-erc20 \
-          -p serai-ethereum-processor \
-          -p serai-monero-processor \
+          -p serai-processor \
           -p tendermint-machine \
-          -p tributary-sdk \
-          -p serai-cosign-types \
-          -p serai-cosign \
-          -p serai-coordinator-substrate \
-          -p serai-coordinator-tributary \
-          -p serai-coordinator-p2p \
-          -p serai-coordinator-libp2p-p2p \
+          -p tributary-chain \
           -p serai-coordinator \
           -p serai-orchestrator \
           -p serai-docker-tests

@@ -74,7 +49,7 @@ jobs:
   test-substrate:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies

@@ -83,33 +58,31 @@ jobs:
       run: |
         GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
           -p serai-primitives \
-          -p serai-abi \
-          -p substrate-median \
-          -p serai-core-pallet \
+          -p serai-coins-primitives \
           -p serai-coins-pallet \
-          -p serai-validator-sets-pallet \
-          -p serai-signals-pallet \
           -p serai-dex-pallet \
+          -p serai-validator-sets-primitives \
+          -p serai-validator-sets-pallet \
+          -p serai-genesis-liquidity-primitives \
           -p serai-genesis-liquidity-pallet \
-          -p serai-economic-security-pallet \
+          -p serai-emissions-primitives \
           -p serai-emissions-pallet \
+          -p serai-economic-security-pallet \
+          -p serai-in-instructions-primitives \
           -p serai-in-instructions-pallet \
+          -p serai-signals-primitives \
+          -p serai-signals-pallet \
+          -p serai-abi \
           -p serai-runtime \
-          -p serai-node \
-          -p serai-substrate-tests
+          -p serai-node
   test-serai-client:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies
       - name: Run Tests
-        run: |
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-serai
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-bitcoin
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-ethereum
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-monero
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client
+        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client

.gitignore

@@ -1,13 +1,7 @@
 target
-# Don't commit any `Cargo.lock` which aren't the workspace's
-Cargo.lock
-!/Cargo.lock
-# Don't commit any `Dockerfile`, as they're auto-generated, except the only one which isn't
 Dockerfile
-Dockerfile.fast-epoch
 !orchestration/runtime/Dockerfile
 .test-logs
 .vscode

Cargo.lock

File diff suppressed because it is too large.


@@ -1,12 +1,18 @@
 [workspace]
 resolver = "2"
 members = [
+  # std patches
+  "patches/matches",
+  # Rewrites/redirects
+  "patches/option-ext",
+  "patches/directories-next",
   "common/std-shims",
   "common/zalloc",
   "common/patchable-async-sleep",
   "common/db",
   "common/env",
-  "common/task",
   "common/request",

@@ -18,87 +24,62 @@ members = [
   "crypto/ciphersuite/kp256",
   "crypto/multiexp",
   "crypto/schnorr",
-  "crypto/prime-field",
-  "crypto/short-weierstrass",
-  "crypto/secq256k1",
-  "crypto/embedwards25519",
+  "crypto/dleq",
   "crypto/dkg",
   "crypto/dkg/recovery",
   "crypto/dkg/dealer",
-  "crypto/dkg/promote",
   "crypto/dkg/musig",
-  "crypto/dkg/evrf",
+  "crypto/dkg/pedpop",
   "crypto/frost",
   "crypto/schnorrkel",
   "networks/bitcoin",
-  "networks/ethereum/build-contracts",
-  "networks/ethereum/schnorr",
   "networks/ethereum/alloy-simple-request-transport",
+  "networks/ethereum",
   "networks/ethereum/relayer",
   "message-queue",
   "processor/messages",
-  "processor/key-gen",
-  "processor/view-keys",
-  "processor/frost-attempt-manager",
-  "processor/primitives",
-  "processor/scanner",
-  "processor/scheduler/primitives",
-  "processor/scheduler/utxo/primitives",
-  "processor/scheduler/utxo/standard",
-  "processor/scheduler/utxo/transaction-chaining",
-  "processor/scheduler/smart-contract",
-  "processor/signers",
-  "processor/bin",
-  "processor/bitcoin",
-  "processor/ethereum/primitives",
-  "processor/ethereum/test-primitives",
-  "processor/ethereum/deployer",
-  "processor/ethereum/erc20",
-  "processor/ethereum/router",
-  "processor/ethereum",
-  "processor/monero",
-  "coordinator/tributary-sdk/tendermint",
-  "coordinator/tributary-sdk",
-  "coordinator/cosign/types",
-  "coordinator/cosign",
-  "coordinator/substrate",
+  "processor",
+  "coordinator/tributary/tendermint",
   "coordinator/tributary",
-  "coordinator/p2p",
-  "coordinator/p2p/libp2p",
   "coordinator",
   "substrate/primitives",
+  "substrate/coins/primitives",
+  "substrate/coins/pallet",
+  "substrate/dex/pallet",
+  "substrate/validator-sets/primitives",
+  "substrate/validator-sets/pallet",
+  "substrate/genesis-liquidity/primitives",
+  "substrate/genesis-liquidity/pallet",
+  "substrate/emissions/primitives",
+  "substrate/emissions/pallet",
+  "substrate/economic-security/pallet",
+  "substrate/in-instructions/primitives",
+  "substrate/in-instructions/pallet",
+  "substrate/signals/primitives",
+  "substrate/signals/pallet",
   "substrate/abi",
-  "substrate/median",
-  "substrate/core",
-  "substrate/coins",
-  "substrate/validator-sets",
-  "substrate/signals",
-  "substrate/dex",
-  "substrate/genesis-liquidity",
-  "substrate/economic-security",
-  "substrate/emissions",
-  "substrate/in-instructions",
   "substrate/runtime",
   "substrate/node",
-  "substrate/client/serai",
-  "substrate/client/bitcoin",
-  "substrate/client/ethereum",
-  "substrate/client/monero",
   "substrate/client",
   "orchestration",

@@ -109,106 +90,46 @@ members = [
   "tests/docker",
   "tests/message-queue",
-  # TODO "tests/processor",
-  # TODO "tests/coordinator",
-  "tests/substrate",
-  # TODO "tests/full-stack",
+  "tests/processor",
+  "tests/coordinator",
+  "tests/full-stack",
   "tests/reproducible-runtime",
 ]

-[profile.dev]
-panic = "abort"
-overflow-checks = true
-
-[profile.release]
-panic = "abort"
-overflow-checks = true
-
-# These do not respect the `panic` configuration value, so we don't provide them
-[profile.test]
-# panic = "abort" # https://github.com/rust-lang/issues/67650
-overflow-checks = true
-[profile.bench]
-overflow-checks = true
-
-[profile.dev.package]
 # Always compile Monero (and a variety of dependencies) with optimizations due
 # to the extensive operations required for Bulletproofs
+[profile.dev.package]
 subtle = { opt-level = 3 }
-curve25519-dalek = { opt-level = 3 }
-sha3 = { opt-level = 3 }
-blake2 = { opt-level = 3 }
 ff = { opt-level = 3 }
 group = { opt-level = 3 }
 crypto-bigint = { opt-level = 3 }
+curve25519-dalek = { opt-level = 3 }
 dalek-ff-group = { opt-level = 3 }
+minimal-ed448 = { opt-level = 3 }
 multiexp = { opt-level = 3 }
-monero-io = { opt-level = 3 }
-monero-primitives = { opt-level = 3 }
-monero-ed25519 = { opt-level = 3 }
-monero-mlsag = { opt-level = 3 }
-monero-clsag = { opt-level = 3 }
-monero-borromean = { opt-level = 3 }
-monero-bulletproofs-generators = { opt-level = 3 }
-monero-bulletproofs = { opt-level = 3 }
 monero-oxide = { opt-level = 3 }
-
-# Always compile the eVRF DKG tree with optimizations as well
-secp256k1 = { opt-level = 3 }
-secq256k1 = { opt-level = 3 }
-embedwards25519 = { opt-level = 3 }
-generalized-bulletproofs = { opt-level = 3 }
-generalized-bulletproofs-circuit-abstraction = { opt-level = 3 }
-generalized-bulletproofs-ec-gadgets = { opt-level = 3 }
-
-# revm also effectively requires being built with optimizations
-revm = { opt-level = 3 }
-revm-bytecode = { opt-level = 3 }
-revm-context = { opt-level = 3 }
-revm-context-interface = { opt-level = 3 }
-revm-database = { opt-level = 3 }
-revm-database-interface = { opt-level = 3 }
-revm-handler = { opt-level = 3 }
-revm-inspector = { opt-level = 3 }
-revm-interpreter = { opt-level = 3 }
-revm-precompile = { opt-level = 3 }
-revm-primitives = { opt-level = 3 }
-revm-state = { opt-level = 3 }
+
+[profile.release]
+panic = "unwind"
+overflow-checks = true

 [patch.crates-io]
-# Point to empty crates for crates unused within in our tree
-alloy-eip2124 = { path = "patches/ethereum/alloy-eip2124" }
-ark-ff-3 = { package = "ark-ff", path = "patches/ethereum/ark-ff-0.3" }
-ark-ff-4 = { package = "ark-ff", path = "patches/ethereum/ark-ff-0.4" }
-c-kzg = { path = "patches/ethereum/c-kzg" }
-fastrlp-3 = { package = "fastrlp", path = "patches/ethereum/fastrlp-0.3" }
-fastrlp-4 = { package = "fastrlp", path = "patches/ethereum/fastrlp-0.4" }
-primitive-types-12 = { package = "primitive-types", path = "patches/ethereum/primitive-types-0.12" }
-rlp = { path = "patches/ethereum/rlp" }
-secp256k1-30 = { package = "secp256k1", path = "patches/ethereum/secp256k1-0.30" }
-
-# Dependencies from monero-oxide which originate from within our own tree, potentially shimmed to account for deviations since publishing
-std-shims = { path = "patches/std-shims" }
-simple-request = { path = "patches/simple-request" }
-multiexp = { path = "crypto/multiexp" }
+# Dependencies from monero-oxide which originate from within our own tree
+std-shims = { path = "common/std-shims" }
+simple-request = { path = "common/request" }
+dalek-ff-group = { path = "crypto/dalek-ff-group" }
 flexible-transcript = { path = "crypto/transcript" }
-ciphersuite = { path = "patches/ciphersuite" }
-dalek-ff-group = { path = "patches/dalek-ff-group" }
-minimal-ed448 = { path = "crypto/ed448" }
 modular-frost = { path = "crypto/frost" }
-
-# Patches due to `std` now including the required functionality
-is_terminal_polyfill = { path = "patches/is_terminal_polyfill" }
-lazy_static = { path = "patches/lazy_static" }
-# This has a non-deprecated `std` alternative since Rust's 2024 edition
-home = { path = "patches/home" }
-
-# Updates to the latest version
-darling = { path = "patches/darling" }
-thiserror = { path = "patches/thiserror" }
+# https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
+lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }
+# `matches` has an `std` alternative
+matches = { path = "patches/matches" }

 # directories-next was created because directories was unmaintained
 # directories-next is now unmaintained while directories is maintained

@@ -218,19 +139,11 @@ thiserror = { path = "patches/thiserror" }
 option-ext = { path = "patches/option-ext" }
 directories-next = { path = "patches/directories-next" }
-
-# Patch from a fork back to upstream
-parity-bip39 = { path = "patches/parity-bip39" }
-
-# Patch to include `FromUniformBytes<64>` over `Scalar`
-k256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" }
-p256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" }

 [workspace.lints.clippy]
-incompatible_msrv = "allow" # Manually verified with a GitHub workflow
-manual_is_multiple_of = "allow"
+uninlined_format_args = "allow" # TODO
 unwrap_or_default = "allow"
-map_unwrap_or = "allow"
-needless_continue = "allow"
+manual_is_multiple_of = "allow"
+incompatible_msrv = "allow" # Manually verified with a GitHub workflow
 borrow_as_ptr = "deny"
 cast_lossless = "deny"
 cast_possible_truncation = "deny"

@@ -255,12 +168,14 @@ large_stack_arrays = "deny"
 linkedlist = "deny"
 macro_use_imports = "deny"
 manual_instant_elapsed = "deny"
-manual_let_else = "deny"
+# TODO manual_let_else = "deny"
 manual_ok_or = "deny"
 manual_string_new = "deny"
+map_unwrap_or = "deny"
 match_bool = "deny"
 match_same_arms = "deny"
 missing_fields_in_debug = "deny"
+# TODO needless_continue = "deny"
 needless_pass_by_value = "deny"
 ptr_cast_constness = "deny"
 range_minus_one = "deny"

@@ -268,9 +183,7 @@ range_plus_one = "deny"
 redundant_closure_for_method_calls = "deny"
 redundant_else = "deny"
 string_add_assign = "deny"
-string_slice = "deny"
-unchecked_time_subtraction = "deny"
-uninlined_format_args = "deny"
+unchecked_duration_subtraction = "deny"
 unnecessary_box_returns = "deny"
 unnecessary_join = "deny"
 unnecessary_wraps = "deny"

@@ -279,5 +192,19 @@ unused_async = "deny"
 unused_self = "deny"
 zero_sized_map_values = "deny"
+# TODO: These were incurred when updating Rust as necessary for compilation, yet aren't being fixed
+# at this time due to the impacts it'd have throughout the repository (when this isn't actively the
+# primary branch, `next` is)
+needless_continue = "allow"
+needless_lifetimes = "allow"
+useless_conversion = "allow"
+empty_line_after_doc_comments = "allow"
+manual_div_ceil = "allow"
+manual_let_else = "allow"
+unnecessary_map_or = "allow"
+result_large_err = "allow"
+unneeded_struct_pattern = "allow"

 [workspace.lints.rust]
-unused = "allow" # TODO: https://github.com/rust-lang/rust/issues/147648
+mismatched_lifetime_syntaxes = "allow"
+unused_attributes = "allow"
+unused-parens = "allow"


@@ -1,14 +0,0 @@
# Trail of Bits Ethereum Contracts Audit, June 2025
This audit included:
- Our Schnorr contract and associated library (/networks/ethereum/schnorr)
- Our Ethereum primitives library (/processor/ethereum/primitives)
- Our Deployer contract and associated library (/processor/ethereum/deployer)
- Our ERC20 library (/processor/ethereum/erc20)
- Our Router contract and associated library (/processor/ethereum/router)
It encompasses up to commit 4e0c58464fc4673623938335f06e2e9ea96ca8dd.
Please see
https://github.com/trailofbits/publications/blob/30c4fa3ebf39ff8e4d23ba9567344ec9691697b5/reviews/2025-04-serai-dex-security-review.pdf
for the actual report.


@@ -1,50 +0,0 @@
# eVRF DKG
In 2024, the [eVRF paper](https://eprint.iacr.org/2024/397) was published to
the IACR preprint server. Within it was a one-round unbiased DKG and a
one-round unbiased threshold DKG. Unfortunately, both simply describe
communication of the secret shares as 'Alice sends $s_b$ to Bob'. This causes,
in practice, the need for an additional round of communication to occur where
all participants confirm they received their secret shares.
Within Serai, it was posited to use the same premises as the DDH eVRF itself to
achieve a verifiable encryption scheme. This allows the secret shares to be
posted to any 'bulletin board' (such as a blockchain) and for all observers to
confirm:
- A participant participated
- The secret shares sent can be received by the intended recipient so long as
they can access the bulletin board
Additionally, Serai desired a robust scheme (albeit with a biased key as the
output, which is fine for our purposes). Accordingly, our implementation
instantiates the threshold eVRF DKG from the eVRF paper, with our own proposal
for verifiable encryption, with the caller allowed to decide the set of
participants. They may:
- Select everyone, collapsing to the non-threshold unbiased DKG from the eVRF
paper
- Select a pre-determined set, collapsing to the threshold unbiased DKG from
the eVRF paper
- Select a post-determined set (with any solution for the Common Subset
problem), allowing achieving a robust threshold biased DKG
Note that the eVRF paper proposes using the eVRF to sample coefficients yet
this is unnecessary when the resulting key will be biased. Any proof of
knowledge for the coefficients, as necessary for their extraction within the
security proofs, would be sufficient.
MAGIC Grants contracted HashCloak to formalize Serai's proposal for a DKG and
provide proofs for its security. This resulted in
[this paper](<./Security Proofs.pdf>).
Our implementation itself is then built on top of the audited
[`generalized-bulletproofs`](https://github.com/kayabaNerve/monero-oxide/tree/generalized-bulletproofs/audits/crypto/generalized-bulletproofs)
and
[`generalized-bulletproofs-ec-gadgets`](https://github.com/monero-oxide/monero-oxide/tree/fcmp%2B%2B/audits/fcmps).
Note we do not use the originally premised DDH eVRF but rather the one premised
on elliptic curve divisors, the methodology of which is commented on
[here](https://github.com/monero-oxide/monero-oxide/tree/fcmp%2B%2B/audits/divisors).
Our implementation itself is unaudited at this time however.


@@ -1,13 +1,13 @@
 [package]
 name = "serai-db"
-version = "0.1.1"
+version = "0.1.0"
 description = "A simple database trait and backends for it"
 license = "MIT"
 repository = "https://github.com/serai-dex/serai/tree/develop/common/db"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
-rust-version = "1.77"
+rust-version = "1.65"
 [package.metadata.docs.rs]
 all-features = true

@@ -17,7 +17,7 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true
 [dependencies]
-parity-db = { version = "0.5", default-features = false, features = ["arc"], optional = true }
+parity-db = { version = "0.4", default-features = false, optional = true }
 rocksdb = { version = "0.24", default-features = false, features = ["zstd"], optional = true }
 [features]


@@ -1,6 +1,6 @@
 MIT License
-Copyright (c) 2022-2025 Luke Parker
+Copyright (c) 2022-2023 Luke Parker
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal


@@ -1,8 +0,0 @@
# Serai DB
An inefficient, minimal abstraction around databases.
The abstraction offers `get`, `put`, and `del` with helper functions and macros
built on top. Database iteration is not offered, forcing the caller to manually
implement indexing schemes. This ensures wide compatibility across abstracted
databases.


@@ -15,7 +15,7 @@ pub fn serai_db_key(
 ///
 /// Creates a unit struct and a default implementation for the `key`, `get`, and `set`. The macro
 /// uses a syntax similar to defining a function. Parameters are concatenated to produce a key,
-/// they must be `borsh` serializable. The return type is used to auto (de)serialize the database
+/// they must be `scale` encodable. The return type is used to auto encode and decode the database
 /// value bytes using `borsh`.
 ///
 /// # Arguments
@@ -38,65 +38,32 @@ pub fn serai_db_key(
#[macro_export] #[macro_export]
macro_rules! create_db { macro_rules! create_db {
($db_name: ident { ($db_name: ident {
$( $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*
$field_name: ident:
$(<$($generic_name: tt: $generic_type: tt),+>)?(
$($arg: ident: $arg_type: ty),*
) -> $field_type: ty$(,)?
)*
}) => { }) => {
$( $(
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub(crate) struct $field_name$( pub(crate) struct $field_name;
<$($generic_name: $generic_type),+> impl $field_name {
)?$(
(core::marker::PhantomData<($($generic_name),+)>)
)?;
impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? {
pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> { pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> {
use scale::Encode;
$crate::serai_db_key( $crate::serai_db_key(
stringify!($db_name).as_bytes(), stringify!($db_name).as_bytes(),
stringify!($field_name).as_bytes(), stringify!($field_name).as_bytes(),
&borsh::to_vec(&($($arg),*)).unwrap(), ($($arg),*).encode()
) )
} }
pub(crate) fn set( pub(crate) fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type) {
txn: &mut impl DbTxn let key = $field_name::key($($arg),*);
$(, $arg: $arg_type)*,
data: &$field_type
) {
let key = Self::key($($arg),*);
txn.put(&key, borsh::to_vec(data).unwrap()); txn.put(&key, borsh::to_vec(data).unwrap());
} }
pub(crate) fn get( pub(crate) fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> {
getter: &impl Get, getter.get($field_name::key($($arg),*)).map(|data| {
$($arg: $arg_type),*
) -> Option<$field_type> {
getter.get(Self::key($($arg),*)).map(|data| {
borsh::from_slice(data.as_ref()).unwrap() borsh::from_slice(data.as_ref()).unwrap()
}) })
} }
// Returns a PhantomData of all generic types so if the generic was only used in the value,
// not the keys, this doesn't have unused generic types
#[allow(dead_code)] #[allow(dead_code)]
pub(crate) fn del( pub(crate) fn del(txn: &mut impl DbTxn $(, $arg: $arg_type)*) {
txn: &mut impl DbTxn txn.del(&$field_name::key($($arg),*))
$(, $arg: $arg_type)*
) -> core::marker::PhantomData<($($($generic_name),+)?)> {
txn.del(&Self::key($($arg),*));
core::marker::PhantomData
}
pub(crate) fn take(
txn: &mut impl DbTxn
$(, $arg: $arg_type)*
) -> Option<$field_type> {
let key = Self::key($($arg),*);
let res = txn.get(&key).map(|data| borsh::from_slice(data.as_ref()).unwrap());
if res.is_some() {
txn.del(key);
}
res
} }
} }
)* )*
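
As a sketch of the new syntax above (the database and field names here are hypothetical), an invocation and the helpers it generates:

use serai_db::{Get, DbTxn, Db, create_db};

create_db!(ExampleDb {
  // Expands to a unit struct with `key`, `set`, `get`, `del`, and `take`
  LastScannedBlock: (network: u8) -> u64,
});

fn example(db: &mut impl Db) {
  let mut txn = db.txn();
  LastScannedBlock::set(&mut txn, 0, &123);
  assert_eq!(LastScannedBlock::get(&txn, 0), Some(123));
  // `take` reads the value and deletes it in one call
  assert_eq!(LastScannedBlock::take(&mut txn, 0), Some(123));
  assert_eq!(LastScannedBlock::get(&txn, 0), None);
  txn.commit();
}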
@@ -106,30 +73,19 @@ macro_rules! create_db {
#[macro_export] #[macro_export]
macro_rules! db_channel { macro_rules! db_channel {
($db_name: ident { ($db_name: ident {
$($field_name: ident: $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*
$(<$($generic_name: tt: $generic_type: tt),+>)?(
$($arg: ident: $arg_type: ty),*
) -> $field_type: ty$(,)?
)*
}) => { }) => {
$( $(
create_db! { create_db! {
$db_name { $db_name {
$field_name: $(<$($generic_name: $generic_type),+>)?( $field_name: ($($arg: $arg_type,)* index: u32) -> $field_type,
$($arg: $arg_type,)*
index: u32
) -> $field_type
} }
} }
impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? { impl $field_name {
pub(crate) fn send( pub(crate) fn send(txn: &mut impl DbTxn $(, $arg: $arg_type)*, value: &$field_type) {
txn: &mut impl DbTxn
$(, $arg: $arg_type)*
, value: &$field_type
) {
// Use index 0 to store the amount of messages // Use index 0 to store the amount of messages
let messages_sent_key = Self::key($($arg,)* 0); let messages_sent_key = $field_name::key($($arg),*, 0);
let messages_sent = txn.get(&messages_sent_key).map(|counter| { let messages_sent = txn.get(&messages_sent_key).map(|counter| {
u32::from_le_bytes(counter.try_into().unwrap()) u32::from_le_bytes(counter.try_into().unwrap())
}).unwrap_or(0); }).unwrap_or(0);
@@ -140,35 +96,19 @@ macro_rules! db_channel {
// at the same time // at the same time
let index_to_use = messages_sent + 2; let index_to_use = messages_sent + 2;
Self::set(txn, $($arg,)* index_to_use, value); $field_name::set(txn, $($arg),*, index_to_use, value);
} }
pub(crate) fn peek( pub(crate) fn try_recv(txn: &mut impl DbTxn $(, $arg: $arg_type)*) -> Option<$field_type> {
getter: &impl Get let messages_recvd_key = $field_name::key($($arg),*, 1);
$(, $arg: $arg_type)*
) -> Option<$field_type> {
let messages_recvd_key = Self::key($($arg,)* 1);
let messages_recvd = getter.get(&messages_recvd_key).map(|counter| {
u32::from_le_bytes(counter.try_into().unwrap())
}).unwrap_or(0);
let index_to_read = messages_recvd + 2;
Self::get(getter, $($arg,)* index_to_read)
}
pub(crate) fn try_recv(
txn: &mut impl DbTxn
$(, $arg: $arg_type)*
) -> Option<$field_type> {
let messages_recvd_key = Self::key($($arg,)* 1);
let messages_recvd = txn.get(&messages_recvd_key).map(|counter| { let messages_recvd = txn.get(&messages_recvd_key).map(|counter| {
u32::from_le_bytes(counter.try_into().unwrap()) u32::from_le_bytes(counter.try_into().unwrap())
}).unwrap_or(0); }).unwrap_or(0);
let index_to_read = messages_recvd + 2; let index_to_read = messages_recvd + 2;
let res = Self::get(txn, $($arg,)* index_to_read); let res = $field_name::get(txn, $($arg),*, index_to_read);
if res.is_some() { if res.is_some() {
Self::del(txn, $($arg,)* index_to_read); $field_name::del(txn, $($arg),*, index_to_read);
txn.put(&messages_recvd_key, (messages_recvd + 1).to_le_bytes()); txn.put(&messages_recvd_key, (messages_recvd + 1).to_le_bytes());
} }
res res

View File

@@ -14,43 +14,26 @@ mod parity_db;
#[cfg(feature = "parity-db")] #[cfg(feature = "parity-db")]
pub use parity_db::{ParityDb, new_parity_db}; pub use parity_db::{ParityDb, new_parity_db};
/// An object implementing `get`. /// An object implementing get.
pub trait Get { pub trait Get {
/// Get a value from the database.
fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>>; fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>>;
} }
/// An atomic database transaction. /// An atomic database operation.
///
/// A transaction is only required to atomically commit. It is not required that two `Get` calls
/// made with the same transaction return the same result, if another transaction wrote to that
/// key.
///
/// If two transactions are created, and both write (including deletions) to the same key, behavior
/// is undefined. The transaction may block, deadlock, panic, overwrite one of the two values
/// randomly, or any other action, at time of write or at time of commit.
#[must_use] #[must_use]
pub trait DbTxn: Send + Get { pub trait DbTxn: Send + Get {
/// Write a value to this key.
fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>); fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>);
/// Delete the value from this key.
fn del(&mut self, key: impl AsRef<[u8]>); fn del(&mut self, key: impl AsRef<[u8]>);
/// Commit this transaction.
fn commit(self); fn commit(self);
} }
/// A database supporting atomic transactions. /// A database supporting atomic operations.
pub trait Db: 'static + Send + Sync + Clone + Get { pub trait Db: 'static + Send + Sync + Clone + Get {
/// The type representing a database transaction.
type Transaction<'a>: DbTxn; type Transaction<'a>: DbTxn;
/// Calculate a key for a database entry.
///
/// Keys are separated by the database, the item within the database, and the item's key itself.
fn key(db_dst: &'static [u8], item_dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> { fn key(db_dst: &'static [u8], item_dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
let db_len = u8::try_from(db_dst.len()).unwrap(); let db_len = u8::try_from(db_dst.len()).unwrap();
let dst_len = u8::try_from(item_dst.len()).unwrap(); let dst_len = u8::try_from(item_dst.len()).unwrap();
[[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat() [[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()
} }
/// Open a new transaction.
fn txn(&mut self) -> Self::Transaction<'_>; fn txn(&mut self) -> Self::Transaction<'_>;
} }
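
The length prefixes keep distinct `(db_dst, item_dst)` pairs from colliding. A sketch of the resulting layout, assuming the `MemDb` backend:

use serai_db::{Db, MemDb};

fn key_layout() {
  // `key(b"db", b"item", b"k")` = [len("db")] ++ "db" ++ [len("item")] ++ "item" ++ "k"
  assert_eq!(
    <MemDb as Db>::key(b"db", b"item", b"k"),
    vec![2, b'd', b'b', 4, b'i', b't', b'e', b'm', b'k'],
  );
}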

View File

@@ -11,7 +11,7 @@ use crate::*;
#[derive(PartialEq, Eq, Debug)] #[derive(PartialEq, Eq, Debug)]
pub struct MemDbTxn<'a>(&'a MemDb, HashMap<Vec<u8>, Vec<u8>>, HashSet<Vec<u8>>); pub struct MemDbTxn<'a>(&'a MemDb, HashMap<Vec<u8>, Vec<u8>>, HashSet<Vec<u8>>);
impl Get for MemDbTxn<'_> { impl<'a> Get for MemDbTxn<'a> {
fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> { fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
if self.2.contains(key.as_ref()) { if self.2.contains(key.as_ref()) {
return None; return None;
@@ -23,7 +23,7 @@ impl Get for MemDbTxn<'_> {
.or_else(|| self.0 .0.read().unwrap().get(key.as_ref()).cloned()) .or_else(|| self.0 .0.read().unwrap().get(key.as_ref()).cloned())
} }
} }
impl DbTxn for MemDbTxn<'_> { impl<'a> DbTxn for MemDbTxn<'a> {
fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) { fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
self.2.remove(key.as_ref()); self.2.remove(key.as_ref());
self.1.insert(key.as_ref().to_vec(), value.as_ref().to_vec()); self.1.insert(key.as_ref().to_vec(), value.as_ref().to_vec());

View File

@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/env"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = [] keywords = []
edition = "2021" edition = "2021"
rust-version = "1.64" rust-version = "1.60"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true

2
common/env/LICENSE vendored
View File

@@ -1,6 +1,6 @@
AGPL-3.0-only license AGPL-3.0-only license
Copyright (c) 2023-2025 Luke Parker Copyright (c) 2023 Luke Parker
This program is free software: you can redistribute it and/or modify This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as it under the terms of the GNU Affero General Public License Version 3 as

View File

@@ -1,5 +1,5 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
// Obtain a variable from the Serai environment/secret store. // Obtain a variable from the Serai environment/secret store.
pub fn var(variable: &str) -> Option<String> { pub fn var(variable: &str) -> Option<String> {

View File

@@ -7,7 +7,6 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/patchable-a
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["async", "sleep", "tokio", "smol", "async-std"] keywords = ["async", "sleep", "tokio", "smol", "async-std"]
edition = "2021" edition = "2021"
rust-version = "1.70"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true

View File

@@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2024-2025 Luke Parker Copyright (c) 2024 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal

View File

@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
#![deny(missing_docs)] #![deny(missing_docs)]

View File

@@ -1,13 +1,13 @@
[package] [package]
name = "simple-request" name = "simple-request"
version = "0.3.0" version = "0.1.0"
description = "A simple HTTP(S) request library" description = "A simple HTTP(S) request library"
license = "MIT" license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/request" repository = "https://github.com/serai-dex/serai/tree/develop/common/simple-request"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["http", "https", "async", "request", "ssl"] keywords = ["http", "https", "async", "request", "ssl"]
edition = "2021" edition = "2021"
rust-version = "1.71" rust-version = "1.70"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true
@@ -19,10 +19,9 @@ workspace = true
[dependencies] [dependencies]
tower-service = { version = "0.3", default-features = false } tower-service = { version = "0.3", default-features = false }
hyper = { version = "1", default-features = false, features = ["http1", "client"] } hyper = { version = "1", default-features = false, features = ["http1", "client"] }
hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy"] } hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy", "tokio"] }
http-body-util = { version = "0.1", default-features = false } http-body-util = { version = "0.1", default-features = false }
futures-util = { version = "0.3", default-features = false, features = ["std"] } tokio = { version = "1", default-features = false }
tokio = { version = "1", default-features = false, features = ["sync"] }
hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true } hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }
@@ -30,8 +29,6 @@ zeroize = { version = "1", optional = true }
base64ct = { version = "1", features = ["alloc"], optional = true } base64ct = { version = "1", features = ["alloc"], optional = true }
[features] [features]
tokio = ["hyper-util/tokio"] tls = ["hyper-rustls"]
tls = ["tokio", "hyper-rustls"]
webpki-roots = ["tls", "hyper-rustls/webpki-roots"]
basic-auth = ["zeroize", "base64ct"] basic-auth = ["zeroize", "base64ct"]
default = ["tls"] default = ["tls"]

View File

@@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2023-2025 Luke Parker Copyright (c) 2023 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal

View File

@@ -1,20 +1,19 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
use core::{pin::Pin, future::Future};
use std::sync::Arc; use std::sync::Arc;
use futures_util::FutureExt; use tokio::sync::Mutex;
use ::tokio::sync::Mutex;
use tower_service::Service as TowerService; use tower_service::Service as TowerService;
use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest, rt::Executor};
pub use hyper;
use hyper_util::client::legacy::{Client as HyperClient, connect::HttpConnector};
#[cfg(feature = "tls")] #[cfg(feature = "tls")]
use hyper_rustls::{HttpsConnectorBuilder, HttpsConnector}; use hyper_rustls::{HttpsConnectorBuilder, HttpsConnector};
use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest};
use hyper_util::{
rt::tokio::TokioExecutor,
client::legacy::{Client as HyperClient, connect::HttpConnector},
};
pub use hyper;
mod request; mod request;
pub use request::*; pub use request::*;
@@ -38,86 +37,52 @@ type Connector = HttpConnector;
type Connector = HttpsConnector<HttpConnector>; type Connector = HttpsConnector<HttpConnector>;
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
enum Connection< enum Connection {
E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
> {
ConnectionPool(HyperClient<Connector, Full<Bytes>>), ConnectionPool(HyperClient<Connector, Full<Bytes>>),
Connection { Connection {
executor: E,
connector: Connector, connector: Connector,
host: Uri, host: Uri,
connection: Arc<Mutex<Option<SendRequest<Full<Bytes>>>>>, connection: Arc<Mutex<Option<SendRequest<Full<Bytes>>>>>,
}, },
} }
/// An HTTP client.
///
/// `tls` is only guaranteed to work when using the `tokio` executor. Instantiating a client when
/// the `tls` feature is active without using the `tokio` executor will cause errors.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct Client< pub struct Client {
E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>, connection: Connection,
> {
connection: Connection<E>,
} }
impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>> impl Client {
Client<E> fn connector() -> Connector {
{
#[allow(clippy::unnecessary_wraps)]
fn connector() -> Result<Connector, Error> {
let mut res = HttpConnector::new(); let mut res = HttpConnector::new();
res.set_keepalive(Some(core::time::Duration::from_secs(60))); res.set_keepalive(Some(core::time::Duration::from_secs(60)));
res.set_nodelay(true); res.set_nodelay(true);
res.set_reuse_address(true); res.set_reuse_address(true);
#[cfg(feature = "tls")]
if core::any::TypeId::of::<E>() !=
core::any::TypeId::of::<hyper_util::rt::tokio::TokioExecutor>()
{
Err(Error::ConnectionError(
"`tls` feature enabled but not using the `tokio` executor".into(),
))?;
}
#[cfg(feature = "tls")] #[cfg(feature = "tls")]
res.enforce_http(false); res.enforce_http(false);
#[cfg(feature = "tls")] #[cfg(feature = "tls")]
let https = HttpsConnectorBuilder::new().with_native_roots(); let res = HttpsConnectorBuilder::new()
#[cfg(all(feature = "tls", not(feature = "webpki-roots")))] .with_native_roots()
let https = https.map_err(|e| { .expect("couldn't fetch system's SSL roots")
Error::ConnectionError( .https_or_http()
format!("couldn't load system's SSL root certificates and webpki-roots unavilable: {e:?}") .enable_http1()
.into(), .wrap_connector(res);
) res
})?;
// Fallback to `webpki-roots` if present
#[cfg(all(feature = "tls", feature = "webpki-roots"))]
let https = https.unwrap_or(HttpsConnectorBuilder::new().with_webpki_roots());
#[cfg(feature = "tls")]
let res = https.https_or_http().enable_http1().wrap_connector(res);
Ok(res)
} }
pub fn with_executor_and_connection_pool(executor: E) -> Result<Client<E>, Error> { pub fn with_connection_pool() -> Client {
Ok(Client { Client {
connection: Connection::ConnectionPool( connection: Connection::ConnectionPool(
HyperClient::builder(executor) HyperClient::builder(TokioExecutor::new())
.pool_idle_timeout(core::time::Duration::from_secs(60)) .pool_idle_timeout(core::time::Duration::from_secs(60))
.build(Self::connector()?), .build(Self::connector()),
), ),
}) }
} }
pub fn with_executor_and_without_connection_pool( pub fn without_connection_pool(host: &str) -> Result<Client, Error> {
executor: E,
host: &str,
) -> Result<Client<E>, Error> {
Ok(Client { Ok(Client {
connection: Connection::Connection { connection: Connection::Connection {
executor, connector: Self::connector(),
connector: Self::connector()?,
host: { host: {
let uri: Uri = host.parse().map_err(|_| Error::InvalidUri)?; let uri: Uri = host.parse().map_err(|_| Error::InvalidUri)?;
if uri.host().is_none() { if uri.host().is_none() {
@@ -130,9 +95,9 @@ impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Outpu
}) })
} }
pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_, E>, Error> { pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_>, Error> {
let request: Request = request.into(); let request: Request = request.into();
let Request { mut request, response_size_limit } = request; let mut request = request.0;
if let Some(header_host) = request.headers().get(hyper::header::HOST) { if let Some(header_host) = request.headers().get(hyper::header::HOST) {
match &self.connection { match &self.connection {
Connection::ConnectionPool(_) => {} Connection::ConnectionPool(_) => {}
@@ -166,7 +131,7 @@ impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Outpu
Connection::ConnectionPool(client) => { Connection::ConnectionPool(client) => {
client.request(request).await.map_err(Error::HyperUtil)? client.request(request).await.map_err(Error::HyperUtil)?
} }
Connection::Connection { executor, connector, host, connection } => { Connection::Connection { connector, host, connection } => {
let mut connection_lock = connection.lock().await; let mut connection_lock = connection.lock().await;
// If there's not a connection... // If there's not a connection...
@@ -178,46 +143,28 @@ impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Outpu
let call_res = call_res.map_err(Error::ConnectionError); let call_res = call_res.map_err(Error::ConnectionError);
let (requester, connection) = let (requester, connection) =
hyper::client::conn::http1::handshake(call_res?).await.map_err(Error::Hyper)?; hyper::client::conn::http1::handshake(call_res?).await.map_err(Error::Hyper)?;
// This task will die when we drop the requester // This will die when we drop the requester, so we don't need to track an AbortHandle
executor.execute(Box::pin(connection.map(|_| ()))); // for it
tokio::spawn(connection);
*connection_lock = Some(requester); *connection_lock = Some(requester);
} }
let connection = connection_lock.as_mut().expect("lock over the connection was poisoned"); let connection = connection_lock.as_mut().unwrap();
let mut err = connection.ready().await.err(); let mut err = connection.ready().await.err();
if err.is_none() { if err.is_none() {
// Send the request // Send the request
let response = connection.send_request(request).await; let res = connection.send_request(request).await;
if let Ok(response) = response { if let Ok(res) = res {
return Ok(Response { response, size_limit: response_size_limit, client: self }); return Ok(Response(res, self));
} }
err = response.err(); err = res.err();
} }
// Since this connection has been put into an error state, drop it // Since this connection has been put into an error state, drop it
*connection_lock = None; *connection_lock = None;
Err(Error::Hyper(err.expect("only here if `err` is some yet no error")))? Err(Error::Hyper(err.unwrap()))?
} }
}; };
Ok(Response { response, size_limit: response_size_limit, client: self }) Ok(Response(response, self))
} }
} }
#[cfg(feature = "tokio")]
mod tokio {
use hyper_util::rt::tokio::TokioExecutor;
use super::*;
pub type TokioClient = Client<TokioExecutor>;
impl Client<TokioExecutor> {
pub fn with_connection_pool() -> Result<Self, Error> {
Self::with_executor_and_connection_pool(TokioExecutor::new())
}
pub fn without_connection_pool(host: &str) -> Result<Self, Error> {
Self::with_executor_and_without_connection_pool(TokioExecutor::new(), host)
}
}
}
#[cfg(feature = "tokio")]
pub use tokio::TokioClient;
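
A usage sketch under the default `tls` feature (which now implies the `tokio` executor); the URL is hypothetical:

use simple_request::{hyper, Full, Error, TokioClient};
use hyper::body::Bytes;

async fn example() -> Result<(), Error> {
  // Pooled client; `with_connection_pool` now returns a Result as building
  // the connector may fail
  let client = TokioClient::with_connection_pool()?;
  let request = hyper::Request::get("https://example.com")
    .body(Full::new(Bytes::new()))
    .unwrap();
  let response = client.request(request).await?;
  assert!(response.status().is_success());
  Ok(())
}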

View File

@@ -7,15 +7,11 @@ pub use http_body_util::Full;
use crate::Error; use crate::Error;
#[derive(Debug)] #[derive(Debug)]
pub struct Request { pub struct Request(pub(crate) hyper::Request<Full<Bytes>>);
pub(crate) request: hyper::Request<Full<Bytes>>,
pub(crate) response_size_limit: Option<usize>,
}
impl Request { impl Request {
#[cfg(feature = "basic-auth")] #[cfg(feature = "basic-auth")]
fn username_password_from_uri(&self) -> Result<(String, String), Error> { fn username_password_from_uri(&self) -> Result<(String, String), Error> {
if let Some(authority) = self.request.uri().authority() { if let Some(authority) = self.0.uri().authority() {
let authority = authority.as_str(); let authority = authority.as_str();
if authority.contains('@') { if authority.contains('@') {
// Decode the username and password from the URI // Decode the username and password from the URI
@@ -40,10 +36,9 @@ impl Request {
let mut formatted = format!("{username}:{password}"); let mut formatted = format!("{username}:{password}");
let mut encoded = Base64::encode_string(formatted.as_bytes()); let mut encoded = Base64::encode_string(formatted.as_bytes());
formatted.zeroize(); formatted.zeroize();
self.request.headers_mut().insert( self.0.headers_mut().insert(
hyper::header::AUTHORIZATION, hyper::header::AUTHORIZATION,
HeaderValue::from_str(&format!("Basic {encoded}")) HeaderValue::from_str(&format!("Basic {encoded}")).unwrap(),
.expect("couldn't form header from base64-encoded string"),
); );
encoded.zeroize(); encoded.zeroize();
} }
@@ -64,17 +59,9 @@ impl Request {
pub fn with_basic_auth(&mut self) { pub fn with_basic_auth(&mut self) {
let _ = self.basic_auth_from_uri(); let _ = self.basic_auth_from_uri();
} }
/// Set a size limit for the response.
///
/// This may be exceeded by a single HTTP frame and accordingly isn't perfect.
pub fn set_response_size_limit(&mut self, response_size_limit: Option<usize>) {
self.response_size_limit = response_size_limit;
}
} }
impl From<hyper::Request<Full<Bytes>>> for Request { impl From<hyper::Request<Full<Bytes>>> for Request {
fn from(request: hyper::Request<Full<Bytes>>) -> Request { fn from(request: hyper::Request<Full<Bytes>>) -> Request {
Request { request, response_size_limit: None } Request(request)
} }
} }
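
A sketch of the `basic-auth` feature: credentials embedded in the URI are base64-encoded into an `Authorization` header:

use simple_request::{hyper, Full, Request};
use hyper::body::Bytes;

fn example() {
  let mut request: Request = hyper::Request::get("http://user:pass@example.com")
    .body(Full::new(Bytes::new()))
    .unwrap()
    .into();
  // Moves "user:pass" into an `Authorization: Basic dXNlcjpwYXNz` header
  request.with_basic_auth();
}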

View File

@@ -1,54 +1,24 @@
use core::{pin::Pin, future::Future};
use std::io;
use hyper::{ use hyper::{
StatusCode, StatusCode,
header::{HeaderValue, HeaderMap}, header::{HeaderValue, HeaderMap},
body::Incoming, body::{Buf, Incoming},
rt::Executor,
}; };
use http_body_util::BodyExt; use http_body_util::BodyExt;
use futures_util::{Stream, StreamExt};
use crate::{Client, Error}; use crate::{Client, Error};
// Borrows the client so its async task lives as long as this response exists. // Borrows the client so its async task lives as long as this response exists.
#[allow(dead_code)] #[allow(dead_code)]
#[derive(Debug)] #[derive(Debug)]
pub struct Response< pub struct Response<'a>(pub(crate) hyper::Response<Incoming>, pub(crate) &'a Client);
'a, impl<'a> Response<'a> {
E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
> {
pub(crate) response: hyper::Response<Incoming>,
pub(crate) size_limit: Option<usize>,
pub(crate) client: &'a Client<E>,
}
impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>>
Response<'_, E>
{
pub fn status(&self) -> StatusCode { pub fn status(&self) -> StatusCode {
self.response.status() self.0.status()
} }
pub fn headers(&self) -> &HeaderMap<HeaderValue> { pub fn headers(&self) -> &HeaderMap<HeaderValue> {
self.response.headers() self.0.headers()
} }
pub async fn body(self) -> Result<impl std::io::Read, Error> { pub async fn body(self) -> Result<impl std::io::Read, Error> {
let mut body = self.response.into_body().into_data_stream(); Ok(self.0.into_body().collect().await.map_err(Error::Hyper)?.aggregate().reader())
let mut res: Vec<u8> = vec![];
loop {
if let Some(size_limit) = self.size_limit {
let (lower, upper) = body.size_hint();
if res.len().wrapping_add(upper.unwrap_or(lower)) > size_limit.min(usize::MAX - 1) {
Err(Error::ConnectionError("response exceeded size limit".into()))?;
}
}
let Some(part) = body.next().await else { break };
let part = part.map_err(Error::Hyper)?;
res.extend(part.as_ref());
}
Ok(io::Cursor::new(res))
} }
} }
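
A sketch of bounding a response before buffering it, assuming a `TokioClient` built as above:

use std::io::Read;
use simple_request::{hyper, Full, Error, Request, TokioClient};
use hyper::body::Bytes;

async fn example(client: &TokioClient) -> Result<Vec<u8>, Error> {
  let mut request: Request = hyper::Request::get("https://example.com")
    .body(Full::new(Bytes::new()))
    .unwrap()
    .into();
  // Responses whose bodies exceed 1 MiB now error instead of being buffered
  request.set_response_size_limit(Some(1 << 20));
  let mut body = vec![];
  client.request(request).await?.body().await?
    .read_to_end(&mut body)
    .expect("reading from an in-memory buffer can't fail");
  Ok(body)
}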

View File

@@ -1,13 +1,13 @@
[package] [package]
name = "std-shims" name = "std-shims"
version = "0.1.5" version = "0.1.4"
description = "A series of std shims to make alloc more feasible" description = "A series of std shims to make alloc more feasible"
license = "MIT" license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims" repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["nostd", "no_std", "alloc", "io"] keywords = ["nostd", "no_std", "alloc", "io"]
edition = "2021" edition = "2021"
rust-version = "1.65" rust-version = "1.64"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true
@@ -18,10 +18,9 @@ workspace = true
[dependencies] [dependencies]
rustversion = { version = "1", default-features = false } rustversion = { version = "1", default-features = false }
spin = { version = "0.10", default-features = false, features = ["use_ticket_mutex", "fair_mutex", "once", "lazy"] } spin = { version = "0.10", default-features = false, features = ["use_ticket_mutex", "once", "lazy"] }
hashbrown = { version = "0.16", default-features = false, features = ["default-hasher", "inline-more"], optional = true } hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] }
[features] [features]
alloc = ["hashbrown"] std = []
std = ["alloc", "spin/std"]
default = ["std"] default = ["std"]

View File

@@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2023-2025 Luke Parker Copyright (c) 2023 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal

View File

@@ -1,28 +1,11 @@
# `std` shims # std shims
`std-shims` is a Rust crate with two purposes: A crate which passes through to std when the default `std` feature is enabled,
- Expand the functionality of `core` and `alloc` yet provides a series of shims when it isn't.
- Polyfill functionality only available on newer version of Rust
The goal is to make supporting no-`std` environments, and older versions of No guarantee of one-to-one parity is provided. The shims provided aim to be sufficient for the
Rust, as simple as possible. For most use cases, replacing `std::` with average case.
`std_shims::` and adding `use std_shims::prelude::*` is sufficient to take full
advantage of `std-shims`.
# API Surface `HashSet` and `HashMap` are provided via `hashbrown`. Synchronization primitives are provided via
`spin` (avoiding a requirement on `critical-section`).
`std-shims` only aims to have items _mutually available_ between `alloc` (with types are not guaranteed to be
extra dependencies) and `std` publicly exposed. Items exclusive to `std`, with
no shims available, will not be exported by `std-shims`.
# Dependencies
`HashSet` and `HashMap` are provided via `hashbrown`. Synchronization
primitives are provided via `spin` (avoiding a requirement on
`critical-section`). Sections of `std::io` are independently matched as closely as
possible. `rustversion` is used to detect when to provide polyfills.
# Disclaimer
No guarantee of one-to-one parity is provided. The shims provided aim to be
sufficient for the average case. Pull requests are _welcome_.
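
A sketch of the intended drop-in usage in a no-`std` crate with the `alloc` feature enabled:

#![no_std]
use std_shims::prelude::*;
use std_shims::{collections::HashMap, io::Write};

fn example() -> std_shims::io::Result<HashMap<String, Vec<u8>>> {
  // `Vec`, `String`, `vec!`, and `ToString` come from the shimmed prelude
  let mut buf = vec![];
  buf.write(b"hello")?;
  let mut map = HashMap::new();
  map.insert("greeting".to_string(), buf);
  Ok(map)
}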

View File

@@ -1,7 +1,7 @@
#[cfg(all(feature = "alloc", not(feature = "std")))]
pub use extern_alloc::collections::*;
#[cfg(all(feature = "alloc", not(feature = "std")))]
pub use hashbrown::{HashSet, HashMap};
#[cfg(feature = "std")] #[cfg(feature = "std")]
pub use std::collections::*; pub use std::collections::*;
#[cfg(not(feature = "std"))]
pub use alloc::collections::*;
#[cfg(not(feature = "std"))]
pub use hashbrown::{HashSet, HashMap};

View File

@@ -1,74 +1,42 @@
#[cfg(feature = "std")]
pub use std::io::*;
#[cfg(not(feature = "std"))] #[cfg(not(feature = "std"))]
mod shims { mod shims {
use core::fmt::{self, Debug, Display, Formatter}; use core::fmt::{Debug, Formatter};
#[cfg(feature = "alloc")] use alloc::{boxed::Box, vec::Vec};
use extern_alloc::{boxed::Box, vec::Vec};
use crate::error::Error as CoreError;
/// The kind of error.
#[derive(Clone, Copy, PartialEq, Eq, Debug)] #[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum ErrorKind { pub enum ErrorKind {
UnexpectedEof, UnexpectedEof,
Other, Other,
} }
/// An error.
#[derive(Debug)]
pub struct Error { pub struct Error {
kind: ErrorKind, kind: ErrorKind,
#[cfg(feature = "alloc")] error: Box<dyn Send + Sync>,
error: Box<dyn Send + Sync + CoreError>,
} }
impl Display for Error { impl Debug for Error {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { fn fmt(&self, fmt: &mut Formatter<'_>) -> core::result::Result<(), core::fmt::Error> {
<Self as Debug>::fmt(self, f) fmt.debug_struct("Error").field("kind", &self.kind).finish_non_exhaustive()
} }
} }
impl CoreError for Error {}
#[cfg(not(feature = "alloc"))]
pub trait IntoBoxSendSyncError {}
#[cfg(not(feature = "alloc"))]
impl<I> IntoBoxSendSyncError for I {}
#[cfg(feature = "alloc")]
pub trait IntoBoxSendSyncError: Into<Box<dyn Send + Sync + CoreError>> {}
#[cfg(feature = "alloc")]
impl<I: Into<Box<dyn Send + Sync + CoreError>>> IntoBoxSendSyncError for I {}
impl Error { impl Error {
/// Create a new error. pub fn new<E: 'static + Send + Sync>(kind: ErrorKind, error: E) -> Error {
/// Error { kind, error: Box::new(error) }
/// The error object itself is silently dropped when `alloc` is not enabled.
#[allow(unused)]
pub fn new<E: 'static + IntoBoxSendSyncError>(kind: ErrorKind, error: E) -> Error {
#[cfg(not(feature = "alloc"))]
let res = Error { kind };
#[cfg(feature = "alloc")]
let res = Error { kind, error: error.into() };
res
} }
/// Create a new error with `io::ErrorKind::Other` as its kind. pub fn other<E: 'static + Send + Sync>(error: E) -> Error {
/// Error { kind: ErrorKind::Other, error: Box::new(error) }
/// The error object itself is silently dropped when `alloc` is not enabled.
#[allow(unused)]
pub fn other<E: 'static + IntoBoxSendSyncError>(error: E) -> Error {
#[cfg(not(feature = "alloc"))]
let res = Error { kind: ErrorKind::Other };
#[cfg(feature = "alloc")]
let res = Error { kind: ErrorKind::Other, error: error.into() };
res
} }
/// The kind of error.
pub fn kind(&self) -> ErrorKind { pub fn kind(&self) -> ErrorKind {
self.kind self.kind
} }
/// Retrieve the inner error. pub fn into_inner(self) -> Option<Box<dyn Send + Sync>> {
#[cfg(feature = "alloc")]
pub fn into_inner(self) -> Option<Box<dyn Send + Sync + CoreError>> {
Some(self.error) Some(self.error)
} }
} }
@@ -96,12 +64,6 @@ mod shims {
} }
} }
impl<R: Read> Read for &mut R {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
R::read(*self, buf)
}
}
pub trait BufRead: Read { pub trait BufRead: Read {
fn fill_buf(&mut self) -> Result<&[u8]>; fn fill_buf(&mut self) -> Result<&[u8]>;
fn consume(&mut self, amt: usize); fn consume(&mut self, amt: usize);
@@ -126,7 +88,6 @@ mod shims {
} }
} }
#[cfg(feature = "alloc")]
impl Write for Vec<u8> { impl Write for Vec<u8> {
fn write(&mut self, buf: &[u8]) -> Result<usize> { fn write(&mut self, buf: &[u8]) -> Result<usize> {
self.extend(buf); self.extend(buf);
@@ -134,8 +95,6 @@ mod shims {
} }
} }
} }
#[cfg(not(feature = "std"))] #[cfg(not(feature = "std"))]
pub use shims::*; pub use shims::*;
#[cfg(feature = "std")]
pub use std::io::{ErrorKind, Error, Result, Read, BufRead, Write};

View File

@@ -1,45 +1,18 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), no_std)]
#[cfg(not(feature = "alloc"))] pub extern crate alloc;
pub use core::*;
#[cfg(not(feature = "alloc"))]
pub use core::{alloc, borrow, ffi, fmt, slice, str, task};
#[cfg(not(feature = "std"))]
#[rustversion::before(1.81)]
pub mod error {
use core::fmt::{Debug, Display};
pub trait Error: Debug + Display {}
}
#[cfg(not(feature = "std"))]
#[rustversion::since(1.81)]
pub use core::error;
#[cfg(feature = "alloc")]
extern crate alloc as extern_alloc;
#[cfg(all(feature = "alloc", not(feature = "std")))]
pub use extern_alloc::{alloc, borrow, boxed, ffi, fmt, rc, slice, str, string, task, vec, format};
#[cfg(feature = "std")]
pub use std::{alloc, borrow, boxed, error, ffi, fmt, rc, slice, str, string, task, vec, format};
pub mod sync;
pub mod collections; pub mod collections;
pub mod io; pub mod io;
pub mod sync;
pub use alloc::vec;
pub use alloc::str;
pub use alloc::string;
pub mod prelude { pub mod prelude {
// Shim the `std` prelude
#[cfg(feature = "alloc")]
pub use extern_alloc::{
format, vec,
borrow::ToOwned,
boxed::Box,
vec::Vec,
string::{String, ToString},
};
// Shim `div_ceil`
#[rustversion::before(1.73)] #[rustversion::before(1.73)]
#[doc(hidden)] #[doc(hidden)]
pub trait StdShimsDivCeil { pub trait StdShimsDivCeil {
@@ -80,7 +53,6 @@ pub mod prelude {
} }
} }
// Shim `io::Error::other`
#[cfg(feature = "std")] #[cfg(feature = "std")]
#[rustversion::before(1.74)] #[rustversion::before(1.74)]
#[doc(hidden)] #[doc(hidden)]

View File

@@ -1,80 +1,19 @@
pub use core::sync::atomic; pub use core::sync::*;
#[cfg(all(feature = "alloc", not(feature = "std")))] pub use alloc::sync::*;
pub use extern_alloc::sync::{Arc, Weak};
#[cfg(feature = "std")]
pub use std::sync::{Arc, Weak};
mod mutex_shim { mod mutex_shim {
#[cfg(not(feature = "std"))]
mod spin_mutex {
use core::ops::{Deref, DerefMut};
// We wrap this in an `Option` so we can consider `None` as poisoned
pub(super) struct Mutex<T>(spin::Mutex<Option<T>>);
/// An acquired view of a `Mutex`.
pub struct MutexGuard<'mutex, T> {
mutex: spin::MutexGuard<'mutex, Option<T>>,
// This is `Some` for the lifetime of this guard, and is only represented as an `Option` due
// to needing to move it on `Drop` (which solely gives us a mutable reference to `self`)
value: Option<T>,
}
impl<T> Mutex<T> {
pub(super) const fn new(value: T) -> Self {
Self(spin::Mutex::new(Some(value)))
}
pub(super) fn lock(&self) -> MutexGuard<'_, T> {
let mut mutex = self.0.lock();
// Take from the `Mutex` so future acquisitions will see `None` unless this is restored
let value = mutex.take();
// Check the prior acquisition did in fact restore the value
if value.is_none() {
panic!("locking a `spin::Mutex` held by a thread which panicked");
}
MutexGuard { mutex, value }
}
}
impl<T> Deref for MutexGuard<'_, T> {
type Target = T;
fn deref(&self) -> &T {
self.value.as_ref().expect("no value yet checked upon lock acquisition")
}
}
impl<T> DerefMut for MutexGuard<'_, T> {
fn deref_mut(&mut self) -> &mut T {
self.value.as_mut().expect("no value yet checked upon lock acquisition")
}
}
impl<'mutex, T> Drop for MutexGuard<'mutex, T> {
fn drop(&mut self) {
// Restore the value
*self.mutex = self.value.take();
}
}
}
#[cfg(not(feature = "std"))]
pub use spin_mutex::*;
#[cfg(feature = "std")] #[cfg(feature = "std")]
pub use std::sync::{Mutex, MutexGuard}; pub use std::sync::*;
#[cfg(not(feature = "std"))]
pub use spin::*;
/// A shimmed `Mutex` with an API common to `spin` and `std`. #[derive(Default, Debug)]
pub struct ShimMutex<T>(Mutex<T>); pub struct ShimMutex<T>(Mutex<T>);
impl<T> ShimMutex<T> { impl<T> ShimMutex<T> {
/// Construct a new `Mutex`.
pub const fn new(value: T) -> Self { pub const fn new(value: T) -> Self {
Self(Mutex::new(value)) Self(Mutex::new(value))
} }
/// Acquire a lock on the contents of the `Mutex`.
///
/// This will panic if the `Mutex` was poisoned.
///
/// On no-`std` environments, the implementation presumably defers to that of a spin lock.
pub fn lock(&self) -> MutexGuard<'_, T> { pub fn lock(&self) -> MutexGuard<'_, T> {
#[cfg(feature = "std")] #[cfg(feature = "std")]
let res = self.0.lock().unwrap(); let res = self.0.lock().unwrap();
@@ -86,12 +25,11 @@ mod mutex_shim {
} }
pub use mutex_shim::{ShimMutex as Mutex, MutexGuard}; pub use mutex_shim::{ShimMutex as Mutex, MutexGuard};
#[rustversion::before(1.80)]
pub use spin::Lazy as LazyLock;
#[rustversion::since(1.80)]
#[cfg(not(feature = "std"))] #[cfg(not(feature = "std"))]
pub use spin::Lazy as LazyLock; pub use spin::Lazy as LazyLock;
#[rustversion::before(1.80)]
#[cfg(feature = "std")]
pub use spin::Lazy as LazyLock;
#[rustversion::since(1.80)] #[rustversion::since(1.80)]
#[cfg(feature = "std")] #[cfg(feature = "std")]
pub use std::sync::LazyLock; pub use std::sync::LazyLock;
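
A sketch of the shimmed primitives; the same code compiles against `std` and, via `spin`, without it:

use std_shims::sync::{Mutex, LazyLock};

// `LazyLock` resolves to `std::sync::LazyLock` on Rust >= 1.80 with `std`,
// and to `spin::Lazy` otherwise
static COUNTER: LazyLock<Mutex<u64>> = LazyLock::new(|| Mutex::new(0));

fn increment() -> u64 {
  // Panics if the `Mutex` was poisoned, on `std` and no-`std` alike
  let mut counter = COUNTER.lock();
  *counter += 1;
  *counter
}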

View File

@@ -1,22 +0,0 @@
[package]
name = "serai-task"
version = "0.1.0"
description = "A task schema for Serai services"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/common/task"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
rust-version = "1.75"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
log = { version = "0.4", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["macros", "sync", "time"] }

View File

@@ -1,3 +0,0 @@
# Task
A schema to define tasks to be run ad infinitum.

View File

@@ -1,161 +0,0 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
use core::{
fmt::{self, Debug},
future::Future,
time::Duration,
};
use tokio::sync::mpsc;
mod type_name;
/// A handle for a task.
///
/// The task will only stop running once all handles for it are dropped.
//
// `run_now` isn't infallible if the task may have been closed. `run_now` on a closed task would
// either need to panic (historic behavior), silently drop the fact the task can't be run, or
// return an error. Instead of having a potential panic, and instead of modeling the error
// behavior, this task can't be closed unless all handles are dropped, ensuring calls to `run_now`
// are infallible.
#[derive(Clone)]
pub struct TaskHandle {
run_now: mpsc::Sender<()>,
#[allow(dead_code)] // This is used to track if all handles have been dropped
close: mpsc::Sender<()>,
}
/// A task's internal structures.
pub struct Task {
run_now: mpsc::Receiver<()>,
close: mpsc::Receiver<()>,
}
impl Task {
/// Create a new task definition.
pub fn new() -> (Self, TaskHandle) {
// Uses a capacity of 1 as any call to run as soon as possible satisfies all calls to run as
// soon as possible
let (run_now_send, run_now_recv) = mpsc::channel(1);
// And any call to close satisfies all calls to close
let (close_send, close_recv) = mpsc::channel(1);
(
Self { run_now: run_now_recv, close: close_recv },
TaskHandle { run_now: run_now_send, close: close_send },
)
}
}
impl TaskHandle {
/// Tell the task to run now (and not whenever its next iteration on a timer is).
pub fn run_now(&self) {
#[allow(clippy::match_same_arms)]
match self.run_now.try_send(()) {
Ok(()) => {}
// NOP on full, as this task will already be run as soon as possible
Err(mpsc::error::TrySendError::Full(())) => {}
Err(mpsc::error::TrySendError::Closed(())) => {
// The task should only be closed if all handles are dropped, and this one hasn't been
panic!("task was unexpectedly closed when calling run_now")
}
}
}
}
/// An enum which can't be constructed, representing that the task does not error.
pub enum DoesNotError {}
impl Debug for DoesNotError {
fn fmt(&self, _: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
// This type can't be constructed so we'll never have a `&self` to call this fn with
unreachable!()
}
}
/// A task to be continually ran.
pub trait ContinuallyRan: Sized + Send {
/// The amount of seconds before this task should be polled again.
const DELAY_BETWEEN_ITERATIONS: u64 = 5;
/// The maximum amount of seconds before this task should be run again.
///
/// Upon error, the amount of time waited will be linearly increased until this limit.
const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 120;
/// The error potentially yielded upon running an iteration of this task.
type Error: Debug;
/// Run an iteration of the task.
///
/// If this returns `true`, all dependents of the task will immediately have a new iteration run
/// (without waiting for whatever timer they were already on).
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>>;
/// Continually run the task.
fn continually_run(
mut self,
mut task: Task,
dependents: Vec<TaskHandle>,
) -> impl Send + Future<Output = ()> {
async move {
// The default number of seconds to sleep before running the task again
let default_sleep_before_next_task = Self::DELAY_BETWEEN_ITERATIONS;
// The current number of seconds to sleep before running the task again
// We increment this upon errors in order to not flood the logs with errors
let mut current_sleep_before_next_task = default_sleep_before_next_task;
let increase_sleep_before_next_task = |current_sleep_before_next_task: &mut u64| {
let new_sleep = *current_sleep_before_next_task + default_sleep_before_next_task;
// Set a limit of sleeping for two minutes
*current_sleep_before_next_task = new_sleep.min(Self::MAX_DELAY_BETWEEN_ITERATIONS);
};
loop {
// If we were told to close/all handles were dropped, drop it
{
let should_close = task.close.try_recv();
match should_close {
Ok(()) | Err(mpsc::error::TryRecvError::Disconnected) => break,
Err(mpsc::error::TryRecvError::Empty) => {}
}
}
match self.run_iteration().await {
Ok(run_dependents) => {
// Upon a successful (error-free) loop iteration, reset the amount of time we sleep
current_sleep_before_next_task = default_sleep_before_next_task;
if run_dependents {
for dependent in &dependents {
dependent.run_now();
}
}
}
Err(e) => {
// Get the type name
let type_name = type_name::strip_type_name(core::any::type_name::<Self>());
// Print the error as a warning, prefixed by the task's type
log::warn!("{type_name}: {e:?}");
increase_sleep_before_next_task(&mut current_sleep_before_next_task);
}
}
// Don't run the task again for another few seconds UNLESS told to run now
/*
We could replace tokio::mpsc with async_channel, tokio::time::sleep with
patchable_async_sleep::sleep, and tokio::select with futures_lite::future::or
It isn't worth the effort when patchable_async_sleep::sleep will still resolve to tokio
*/
tokio::select! {
() = tokio::time::sleep(Duration::from_secs(current_sleep_before_next_task)) => {},
msg = task.run_now.recv() => {
// Check if this is firing because the handle was dropped
if msg.is_none() {
break;
}
},
}
}
}
}
}
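
A sketch of a task built on this schema; `Heartbeat` is hypothetical:

use core::future::Future;
use serai_task::{Task, TaskHandle, ContinuallyRan, DoesNotError};

struct Heartbeat;
impl ContinuallyRan for Heartbeat {
  type Error = DoesNotError;
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      log::info!("heartbeat");
      // Returning `false` doesn't wake any dependents
      Ok(false)
    }
  }
}

fn spawn_heartbeat() -> TaskHandle {
  let (task, handle) = Task::new();
  // Iterates every DELAY_BETWEEN_ITERATIONS seconds, or sooner upon
  // `handle.run_now()`, until every `TaskHandle` is dropped
  tokio::spawn(Heartbeat.continually_run(task, vec![]));
  handle
}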

View File

@@ -1,31 +0,0 @@
/// Strip the modules from a type name.
// This may be of the form `a::b::C`, in which case we only want `C`
pub(crate) fn strip_type_name(full_type_name: &'static str) -> String {
// It also may be `a::b::C<d::e::F>`, in which case, we only attempt to strip `a::b`
let mut by_generics = full_type_name.split('<');
// Strip to just `C`
let full_outer_object_name = by_generics.next().unwrap();
let mut outer_object_name_parts = full_outer_object_name.split("::");
let mut last_part_in_outer_object_name = outer_object_name_parts.next().unwrap();
for part in outer_object_name_parts {
last_part_in_outer_object_name = part;
}
// Push back on the generic terms
let mut type_name = last_part_in_outer_object_name.to_string();
for generic in by_generics {
type_name.push('<');
type_name.push_str(generic);
}
type_name
}
#[test]
fn test_strip_type_name() {
assert_eq!(strip_type_name("core::option::Option"), "Option");
assert_eq!(
strip_type_name("core::option::Option<alloc::string::String>"),
"Option<alloc::string::String>"
);
}

View File

@@ -7,9 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/zalloc"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = [] keywords = []
edition = "2021" edition = "2021"
# This must be specified with the patch version, else Rust believes `1.77` < `1.77.0` and will rust-version = "1.77"
# refuse to compile due to relying on versions introduced with `1.77.0`
rust-version = "1.77.0"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true

View File

@@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2022-2025 Luke Parker Copyright (c) 2022-2023 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal

View File

@@ -1,5 +1,5 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))] #![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))]
//! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation. //! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation.

View File

@@ -17,45 +17,50 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true workspace = true
[dependencies] [dependencies]
async-trait = { version = "0.1", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["std"] } zeroize = { version = "^1.5", default-features = false, features = ["std"] }
bitvec = { version = "1", default-features = false, features = ["std"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false, features = ["std"] }
blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] } blake2 = { version = "0.10", default-features = false, features = ["std"] }
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] }
dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"] } dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"] }
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] } ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
dkg = { package = "dkg-musig", path = "../crypto/dkg/musig", default-features = false, features = ["std"] } schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std", "aggregate"] }
dkg-musig = { path = "../crypto/dkg/musig", default-features = false, features = ["std"] }
frost = { package = "modular-frost", path = "../crypto/frost" } frost = { package = "modular-frost", path = "../crypto/frost" }
frost-schnorrkel = { path = "../crypto/schnorrkel" } frost-schnorrkel = { path = "../crypto/schnorrkel" }
hex = { version = "0.4", default-features = false, features = ["std"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
zalloc = { path = "../common/zalloc" } zalloc = { path = "../common/zalloc" }
serai-db = { path = "../common/db" } serai-db = { path = "../common/db" }
serai-env = { path = "../common/env" } serai-env = { path = "../common/env" }
serai-task = { path = "../common/task", version = "0.1" }
messages = { package = "serai-processor-messages", path = "../processor/messages" } processor-messages = { package = "serai-processor-messages", path = "../processor/messages" }
message-queue = { package = "serai-message-queue", path = "../message-queue" } message-queue = { package = "serai-message-queue", path = "../message-queue" }
tributary-sdk = { path = "./tributary-sdk" } tributary = { package = "tributary-chain", path = "./tributary" }
serai-client-serai = { path = "../substrate/client/serai", default-features = false } sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "b225e429397af981afda8a3d48be027926c466ba", default-features = false, features = ["std"] }
serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }
hex = { version = "0.4", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
log = { version = "0.4", default-features = false, features = ["std"] } log = { version = "0.4", default-features = false, features = ["std"] }
env_logger = { version = "0.10", default-features = false, features = ["humantime"] } env_logger = { version = "0.10", default-features = false, features = ["humantime"] }
tokio = { version = "1", default-features = false, features = ["time", "sync", "macros", "rt-multi-thread"] } futures-util = { version = "0.3", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "request-response", "gossipsub", "macros"] }
serai-cosign = { path = "./cosign" } [dev-dependencies]
serai-coordinator-substrate = { path = "./substrate" } tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }
serai-coordinator-tributary = { path = "./tributary" } sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "b225e429397af981afda8a3d48be027926c466ba", default-features = false, features = ["std"] }
serai-coordinator-p2p = { path = "./p2p" } sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "b225e429397af981afda8a3d48be027926c466ba", default-features = false, features = ["std"] }
serai-coordinator-libp2p-p2p = { path = "./p2p/libp2p" }
[features] [features]
longer-reattempts = ["serai-coordinator-tributary/longer-reattempts"] longer-reattempts = []
parity-db = ["serai-db/parity-db"] parity-db = ["serai-db/parity-db"]
rocksdb = ["serai-db/rocksdb"] rocksdb = ["serai-db/rocksdb"]

View File

@@ -1,6 +1,6 @@
AGPL-3.0-only license AGPL-3.0-only license
Copyright (c) 2023-2025 Luke Parker Copyright (c) 2023 Luke Parker
This program is free software: you can redistribute it and/or modify This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as it under the terms of the GNU Affero General Public License Version 3 as

View File

@@ -1,29 +1,7 @@
# Coordinator # Coordinator
- [`tendermint`](/tributary/tendermint) is an implementation of the Tendermint The Serai coordinator communicates with other coordinators to prepare batches
BFT algorithm. for Serai and sign transactions.
- [`tributary-sdk`](./tributary-sdk) is a micro-blockchain framework. Instead In order to achieve consensus over gossip, and order certain events, a
of producing a blockchain daemon, as the Polkadot SDK or Cosmos SDK intend micro-blockchain is instantiated.
to, `tributary` is solely intended to be an embedded asynchronous task within
an application.
The Serai coordinator spawns a tributary for each validator set it's
coordinating. This allows the participating validators to communicate in a
byzantine-fault-tolerant manner (relying on Tendermint for consensus).
- [`cosign`](./cosign) contains a library to decide which Substrate blocks
should be cosigned and to evaluate cosigns.
- [`substrate`](./substrate) contains a library to index the Substrate
blockchain and handle its events.
- [`tributary`](./tributary) is our instantiation of the Tributary SDK for the
Serai processor. It includes the `Transaction` definition and deferred
execution logic.
- [`p2p`](./p2p) is our abstract P2P API to service the Coordinator.
- [`libp2p`](./p2p/libp2p) is our libp2p-backed implementation of the P2P API.
- [`src`](./src) contains the source code for the Coordinator binary itself.

View File

@@ -1,33 +0,0 @@
[package]
name = "serai-cosign"
version = "0.1.0"
description = "Evaluator of cosigns for the Serai network"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/cosign"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
rust-version = "1.85"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
serai-client-serai = { path = "../../substrate/client/serai", default-features = false }
log = { version = "0.4", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false }
serai-db = { path = "../../common/db", version = "0.1.1" }
serai-task = { path = "../../common/task", version = "0.1" }
serai-cosign-types = { path = "./types" }

View File

@@ -1,121 +0,0 @@
# Serai Cosign
The Serai blockchain is controlled by a set of validators referred to as the
Serai validators. These validators could attempt to double-spend, even if every
node on the network is a full node, via equivocating.
Posit:
- The Serai validators control X SRI
- The Serai validators produce block A swapping X SRI to Y XYZ
- The Serai validators produce block B swapping X SRI to Z ABC
- The Serai validators finalize block A and send to the validators for XYZ
- The Serai validators finalize block B and send to the validators for ABC
This is solved via the cosigning protocol. The validators for XYZ and the
validators for ABC each sign their view of the Serai blockchain, communicating
amongst each other to ensure consistency.
The security of the cosigning protocol is not formally proven, and there are no
claims it achieves Byzantine Fault Tolerance. This protocol is meant to be
practical, making such attacks infeasible when they could already be argued
difficult to perform.
### Definitions
- Cosign: A signature from a non-Serai validator set for a Serai block
- Cosign Commit: A collection of cosigns which achieve the necessary weight
### Methodology
Finalized blocks from the Serai network are intended to be cosigned if they
contain burn events. Only once cosigned should non-Serai validators process
them.
Cosigning occurs by a non-Serai validator set, using their threshold keys
declared on the Serai blockchain. Once 83% of non-Serai validator sets, by
weight, cosign a block, a cosign commit is formed. A cosign commit for a block
is considered to also cosign for all blocks preceding it.
### Bounds Under Asynchrony
Assuming an asynchronous environment fully controlled by the adversary, 34% of
a validator set may cause an equivocation. Control of 67% of non-Serai
validator sets, by weight, is sufficient to produce two distinct cosign commits
at the same position. This is due to the honest stake, 33%, being split across
the two candidates (67% + 16.5% = 83.5%, just over the threshold). This means
the cosigning protocol may produce multiple cosign commits if 34% of 67% of the
non-Serai validator sets (just 22.78%) is malicious. This would be in
conjunction with 34% of the Serai validator set (assumed 20% of total stake),
for a total stake requirement of 34% of 20% + 22.78% of 80% (25.024%). This is
an increase from the 6.8% required without the cosigning protocol.
### Bounds Under Synchrony
Assuming the honest stake within the non-Serai validator sets detects the
malicious stake within their set before assisting in producing a cosign for
their set (for which there is a multi-second window), 67% of 67% of the
non-Serai validator sets is required to produce cosigns for those sets. This raises the
total stake requirement to 42.712% (past the usual 34% threshold).
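
For reference, these two total-stake requirements decompose as follows (with
the Serai validator set assumed to hold 20% of total stake, as posited above):

```latex
0.34 \cdot 0.20 + (0.34 \cdot 0.67) \cdot 0.80 = 0.068 + 0.18224 = 0.25024 \quad \text{(asynchrony)}
0.34 \cdot 0.20 + (0.67 \cdot 0.67) \cdot 0.80 = 0.068 + 0.35912 = 0.42712 \quad \text{(synchrony)}
```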
### Behavior Reliant on Synchrony
If the Serai blockchain node detects an equivocation, it will stop responding
to all RPC requests and stop participating in finalizing further blocks. This
lets the node communicate the equivocating commits to other nodes (causing them
to exhibit the same behavior), yet prevents interaction with it.
If cosigns representing 17% of the non-Serai validator sets by weight are
detected for distinct blocks at the same position, the protocol halts. An
explicit latency period of seventy seconds is enacted after receiving a cosign
commit for the detection of such an equivocation. This is largely redundant
given how the Serai blockchain node will presumably have halted itself by this
time.
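
The 17% halt threshold uses the same style of integer arithmetic as the
cosign-commit threshold; a sketch (illustrative, not the crate's public API):

```rust
/// Whether faulty cosigns representing this much stake halt the protocol
/// (17% or more of the total stake).
fn faulted(faulty_weight: u64, total_stake: u64) -> bool {
  faulty_weight >= (total_stake * 17) / 100
}
```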
### Equivocation-Detection Avoidance
Malicious Serai validators could avoid detection of their equivocating if they
produced two distinct blockchains, A and B, with different keys declared for
the same non-Serai validator set. While the validators following A may detect
the cosigns for distinct blocks by validators following B, the cosigns would be
assumed invalid due to their signatures being verified against distinct keys.
This is prevented by requiring cosigns on the blocks which declare new keys,
ensuring all validators have a consistent view of the keys used within the
cosigning protocol (per the bounds of the cosigning protocol). These blocks are
exempt from the general policy of cosign commits cosigning all prior blocks,
preventing the newly declared keys (which aren't yet cosigned) from being used
to cosign themselves. These cosigns are flagged as "notable", are permanently
archived, and must be synced before a validator will move forward.
Cosigning the block which declares new keys also ensures agreement on the
preceding block which declared the new set, with an exact specification of the
participants and their weight, before it impacts the cosigning protocol.
### Denial of Service Concerns
Any historical Serai validator set may trigger a chain halt by producing an
equivocation after its retirement. This requires 67% of that set to be malicious. 34% of the
active Serai validator set may also trigger a chain halt.
Since equivocation by 17% of the non-Serai validator sets causes a halt, 5.67%
of the non-Serai validator sets' stake may cause a halt (in an asynchronous
environment fully controlled by the adversary). In a synchronous environment,
where the honest stake cannot be split across two candidates, 11.33% of the
non-Serai validator sets' stake is required.
The more practical attack is for one to obtain 5.67% of non-Serai validator
sets' stake, under any network conditions, and simply go offline. This will
take 17% of validator sets offline with it, preventing any cosign commits
from being performed. A fallback protocol where validators individually produce
cosigns, removing the network's horizontal scalability but ensuring liveness,
prevents this, restoring the additional requirements for control of an
asynchronous network or 11.33% of non-Serai validator sets' stake.
### TODO
The Serai node no longer responding to RPC requests upon detecting any
equivocation, and the fallback protocol where validators individually produce
signatures, are not implemented at this time. The former means the detection of
equivocating cosigns is not redundant and the latter makes 5.67% of non-Serai
validator sets' stake the DoS threshold, even without control of an
asynchronous network.


@@ -1,57 +0,0 @@
use core::future::Future;
use std::time::{Duration, SystemTime};
use serai_db::*;
use serai_task::{DoesNotError, ContinuallyRan};
use crate::evaluator::CosignedBlocks;
/// How often callers should broadcast the cosigns flagged for rebroadcasting.
pub const BROADCAST_FREQUENCY: Duration = Duration::from_secs(60);
const SYNCHRONY_EXPECTATION: Duration = Duration::from_secs(10);
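// The total delay before acknowledging a cosign: one full broadcast window plus
// the synchrony expectation (60s + 10s = 70s, the "seventy seconds" noted in the README).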
const ACKNOWLEDGEMENT_DELAY: Duration =
Duration::from_secs(BROADCAST_FREQUENCY.as_secs() + SYNCHRONY_EXPECTATION.as_secs());
create_db!(
SubstrateCosignDelay {
// The latest cosigned block number.
LatestCosignedBlockNumber: () -> u64,
}
);
/// A task to delay acknowledgement of cosigns.
pub(crate) struct CosignDelayTask<D: Db> {
pub(crate) db: D,
}
impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
type Error = DoesNotError;
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {
let mut made_progress = false;
loop {
let mut txn = self.db.txn();
// Receive the next block to mark as cosigned
let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else {
break;
};
// Calculate when we should mark it as valid
let time_valid =
SystemTime::UNIX_EPOCH + Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY;
// Sleep until then (if that time has already passed, this sleeps for zero time)
tokio::time::sleep(time_valid.duration_since(SystemTime::now()).unwrap_or(Duration::ZERO))
.await;
// Set the cosigned block
LatestCosignedBlockNumber::set(&mut txn, &block_number);
txn.commit();
made_progress = true;
}
Ok(made_progress)
}
}
}


@@ -1,246 +0,0 @@
use core::future::Future;
use std::time::{Duration, Instant, SystemTime};
use serai_db::*;
use serai_task::ContinuallyRan;
use crate::{
HasEvents, GlobalSession, NetworksLatestCosignedBlock, RequestNotableCosigns,
intend::{GlobalSessionsChannel, BlockEventData, BlockEvents},
};
create_db!(
SubstrateCosignEvaluator {
// The global session currently being evaluated.
CurrentlyEvaluatedGlobalSession: () -> ([u8; 32], GlobalSession),
}
);
db_channel!(
SubstrateCosignEvaluatorChannels {
// (cosigned block, time cosign was evaluated)
CosignedBlocks: () -> (u64, u64),
}
);
// This is a strict function which won't panic, even with a malicious Serai node, so long as:
// - It's called incrementally (with an increment of 1)
// - It's only called for block numbers we've completed indexing on within the intend task
// - It's only called for block numbers after a global session has started
// - The global sessions channel is populated as the block declaring the session is indexed
// Which all hold true within the context of this task and the intend task.
//
// This function will also ensure the currently evaluated global session is incremented once we
// finish evaluation of the prior session.
fn currently_evaluated_global_session_strict(
txn: &mut impl DbTxn,
block_number: u64,
) -> ([u8; 32], GlobalSession) {
let mut res = {
let existing = match CurrentlyEvaluatedGlobalSession::get(txn) {
Some(existing) => existing,
None => {
let first = GlobalSessionsChannel::try_recv(txn)
.expect("fetching latest global session yet none declared");
CurrentlyEvaluatedGlobalSession::set(txn, &first);
first
}
};
assert!(
existing.1.start_block_number <= block_number,
"candidate's start block number exceeds our block number"
);
existing
};
if let Some(next) = GlobalSessionsChannel::peek(txn) {
assert!(
block_number <= next.1.start_block_number,
"currently_evaluated_global_session_strict wasn't called incrementally"
);
// If it's time for this session to activate, take it from the channel and set it
if block_number == next.1.start_block_number {
GlobalSessionsChannel::try_recv(txn).unwrap();
CurrentlyEvaluatedGlobalSession::set(txn, &next);
res = next;
}
}
res
}
pub(crate) fn currently_evaluated_global_session(getter: &impl Get) -> Option<[u8; 32]> {
CurrentlyEvaluatedGlobalSession::get(getter).map(|(id, _info)| id)
}
/// A task to determine if a block has been cosigned and we should handle it.
pub(crate) struct CosignEvaluatorTask<D: Db, R: RequestNotableCosigns> {
pub(crate) db: D,
pub(crate) request: R,
pub(crate) last_request_for_cosigns: Instant,
}
impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D, R> {
type Error = String;
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
let should_request_cosigns = |last_request_for_cosigns: &mut Instant| {
const REQUEST_COSIGNS_SPACING: Duration = Duration::from_secs(60);
if Instant::now() < (*last_request_for_cosigns + REQUEST_COSIGNS_SPACING) {
return false;
}
*last_request_for_cosigns = Instant::now();
true
};
async move {
let mut known_cosign = None;
let mut made_progress = false;
loop {
let mut txn = self.db.txn();
let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn)
else {
break;
};
// Fetch the global session information
let (global_session, global_session_info) =
currently_evaluated_global_session_strict(&mut txn, block_number);
match has_events {
// Because this had notable events, we require an explicit cosign for this block by a
// supermajority of the prior block's validator sets
HasEvents::Notable => {
let mut weight_cosigned = 0;
for set in global_session_info.sets {
// Check if we have the cosign from this set
if NetworksLatestCosignedBlock::get(&txn, global_session, set.network)
.map(|signed_cosign| signed_cosign.cosign.block_number) ==
Some(block_number)
{
// Since we have this cosign, add the set's weight to the weight which has cosigned
weight_cosigned +=
global_session_info.stakes.get(&set.network).ok_or_else(|| {
"ValidatorSet in global session yet didn't have its stake".to_string()
})?;
}
}
// Check if the sum weight doesn't cross the required threshold
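// `((total_stake * 83) / 100) + 1` is the integer form of "strictly more than 83%"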
if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) {
// Request the necessary cosigns over the network
if should_request_cosigns(&mut self.last_request_for_cosigns) {
self
.request
.request_notable_cosigns(global_session)
.await
.map_err(|e| format!("{e:?}"))?;
}
// We return an error so the delay before this task is run again increases
return Err(format!(
"notable block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
));
}
log::info!("marking notable block #{block_number} as cosigned");
}
// Since this block didn't have any notable events, we simply require a cosign for this
// block or a greater block by the current validator sets
HasEvents::NonNotable => {
// Check if this was satisfied by a cached result which wasn't calculated incrementally
let known_cosigned = match known_cosign {
Some(cached) => cached >= block_number,
None => false,
};
// Clear `known_cosign` if it's stale, as it's no longer helpful
if !known_cosigned {
known_cosign = None;
}
// If it isn't already known to be cosigned, evaluate the latest cosigns
if !known_cosigned {
/*
LatestCosign is populated with the latest cosigns for each network which don't
exceed the latest global session we've evaluated the start of. This current block
is during the latest global session we've evaluated the start of.
*/
let mut weight_cosigned = 0;
let mut lowest_common_block: Option<u64> = None;
for set in global_session_info.sets {
// Check if this set cosigned this block or not
let Some(cosign) =
NetworksLatestCosignedBlock::get(&txn, global_session, set.network)
else {
continue;
};
if cosign.cosign.block_number >= block_number {
weight_cosigned +=
global_session_info.stakes.get(&set.network).ok_or_else(|| {
"ValidatorSet in global session yet didn't have its stake".to_string()
})?;
}
// Update the lowest block common to all of these cosigns
lowest_common_block = lowest_common_block
.map(|existing| existing.min(cosign.cosign.block_number))
.or(Some(cosign.cosign.block_number));
}
// Check if the sum weight doesn't cross the required threshold
if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) {
// Request the superseding notable cosigns over the network
// If this session hasn't yet produced notable cosigns, then we presume we'll see
// the desired non-notable cosigns as part of normal operations, without needing to
// explicitly request them
if should_request_cosigns(&mut self.last_request_for_cosigns) {
self
.request
.request_notable_cosigns(global_session)
.await
.map_err(|e| format!("{e:?}"))?;
}
// We return an error so the delay before this task is run again increases
return Err(format!(
"block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
));
}
// Update the cached result for the block we know is cosigned
/*
There may be a higher block which was cosigned, but once we get to this block,
we'll re-evaluate and find it then. The alternative would be an optimistic
re-evaluation now. Both are fine, so the lower-complexity option is preferred.
*/
known_cosign = lowest_common_block;
}
log::debug!("marking non-notable block #{block_number} as cosigned");
}
// If this block has no events necessitating cosigning, we can immediately consider the
// block cosigned (making this block a NOP)
HasEvents::No => {}
}
// Since we checked we had the necessary cosigns, send it for delay before acknowledgement
CosignedBlocks::send(
&mut txn,
&(
block_number,
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap_or(Duration::ZERO)
.as_secs(),
),
);
txn.commit();
if (block_number % 500) == 0 {
log::info!("marking block #{block_number} as cosigned");
}
made_progress = true;
}
Ok(made_progress)
}
}
}


@@ -1,273 +0,0 @@
use core::future::Future;
use std::{sync::Arc, collections::HashMap};
use blake2::{Digest, Blake2b256};
use serai_client_serai::{
abi::{
primitives::{
network_id::{ExternalNetworkId, NetworkId},
balance::Amount,
crypto::Public,
validator_sets::{Session, ExternalValidatorSet},
address::SeraiAddress,
merkle::IncrementalUnbalancedMerkleTree,
},
validator_sets::Event,
},
Serai, Events,
};
use serai_db::*;
use serai_task::ContinuallyRan;
use crate::*;
#[derive(BorshSerialize, BorshDeserialize)]
struct Set {
session: Session,
key: Public,
stake: Amount,
}
create_db!(
CosignIntend {
ScanCosignFrom: () -> u64,
BuildsUpon: () -> IncrementalUnbalancedMerkleTree,
Stakes: (network: ExternalNetworkId, validator: SeraiAddress) -> Amount,
Validators: (set: ExternalValidatorSet) -> Vec<SeraiAddress>,
LatestSet: (network: ExternalNetworkId) -> Set,
}
);
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub(crate) struct BlockEventData {
pub(crate) block_number: u64,
pub(crate) has_events: HasEvents,
}
db_channel! {
CosignIntendChannels {
GlobalSessionsChannel: () -> ([u8; 32], GlobalSession),
BlockEvents: () -> BlockEventData,
IntendedCosigns: (set: ExternalValidatorSet) -> CosignIntent,
}
}
async fn block_has_events_justifying_a_cosign(
serai: &Serai,
block_number: u64,
) -> Result<(Block, Events, HasEvents), String> {
let block = serai
.block_by_number(block_number)
.await
.map_err(|e| format!("{e:?}"))?
.ok_or_else(|| "couldn't get block which should've been finalized".to_string())?;
let events = serai.events(block.header.hash()).await.map_err(|e| format!("{e:?}"))?;
if events.validator_sets().set_keys_events().next().is_some() {
return Ok((block, events, HasEvents::Notable));
}
if events.coins().burn_with_instruction_events().next().is_some() {
return Ok((block, events, HasEvents::NonNotable));
}
Ok((block, events, HasEvents::No))
}
// Fetch the `ExternalValidatorSet`s, and their associated keys, used for cosigning as of this
// block.
fn cosigning_sets(getter: &impl Get) -> Vec<(ExternalValidatorSet, Public, Amount)> {
let mut sets = vec![];
for network in ExternalNetworkId::all() {
let Some(Set { session, key, stake }) = LatestSet::get(getter, network) else {
// If this network doesn't have usable keys, move on
continue;
};
sets.push((ExternalValidatorSet { network, session }, key, stake));
}
sets
}
/// A task to determine which blocks we should intend to cosign.
pub(crate) struct CosignIntendTask<D: Db> {
pub(crate) db: D,
pub(crate) serai: Arc<Serai>,
}
impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
type Error = String;
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {
let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1);
let latest_block_number =
self.serai.latest_finalized_block_number().await.map_err(|e| format!("{e:?}"))?;
for block_number in start_block_number ..= latest_block_number {
let mut txn = self.db.txn();
let (block, events, mut has_events) =
block_has_events_justifying_a_cosign(&self.serai, block_number)
.await
.map_err(|e| format!("{e:?}"))?;
let mut builds_upon =
BuildsUpon::get(&txn).unwrap_or(IncrementalUnbalancedMerkleTree::new());
// Check we are indexing a linear chain
if block.header.builds_upon() !=
builds_upon.clone().calculate(serai_client_serai::abi::BLOCK_HEADER_BRANCH_TAG)
{
Err(format!(
"node's block #{block_number} doesn't build upon the block #{} prior indexed",
block_number - 1
))?;
}
let block_hash = block.header.hash();
SubstrateBlockHash::set(&mut txn, block_number, &block_hash);
builds_upon.append(
serai_client_serai::abi::BLOCK_HEADER_BRANCH_TAG,
Blake2b256::new_with_prefix([serai_client_serai::abi::BLOCK_HEADER_LEAF_TAG])
.chain_update(block_hash.0)
.finalize()
.into(),
);
BuildsUpon::set(&mut txn, &builds_upon);
// Update the stakes
for event in events.validator_sets().allocation_events() {
let Event::Allocation { validator, network, amount } = event else {
panic!("event from `allocation_events` wasn't `Event::Allocation`")
};
let Ok(network) = ExternalNetworkId::try_from(*network) else { continue };
let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0));
Stakes::set(&mut txn, network, *validator, &Amount(existing.0 + amount.0));
}
for event in events.validator_sets().deallocation_events() {
let Event::Deallocation { validator, network, amount, timeline: _ } = event else {
panic!("event from `deallocation_events` wasn't `Event::Deallocation`")
};
let Ok(network) = ExternalNetworkId::try_from(*network) else { continue };
let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0));
Stakes::set(&mut txn, network, *validator, &Amount(existing.0 - amount.0));
}
// Handle decided sets
for event in events.validator_sets().set_decided_events() {
let Event::SetDecided { set, validators } = event else {
panic!("event from `set_decided_events` wasn't `Event::SetDecided`")
};
let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
Validators::set(
&mut txn,
set,
&validators.iter().map(|(validator, _key_shares)| *validator).collect(),
);
}
// Handle declarations of the latest set
for event in events.validator_sets().set_keys_events() {
let Event::SetKeys { set, key_pair } = event else {
panic!("event from `set_keys_events` wasn't `Event::SetKeys`")
};
let mut stake = 0;
for validator in
Validators::take(&mut txn, *set).expect("set which wasn't decided set keys")
{
stake += Stakes::get(&txn, set.network, validator).unwrap_or(Amount(0)).0;
}
LatestSet::set(
&mut txn,
set.network,
&Set { session: set.session, key: key_pair.0, stake: Amount(stake) },
);
}
let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn);
// If this is notable, it creates a new global session, which we index into the database
// now
if has_events == HasEvents::Notable {
let sets_and_keys_and_stakes = cosigning_sets(&txn);
let global_session = GlobalSession::id(
sets_and_keys_and_stakes.iter().map(|(set, _key, _stake)| *set).collect(),
);
let mut sets = Vec::with_capacity(sets_and_keys_and_stakes.len());
let mut keys = HashMap::with_capacity(sets_and_keys_and_stakes.len());
let mut stakes = HashMap::with_capacity(sets_and_keys_and_stakes.len());
let mut total_stake = 0;
for (set, key, stake) in sets_and_keys_and_stakes {
sets.push(set);
keys.insert(set.network, key);
stakes.insert(set.network, stake.0);
total_stake += stake.0;
}
if total_stake == 0 {
Err(format!("cosigning sets for block #{block_number} had 0 stake in total"))?;
}
let global_session_info = GlobalSession {
// This session starts cosigning after this block, as this block must be cosigned by
// the existing validators
start_block_number: block_number + 1,
sets,
keys,
stakes,
total_stake,
};
GlobalSessions::set(&mut txn, global_session, &global_session_info);
if let Some(ending_global_session) = global_session_for_this_block {
GlobalSessionsLastBlock::set(&mut txn, ending_global_session, &block_number);
}
LatestGlobalSessionIntended::set(&mut txn, &global_session);
GlobalSessionsChannel::send(&mut txn, &(global_session, global_session_info));
}
// If there isn't anyone available to cosign this block, meaning it'll never be cosigned,
// we flag it as not having any events requiring cosigning so we don't attempt to
// sign/require a cosign for it
if global_session_for_this_block.is_none() {
has_events = HasEvents::No;
}
match has_events {
HasEvents::Notable | HasEvents::NonNotable => {
let global_session_for_this_block = global_session_for_this_block
.expect("global session for this block was None but still attempting to cosign it");
let global_session_info = GlobalSessions::get(&txn, global_session_for_this_block)
.expect("last global session intended wasn't saved to the database");
// Tell each set of their expectation to cosign this block
for set in global_session_info.sets {
log::debug!("{set:?} will be cosigning block #{block_number}");
IntendedCosigns::send(
&mut txn,
set,
&CosignIntent {
global_session: global_session_for_this_block,
block_number,
block_hash,
notable: has_events == HasEvents::Notable,
},
);
}
}
HasEvents::No => {}
}
// Populate a singular feed with every block's status for the evaluator to work off of
BlockEvents::send(&mut txn, &(BlockEventData { block_number, has_events }));
// Mark this block as handled, meaning we should scan from the next block moving on
ScanCosignFrom::set(&mut txn, &(block_number + 1));
txn.commit();
}
Ok(start_block_number <= latest_block_number)
}
}
}


@@ -1,393 +0,0 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
use core::{fmt::Debug, future::Future};
use std::{sync::Arc, collections::HashMap, time::Instant};
use blake2::{Digest, Blake2s256};
use borsh::{BorshSerialize, BorshDeserialize};
use serai_client_serai::{
abi::{
primitives::{
BlockHash,
crypto::{Public, KeyPair},
network_id::ExternalNetworkId,
validator_sets::{Session, ExternalValidatorSet},
address::SeraiAddress,
},
Block,
},
Serai, State,
};
use serai_db::*;
use serai_task::*;
pub use serai_cosign_types::*;
/// The cosigns which are intended to be performed.
mod intend;
/// The evaluator of the cosigns.
mod evaluator;
/// The task to delay acknowledgement of the cosigns.
mod delay;
pub use delay::BROADCAST_FREQUENCY;
use delay::LatestCosignedBlockNumber;
/// A 'global session', defined as all validator sets used for cosigning at a given moment.
///
/// We evaluate cosign faults within a global session. This ensures even if cosigners cosign
/// distinct blocks at distinct positions within a global session, we still identify the faults.
/*
There is the attack where a validator set is given an alternate blockchain with a key generation
event at block #n, while most validator sets are given a blockchain with a key generation event
at block number #(n+1). This prevents whoever has the alternate blockchain from verifying the
cosigns on the primary blockchain, and detecting the faults, if they use the keys as of the block
prior to the block being cosigned.
We solve this by binding cosigns to a global session ID, which has a specific start block, and
reading the keys from the start block. This means that so long as all validator sets agree on the
start of a global session, they can verify all cosigns produced by that session, regardless of
how it advances. Since agreeing on the start of a global session is mandated, there's no way to
have validator sets follow two distinct global sessions without breaking the bounds of the
cosigning protocol.
*/
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub(crate) struct GlobalSession {
pub(crate) start_block_number: u64,
pub(crate) sets: Vec<ExternalValidatorSet>,
pub(crate) keys: HashMap<ExternalNetworkId, Public>,
pub(crate) stakes: HashMap<ExternalNetworkId, u64>,
pub(crate) total_stake: u64,
}
impl GlobalSession {
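// Hash the cosigners in a canonical (sorted) order so every validator derives
// the same ID for the same set of cosigners, regardless of iteration order.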
fn id(mut cosigners: Vec<ExternalValidatorSet>) -> [u8; 32] {
cosigners.sort_by_key(|a| borsh::to_vec(a).unwrap());
Blake2s256::digest(borsh::to_vec(&cosigners).unwrap()).into()
}
}
/// If the block has events.
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
enum HasEvents {
/// The block had a notable event.
///
/// This is a special case as blocks with key gen events change the keys used for cosigning, and
/// accordingly must be cosigned before we advance past them.
Notable,
/// The block had a non-notable event justifying a cosign.
NonNotable,
/// The block didn't have an event justifying a cosign.
No,
}
create_db! {
Cosign {
// The following are populated by the intend task and used throughout the library
// An index of Substrate blocks
SubstrateBlockHash: (block_number: u64) -> BlockHash,
// A mapping from a global session's ID to its relevant information.
GlobalSessions: (global_session: [u8; 32]) -> GlobalSession,
// The last block to be cosigned by a global session.
GlobalSessionsLastBlock: (global_session: [u8; 32]) -> u64,
// The latest global session intended.
//
// This is distinct from the latest global session for which we've evaluated the cosigns for.
LatestGlobalSessionIntended: () -> [u8; 32],
// The following are managed by the `intake_cosign` function present in this file
// The latest cosigned block for each network.
//
// This will only be populated with cosigns predating or during the most recent global session
// to have its start cosigned.
//
// The global session changes upon a notable block, causing each global session to have exactly
// one notable block. All validator sets will explicitly produce a cosign for their notable
// block, causing the latest cosigned block for a global session to either be the global
// session's notable cosigns or the network's latest cosigns.
NetworksLatestCosignedBlock: (
global_session: [u8; 32],
network: ExternalNetworkId
) -> SignedCosign,
// Cosigns received for blocks not locally recognized as finalized.
Faults: (global_session: [u8; 32]) -> Vec<SignedCosign>,
// The global session which faulted.
FaultedSession: () -> [u8; 32],
}
}
/// An object usable to request notable cosigns for a block.
pub trait RequestNotableCosigns: 'static + Send {
/// The error type which may be encountered when requesting notable cosigns.
type Error: Debug;
/// Request the notable cosigns for this global session.
fn request_notable_cosigns(
&self,
global_session: [u8; 32],
) -> impl Send + Future<Output = Result<(), Self::Error>>;
}
/// An error used to indicate the cosigning protocol has faulted.
#[derive(Debug)]
pub struct Faulted;
/// An error incurred while intaking a cosign.
#[derive(Debug)]
pub enum IntakeCosignError {
/// Cosign is for a not-yet-indexed block
NotYetIndexedBlock,
/// A later cosign for this cosigner has already been handled
StaleCosign,
/// The cosign's global session isn't recognized
UnrecognizedGlobalSession,
/// The cosign is for a block before its global session starts
BeforeGlobalSessionStart,
/// The cosign is for a block after its global session ends
AfterGlobalSessionEnd,
/// The cosign's signing network wasn't a participant in this global session
NonParticipatingNetwork,
/// The cosign had an invalid signature
InvalidSignature,
/// The cosign is for a global session which has yet to have its declaration block cosigned
FutureGlobalSession,
}
impl IntakeCosignError {
/// If this error is temporal to the local view
pub fn temporal(&self) -> bool {
match self {
IntakeCosignError::NotYetIndexedBlock |
IntakeCosignError::StaleCosign |
IntakeCosignError::UnrecognizedGlobalSession |
IntakeCosignError::FutureGlobalSession => true,
IntakeCosignError::BeforeGlobalSessionStart |
IntakeCosignError::AfterGlobalSessionEnd |
IntakeCosignError::NonParticipatingNetwork |
IntakeCosignError::InvalidSignature => false,
}
}
}
/// The interface to manage cosigning with.
pub struct Cosigning<D: Db> {
db: D,
}
impl<D: Db> Cosigning<D> {
/// Spawn the tasks to intend and evaluate cosigns.
///
/// The database specified must only be used with a singular instance of the Serai network, and
/// only used once at any given time.
pub fn spawn<R: RequestNotableCosigns>(
db: D,
serai: Arc<Serai>,
request: R,
tasks_to_run_upon_cosigning: Vec<TaskHandle>,
) -> Self {
let (intend_task, _intend_task_handle) = Task::new();
let (evaluator_task, evaluator_task_handle) = Task::new();
let (delay_task, delay_task_handle) = Task::new();
tokio::spawn(
(intend::CosignIntendTask { db: db.clone(), serai })
.continually_run(intend_task, vec![evaluator_task_handle]),
);
tokio::spawn(
(evaluator::CosignEvaluatorTask {
db: db.clone(),
request,
last_request_for_cosigns: Instant::now(),
})
.continually_run(evaluator_task, vec![delay_task_handle]),
);
tokio::spawn(
(delay::CosignDelayTask { db: db.clone() })
.continually_run(delay_task, tasks_to_run_upon_cosigning),
);
Self { db }
}
/// The latest cosigned block number.
pub fn latest_cosigned_block_number(getter: &impl Get) -> Result<u64, Faulted> {
if FaultedSession::get(getter).is_some() {
Err(Faulted)?;
}
Ok(LatestCosignedBlockNumber::get(getter).unwrap_or(0))
}
/// Fetch a cosigned Substrate block's hash by its block number.
pub fn cosigned_block(
getter: &impl Get,
block_number: u64,
) -> Result<Option<BlockHash>, Faulted> {
if block_number > Self::latest_cosigned_block_number(getter)? {
return Ok(None);
}
Ok(Some(
SubstrateBlockHash::get(getter, block_number).expect("cosigned block but didn't index it"),
))
}
/// Fetch the notable cosigns for a global session in order to respond to requests.
///
/// If this global session hasn't produced any notable cosigns, this will return the latest
/// cosigns for this session.
pub fn notable_cosigns(getter: &impl Get, global_session: [u8; 32]) -> Vec<SignedCosign> {
let mut cosigns = vec![];
for network in ExternalNetworkId::all() {
if let Some(cosign) = NetworksLatestCosignedBlock::get(getter, global_session, network) {
cosigns.push(cosign);
}
}
cosigns
}
/// The cosigns to rebroadcast every `BROADCAST_FREQUENCY` seconds.
///
/// This will be the most recent cosigns, in case the initial broadcast failed, or the faulty
/// cosigns, in case of a fault, to induce identification of the fault by others.
pub fn cosigns_to_rebroadcast(&self) -> Vec<SignedCosign> {
if let Some(faulted) = FaultedSession::get(&self.db) {
let mut cosigns = Faults::get(&self.db, faulted).expect("faulted with no faults");
// Also include all of our recognized-as-honest cosigns in an attempt to induce fault
// identification in those who see the faulty cosigns as honest
for network in ExternalNetworkId::all() {
if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, faulted, network) {
if cosign.cosign.global_session == faulted {
cosigns.push(cosign);
}
}
}
cosigns
} else {
let Some(global_session) = evaluator::currently_evaluated_global_session(&self.db) else {
return vec![];
};
let mut cosigns = vec![];
for network in ExternalNetworkId::all() {
if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, global_session, network) {
cosigns.push(cosign);
}
}
cosigns
}
}
/// Intake a cosign.
//
// Takes `&mut self` as this should only be called once at any given moment.
pub fn intake_cosign(&mut self, signed_cosign: &SignedCosign) -> Result<(), IntakeCosignError> {
let cosign = &signed_cosign.cosign;
let network = cosign.cosigner;
// Check our indexed blockchain includes a block with this block number
let Some(our_block_hash) = SubstrateBlockHash::get(&self.db, cosign.block_number) else {
Err(IntakeCosignError::NotYetIndexedBlock)?
};
let faulty = cosign.block_hash != our_block_hash;
// Check this isn't a dated cosign within its global session (as it would be if rebroadcasted)
if !faulty {
if let Some(existing) =
NetworksLatestCosignedBlock::get(&self.db, cosign.global_session, network)
{
if existing.cosign.block_number >= cosign.block_number {
Err(IntakeCosignError::StaleCosign)?;
}
}
}
let Some(global_session) = GlobalSessions::get(&self.db, cosign.global_session) else {
Err(IntakeCosignError::UnrecognizedGlobalSession)?
};
// Check the cosigned block number is in range to the global session
if cosign.block_number < global_session.start_block_number {
// Cosign is for a block predating the global session
Err(IntakeCosignError::BeforeGlobalSessionStart)?;
}
if !faulty {
// This prevents a malicious validator set, on the same chain, from producing a cosign after
// their final block, replacing their notable cosign
if let Some(last_block) = GlobalSessionsLastBlock::get(&self.db, cosign.global_session) {
if cosign.block_number > last_block {
// Cosign is for a block after the last block this global session should have signed
Err(IntakeCosignError::AfterGlobalSessionEnd)?;
}
}
}
// Check the cosign's signature
{
let key =
*global_session.keys.get(&network).ok_or(IntakeCosignError::NonParticipatingNetwork)?;
if !signed_cosign.verify_signature(key) {
Err(IntakeCosignError::InvalidSignature)?;
}
}
// Since we verified this cosign's signature, and have a chain sufficiently long, handle the
// cosign
let mut txn = self.db.txn();
if !faulty {
// If this is for a future global session, we don't acknowledge this cosign at this time
let latest_cosigned_block_number = LatestCosignedBlockNumber::get(&txn).unwrap_or(0);
// This global session starts the block *after* its declaration, so we want to check if the
// block declaring it was cosigned
if (global_session.start_block_number - 1) > latest_cosigned_block_number {
drop(txn);
return Err(IntakeCosignError::FutureGlobalSession);
}
// This is safe as it's in-range and newer, as previously checked since it isn't faulty
NetworksLatestCosignedBlock::set(&mut txn, cosign.global_session, network, signed_cosign);
} else {
let mut faults = Faults::get(&txn, cosign.global_session).unwrap_or(vec![]);
// Only handle this as a fault if this set wasn't prior faulty
if !faults.iter().any(|cosign| cosign.cosign.cosigner == network) {
faults.push(signed_cosign.clone());
Faults::set(&mut txn, cosign.global_session, &faults);
let mut weight_cosigned = 0;
for fault in &faults {
let stake = global_session
.stakes
.get(&fault.cosign.cosigner)
.expect("cosigner with recognized key didn't have a stake entry saved");
weight_cosigned += stake;
}
// Check if the sum weight means a fault has occurred
if weight_cosigned >= ((global_session.total_stake * 17) / 100) {
FaultedSession::set(&mut txn, &cosign.global_session);
}
}
}
txn.commit();
Ok(())
}
/// Receive intended cosigns to produce for this ExternalValidatorSet.
///
/// All cosigns intended, up to and including the next notable cosign, are returned.
///
/// This will drain the internal channel and not re-yield these intentions again.
pub fn intended_cosigns(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec<CosignIntent> {
let mut res: Vec<CosignIntent> = vec![];
// While we have yet to find a notable cosign...
while !res.last().map(|cosign| cosign.notable).unwrap_or(false) {
let Some(intent) = intend::IntendedCosigns::try_recv(txn, set) else { break };
res.push(intent);
}
res
}
}


@@ -1,25 +0,0 @@
[package]
name = "serai-cosign-types"
version = "0.1.0"
description = "Evaluator of cosigns for the Serai network"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/cosign"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
rust-version = "1.85"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
serai-primitives = { path = "../../../substrate/primitives", default-features = false, features = ["std"] }


@@ -1,72 +0,0 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(missing_docs)]
//! Types used when cosigning Serai. For more info, please see `serai-cosign`.
use borsh::{BorshSerialize, BorshDeserialize};
use serai_primitives::{BlockHash, crypto::Public, network_id::ExternalNetworkId};
/// The schnorrkel context used when signing a cosign.
pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";
/// An intended cosign.
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub struct CosignIntent {
/// The global session this cosign is being performed under.
pub global_session: [u8; 32],
/// The number of the block to cosign.
pub block_number: u64,
/// The hash of the block to cosign.
pub block_hash: BlockHash,
/// If this cosign must be handled before further cosigns are.
pub notable: bool,
}
/// A cosign.
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub struct Cosign {
/// The global session this cosign is being performed under.
pub global_session: [u8; 32],
/// The number of the block to cosign.
pub block_number: u64,
/// The hash of the block to cosign.
pub block_hash: BlockHash,
/// The actual cosigner.
pub cosigner: ExternalNetworkId,
}
impl CosignIntent {
/// Convert this into a `Cosign`.
pub fn into_cosign(self, cosigner: ExternalNetworkId) -> Cosign {
let CosignIntent { global_session, block_number, block_hash, notable: _ } = self;
Cosign { global_session, block_number, block_hash, cosigner }
}
}
impl Cosign {
/// The message to sign when producing this cosign.
///
/// This must be signed with schnorrkel, the context set to `COSIGN_CONTEXT`.
pub fn signature_message(&self) -> Vec<u8> {
// We use a schnorrkel context to domain-separate this
borsh::to_vec(self).unwrap()
}
}
/// A signed cosign.
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
pub struct SignedCosign {
/// The cosign.
pub cosign: Cosign,
/// The signature for the cosign.
pub signature: [u8; 64],
}
impl SignedCosign {
/// Verify a cosign's signature.
pub fn verify_signature(&self, signer: Public) -> bool {
let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };
signer.verify_simple(COSIGN_CONTEXT, &self.cosign.signature_message(), &signature).is_ok()
}
}
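// A minimal sketch of the signing side, shown for illustration and not part of
// this crate's API: producing a `SignedCosign` which `verify_signature` accepts,
// assuming a schnorrkel `Keypair` is at hand.
#[allow(dead_code)]
fn sign_cosign(keypair: &schnorrkel::Keypair, cosign: Cosign) -> SignedCosign {
let signature = keypair.sign_simple(COSIGN_CONTEXT, &cosign.signature_message());
SignedCosign { cosign, signature: signature.to_bytes() }
}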


@@ -1,33 +0,0 @@
[package]
name = "serai-coordinator-p2p"
version = "0.1.0"
description = "Serai coordinator's P2P abstraction"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/p2p"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
rust-version = "1.85"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
serai-db = { path = "../../common/db", version = "0.1" }
serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] }
serai-cosign = { path = "../cosign" }
tributary-sdk = { path = "../tributary-sdk" }
futures-lite = { version = "2", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["sync", "macros"] }
log = { version = "0.4", default-features = false, features = ["std"] }
serai-task = { path = "../../common/task", version = "0.1" }


@@ -1,3 +0,0 @@
# Serai Coordinator P2P
The P2P abstraction used by Serai's coordinator, and tasks over it.


@@ -1,42 +0,0 @@
[package]
name = "serai-coordinator-libp2p-p2p"
version = "0.1.0"
description = "Serai coordinator's libp2p-based P2P backend"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/p2p/libp2p"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
rust-version = "1.87"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
async-trait = { version = "0.1", default-features = false }
rand_core = { version = "0.6", default-features = false, features = ["std"] }
zeroize = { version = "^1.5", default-features = false, features = ["std"] }
blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
hex = { version = "0.4", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
serai-client-serai = { path = "../../../substrate/client/serai", default-features = false }
serai-cosign = { path = "../../cosign" }
tributary-sdk = { path = "../../tributary-sdk" }
futures-util = { version = "0.3", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["sync"] }
libp2p = { version = "0.56", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] }
log = { version = "0.4", default-features = false, features = ["std"] }
serai-task = { path = "../../../common/task", version = "0.1" }
serai-coordinator-p2p = { path = "../" }


@@ -1,15 +0,0 @@
AGPL-3.0-only license
Copyright (c) 2023-2025 Luke Parker
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.


@@ -1,14 +0,0 @@
# Serai Coordinator libp2p P2P
A libp2p-backed P2P instantiation for Serai's coordinator.
The libp2p swarm is limited to validators from the Serai network. The swarm
does not maintain any of its own peer finding/routing infrastructure, instead
relying on the Serai network's connection information to dial peers. This does
limit listening peers to those reachable via the same IP address as their Serai
node (despite the two being distinct services) and not hidden behind a NAT, yet
it is also quite simple and gives us full control over whom we connect to.
Peers are decided via the internal `DialTask`, which aims to maintain a target
number of peers for each external network. This ensures cosigns are able to
propagate across the external networks which sign them.


@@ -1,187 +0,0 @@
use core::{pin::Pin, future::Future};
use std::io;
use zeroize::Zeroizing;
use rand_core::{RngCore, OsRng};
use blake2::{Digest, Blake2s256};
use schnorrkel::{Keypair, PublicKey, Signature};
use serai_client_serai::abi::primitives::crypto::Public;
use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use libp2p::{
core::upgrade::{UpgradeInfo, InboundConnectionUpgrade, OutboundConnectionUpgrade},
identity::{self, PeerId},
noise,
};
use crate::peer_id_from_public;
const PROTOCOL: &str = "/serai/coordinator/validators";
#[derive(Clone)]
pub(crate) struct OnlyValidators {
pub(crate) serai_key: Zeroizing<Keypair>,
pub(crate) noise_keypair: identity::Keypair,
}
impl OnlyValidators {
/// The ephemeral challenge protocol for authentication.
///
/// We use ephemeral challenges to prevent replaying signatures from historic sessions.
///
/// We don't immediately send the challenge. We only send a commitment to it. This prevents our
/// remote peer from choosing their challenge in response to our challenge, in case there was any
/// benefit to doing so.
async fn challenges<S: 'static + Send + Unpin + AsyncRead + AsyncWrite>(
socket: &mut noise::Output<S>,
) -> io::Result<([u8; 32], [u8; 32])> {
let mut our_challenge = [0; 32];
OsRng.fill_bytes(&mut our_challenge);
// Write the hash of our challenge
socket.write_all(&Blake2s256::digest(our_challenge)).await?;
// Read the hash of their challenge
let mut their_challenge_commitment = [0; 32];
socket.read_exact(&mut their_challenge_commitment).await?;
// Reveal our challenge
socket.write_all(&our_challenge).await?;
// Read their challenge
let mut their_challenge = [0; 32];
socket.read_exact(&mut their_challenge).await?;
// Verify their challenge
if <[u8; 32]>::from(Blake2s256::digest(their_challenge)) != their_challenge_commitment {
Err(io::Error::other("challenge didn't match challenge commitment"))?;
}
Ok((our_challenge, their_challenge))
}
// We sign the two noise peer IDs and the ephemeral challenges.
//
// Signing the noise peer IDs ensures we're authenticating this noise connection. The only
// expectations placed on noise are for it to prevent a MITM from impersonating the other end or
// modifying any messages sent.
//
// Signing the ephemeral challenges prevents any replays. While that should be unnecessary, as
// noise MAY prevent replays across sessions (even when the same key is used), and noise IDs
// shouldn't be reused (so it should be fine to reuse an existing signature for these noise IDs),
// it doesn't hurt.
async fn authenticate<S: 'static + Send + Unpin + AsyncRead + AsyncWrite>(
&self,
socket: &mut noise::Output<S>,
dialer_peer_id: PeerId,
dialer_challenge: [u8; 32],
listener_peer_id: PeerId,
listener_challenge: [u8; 32],
) -> io::Result<PeerId> {
// Write our public key
socket.write_all(&self.serai_key.public.to_bytes()).await?;
let msg = borsh::to_vec(&(
dialer_peer_id.to_bytes(),
dialer_challenge,
listener_peer_id.to_bytes(),
listener_challenge,
))
.unwrap();
let signature = self.serai_key.sign_simple(PROTOCOL.as_bytes(), &msg);
socket.write_all(&signature.to_bytes()).await?;
let mut public_key_and_sig = [0; 96];
socket.read_exact(&mut public_key_and_sig).await?;
let public_key = PublicKey::from_bytes(&public_key_and_sig[.. 32])
.map_err(|_| io::Error::other("invalid public key"))?;
let sig = Signature::from_bytes(&public_key_and_sig[32 ..])
.map_err(|_| io::Error::other("invalid signature serialization"))?;
public_key
.verify_simple(PROTOCOL.as_bytes(), &msg, &sig)
.map_err(|_| io::Error::other("invalid signature"))?;
Ok(peer_id_from_public(Public(public_key.to_bytes())))
}
}
impl UpgradeInfo for OnlyValidators {
type Info = <noise::Config as UpgradeInfo>::Info;
type InfoIter = <noise::Config as UpgradeInfo>::InfoIter;
fn protocol_info(&self) -> Self::InfoIter {
// A keypair only causes an error if its sign operation fails, which is only possible with RSA,
// which isn't used within this codebase
noise::Config::new(&self.noise_keypair).unwrap().protocol_info()
}
}
impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> InboundConnectionUpgrade<S>
for OnlyValidators
{
type Output = (PeerId, noise::Output<S>);
type Error = io::Error;
type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;
fn upgrade_inbound(
self,
socket: S,
info: <Self as UpgradeInfo>::Info,
) -> <Self as InboundConnectionUpgrade<S>>::Future {
Box::pin(async move {
let (dialer_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
.unwrap()
.upgrade_inbound(socket, info)
.await
.map_err(io::Error::other)?;
let (our_challenge, dialer_challenge) = OnlyValidators::challenges(&mut socket).await?;
let dialer_serai_validator = self
.authenticate(
&mut socket,
dialer_noise_peer_id,
dialer_challenge,
PeerId::from_public_key(&self.noise_keypair.public()),
our_challenge,
)
.await?;
Ok((dialer_serai_validator, socket))
})
}
}
impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> OutboundConnectionUpgrade<S>
for OnlyValidators
{
type Output = (PeerId, noise::Output<S>);
type Error = io::Error;
type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;
fn upgrade_outbound(
self,
socket: S,
info: <Self as UpgradeInfo>::Info,
) -> <Self as OutboundConnectionUpgrade<S>>::Future {
Box::pin(async move {
let (listener_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
.unwrap()
.upgrade_outbound(socket, info)
.await
.map_err(io::Error::other)?;
let (our_challenge, listener_challenge) = OnlyValidators::challenges(&mut socket).await?;
let listener_serai_validator = self
.authenticate(
&mut socket,
PeerId::from_public_key(&self.noise_keypair.public()),
our_challenge,
listener_noise_peer_id,
listener_challenge,
)
.await?;
Ok((listener_serai_validator, socket))
})
}
}


@@ -1,134 +0,0 @@
use core::{future::Future, str::FromStr};
use std::{sync::Arc, collections::HashSet};
use rand_core::{RngCore, OsRng};
use tokio::sync::mpsc;
use serai_client_serai::{RpcError, Serai};
use libp2p::{
core::multiaddr::{Protocol, Multiaddr},
swarm::dial_opts::DialOpts,
};
use serai_task::ContinuallyRan;
use crate::{PORT, Peers, validators::Validators};
const TARGET_PEERS_PER_NETWORK: usize = 5;
/*
If we only tracked the target amount of peers per network, we'd risk being eclipsed by an
adversary who immediately connects to us with their array of validators upon our boot. Their
array would satisfy our target amount of peers, so we'd never seek more, enabling the adversary
to be the only entity we peered with.
We solve this by additionally requiring an explicit amount of peers we dialed. That means we
randomly chose to connect to these peers.
*/
// TODO const TARGET_DIALED_PEERS_PER_NETWORK: usize = 3;
pub(crate) struct DialTask {
serai: Arc<Serai>,
validators: Validators,
peers: Peers,
to_dial: mpsc::UnboundedSender<DialOpts>,
}
impl DialTask {
pub(crate) fn new(
serai: Arc<Serai>,
peers: Peers,
to_dial: mpsc::UnboundedSender<DialOpts>,
) -> Self {
DialTask { serai: serai.clone(), validators: Validators::new(serai).0, peers, to_dial }
}
}
impl ContinuallyRan for DialTask {
// Only run every five minutes, not the default of every five seconds
const DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 10 * 60;
type Error = RpcError;
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {
self.validators.update().await?;
// If any of our peers is lacking, try to connect to more
let mut dialed = false;
let peer_counts = self
.peers
.peers
.read()
.await
.iter()
.map(|(network, peers)| (*network, peers.len()))
.collect::<Vec<_>>();
for (network, peer_count) in peer_counts {
/*
If we don't have the target amount of peers, and we don't have all the validators in the
set but one, attempt to connect to more validators within this set.
The latter clause is so if there's a set with only 3 validators, we don't infinitely try
to connect to the target amount of peers for this network as we never will. Instead, we
only try to connect to most of the validators actually present.
*/
if (peer_count < TARGET_PEERS_PER_NETWORK) &&
(peer_count <
self
.validators
.by_network()
.get(&network)
.map(HashSet::len)
.unwrap_or(0)
.saturating_sub(1))
{
let mut potential_peers = self.serai.p2p_validators(network).await?;
for _ in 0 .. (TARGET_PEERS_PER_NETWORK - peer_count) {
if potential_peers.is_empty() {
break;
}
let index_to_dial =
usize::try_from(OsRng.next_u64() % u64::try_from(potential_peers.len()).unwrap())
.unwrap();
let randomly_selected_peer = potential_peers.swap_remove(index_to_dial);
let Ok(randomly_selected_peer) = libp2p::Multiaddr::from_str(&randomly_selected_peer)
else {
log::error!(
"peer from substrate wasn't a valid `Multiaddr`: {randomly_selected_peer}"
);
continue;
};
log::info!("found peer from substrate: {randomly_selected_peer}");
// Map the peer from a Substrate P2P network peer to a Coordinator P2P network peer
let mapped_peer = randomly_selected_peer
.into_iter()
.filter_map(|protocol| match protocol {
// Drop PeerIds from the Substrate P2p network
Protocol::P2p(_) => None,
// Use our own TCP port
Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)),
// Pass-through any other specifications (IPv4, IPv6, etc)
other => Some(other),
})
.collect::<Multiaddr>();
log::debug!("mapped found peer: {mapped_peer}");
self
.to_dial
.send(DialOpts::unknown_peer_id().address(mapped_peer).build())
.expect("dial receiver closed?");
dialed = true;
}
}
}
Ok(dialed)
}
}
}


@@ -1,75 +0,0 @@
use core::time::Duration;
use blake2::{Digest, Blake2s256};
use borsh::{BorshSerialize, BorshDeserialize};
use libp2p::gossipsub::{
IdentTopic, MessageId, MessageAuthenticity, ValidationMode, ConfigBuilder, IdentityTransform,
AllowAllSubscriptionFilter, Behaviour,
};
pub use libp2p::gossipsub::Event;
use serai_cosign::SignedCosign;
// Block size limit + 16 KB of space for signatures/metadata
pub(crate) const MAX_LIBP2P_GOSSIP_MESSAGE_SIZE: usize = tributary_sdk::BLOCK_SIZE_LIMIT + 16384;
const LIBP2P_PROTOCOL: &str = "/serai/coordinator/gossip/1.0.0";
const BASE_TOPIC: &str = "/";
fn topic_for_tributary(tributary: [u8; 32]) -> IdentTopic {
IdentTopic::new(format!("/tributary/{}", hex::encode(tributary)))
}
#[derive(Clone, BorshSerialize, BorshDeserialize)]
pub(crate) enum Message {
Tributary { tributary: [u8; 32], message: Vec<u8> },
Cosign(SignedCosign),
}
impl Message {
pub(crate) fn topic(&self) -> IdentTopic {
match self {
Message::Tributary { tributary, .. } => topic_for_tributary(*tributary),
Message::Cosign(_) => IdentTopic::new(BASE_TOPIC),
}
}
}
pub(crate) type Behavior = Behaviour<IdentityTransform, AllowAllSubscriptionFilter>;
pub(crate) fn new_behavior() -> Behavior {
// The latency used by the Tendermint protocol, used here as the gossip epoch duration
// libp2p-rs defaults to 1 second, whereas ours will be ~2
let heartbeat_interval = tributary_sdk::tendermint::LATENCY_TIME;
// The amount of heartbeats which will occur within a single Tributary block
let heartbeats_per_block =
tributary_sdk::tendermint::TARGET_BLOCK_TIME.div_ceil(heartbeat_interval);
// libp2p-rs defaults to 5, whereas ours will be ~8
let heartbeats_to_keep = 2 * heartbeats_per_block;
// libp2p-rs defaults to 3 whereas ours will be ~4
let heartbeats_to_gossip = heartbeats_per_block;
let config = ConfigBuilder::default()
.protocol_id_prefix(LIBP2P_PROTOCOL)
.history_length(usize::try_from(heartbeats_to_keep).unwrap())
.history_gossip(usize::try_from(heartbeats_to_gossip).unwrap())
.heartbeat_interval(Duration::from_millis(heartbeat_interval.into()))
.max_transmit_size(MAX_LIBP2P_GOSSIP_MESSAGE_SIZE)
.duplicate_cache_time(Duration::from_millis((heartbeats_to_keep * heartbeat_interval).into()))
.validation_mode(ValidationMode::Anonymous)
// Uses a content-based message ID to avoid duplicates as much as possible
.message_id_fn(|msg| {
MessageId::new(&Blake2s256::digest([msg.topic.as_str().as_bytes(), &msg.data].concat()))
})
.build();
let mut gossip = Behavior::new(MessageAuthenticity::Anonymous, config.unwrap()).unwrap();
// Subscribe to the base topic
let topic = IdentTopic::new(BASE_TOPIC);
let _ = gossip.subscribe(&topic);
gossip
}


@@ -1,417 +0,0 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
use core::{future::Future, time::Duration};
use std::{
sync::Arc,
collections::{HashSet, HashMap},
};
use rand_core::{RngCore, OsRng};
use zeroize::Zeroizing;
use schnorrkel::Keypair;
use serai_client_serai::{
abi::primitives::{
crypto::Public, network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet,
},
Serai,
};
use tokio::sync::{mpsc, oneshot, Mutex, RwLock};
use serai_task::{Task, ContinuallyRan};
use serai_cosign::SignedCosign;
use libp2p::{
multihash::Multihash,
identity::{self, PeerId},
tcp::Config as TcpConfig,
yamux, allow_block_list,
connection_limits::{self, ConnectionLimits},
swarm::NetworkBehaviour,
SwarmBuilder,
};
use serai_coordinator_p2p::{Heartbeat, TributaryBlockWithCommit};
/// A struct to sync the validators from the Serai node in order to keep track of them.
mod validators;
use validators::UpdateValidatorsTask;
/// The authentication protocol upgrade to limit the P2P network to active validators.
mod authenticate;
use authenticate::OnlyValidators;
/// The ping behavior, used to ensure connection latency is below the limit
mod ping;
/// The request-response messages and behavior
mod reqres;
use reqres::{InboundRequestId, Request, Response};
/// The gossip messages and behavior
mod gossip;
use gossip::Message;
/// The swarm task, running it and dispatching to/from it
mod swarm;
use swarm::SwarmTask;
/// The dial task, to find new peers to connect to
mod dial;
use dial::DialTask;
const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o')
fn peer_id_from_public(public: Public) -> PeerId {
// 0 represents the identity Multihash, that no hash was performed
// It's an internal constant so we can't refer to the constant inside libp2p
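// Since the identity multihash embeds the 32-byte key verbatim, the Serai public key is
// recoverable from the PeerId, and the validator-tracking logic relies on this by
// deriving the expected PeerIds directly from validator public keys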
PeerId::from_multihash(Multihash::wrap(0, &public.0).unwrap()).unwrap()
}
/// The representation of a peer.
pub struct Peer<'a> {
outbound_requests: &'a mpsc::UnboundedSender<(PeerId, Request, oneshot::Sender<Response>)>,
id: PeerId,
}
impl serai_coordinator_p2p::Peer<'_> for Peer<'_> {
fn send_heartbeat(
&self,
heartbeat: Heartbeat,
) -> impl Send + Future<Output = Option<Vec<TributaryBlockWithCommit>>> {
async move {
const HEARTBEAT_TIMEOUT: Duration = Duration::from_secs(5);
let request = Request::Heartbeat(heartbeat);
let (sender, receiver) = oneshot::channel();
self
.outbound_requests
.send((self.id, request, sender))
.expect("outbound requests recv channel was dropped?");
if let Ok(Ok(Response::Blocks(blocks))) =
tokio::time::timeout(HEARTBEAT_TIMEOUT, receiver).await
{
Some(blocks)
} else {
None
}
}
}
}
#[derive(Clone)]
struct Peers {
peers: Arc<RwLock<HashMap<ExternalNetworkId, HashSet<PeerId>>>>,
}
// Consider adding identify/kad/autonat/rendezvous/(relay + dcutr). While we currently use the Serai
// network for peers, we could use it solely for bootstrapping/as a fallback.
#[derive(NetworkBehaviour)]
struct Behavior {
// Used to only allow Serai validators as peers
allow_list: allow_block_list::Behaviour<allow_block_list::AllowedPeers>,
// Used to limit each peer to a single connection
connection_limits: connection_limits::Behaviour,
// Used to ensure connection latency is within tolerances
ping: ping::Behavior,
// Used to request data from specific peers
reqres: reqres::Behavior,
// Used to broadcast messages to all other peers subscribed to a topic
gossip: gossip::Behavior,
}
#[allow(clippy::type_complexity)]
struct Libp2pInner {
peers: Peers,
gossip: mpsc::UnboundedSender<Message>,
outbound_requests: mpsc::UnboundedSender<(PeerId, Request, oneshot::Sender<Response>)>,
tributary_gossip: Mutex<mpsc::UnboundedReceiver<([u8; 32], Vec<u8>)>>,
signed_cosigns: Mutex<mpsc::UnboundedReceiver<SignedCosign>>,
signed_cosigns_send: mpsc::UnboundedSender<SignedCosign>,
heartbeat_requests:
Mutex<mpsc::UnboundedReceiver<(InboundRequestId, ExternalValidatorSet, [u8; 32])>>,
notable_cosign_requests: Mutex<mpsc::UnboundedReceiver<(InboundRequestId, [u8; 32])>>,
inbound_request_responses: mpsc::UnboundedSender<(InboundRequestId, Response)>,
}
/// The libp2p-backed P2P implementation.
///
/// The P2p trait implementation does not support backpressure and is expected to be fully
/// utilized. Failure to poll the entire API will cause unbounded memory growth.
#[derive(Clone)]
pub struct Libp2p(Arc<Libp2pInner>);
impl Libp2p {
/// Create a new libp2p-backed P2P instance.
///
/// This will spawn all of the internal tasks necessary for functioning.
pub fn new(serai_key: &Zeroizing<Keypair>, serai: Arc<Serai>) -> Libp2p {
// Define the object we track peers with
let peers = Peers { peers: Arc::new(RwLock::new(HashMap::new())) };
// Define the dial task
let (dial_task_def, dial_task) = Task::new();
let (to_dial_send, to_dial_recv) = mpsc::unbounded_channel();
tokio::spawn(
DialTask::new(serai.clone(), peers.clone(), to_dial_send)
.continually_run(dial_task_def, vec![]),
);
let swarm = {
let new_only_validators = |noise_keypair: &identity::Keypair| -> Result<_, ()> {
Ok(OnlyValidators { serai_key: serai_key.clone(), noise_keypair: noise_keypair.clone() })
};
let mut swarm = SwarmBuilder::with_existing_identity(identity::Keypair::generate_ed25519())
.with_tokio()
.with_tcp(TcpConfig::default().nodelay(true), new_only_validators, yamux::Config::default)
.unwrap()
.with_behaviour(|_| Behavior {
allow_list: allow_block_list::Behaviour::default(),
// Limit each peer to a single connection
connection_limits: connection_limits::Behaviour::new(
ConnectionLimits::default().with_max_established_per_peer(Some(1)),
),
ping: ping::new_behavior(),
reqres: reqres::new_behavior(),
gossip: gossip::new_behavior(),
})
.unwrap()
.with_swarm_config(|config| {
config
.with_idle_connection_timeout(ping::INTERVAL + ping::TIMEOUT + Duration::from_secs(5))
})
.build();
swarm.listen_on(format!("/ip4/0.0.0.0/tcp/{PORT}").parse().unwrap()).unwrap();
swarm.listen_on(format!("/ip6/::/tcp/{PORT}").parse().unwrap()).unwrap();
swarm
};
let (swarm_validators, validator_changes) = UpdateValidatorsTask::spawn(serai);
let (gossip_send, gossip_recv) = mpsc::unbounded_channel();
let (signed_cosigns_send, signed_cosigns_recv) = mpsc::unbounded_channel();
let (tributary_gossip_send, tributary_gossip_recv) = mpsc::unbounded_channel();
let (outbound_requests_send, outbound_requests_recv) = mpsc::unbounded_channel();
let (heartbeat_requests_send, heartbeat_requests_recv) = mpsc::unbounded_channel();
let (notable_cosign_requests_send, notable_cosign_requests_recv) = mpsc::unbounded_channel();
let (inbound_request_responses_send, inbound_request_responses_recv) =
mpsc::unbounded_channel();
// Create the swarm task
SwarmTask::spawn(
dial_task,
to_dial_recv,
swarm_validators,
validator_changes,
peers.clone(),
swarm,
gossip_recv,
signed_cosigns_send.clone(),
tributary_gossip_send,
outbound_requests_recv,
heartbeat_requests_send,
notable_cosign_requests_send,
inbound_request_responses_recv,
);
Libp2p(Arc::new(Libp2pInner {
peers,
gossip: gossip_send,
outbound_requests: outbound_requests_send,
tributary_gossip: Mutex::new(tributary_gossip_recv),
signed_cosigns: Mutex::new(signed_cosigns_recv),
signed_cosigns_send,
heartbeat_requests: Mutex::new(heartbeat_requests_recv),
notable_cosign_requests: Mutex::new(notable_cosign_requests_recv),
inbound_request_responses: inbound_request_responses_send,
}))
}
}
impl tributary_sdk::P2p for Libp2p {
fn broadcast(&self, tributary: [u8; 32], message: Vec<u8>) -> impl Send + Future<Output = ()> {
async move {
self
.0
.gossip
.send(Message::Tributary { tributary, message })
.expect("gossip recv channel was dropped?");
}
}
}
impl serai_cosign::RequestNotableCosigns for Libp2p {
type Error = ();
fn request_notable_cosigns(
&self,
global_session: [u8; 32],
) -> impl Send + Future<Output = Result<(), Self::Error>> {
async move {
const AMOUNT_OF_PEERS_TO_REQUEST_FROM: usize = 3;
const NOTABLE_COSIGNS_TIMEOUT: Duration = Duration::from_secs(5);
let request = Request::NotableCosigns { global_session };
let peers = self.0.peers.peers.read().await.clone();
// HashSet of all peers
let peers = peers.into_values().flat_map(<_>::into_iter).collect::<HashSet<_>>();
// Vec of all peers
let mut peers = peers.into_iter().collect::<Vec<_>>();
let mut channels = Vec::with_capacity(AMOUNT_OF_PEERS_TO_REQUEST_FROM);
for _ in 0 .. AMOUNT_OF_PEERS_TO_REQUEST_FROM {
if peers.is_empty() {
break;
}
let i = usize::try_from(OsRng.next_u64() % u64::try_from(peers.len()).unwrap()).unwrap();
let peer = peers.swap_remove(i);
let (sender, receiver) = oneshot::channel();
self
.0
.outbound_requests
.send((peer, request, sender))
.expect("outbound requests recv channel was dropped?");
channels.push(receiver);
}
// We could reduce our latency by using FuturesUnordered here but the latency isn't a concern
for channel in channels {
if let Ok(Ok(Response::NotableCosigns(cosigns))) =
tokio::time::timeout(NOTABLE_COSIGNS_TIMEOUT, channel).await
{
for cosign in cosigns {
self
.0
.signed_cosigns_send
.send(cosign)
.expect("signed_cosigns recv in this object was dropped?");
}
}
}
Ok(())
}
}
}
impl serai_coordinator_p2p::P2p for Libp2p {
type Peer<'a> = Peer<'a>;
fn peers(&self, network: ExternalNetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>> {
async move {
let Some(peer_ids) = self.0.peers.peers.read().await.get(&network).cloned() else {
return vec![];
};
let mut res = vec![];
for id in peer_ids {
res.push(Peer { outbound_requests: &self.0.outbound_requests, id });
}
res
}
}
fn publish_cosign(&self, cosign: SignedCosign) -> impl Send + Future<Output = ()> {
async move {
self.0.gossip.send(Message::Cosign(cosign)).expect("gossip recv channel was dropped?");
}
}
fn heartbeat(
&self,
) -> impl Send + Future<Output = (Heartbeat, oneshot::Sender<Vec<TributaryBlockWithCommit>>)> {
async move {
let (request_id, set, latest_block_hash) = self
.0
.heartbeat_requests
.lock()
.await
.recv()
.await
.expect("heartbeat_requests_send was dropped?");
let (sender, receiver) = oneshot::channel();
tokio::spawn({
let respond = self.0.inbound_request_responses.clone();
async move {
// The swarm task expects us to respond to every request. If the caller drops this
// channel, we'll receive `Err` and respond with `vec![]`, safely satisfying that bound
// without requiring the caller send a value down this channel
let response = if let Ok(blocks) = receiver.await {
Response::Blocks(blocks)
} else {
Response::Blocks(vec![])
};
respond
.send((request_id, response))
.expect("inbound_request_responses_recv was dropped?");
}
});
(Heartbeat { set, latest_block_hash }, sender)
}
}
fn notable_cosigns_request(
&self,
) -> impl Send + Future<Output = ([u8; 32], oneshot::Sender<Vec<SignedCosign>>)> {
async move {
let (request_id, global_session) = self
.0
.notable_cosign_requests
.lock()
.await
.recv()
.await
.expect("notable_cosign_requests_send was dropped?");
let (sender, receiver) = oneshot::channel();
tokio::spawn({
let respond = self.0.inbound_request_responses.clone();
async move {
let response = if let Ok(notable_cosigns) = receiver.await {
Response::NotableCosigns(notable_cosigns)
} else {
Response::NotableCosigns(vec![])
};
respond
.send((request_id, response))
.expect("inbound_request_responses_recv was dropped?");
}
});
(global_session, sender)
}
}
fn tributary_message(&self) -> impl Send + Future<Output = ([u8; 32], Vec<u8>)> {
async move {
self.0.tributary_gossip.lock().await.recv().await.expect("tributary_gossip send was dropped?")
}
}
fn cosign(&self) -> impl Send + Future<Output = SignedCosign> {
async move {
self
.0
.signed_cosigns
.lock()
.await
.recv()
.await
.expect("signed_cosigns couldn't recv despite send in same object?")
}
}
}

View File

@@ -1,17 +0,0 @@
use core::time::Duration;
use tributary_sdk::tendermint::LATENCY_TIME;
use libp2p::ping::{self, Config, Behaviour};
pub use ping::Event;
pub(crate) const INTERVAL: Duration = Duration::from_secs(30);
// LATENCY_TIME represents the maximum latency for message delivery. Sending the ping, and
// receiving the pong, each have to occur within this time bound to validate the connection. We
// enforce that, as best we can, by requiring the round-trip be within twice the allowed latency.
pub(crate) const TIMEOUT: Duration = Duration::from_millis((2 * LATENCY_TIME) as u64);
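// e.g., assuming LATENCY_TIME = 2000ms (an illustrative value), TIMEOUT works out to
// 4s, and with INTERVAL at 30s, the swarm's idle-connection timeout of
// INTERVAL + TIMEOUT + 5s (set in lib.rs) comes out to 39s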
pub(crate) type Behavior = Behaviour;
pub(crate) fn new_behavior() -> Behavior {
Behavior::new(Config::default().with_interval(INTERVAL).with_timeout(TIMEOUT))
}

View File

@@ -1,134 +0,0 @@
use core::{fmt, time::Duration};
use std::io;
use async_trait::async_trait;
use borsh::{BorshSerialize, BorshDeserialize};
use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use libp2p::request_response::{
self, Codec as CodecTrait, Event as GenericEvent, Config, Behaviour, ProtocolSupport,
};
pub use request_response::{InboundRequestId, Message};
use serai_cosign::SignedCosign;
use serai_coordinator_p2p::{Heartbeat, TributaryBlockWithCommit};
/// The maximum message size for the request-response protocol
// This is derived from the heartbeat message size as it's our largest message
pub(crate) const MAX_LIBP2P_REQRES_MESSAGE_SIZE: usize =
1024 + serai_coordinator_p2p::heartbeat::BATCH_SIZE_LIMIT;
const PROTOCOL: &str = "/serai/coordinator/reqres/1.0.0";
/// Requests which can be made via the request-response protocol.
#[derive(Clone, Copy, Debug, BorshSerialize, BorshDeserialize)]
pub(crate) enum Request {
/// A heartbeat informing our peers of our latest block, for the specified blockchain, on regular
/// intervals.
///
/// If our peers have more blocks than us, they're expected to respond with those blocks.
Heartbeat(Heartbeat),
/// A request for the notable cosigns for a global session.
NotableCosigns { global_session: [u8; 32] },
}
/// Responses which can be received via the request-response protocol.
#[derive(Clone, BorshSerialize, BorshDeserialize)]
pub(crate) enum Response {
None,
Blocks(Vec<TributaryBlockWithCommit>),
NotableCosigns(Vec<SignedCosign>),
}
impl fmt::Debug for Response {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Response::None => fmt.debug_struct("Response::None").finish(),
Response::Blocks(_) => fmt.debug_struct("Response::Block").finish_non_exhaustive(),
Response::NotableCosigns(_) => {
fmt.debug_struct("Response::NotableCosigns").finish_non_exhaustive()
}
}
}
}
/// The codec used for the request-response protocol.
///
/// We don't use CBOR or JSON, but use borsh to create `Vec<u8>`s we then length-prefix. While
/// ideally, we'd use borsh directly with the `io` traits defined here, they're async and there
/// isn't an amenable API within borsh for incremental deserialization.
#[derive(Default, Clone, Copy, Debug)]
pub(crate) struct Codec;
impl Codec {
async fn read<M: BorshDeserialize>(io: &mut (impl Unpin + AsyncRead)) -> io::Result<M> {
let mut len = [0; 4];
io.read_exact(&mut len).await?;
let len = usize::try_from(u32::from_le_bytes(len)).expect("not at least a 32-bit platform?");
if len > MAX_LIBP2P_REQRES_MESSAGE_SIZE {
Err(io::Error::other("request length exceeded MAX_LIBP2P_REQRES_MESSAGE_SIZE"))?;
}
// This may be a non-trivial allocation a peer can cheaply cause
// While we could chunk the read, meaning we only perform the allocation as bandwidth is used,
// the max message size should be sufficiently sane
let mut buf = vec![0; len];
io.read_exact(&mut buf).await?;
let mut buf = buf.as_slice();
let res = M::deserialize(&mut buf)?;
if !buf.is_empty() {
Err(io::Error::other("p2p message had extra data appended to it"))?;
}
Ok(res)
}
async fn write(io: &mut (impl Unpin + AsyncWrite), msg: &impl BorshSerialize) -> io::Result<()> {
let msg = borsh::to_vec(msg).unwrap();
io.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await?;
io.write_all(&msg).await
}
}
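// A wire-format sketch, assuming borsh's standard one-byte enum discriminant:
// `Request::NotableCosigns { global_session: [0; 32] }` borsh-serializes to
// 1 + 32 = 33 bytes, and with the little-endian `u32` length prefix (`21 00 00 00`)
// the full frame is 37 bytes on the wire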
#[async_trait]
impl CodecTrait for Codec {
type Protocol = &'static str;
type Request = Request;
type Response = Response;
async fn read_request<R: Send + Unpin + AsyncRead>(
&mut self,
_: &Self::Protocol,
io: &mut R,
) -> io::Result<Request> {
Self::read(io).await
}
async fn read_response<R: Send + Unpin + AsyncRead>(
&mut self,
_: &Self::Protocol,
io: &mut R,
) -> io::Result<Response> {
Self::read(io).await
}
async fn write_request<W: Send + Unpin + AsyncWrite>(
&mut self,
_: &Self::Protocol,
io: &mut W,
req: Request,
) -> io::Result<()> {
Self::write(io, &req).await
}
async fn write_response<W: Send + Unpin + AsyncWrite>(
&mut self,
_: &Self::Protocol,
io: &mut W,
res: Response,
) -> io::Result<()> {
Self::write(io, &res).await
}
}
pub(crate) type Event = GenericEvent<Request, Response>;
pub(crate) type Behavior = Behaviour<Codec>;
pub(crate) fn new_behavior() -> Behavior {
let config = Config::default().with_request_timeout(Duration::from_secs(5));
Behavior::new([(PROTOCOL, ProtocolSupport::Full)], config)
}

View File

@@ -1,360 +0,0 @@
use std::{
sync::Arc,
collections::{HashSet, HashMap},
time::{Duration, Instant},
};
use borsh::BorshDeserialize;
use serai_client_serai::abi::primitives::validator_sets::ExternalValidatorSet;
use tokio::sync::{mpsc, oneshot, RwLock};
use serai_task::TaskHandle;
use serai_cosign::SignedCosign;
use futures_util::StreamExt;
use libp2p::{
identity::PeerId,
request_response::{InboundRequestId, OutboundRequestId, ResponseChannel},
swarm::{dial_opts::DialOpts, SwarmEvent, Swarm},
};
use serai_coordinator_p2p::Heartbeat;
use crate::{
Peers, BehaviorEvent, Behavior,
validators::{self, Validators},
ping,
reqres::{self, Request, Response},
gossip,
};
const TIME_BETWEEN_REBUILD_PEERS: Duration = Duration::from_secs(10 * 60);
/*
`SwarmTask` handles everything we need the `Swarm` object for. The goal is to minimize the
contention on this task. Unfortunately, the `Swarm` object itself is needed for a variety of
purposes making this a rather large task.
Responsibilities include:
- Actually dialing new peers (the selection process occurs in another task)
- Maintaining the peers structure (as we need the Swarm object to see who our peers are)
- Gossiping messages
- Dispatching gossiped messages
- Sending requests
- Dispatching responses to requests
- Dispatching received requests
- Sending responses
*/
pub(crate) struct SwarmTask {
dial_task: TaskHandle,
to_dial: mpsc::UnboundedReceiver<DialOpts>,
last_dial_task_run: Instant,
validators: Arc<RwLock<Validators>>,
validator_changes: mpsc::UnboundedReceiver<validators::Changes>,
peers: Peers,
rebuild_peers_at: Instant,
swarm: Swarm<Behavior>,
gossip: mpsc::UnboundedReceiver<gossip::Message>,
signed_cosigns: mpsc::UnboundedSender<SignedCosign>,
tributary_gossip: mpsc::UnboundedSender<([u8; 32], Vec<u8>)>,
outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,
outbound_request_responses: HashMap<OutboundRequestId, oneshot::Sender<Response>>,
inbound_request_response_channels: HashMap<InboundRequestId, ResponseChannel<Response>>,
heartbeat_requests: mpsc::UnboundedSender<(InboundRequestId, ExternalValidatorSet, [u8; 32])>,
notable_cosign_requests: mpsc::UnboundedSender<(InboundRequestId, [u8; 32])>,
inbound_request_responses: mpsc::UnboundedReceiver<(InboundRequestId, Response)>,
}
impl SwarmTask {
fn handle_gossip(&mut self, event: gossip::Event) {
match event {
gossip::Event::Message { message, .. } => {
let Ok(message) = gossip::Message::deserialize(&mut message.data.as_slice()) else {
// TODO: Penalize the PeerId which created this message, which requires authenticating
// each message OR moving to explicit acknowledgement before re-gossiping
return;
};
match message {
gossip::Message::Tributary { tributary, message } => {
let _: Result<_, _> = self.tributary_gossip.send((tributary, message));
}
gossip::Message::Cosign(signed_cosign) => {
let _: Result<_, _> = self.signed_cosigns.send(signed_cosign);
}
}
}
gossip::Event::Subscribed { .. } | gossip::Event::Unsubscribed { .. } => {}
gossip::Event::GossipsubNotSupported { peer_id } |
gossip::Event::SlowPeer { peer_id, .. } => {
let _: Result<_, _> = self.swarm.disconnect_peer_id(peer_id);
}
}
}
fn handle_reqres(&mut self, event: reqres::Event) {
match event {
reqres::Event::Message { message, .. } => match message {
reqres::Message::Request { request_id, request, channel } => match request {
reqres::Request::Heartbeat(Heartbeat { set, latest_block_hash }) => {
self.inbound_request_response_channels.insert(request_id, channel);
let _: Result<_, _> =
self.heartbeat_requests.send((request_id, set, latest_block_hash));
}
reqres::Request::NotableCosigns { global_session } => {
self.inbound_request_response_channels.insert(request_id, channel);
let _: Result<_, _> = self.notable_cosign_requests.send((request_id, global_session));
}
},
reqres::Message::Response { request_id, response } => {
if let Some(channel) = self.outbound_request_responses.remove(&request_id) {
let _: Result<_, _> = channel.send(response);
}
}
},
reqres::Event::OutboundFailure { request_id, .. } => {
// Send None as the response for the request
if let Some(channel) = self.outbound_request_responses.remove(&request_id) {
let _: Result<_, _> = channel.send(Response::None);
}
}
reqres::Event::InboundFailure { .. } | reqres::Event::ResponseSent { .. } => {}
}
}
async fn run(mut self) {
loop {
let time_till_rebuild_peers = self.rebuild_peers_at.saturating_duration_since(Instant::now());
tokio::select! {
// If the validators have changed, update the allow list
validator_changes = self.validator_changes.recv() => {
let validator_changes = validator_changes.expect("validators update task shut down?");
let behavior = &mut self.swarm.behaviour_mut().allow_list;
for removed in validator_changes.removed {
behavior.disallow_peer(removed);
}
for added in validator_changes.added {
behavior.allow_peer(added);
}
}
// Dial peers we're instructed to
dial_opts = self.to_dial.recv() => {
let dial_opts = dial_opts.expect("DialTask was closed?");
let _: Result<_, _> = self.swarm.dial(dial_opts);
}
/*
Rebuild the peers every 10 minutes.
This protects against any race conditions/edge cases we have in our logic to track peers,
along with unrepresented behavior such as when a peer changes the networks they're active
in. This lets the peer tracking logic simply be 'good enough' to not become horribly
corrupt over the span of `TIME_BETWEEN_REBUILD_PEERS`.
We also use this to disconnect all peers who are no longer active in any network.
*/
() = tokio::time::sleep(time_till_rebuild_peers) => {
let validators_by_network = self.validators.read().await.by_network().clone();
let connected_peers = self.swarm.connected_peers().copied().collect::<HashSet<_>>();
// Build the new peers object
let mut peers = HashMap::new();
for (network, validators) in validators_by_network {
peers.insert(network, validators.intersection(&connected_peers).copied().collect());
}
// Write the new peers object
*self.peers.peers.write().await = peers;
self.rebuild_peers_at = Instant::now() + TIME_BETWEEN_REBUILD_PEERS;
}
// Handle swarm events
event = self.swarm.next() => {
// `Swarm::next` will never return `Poll::Ready(None)`
// https://docs.rs/libp2p/0.54.1/libp2p/struct.Swarm.html#impl-Stream-for-Swarm%3CTBehaviour%3E
let event = event.unwrap();
match event {
// New connection, so update peers
SwarmEvent::ConnectionEstablished { peer_id, .. } => {
let Some(networks) =
self.validators.read().await.networks(&peer_id).cloned() else { continue };
let mut peers = self.peers.peers.write().await;
for network in networks {
peers.entry(network).or_insert_with(HashSet::new).insert(peer_id);
}
}
// Connection closed, so update peers
SwarmEvent::ConnectionClosed { peer_id, .. } => {
let Some(networks) =
self.validators.read().await.networks(&peer_id).cloned() else { continue };
let mut peers = self.peers.peers.write().await;
for network in networks {
peers.entry(network).or_insert_with(HashSet::new).remove(&peer_id);
}
/*
We want to re-run the dial task, since we lost a peer, in case we should find new
peers. This opens a DoS where a validator repeatedly opens/closes connections to
force iterations of the dial task. We prevent this by setting a minimum distance
since the last explicit iteration.
This is suboptimal. If we have several disconnects in immediate proximity, we'll
trigger the dial task upon the first (where we may still have enough peers we
shouldn't dial more) but not the last (where we may have so few peers left we
should dial more). This is accepted as the dial task will eventually run on its
natural timer.
*/
const MINIMUM_TIME_SINCE_LAST_EXPLICIT_DIAL: Duration = Duration::from_secs(60);
let now = Instant::now();
if (self.last_dial_task_run + MINIMUM_TIME_SINCE_LAST_EXPLICIT_DIAL) < now {
self.dial_task.run_now();
self.last_dial_task_run = now;
}
}
SwarmEvent::Behaviour(event) => {
match event {
BehaviorEvent::AllowList(event) | BehaviorEvent::ConnectionLimits(event) => {
// This *is* an exhaustive match as these events are empty enums
match event {}
}
BehaviorEvent::Ping(ping::Event { peer: _, connection, result, }) => {
if result.is_err() {
self.swarm.close_connection(connection);
}
}
BehaviorEvent::Reqres(event) => self.handle_reqres(event),
BehaviorEvent::Gossip(event) => self.handle_gossip(event),
}
}
// We don't handle any of these
SwarmEvent::IncomingConnection { .. } |
SwarmEvent::IncomingConnectionError { .. } |
SwarmEvent::OutgoingConnectionError { .. } |
SwarmEvent::NewListenAddr { .. } |
SwarmEvent::ExpiredListenAddr { .. } |
SwarmEvent::ListenerClosed { .. } |
SwarmEvent::ListenerError { .. } |
SwarmEvent::Dialing { .. } |
SwarmEvent::NewExternalAddrCandidate { .. } |
SwarmEvent::ExternalAddrConfirmed { .. } |
SwarmEvent::ExternalAddrExpired { .. } |
SwarmEvent::NewExternalAddrOfPeer { .. } => {}
// Required as SwarmEvent is non-exhaustive
_ => log::warn!("unhandled SwarmEvent: {event:?}"),
}
}
message = self.gossip.recv() => {
let message = message.expect("channel for messages to gossip was closed?");
let topic = message.topic();
let message = borsh::to_vec(&message).unwrap();
/*
If we're sending a message for this topic, it's because this topic is relevant to us.
Subscribe to it.
We create topics roughly weekly, one per validator set/session. Once present in a
topic, we're interested in all messages for it until the validator set/session retires.
Then there should no longer be any messages for the topic as we should drop the
Tributary which creates the messages.
We use this as an argument to not bother implementing unsubscribing from topics. They're
incredibly infrequently created and old topics shouldn't still have messages published
to them. Having the coordinator reboot being our method of unsubscribing is fine.
Alternatively, we could route an API to determine when a topic is retired, or retire
any topics we haven't sent messages on in the past hour.
*/
let behavior = self.swarm.behaviour_mut();
let _: Result<_, _> = behavior.gossip.subscribe(&topic);
/*
This may be an error of `InsufficientPeers`. If so, we could ask DialTask to dial more
peers for this network. We don't as we assume DialTask will detect the lack of peers
for this network, and will already successfully handle this.
*/
let _: Result<_, _> = behavior.gossip.publish(topic.hash(), message);
}
request = self.outbound_requests.recv() => {
let (peer, request, response_channel) =
request.expect("channel for requests was closed?");
let request_id = self.swarm.behaviour_mut().reqres.send_request(&peer, request);
self.outbound_request_responses.insert(request_id, response_channel);
}
response = self.inbound_request_responses.recv() => {
let (request_id, response) =
response.expect("channel for inbound request responses was closed?");
if let Some(channel) = self.inbound_request_response_channels.remove(&request_id) {
let _: Result<_, _> =
self.swarm.behaviour_mut().reqres.send_response(channel, response);
}
}
}
}
}
#[allow(clippy::too_many_arguments)]
pub(crate) fn spawn(
dial_task: TaskHandle,
to_dial: mpsc::UnboundedReceiver<DialOpts>,
validators: Arc<RwLock<Validators>>,
validator_changes: mpsc::UnboundedReceiver<validators::Changes>,
peers: Peers,
swarm: Swarm<Behavior>,
gossip: mpsc::UnboundedReceiver<gossip::Message>,
signed_cosigns: mpsc::UnboundedSender<SignedCosign>,
tributary_gossip: mpsc::UnboundedSender<([u8; 32], Vec<u8>)>,
outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,
heartbeat_requests: mpsc::UnboundedSender<(InboundRequestId, ExternalValidatorSet, [u8; 32])>,
notable_cosign_requests: mpsc::UnboundedSender<(InboundRequestId, [u8; 32])>,
inbound_request_responses: mpsc::UnboundedReceiver<(InboundRequestId, Response)>,
) {
tokio::spawn(
SwarmTask {
dial_task,
to_dial,
last_dial_task_run: Instant::now(),
validators,
validator_changes,
peers,
rebuild_peers_at: Instant::now() + TIME_BETWEEN_REBUILD_PEERS,
swarm,
gossip,
signed_cosigns,
tributary_gossip,
outbound_requests,
outbound_request_responses: HashMap::new(),
inbound_request_response_channels: HashMap::new(),
heartbeat_requests,
notable_cosign_requests,
inbound_request_responses,
}
.run(),
);
}
}

View File

@@ -1,224 +0,0 @@
use core::{borrow::Borrow, future::Future};
use std::{
sync::Arc,
collections::{HashSet, HashMap},
};
use serai_client_serai::abi::primitives::{network_id::ExternalNetworkId, validator_sets::Session};
use serai_client_serai::{RpcError, Serai};
use serai_task::{Task, ContinuallyRan};
use libp2p::PeerId;
use futures_util::stream::{StreamExt, FuturesUnordered};
use tokio::sync::{mpsc, RwLock};
use crate::peer_id_from_public;
pub(crate) struct Changes {
pub(crate) removed: HashSet<PeerId>,
pub(crate) added: HashSet<PeerId>,
}
pub(crate) struct Validators {
serai: Arc<Serai>,
// A cache for which session we're populated with the validators of
sessions: HashMap<ExternalNetworkId, Session>,
// The validators by network
by_network: HashMap<ExternalNetworkId, HashSet<PeerId>>,
// The validators and their networks
validators: HashMap<PeerId, HashSet<ExternalNetworkId>>,
// The channel to send the changes down
changes: mpsc::UnboundedSender<Changes>,
}
impl Validators {
pub(crate) fn new(serai: Arc<Serai>) -> (Self, mpsc::UnboundedReceiver<Changes>) {
let (send, recv) = mpsc::unbounded_channel();
let validators = Validators {
serai,
sessions: HashMap::new(),
by_network: HashMap::new(),
validators: HashMap::new(),
changes: send,
};
(validators, recv)
}
async fn session_changes(
serai: impl Borrow<Serai>,
sessions: impl Borrow<HashMap<ExternalNetworkId, Session>>,
) -> Result<Vec<(ExternalNetworkId, Session, HashSet<PeerId>)>, RpcError> {
/*
This uses the latest finalized block, not the latest cosigned block, which should be fine as
in the worst case, we'd connect to unexpected validators. They still shouldn't be able to
bypass the cosign protocol unless a historical global session was malicious, in which case
the cosign protocol already breaks.
Besides, we can't connect to historical validators, only the current validators.
*/
let serai = serai.borrow().state().await?;
let mut session_changes = vec![];
{
// FuturesUnordered can be bad practice as it'll cause timeouts if infrequently polled, but
// we poll it till it yields all futures with the most minimal processing possible
let mut futures = FuturesUnordered::new();
for network in ExternalNetworkId::all() {
let sessions = sessions.borrow();
let serai = serai.borrow();
futures.push(async move {
let session = match serai.current_session(network.into()).await {
Ok(Some(session)) => session,
Ok(None) => return Ok(None),
Err(e) => return Err(e),
};
if sessions.get(&network) == Some(&session) {
Ok(None)
} else {
match serai.current_validators(network.into()).await {
Ok(Some(validators)) => Ok(Some((
network,
session,
validators
.into_iter()
.map(|validator| peer_id_from_public(validator.into()))
.collect(),
))),
Ok(None) => panic!("network has session yet no validators"),
Err(e) => Err(e),
}
}
});
}
while let Some(session_change) = futures.next().await {
if let Some(session_change) = session_change? {
session_changes.push(session_change);
}
}
}
Ok(session_changes)
}
fn incorporate_session_changes(
&mut self,
session_changes: Vec<(ExternalNetworkId, Session, HashSet<PeerId>)>,
) {
let mut removed = HashSet::new();
let mut added = HashSet::new();
for (network, session, validators) in session_changes {
// Remove the existing validators
for validator in self.by_network.remove(&network).unwrap_or_else(HashSet::new) {
// Get all networks this validator is in
let mut networks = self.validators.remove(&validator).unwrap();
// Remove this one
networks.remove(&network);
if !networks.is_empty() {
// Insert the networks back if the validator was present in other networks
self.validators.insert(validator, networks);
} else {
// Because this validator is no longer present in any network, mark them as removed
/*
This isn't accurate. The validator isn't present in the latest session for this
network. The validator was present in the prior session which has yet to retire. Our
lack of explicit inclusion for both the prior session and the current session causes
only the validators mutually present in both sessions to be responsible for all actions
still ongoing as the prior validator set retires.
TODO: Fix this
*/
removed.insert(validator);
}
}
// Add the new validators
for validator in validators.iter().copied() {
self.validators.entry(validator).or_insert_with(HashSet::new).insert(network);
added.insert(validator);
}
self.by_network.insert(network, validators);
// Update the session we have populated
self.sessions.insert(network, session);
}
// Only flag validators for removal if they weren't simultaneously added by these changes
removed.retain(|validator| !added.contains(validator));
// Send the changes, dropping the error
// This lets the caller opt-out of change notifications by dropping the receiver
let _: Result<_, _> = self.changes.send(Changes { removed, added });
}
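// An example of the bookkeeping in `incorporate_session_changes`: a validator active in
// both the Bitcoin and Monero sets who drops out of a new Bitcoin session keeps their
// `validators` entry (now solely {Monero}) and is not reported as removed; only a
// validator left with no networks at all lands in `Changes::removed`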
/// Update the view of the validators.
pub(crate) async fn update(&mut self) -> Result<(), RpcError> {
let session_changes = Self::session_changes(&*self.serai, &self.sessions).await?;
self.incorporate_session_changes(session_changes);
Ok(())
}
pub(crate) fn by_network(&self) -> &HashMap<ExternalNetworkId, HashSet<PeerId>> {
&self.by_network
}
pub(crate) fn networks(&self, peer_id: &PeerId) -> Option<&HashSet<ExternalNetworkId>> {
self.validators.get(peer_id)
}
}
/// A task which updates a set of validators.
///
/// The validators managed by this task will have their exclusive lock held for a minimal amount of
/// time while the update occurs to minimize the disruption to the services relying on it.
pub(crate) struct UpdateValidatorsTask {
validators: Arc<RwLock<Validators>>,
}
impl UpdateValidatorsTask {
/// Spawn a new instance of the UpdateValidatorsTask.
///
/// This returns a reference to the Validators it updates after spawning itself.
pub(crate) fn spawn(
serai: Arc<Serai>,
) -> (Arc<RwLock<Validators>>, mpsc::UnboundedReceiver<Changes>) {
// The validators which will be updated
let (validators, changes) = Validators::new(serai);
let validators = Arc::new(RwLock::new(validators));
// Define the task
let (update_validators_task, update_validators_task_handle) = Task::new();
// Forget the handle, as dropping the handle would stop the task
core::mem::forget(update_validators_task_handle);
// Spawn the task
tokio::spawn(
(Self { validators: validators.clone() }).continually_run(update_validators_task, vec![]),
);
// Return the validators
(validators, changes)
}
}
impl ContinuallyRan for UpdateValidatorsTask {
// Only run every minute, not the default of every five seconds
const DELAY_BETWEEN_ITERATIONS: u64 = 60;
const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
type Error = RpcError;
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {
let session_changes = {
let validators = self.validators.read().await;
Validators::session_changes(validators.serai.clone(), validators.sessions.clone()).await?
};
self.validators.write().await.incorporate_session_changes(session_changes);
Ok(true)
}
}
}

View File

@@ -1,151 +0,0 @@
use core::future::Future;
use std::time::{Duration, SystemTime};
use serai_primitives::validator_sets::{ExternalValidatorSet, KeyShares};
use futures_lite::FutureExt;
use tributary_sdk::{ReadWrite, TransactionTrait, Block, Tributary, TributaryReader};
use serai_db::*;
use serai_task::ContinuallyRan;
use crate::{Heartbeat, Peer, P2p};
// Amount of blocks in a minute
const BLOCKS_PER_MINUTE: usize =
(60 / (tributary_sdk::tendermint::TARGET_BLOCK_TIME / 1000)) as usize;
/// The minimum amount of blocks to include within a batch, assuming there are blocks to
/// include in the batch.
///
/// This decides the size limit of the Batch (the Block size limit multiplied by the minimum amount
/// of blocks we'll send). The actual amount of blocks sent will be the amount which fits within
/// the size limit.
pub const MIN_BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;
/// The size limit for a batch of blocks sent in response to a Heartbeat.
///
/// This estimates the size of a commit as `32 + (MAX_VALIDATORS * 128)`. At the time of writing, a
/// commit is `8 + (validators * 32) + (32 + (validators * 32))` (for the time, list of validators,
/// and aggregate signature). Accordingly, this should be a safe over-estimate.
pub const BATCH_SIZE_LIMIT: usize = MIN_BLOCKS_PER_BATCH *
(tributary_sdk::BLOCK_SIZE_LIMIT + 32 + ((KeyShares::MAX_PER_SET as usize) * 128));
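// A worked sketch with purely illustrative values, none of them the canonical
// constants: with TARGET_BLOCK_TIME = 8000ms, BLOCK_SIZE_LIMIT = 350_000 bytes, and
// KeyShares::MAX_PER_SET = 600, BLOCKS_PER_MINUTE = 60 / 8 = 7 and
// MIN_BLOCKS_PER_BATCH = 8. The commit estimate is 32 + (600 * 128) = 76_832 bytes, so
// BATCH_SIZE_LIMIT = 8 * (350_000 + 76_832) = 3_414_656 bytes (~3.4 MB)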
/// Sends a heartbeat to other validators on regular intervals informing them of our Tributary's
/// tip.
///
/// If the other validator has more blocks than we do, they're expected to inform us. This forms
/// the sync protocol for our Tributaries.
pub(crate) struct HeartbeatTask<TD: Db, Tx: TransactionTrait, P: P2p> {
pub(crate) set: ExternalValidatorSet,
pub(crate) tributary: Tributary<TD, Tx, P>,
pub(crate) reader: TributaryReader<TD, Tx>,
pub(crate) p2p: P,
}
impl<TD: Db, Tx: TransactionTrait, P: P2p> ContinuallyRan for HeartbeatTask<TD, Tx, P> {
type Error = String;
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {
// If our blockchain hasn't had a block in the past minute, trigger the heartbeat protocol
const TIME_TO_TRIGGER_SYNCING: Duration = Duration::from_secs(60);
let mut tip = self.reader.tip();
let time_since = {
let block_time = if let Some(time_of_block) = self.reader.time_of_block(&tip) {
SystemTime::UNIX_EPOCH + Duration::from_secs(time_of_block)
} else {
// If we couldn't fetch this block's time, assume it's old
// We don't want to declare its unix time as 0 and claim it's 50+ years old though
log::warn!(
"heartbeat task couldn't fetch the time of a block, flagging it as a minute old"
);
SystemTime::now() - TIME_TO_TRIGGER_SYNCING
};
SystemTime::now().duration_since(block_time).unwrap_or(Duration::ZERO)
};
let mut tip_is_stale = false;
let mut synced_block = false;
if TIME_TO_TRIGGER_SYNCING <= time_since {
log::warn!(
"last known tributary block for {:?} was {} seconds ago",
self.set,
time_since.as_secs()
);
// This requests all peers for this network, without differentiating by session
// This should be fine as most validators should overlap across sessions
'peer: for peer in self.p2p.peers(self.set.network).await {
loop {
// Create the request for blocks
if tip_is_stale {
tip = self.reader.tip();
tip_is_stale = false;
}
// Necessary due to https://github.com/rust-lang/rust/issues/100013
let Some(blocks) = peer
.send_heartbeat(Heartbeat { set: self.set, latest_block_hash: tip })
.boxed()
.await
else {
continue 'peer;
};
// This is the final batch if it has less than the maximum amount of blocks
// (signifying there weren't more blocks after this to fill the batch with)
let final_batch = blocks.len() < MIN_BLOCKS_PER_BATCH;
// Sync each block
for block_with_commit in blocks {
let Ok(block) = Block::read(&mut block_with_commit.block.as_slice()) else {
// TODO: Disconnect/slash this peer
log::warn!("received invalid Block inside response to heartbeat");
continue 'peer;
};
// Attempt to sync the block
if !self.tributary.sync_block(block, block_with_commit.commit).await {
// The block may be invalid or stale if we added a block elsewhere
if (!tip_is_stale) && (tip != self.reader.tip()) {
// Since the Tributary's tip advanced on its own, return
return Ok(false);
}
// Since this block was invalid or stale in a way non-trivial to detect, try to
// sync with the next peer
continue 'peer;
}
// Because we synced a block, flag the tip as stale
tip_is_stale = true;
// And that we did sync a block
synced_block = true;
}
// If this was the final batch, move on from this peer
// We could assume they were honest and we are done syncing the chain, but this is a
// bit more robust
if final_batch {
continue 'peer;
}
}
}
// This will cause the task to be run less and less often, ensuring we aren't spamming
// the network if we legitimately aren't making progress
if !synced_block {
Err(format!(
"tried to sync blocks for {:?} since we haven't seen one in {} seconds but didn't",
self.set,
time_since.as_secs(),
))?;
}
}
Ok(synced_block)
}
}
}

View File

@@ -1,204 +0,0 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
use core::future::Future;
use std::collections::HashMap;
use borsh::{BorshSerialize, BorshDeserialize};
use serai_primitives::{network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet};
use serai_db::Db;
use tributary_sdk::{ReadWrite, TransactionTrait, Tributary, TributaryReader};
use serai_cosign::{SignedCosign, Cosigning};
use tokio::sync::{mpsc, oneshot};
use serai_task::{Task, ContinuallyRan};
/// The heartbeat task, effecting sync of Tributaries
pub mod heartbeat;
use crate::heartbeat::HeartbeatTask;
/// A heartbeat for a Tributary.
#[derive(Clone, Copy, BorshSerialize, BorshDeserialize, Debug)]
pub struct Heartbeat {
/// The Tributary this is the heartbeat of.
pub set: ExternalValidatorSet,
/// The hash of the latest block added to the Tributary.
pub latest_block_hash: [u8; 32],
}
/// A tributary block and its commit.
#[derive(Clone, BorshSerialize, BorshDeserialize)]
pub struct TributaryBlockWithCommit {
/// The serialized block.
pub block: Vec<u8>,
/// The serialized commit.
pub commit: Vec<u8>,
}
/// A representation of a peer.
pub trait Peer<'a>: Send {
/// Send a heartbeat to this peer.
fn send_heartbeat(
&self,
heartbeat: Heartbeat,
) -> impl Send + Future<Output = Option<Vec<TributaryBlockWithCommit>>>;
}
/// The representation of the P2P network.
pub trait P2p:
Send + Sync + Clone + tributary_sdk::P2p + serai_cosign::RequestNotableCosigns
{
/// The representation of a peer.
type Peer<'a>: Peer<'a>;
/// Fetch the peers for this network.
fn peers(&self, network: ExternalNetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>>;
/// Broadcast a cosign.
fn publish_cosign(&self, cosign: SignedCosign) -> impl Send + Future<Output = ()>;
/// A cancel-safe future for the next heartbeat received over the P2P network.
///
/// Yields the validator set it's for, the latest block hash observed, and a channel to return the
/// descending blocks. This channel MUST NOT and will not have its receiver dropped before a
/// message is sent.
fn heartbeat(
&self,
) -> impl Send + Future<Output = (Heartbeat, oneshot::Sender<Vec<TributaryBlockWithCommit>>)>;
/// A cancel-safe future for the next request for the notable cosigns of a global session.
///
/// Yields the global session the request is for and a channel to return the notable cosigns.
/// This channel MUST NOT and will not have its receiver dropped before a message is sent.
fn notable_cosigns_request(
&self,
) -> impl Send + Future<Output = ([u8; 32], oneshot::Sender<Vec<SignedCosign>>)>;
/// A cancel-safe future for the next message regarding a Tributary.
///
/// Yields the message's Tributary's genesis block hash and the message.
fn tributary_message(&self) -> impl Send + Future<Output = ([u8; 32], Vec<u8>)>;
/// A cancel-safe future for the next cosign received.
fn cosign(&self) -> impl Send + Future<Output = SignedCosign>;
}
fn handle_notable_cosigns_request<D: Db>(
db: &D,
global_session: [u8; 32],
channel: oneshot::Sender<Vec<SignedCosign>>,
) {
let cosigns = Cosigning::<D>::notable_cosigns(db, global_session);
channel.send(cosigns).expect("channel listening for cosign oneshot response was dropped?");
}
fn handle_heartbeat<D: Db, T: TransactionTrait>(
reader: &TributaryReader<D, T>,
mut latest_block_hash: [u8; 32],
channel: oneshot::Sender<Vec<TributaryBlockWithCommit>>,
) {
let mut res_size = 8;
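// `res_size` estimates the serialized response's size; the initial 8 bytes, and the
// 8 bytes per field below, appear to be conservative allowances for length prefixes
// (borsh encodes `Vec` lengths as `u32`s, so this over-estimates)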
let mut res = vec![];
// The former condition should be covered by the latter, as the size limit assumes
// MIN_BLOCKS_PER_BATCH maximally-sized blocks
while (res.len() < heartbeat::MIN_BLOCKS_PER_BATCH) || (res_size < heartbeat::BATCH_SIZE_LIMIT) {
let Some(block_after) = reader.block_after(&latest_block_hash) else { break };
// These `break` conditions should only occur under edge cases, such as if we're actively
// deleting this Tributary due to being done with it
let Some(block) = reader.block(&block_after) else { break };
let block = block.serialize();
let Some(commit) = reader.commit(&block_after) else { break };
res_size += 8 + block.len() + 8 + commit.len();
res.push(TributaryBlockWithCommit { block, commit });
latest_block_hash = block_after;
}
channel
.send(res)
.map_err(|_| ())
.expect("channel listening for heartbeat oneshot response was dropped?");
}
/// Run the P2P instance.
///
/// `add_tributary`'s and `retire_tributary`'s senders, along with `send_cosigns`'s receiver, must
/// never be dropped. `retire_tributary` is not required to only be instructed with added
/// Tributaries.
pub async fn run<TD: Db, Tx: TransactionTrait, P: P2p>(
db: impl Db,
p2p: P,
mut add_tributary: mpsc::UnboundedReceiver<(ExternalValidatorSet, Tributary<TD, Tx, P>)>,
mut retire_tributary: mpsc::UnboundedReceiver<ExternalValidatorSet>,
send_cosigns: mpsc::UnboundedSender<SignedCosign>,
) {
let mut readers = HashMap::<ExternalValidatorSet, TributaryReader<TD, Tx>>::new();
let mut tributaries = HashMap::<[u8; 32], mpsc::UnboundedSender<Vec<u8>>>::new();
let mut heartbeat_tasks = HashMap::<ExternalValidatorSet, _>::new();
loop {
tokio::select! {
tributary = add_tributary.recv() => {
let (set, tributary) = tributary.expect("add_tributary send was dropped");
let reader = tributary.reader();
readers.insert(set, reader.clone());
let (heartbeat_task_def, heartbeat_task) = Task::new();
tokio::spawn(
(HeartbeatTask {
set,
tributary: tributary.clone(),
reader: reader.clone(),
p2p: p2p.clone(),
}).continually_run(heartbeat_task_def, vec![])
);
heartbeat_tasks.insert(set, heartbeat_task);
let (tributary_message_send, mut tributary_message_recv) = mpsc::unbounded_channel();
tributaries.insert(tributary.genesis(), tributary_message_send);
// For as long as this sender exists, handle the messages from it on a dedicated task
tokio::spawn(async move {
while let Some(message) = tributary_message_recv.recv().await {
tributary.handle_message(&message).await;
}
});
}
set = retire_tributary.recv() => {
let set = set.expect("retire_tributary send was dropped");
let Some(reader) = readers.remove(&set) else { continue };
tributaries.remove(&reader.genesis()).expect("tributary reader but no tributary");
heartbeat_tasks.remove(&set).expect("tributary but no heartbeat task");
}
(heartbeat, channel) = p2p.heartbeat() => {
if let Some(reader) = readers.get(&heartbeat.set) {
let reader = reader.clone(); // This is a cheap clone
// We spawn this on a task due to the DB reads needed
tokio::spawn(async move {
handle_heartbeat(&reader, heartbeat.latest_block_hash, channel)
});
}
}
(global_session, channel) = p2p.notable_cosigns_request() => {
tokio::spawn({
let db = db.clone();
async move { handle_notable_cosigns_request(&db, global_session, channel) }
});
}
(tributary, message) = p2p.tributary_message() => {
if let Some(tributary) = tributaries.get(&tributary) {
tributary.send(message).expect("tributary message recv was dropped?");
}
}
cosign = p2p.cosign() => {
// We don't call `Cosigning::intake_cosign` here as that can only be called from a single
// location. We also need to intake the cosigns we produce, which means we need to merge
// these streams (signing, network) somehow. That's done with this mpsc channel
send_cosigns.send(cosign).expect("channel receiving cosigns was dropped");
}
}
}
}

View File

@@ -0,0 +1,333 @@
use core::time::Duration;
use std::{
sync::Arc,
collections::{HashSet, HashMap},
};
use tokio::{
sync::{mpsc, Mutex, RwLock},
time::sleep,
};
use borsh::BorshSerialize;
use sp_application_crypto::RuntimePublic;
use serai_client::{
primitives::{ExternalNetworkId, EXTERNAL_NETWORKS},
validator_sets::primitives::{ExternalValidatorSet, Session},
Serai, SeraiError, TemporalSerai,
};
use serai_db::{Get, DbTxn, Db, create_db};
use processor_messages::coordinator::cosign_block_msg;
use crate::{
p2p::{CosignedBlock, GossipMessageKind, P2p},
substrate::LatestCosignedBlock,
};
create_db! {
CosignDb {
ReceivedCosign: (set: ExternalValidatorSet, block: [u8; 32]) -> CosignedBlock,
LatestCosign: (network: ExternalNetworkId) -> CosignedBlock,
DistinctChain: (set: ExternalValidatorSet) -> (),
}
}
pub struct CosignEvaluator<D: Db> {
db: Mutex<D>,
serai: Arc<Serai>,
stakes: RwLock<Option<HashMap<ExternalNetworkId, u64>>>,
latest_cosigns: RwLock<HashMap<ExternalNetworkId, CosignedBlock>>,
}
impl<D: Db> CosignEvaluator<D> {
async fn update_latest_cosign(&self) {
let stakes_lock = self.stakes.read().await;
// If we haven't gotten the stake data yet, return
let Some(stakes) = stakes_lock.as_ref() else { return };
let total_stake = stakes.values().copied().sum::<u64>();
let latest_cosigns = self.latest_cosigns.read().await;
let mut highest_block = 0;
for cosign in latest_cosigns.values() {
let mut networks = HashSet::new();
for (network, sub_cosign) in &*latest_cosigns {
if sub_cosign.block_number >= cosign.block_number {
networks.insert(network);
}
}
let sum_stake =
networks.into_iter().map(|network| stakes.get(network).unwrap_or(&0)).sum::<u64>();
let needed_stake = ((total_stake * 2) / 3) + 1;
if (total_stake == 0) || (sum_stake > needed_stake) {
highest_block = highest_block.max(cosign.block_number);
}
}
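// A worked example with illustrative stakes {BTC: 50, ETH: 30, XMR: 20}: total_stake =
// 100 and needed_stake = 67, so a block number is only adopted once the networks whose
// latest cosigns reach that height hold more than 67 stake: BTC + ETH (80) suffices,
// BTC alone (50) does not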
let mut db_lock = self.db.lock().await;
let mut txn = db_lock.txn();
if highest_block > LatestCosignedBlock::latest_cosigned_block(&txn) {
log::info!("setting latest cosigned block to {}", highest_block);
LatestCosignedBlock::set(&mut txn, &highest_block);
}
txn.commit();
}
async fn update_stakes(&self) -> Result<(), SeraiError> {
let serai = self.serai.as_of_latest_finalized_block().await?;
let mut stakes = HashMap::new();
for network in EXTERNAL_NETWORKS {
// Use whether this network has published a Batch as a short-circuit for whether they've ever set a key
let set_key = serai.in_instructions().last_batch_for_network(network).await?.is_some();
if set_key {
stakes.insert(
network,
serai
.validator_sets()
.total_allocated_stake(network.into())
.await?
.expect("network which published a batch didn't have a stake set")
.0,
);
}
}
// Since we've successfully built stakes, set it
*self.stakes.write().await = Some(stakes);
self.update_latest_cosign().await;
Ok(())
}
// Uses Err to signify a message should be retried
async fn handle_new_cosign(&self, cosign: CosignedBlock) -> Result<(), SeraiError> {
// If we already have this cosign or a newer cosign, return
if let Some(latest) = self.latest_cosigns.read().await.get(&cosign.network) {
if latest.block_number >= cosign.block_number {
return Ok(());
}
}
// If this is an old cosign (older than a day), drop it
let latest_block = self.serai.latest_finalized_block().await?;
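// 24 * 60 * 60 / 6 = 14_400 blocks, i.e. one day's worth assuming Serai's ~6-second
// block time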
if (cosign.block_number + (24 * 60 * 60 / 6)) < latest_block.number() {
log::debug!("received old cosign supposedly signed by {:?}", cosign.network);
return Ok(());
}
let Some(block) = self.serai.finalized_block_by_number(cosign.block_number).await? else {
log::warn!("received cosign with a block number which doesn't map to a block");
return Ok(());
};
async fn set_with_keys_fn(
serai: &TemporalSerai<'_>,
network: ExternalNetworkId,
) -> Result<Option<ExternalValidatorSet>, SeraiError> {
let Some(latest_session) = serai.validator_sets().session(network.into()).await? else {
log::warn!("received cosign from {:?}, which doesn't yet have a session", network);
return Ok(None);
};
let prior_session = Session(latest_session.0.saturating_sub(1));
Ok(Some(
if serai
.validator_sets()
.keys(ExternalValidatorSet { network, session: prior_session })
.await?
.is_some()
{
ExternalValidatorSet { network, session: prior_session }
} else {
ExternalValidatorSet { network, session: latest_session }
},
))
}
// Get the key for this network as of the prior block
// If we have two chains, this value may be different across chains depending on if one chain
// included the set_keys and one didn't
// Because set_keys will force a cosign, it will force detection of distinct blocks
// re: set_keys using keys prior to set_keys (assumed amenable to all)
let serai = self.serai.as_of(block.header.parent_hash.into());
let Some(set_with_keys) = set_with_keys_fn(&serai, cosign.network).await? else {
return Ok(());
};
let Some(keys) = serai.validator_sets().keys(set_with_keys).await? else {
log::warn!("received cosign for a block we didn't have keys for");
return Ok(());
};
if !keys
.0
.verify(&cosign_block_msg(cosign.block_number, cosign.block), &cosign.signature.into())
{
log::warn!("received cosigned block with an invalid signature");
return Ok(());
}
log::info!(
"received cosign for block {} ({}) by {:?}",
block.number(),
hex::encode(cosign.block),
cosign.network
);
// Save this cosign to the DB
{
let mut db = self.db.lock().await;
let mut txn = db.txn();
ReceivedCosign::set(&mut txn, set_with_keys, cosign.block, &cosign);
LatestCosign::set(&mut txn, set_with_keys.network, &(cosign));
txn.commit();
}
if cosign.block != block.hash() {
log::error!(
"received cosign for a distinct block at {}. we have {}. cosign had {}",
cosign.block_number,
hex::encode(block.hash()),
hex::encode(cosign.block)
);
let serai = self.serai.as_of(latest_block.hash());
let mut db = self.db.lock().await;
// Save this set as being on a different chain
let mut txn = db.txn();
DistinctChain::set(&mut txn, set_with_keys, &());
txn.commit();
let mut total_stake = 0;
let mut total_on_distinct_chain = 0;
for network in EXTERNAL_NETWORKS {
// Get the current set for this network
let set_with_keys = {
let mut res;
while {
res = set_with_keys_fn(&serai, network).await;
res.is_err()
} {
log::error!(
"couldn't get the set with keys when checking for a distinct chain: {:?}",
res
);
tokio::time::sleep(core::time::Duration::from_secs(3)).await;
}
res.unwrap()
};
// Get its stake
// Doesn't use the stakes inside self to prevent deadlocks re: multi-lock acquisition
if let Some(set_with_keys) = set_with_keys {
let stake = {
let mut res;
while {
res =
serai.validator_sets().total_allocated_stake(set_with_keys.network.into()).await;
res.is_err()
} {
log::error!(
"couldn't get total allocated stake when checking for a distinct chain: {:?}",
res
);
tokio::time::sleep(core::time::Duration::from_secs(3)).await;
}
res.unwrap()
};
if let Some(stake) = stake {
total_stake += stake.0;
if DistinctChain::get(&*db, set_with_keys).is_some() {
total_on_distinct_chain += stake.0;
}
}
}
}
// See https://github.com/serai-dex/serai/issues/339 for the reasoning on 17%
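// e.g. with total_stake = 1000 (illustrative), the node halts once sets holding at
// least 170 stake have cosigned a block we don't recognize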
if (total_stake * 17 / 100) <= total_on_distinct_chain {
panic!("17% of validator sets (by stake) have co-signed a distinct chain");
}
} else {
{
let mut latest_cosigns = self.latest_cosigns.write().await;
latest_cosigns.insert(cosign.network, cosign);
}
self.update_latest_cosign().await;
}
Ok(())
}
#[allow(clippy::new_ret_no_self)]
pub fn new<P: P2p>(db: D, p2p: P, serai: Arc<Serai>) -> mpsc::UnboundedSender<CosignedBlock> {
let mut latest_cosigns = HashMap::new();
for network in EXTERNAL_NETWORKS {
if let Some(cosign) = LatestCosign::get(&db, network) {
latest_cosigns.insert(network, cosign);
}
}
let evaluator = Arc::new(Self {
db: Mutex::new(db),
serai,
stakes: RwLock::new(None),
latest_cosigns: RwLock::new(latest_cosigns),
});
// Spawn a task to update stakes regularly
tokio::spawn({
let evaluator = evaluator.clone();
async move {
loop {
// Run this until it passes
while evaluator.update_stakes().await.is_err() {
log::warn!("couldn't update stakes in the cosign evaluator");
// Try again in 10 seconds
sleep(Duration::from_secs(10)).await;
}
// Run it every 10 minutes as we don't need the exact stake data for this to be valid
sleep(Duration::from_secs(10 * 60)).await;
}
}
});
// Spawn a task to receive cosigns and handle them
let (send, mut recv) = mpsc::unbounded_channel();
tokio::spawn({
let evaluator = evaluator.clone();
async move {
while let Some(msg) = recv.recv().await {
while evaluator.handle_new_cosign(msg).await.is_err() {
// Try again in 10 seconds
sleep(Duration::from_secs(10)).await;
}
}
}
});
// Spawn a task to rebroadcast the most recent cosigns
tokio::spawn({
async move {
loop {
let cosigns = evaluator.latest_cosigns.read().await.values().copied().collect::<Vec<_>>();
for cosign in cosigns {
let mut buf = vec![];
cosign.serialize(&mut buf).unwrap();
P2p::broadcast(&p2p, GossipMessageKind::CosignedBlock, buf).await;
}
sleep(Duration::from_secs(60)).await;
}
}
});
// Return the channel to send cosigns
send
}
}

View File

@@ -1,150 +1,134 @@
--- removed

use std::{path::Path, fs};

pub(crate) use serai_db::{Get, DbTxn, Db as DbTrait};
use serai_db::{create_db, db_channel};

use dkg::Participant;

use serai_client_serai::abi::primitives::{
  crypto::KeyPair,
  network_id::ExternalNetworkId,
  validator_sets::{Session, ExternalValidatorSet},
};

use serai_cosign::SignedCosign;
use serai_coordinator_substrate::NewSetInformation;
use serai_coordinator_tributary::Transaction;

#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
pub(crate) type Db = std::sync::Arc<serai_db::ParityDb>;
#[cfg(feature = "rocksdb")]
pub(crate) type Db = serai_db::RocksDB;

#[allow(unused_variables, unreachable_code)]
fn db(path: &str) -> Db {
  {
    let path: &Path = path.as_ref();
    // This may error if this path already exists, which we shouldn't propagate/panic on. If this
    // is a problem (such as we don't have the necessary permissions to write to this path), we
    // expect the following DB opening to error.
    let _: Result<_, _> = fs::create_dir_all(path.parent().unwrap());
  }

  #[cfg(all(feature = "parity-db", feature = "rocksdb"))]
  panic!("built with parity-db and rocksdb");
  #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
  let db = serai_db::new_parity_db(path);
  #[cfg(feature = "rocksdb")]
  let db = serai_db::new_rocksdb(path);
  db
}

pub(crate) fn coordinator_db() -> Db {
  let root_path = serai_env::var("DB_PATH").expect("path to DB wasn't specified");
  db(&format!("{root_path}/coordinator/db"))
}

fn tributary_db_folder(set: ExternalValidatorSet) -> String {
  let root_path = serai_env::var("DB_PATH").expect("path to DB wasn't specified");
  let network = match set.network {
    ExternalNetworkId::Bitcoin => "Bitcoin",
    ExternalNetworkId::Ethereum => "Ethereum",
    ExternalNetworkId::Monero => "Monero",
  };
  format!("{root_path}/tributary-{network}-{}", set.session.0)
}

pub(crate) fn tributary_db(set: ExternalValidatorSet) -> Db {
  db(&format!("{}/db", tributary_db_folder(set)))
}

pub(crate) fn prune_tributary_db(set: ExternalValidatorSet) {
  log::info!("pruning data directory for tributary {set:?}");
  let db = tributary_db_folder(set);
  if fs::exists(&db).expect("couldn't check if tributary DB exists") {
    fs::remove_dir_all(db).unwrap();
  }
}

create_db! {
  Coordinator {
    // The currently active Tributaries
    ActiveTributaries: () -> Vec<NewSetInformation>,
    // The latest Tributary to have been retired for a network
    // Since Tributaries are retired sequentially, this is informative to if any Tributary has been
    // retired
    RetiredTributary: (network: ExternalNetworkId) -> Session,
    // The last handled message from a Processor
    LastProcessorMessage: (network: ExternalNetworkId) -> u64,
    // Cosigns we produced and tried to intake yet incurred an error while doing so
    ErroneousCosigns: () -> Vec<SignedCosign>,
    // The keys to confirm and set on the Serai network
    KeysToConfirm: (set: ExternalValidatorSet) -> KeyPair,
    // The key was set on the Serai network
    KeySet: (set: ExternalValidatorSet) -> (),
  }
}

db_channel! {
  Coordinator {
    // Cosigns we produced
    SignedCosigns: () -> SignedCosign,
    // Tributaries to clean up upon reboot
    TributaryCleanup: () -> ExternalValidatorSet,
  }
}

mod _internal_db {
  use super::*;

  db_channel! {
    Coordinator {
      // Tributary transactions to publish from the Processor messages
      TributaryTransactionsFromProcessorMessages: (set: ExternalValidatorSet) -> Transaction,
      // Tributary transactions to publish from the DKG confirmation task
      TributaryTransactionsFromDkgConfirmation: (set: ExternalValidatorSet) -> Transaction,
      // Participants to remove
      RemoveParticipant: (set: ExternalValidatorSet) -> u16,
    }
  }
}

pub(crate) struct TributaryTransactionsFromProcessorMessages;
impl TributaryTransactionsFromProcessorMessages {
  pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, tx: &Transaction) {
    // If this set has yet to be retired, send this transaction
    if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
      _internal_db::TributaryTransactionsFromProcessorMessages::send(txn, set, tx);
    }
  }
  pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Transaction> {
    _internal_db::TributaryTransactionsFromProcessorMessages::try_recv(txn, set)
  }
}

pub(crate) struct TributaryTransactionsFromDkgConfirmation;
impl TributaryTransactionsFromDkgConfirmation {
  pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, tx: &Transaction) {
    // If this set has yet to be retired, send this transaction
    if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
      _internal_db::TributaryTransactionsFromDkgConfirmation::send(txn, set, tx);
    }
  }
  pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Transaction> {
    _internal_db::TributaryTransactionsFromDkgConfirmation::try_recv(txn, set)
  }
}

pub(crate) struct RemoveParticipant;
impl RemoveParticipant {
  pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, participant: Participant) {
    // If this set has yet to be retired, send this transaction
    if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
      _internal_db::RemoveParticipant::send(txn, set, &u16::from(participant));
    }
  }
  pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Participant> {
    _internal_db::RemoveParticipant::try_recv(txn, set)
      .map(|i| Participant::new(i).expect("sent invalid participant index for removal"))
  }
}

+++ added

use blake2::{
  digest::{consts::U32, Digest},
  Blake2b,
};

use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};

use serai_client::{
  in_instructions::primitives::{Batch, SignedBatch},
  primitives::ExternalNetworkId,
  validator_sets::primitives::{ExternalValidatorSet, Session},
};

pub use serai_db::*;

use ::tributary::ReadWrite;
use crate::tributary::{TributarySpec, Transaction, scanner::RecognizedIdType};

create_db!(
  MainDb {
    HandledMessageDb: (network: ExternalNetworkId) -> u64,
    ActiveTributaryDb: () -> Vec<u8>,
    RetiredTributaryDb: (set: ExternalValidatorSet) -> (),
    FirstPreprocessDb: (
      network: ExternalNetworkId,
      id_type: RecognizedIdType,
      id: &[u8]
    ) -> Vec<Vec<u8>>,
    LastReceivedBatchDb: (network: ExternalNetworkId) -> u32,
    ExpectedBatchDb: (network: ExternalNetworkId, id: u32) -> [u8; 32],
    BatchDb: (network: ExternalNetworkId, id: u32) -> SignedBatch,
    LastVerifiedBatchDb: (network: ExternalNetworkId) -> u32,
    HandoverBatchDb: (set: ExternalValidatorSet) -> u32,
    LookupHandoverBatchDb: (network: ExternalNetworkId, batch: u32) -> Session,
    QueuedBatchesDb: (set: ExternalValidatorSet) -> Vec<u8>
  }
);

impl ActiveTributaryDb {
  pub fn active_tributaries<G: Get>(getter: &G) -> (Vec<u8>, Vec<TributarySpec>) {
    let bytes = Self::get(getter).unwrap_or_default();
    let mut bytes_ref: &[u8] = bytes.as_ref();

    let mut tributaries = vec![];
    while !bytes_ref.is_empty() {
      tributaries.push(TributarySpec::deserialize_reader(&mut bytes_ref).unwrap());
    }

    (bytes, tributaries)
  }

  pub fn add_participating_in_tributary(txn: &mut impl DbTxn, spec: &TributarySpec) {
    let (mut existing_bytes, existing) = ActiveTributaryDb::active_tributaries(txn);
    for tributary in &existing {
      if tributary == spec {
        return;
      }
    }

    spec.serialize(&mut existing_bytes).unwrap();
    ActiveTributaryDb::set(txn, &existing_bytes);
  }

  pub fn retire_tributary(txn: &mut impl DbTxn, set: ExternalValidatorSet) {
    let mut active = Self::active_tributaries(txn).1;
    for i in 0 .. active.len() {
      if active[i].set() == set {
        active.remove(i);
        break;
      }
    }

    let mut bytes = vec![];
    for active in active {
      active.serialize(&mut bytes).unwrap();
    }
    Self::set(txn, &bytes);
    RetiredTributaryDb::set(txn, set, &());
  }
}

impl FirstPreprocessDb {
  pub fn save_first_preprocess(
    txn: &mut impl DbTxn,
    network: ExternalNetworkId,
    id_type: RecognizedIdType,
    id: &[u8],
    preprocess: &Vec<Vec<u8>>,
  ) {
    if let Some(existing) = FirstPreprocessDb::get(txn, network, id_type, id) {
      assert_eq!(&existing, preprocess, "saved a distinct first preprocess");
      return;
    }
    FirstPreprocessDb::set(txn, network, id_type, id, preprocess);
  }
}

impl ExpectedBatchDb {
  pub fn save_expected_batch(txn: &mut impl DbTxn, batch: &Batch) {
    LastReceivedBatchDb::set(txn, batch.network, &batch.id);
    Self::set(
      txn,
      batch.network,
      batch.id,
      &Blake2b::<U32>::digest(batch.instructions.encode()).into(),
    );
  }
}

impl HandoverBatchDb {
  pub fn set_handover_batch(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: u32) {
    Self::set(txn, set, &batch);
    LookupHandoverBatchDb::set(txn, set.network, batch, &set.session);
  }
}

impl QueuedBatchesDb {
  pub fn queue(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: &Transaction) {
    let mut batches = Self::get(txn, set).unwrap_or_default();
    batch.write(&mut batches).unwrap();
    Self::set(txn, set, &batches);
  }

  pub fn take(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec<Transaction> {
    let batches_vec = Self::get(txn, set).unwrap_or_default();
    txn.del(Self::key(set));

    let mut batches: &[u8] = &batches_vec;
    let mut res = vec![];
    while !batches.is_empty() {
      res.push(Transaction::read(&mut batches).unwrap());
    }
    res
  }
}
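Both versions lean on the same macro-generated storage API. A minimal sketch of the access pattern, inferred from its usage across this diff (names taken from the removed version; signatures illustrative, not authoritative):

  // Illustrative: `create_db!` entries expose typed set/get keyed on their declared arguments
  let mut txn = db.txn();
  KeysToConfirm::set(&mut txn, set, &key_pair);
  let pending: Option<KeyPair> = KeysToConfirm::get(&txn, set);
  // `db_channel!` entries expose send/try_recv as a durable FIFO channel
  TributaryCleanup::send(&mut txn, &set);
  let to_clean: Option<ExternalValidatorSet> = TributaryCleanup::try_recv(&mut txn);
  txn.commit();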

View File

@@ -1,441 +0,0 @@
use core::{ops::Deref, future::Future};
use std::{boxed::Box, collections::HashMap};
use zeroize::Zeroizing;
use rand_core::OsRng;
use ciphersuite::{group::GroupEncoding, *};
use dkg::{Participant, musig};
use frost_schnorrkel::{
frost::{curve::Ristretto, FrostError, sign::*},
Schnorrkel,
};
use serai_db::{DbTxn, Db as DbTrait};
#[rustfmt::skip]
use serai_client_serai::abi::primitives::{validator_sets::ExternalValidatorSet, address::SeraiAddress};
use serai_task::{DoesNotError, ContinuallyRan};
use serai_coordinator_substrate::{NewSetInformation, Keys};
use serai_coordinator_tributary::{Transaction, DkgConfirmationMessages};
use crate::{KeysToConfirm, KeySet, TributaryTransactionsFromDkgConfirmation};
fn schnorrkel() -> Schnorrkel {
Schnorrkel::new(b"substrate") // TODO: Pull the constant for this
}
fn our_i(
set: &NewSetInformation,
key: &Zeroizing<<Ristretto as WrappedGroup>::F>,
data: &HashMap<Participant, Vec<u8>>,
) -> Participant {
let public = SeraiAddress((Ristretto::generator() * key.deref()).to_bytes());
let mut our_i = None;
for participant in data.keys() {
let validator_index = usize::from(u16::from(*participant) - 1);
let (validator, _weight) = set.validators[validator_index];
if validator == public {
our_i = Some(*participant);
}
}
our_i.unwrap()
}
// Take a HashMap of participations with non-contiguous Participants and convert them to a
// contiguous sequence.
//
// The input data is expected to not include our own data, which also won't be in the output data.
//
// Returns the mapping from the contiguous Participants to the original Participants.
fn make_contiguous<T>(
our_i: Participant,
mut data: HashMap<Participant, Vec<u8>>,
transform: impl Fn(Vec<u8>) -> std::io::Result<T>,
) -> Result<HashMap<Participant, T>, Participant> {
assert!(!data.contains_key(&our_i));
let mut ordered_participants = data.keys().copied().collect::<Vec<_>>();
ordered_participants.sort_by_key(|participant| u16::from(*participant));
let mut our_i = Some(our_i);
let mut contiguous = HashMap::new();
let mut i = 1;
for participant in ordered_participants {
// If this is the first participant after our own index, increment to account for our index
if let Some(our_i_value) = our_i {
if u16::from(participant) > u16::from(our_i_value) {
i += 1;
our_i = None;
}
}
let contiguous_index = Participant::new(i).unwrap();
let data = match transform(data.remove(&participant).unwrap()) {
Ok(data) => data,
Err(_) => Err(participant)?,
};
contiguous.insert(contiguous_index, data);
i += 1;
}
Ok(contiguous)
}
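// Worked example (illustrative): with our_i = 2 and input keys {1, 4}, original participant 1
// maps to contiguous 1, `i` is then bumped past our own slot, and original participant 4 maps
// to contiguous 3, leaving contiguous index 2 reserved for ourselves.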
fn handle_frost_error<T>(result: Result<T, FrostError>) -> Result<T, Participant> {
match &result {
Ok(_) => Ok(result.unwrap()),
Err(FrostError::InvalidPreprocess(participant) | FrostError::InvalidShare(participant)) => {
Err(*participant)
}
// All of these should be unreachable
Err(
FrostError::InternalError(_) |
FrostError::InvalidParticipant(_, _) |
FrostError::InvalidSigningSet(_) |
FrostError::InvalidParticipantQuantity(_, _) |
FrostError::DuplicatedParticipant(_) |
FrostError::MissingParticipant(_),
) => {
result.unwrap();
unreachable!("continued execution after unwrapping Result::Err");
}
}
}
#[rustfmt::skip]
enum Signer {
Preprocess { attempt: u32, seed: CachedPreprocess, preprocess: [u8; 64] },
Share {
attempt: u32,
musig_validators: Vec<SeraiAddress>,
share: [u8; 32],
machine: Box<AlgorithmSignatureMachine<Ristretto, Schnorrkel>>,
},
}
/// Performs the DKG Confirmation protocol.
pub(crate) struct ConfirmDkgTask<CD: DbTrait, TD: DbTrait> {
db: CD,
set: NewSetInformation,
tributary_db: TD,
key: Zeroizing<<Ristretto as WrappedGroup>::F>,
signer: Option<Signer>,
}
impl<CD: DbTrait, TD: DbTrait> ConfirmDkgTask<CD, TD> {
pub(crate) fn new(
db: CD,
set: NewSetInformation,
tributary_db: TD,
key: Zeroizing<<Ristretto as WrappedGroup>::F>,
) -> Self {
Self { db, set, tributary_db, key, signer: None }
}
fn slash(db: &mut CD, set: ExternalValidatorSet, validator: SeraiAddress) {
let mut txn = db.txn();
TributaryTransactionsFromDkgConfirmation::send(
&mut txn,
set,
&Transaction::RemoveParticipant { participant: validator, signed: Default::default() },
);
txn.commit();
}
fn preprocess(
db: &mut CD,
set: ExternalValidatorSet,
attempt: u32,
key: Zeroizing<<Ristretto as WrappedGroup>::F>,
signer: &mut Option<Signer>,
) {
// Perform the preprocess
let public_key = Ristretto::generator() * key.deref();
let (machine, preprocess) = AlgorithmMachine::new(
schnorrkel(),
// We use a 1-of-1 Musig here as we don't know who will actually be in this Musig yet
musig(ExternalValidatorSet::musig_context(&set), key, &[public_key]).unwrap(),
)
.preprocess(&mut OsRng);
// We take the preprocess so we can use it in a distinct machine with the actual Musig
// parameters
let seed = machine.cache();
let mut preprocess_bytes = [0u8; 64];
preprocess_bytes.copy_from_slice(&preprocess.serialize());
let preprocess = preprocess_bytes;
let mut txn = db.txn();
// If this attempt has already been preprocessed for, the Tributary will de-duplicate it
// This may mean the Tributary preprocess is distinct from ours, but we check for that later
TributaryTransactionsFromDkgConfirmation::send(
&mut txn,
set,
&Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed: Default::default() },
);
txn.commit();
*signer = Some(Signer::Preprocess { attempt, seed, preprocess });
}
}
impl<CD: DbTrait, TD: DbTrait> ContinuallyRan for ConfirmDkgTask<CD, TD> {
type Error = DoesNotError;
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {
let mut made_progress = false;
// If we were sent a key to set, create the signer for it
if self.signer.is_none() && KeysToConfirm::get(&self.db, self.set.set).is_some() {
// Create and publish the initial preprocess
Self::preprocess(&mut self.db, self.set.set, 0, self.key.clone(), &mut self.signer);
made_progress = true;
}
// If we have keys to confirm, handle all messages from the tributary
if let Some(key_pair) = KeysToConfirm::get(&self.db, self.set.set) {
// Handle all messages from the Tributary
loop {
let mut tributary_txn = self.tributary_db.txn();
let Some(msg) = DkgConfirmationMessages::try_recv(&mut tributary_txn, self.set.set)
else {
break;
};
match msg {
messages::sign::CoordinatorMessage::Reattempt {
id: messages::sign::SignId { attempt, .. },
} => {
// Create and publish the preprocess for the specified attempt
Self::preprocess(
&mut self.db,
self.set.set,
attempt,
self.key.clone(),
&mut self.signer,
);
}
messages::sign::CoordinatorMessage::Preprocesses {
id: messages::sign::SignId { attempt, .. },
mut preprocesses,
} => {
// Confirm the preprocess we're expected to sign with is the one we locally have
// It may be different if we rebooted and made a second preprocess for this attempt
let Some(Signer::Preprocess { attempt: our_attempt, seed, preprocess }) =
self.signer.take()
else {
// If this message is not expected, commit the txn to drop it and move on
// At some point, we'll get a Reattempt and reset
tributary_txn.commit();
break;
};
// Determine the MuSig key signed with
let musig_validators = {
let mut ordered_participants = preprocesses.keys().copied().collect::<Vec<_>>();
ordered_participants.sort_by_key(|participant| u16::from(*participant));
let mut res = vec![];
for participant in ordered_participants {
let (validator, _weight) =
self.set.validators[usize::from(u16::from(participant) - 1)];
res.push(validator);
}
res
};
let musig_public_keys = musig_validators
.iter()
.map(|key| {
Ristretto::read_G(&mut key.0.as_slice())
.expect("Serai validator had invalid public key")
})
.collect::<Vec<_>>();
let keys = musig(
ExternalValidatorSet::musig_context(&self.set.set),
self.key.clone(),
&musig_public_keys,
)
.unwrap();
// Rebuild the machine
let (machine, preprocess_from_cache) =
AlgorithmSignMachine::from_cache(schnorrkel(), keys, seed);
assert_eq!(preprocess.as_slice(), preprocess_from_cache.serialize().as_slice());
// Ensure this is a consistent signing session
let our_i = our_i(&self.set, &self.key, &preprocesses);
let consistent = (attempt == our_attempt) &&
(preprocesses.remove(&our_i).unwrap().as_slice() == preprocess.as_slice());
if !consistent {
tributary_txn.commit();
break;
}
// Reformat the preprocesses into the expected format for Musig
let preprocesses = match make_contiguous(our_i, preprocesses, |preprocess| {
machine.read_preprocess(&mut preprocess.as_slice())
}) {
Ok(preprocesses) => preprocesses,
// This yields the *original participant index*
Err(participant) => {
Self::slash(
&mut self.db,
self.set.set,
self.set.validators[usize::from(u16::from(participant) - 1)].0,
);
tributary_txn.commit();
break;
}
};
// Calculate our share
let (machine, share) = match handle_frost_error(machine.sign(
preprocesses,
&ExternalValidatorSet::set_keys_message(&self.set.set, &key_pair),
)) {
Ok((machine, share)) => (machine, share),
// This yields the *musig participant index*
Err(participant) => {
Self::slash(
&mut self.db,
self.set.set,
musig_validators[usize::from(u16::from(participant) - 1)],
);
tributary_txn.commit();
break;
}
};
// Send our share
let share = <[u8; 32]>::try_from(share.serialize()).unwrap();
let mut txn = self.db.txn();
TributaryTransactionsFromDkgConfirmation::send(
&mut txn,
self.set.set,
&Transaction::DkgConfirmationShare { attempt, share, signed: Default::default() },
);
txn.commit();
self.signer = Some(Signer::Share {
attempt,
musig_validators,
share,
machine: Box::new(machine),
});
}
messages::sign::CoordinatorMessage::Shares {
id: messages::sign::SignId { attempt, .. },
mut shares,
} => {
let Some(Signer::Share { attempt: our_attempt, musig_validators, share, machine }) =
self.signer.take()
else {
tributary_txn.commit();
break;
};
// Ensure this is a consistent signing session
let our_i = our_i(&self.set, &self.key, &shares);
let consistent = (attempt == our_attempt) &&
(shares.remove(&our_i).unwrap().as_slice() == share.as_slice());
if !consistent {
tributary_txn.commit();
break;
}
// Reformat the shares into the expected format for Musig
let shares = match make_contiguous(our_i, shares, |share| {
machine.read_share(&mut share.as_slice())
}) {
Ok(shares) => shares,
// This yields the *original participant index*
Err(participant) => {
Self::slash(
&mut self.db,
self.set.set,
self.set.validators[usize::from(u16::from(participant) - 1)].0,
);
tributary_txn.commit();
break;
}
};
match handle_frost_error(machine.complete(shares)) {
Ok(signature) => {
// Create the bitvec of the participants
let mut signature_participants;
{
use bitvec::prelude::*;
signature_participants = bitvec![u8, Lsb0; 0; 0];
let mut i = 0;
for (validator, _) in &self.set.validators {
if Some(validator) == musig_validators.get(i) {
signature_participants.push(true);
i += 1;
} else {
signature_participants.push(false);
}
}
}
// This is safe to call multiple times as it'll just change which *valid*
// signature to publish
let mut txn = self.db.txn();
Keys::set(
&mut txn,
self.set.set,
key_pair.clone(),
signature_participants,
signature.into(),
);
txn.commit();
}
// This yields the *musig participant index*
Err(participant) => {
Self::slash(
&mut self.db,
self.set.set,
musig_validators[usize::from(u16::from(participant) - 1)],
);
tributary_txn.commit();
break;
}
}
}
}
// Because we successfully handled this message, note we made progress
made_progress = true;
tributary_txn.commit();
}
}
// Check if the key has been set on Serai
if KeysToConfirm::get(&self.db, self.set.set).is_some() &&
KeySet::get(&self.db, self.set.set).is_some()
{
// Take the keys to confirm so we never instantiate the signer again
let mut txn = self.db.txn();
KeysToConfirm::take(&mut txn, self.set.set);
KeySet::take(&mut txn, self.set.set);
txn.commit();
// Drop our own signer
// The task won't die until the Tributary does, but now it'll never do anything again
self.signer = None;
made_progress = true;
}
Ok(made_progress)
}
}
}

File diff suppressed because it is too large

coordinator/src/p2p.rs (new file, 1045 lines)

File diff suppressed because it is too large

View File

@@ -0,0 +1,46 @@
use std::sync::Arc;
use serai_client::primitives::ExternalNetworkId;
use processor_messages::{ProcessorMessage, CoordinatorMessage};
use message_queue::{Service, Metadata, client::MessageQueue};
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Message {
pub id: u64,
pub network: ExternalNetworkId,
pub msg: ProcessorMessage,
}
#[async_trait::async_trait]
pub trait Processors: 'static + Send + Sync + Clone {
async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>);
async fn recv(&self, network: ExternalNetworkId) -> Message;
async fn ack(&self, msg: Message);
}
#[async_trait::async_trait]
impl Processors for Arc<MessageQueue> {
async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>) {
let msg: CoordinatorMessage = msg.into();
let metadata =
Metadata { from: self.service, to: Service::Processor(network), intent: msg.intent() };
let msg = borsh::to_vec(&msg).unwrap();
self.queue(metadata, msg).await;
}
async fn recv(&self, network: ExternalNetworkId) -> Message {
let msg = self.next(Service::Processor(network)).await;
assert_eq!(msg.from, Service::Processor(network));
let id = msg.id;
// Deserialize it into a ProcessorMessage
let msg: ProcessorMessage =
borsh::from_slice(&msg.msg).expect("message wasn't a borsh-encoded ProcessorMessage");
return Message { id, network, msg };
}
async fn ack(&self, msg: Message) {
MessageQueue::ack(self, Service::Processor(msg.network), msg.id).await
}
}
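A minimal sketch of the consumption loop this trait implies (illustrative only; `drive` is a hypothetical helper, not part of this diff):

  async fn drive<P: Processors>(processors: P, network: ExternalNetworkId) {
    loop {
      let msg = processors.recv(network).await;
      // ... handle msg.msg here, persisting any state keyed on msg.id ...
      processors.ack(msg).await;
    }
  }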

View File

@@ -1,167 +0,0 @@
use core::future::Future;
use std::sync::Arc;
use zeroize::Zeroizing;
use ciphersuite::*;
use dalek_ff_group::Ristretto;
use tokio::sync::mpsc;
use serai_db::{DbTxn, Db as DbTrait};
use serai_client_serai::abi::primitives::{
network_id::ExternalNetworkId,
validator_sets::{Session, ExternalValidatorSet},
};
use message_queue::{Service, Metadata, client::MessageQueue};
use tributary_sdk::Tributary;
use serai_task::ContinuallyRan;
use serai_coordinator_tributary::Transaction;
use serai_coordinator_p2p::P2p;
use crate::{Db, KeySet};
pub(crate) struct SubstrateTask<P: P2p> {
pub(crate) serai_key: Zeroizing<<Ristretto as WrappedGroup>::F>,
pub(crate) db: Db,
pub(crate) message_queue: Arc<MessageQueue>,
pub(crate) p2p: P,
pub(crate) p2p_add_tributary:
mpsc::UnboundedSender<(ExternalValidatorSet, Tributary<Db, Transaction, P>)>,
pub(crate) p2p_retire_tributary: mpsc::UnboundedSender<ExternalValidatorSet>,
}
impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
type Error = String; // TODO
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {
let mut made_progress = false;
// Handle the Canonical events
for network in ExternalNetworkId::all() {
loop {
let mut txn = self.db.txn();
let Some(msg) = serai_coordinator_substrate::Canonical::try_recv(&mut txn, network)
else {
break;
};
match msg {
messages::substrate::CoordinatorMessage::SetKeys { session, .. } => {
KeySet::set(&mut txn, ExternalValidatorSet { network, session }, &());
}
messages::substrate::CoordinatorMessage::SlashesReported { session } => {
let prior_retired = crate::db::RetiredTributary::get(&txn, network);
let next_to_be_retired =
prior_retired.map(|session| Session(session.0 + 1)).unwrap_or(Session(0));
assert_eq!(session, next_to_be_retired);
crate::db::RetiredTributary::set(&mut txn, network, &session);
self
.p2p_retire_tributary
.send(ExternalValidatorSet { network, session })
.expect("p2p retire_tributary channel dropped?");
}
messages::substrate::CoordinatorMessage::Block { .. } => {}
}
let msg = messages::CoordinatorMessage::from(msg);
let metadata = Metadata {
from: Service::Coordinator,
to: Service::Processor(network),
intent: msg.intent(),
};
let msg = borsh::to_vec(&msg).unwrap();
self.message_queue.queue(metadata, msg).await?;
txn.commit();
made_progress = true;
}
}
// Handle the NewSet events
loop {
let mut txn = self.db.txn();
let Some(new_set) = serai_coordinator_substrate::NewSet::try_recv(&mut txn) else { break };
if let Some(historic_session) = new_set.set.session.0.checked_sub(2) {
// We should have retired this session if we're here
if crate::db::RetiredTributary::get(&txn, new_set.set.network).map(|session| session.0) <
Some(historic_session)
{
/*
If we haven't, it's because we're processing the NewSet event before the retirement
event from the Canonical event stream. This happens if the Canonical event, and
then the NewSet event, is fired while we're already iterating over NewSet events.
We break, dropping the txn, restoring this NewSet to the database, so we'll only
handle it once a future iteration of this loop handles the retirement event.
*/
break;
}
/*
Queue this historical Tributary for deletion.
We explicitly don't queue this upon Tributary retirement, instead doing so here, to give time to
investigate retired Tributaries if questions are raised post-retirement. This gives a
week (the duration of the following session) after the Tributary has been retired to
make a backup of the data directory for any investigations.
*/
crate::db::TributaryCleanup::send(
&mut txn,
&ExternalValidatorSet {
network: new_set.set.network,
session: Session(historic_session),
},
);
}
// Save this Tributary as active to the database
{
let mut active_tributaries =
crate::db::ActiveTributaries::get(&txn).unwrap_or(Vec::with_capacity(1));
active_tributaries.push(new_set.clone());
crate::db::ActiveTributaries::set(&mut txn, &active_tributaries);
}
// Send GenerateKey to the processor
let msg = messages::key_gen::CoordinatorMessage::GenerateKey {
session: new_set.set.session,
threshold: new_set.threshold,
evrf_public_keys: new_set.evrf_public_keys.clone(),
};
let msg = messages::CoordinatorMessage::from(msg);
let metadata = Metadata {
from: Service::Coordinator,
to: Service::Processor(new_set.set.network),
intent: msg.intent(),
};
let msg = borsh::to_vec(&msg).unwrap();
self.message_queue.queue(metadata, msg).await?;
// Commit the transaction for all of this
txn.commit();
// Now spawn the Tributary
// If we reboot after committing the txn, but before this is called, this will be called
// on boot
crate::tributary::spawn_tributary(
self.db.clone(),
self.message_queue.clone(),
self.p2p.clone(),
&self.p2p_add_tributary,
new_set,
self.serai_key.clone(),
)
.await;
made_progress = true;
}
Ok(made_progress)
}
}
}

View File

@@ -0,0 +1,338 @@
/*
If:
A) This block has events and it's been at least X blocks since the last cosign or
B) This block doesn't have events but it's been X blocks since a skipped block which did
have events or
C) This block key gens (which changes who the cosigners are)
cosign this block.
This creates both a minimum and maximum delay of X blocks before a block's cosigning begins,
barring key gens which are exceptional. The minimum delay is there to ensure we don't constantly
spawn new protocols every 6 seconds, overwriting the old ones. The maximum delay is there to
ensure any block needing to be cosigned is cosigned within a reasonable amount of time.
*/
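// Concretely, with the constant below, X = COSIGN_DISTANCE = (5 * 60) / 6 = 50 blocks, so both
// the minimum and the maximum delay work out to roughly five minutes at a six-second block time.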
use zeroize::Zeroizing;
use dalek_ff_group::Ristretto;
use ciphersuite::Ciphersuite;
use borsh::{BorshSerialize, BorshDeserialize};
use serai_client::{
primitives::ExternalNetworkId,
validator_sets::primitives::{ExternalValidatorSet, Session},
Serai, SeraiError,
};
use serai_db::*;
use crate::{Db, substrate::in_set, tributary::SeraiBlockNumber};
// 5 minutes, expressed in blocks
// TODO: Pull a constant for block time
const COSIGN_DISTANCE: u64 = 5 * 60 / 6;
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
enum HasEvents {
KeyGen,
Yes,
No,
}
create_db!(
SubstrateCosignDb {
ScanCosignFrom: () -> u64,
IntendedCosign: () -> (u64, Option<u64>),
BlockHasEventsCache: (block: u64) -> HasEvents,
LatestCosignedBlock: () -> u64,
}
);
impl IntendedCosign {
// Sets the intended to cosign block, clearing the prior value entirely.
pub fn set_intended_cosign(txn: &mut impl DbTxn, intended: u64) {
Self::set(txn, &(intended, None::<u64>));
}
// Sets the cosign skipped since the last intended to cosign block.
pub fn set_skipped_cosign(txn: &mut impl DbTxn, skipped: u64) {
let (intended, prior_skipped) = Self::get(txn).unwrap();
assert!(prior_skipped.is_none());
Self::set(txn, &(intended, Some(skipped)));
}
}
impl LatestCosignedBlock {
pub fn latest_cosigned_block(getter: &impl Get) -> u64 {
Self::get(getter).unwrap_or_default().max(1)
}
}
db_channel! {
SubstrateDbChannels {
CosignTransactions: (network: ExternalNetworkId) -> (Session, u64, [u8; 32]),
}
}
impl CosignTransactions {
// Append a cosign transaction.
pub fn append_cosign(
txn: &mut impl DbTxn,
set: ExternalValidatorSet,
number: u64,
hash: [u8; 32],
) {
CosignTransactions::send(txn, set.network, &(set.session, number, hash))
}
}
async fn block_has_events(
txn: &mut impl DbTxn,
serai: &Serai,
block: u64,
) -> Result<HasEvents, SeraiError> {
let cached = BlockHasEventsCache::get(txn, block);
match cached {
None => {
let serai = serai.as_of(
serai
.finalized_block_by_number(block)
.await?
.expect("couldn't get block which should've been finalized")
.hash(),
);
if !serai.validator_sets().key_gen_events().await?.is_empty() {
return Ok(HasEvents::KeyGen);
}
let has_no_events = serai.coins().burn_with_instruction_events().await?.is_empty() &&
serai.in_instructions().batch_events().await?.is_empty() &&
serai.validator_sets().new_set_events().await?.is_empty() &&
serai.validator_sets().set_retired_events().await?.is_empty();
let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes };
BlockHasEventsCache::set(txn, block, &has_events);
Ok(has_events)
}
Some(code) => Ok(code),
}
}
async fn potentially_cosign_block(
txn: &mut impl DbTxn,
serai: &Serai,
block: u64,
skipped_block: Option<u64>,
window_end_exclusive: u64,
) -> Result<bool, SeraiError> {
// The code below, which marks a block as cosigned when the prior block is cosigned, expects
// this block to be non-zero
// While we could perform this check there, there's no reason not to short-circuit the entire
// function instead
if block == 0 {
return Ok(false);
}
let block_has_events = block_has_events(txn, serai, block).await?;
// If this block had no events and immediately follows a cosigned block, mark it as cosigned
if (block_has_events == HasEvents::No) &&
(LatestCosignedBlock::latest_cosigned_block(txn) == (block - 1))
{
log::debug!("automatically co-signing next block ({block}) since it has no events");
LatestCosignedBlock::set(txn, &block);
}
// If we skipped a block, we're supposed to sign it plus the COSIGN_DISTANCE if no other blocks
// trigger a cosigning protocol covering it
// This means there will be the maximum delay allowed from a block needing cosigning occurring
// and a cosign for it triggering
let maximally_latent_cosign_block =
skipped_block.map(|skipped_block| skipped_block + COSIGN_DISTANCE);
// If this block is within the window,
if block < window_end_exclusive {
// and set a key, cosign it
if block_has_events == HasEvents::KeyGen {
IntendedCosign::set_intended_cosign(txn, block);
// Carry skipped if it isn't included by cosigning this block
if let Some(skipped) = skipped_block {
if skipped > block {
IntendedCosign::set_skipped_cosign(txn, block);
}
}
return Ok(true);
}
} else if (Some(block) == maximally_latent_cosign_block) || (block_has_events != HasEvents::No) {
// Since this block was outside the window and had events/was maximally latent, cosign it
IntendedCosign::set_intended_cosign(txn, block);
return Ok(true);
}
Ok(false)
}
/*
Advances the cosign protocol as should be done per the latest block.
A block is considered cosigned if:
A) It was cosigned
B) It's the parent of a cosigned block
C) It immediately follows a cosigned block and has no events requiring cosigning
This only actually performs advancement within a limited bound (generally until it finds a block
which should be cosigned). Accordingly, it is necessary to call multiple times even if
`latest_number` doesn't change.
*/
async fn advance_cosign_protocol_inner(
db: &mut impl Db,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
serai: &Serai,
latest_number: u64,
) -> Result<(), SeraiError> {
let mut txn = db.txn();
const INITIAL_INTENDED_COSIGN: u64 = 1;
let (last_intended_to_cosign_block, mut skipped_block) = {
let intended_cosign = IntendedCosign::get(&txn);
// If we haven't previously intended to cosign a block, set the intended cosign to 1
if let Some(intended_cosign) = intended_cosign {
intended_cosign
} else {
IntendedCosign::set_intended_cosign(&mut txn, INITIAL_INTENDED_COSIGN);
IntendedCosign::get(&txn).unwrap()
}
};
// "windows" refers to the window of blocks where even if there's a block which should be
// cosigned, it won't be due to proximity due to the prior cosign
let mut window_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE;
// If we've never triggered a cosign, don't skip any cosigns based on proximity
if last_intended_to_cosign_block == INITIAL_INTENDED_COSIGN {
window_end_exclusive = 1;
}
// The consensus rules for this are `last_intended_to_cosign_block + 1`
let scan_start_block = last_intended_to_cosign_block + 1;
// As a practical optimization, we don't re-scan old blocks, since old blocks are independent of
// new state
let scan_start_block = scan_start_block.max(ScanCosignFrom::get(&txn).unwrap_or(1));
// Check all blocks within the window to see if they should be cosigned
// If so, we're skipping them and need to flag them as skipped so that once the window closes, we
// do cosign them
// We only perform this check if we haven't already marked a block as skipped, since the cosign
// triggered by the skipped block will cover all other blocks within this window
if skipped_block.is_none() {
let window_end_inclusive = window_end_exclusive - 1;
for b in scan_start_block ..= window_end_inclusive.min(latest_number) {
if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes {
skipped_block = Some(b);
log::debug!("skipping cosigning {b} due to proximity to prior cosign");
IntendedCosign::set_skipped_cosign(&mut txn, b);
break;
}
}
}
// A block which should be cosigned
let mut to_cosign = None;
// A list of sets which are cosigning, along with a boolean of if we're in the set
let mut cosigning = vec![];
for block in scan_start_block ..= latest_number {
let actual_block = serai
.finalized_block_by_number(block)
.await?
.expect("couldn't get block which should've been finalized");
// Save the block number for this block, as needed by the cosigner to perform cosigning
SeraiBlockNumber::set(&mut txn, actual_block.hash(), &block);
if potentially_cosign_block(&mut txn, serai, block, skipped_block, window_end_exclusive).await?
{
to_cosign = Some((block, actual_block.hash()));
// Get the keys as of the prior block
// If this key sets new keys, the coordinator won't acknowledge so until we process this
// block
// We won't process this block until it's cosigned
// Using the keys of the prior block ensures this deadlock isn't reached
let serai = serai.as_of(actual_block.header.parent_hash.into());
for network in serai_client::primitives::EXTERNAL_NETWORKS {
// Get the latest session to have set keys
let set_with_keys = {
let Some(latest_session) = serai.validator_sets().session(network.into()).await? else {
continue;
};
let prior_session = Session(latest_session.0.saturating_sub(1));
if serai
.validator_sets()
.keys(ExternalValidatorSet { network, session: prior_session })
.await?
.is_some()
{
ExternalValidatorSet { network, session: prior_session }
} else {
let set = ExternalValidatorSet { network, session: latest_session };
if serai.validator_sets().keys(set).await?.is_none() {
continue;
}
set
}
};
log::debug!("{:?} will be cosigning {block}", set_with_keys.network);
cosigning.push((set_with_keys, in_set(key, &serai, set_with_keys.into()).await?.unwrap()));
}
break;
}
// If this TX is committed, always start future scanning from the next block
ScanCosignFrom::set(&mut txn, &(block + 1));
// Since we're scanning *from* the next block, tidy the cache
BlockHasEventsCache::del(&mut txn, block);
}
if let Some((number, hash)) = to_cosign {
// If this block doesn't have cosigners, yet does have events, automatically mark it as
// cosigned
if cosigning.is_empty() {
log::debug!("{} had no cosigners available, marking as cosigned", number);
LatestCosignedBlock::set(&mut txn, &number);
} else {
for (set, in_set) in cosigning {
if in_set {
log::debug!("cosigning {number} with {:?} {:?}", set.network, set.session);
CosignTransactions::append_cosign(&mut txn, set, number, hash);
}
}
}
}
txn.commit();
Ok(())
}
pub async fn advance_cosign_protocol(
db: &mut impl Db,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
serai: &Serai,
latest_number: u64,
) -> Result<(), SeraiError> {
loop {
let scan_from = ScanCosignFrom::get(db).unwrap_or(1);
// Only scan 1000 blocks at a time to limit a massive txn from forming
let scan_to = latest_number.min(scan_from + 1000);
advance_cosign_protocol_inner(db, key, serai, scan_to).await?;
// If we didn't limit the scan_to, break
if scan_to == latest_number {
break;
}
}
Ok(())
}
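// Worked example of the chunking above (illustrative): with latest_number = 2500 and no prior
// scan, the passes cover blocks 1..=1001, then 1002..=2002, then 2003..=2500 (individual passes
// may also stop early once a block requiring a cosign is found), bounding each txn's size.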

View File

@@ -0,0 +1,32 @@
use serai_client::primitives::ExternalNetworkId;
pub use serai_db::*;
mod inner_db {
use super::*;
create_db!(
SubstrateDb {
NextBlock: () -> u64,
HandledEvent: (block: [u8; 32]) -> u32,
BatchInstructionsHashDb: (network: ExternalNetworkId, id: u32) -> [u8; 32]
}
);
}
pub(crate) use inner_db::{NextBlock, BatchInstructionsHashDb};
pub struct HandledEvent;
impl HandledEvent {
fn next_to_handle_event(getter: &impl Get, block: [u8; 32]) -> u32 {
inner_db::HandledEvent::get(getter, block).map_or(0, |last| last + 1)
}
pub fn is_unhandled(getter: &impl Get, block: [u8; 32], event_id: u32) -> bool {
let next = Self::next_to_handle_event(getter, block);
assert!(next >= event_id);
next == event_id
}
pub fn handle_event(txn: &mut impl DbTxn, block: [u8; 32], index: u32) {
assert!(Self::next_to_handle_event(txn, block) == index);
inner_db::HandledEvent::set(txn, block, &index);
}
}
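A sketch of the intended call pattern, mirroring how `HandledEvent` is used by the event handling in the following file:

  if HandledEvent::is_unhandled(db, block_hash, event_id) {
    let mut txn = db.txn();
    // ... perform this event's side effects, exactly once ...
    HandledEvent::handle_event(&mut txn, block_hash, event_id);
    txn.commit();
  }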

View File

@@ -0,0 +1,547 @@
use core::{ops::Deref, time::Duration};
use std::{
sync::Arc,
collections::{HashSet, HashMap},
};
use zeroize::Zeroizing;
use dalek_ff_group::Ristretto;
use ciphersuite::{group::GroupEncoding, Ciphersuite};
use serai_client::{
coins::CoinsEvent,
in_instructions::InInstructionsEvent,
primitives::{BlockHash, ExternalNetworkId},
validator_sets::{
primitives::{ExternalValidatorSet, ValidatorSet},
ValidatorSetsEvent,
},
Block, Serai, SeraiError, TemporalSerai,
};
use serai_db::DbTxn;
use processor_messages::SubstrateContext;
use tokio::{sync::mpsc, time::sleep};
use crate::{
Db,
processors::Processors,
tributary::{TributarySpec, SeraiDkgCompleted},
};
mod db;
pub use db::*;
mod cosign;
pub use cosign::*;
async fn in_set(
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
serai: &TemporalSerai<'_>,
set: ValidatorSet,
) -> Result<Option<bool>, SeraiError> {
let Some(participants) = serai.validator_sets().participants(set.network).await? else {
return Ok(None);
};
let key = (Ristretto::generator() * key.deref()).to_bytes();
Ok(Some(participants.iter().any(|(participant, _)| participant.0 == key)))
}
async fn handle_new_set<D: Db>(
txn: &mut D::Transaction<'_>,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
serai: &Serai,
block: &Block,
set: ExternalValidatorSet,
) -> Result<(), SeraiError> {
if in_set(key, &serai.as_of(block.hash()), set.into())
.await?
.expect("NewSet for set which doesn't exist")
{
log::info!("present in set {:?}", set);
let set_data = {
let serai = serai.as_of(block.hash());
let serai = serai.validator_sets();
let set_participants =
serai.participants(set.network.into()).await?.expect("NewSet for set which doesn't exist");
set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::<Vec<_>>()
};
let time = if let Ok(time) = block.time() {
time
} else {
assert_eq!(block.number(), 0);
// Use the next block's time
loop {
let Ok(Some(res)) = serai.finalized_block_by_number(1).await else {
sleep(Duration::from_secs(5)).await;
continue;
};
break res.time().unwrap();
}
};
// The block time is in milliseconds yet the Tributary is in seconds
let time = time / 1000;
// Since this block is in the past, and Tendermint doesn't play nice with starting chains after
// their start time (though it does eventually work), delay the start time by 120 seconds
// This is meant to handle ~20 blocks of lack of finalization for this first block
const SUBSTRATE_TO_TRIBUTARY_TIME_DELAY: u64 = 120;
let time = time + SUBSTRATE_TO_TRIBUTARY_TIME_DELAY;
let spec = TributarySpec::new(block.hash(), time, set, set_data);
log::info!("creating new tributary for {:?}", spec.set());
// Save it to the database now, not on the channel receiver's side, so this is safe against
// reboots
// If this txn finishes, and we reboot, then this'll be reloaded from active Tributaries
// If this txn doesn't finish, this will be re-fired
// If we waited to save to the DB, this txn may be finished, preventing re-firing, yet the
// prior fired event may have not been received yet
crate::ActiveTributaryDb::add_participating_in_tributary(txn, &spec);
new_tributary_spec.send(spec).unwrap();
} else {
log::info!("not present in new set {:?}", set);
}
Ok(())
}
async fn handle_batch_and_burns<Pro: Processors>(
txn: &mut impl DbTxn,
processors: &Pro,
serai: &Serai,
block: &Block,
) -> Result<(), SeraiError> {
// Track which networks had events with a Vec in order to preserve the insertion order
// While that shouldn't be needed, ensuring order never hurts, and may enable design choices
// with regard to Processor <-> Coordinator message passing
let mut networks_with_event = vec![];
let mut network_had_event = |burns: &mut HashMap<_, _>, batches: &mut HashMap<_, _>, network| {
// Don't insert this network multiple times
// A Vec is still used in order to maintain the insertion order
if !networks_with_event.contains(&network) {
networks_with_event.push(network);
burns.insert(network, vec![]);
batches.insert(network, vec![]);
}
};
let mut batch_block = HashMap::new();
let mut batches = HashMap::<ExternalNetworkId, Vec<u32>>::new();
let mut burns = HashMap::new();
let serai = serai.as_of(block.hash());
for batch in serai.in_instructions().batch_events().await? {
if let InInstructionsEvent::Batch { network, id, block: network_block, instructions_hash } =
batch
{
network_had_event(&mut burns, &mut batches, network);
BatchInstructionsHashDb::set(txn, network, id, &instructions_hash);
// Make sure this is the only Batch event for this network in this Block
assert!(batch_block.insert(network, network_block).is_none());
// Add the batch included by this block
batches.get_mut(&network).unwrap().push(id);
} else {
panic!("Batch event wasn't Batch: {batch:?}");
}
}
for burn in serai.coins().burn_with_instruction_events().await? {
if let CoinsEvent::BurnWithInstruction { from: _, instruction } = burn {
let network = instruction.balance.coin.network();
network_had_event(&mut burns, &mut batches, network);
// network_had_event should register an entry in burns
burns.get_mut(&network).unwrap().push(instruction);
} else {
panic!("Burn event wasn't Burn: {burn:?}");
}
}
assert_eq!(HashSet::<&_>::from_iter(networks_with_event.iter()).len(), networks_with_event.len());
for network in networks_with_event {
let network_latest_finalized_block = if let Some(block) = batch_block.remove(&network) {
block
} else {
// If it's had a batch or a burn, it must have had a block acknowledged
serai
.in_instructions()
.latest_block_for_network(network)
.await?
.expect("network had a batch/burn yet never set a latest block")
};
processors
.send(
network,
processor_messages::substrate::CoordinatorMessage::SubstrateBlock {
context: SubstrateContext {
serai_time: block.time().unwrap() / 1000,
network_latest_finalized_block,
},
block: block.number(),
burns: burns.remove(&network).unwrap(),
batches: batches.remove(&network).unwrap(),
},
)
.await;
}
Ok(())
}
// Handle a specific Substrate block, returning an error when it fails to get data
// (not blocking / holding)
#[allow(clippy::too_many_arguments)]
async fn handle_block<D: Db, Pro: Processors>(
db: &mut D,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
perform_slash_report: &mpsc::UnboundedSender<ExternalValidatorSet>,
tributary_retired: &mpsc::UnboundedSender<ExternalValidatorSet>,
processors: &Pro,
serai: &Serai,
block: Block,
) -> Result<(), SeraiError> {
let hash = block.hash();
// Define an indexed event ID.
let mut event_id = 0;
// If a new validator set was activated, create tributary/inform processor to do a DKG
for new_set in serai.as_of(hash).validator_sets().new_set_events().await? {
// Individually mark each event as handled so on reboot, we minimize duplicates
// Additionally, even if the Serai connection fails 1/100 times, this means a block with 1000
// events will still be handled incrementally and eventually complete
// (though the Serai connection should be stable, making this unnecessary)
let ValidatorSetsEvent::NewSet { set } = new_set else {
panic!("NewSet event wasn't NewSet: {new_set:?}");
};
// We only coordinate/process external networks
let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
if HandledEvent::is_unhandled(db, hash, event_id) {
log::info!("found fresh new set event {:?}", new_set);
let mut txn = db.txn();
handle_new_set::<D>(&mut txn, key, new_tributary_spec, serai, &block, set).await?;
HandledEvent::handle_event(&mut txn, hash, event_id);
txn.commit();
}
event_id += 1;
}
// If a key pair was confirmed, inform the processor
for key_gen in serai.as_of(hash).validator_sets().key_gen_events().await? {
if HandledEvent::is_unhandled(db, hash, event_id) {
log::info!("found fresh key gen event {:?}", key_gen);
let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen else {
panic!("KeyGen event wasn't KeyGen: {key_gen:?}");
};
let substrate_key = key_pair.0 .0;
processors
.send(
set.network,
processor_messages::substrate::CoordinatorMessage::ConfirmKeyPair {
context: SubstrateContext {
serai_time: block.time().unwrap() / 1000,
network_latest_finalized_block: serai
.as_of(block.hash())
.in_instructions()
.latest_block_for_network(set.network)
.await?
// The processor treats this as a magic value which will cause it to find a network
// block which has a time greater than or equal to the Serai time
.unwrap_or(BlockHash([0; 32])),
},
session: set.session,
key_pair,
},
)
.await;
// TODO: If we were in the set, yet were removed, drop the tributary
let mut txn = db.txn();
SeraiDkgCompleted::set(&mut txn, set, &substrate_key);
HandledEvent::handle_event(&mut txn, hash, event_id);
txn.commit();
}
event_id += 1;
}
for accepted_handover in serai.as_of(hash).validator_sets().accepted_handover_events().await? {
let ValidatorSetsEvent::AcceptedHandover { set } = accepted_handover else {
panic!("AcceptedHandover event wasn't AcceptedHandover: {accepted_handover:?}");
};
let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
if HandledEvent::is_unhandled(db, hash, event_id) {
log::info!("found fresh accepted handover event {:?}", accepted_handover);
// TODO: This isn't atomic with the event handling
// Send a oneshot receiver so we can await the response?
perform_slash_report.send(set).unwrap();
let mut txn = db.txn();
HandledEvent::handle_event(&mut txn, hash, event_id);
txn.commit();
}
event_id += 1;
}
for retired_set in serai.as_of(hash).validator_sets().set_retired_events().await? {
let ValidatorSetsEvent::SetRetired { set } = retired_set else {
panic!("SetRetired event wasn't SetRetired: {retired_set:?}");
};
let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
if HandledEvent::is_unhandled(db, hash, event_id) {
log::info!("found fresh set retired event {:?}", retired_set);
let mut txn = db.txn();
crate::ActiveTributaryDb::retire_tributary(&mut txn, set);
tributary_retired.send(set).unwrap();
HandledEvent::handle_event(&mut txn, hash, event_id);
txn.commit();
}
event_id += 1;
}
// Finally, tell the processor of acknowledged blocks/burns
// This uses a single event ID as, unlike the prior events which individually executed code,
// all the following events share their data collection
if HandledEvent::is_unhandled(db, hash, event_id) {
let mut txn = db.txn();
handle_batch_and_burns(&mut txn, processors, serai, &block).await?;
HandledEvent::handle_event(&mut txn, hash, event_id);
txn.commit();
}
Ok(())
}
#[allow(clippy::too_many_arguments)]
async fn handle_new_blocks<D: Db, Pro: Processors>(
db: &mut D,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
perform_slash_report: &mpsc::UnboundedSender<ExternalValidatorSet>,
tributary_retired: &mpsc::UnboundedSender<ExternalValidatorSet>,
processors: &Pro,
serai: &Serai,
next_block: &mut u64,
) -> Result<(), SeraiError> {
// Check if there's been a new Substrate block
let latest_number = serai.latest_finalized_block().await?.number();
// Advance the cosigning protocol
advance_cosign_protocol(db, key, serai, latest_number).await?;
// Reduce to the latest cosigned block
let latest_number = latest_number.min(LatestCosignedBlock::latest_cosigned_block(db));
if latest_number < *next_block {
return Ok(());
}
for b in *next_block ..= latest_number {
let block = serai
.finalized_block_by_number(b)
.await?
.expect("couldn't get block before the latest finalized block");
log::info!("handling substrate block {b}");
handle_block(
db,
key,
new_tributary_spec,
perform_slash_report,
tributary_retired,
processors,
serai,
block,
)
.await?;
*next_block += 1;
let mut txn = db.txn();
NextBlock::set(&mut txn, next_block);
txn.commit();
log::info!("handled substrate block {b}");
}
Ok(())
}
pub async fn scan_task<D: Db, Pro: Processors>(
mut db: D,
key: Zeroizing<<Ristretto as Ciphersuite>::F>,
processors: Pro,
serai: Arc<Serai>,
new_tributary_spec: mpsc::UnboundedSender<TributarySpec>,
perform_slash_report: mpsc::UnboundedSender<ExternalValidatorSet>,
tributary_retired: mpsc::UnboundedSender<ExternalValidatorSet>,
) {
log::info!("scanning substrate");
let mut next_substrate_block = NextBlock::get(&db).unwrap_or_default();
/*
let new_substrate_block_notifier = {
let serai = &serai;
move || async move {
loop {
match serai.newly_finalized_block().await {
Ok(sub) => return sub,
Err(e) => {
log::error!("couldn't communicate with serai node: {e}");
sleep(Duration::from_secs(5)).await;
}
}
}
}
};
*/
// TODO: Restore the above subscription-based system
// That would require moving serai-client from HTTP to websockets
let new_substrate_block_notifier = {
let serai = &serai;
move |next_substrate_block| async move {
loop {
match serai.latest_finalized_block().await {
Ok(latest) => {
if latest.header.number >= next_substrate_block {
return latest;
}
sleep(Duration::from_secs(3)).await;
}
Err(e) => {
log::error!("couldn't communicate with serai node: {e}");
sleep(Duration::from_secs(5)).await;
}
}
}
}
};
loop {
// await the next block, yet if our notifier had an error, re-create it
{
let Ok(_) = tokio::time::timeout(
Duration::from_secs(60),
new_substrate_block_notifier(next_substrate_block),
)
.await
else {
// Timed out, which may be because Serai isn't finalizing or due to some issue with the
// notifier
if serai.latest_finalized_block().await.map(|block| block.number()).ok() ==
Some(next_substrate_block.saturating_sub(1))
{
log::info!("serai hasn't finalized a block in the last 60s...");
}
continue;
};
/*
// next_block is a Option<Result>
if next_block.and_then(Result::ok).is_none() {
substrate_block_notifier = new_substrate_block_notifier(next_substrate_block);
continue;
}
*/
}
match handle_new_blocks(
&mut db,
&key,
&new_tributary_spec,
&perform_slash_report,
&tributary_retired,
&processors,
&serai,
&mut next_substrate_block,
)
.await
{
Ok(()) => {}
Err(e) => {
log::error!("couldn't communicate with serai node: {e}");
sleep(Duration::from_secs(5)).await;
}
}
}
}
/// Gets the expected ID for the next Batch.
///
/// On error, logs it and applies a slight sleep, letting the caller simply retry immediately.
pub(crate) async fn expected_next_batch(
serai: &Serai,
network: ExternalNetworkId,
) -> Result<u32, SeraiError> {
async fn expected_next_batch_inner(
serai: &Serai,
network: ExternalNetworkId,
) -> Result<u32, SeraiError> {
let serai = serai.as_of_latest_finalized_block().await?;
let last = serai.in_instructions().last_batch_for_network(network).await?;
Ok(if let Some(last) = last { last + 1 } else { 0 })
}
match expected_next_batch_inner(serai, network).await {
Ok(next) => Ok(next),
Err(e) => {
log::error!("couldn't get the expected next batch from substrate: {e:?}");
sleep(Duration::from_millis(100)).await;
Err(e)
}
}
}
/// Verifies `Batch`s which have already been indexed from Substrate.
///
/// Spins if a distinct `Batch` is detected on-chain.
///
/// This has a slight malleability in that it doesn't verify *who* published a `Batch` is as expected.
/// This is deemed fine.
pub(crate) async fn verify_published_batches<D: Db>(
txn: &mut D::Transaction<'_>,
network: ExternalNetworkId,
optimistic_up_to: u32,
) -> Option<u32> {
// TODO: Localize from MainDb to SubstrateDb
let last = crate::LastVerifiedBatchDb::get(txn, network);
for id in last.map_or(0, |last| last + 1) ..= optimistic_up_to {
let Some(on_chain) = BatchInstructionsHashDb::get(txn, network, id) else {
break;
};
let off_chain = crate::ExpectedBatchDb::get(txn, network, id).unwrap();
if on_chain != off_chain {
// Halt operations on this network and spin, as this is a critical fault
loop {
log::error!(
"{}! network: {:?} id: {} off-chain: {} on-chain: {}",
"on-chain batch doesn't match off-chain",
network,
id,
hex::encode(off_chain),
hex::encode(on_chain),
);
sleep(Duration::from_secs(60)).await;
}
}
crate::LastVerifiedBatchDb::set(txn, network, &id);
}
crate::LastVerifiedBatchDb::get(txn, network)
}
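A sketch of a call site (illustrative; the actual callers sit outside this diff, and `up_to` is a hypothetical bound such as the last received batch ID):

  let mut txn = db.txn();
  let _last_verified = verify_published_batches::<D>(&mut txn, network, up_to).await;
  txn.commit();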

View File

@@ -0,0 +1,125 @@
use core::fmt::Debug;
use std::{
sync::Arc,
collections::{VecDeque, HashSet, HashMap},
};
use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet};
use processor_messages::CoordinatorMessage;
use async_trait::async_trait;
use tokio::sync::RwLock;
use crate::{
processors::{Message, Processors},
TributaryP2p, ReqResMessageKind, GossipMessageKind, P2pMessageKind, Message as P2pMessage, P2p,
};
pub mod tributary;
#[derive(Clone)]
pub struct MemProcessors(pub Arc<RwLock<HashMap<ExternalNetworkId, VecDeque<CoordinatorMessage>>>>);
impl MemProcessors {
#[allow(clippy::new_without_default)]
pub fn new() -> MemProcessors {
MemProcessors(Arc::new(RwLock::new(HashMap::new())))
}
}
#[async_trait::async_trait]
impl Processors for MemProcessors {
async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>) {
let mut processors = self.0.write().await;
let processor = processors.entry(network).or_insert_with(VecDeque::new);
processor.push_back(msg.into());
}
async fn recv(&self, _: ExternalNetworkId) -> Message {
todo!()
}
async fn ack(&self, _: Message) {
todo!()
}
}
#[allow(clippy::type_complexity)]
#[derive(Clone, Debug)]
pub struct LocalP2p(
usize,
pub Arc<RwLock<(HashSet<Vec<u8>>, Vec<VecDeque<(usize, P2pMessageKind, Vec<u8>)>>)>>,
);
impl LocalP2p {
pub fn new(validators: usize) -> Vec<LocalP2p> {
let shared = Arc::new(RwLock::new((HashSet::new(), vec![VecDeque::new(); validators])));
let mut res = vec![];
for i in 0 .. validators {
res.push(LocalP2p(i, shared.clone()));
}
res
}
}
#[async_trait]
impl P2p for LocalP2p {
type Id = usize;
async fn subscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {}
async fn unsubscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {}
async fn send_raw(&self, to: Self::Id, msg: Vec<u8>) {
let mut msg_ref = msg.as_slice();
let kind = ReqResMessageKind::read(&mut msg_ref).unwrap();
self.1.write().await.1[to].push_back((self.0, P2pMessageKind::ReqRes(kind), msg_ref.to_vec()));
}
async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>) {
// Content-based deduplication
let mut lock = self.1.write().await;
{
let already_sent = &mut lock.0;
if already_sent.contains(&msg) {
return;
}
already_sent.insert(msg.clone());
}
let queues = &mut lock.1;
let kind_len = (match kind {
P2pMessageKind::ReqRes(kind) => kind.serialize(),
P2pMessageKind::Gossip(kind) => kind.serialize(),
})
.len();
let msg = msg[kind_len ..].to_vec();
for (i, msg_queue) in queues.iter_mut().enumerate() {
if i == self.0 {
continue;
}
msg_queue.push_back((self.0, kind, msg.clone()));
}
}
async fn receive(&self) -> P2pMessage<Self> {
// This is a cursed way to implement an async read from a Vec
loop {
if let Some((sender, kind, msg)) = self.1.write().await.1[self.0].pop_front() {
return P2pMessage { sender, kind, msg };
}
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
}
}
}
#[async_trait]
impl TributaryP2p for LocalP2p {
async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) {
<Self as P2p>::broadcast(
self,
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)),
msg,
)
.await
}
}
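A sketch of standing up the in-memory network (illustrative only):

  // Three validators sharing one queue set, where LocalP2p(i, ...) is validator i
  let p2ps = LocalP2p::new(3);
  // Note `broadcast_raw` expects the serialized message kind as a prefix of `msg` (it strips
  // `kind_len` bytes before fan-out) and deduplicates broadcasts by their full content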

Some files were not shown because too many files have changed in this diff.