1 Commit

Author SHA1 Message Date
Luke Parker
ce3b90541e Make transactions undroppable
coordinator/cosign/src/delay.rs demonstrates how we'd need to rewrite our
handling of transactions with this change. It can be cleaned up a bit, but it
already identifies ergonomic issues. It also doesn't model passing an &mut txn
to an async function, which would also require using the droppable wrapper
struct.

To see this build locally, run

RUSTFLAGS="-Zpanic_abort_tests -C panic=abort" cargo +nightly build -p serai-cosign --all-targets

To see this fail to build locally, run

cargo build -p serai-cosign --all-targets

While the failing build doesn't say which line is responsible, the only
difference between the two invocations is panic=unwind; the sketch below
illustrates why unwinding is the trigger.

For more context, please see #578.
2025-01-15 03:56:59 -05:00
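As a sketch of the pattern involved (every name below is hypothetical, and the link-time mechanism is an assumption consistent with the behavior described above, not necessarily this commit's exact code): a type can be made undroppable by giving it a Drop impl which references a symbol that is never defined. If drop glue is ever generated, linking fails, which is why no particular line is blamed. Under panic=abort there are no unwind paths, so a value which every code path explicitly consumes never generates drop glue; under panic=unwind, it does.

use core::mem::ManuallyDrop;

// Hypothetical stand-in for the real database transaction type.
struct Txn;

// A transaction which may not be implicitly dropped: every code path must
// explicitly consume it (e.g. via `commit`).
struct UndroppableTxn(ManuallyDrop<Txn>);

// Declared but never defined: codegen of `UndroppableTxn`'s drop glue
// produces an unresolved symbol, failing the build at link time.
unsafe extern "C" {
  fn undroppable_txn_dropped() -> !;
}

impl Drop for UndroppableTxn {
  fn drop(&mut self) {
    unsafe { undroppable_txn_dropped() }
  }
}

impl UndroppableTxn {
  fn commit(self) {
    // Disarm our own Drop, then take ownership of the inner transaction.
    let mut this = ManuallyDrop::new(self);
    let txn = unsafe { ManuallyDrop::take(&mut this.0) };
    // ... actually write `txn` to the database here ...
    drop(txn);
  }
}

// The 'droppable wrapper struct' mentioned above, as assumed here: a future
// must be droppable at every await point, so a transaction held across an
// `.await` (or passed as `&mut` into an async fn) has to be re-wrapped in
// something with an ordinary Drop, reintroducing the implicit-drop path this
// change otherwise eliminates -- the ergonomic issue delay.rs identifies.
struct DroppableTxn(Option<UndroppableTxn>);

impl Drop for DroppableTxn {
  fn drop(&mut self) {
    if let Some(txn) = self.0.take() {
      txn.commit();
    }
  }
}

This is also why the nightly invocation passes -Zpanic_abort_tests: the test harness otherwise requires panic=unwind, which would reintroduce drop-glue codegen.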
952 changed files with 99781 additions and 32753 deletions

@@ -1,6 +1,6 @@
 MIT License
-Copyright (c) 2022-2025 Luke Parker
+Copyright (c) 2022-2023 Luke Parker
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -5,14 +5,14 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: "30.0"
+    default: "27.0"
 runs:
   using: "composite"
   steps:
     - name: Bitcoin Daemon Cache
       id: cache-bitcoind
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # 4.2.4
+      uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
      with:
        path: bitcoin.tar.gz
        key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

@@ -7,20 +7,13 @@ runs:
   - name: Remove unused packages
     shell: bash
     run: |
-      # Ensure the repositories are synced
-      sudo apt update -y
-      # Actually perform the removals
-      sudo apt remove -y "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
+      sudo apt remove -y "*msbuild*" "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
       sudo apt remove -y "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*"
       sudo apt remove -y "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*"
-      sudo apt remove -y --allow-remove-essential -f shim-signed *python3*
-      # This removal command requires the prior removals due to unmet dependencies otherwise
       sudo apt remove -y "*qemu*" "*sql*" "*texinfo*" "*imagemagick*"
-      # Reinstall python3 as a general dependency of a functional operating system
-      sudo apt install -y python3 --fix-missing
+      sudo apt autoremove -y
+      sudo apt clean
+      docker system prune -a --volumes
     if: runner.os == 'Linux'
   - name: Remove unused packages
@@ -38,45 +31,19 @@ runs:
     shell: bash
     run: |
       if [ "$RUNNER_OS" == "Linux" ]; then
-        sudo apt install -y ca-certificates protobuf-compiler libclang-dev
+        sudo apt install -y ca-certificates protobuf-compiler
       elif [ "$RUNNER_OS" == "Windows" ]; then
         choco install protoc
       elif [ "$RUNNER_OS" == "macOS" ]; then
-        brew install protobuf llvm
-        HOMEBREW_ROOT_PATH=/opt/homebrew # Apple Silicon
-        if [ $(uname -m) = "x86_64" ]; then HOMEBREW_ROOT_PATH=/usr/local; fi # Intel
-        ls $HOMEBREW_ROOT_PATH/opt/llvm/lib | grep "libclang.dylib" # Make sure this installed `libclang`
-        echo "DYLD_LIBRARY_PATH=$HOMEBREW_ROOT_PATH/opt/llvm/lib:$DYLD_LIBRARY_PATH" >> "$GITHUB_ENV"
+        brew install protobuf
       fi
   - name: Install solc
     shell: bash
     run: |
-      cargo +1.91.1 install svm-rs --version =0.5.21
-      svm install 0.8.29
-      svm use 0.8.29
-  - name: Remove preinstalled Docker
-    shell: bash
-    run: |
-      docker system prune -a --volumes
-      sudo apt remove -y *docker*
-      # Install uidmap which will be required for the explicitly installed Docker
-      sudo apt install uidmap
-    if: runner.os == 'Linux'
-  - name: Update system dependencies
-    shell: bash
-    run: |
-      sudo apt update -y
-      sudo apt upgrade -y
-      sudo apt autoremove -y
-      sudo apt clean
-    if: runner.os == 'Linux'
-  - name: Install rootless Docker
-    uses: docker/setup-docker-action@e61617a16c407a86262fb923c35a616ddbe070b3 # 4.6.0
-    with:
-      rootless: true
-      set-host: true
-    if: runner.os == 'Linux'
+      cargo install svm-rs
+      svm install 0.8.26
+      svm use 0.8.26
+  # - name: Cache Rust
+  #   uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43

@@ -5,14 +5,14 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: v0.18.4.4
+    default: v0.18.3.4
 runs:
   using: "composite"
   steps:
     - name: Monero Wallet RPC Cache
       id: cache-monero-wallet-rpc
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # 4.2.4
+      uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
      with:
        path: monero-wallet-rpc
        key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

@@ -5,14 +5,14 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: v0.18.4.4
+    default: v0.18.3.4
 runs:
   using: "composite"
   steps:
     - name: Monero Daemon Cache
       id: cache-monerod
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # 4.2.4
+      uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
      with:
        path: /usr/bin/monerod
        key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

@@ -5,12 +5,12 @@ inputs:
   monero-version:
     description: "Monero version to download and run as a regtest node"
     required: false
-    default: v0.18.4.4
+    default: v0.18.3.4
   bitcoin-version:
     description: "Bitcoin version to download and run as a regtest node"
     required: false
-    default: "30.0"
+    default: "27.1"
 runs:
   using: "composite"
@@ -19,9 +19,9 @@ runs:
       uses: ./.github/actions/build-dependencies
     - name: Install Foundry
-      uses: foundry-rs/foundry-toolchain@50d5a8956f2e319df19e6b57539d7e2acb9f8c1e # 1.5.0
+      uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
      with:
-        version: v1.5.0
+        version: nightly-f625d0fa7c51e65b4bf1e8f7931cd1c6e2e285e9
         cache: false
    - name: Run a Monero Regtest Node

@@ -1 +1 @@
-nightly-2025-12-01
+nightly-2024-07-01

@@ -17,7 +17,7 @@ jobs:
   test-common:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies

@@ -31,7 +31,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies

@@ -19,7 +19,7 @@ jobs:
   test-crypto:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies
@@ -32,17 +32,13 @@
           -p dalek-ff-group \
           -p minimal-ed448 \
           -p ciphersuite \
-          -p ciphersuite-kp256 \
           -p multiexp \
           -p schnorr-signatures \
-          -p prime-field \
-          -p short-weierstrass \
-          -p secq256k1 \
-          -p embedwards25519 \
+          -p dleq \
+          -p generalized-bulletproofs \
+          -p generalized-bulletproofs-circuit-abstraction \
+          -p ec-divisors \
+          -p generalized-bulletproofs-ec-gadgets \
           -p dkg \
-          -p dkg-recovery \
-          -p dkg-dealer \
-          -p dkg-musig \
-          -p dkg-evrf \
           -p modular-frost \
           -p frost-schnorrkel

@@ -9,10 +9,16 @@ jobs:
     name: Run cargo deny
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - name: Advisory Cache
+        uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
+        with:
+          path: ~/.cargo/advisory-db
+          key: rust-advisory-db
       - name: Install cargo deny
-        run: cargo +1.91.1 install cargo-deny --version =0.18.6
+        run: cargo install --locked cargo-deny
       - name: Run cargo deny
-        run: cargo deny -L error --all-features check --hide-inclusion-graph
+        run: cargo deny -L error --all-features check

@@ -13,7 +13,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies

@@ -11,11 +11,11 @@ jobs:
   clippy:
     strategy:
       matrix:
-        os: [ubuntu-latest, macos-15-intel, macos-latest, windows-latest]
+        os: [ubuntu-latest, macos-13, macos-14, windows-latest]
     runs-on: ${{ matrix.os }}
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Get nightly version to use
         id: nightly
@@ -26,7 +26,7 @@
         uses: ./.github/actions/build-dependencies
       - name: Install nightly rust
-        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c clippy
+        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32-unknown-unknown -c clippy
       - name: Run Clippy
         run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module
@@ -43,18 +43,24 @@
   deny:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - name: Advisory Cache
+        uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
+        with:
+          path: ~/.cargo/advisory-db
+          key: rust-advisory-db
       - name: Install cargo deny
-        run: cargo +1.91.1 install cargo-deny --version =0.18.6
+        run: cargo install --locked cargo-deny
       - name: Run cargo deny
-        run: cargo deny -L error --all-features check --hide-inclusion-graph
+        run: cargo deny -L error --all-features check
   fmt:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Get nightly version to use
         id: nightly
@@ -67,10 +73,10 @@
       - name: Run rustfmt
         run: cargo +${{ steps.nightly.outputs.version }} fmt -- --check
-      - name: Install Foundry
-        uses: foundry-rs/foundry-toolchain@50d5a8956f2e319df19e6b57539d7e2acb9f8c1e # 1.5.0
+      - name: Install foundry
+        uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
        with:
-          version: v1.5.0
+          version: nightly-41d4e5437107f6f42c7711123890147bc736a609
           cache: false
       - name: Run forge fmt
@@ -79,120 +85,25 @@
   machete:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Verify all dependencies are in use
         run: |
-          cargo +1.91.1 install cargo-machete --version =0.9.1
-          cargo +1.91.1 machete
+          cargo install cargo-machete
+          cargo machete
-  msrv:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
-      - name: Verify claimed `rust-version`
-        shell: bash
-        run: |
-          cargo +1.91.1 install cargo-msrv --version =0.18.4
-          function check_msrv {
-            # We `cd` into the directory passed as the first argument, but will return to the
-            # directory called from.
-            return_to=$(pwd)
-            echo "Checking $1"
-            cd $1
-            # We then find the existing `rust-version` using `grep` (for the right line) and then a
-            # regex (to strip to just the major and minor version).
-            existing=$(cat ./Cargo.toml | grep "rust-version" | grep -Eo "[0-9]+\.[0-9]+")
-            # We then backup the `Cargo.toml`, allowing us to restore it after, saving time on future
-            # MSRV checks (as they'll benefit from immediately exiting if the queried version is less
-            # than the declared MSRV).
-            mv ./Cargo.toml ./Cargo.toml.bak
-            # We then use an inverted (`-v`) grep to remove the existing `rust-version` from the
-            # `Cargo.toml`, as required because else earlier versions of Rust won't even attempt to
-            # compile this crate.
-            cat ./Cargo.toml.bak | grep -v "rust-version" > Cargo.toml
-            # We then find the actual `rust-version` using `cargo-msrv` (again stripping to just the
-            # major and minor version).
-            actual=$(cargo msrv find --output-format minimal | grep -Eo "^[0-9]+\.[0-9]+")
-            # Finally, we compare the two.
-            echo "Declared rust-version: $existing"
-            echo "Actual rust-version: $actual"
-            [ $existing == $actual ]
-            result=$?
-            # Restore the original `Cargo.toml`.
-            rm Cargo.toml
-            mv ./Cargo.toml.bak ./Cargo.toml
-            # Return to the directory called from and return the result.
-            cd $return_to
-            return $result
-          }
-          # Check each member of the workspace
-          function check_workspace {
-            # Get the members array from the workspace's `Cargo.toml`
-            cargo_toml_lines=$(cat ./Cargo.toml | wc -l)
-            # Keep all lines after the start of the array, then keep all lines before the next "]"
-            members=$(cat Cargo.toml | grep "members\ \=\ \[" -m1 -A$cargo_toml_lines | grep "]" -m1 -B$cargo_toml_lines)
-            # Parse out any comments, whitespace, including comments post-fixed on the same line as an entry
-            # We accomplish the latter by pruning all characters after the entry's ","
-            members=$(echo "$members" | grep -Ev "^[[:space:]]*(#|$)" | awk -F',' '{print $1","}')
-            # Replace the first line, which was "members = [" and is now "members = [,", with "["
-            members=$(echo "$members" | sed "1s/.*/\[/")
-            # Correct the last line, which was malleated to "],"
-            members=$(echo "$members" | sed "$(echo "$members" | wc -l)s/\]\,/\]/")
-            # Don't check the following
-            # Most of these are binaries, with the exception of the Substrate runtime which has a
-            # bespoke build pipeline
-            members=$(echo "$members" | grep -v "networks/ethereum/relayer\"")
-            members=$(echo "$members" | grep -v "message-queue\"")
-            members=$(echo "$members" | grep -v "processor/bin\"")
-            members=$(echo "$members" | grep -v "processor/bitcoin\"")
-            members=$(echo "$members" | grep -v "processor/ethereum\"")
-            members=$(echo "$members" | grep -v "processor/monero\"")
-            members=$(echo "$members" | grep -v "coordinator\"")
-            members=$(echo "$members" | grep -v "substrate/runtime\"")
-            members=$(echo "$members" | grep -v "substrate/node\"")
-            members=$(echo "$members" | grep -v "orchestration\"")
-            # Don't check the tests
-            members=$(echo "$members" | grep -v "mini\"")
-            members=$(echo "$members" | grep -v "tests/")
-            # Remove the trailing comma by replacing the last line's "," with ""
-            members=$(echo "$members" | sed "$(($(echo "$members" | wc -l) - 1))s/\,//")
-            echo $members | jq -r ".[]" | while read -r member; do
-              check_msrv $member
-              correct=$?
-              if [ $correct -ne 0 ]; then
-                return $correct
-              fi
-            done
-          }
-          check_workspace
   slither:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
-      - name: Build Dependencies
-        uses: ./.github/actions/build-dependencies
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Slither
         run: |
-          python3 -m pip install slither-analyzer==0.11.3
-          slither ./networks/ethereum/schnorr/contracts/Schnorr.sol
+          python3 -m pip install solc-select
+          solc-select install 0.8.26
+          solc-select use 0.8.26
+          python3 -m pip install slither-analyzer
+          slither --include-paths ./networks/ethereum/schnorr/contracts/Schnorr.sol
           slither --include-paths ./networks/ethereum/schnorr/contracts ./networks/ethereum/schnorr/contracts/tests/Schnorr.sol
           slither processor/ethereum/deployer/contracts/Deployer.sol
           slither processor/ethereum/erc20/contracts/IERC20.sol

@@ -27,7 +27,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies

@@ -17,7 +17,7 @@ jobs:
   test-common:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies

.github/workflows/monero-tests.yaml (vendored; new file, 77 lines)

@@ -0,0 +1,77 @@
name: Monero Tests
on:
  push:
    branches:
      - develop
    paths:
      - "networks/monero/**"
      - "processor/**"
  pull_request:
    paths:
      - "networks/monero/**"
      - "processor/**"
  workflow_dispatch:
jobs:
  # Only run these once since they will be consistent regardless of any node
  unit-tests:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
      - name: Test Dependencies
        uses: ./.github/actions/test-dependencies
      - name: Run Unit Tests Without Features
        run: |
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-io --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-generators --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-primitives --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-mlsag --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-clsag --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-borromean --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-bulletproofs --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-rpc --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-address --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-seed --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package polyseed --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --lib
  # Doesn't run unit tests with features as the tests workflow will
  integration-tests:
    runs-on: ubuntu-latest
    # Test against all supported protocol versions
    strategy:
      matrix:
        version: [v0.17.3.2, v0.18.3.4]
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
      - name: Test Dependencies
        uses: ./.github/actions/test-dependencies
        with:
          monero-version: ${{ matrix.version }}
      - name: Run Integration Tests Without Features
        run: |
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --test '*'
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --test '*'
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --test '*'
      - name: Run Integration Tests
        # Don't run if the tests workflow also will
        if: ${{ matrix.version != 'v0.18.3.4' }}
        run: |
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --all-features --test '*'
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --all-features --test '*'
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --all-features --test '*'

@@ -9,7 +9,7 @@ jobs:
     name: Update nightly
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
         with:
           submodules: "recursive"

.github/workflows/msrv.yml (vendored; new file, 259 lines)

@@ -0,0 +1,259 @@
name: Weekly MSRV Check
on:
  schedule:
    - cron: "0 0 * * 0"
  workflow_dispatch:
jobs:
  msrv-common:
    name: Run cargo msrv on common
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
      - name: Install Build Dependencies
        uses: ./.github/actions/build-dependencies
      - name: Install cargo msrv
        run: cargo install --locked cargo-msrv
      - name: Run cargo msrv on common
        run: |
          cargo msrv verify --manifest-path common/zalloc/Cargo.toml
          cargo msrv verify --manifest-path common/std-shims/Cargo.toml
          cargo msrv verify --manifest-path common/env/Cargo.toml
          cargo msrv verify --manifest-path common/db/Cargo.toml
          cargo msrv verify --manifest-path common/task/Cargo.toml
          cargo msrv verify --manifest-path common/request/Cargo.toml
          cargo msrv verify --manifest-path common/patchable-async-sleep/Cargo.toml
  msrv-crypto:
    name: Run cargo msrv on crypto
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
      - name: Install Build Dependencies
        uses: ./.github/actions/build-dependencies
      - name: Install cargo msrv
        run: cargo install --locked cargo-msrv
      - name: Run cargo msrv on crypto
        run: |
          cargo msrv verify --manifest-path crypto/transcript/Cargo.toml
          cargo msrv verify --manifest-path crypto/ff-group-tests/Cargo.toml
          cargo msrv verify --manifest-path crypto/dalek-ff-group/Cargo.toml
          cargo msrv verify --manifest-path crypto/ed448/Cargo.toml
          cargo msrv verify --manifest-path crypto/multiexp/Cargo.toml
          cargo msrv verify --manifest-path crypto/dleq/Cargo.toml
          cargo msrv verify --manifest-path crypto/ciphersuite/Cargo.toml
          cargo msrv verify --manifest-path crypto/schnorr/Cargo.toml
          cargo msrv verify --manifest-path crypto/evrf/generalized-bulletproofs/Cargo.toml
          cargo msrv verify --manifest-path crypto/evrf/circuit-abstraction/Cargo.toml
          cargo msrv verify --manifest-path crypto/evrf/divisors/Cargo.toml
          cargo msrv verify --manifest-path crypto/evrf/ec-gadgets/Cargo.toml
          cargo msrv verify --manifest-path crypto/evrf/embedwards25519/Cargo.toml
          cargo msrv verify --manifest-path crypto/evrf/secq256k1/Cargo.toml
          cargo msrv verify --manifest-path crypto/dkg/Cargo.toml
          cargo msrv verify --manifest-path crypto/frost/Cargo.toml
          cargo msrv verify --manifest-path crypto/schnorrkel/Cargo.toml
  msrv-networks:
    name: Run cargo msrv on networks
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
      - name: Install Build Dependencies
        uses: ./.github/actions/build-dependencies
      - name: Install cargo msrv
        run: cargo install --locked cargo-msrv
      - name: Run cargo msrv on networks
        run: |
          cargo msrv verify --manifest-path networks/bitcoin/Cargo.toml
          cargo msrv verify --manifest-path networks/ethereum/build-contracts/Cargo.toml
          cargo msrv verify --manifest-path networks/ethereum/schnorr/Cargo.toml
          cargo msrv verify --manifest-path networks/ethereum/alloy-simple-request-transport/Cargo.toml
          cargo msrv verify --manifest-path networks/ethereum/relayer/Cargo.toml --features parity-db
          cargo msrv verify --manifest-path networks/monero/io/Cargo.toml
          cargo msrv verify --manifest-path networks/monero/generators/Cargo.toml
          cargo msrv verify --manifest-path networks/monero/primitives/Cargo.toml
          cargo msrv verify --manifest-path networks/monero/ringct/mlsag/Cargo.toml
          cargo msrv verify --manifest-path networks/monero/ringct/clsag/Cargo.toml
          cargo msrv verify --manifest-path networks/monero/ringct/borromean/Cargo.toml
          cargo msrv verify --manifest-path networks/monero/ringct/bulletproofs/Cargo.toml
          cargo msrv verify --manifest-path networks/monero/Cargo.toml
          cargo msrv verify --manifest-path networks/monero/rpc/Cargo.toml
          cargo msrv verify --manifest-path networks/monero/rpc/simple-request/Cargo.toml
          cargo msrv verify --manifest-path networks/monero/wallet/address/Cargo.toml
          cargo msrv verify --manifest-path networks/monero/wallet/Cargo.toml
          cargo msrv verify --manifest-path networks/monero/verify-chain/Cargo.toml
  msrv-message-queue:
    name: Run cargo msrv on message-queue
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
      - name: Install Build Dependencies
        uses: ./.github/actions/build-dependencies
      - name: Install cargo msrv
        run: cargo install --locked cargo-msrv
      - name: Run cargo msrv on message-queue
        run: |
          cargo msrv verify --manifest-path message-queue/Cargo.toml --features parity-db
  msrv-processor:
    name: Run cargo msrv on processor
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
      - name: Install Build Dependencies
        uses: ./.github/actions/build-dependencies
      - name: Install cargo msrv
        run: cargo install --locked cargo-msrv
      - name: Run cargo msrv on processor
        run: |
          cargo msrv verify --manifest-path processor/view-keys/Cargo.toml
          cargo msrv verify --manifest-path processor/primitives/Cargo.toml
          cargo msrv verify --manifest-path processor/messages/Cargo.toml
          cargo msrv verify --manifest-path processor/scanner/Cargo.toml
          cargo msrv verify --manifest-path processor/scheduler/primitives/Cargo.toml
          cargo msrv verify --manifest-path processor/scheduler/smart-contract/Cargo.toml
          cargo msrv verify --manifest-path processor/scheduler/utxo/primitives/Cargo.toml
          cargo msrv verify --manifest-path processor/scheduler/utxo/standard/Cargo.toml
          cargo msrv verify --manifest-path processor/scheduler/utxo/transaction-chaining/Cargo.toml
          cargo msrv verify --manifest-path processor/key-gen/Cargo.toml
          cargo msrv verify --manifest-path processor/frost-attempt-manager/Cargo.toml
          cargo msrv verify --manifest-path processor/signers/Cargo.toml
          cargo msrv verify --manifest-path processor/bin/Cargo.toml --features parity-db
          cargo msrv verify --manifest-path processor/bitcoin/Cargo.toml
          cargo msrv verify --manifest-path processor/ethereum/primitives/Cargo.toml
          cargo msrv verify --manifest-path processor/ethereum/test-primitives/Cargo.toml
          cargo msrv verify --manifest-path processor/ethereum/erc20/Cargo.toml
          cargo msrv verify --manifest-path processor/ethereum/deployer/Cargo.toml
          cargo msrv verify --manifest-path processor/ethereum/router/Cargo.toml
          cargo msrv verify --manifest-path processor/ethereum/Cargo.toml
          cargo msrv verify --manifest-path processor/monero/Cargo.toml
  msrv-coordinator:
    name: Run cargo msrv on coordinator
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
      - name: Install Build Dependencies
        uses: ./.github/actions/build-dependencies
      - name: Install cargo msrv
        run: cargo install --locked cargo-msrv
      - name: Run cargo msrv on coordinator
        run: |
          cargo msrv verify --manifest-path coordinator/tributary-sdk/tendermint/Cargo.toml
          cargo msrv verify --manifest-path coordinator/tributary-sdk/Cargo.toml
          cargo msrv verify --manifest-path coordinator/cosign/Cargo.toml
          cargo msrv verify --manifest-path coordinator/substrate/Cargo.toml
          cargo msrv verify --manifest-path coordinator/tributary/Cargo.toml
          cargo msrv verify --manifest-path coordinator/p2p/Cargo.toml
          cargo msrv verify --manifest-path coordinator/p2p/libp2p/Cargo.toml
          cargo msrv verify --manifest-path coordinator/Cargo.toml
  msrv-substrate:
    name: Run cargo msrv on substrate
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
      - name: Install Build Dependencies
        uses: ./.github/actions/build-dependencies
      - name: Install cargo msrv
        run: cargo install --locked cargo-msrv
      - name: Run cargo msrv on substrate
        run: |
          cargo msrv verify --manifest-path substrate/primitives/Cargo.toml
          cargo msrv verify --manifest-path substrate/coins/primitives/Cargo.toml
          cargo msrv verify --manifest-path substrate/coins/pallet/Cargo.toml
          cargo msrv verify --manifest-path substrate/dex/pallet/Cargo.toml
          cargo msrv verify --manifest-path substrate/economic-security/pallet/Cargo.toml
          cargo msrv verify --manifest-path substrate/genesis-liquidity/primitives/Cargo.toml
          cargo msrv verify --manifest-path substrate/genesis-liquidity/pallet/Cargo.toml
          cargo msrv verify --manifest-path substrate/in-instructions/primitives/Cargo.toml
          cargo msrv verify --manifest-path substrate/in-instructions/pallet/Cargo.toml
          cargo msrv verify --manifest-path substrate/validator-sets/pallet/Cargo.toml
          cargo msrv verify --manifest-path substrate/validator-sets/primitives/Cargo.toml
          cargo msrv verify --manifest-path substrate/emissions/primitives/Cargo.toml
          cargo msrv verify --manifest-path substrate/emissions/pallet/Cargo.toml
          cargo msrv verify --manifest-path substrate/signals/primitives/Cargo.toml
          cargo msrv verify --manifest-path substrate/signals/pallet/Cargo.toml
          cargo msrv verify --manifest-path substrate/abi/Cargo.toml
          cargo msrv verify --manifest-path substrate/client/Cargo.toml
          cargo msrv verify --manifest-path substrate/runtime/Cargo.toml
          cargo msrv verify --manifest-path substrate/node/Cargo.toml
  msrv-orchestration:
    name: Run cargo msrv on orchestration
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
      - name: Install Build Dependencies
        uses: ./.github/actions/build-dependencies
      - name: Install cargo msrv
        run: cargo install --locked cargo-msrv
      - name: Run cargo msrv on message-queue
        run: |
          cargo msrv verify --manifest-path orchestration/Cargo.toml
  msrv-mini:
    name: Run cargo msrv on mini
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
      - name: Install Build Dependencies
        uses: ./.github/actions/build-dependencies
      - name: Install cargo msrv
        run: cargo install --locked cargo-msrv
      - name: Run cargo msrv on mini
        run: |
          cargo msrv verify --manifest-path mini/Cargo.toml

@@ -21,7 +21,7 @@ jobs:
   test-networks:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Test Dependencies
         uses: ./.github/actions/test-dependencies
@@ -34,3 +34,19 @@
             -p ethereum-schnorr-contract \
             -p alloy-simple-request-transport \
             -p serai-ethereum-relayer \
+            -p monero-io \
+            -p monero-generators \
+            -p monero-primitives \
+            -p monero-mlsag \
+            -p monero-clsag \
+            -p monero-borromean \
+            -p monero-bulletproofs \
+            -p monero-serai \
+            -p monero-rpc \
+            -p monero-simple-request-rpc \
+            -p monero-address \
+            -p monero-wallet \
+            -p monero-seed \
+            -p polyseed \
+            -p monero-wallet-util \
+            -p monero-serai-verify-chain

@@ -23,23 +23,13 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies
-      - name: Get nightly version to use
-        id: nightly
-        shell: bash
-        run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT
       - name: Install RISC-V Toolchain
-        run: |
-          sudo apt update
-          sudo apt install -y gcc-riscv64-unknown-elf gcc-multilib
-          rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal --component rust-src --target riscv32imac-unknown-none-elf
+        run: sudo apt update && sudo apt install -y gcc-riscv64-unknown-elf gcc-multilib && rustup target add riscv32imac-unknown-none-elf
       - name: Verify no-std builds
-        run: |
-          CFLAGS=-I/usr/include cargo +${{ steps.nightly.outputs.version }} build --target riscv32imac-unknown-none-elf -Z build-std=core -p serai-no-std-tests
-          CFLAGS=-I/usr/include cargo +${{ steps.nightly.outputs.version }} build --target riscv32imac-unknown-none-elf -Z build-std=core,alloc -p serai-no-std-tests --features "alloc"
+        run: CFLAGS=-I/usr/include cargo build --target riscv32imac-unknown-none-elf -p serai-no-std-tests

@@ -1,7 +1,6 @@
 # MIT License
 #
 # Copyright (c) 2022 just-the-docs
-# Copyright (c) 2022-2024 Luke Parker
 #
 # Permission is hereby granted, free of charge, to any person obtaining a copy
 # of this software and associated documentation files (the "Software"), to deal
@@ -21,21 +20,31 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
-name: Deploy Rust docs and Jekyll site to Pages
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third-party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+# Sample workflow for building and deploying a Jekyll site to GitHub Pages
+name: Deploy Jekyll site to Pages
 on:
   push:
     branches:
       - "develop"
-    paths:
-      - "docs/**"
+  # Allows you to run this workflow manually from the Actions tab
   workflow_dispatch:
+# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
 permissions:
   contents: read
   pages: write
   id-token: write
-# Only allow one concurrent deployment
+# Allow one concurrent deployment
 concurrency:
   group: "pages"
   cancel-in-progress: true
@@ -44,37 +53,27 @@ jobs:
   # Build job
   build:
     runs-on: ubuntu-latest
+    defaults:
+      run:
+        working-directory: docs
     steps:
       - name: Checkout
-        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+        uses: actions/checkout@v3
       - name: Setup Ruby
-        uses: ruby/setup-ruby@8aeb6ff8030dd539317f8e1769a044873b56ea71 # 1.268.0
+        uses: ruby/setup-ruby@v1
        with:
          bundler-cache: true
          cache-version: 0
          working-directory: "${{ github.workspace }}/docs"
       - name: Setup Pages
         id: pages
-        uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b # 5.0.0
+        uses: actions/configure-pages@v3
       - name: Build with Jekyll
-        run: cd ${{ github.workspace }}/docs && bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
+        run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
         env:
           JEKYLL_ENV: production
-      - name: Get nightly version to use
-        id: nightly
-        shell: bash
-        run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT
-      - name: Build Dependencies
-        uses: ./.github/actions/build-dependencies
-      - name: Build Rust docs
-        run: |
-          rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-docs
-          RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --no-deps --all-features
-          mv target/doc docs/_site/rust
       - name: Upload artifact
-        uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b # 4.0.0
+        uses: actions/upload-pages-artifact@v1
        with:
          path: "docs/_site/"
@@ -88,4 +87,4 @@ jobs:
     steps:
       - name: Deploy to GitHub Pages
         id: deployment
-        uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # 4.0.5
+        uses: actions/deploy-pages@v2

@@ -31,7 +31,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies

@@ -27,7 +27,7 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies

@@ -29,7 +29,7 @@ jobs:
   test-infra:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies
@@ -61,7 +61,6 @@
           -p serai-monero-processor \
           -p tendermint-machine \
           -p tributary-sdk \
-          -p serai-cosign-types \
           -p serai-cosign \
           -p serai-coordinator-substrate \
           -p serai-coordinator-tributary \
@@ -74,7 +73,7 @@
   test-substrate:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies
@@ -83,33 +82,31 @@
         run: |
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
             -p serai-primitives \
-            -p serai-abi \
-            -p substrate-median \
-            -p serai-core-pallet \
+            -p serai-coins-primitives \
             -p serai-coins-pallet \
-            -p serai-validator-sets-pallet \
-            -p serai-signals-pallet \
             -p serai-dex-pallet \
+            -p serai-validator-sets-primitives \
+            -p serai-validator-sets-pallet \
+            -p serai-genesis-liquidity-primitives \
             -p serai-genesis-liquidity-pallet \
-            -p serai-economic-security-pallet \
+            -p serai-emissions-primitives \
             -p serai-emissions-pallet \
+            -p serai-economic-security-pallet \
+            -p serai-in-instructions-primitives \
             -p serai-in-instructions-pallet \
+            -p serai-signals-primitives \
+            -p serai-signals-pallet \
+            -p serai-abi \
             -p serai-runtime \
-            -p serai-node \
-            -p serai-substrate-tests
+            -p serai-node
   test-serai-client:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Build Dependencies
         uses: ./.github/actions/build-dependencies
       - name: Run Tests
-        run: |
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-serai
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-bitcoin
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-ethereum
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-monero
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client
+        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client

.gitignore (vendored; 8 lines changed)

@@ -1,13 +1,7 @@
 target
-# Don't commit any `Cargo.lock` which aren't the workspace's
-Cargo.lock
-!/Cargo.lock
-# Don't commit any `Dockerfile`, as they're auto-generated, except the only one which isn't
 Dockerfile
-Dockerfile.fast-epoch
 !orchestration/runtime/Dockerfile
 .test-logs
 .vscode

Cargo.lock (generated; 7821 lines changed): file diff suppressed because it is too large.

@@ -1,6 +1,20 @@
 [workspace]
 resolver = "2"
 members = [
+  # Version patches
+  "patches/parking_lot_core",
+  "patches/parking_lot",
+  "patches/zstd",
+  "patches/rocksdb",
+  # std patches
+  "patches/matches",
+  "patches/is-terminal",
+  # Rewrites/redirects
+  "patches/option-ext",
+  "patches/directories-next",
   "common/std-shims",
   "common/zalloc",
   "common/patchable-async-sleep",
@@ -15,21 +29,19 @@ members = [
   "crypto/dalek-ff-group",
   "crypto/ed448",
   "crypto/ciphersuite",
-  "crypto/ciphersuite/kp256",
   "crypto/multiexp",
   "crypto/schnorr",
-  "crypto/prime-field",
-  "crypto/short-weierstrass",
-  "crypto/secq256k1",
-  "crypto/embedwards25519",
+  "crypto/dleq",
+  "crypto/evrf/secq256k1",
+  "crypto/evrf/embedwards25519",
+  "crypto/evrf/generalized-bulletproofs",
+  "crypto/evrf/circuit-abstraction",
+  "crypto/evrf/divisors",
+  "crypto/evrf/ec-gadgets",
   "crypto/dkg",
-  "crypto/dkg/recovery",
-  "crypto/dkg/dealer",
-  "crypto/dkg/musig",
-  "crypto/dkg/evrf",
   "crypto/frost",
   "crypto/schnorrkel",
@@ -40,6 +52,23 @@ members = [
   "networks/ethereum/alloy-simple-request-transport",
   "networks/ethereum/relayer",
+  "networks/monero/io",
+  "networks/monero/generators",
+  "networks/monero/primitives",
+  "networks/monero/ringct/mlsag",
+  "networks/monero/ringct/clsag",
+  "networks/monero/ringct/borromean",
+  "networks/monero/ringct/bulletproofs",
+  "networks/monero",
+  "networks/monero/rpc",
+  "networks/monero/rpc/simple-request",
+  "networks/monero/wallet/address",
+  "networks/monero/wallet",
+  "networks/monero/wallet/seed",
+  "networks/monero/wallet/polyseed",
+  "networks/monero/wallet/util",
+  "networks/monero/verify-chain",
   "message-queue",
   "processor/messages",
@@ -62,14 +91,13 @@ members = [
   "processor/ethereum/primitives",
   "processor/ethereum/test-primitives",
   "processor/ethereum/deployer",
-  "processor/ethereum/erc20",
   "processor/ethereum/router",
+  "processor/ethereum/erc20",
   "processor/ethereum",
   "processor/monero",
   "coordinator/tributary-sdk/tendermint",
   "coordinator/tributary-sdk",
-  "coordinator/cosign/types",
   "coordinator/cosign",
   "coordinator/substrate",
   "coordinator/tributary",
@@ -78,27 +106,34 @@ members = [
   "coordinator",
   "substrate/primitives",
+  "substrate/coins/primitives",
+  "substrate/coins/pallet",
+  "substrate/dex/pallet",
+  "substrate/validator-sets/primitives",
+  "substrate/validator-sets/pallet",
+  "substrate/genesis-liquidity/primitives",
+  "substrate/genesis-liquidity/pallet",
+  "substrate/emissions/primitives",
+  "substrate/emissions/pallet",
+  "substrate/economic-security/pallet",
+  "substrate/in-instructions/primitives",
+  "substrate/in-instructions/pallet",
+  "substrate/signals/primitives",
+  "substrate/signals/pallet",
   "substrate/abi",
-  "substrate/median",
-  "substrate/core",
-  "substrate/coins",
-  "substrate/validator-sets",
-  "substrate/signals",
-  "substrate/dex",
-  "substrate/genesis-liquidity",
-  "substrate/economic-security",
-  "substrate/emissions",
-  "substrate/in-instructions",
   "substrate/runtime",
   "substrate/node",
-  "substrate/client/serai",
-  "substrate/client/bitcoin",
-  "substrate/client/ethereum",
-  "substrate/client/monero",
   "substrate/client",
   "orchestration",
@@ -109,96 +144,62 @@ members = [
   "tests/docker",
   "tests/message-queue",
-  # TODO "tests/processor",
-  # TODO "tests/coordinator",
-  "tests/substrate",
-  # TODO "tests/full-stack",
+  "tests/processor",
+  "tests/coordinator",
+  "tests/full-stack",
   "tests/reproducible-runtime",
 ]
+[profile.dev.package]
 # Always compile Monero (and a variety of dependencies) with optimizations due
 # to the extensive operations required for Bulletproofs
-[profile.dev.package]
 subtle = { opt-level = 3 }
+sha3 = { opt-level = 3 }
+blake2 = { opt-level = 3 }
 ff = { opt-level = 3 }
 group = { opt-level = 3 }
 crypto-bigint = { opt-level = 3 }
+secp256k1 = { opt-level = 3 }
 curve25519-dalek = { opt-level = 3 }
 dalek-ff-group = { opt-level = 3 }
+minimal-ed448 = { opt-level = 3 }
 multiexp = { opt-level = 3 }
-monero-io = { opt-level = 3 }
-monero-primitives = { opt-level = 3 }
-monero-ed25519 = { opt-level = 3 }
-monero-mlsag = { opt-level = 3 }
-monero-clsag = { opt-level = 3 }
-monero-borromean = { opt-level = 3 }
-monero-bulletproofs-generators = { opt-level = 3 }
-monero-bulletproofs = { opt-level = 3 }
-monero-oxide = { opt-level = 3 }
-# Always compile the eVRF DKG tree with optimizations as well
-secp256k1 = { opt-level = 3 }
 secq256k1 = { opt-level = 3 }
 embedwards25519 = { opt-level = 3 }
 generalized-bulletproofs = { opt-level = 3 }
 generalized-bulletproofs-circuit-abstraction = { opt-level = 3 }
+ec-divisors = { opt-level = 3 }
 generalized-bulletproofs-ec-gadgets = { opt-level = 3 }
-# revm also effectively requires being built with optimizations
-revm = { opt-level = 3 }
-revm-bytecode = { opt-level = 3 }
-revm-context = { opt-level = 3 }
-revm-context-interface = { opt-level = 3 }
-revm-database = { opt-level = 3 }
-revm-database-interface = { opt-level = 3 }
-revm-handler = { opt-level = 3 }
-revm-inspector = { opt-level = 3 }
-revm-interpreter = { opt-level = 3 }
-revm-precompile = { opt-level = 3 }
-revm-primitives = { opt-level = 3 }
-revm-state = { opt-level = 3 }
+dkg = { opt-level = 3 }
+monero-generators = { opt-level = 3 }
+monero-borromean = { opt-level = 3 }
+monero-bulletproofs = { opt-level = 3 }
+monero-mlsag = { opt-level = 3 }
+monero-clsag = { opt-level = 3 }
 [profile.release]
 panic = "unwind"
-overflow-checks = true
 [patch.crates-io]
-# Point to empty crates for crates unused within our tree
-alloy-eip2124 = { path = "patches/ethereum/alloy-eip2124" }
-ark-ff-3 = { package = "ark-ff", path = "patches/ethereum/ark-ff-0.3" }
-ark-ff-4 = { package = "ark-ff", path = "patches/ethereum/ark-ff-0.4" }
-c-kzg = { path = "patches/ethereum/c-kzg" }
-secp256k1-30 = { package = "secp256k1", path = "patches/ethereum/secp256k1-30" }
-# Dependencies from monero-oxide which originate from within our own tree, potentially shimmed to account for deviations since publishing
-std-shims = { path = "patches/std-shims" }
-simple-request = { path = "patches/simple-request" }
-multiexp = { path = "crypto/multiexp" }
-flexible-transcript = { path = "crypto/transcript" }
-ciphersuite = { path = "patches/ciphersuite" }
-dalek-ff-group = { path = "patches/dalek-ff-group" }
-minimal-ed448 = { path = "crypto/ed448" }
-modular-frost = { path = "crypto/frost" }
-# Patch due to `std` now including the required functionality
-is_terminal_polyfill = { path = "./patches/is_terminal_polyfill" }
-# This has a non-deprecated `std` alternative since Rust's 2024 edition
-home = { path = "patches/home" }
-# Updates to the latest version
-darling = { path = "patches/darling" }
-thiserror = { path = "patches/thiserror" }
 # https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
 lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }
+parking_lot_core = { path = "patches/parking_lot_core" }
+parking_lot = { path = "patches/parking_lot" }
+# wasmtime pulls in an old version for this
+zstd = { path = "patches/zstd" }
+# Needed for WAL compression
+rocksdb = { path = "patches/rocksdb" }
+# is-terminal now has an std-based solution with an equivalent API
+is-terminal = { path = "patches/is-terminal" }
+# So does matches
+matches = { path = "patches/matches" }
 # directories-next was created because directories was unmaintained
 # directories-next is now unmaintained while directories is maintained
 # The directories author pulls in ridiculously pointless crates and prefers
@@ -207,22 +208,12 @@ lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev
 option-ext = { path = "patches/option-ext" }
 directories-next = { path = "patches/directories-next" }
-# Patch from a fork back to upstream
-parity-bip39 = { path = "patches/parity-bip39" }
-# Patch to include `FromUniformBytes<64>` over `Scalar`
-k256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" }
-p256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" }
-# `jemalloc` conflicts with `mimalloc`, so patch to a `kvdb-rocksdb` which never exposes `jemalloc`
-kvdb-rocksdb = { path = "patches/kvdb-rocksdb" }
+# The official pasta_curves repo doesn't support Zeroize
+pasta_curves = { git = "https://github.com/kayabaNerve/pasta_curves", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616" }
 [workspace.lints.clippy]
-incompatible_msrv = "allow" # Manually verified with a GitHub workflow
-manual_is_multiple_of = "allow"
 unwrap_or_default = "allow"
 map_unwrap_or = "allow"
-needless_continue = "allow"
 borrow_as_ptr = "deny"
 cast_lossless = "deny"
 cast_possible_truncation = "deny"
@@ -253,6 +244,7 @@ manual_string_new = "deny"
 match_bool = "deny"
 match_same_arms = "deny"
 missing_fields_in_debug = "deny"
+needless_continue = "deny"
 needless_pass_by_value = "deny"
 ptr_cast_constness = "deny"
 range_minus_one = "deny"
@@ -261,7 +253,7 @@ redundant_closure_for_method_calls = "deny"
 redundant_else = "deny"
 string_add_assign = "deny"
 string_slice = "deny"
-unchecked_time_subtraction = "deny"
+unchecked_duration_subtraction = "deny"
 uninlined_format_args = "deny"
 unnecessary_box_returns = "deny"
 unnecessary_join = "deny"
@@ -270,6 +262,3 @@ unnested_or_patterns = "deny"
 unused_async = "deny"
 unused_self = "deny"
 zero_sized_map_values = "deny"
-[workspace.lints.rust]
-unused = "allow" # TODO: https://github.com/rust-lang/rust/issues/147648

@@ -5,4 +5,4 @@ a full copy of the AGPL-3.0 License is included in the root of this repository
 as a reference text. This copy should be provided with any distribution of a
 crate licensed under the AGPL-3.0, as per its terms.
-The GitHub actions/workflows (`.github`) are licensed under the MIT license.
+The GitHub actions (`.github/actions`) are licensed under the MIT license.

@@ -59,6 +59,7 @@ issued at the discretion of the Immunefi program managers.
 - [Website](https://serai.exchange/): https://serai.exchange/
 - [Immunefi](https://immunefi.com/bounty/serai/): https://immunefi.com/bounty/serai/
 - [Twitter](https://twitter.com/SeraiDEX): https://twitter.com/SeraiDEX
+- [Mastodon](https://cryptodon.lol/@serai): https://cryptodon.lol/@serai
 - [Discord](https://discord.gg/mpEUtJR3vz): https://discord.gg/mpEUtJR3vz
 - [Matrix](https://matrix.to/#/#serai:matrix.org): https://matrix.to/#/#serai:matrix.org
 - [Reddit](https://www.reddit.com/r/SeraiDEX/): https://www.reddit.com/r/SeraiDEX/


@@ -1,14 +0,0 @@
# Trail of Bits Ethereum Contracts Audit, June 2025
This audit included:
- Our Schnorr contract and associated library (/networks/ethereum/schnorr)
- Our Ethereum primitives library (/processor/ethereum/primitives)
- Our Deployer contract and associated library (/processor/ethereum/deployer)
- Our ERC20 library (/processor/ethereum/erc20)
- Our Router contract and associated library (/processor/ethereum/router)
It encompasses up to commit 4e0c58464fc4673623938335f06e2e9ea96ca8dd.
Please see
https://github.com/trailofbits/publications/blob/30c4fa3ebf39ff8e4d23ba9567344ec9691697b5/reviews/2025-04-serai-dex-security-review.pdf
for the actual report.

View File

@@ -1,50 +0,0 @@
# eVRF DKG
In 2024, the [eVRF paper](https://eprint.iacr.org/2024/397) was published to
the IACR preprint server. Within it was a one-round unbiased DKG and a
one-round unbiased threshold DKG. Unfortunately, both simply describe
communication of the secret shares as 'Alice sends $s_b$ to Bob'. In practice,
this necessitates an additional round of communication, in which all
participants confirm they received their secret shares.
Within Serai, it was posited to use the same premises as the DDH eVRF itself to
achieve a verifiable encryption scheme. This allows the secret shares to be
posted to any 'bulletin board' (such as a blockchain) and for all observers to
confirm:
- A participant participated
- The secret shares sent can be received by the intended recipient so long as
they can access the bulletin board
Additionally, Serai desired a robust scheme (albeit with a biased key as the
output, which is fine for our purposes). Accordingly, our implementation
instantiates the threshold eVRF DKG from the eVRF paper, with our own proposal
for verifiable encryption, with the caller allowed to decide the set of
participants (a sketch follows the list below). They may:
- Select everyone, collapsing to the non-threshold unbiased DKG from the eVRF
paper
- Select a pre-determined set, collapsing to the threshold unbiased DKG from
the eVRF paper
- Select a post-determined set (with any solution to the Common Subset
problem), achieving a robust threshold biased DKG
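A minimal sketch of how a caller might express these options, under a
hypothetical `ParticipantSelection` type (illustrative only, not the crate's
actual API):

enum ParticipantSelection {
  // Everyone participates, collapsing to the non-threshold unbiased DKG
  Everyone,
  // A pre-determined set, collapsing to the threshold unbiased DKG
  Predetermined(Vec<u16>),
  // A post-determined set (from any solution to the Common Subset problem),
  // achieving a robust threshold DKG with a biased key
  PostDetermined(Vec<u16>),
}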
Note that the eVRF paper proposes using the eVRF to sample coefficients yet
this is unnecessary when the resulting key will be biased. Any proof of
knowledge for the coefficients, as necessary for their extraction within the
security proofs, would be sufficient.
MAGIC Grants contracted HashCloak to formalize Serai's proposal for a DKG and
provide proofs for its security. This resulted in
[this paper](<./Security Proofs.pdf>).
Our implementation itself is then built on top of the audited
[`generalized-bulletproofs`](https://github.com/kayabaNerve/monero-oxide/tree/generalized-bulletproofs/audits/crypto/generalized-bulletproofs)
and
[`generalized-bulletproofs-ec-gadgets`](https://github.com/monero-oxide/monero-oxide/tree/fcmp%2B%2B/audits/fcmps).
Note we do not use the originally premised DDH eVRF but rather the one premised
on elliptic curve divisors, the methodology of which is commented on
[here](https://github.com/monero-oxide/monero-oxide/tree/fcmp%2B%2B/audits/divisors).
Our implementation itself, however, remains unaudited at this time.

View File

@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/db"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = [] keywords = []
edition = "2021" edition = "2021"
rust-version = "1.77" rust-version = "1.71"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true
@@ -17,8 +17,8 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true workspace = true
[dependencies] [dependencies]
parity-db = { version = "0.5", default-features = false, features = ["arc"], optional = true } parity-db = { version = "0.4", default-features = false, optional = true }
rocksdb = { version = "0.24", default-features = false, features = ["zstd"], optional = true } rocksdb = { version = "0.23", default-features = false, features = ["zstd"], optional = true }
[features] [features]
parity-db = ["dep:parity-db"] parity-db = ["dep:parity-db"]

View File

@@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2022-2025 Luke Parker Copyright (c) 2022-2023 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal

View File

@@ -15,7 +15,7 @@ pub fn serai_db_key(
/// ///
/// Creates a unit struct and a default implementation for the `key`, `get`, and `set`. The macro /// Creates a unit struct and a default implementation for the `key`, `get`, and `set`. The macro
/// uses a syntax similar to defining a function. Parameters are concatenated to produce a key, /// uses a syntax similar to defining a function. Parameters are concatenated to produce a key,
/// they must be `borsh` serializable. The return type is used to auto (de)serialize the database /// they must be `scale` encodable. The return type is used to auto encode and decode the database
/// value bytes using `borsh`. /// value bytes using `borsh`.
/// ///
/// # Arguments /// # Arguments
@@ -54,10 +54,11 @@ macro_rules! create_db {
)?; )?;
impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? { impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? {
pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> { pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> {
use scale::Encode;
$crate::serai_db_key( $crate::serai_db_key(
stringify!($db_name).as_bytes(), stringify!($db_name).as_bytes(),
stringify!($field_name).as_bytes(), stringify!($field_name).as_bytes(),
&borsh::to_vec(&($($arg),*)).unwrap(), ($($arg),*).encode()
) )
} }
pub(crate) fn set( pub(crate) fn set(
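For illustration, a hypothetical invocation of the syntax documented above (the
database and field names are made up):

create_db!(
  ExampleDb {
    LastScannedBlock: () -> u64,
    BlockHashByNumber: (number: u64) -> [u8; 32],
  }
);

This would generate unit structs `LastScannedBlock` and `BlockHashByNumber`
exposing `key`, `get`, and `set`, with values (de)serialized via `borsh`.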

View File

@@ -30,13 +30,53 @@ pub trait Get {
/// is undefined. The transaction may block, deadlock, panic, overwrite one of the two values /// is undefined. The transaction may block, deadlock, panic, overwrite one of the two values
/// randomly, or any other action, at time of write or at time of commit. /// randomly, or any other action, at time of write or at time of commit.
#[must_use] #[must_use]
pub trait DbTxn: Send + Get { pub trait DbTxn: Sized + Send + Get {
/// Write a value to this key. /// Write a value to this key.
fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>); fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>);
/// Delete the value from this key. /// Delete the value from this key.
fn del(&mut self, key: impl AsRef<[u8]>); fn del(&mut self, key: impl AsRef<[u8]>);
/// Commit this transaction. /// Commit this transaction.
fn commit(self); fn commit(self);
/// Close this transaction.
///
/// This is equivalent to `Drop` on transactions which can be dropped. This is explicit and works
/// with transactions which can't be dropped.
fn close(self) {
drop(self);
}
}
// Credit for the idea goes to https://jack.wrenn.fyi/blog/undroppable
pub struct Undroppable<T>(Option<T>);
impl<T> Drop for Undroppable<T> {
fn drop(&mut self) {
// Use an assertion at compile time to prevent this code from compiling if generated
#[allow(clippy::assertions_on_constants)]
const {
assert!(false, "Undroppable DbTxn was dropped. Ensure all code paths call commit or close");
}
}
}
impl<T: DbTxn> Get for Undroppable<T> {
fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
self.0.as_ref().unwrap().get(key)
}
}
impl<T: DbTxn> DbTxn for Undroppable<T> {
fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
self.0.as_mut().unwrap().put(key, value);
}
fn del(&mut self, key: impl AsRef<[u8]>) {
self.0.as_mut().unwrap().del(key);
}
fn commit(mut self) {
self.0.take().unwrap().commit();
let _ = core::mem::ManuallyDrop::new(self);
}
fn close(mut self) {
drop(self.0.take().unwrap());
let _ = core::mem::ManuallyDrop::new(self);
}
} }
/// A database supporting atomic transaction. /// A database supporting atomic transaction.
@@ -51,6 +91,10 @@ pub trait Db: 'static + Send + Sync + Clone + Get {
let dst_len = u8::try_from(item_dst.len()).unwrap(); let dst_len = u8::try_from(item_dst.len()).unwrap();
[[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat() [[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()
} }
/// Open a new transaction. /// Open a new transaction which may be dropped.
fn txn(&mut self) -> Self::Transaction<'_>; fn unsafe_txn(&mut self) -> Self::Transaction<'_>;
/// Open a new transaction which must be committed or closed.
fn txn(&mut self) -> Undroppable<Self::Transaction<'_>> {
Undroppable(Some(self.unsafe_txn()))
}
} }
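A minimal usage sketch of the new semantics (hypothetical code; as the commit
message notes, this only builds with panic=abort, which keeps `Undroppable`'s
drop glue unreachable):

use serai_db::{Get, DbTxn, Db};

fn write_value(db: &mut impl Db) {
  // `txn()` now yields an `Undroppable` transaction
  let mut txn = db.txn();
  txn.put(b"key", b"value");
  // Every path must end in `commit()` or `close()`; implicitly dropping `txn`
  // would instantiate the compile-time assertion in `Undroppable`'s `Drop`
  txn.commit();
}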

View File

@@ -74,7 +74,7 @@ impl Get for MemDb {
} }
impl Db for MemDb { impl Db for MemDb {
type Transaction<'a> = MemDbTxn<'a>; type Transaction<'a> = MemDbTxn<'a>;
fn txn(&mut self) -> MemDbTxn<'_> { fn unsafe_txn(&mut self) -> MemDbTxn<'_> {
MemDbTxn(self, HashMap::new(), HashSet::new()) MemDbTxn(self, HashMap::new(), HashSet::new())
} }
} }

View File

@@ -37,7 +37,7 @@ impl Get for Arc<ParityDb> {
} }
impl Db for Arc<ParityDb> { impl Db for Arc<ParityDb> {
type Transaction<'a> = Transaction<'a>; type Transaction<'a> = Transaction<'a>;
fn txn(&mut self) -> Self::Transaction<'_> { fn unsafe_txn(&mut self) -> Self::Transaction<'_> {
Transaction(self, vec![]) Transaction(self, vec![])
} }
} }

View File

@@ -39,7 +39,7 @@ impl<T: ThreadMode> Get for Arc<OptimisticTransactionDB<T>> {
} }
impl<T: Send + ThreadMode + 'static> Db for Arc<OptimisticTransactionDB<T>> { impl<T: Send + ThreadMode + 'static> Db for Arc<OptimisticTransactionDB<T>> {
type Transaction<'a> = Transaction<'a, T>; type Transaction<'a> = Transaction<'a, T>;
fn txn(&mut self) -> Self::Transaction<'_> { fn unsafe_txn(&mut self) -> Self::Transaction<'_> {
let mut opts = WriteOptions::default(); let mut opts = WriteOptions::default();
opts.set_sync(true); opts.set_sync(true);
Transaction(self.transaction_opt(&opts, &Default::default()), &**self) Transaction(self.transaction_opt(&opts, &Default::default()), &**self)

View File

@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/env"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = [] keywords = []
edition = "2021" edition = "2021"
rust-version = "1.64" rust-version = "1.71"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true

2
common/env/LICENSE vendored
View File

@@ -1,6 +1,6 @@
AGPL-3.0-only license AGPL-3.0-only license
Copyright (c) 2023-2025 Luke Parker Copyright (c) 2023 Luke Parker
This program is free software: you can redistribute it and/or modify This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as it under the terms of the GNU Affero General Public License Version 3 as

View File

@@ -1,5 +1,5 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
// Obtain a variable from the Serai environment/secret store. // Obtain a variable from the Serai environment/secret store.
pub fn var(variable: &str) -> Option<String> { pub fn var(variable: &str) -> Option<String> {

View File

@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/patchable-a
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["async", "sleep", "tokio", "smol", "async-std"] keywords = ["async", "sleep", "tokio", "smol", "async-std"]
edition = "2021" edition = "2021"
rust-version = "1.70" rust-version = "1.71"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true

View File

@@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2024-2025 Luke Parker Copyright (c) 2024 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal

View File

@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
#![deny(missing_docs)] #![deny(missing_docs)]

View File

@@ -1,9 +1,9 @@
[package] [package]
name = "simple-request" name = "simple-request"
version = "0.3.0" version = "0.1.0"
description = "A simple HTTP(S) request library" description = "A simple HTTP(S) request library"
license = "MIT" license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/request" repository = "https://github.com/serai-dex/serai/tree/develop/common/simple-request"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["http", "https", "async", "request", "ssl"] keywords = ["http", "https", "async", "request", "ssl"]
edition = "2021" edition = "2021"
@@ -19,10 +19,9 @@ workspace = true
[dependencies] [dependencies]
tower-service = { version = "0.3", default-features = false } tower-service = { version = "0.3", default-features = false }
hyper = { version = "1", default-features = false, features = ["http1", "client"] } hyper = { version = "1", default-features = false, features = ["http1", "client"] }
hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy"] } hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy", "tokio"] }
http-body-util = { version = "0.1", default-features = false } http-body-util = { version = "0.1", default-features = false }
futures-util = { version = "0.3", default-features = false, features = ["std"] } tokio = { version = "1", default-features = false }
tokio = { version = "1", default-features = false, features = ["sync"] }
hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true } hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }
@@ -30,8 +29,6 @@ zeroize = { version = "1", optional = true }
base64ct = { version = "1", features = ["alloc"], optional = true } base64ct = { version = "1", features = ["alloc"], optional = true }
[features] [features]
tokio = ["hyper-util/tokio"] tls = ["hyper-rustls"]
tls = ["tokio", "hyper-rustls"]
webpki-roots = ["tls", "hyper-rustls/webpki-roots"]
basic-auth = ["zeroize", "base64ct"] basic-auth = ["zeroize", "base64ct"]
default = ["tls"] default = ["tls"]

View File

@@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2023-2025 Luke Parker Copyright (c) 2023 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal

View File

@@ -1,20 +1,19 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
use core::{pin::Pin, future::Future};
use std::sync::Arc; use std::sync::Arc;
use futures_util::FutureExt; use tokio::sync::Mutex;
use ::tokio::sync::Mutex;
use tower_service::Service as TowerService; use tower_service::Service as TowerService;
use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest, rt::Executor};
pub use hyper;
use hyper_util::client::legacy::{Client as HyperClient, connect::HttpConnector};
#[cfg(feature = "tls")] #[cfg(feature = "tls")]
use hyper_rustls::{HttpsConnectorBuilder, HttpsConnector}; use hyper_rustls::{HttpsConnectorBuilder, HttpsConnector};
use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest};
use hyper_util::{
rt::tokio::TokioExecutor,
client::legacy::{Client as HyperClient, connect::HttpConnector},
};
pub use hyper;
mod request; mod request;
pub use request::*; pub use request::*;
@@ -38,86 +37,52 @@ type Connector = HttpConnector;
type Connector = HttpsConnector<HttpConnector>; type Connector = HttpsConnector<HttpConnector>;
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
enum Connection< enum Connection {
E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
> {
ConnectionPool(HyperClient<Connector, Full<Bytes>>), ConnectionPool(HyperClient<Connector, Full<Bytes>>),
Connection { Connection {
executor: E,
connector: Connector, connector: Connector,
host: Uri, host: Uri,
connection: Arc<Mutex<Option<SendRequest<Full<Bytes>>>>>, connection: Arc<Mutex<Option<SendRequest<Full<Bytes>>>>>,
}, },
} }
/// An HTTP client.
///
/// `tls` is only guaranteed to work when using the `tokio` executor. Instantiating a client when
/// the `tls` feature is active without using the `tokio` executor will cause errors.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct Client< pub struct Client {
E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>, connection: Connection,
> {
connection: Connection<E>,
} }
impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>> impl Client {
Client<E> fn connector() -> Connector {
{
#[allow(clippy::unnecessary_wraps)]
fn connector() -> Result<Connector, Error> {
let mut res = HttpConnector::new(); let mut res = HttpConnector::new();
res.set_keepalive(Some(core::time::Duration::from_secs(60))); res.set_keepalive(Some(core::time::Duration::from_secs(60)));
res.set_nodelay(true); res.set_nodelay(true);
res.set_reuse_address(true); res.set_reuse_address(true);
#[cfg(feature = "tls")]
if core::any::TypeId::of::<E>() !=
core::any::TypeId::of::<hyper_util::rt::tokio::TokioExecutor>()
{
Err(Error::ConnectionError(
"`tls` feature enabled but not using the `tokio` executor".into(),
))?;
}
#[cfg(feature = "tls")] #[cfg(feature = "tls")]
res.enforce_http(false); res.enforce_http(false);
#[cfg(feature = "tls")] #[cfg(feature = "tls")]
let https = HttpsConnectorBuilder::new().with_native_roots(); let res = HttpsConnectorBuilder::new()
#[cfg(all(feature = "tls", not(feature = "webpki-roots")))] .with_native_roots()
let https = https.map_err(|e| { .expect("couldn't fetch system's SSL roots")
Error::ConnectionError( .https_or_http()
format!("couldn't load system's SSL root certificates and webpki-roots unavilable: {e:?}") .enable_http1()
.into(), .wrap_connector(res);
) res
})?;
// Fallback to `webpki-roots` if present
#[cfg(all(feature = "tls", feature = "webpki-roots"))]
let https = https.unwrap_or(HttpsConnectorBuilder::new().with_webpki_roots());
#[cfg(feature = "tls")]
let res = https.https_or_http().enable_http1().wrap_connector(res);
Ok(res)
} }
pub fn with_executor_and_connection_pool(executor: E) -> Result<Client<E>, Error> { pub fn with_connection_pool() -> Client {
Ok(Client { Client {
connection: Connection::ConnectionPool( connection: Connection::ConnectionPool(
HyperClient::builder(executor) HyperClient::builder(TokioExecutor::new())
.pool_idle_timeout(core::time::Duration::from_secs(60)) .pool_idle_timeout(core::time::Duration::from_secs(60))
.build(Self::connector()?), .build(Self::connector()),
), ),
}) }
} }
pub fn with_executor_and_without_connection_pool( pub fn without_connection_pool(host: &str) -> Result<Client, Error> {
executor: E,
host: &str,
) -> Result<Client<E>, Error> {
Ok(Client { Ok(Client {
connection: Connection::Connection { connection: Connection::Connection {
executor, connector: Self::connector(),
connector: Self::connector()?,
host: { host: {
let uri: Uri = host.parse().map_err(|_| Error::InvalidUri)?; let uri: Uri = host.parse().map_err(|_| Error::InvalidUri)?;
if uri.host().is_none() { if uri.host().is_none() {
@@ -130,9 +95,9 @@ impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Outpu
}) })
} }
pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_, E>, Error> { pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_>, Error> {
let request: Request = request.into(); let request: Request = request.into();
let Request { mut request, response_size_limit } = request; let mut request = request.0;
if let Some(header_host) = request.headers().get(hyper::header::HOST) { if let Some(header_host) = request.headers().get(hyper::header::HOST) {
match &self.connection { match &self.connection {
Connection::ConnectionPool(_) => {} Connection::ConnectionPool(_) => {}
@@ -166,7 +131,7 @@ impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Outpu
Connection::ConnectionPool(client) => { Connection::ConnectionPool(client) => {
client.request(request).await.map_err(Error::HyperUtil)? client.request(request).await.map_err(Error::HyperUtil)?
} }
Connection::Connection { executor, connector, host, connection } => { Connection::Connection { connector, host, connection } => {
let mut connection_lock = connection.lock().await; let mut connection_lock = connection.lock().await;
// If there's not a connection... // If there's not a connection...
@@ -178,46 +143,28 @@ impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Outpu
let call_res = call_res.map_err(Error::ConnectionError); let call_res = call_res.map_err(Error::ConnectionError);
let (requester, connection) = let (requester, connection) =
hyper::client::conn::http1::handshake(call_res?).await.map_err(Error::Hyper)?; hyper::client::conn::http1::handshake(call_res?).await.map_err(Error::Hyper)?;
// This task will die when we drop the requester // This will die when we drop the requester, so we don't need to track an AbortHandle
executor.execute(Box::pin(connection.map(|_| ()))); // for it
tokio::spawn(connection);
*connection_lock = Some(requester); *connection_lock = Some(requester);
} }
let connection = connection_lock.as_mut().expect("connection wasn't present despite just being set"); let connection = connection_lock.as_mut().unwrap();
let mut err = connection.ready().await.err(); let mut err = connection.ready().await.err();
if err.is_none() { if err.is_none() {
// Send the request // Send the request
let response = connection.send_request(request).await; let res = connection.send_request(request).await;
if let Ok(response) = response { if let Ok(res) = res {
return Ok(Response { response, size_limit: response_size_limit, client: self }); return Ok(Response(res, self));
} }
err = response.err(); err = res.err();
} }
// Since this connection has been put into an error state, drop it // Since this connection has been put into an error state, drop it
*connection_lock = None; *connection_lock = None;
Err(Error::Hyper(err.expect("reached the error path yet `err` was None")))? Err(Error::Hyper(err.unwrap()))?
} }
}; };
Ok(Response { response, size_limit: response_size_limit, client: self }) Ok(Response(response, self))
} }
} }
#[cfg(feature = "tokio")]
mod tokio {
use hyper_util::rt::tokio::TokioExecutor;
use super::*;
pub type TokioClient = Client<TokioExecutor>;
impl Client<TokioExecutor> {
pub fn with_connection_pool() -> Result<Self, Error> {
Self::with_executor_and_connection_pool(TokioExecutor::new())
}
pub fn without_connection_pool(host: &str) -> Result<Self, Error> {
Self::with_executor_and_without_connection_pool(TokioExecutor::new(), host)
}
}
}
#[cfg(feature = "tokio")]
pub use tokio::TokioClient;
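A usage sketch of the reworked client (the URL and size limit are illustrative;
`TokioClient`, `Request`, `Error`, `Full`, and `hyper` are the crate's
re-exports shown above):

use simple_request::{hyper::{self, body::Bytes}, Error, Full, Request, TokioClient};

async fn example() -> Result<(), Error> {
  let client = TokioClient::with_connection_pool()?;
  let mut request: Request = hyper::Request::get("http://example.com/")
    .body(Full::new(Bytes::new()))
    .unwrap()
    .into();
  // Bound the response to 1 MiB (may be exceeded by a single HTTP frame)
  request.set_response_size_limit(Some(1024 * 1024));
  let response = client.request(request).await?;
  let _body = response.body().await?;
  Ok(())
}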

View File

@@ -7,15 +7,11 @@ pub use http_body_util::Full;
use crate::Error; use crate::Error;
#[derive(Debug)] #[derive(Debug)]
pub struct Request { pub struct Request(pub(crate) hyper::Request<Full<Bytes>>);
pub(crate) request: hyper::Request<Full<Bytes>>,
pub(crate) response_size_limit: Option<usize>,
}
impl Request { impl Request {
#[cfg(feature = "basic-auth")] #[cfg(feature = "basic-auth")]
fn username_password_from_uri(&self) -> Result<(String, String), Error> { fn username_password_from_uri(&self) -> Result<(String, String), Error> {
if let Some(authority) = self.request.uri().authority() { if let Some(authority) = self.0.uri().authority() {
let authority = authority.as_str(); let authority = authority.as_str();
if authority.contains('@') { if authority.contains('@') {
// Decode the username and password from the URI // Decode the username and password from the URI
@@ -40,10 +36,9 @@ impl Request {
let mut formatted = format!("{username}:{password}"); let mut formatted = format!("{username}:{password}");
let mut encoded = Base64::encode_string(formatted.as_bytes()); let mut encoded = Base64::encode_string(formatted.as_bytes());
formatted.zeroize(); formatted.zeroize();
self.request.headers_mut().insert( self.0.headers_mut().insert(
hyper::header::AUTHORIZATION, hyper::header::AUTHORIZATION,
HeaderValue::from_str(&format!("Basic {encoded}")) HeaderValue::from_str(&format!("Basic {encoded}")).unwrap(),
.expect("couldn't form header from base64-encoded string"),
); );
encoded.zeroize(); encoded.zeroize();
} }
@@ -64,17 +59,9 @@ impl Request {
pub fn with_basic_auth(&mut self) { pub fn with_basic_auth(&mut self) {
let _ = self.basic_auth_from_uri(); let _ = self.basic_auth_from_uri();
} }
/// Set a size limit for the response.
///
/// This may be exceeded by a single HTTP frame and accordingly isn't perfect.
pub fn set_response_size_limit(&mut self, response_size_limit: Option<usize>) {
self.response_size_limit = response_size_limit;
} }
}
impl From<hyper::Request<Full<Bytes>>> for Request { impl From<hyper::Request<Full<Bytes>>> for Request {
fn from(request: hyper::Request<Full<Bytes>>) -> Request { fn from(request: hyper::Request<Full<Bytes>>) -> Request {
Request { request, response_size_limit: None } Request(request)
} }
} }

View File

@@ -1,54 +1,24 @@
use core::{pin::Pin, future::Future};
use std::io;
use hyper::{ use hyper::{
StatusCode, StatusCode,
header::{HeaderValue, HeaderMap}, header::{HeaderValue, HeaderMap},
body::Incoming, body::{Buf, Incoming},
rt::Executor,
}; };
use http_body_util::BodyExt; use http_body_util::BodyExt;
use futures_util::{Stream, StreamExt};
use crate::{Client, Error}; use crate::{Client, Error};
// Borrows the client so its async task lives as long as this response exists. // Borrows the client so its async task lives as long as this response exists.
#[allow(dead_code)] #[allow(dead_code)]
#[derive(Debug)] #[derive(Debug)]
pub struct Response< pub struct Response<'a>(pub(crate) hyper::Response<Incoming>, pub(crate) &'a Client);
'a, impl<'a> Response<'a> {
E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
> {
pub(crate) response: hyper::Response<Incoming>,
pub(crate) size_limit: Option<usize>,
pub(crate) client: &'a Client<E>,
}
impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>>
Response<'_, E>
{
pub fn status(&self) -> StatusCode { pub fn status(&self) -> StatusCode {
self.response.status() self.0.status()
} }
pub fn headers(&self) -> &HeaderMap<HeaderValue> { pub fn headers(&self) -> &HeaderMap<HeaderValue> {
self.response.headers() self.0.headers()
} }
pub async fn body(self) -> Result<impl std::io::Read, Error> { pub async fn body(self) -> Result<impl std::io::Read, Error> {
let mut body = self.response.into_body().into_data_stream(); Ok(self.0.into_body().collect().await.map_err(Error::Hyper)?.aggregate().reader())
let mut res: Vec<u8> = vec![];
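// Read the body frame by frame, erroring if the size limit would be exceeded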
loop {
if let Some(size_limit) = self.size_limit {
let (lower, upper) = body.size_hint();
if res.len().wrapping_add(upper.unwrap_or(lower)) > size_limit.min(usize::MAX - 1) {
Err(Error::ConnectionError("response exceeded size limit".into()))?;
}
}
let Some(part) = body.next().await else { break };
let part = part.map_err(Error::Hyper)?;
res.extend(part.as_ref());
}
Ok(io::Cursor::new(res))
} }
} }

View File

@@ -1,13 +1,13 @@
[package] [package]
name = "std-shims" name = "std-shims"
version = "0.1.5" version = "0.1.1"
description = "A series of std shims to make alloc more feasible" description = "A series of std shims to make alloc more feasible"
license = "MIT" license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims" repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["nostd", "no_std", "alloc", "io"] keywords = ["nostd", "no_std", "alloc", "io"]
edition = "2021" edition = "2021"
rust-version = "1.65" rust-version = "1.80"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true
@@ -17,11 +17,9 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true workspace = true
[dependencies] [dependencies]
rustversion = { version = "1", default-features = false } spin = { version = "0.9", default-features = false, features = ["use_ticket_mutex", "lazy"] }
spin = { version = "0.10", default-features = false, features = ["use_ticket_mutex", "fair_mutex", "once", "lazy"] } hashbrown = { version = "0.15", default-features = false, features = ["default-hasher", "inline-more"] }
hashbrown = { version = "0.16", default-features = false, features = ["default-hasher", "inline-more"], optional = true }
[features] [features]
alloc = ["hashbrown"] std = []
std = ["alloc", "spin/std"]
default = ["std"] default = ["std"]

View File

@@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2023-2025 Luke Parker Copyright (c) 2023 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal

View File

@@ -1,28 +1,6 @@
# `std` shims # std shims
`std-shims` is a Rust crate with two purposes: A crate which passes through to std when the default `std` feature is enabled,
- Expand the functionality of `core` and `alloc` yet provides a series of shims when it isn't.
- Polyfill functionality only available on newer versions of Rust
The goal is to make supporting no-`std` environments, and older versions of `HashSet` and `HashMap` are provided via `hashbrown`.
Rust, as simple as possible. For most use cases, replacing `std::` with
`std_shims::` and adding `use std_shims::prelude::*` is sufficient to take full
advantage of `std-shims`.
# API Surface
`std-shims` only aims to have items _mutually available_ between `alloc` (with
extra dependencies) and `std` publicly exposed. Items exclusive to `std`, with
no shims available, will not be exported by `std-shims`.
# Dependencies
`HashSet` and `HashMap` are provided via `hashbrown`. Synchronization
primitives are provided via `spin` (avoiding a requirement on
`critical-section`). Sections of `std::io` are independently reimplemented,
matching `std` as closely as possible. `rustversion` is used to detect when to
provide polyfills.
# Disclaimer
No guarantee of one-to-one parity is provided. The shims provided aim to be
sufficient for the average case. Pull requests are _welcome_.
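A sketch of that intended usage (both functions are purely illustrative):

#![cfg_attr(not(feature = "std"), no_std)]

use std_shims::prelude::*;
use std_shims::collections::HashMap;

// Counts occurrences of each byte; identical code with or without `std`
fn tally(bytes: &[u8]) -> HashMap<u8, usize> {
  let mut counts = HashMap::new();
  for byte in bytes {
    *counts.entry(*byte).or_insert(0) += 1;
  }
  counts
}

// Uses `Vec`, `String`, and `format!` from the shimmed prelude
fn hex_labels(bytes: &[u8]) -> Vec<String> {
  bytes.iter().map(|byte| format!("{byte:02x}")).collect()
}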

View File

@@ -1,7 +1,7 @@
#[cfg(all(feature = "alloc", not(feature = "std")))]
pub use extern_alloc::collections::*;
#[cfg(all(feature = "alloc", not(feature = "std")))]
pub use hashbrown::{HashSet, HashMap};
#[cfg(feature = "std")] #[cfg(feature = "std")]
pub use std::collections::*; pub use std::collections::*;
#[cfg(not(feature = "std"))]
pub use alloc::collections::*;
#[cfg(not(feature = "std"))]
pub use hashbrown::{HashSet, HashMap};

View File

@@ -1,74 +1,42 @@
#[cfg(feature = "std")]
pub use std::io::*;
#[cfg(not(feature = "std"))] #[cfg(not(feature = "std"))]
mod shims { mod shims {
use core::fmt::{self, Debug, Display, Formatter}; use core::fmt::{Debug, Formatter};
#[cfg(feature = "alloc")] use alloc::{boxed::Box, vec::Vec};
use extern_alloc::{boxed::Box, vec::Vec};
use crate::error::Error as CoreError;
/// The kind of error.
#[derive(Clone, Copy, PartialEq, Eq, Debug)] #[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum ErrorKind { pub enum ErrorKind {
UnexpectedEof, UnexpectedEof,
Other, Other,
} }
/// An error.
#[derive(Debug)]
pub struct Error { pub struct Error {
kind: ErrorKind, kind: ErrorKind,
#[cfg(feature = "alloc")] error: Box<dyn Send + Sync>,
error: Box<dyn Send + Sync + CoreError>,
} }
impl Display for Error { impl Debug for Error {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { fn fmt(&self, fmt: &mut Formatter<'_>) -> core::result::Result<(), core::fmt::Error> {
<Self as Debug>::fmt(self, f) fmt.debug_struct("Error").field("kind", &self.kind).finish_non_exhaustive()
} }
} }
impl CoreError for Error {}
#[cfg(not(feature = "alloc"))]
pub trait IntoBoxSendSyncError {}
#[cfg(not(feature = "alloc"))]
impl<I> IntoBoxSendSyncError for I {}
#[cfg(feature = "alloc")]
pub trait IntoBoxSendSyncError: Into<Box<dyn Send + Sync + CoreError>> {}
#[cfg(feature = "alloc")]
impl<I: Into<Box<dyn Send + Sync + CoreError>>> IntoBoxSendSyncError for I {}
impl Error { impl Error {
/// Create a new error. pub fn new<E: 'static + Send + Sync>(kind: ErrorKind, error: E) -> Error {
/// Error { kind, error: Box::new(error) }
/// The error object itself is silently dropped when `alloc` is not enabled.
#[allow(unused)]
pub fn new<E: 'static + IntoBoxSendSyncError>(kind: ErrorKind, error: E) -> Error {
#[cfg(not(feature = "alloc"))]
let res = Error { kind };
#[cfg(feature = "alloc")]
let res = Error { kind, error: error.into() };
res
} }
/// Create a new error with `io::ErrorKind::Other` as its kind. pub fn other<E: 'static + Send + Sync>(error: E) -> Error {
/// Error { kind: ErrorKind::Other, error: Box::new(error) }
/// The error object itself is silently dropped when `alloc` is not enabled.
#[allow(unused)]
pub fn other<E: 'static + IntoBoxSendSyncError>(error: E) -> Error {
#[cfg(not(feature = "alloc"))]
let res = Error { kind: ErrorKind::Other };
#[cfg(feature = "alloc")]
let res = Error { kind: ErrorKind::Other, error: error.into() };
res
} }
/// The kind of error.
pub fn kind(&self) -> ErrorKind { pub fn kind(&self) -> ErrorKind {
self.kind self.kind
} }
/// Retrieve the inner error. pub fn into_inner(self) -> Option<Box<dyn Send + Sync>> {
#[cfg(feature = "alloc")]
pub fn into_inner(self) -> Option<Box<dyn Send + Sync + CoreError>> {
Some(self.error) Some(self.error)
} }
} }
@@ -96,12 +64,6 @@ mod shims {
} }
} }
impl<R: Read> Read for &mut R {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
R::read(*self, buf)
}
}
pub trait BufRead: Read { pub trait BufRead: Read {
fn fill_buf(&mut self) -> Result<&[u8]>; fn fill_buf(&mut self) -> Result<&[u8]>;
fn consume(&mut self, amt: usize); fn consume(&mut self, amt: usize);
@@ -126,7 +88,6 @@ mod shims {
} }
} }
#[cfg(feature = "alloc")]
impl Write for Vec<u8> { impl Write for Vec<u8> {
fn write(&mut self, buf: &[u8]) -> Result<usize> { fn write(&mut self, buf: &[u8]) -> Result<usize> {
self.extend(buf); self.extend(buf);
@@ -134,8 +95,6 @@ mod shims {
} }
} }
} }
#[cfg(not(feature = "std"))] #[cfg(not(feature = "std"))]
pub use shims::*; pub use shims::*;
#[cfg(feature = "std")]
pub use std::io::{ErrorKind, Error, Result, Read, BufRead, Write};

View File

@@ -1,102 +1,13 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), no_std)]
#[cfg(not(feature = "alloc"))] pub extern crate alloc;
pub use core::*;
#[cfg(not(feature = "alloc"))]
pub use core::{alloc, borrow, ffi, fmt, slice, str, task};
#[cfg(not(feature = "std"))]
#[rustversion::before(1.81)]
pub mod error {
use core::fmt::{Debug, Display};
pub trait Error: Debug + Display {}
}
#[cfg(not(feature = "std"))]
#[rustversion::since(1.81)]
pub use core::error;
#[cfg(feature = "alloc")]
extern crate alloc as extern_alloc;
#[cfg(all(feature = "alloc", not(feature = "std")))]
pub use extern_alloc::{alloc, borrow, boxed, ffi, fmt, rc, slice, str, string, task, vec, format};
#[cfg(feature = "std")]
pub use std::{alloc, borrow, boxed, error, ffi, fmt, rc, slice, str, string, task, vec, format};
pub mod sync;
pub mod collections; pub mod collections;
pub mod io; pub mod io;
pub mod sync;
pub mod prelude { pub use alloc::vec;
// Shim the `std` prelude pub use alloc::str;
#[cfg(feature = "alloc")] pub use alloc::string;
pub use extern_alloc::{
format, vec,
borrow::ToOwned,
boxed::Box,
vec::Vec,
string::{String, ToString},
};
// Shim `div_ceil`
#[rustversion::before(1.73)]
#[doc(hidden)]
pub trait StdShimsDivCeil {
fn div_ceil(self, rhs: Self) -> Self;
}
#[rustversion::before(1.73)]
mod impl_divceil {
use super::StdShimsDivCeil;
impl StdShimsDivCeil for u8 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for u16 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for u32 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for u64 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for u128 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for usize {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
}
// Shim `io::Error::other`
#[cfg(feature = "std")]
#[rustversion::before(1.74)]
#[doc(hidden)]
pub trait StdShimsIoErrorOther {
fn other<E>(error: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>;
}
#[cfg(feature = "std")]
#[rustversion::before(1.74)]
impl StdShimsIoErrorOther for std::io::Error {
fn other<E>(error: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
std::io::Error::new(std::io::ErrorKind::Other, error)
}
}
}

View File

@@ -1,28 +1,19 @@
pub use core::sync::atomic; pub use core::sync::*;
#[cfg(all(feature = "alloc", not(feature = "std")))] pub use alloc::sync::*;
pub use extern_alloc::sync::{Arc, Weak};
#[cfg(feature = "std")]
pub use std::sync::{Arc, Weak};
mod mutex_shim { mod mutex_shim {
#[cfg(not(feature = "std"))]
pub use spin::{Mutex, MutexGuard};
#[cfg(feature = "std")] #[cfg(feature = "std")]
pub use std::sync::{Mutex, MutexGuard}; pub use std::sync::*;
#[cfg(not(feature = "std"))]
pub use spin::*;
/// A shimmed `Mutex` with an API common to `spin` and `std`.
#[derive(Default, Debug)] #[derive(Default, Debug)]
pub struct ShimMutex<T>(Mutex<T>); pub struct ShimMutex<T>(Mutex<T>);
impl<T> ShimMutex<T> { impl<T> ShimMutex<T> {
/// Construct a new `Mutex`.
pub const fn new(value: T) -> Self { pub const fn new(value: T) -> Self {
Self(Mutex::new(value)) Self(Mutex::new(value))
} }
/// Acquire a lock on the contents of the `Mutex`.
///
/// On no-`std` environments, this may spin until the lock is acquired. On `std` environments,
/// this may panic if the `Mutex` was poisoned.
pub fn lock(&self) -> MutexGuard<'_, T> { pub fn lock(&self) -> MutexGuard<'_, T> {
#[cfg(feature = "std")] #[cfg(feature = "std")]
let res = self.0.lock().unwrap(); let res = self.0.lock().unwrap();
@@ -34,12 +25,7 @@ mod mutex_shim {
} }
pub use mutex_shim::{ShimMutex as Mutex, MutexGuard}; pub use mutex_shim::{ShimMutex as Mutex, MutexGuard};
#[rustversion::before(1.80)]
pub use spin::Lazy as LazyLock;
#[rustversion::since(1.80)]
#[cfg(not(feature = "std"))]
pub use spin::Lazy as LazyLock;
#[rustversion::since(1.80)]
#[cfg(feature = "std")] #[cfg(feature = "std")]
pub use std::sync::LazyLock; pub use std::sync::LazyLock;
#[cfg(not(feature = "std"))]
pub use spin::Lazy as LazyLock;

View File

@@ -1,6 +1,6 @@
AGPL-3.0-only license AGPL-3.0-only license
Copyright (c) 2022-2025 Luke Parker Copyright (c) 2022-2024 Luke Parker
This program is free software: you can redistribute it and/or modify This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as it under the terms of the GNU Affero General Public License Version 3 as

View File

@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
#![deny(missing_docs)] #![deny(missing_docs)]

View File

@@ -7,9 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/zalloc"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = [] keywords = []
edition = "2021" edition = "2021"
# This must be specified with the patch version, else Rust believes `1.77` < `1.77.0` and will rust-version = "1.77"
# refuse to compile due to relying on versions introduced with `1.77.0`
rust-version = "1.77.0"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true

View File

@@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2022-2025 Luke Parker Copyright (c) 2022-2023 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal

View File

@@ -1,5 +1,5 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))] #![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))]
//! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation. //! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation.

View File

@@ -8,6 +8,7 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = [] keywords = []
edition = "2021" edition = "2021"
publish = false publish = false
rust-version = "1.81"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true
@@ -21,17 +22,15 @@ zeroize = { version = "^1.5", default-features = false, features = ["std"] }
bitvec = { version = "1", default-features = false, features = ["std"] } bitvec = { version = "1", default-features = false, features = ["std"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false, features = ["std"] }
blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] } blake2 = { version = "0.10", default-features = false, features = ["std"] }
schnorrkel = { version = "0.11", default-features = false, features = ["std"] } schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"] }
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] } ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
dkg = { package = "dkg-musig", path = "../crypto/dkg/musig", default-features = false, features = ["std"] } schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std"] }
frost = { package = "modular-frost", path = "../crypto/frost" } frost = { package = "modular-frost", path = "../crypto/frost" }
frost-schnorrkel = { path = "../crypto/schnorrkel" } frost-schnorrkel = { path = "../crypto/schnorrkel" }
hex = { version = "0.4", default-features = false, features = ["std"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
zalloc = { path = "../common/zalloc" } zalloc = { path = "../common/zalloc" }
serai-db = { path = "../common/db" } serai-db = { path = "../common/db" }
@@ -42,7 +41,10 @@ messages = { package = "serai-processor-messages", path = "../processor/messages
message-queue = { package = "serai-message-queue", path = "../message-queue" } message-queue = { package = "serai-message-queue", path = "../message-queue" }
tributary-sdk = { path = "./tributary-sdk" } tributary-sdk = { path = "./tributary-sdk" }
serai-client-serai = { path = "../substrate/client/serai", default-features = false } serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }
hex = { version = "0.4", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
log = { version = "0.4", default-features = false, features = ["std"] } log = { version = "0.4", default-features = false, features = ["std"] }
env_logger = { version = "0.10", default-features = false, features = ["humantime"] } env_logger = { version = "0.10", default-features = false, features = ["humantime"] }

View File

@@ -8,7 +8,7 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = [] keywords = []
edition = "2021" edition = "2021"
publish = false publish = false
rust-version = "1.85" rust-version = "1.81"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true
@@ -18,10 +18,12 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true workspace = true
[dependencies] [dependencies]
blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] } blake2 = { version = "0.10", default-features = false, features = ["std"] }
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
serai-client-serai = { path = "../../substrate/client/serai", default-features = false } serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
log = { version = "0.4", default-features = false, features = ["std"] } log = { version = "0.4", default-features = false, features = ["std"] }
@@ -29,5 +31,3 @@ tokio = { version = "1", default-features = false }
serai-db = { path = "../../common/db", version = "0.1.1" } serai-db = { path = "../../common/db", version = "0.1.1" }
serai-task = { path = "../../common/task", version = "0.1" } serai-task = { path = "../../common/task", version = "0.1" }
serai-cosign-types = { path = "./types" }

View File

@@ -1,6 +1,6 @@
AGPL-3.0-only license AGPL-3.0-only license
Copyright (c) 2023-2025 Luke Parker Copyright (c) 2023-2024 Luke Parker
This program is free software: you can redistribute it and/or modify This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as it under the terms of the GNU Affero General Public License Version 3 as

View File

@@ -24,6 +24,15 @@ pub(crate) struct CosignDelayTask<D: Db> {
pub(crate) db: D, pub(crate) db: D,
} }
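// Wrapper allowing an `Undroppable` transaction to be held across an `.await`
// point. The `ManuallyDrop` suppresses the transaction's (intentionally
// uncompilable) drop glue, while this wrapper's own `Drop` explicitly closes
// the transaction if the future is dropped mid-await.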
struct AwaitUndroppable<T: DbTxn>(Option<core::mem::ManuallyDrop<Undroppable<T>>>);
impl<T: DbTxn> Drop for AwaitUndroppable<T> {
fn drop(&mut self) {
if let Some(mut txn) = self.0.take() {
(unsafe { core::mem::ManuallyDrop::take(&mut txn) }).close();
}
}
}
impl<D: Db> ContinuallyRan for CosignDelayTask<D> { impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
type Error = DoesNotError; type Error = DoesNotError;
@@ -35,14 +44,18 @@ impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
// Receive the next block to mark as cosigned // Receive the next block to mark as cosigned
let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else { let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else {
txn.close();
break; break;
}; };
// Calculate when we should mark it as valid // Calculate when we should mark it as valid
let time_valid = let time_valid =
SystemTime::UNIX_EPOCH + Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY; SystemTime::UNIX_EPOCH + Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY;
// Sleep until then // Sleep until then
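// Move the transaction into the cancellation-safe wrapper before awaiting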
let mut txn = AwaitUndroppable(Some(core::mem::ManuallyDrop::new(txn)));
tokio::time::sleep(time_valid.duration_since(SystemTime::now()).unwrap_or(Duration::ZERO)) tokio::time::sleep(time_valid.duration_since(SystemTime::now()).unwrap_or(Duration::ZERO))
.await; .await;
let mut txn = core::mem::ManuallyDrop::into_inner(txn.0.take().unwrap());
// Set the cosigned block // Set the cosigned block
LatestCosignedBlockNumber::set(&mut txn, &block_number); LatestCosignedBlockNumber::set(&mut txn, &block_number);

View File

@@ -1,5 +1,5 @@
use core::future::Future; use core::future::Future;
use std::time::{Duration, Instant, SystemTime}; use std::time::{Duration, SystemTime};
use serai_db::*; use serai_db::*;
use serai_task::ContinuallyRan; use serai_task::ContinuallyRan;
@@ -77,27 +77,17 @@ pub(crate) fn currently_evaluated_global_session(getter: &impl Get) -> Option<[u
pub(crate) struct CosignEvaluatorTask<D: Db, R: RequestNotableCosigns> { pub(crate) struct CosignEvaluatorTask<D: Db, R: RequestNotableCosigns> {
pub(crate) db: D, pub(crate) db: D,
pub(crate) request: R, pub(crate) request: R,
pub(crate) last_request_for_cosigns: Instant,
} }
impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D, R> { impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D, R> {
type Error = String; type Error = String;
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> { fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
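// Rate-limits network requests for cosigns to once per minute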
let should_request_cosigns = |last_request_for_cosigns: &mut Instant| {
const REQUEST_COSIGNS_SPACING: Duration = Duration::from_secs(60);
if Instant::now() < (*last_request_for_cosigns + REQUEST_COSIGNS_SPACING) {
return false;
}
*last_request_for_cosigns = Instant::now();
true
};
async move { async move {
let mut known_cosign = None; let mut known_cosign = None;
let mut made_progress = false; let mut made_progress = false;
loop { loop {
let mut txn = self.db.txn(); let mut txn = self.db.unsafe_txn();
let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn) let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn)
else { else {
break; break;
@@ -128,13 +118,12 @@ impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D,
// Check if the sum weight doesn't cross the required threshold // Check if the sum weight doesn't cross the required threshold
if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) { if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) {
// Request the necessary cosigns over the network // Request the necessary cosigns over the network
if should_request_cosigns(&mut self.last_request_for_cosigns) { // TODO: Add a timer to ensure this isn't called too often
self self
.request .request
.request_notable_cosigns(global_session) .request_notable_cosigns(global_session)
.await .await
.map_err(|e| format!("{e:?}"))?; .map_err(|e| format!("{e:?}"))?;
}
// We return an error so the delay before this task is run again increases // We return an error so the delay before this task is run again increases
return Err(format!( return Err(format!(
"notable block (#{block_number}) wasn't yet cosigned. this should resolve shortly", "notable block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
@@ -191,13 +180,11 @@ impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D,
// If this session hasn't yet produced notable cosigns, then we presume we'll see // If this session hasn't yet produced notable cosigns, then we presume we'll see
// the desired non-notable cosigns as part of normal operations, without needing to // the desired non-notable cosigns as part of normal operations, without needing to
// explicitly request them // explicitly request them
if should_request_cosigns(&mut self.last_request_for_cosigns) {
self self
.request .request
.request_notable_cosigns(global_session) .request_notable_cosigns(global_session)
.await .await
.map_err(|e| format!("{e:?}"))?; .map_err(|e| format!("{e:?}"))?;
}
// We return an error so the delay before this task is run again increases // We return an error so the delay before this task is run again increases
return Err(format!( return Err(format!(
"block (#{block_number}) wasn't yet cosigned. this should resolve shortly", "block (#{block_number}) wasn't yet cosigned. this should resolve shortly",

View File

@@ -1,21 +1,10 @@
use core::future::Future; use core::future::Future;
use std::{sync::Arc, collections::HashMap}; use std::{sync::Arc, collections::HashMap};
use blake2::{Digest, Blake2b256}; use serai_client::{
primitives::{SeraiAddress, Amount},
use serai_client_serai::{ validator_sets::primitives::ValidatorSet,
abi::{ Serai,
primitives::{
network_id::{ExternalNetworkId, NetworkId},
balance::Amount,
crypto::Public,
validator_sets::{Session, ExternalValidatorSet},
address::SeraiAddress,
merkle::IncrementalUnbalancedMerkleTree,
},
validator_sets::Event,
},
Serai, Events,
}; };
use serai_db::*; use serai_db::*;
@@ -23,20 +12,9 @@ use serai_task::ContinuallyRan;
use crate::*; use crate::*;
#[derive(BorshSerialize, BorshDeserialize)]
struct Set {
session: Session,
key: Public,
stake: Amount,
}
create_db!( create_db!(
CosignIntend { CosignIntend {
ScanCosignFrom: () -> u64, ScanCosignFrom: () -> u64,
BuildsUpon: () -> IncrementalUnbalancedMerkleTree,
Stakes: (network: ExternalNetworkId, validator: SeraiAddress) -> Amount,
Validators: (set: ExternalValidatorSet) -> Vec<SeraiAddress>,
LatestSet: (network: ExternalNetworkId) -> Set,
} }
); );
@@ -50,45 +28,30 @@ db_channel! {
CosignIntendChannels { CosignIntendChannels {
GlobalSessionsChannel: () -> ([u8; 32], GlobalSession), GlobalSessionsChannel: () -> ([u8; 32], GlobalSession),
BlockEvents: () -> BlockEventData, BlockEvents: () -> BlockEventData,
IntendedCosigns: (set: ExternalValidatorSet) -> CosignIntent, IntendedCosigns: (set: ValidatorSet) -> CosignIntent,
} }
} }
async fn block_has_events_justifying_a_cosign( async fn block_has_events_justifying_a_cosign(
serai: &Serai, serai: &Serai,
block_number: u64, block_number: u64,
) -> Result<(Block, Events, HasEvents), String> { ) -> Result<(Block, HasEvents), String> {
let block = serai let block = serai
.block_by_number(block_number) .finalized_block_by_number(block_number)
.await .await
.map_err(|e| format!("{e:?}"))? .map_err(|e| format!("{e:?}"))?
.ok_or_else(|| "couldn't get block which should've been finalized".to_string())?; .ok_or_else(|| "couldn't get block which should've been finalized".to_string())?;
let events = serai.events(block.header.hash()).await.map_err(|e| format!("{e:?}"))?; let serai = serai.as_of(block.hash());
if events.validator_sets().set_keys_events().next().is_some() { if !serai.validator_sets().key_gen_events().await.map_err(|e| format!("{e:?}"))?.is_empty() {
return Ok((block, events, HasEvents::Notable)); return Ok((block, HasEvents::Notable));
} }
if events.coins().burn_with_instruction_events().next().is_some() { if !serai.coins().burn_with_instruction_events().await.map_err(|e| format!("{e:?}"))?.is_empty() {
return Ok((block, events, HasEvents::NonNotable)); return Ok((block, HasEvents::NonNotable));
} }
Ok((block, events, HasEvents::No)) Ok((block, HasEvents::No))
}
// Fetch the `ExternalValidatorSet`s, and their associated keys, used for cosigning as of this
// block.
fn cosigning_sets(getter: &impl Get) -> Vec<(ExternalValidatorSet, Public, Amount)> {
let mut sets = vec![];
for network in ExternalNetworkId::all() {
let Some(Set { session, key, stake }) = LatestSet::get(getter, network) else {
// If this network doesn't have usable keys, move on
continue;
};
sets.push((ExternalValidatorSet { network, session }, key, stake));
}
sets
} }
 /// A task to determine which blocks we should intend to cosign.
@@ -104,108 +67,56 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
     async move {
       let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1);
       let latest_block_number =
-        self.serai.latest_finalized_block_number().await.map_err(|e| format!("{e:?}"))?;
+        self.serai.latest_finalized_block().await.map_err(|e| format!("{e:?}"))?.number();
       for block_number in start_block_number ..= latest_block_number {
-        let mut txn = self.db.txn();
-        let (block, events, mut has_events) =
+        let mut txn = self.db.unsafe_txn();
+        let (block, mut has_events) =
           block_has_events_justifying_a_cosign(&self.serai, block_number)
             .await
             .map_err(|e| format!("{e:?}"))?;
-        let mut builds_upon =
-          BuildsUpon::get(&txn).unwrap_or(IncrementalUnbalancedMerkleTree::new());
         // Check we are indexing a linear chain
-        if block.header.builds_upon() !=
-          builds_upon.clone().calculate(serai_client_serai::abi::BLOCK_HEADER_BRANCH_TAG)
+        if (block_number > 1) &&
+          (<[u8; 32]>::from(block.header.parent_hash) !=
+            SubstrateBlockHash::get(&txn, block_number - 1)
+              .expect("indexing a block but haven't indexed its parent"))
         {
           Err(format!(
            "node's block #{block_number} doesn't build upon the block #{} prior indexed",
            block_number - 1
          ))?;
         }
-        let block_hash = block.header.hash();
+        let block_hash = block.hash();
         SubstrateBlockHash::set(&mut txn, block_number, &block_hash);
-        builds_upon.append(
-          serai_client_serai::abi::BLOCK_HEADER_BRANCH_TAG,
-          Blake2b256::new_with_prefix([serai_client_serai::abi::BLOCK_HEADER_LEAF_TAG])
-            .chain_update(block_hash.0)
-            .finalize()
-            .into(),
-        );
-        BuildsUpon::set(&mut txn, &builds_upon);
-        // Update the stakes
-        for event in events.validator_sets().allocation_events() {
-          let Event::Allocation { validator, network, amount } = event else {
-            panic!("event from `allocation_events` wasn't `Event::Allocation`")
-          };
-          let Ok(network) = ExternalNetworkId::try_from(*network) else { continue };
-          let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0));
-          Stakes::set(&mut txn, network, *validator, &Amount(existing.0 + amount.0));
-        }
-        for event in events.validator_sets().deallocation_events() {
-          let Event::Deallocation { validator, network, amount, timeline: _ } = event else {
-            panic!("event from `deallocation_events` wasn't `Event::Deallocation`")
-          };
-          let Ok(network) = ExternalNetworkId::try_from(*network) else { continue };
-          let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0));
-          Stakes::set(&mut txn, network, *validator, &Amount(existing.0 - amount.0));
-        }
-        // Handle decided sets
-        for event in events.validator_sets().set_decided_events() {
-          let Event::SetDecided { set, validators } = event else {
-            panic!("event from `set_decided_events` wasn't `Event::SetDecided`")
-          };
-          let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
-          Validators::set(
-            &mut txn,
-            set,
-            &validators.iter().map(|(validator, _key_shares)| *validator).collect(),
-          );
-        }
-        // Handle declarations of the latest set
-        for event in events.validator_sets().set_keys_events() {
-          let Event::SetKeys { set, key_pair } = event else {
-            panic!("event from `set_keys_events` wasn't `Event::SetKeys`")
-          };
-          let mut stake = 0;
-          for validator in
-            Validators::take(&mut txn, *set).expect("set which wasn't decided set keys")
-          {
-            stake += Stakes::get(&txn, set.network, validator).unwrap_or(Amount(0)).0;
-          }
-          LatestSet::set(
-            &mut txn,
-            set.network,
-            &Set { session: set.session, key: key_pair.0, stake: Amount(stake) },
-          );
-        }
         let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn);
         // If this is notable, it creates a new global session, which we index into the database
         // now
         if has_events == HasEvents::Notable {
-          let sets_and_keys_and_stakes = cosigning_sets(&txn);
-          let global_session = GlobalSession::id(
-            sets_and_keys_and_stakes.iter().map(|(set, _key, _stake)| *set).collect(),
-          );
+          let serai = self.serai.as_of(block_hash);
+          let sets_and_keys = cosigning_sets(&serai).await?;
+          let global_session =
+            GlobalSession::id(sets_and_keys.iter().map(|(set, _key)| *set).collect());
-          let mut sets = Vec::with_capacity(sets_and_keys_and_stakes.len());
-          let mut keys = HashMap::with_capacity(sets_and_keys_and_stakes.len());
-          let mut stakes = HashMap::with_capacity(sets_and_keys_and_stakes.len());
+          let mut sets = Vec::with_capacity(sets_and_keys.len());
+          let mut keys = HashMap::with_capacity(sets_and_keys.len());
+          let mut stakes = HashMap::with_capacity(sets_and_keys.len());
           let mut total_stake = 0;
-          for (set, key, stake) in sets_and_keys_and_stakes {
-            sets.push(set);
-            keys.insert(set.network, key);
-            stakes.insert(set.network, stake.0);
-            total_stake += stake.0;
+          for (set, key) in &sets_and_keys {
+            sets.push(*set);
+            keys.insert(set.network, SeraiAddress::from(*key));
+            let stake = serai
+              .validator_sets()
+              .total_allocated_stake(set.network)
+              .await
+              .map_err(|e| format!("{e:?}"))?
+              .unwrap_or(Amount(0))
+              .0;
+            stakes.insert(set.network, stake);
+            total_stake += stake;
           }
           if total_stake == 0 {
            Err(format!("cosigning sets for block #{block_number} had 0 stake in total"))?;
@@ -244,7 +155,7 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
          // Tell each set of their expectation to cosign this block
          for set in global_session_info.sets {
-            log::debug!("{set:?} will be cosigning block #{block_number}");
+            log::debug!("{:?} will be cosigning block #{block_number}", set);
            IntendedCosigns::send(
              &mut txn,
              set,


@@ -1,33 +1,24 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
 use core::{fmt::Debug, future::Future};
-use std::{sync::Arc, collections::HashMap, time::Instant};
+use std::{sync::Arc, collections::HashMap};
 use blake2::{Digest, Blake2s256};
+use scale::{Encode, Decode};
 use borsh::{BorshSerialize, BorshDeserialize};
-use serai_client_serai::{
-  abi::{
-    primitives::{
-      BlockHash,
-      crypto::{Public, KeyPair},
-      network_id::ExternalNetworkId,
-      validator_sets::{Session, ExternalValidatorSet},
-      address::SeraiAddress,
-    },
-    Block,
-  },
-  Serai, State,
-};
+use serai_client::{
+  primitives::{NetworkId, SeraiAddress},
+  validator_sets::primitives::{Session, ValidatorSet, KeyPair},
+  Public, Block, Serai, TemporalSerai,
+};
 use serai_db::*;
 use serai_task::*;
-pub use serai_cosign_types::*;
 /// The cosigns which are intended to be performed.
 mod intend;
 /// The evaluator of the cosigns.
@@ -37,6 +28,9 @@ mod delay;
 pub use delay::BROADCAST_FREQUENCY;
 use delay::LatestCosignedBlockNumber;
+/// The schnorrkel context to use when signing a cosign.
+pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";
 /// A 'global session', defined as all validator sets used for cosigning at a given moment.
 ///
 /// We evaluate cosign faults within a global session. This ensures even if cosigners cosign
@@ -58,13 +52,13 @@ use delay::LatestCosignedBlockNumber;
 #[derive(Debug, BorshSerialize, BorshDeserialize)]
 pub(crate) struct GlobalSession {
   pub(crate) start_block_number: u64,
-  pub(crate) sets: Vec<ExternalValidatorSet>,
-  pub(crate) keys: HashMap<ExternalNetworkId, Public>,
-  pub(crate) stakes: HashMap<ExternalNetworkId, u64>,
+  pub(crate) sets: Vec<ValidatorSet>,
+  pub(crate) keys: HashMap<NetworkId, SeraiAddress>,
+  pub(crate) stakes: HashMap<NetworkId, u64>,
   pub(crate) total_stake: u64,
 }
 impl GlobalSession {
-  fn id(mut cosigners: Vec<ExternalValidatorSet>) -> [u8; 32] {
+  fn id(mut cosigners: Vec<ValidatorSet>) -> [u8; 32] {
     cosigners.sort_by_key(|a| borsh::to_vec(a).unwrap());
     Blake2s256::digest(borsh::to_vec(&cosigners).unwrap()).into()
   }
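Because `GlobalSession::id` sorts the cosigners by their borsh encodings before hashing, any permutation of the same sets produces the same 32-byte identifier. A minimal, self-contained sketch of that property (using `blake2` and `borsh` as above; `MiniSet` is a hypothetical stand-in for the validator-set type):

use blake2::{Digest, Blake2s256};
use borsh::BorshSerialize;

#[derive(Clone, BorshSerialize)]
struct MiniSet {
  network: u8,
  session: u32,
}

fn id(mut cosigners: Vec<MiniSet>) -> [u8; 32] {
  // Sort by encoding so the ID is independent of the order the sets were collected in
  cosigners.sort_by_key(|a| borsh::to_vec(a).unwrap());
  Blake2s256::digest(borsh::to_vec(&cosigners).unwrap()).into()
}

fn main() {
  let (a, b) = (MiniSet { network: 1, session: 7 }, MiniSet { network: 2, session: 3 });
  assert_eq!(id(vec![a.clone(), b.clone()]), id(vec![b, a]));
}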
@@ -84,12 +78,56 @@ enum HasEvents {
   No,
 }
+/// An intended cosign.
+#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
+pub struct CosignIntent {
+  /// The global session this cosign is being performed under.
+  pub global_session: [u8; 32],
+  /// The number of the block to cosign.
+  pub block_number: u64,
+  /// The hash of the block to cosign.
+  pub block_hash: [u8; 32],
+  /// If this cosign must be handled before further cosigns are.
+  pub notable: bool,
+}
+/// A cosign.
+#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)]
+pub struct Cosign {
+  /// The global session this cosign is being performed under.
+  pub global_session: [u8; 32],
+  /// The number of the block to cosign.
+  pub block_number: u64,
+  /// The hash of the block to cosign.
+  pub block_hash: [u8; 32],
+  /// The actual cosigner.
+  pub cosigner: NetworkId,
+}
+/// A signed cosign.
+#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
+pub struct SignedCosign {
+  /// The cosign.
+  pub cosign: Cosign,
+  /// The signature for the cosign.
+  pub signature: [u8; 64],
+}
+impl SignedCosign {
+  fn verify_signature(&self, signer: serai_client::Public) -> bool {
+    let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
+    let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };
+    signer.verify_simple(COSIGN_CONTEXT, &self.cosign.encode(), &signature).is_ok()
+  }
+}
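For reference, the signature `verify_signature` checks is schnorrkel's "simple" signature under `COSIGN_CONTEXT` over the encoded cosign. A minimal sketch of the signing side (the message is a stand-in for the encoded `Cosign`, which is SCALE on one side of this diff and borsh on the other):

use rand_core::OsRng;
use schnorrkel::{Keypair, Signature};

const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";

fn main() {
  let keypair = Keypair::generate_with(OsRng);
  let msg = b"encoded cosign"; // stand-in for the encoded `Cosign`
  let signature: [u8; 64] = keypair.sign_simple(COSIGN_CONTEXT, msg).to_bytes();
  // Round-trips through the same checks `verify_signature` performs
  let signature = Signature::from_bytes(&signature).unwrap();
  assert!(keypair.public.verify_simple(COSIGN_CONTEXT, msg, &signature).is_ok());
}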
 create_db! {
   Cosign {
     // The following are populated by the intend task and used throughout the library
     // An index of Substrate blocks
-    SubstrateBlockHash: (block_number: u64) -> BlockHash,
+    SubstrateBlockHash: (block_number: u64) -> [u8; 32],
     // A mapping from a global session's ID to its relevant information.
     GlobalSessions: (global_session: [u8; 32]) -> GlobalSession,
     // The last block to be cosigned by a global session.
@@ -110,10 +148,7 @@ create_db! {
     // one notable block. All validator sets will explicitly produce a cosign for their notable
     // block, causing the latest cosigned block for a global session to either be the global
     // session's notable cosigns or the network's latest cosigns.
-    NetworksLatestCosignedBlock: (
-      global_session: [u8; 32],
-      network: ExternalNetworkId
-    ) -> SignedCosign,
+    NetworksLatestCosignedBlock: (global_session: [u8; 32], network: NetworkId) -> SignedCosign,
     // Cosigns received for blocks not locally recognized as finalized.
     Faults: (global_session: [u8; 32]) -> Vec<SignedCosign>,
     // The global session which faulted.
@@ -121,6 +156,62 @@ create_db! {
   }
 }
+/// Fetch the keys used for cosigning by a specific network.
+async fn keys_for_network(
+  serai: &TemporalSerai<'_>,
+  network: NetworkId,
+) -> Result<Option<(Session, KeyPair)>, String> {
+  // The Serai network never cosigns so it has no keys for cosigning
+  if network == NetworkId::Serai {
+    return Ok(None);
+  }
+  let Some(latest_session) =
+    serai.validator_sets().session(network).await.map_err(|e| format!("{e:?}"))?
+  else {
+    // If this network hasn't had a session declared, move on
+    return Ok(None);
+  };
+  // Get the keys for the latest session
+  if let Some(keys) = serai
+    .validator_sets()
+    .keys(ValidatorSet { network, session: latest_session })
+    .await
+    .map_err(|e| format!("{e:?}"))?
+  {
+    return Ok(Some((latest_session, keys)));
+  }
+  // If the latest session has yet to set keys, use the prior session
+  if let Some(prior_session) = latest_session.0.checked_sub(1).map(Session) {
+    if let Some(keys) = serai
+      .validator_sets()
+      .keys(ValidatorSet { network, session: prior_session })
+      .await
+      .map_err(|e| format!("{e:?}"))?
+    {
+      return Ok(Some((prior_session, keys)));
+    }
+  }
+  Ok(None)
+}
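Distilled, `keys_for_network` is a two-step fallback: prefer the latest session's keys, and if that session hasn't set keys yet, try the session immediately prior. A sketch of just that control flow (with a hypothetical `keys_of` lookup standing in for the RPC calls):

fn usable_keys(latest: u32, keys_of: impl Fn(u32) -> Option<[u8; 32]>) -> Option<(u32, [u8; 32])> {
  if let Some(keys) = keys_of(latest) {
    return Some((latest, keys));
  }
  // The latest session exists but hasn't set keys; fall back to its predecessor
  latest.checked_sub(1).and_then(|prior| keys_of(prior).map(|keys| (prior, keys)))
}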
+/// Fetch the `ValidatorSet`s, and their associated keys, used for cosigning as of this block.
+async fn cosigning_sets(serai: &TemporalSerai<'_>) -> Result<Vec<(ValidatorSet, Public)>, String> {
+  let mut sets = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
+  for network in serai_client::primitives::NETWORKS {
+    let Some((session, keys)) = keys_for_network(serai, network).await? else {
+      // If this network doesn't have usable keys, move on
+      continue;
+    };
+    sets.push((ValidatorSet { network, session }, keys.0));
+  }
+  Ok(sets)
+}
 /// An object usable to request notable cosigns for a block.
 pub trait RequestNotableCosigns: 'static + Send {
   /// The error type which may be encountered when requesting notable cosigns.
@@ -197,11 +288,7 @@ impl<D: Db> Cosigning<D> {
         .continually_run(intend_task, vec![evaluator_task_handle]),
     );
     tokio::spawn(
-      (evaluator::CosignEvaluatorTask {
-        db: db.clone(),
-        request,
-        last_request_for_cosigns: Instant::now(),
-      })
+      (evaluator::CosignEvaluatorTask { db: db.clone(), request })
         .continually_run(evaluator_task, vec![delay_task_handle]),
     );
     tokio::spawn(
@@ -221,10 +308,7 @@ impl<D: Db> Cosigning<D> {
   }
   /// Fetch a cosigned Substrate block's hash by its block number.
-  pub fn cosigned_block(
-    getter: &impl Get,
-    block_number: u64,
-  ) -> Result<Option<BlockHash>, Faulted> {
+  pub fn cosigned_block(getter: &impl Get, block_number: u64) -> Result<Option<[u8; 32]>, Faulted> {
     if block_number > Self::latest_cosigned_block_number(getter)? {
       return Ok(None);
     }
@@ -239,8 +323,8 @@ impl<D: Db> Cosigning<D> {
   /// If this global session hasn't produced any notable cosigns, this will return the latest
   /// cosigns for this session.
   pub fn notable_cosigns(getter: &impl Get, global_session: [u8; 32]) -> Vec<SignedCosign> {
-    let mut cosigns = vec![];
-    for network in ExternalNetworkId::all() {
+    let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
+    for network in serai_client::primitives::NETWORKS {
       if let Some(cosign) = NetworksLatestCosignedBlock::get(getter, global_session, network) {
         cosigns.push(cosign);
       }
@@ -257,7 +341,7 @@ impl<D: Db> Cosigning<D> {
     let mut cosigns = Faults::get(&self.db, faulted).expect("faulted with no faults");
     // Also include all of our recognized-as-honest cosigns in an attempt to induce fault
     // identification in those who see the faulty cosigns as honest
-    for network in ExternalNetworkId::all() {
+    for network in serai_client::primitives::NETWORKS {
       if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, faulted, network) {
         if cosign.cosign.global_session == faulted {
           cosigns.push(cosign);
@@ -269,8 +353,8 @@ impl<D: Db> Cosigning<D> {
     let Some(global_session) = evaluator::currently_evaluated_global_session(&self.db) else {
       return vec![];
     };
-    let mut cosigns = vec![];
-    for network in ExternalNetworkId::all() {
+    let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
+    for network in serai_client::primitives::NETWORKS {
       if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, global_session, network) {
         cosigns.push(cosign);
       }
@@ -325,8 +409,13 @@ impl<D: Db> Cosigning<D> {
     // Check the cosign's signature
     {
-      let key =
-        *global_session.keys.get(&network).ok_or(IntakeCosignError::NonParticipatingNetwork)?;
+      let key = Public::from({
+        let Some(key) = global_session.keys.get(&network) else {
+          Err(IntakeCosignError::NonParticipatingNetwork)?
+        };
+        *key
+      });
       if !signed_cosign.verify_signature(key) {
         Err(IntakeCosignError::InvalidSignature)?;
       }
@@ -335,7 +424,7 @@ impl<D: Db> Cosigning<D> {
     // Since we verified this cosign's signature, and have a chain sufficiently long, handle the
     // cosign
-    let mut txn = self.db.txn();
+    let mut txn = self.db.unsafe_txn();
     if !faulty {
       // If this is for a future global session, we don't acknowledge this cosign at this time
@@ -376,12 +465,12 @@ impl<D: Db> Cosigning<D> {
     Ok(())
   }
-  /// Receive intended cosigns to produce for this ExternalValidatorSet.
+  /// Receive intended cosigns to produce for this ValidatorSet.
   ///
   /// All cosigns intended, up to and including the next notable cosign, are returned.
   ///
   /// This will drain the internal channel and not re-yield these intentions again.
-  pub fn intended_cosigns(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec<CosignIntent> {
+  pub fn intended_cosigns(txn: &mut impl DbTxn, set: ValidatorSet) -> Vec<CosignIntent> {
     let mut res: Vec<CosignIntent> = vec![];
     // While we have yet to find a notable cosign...
     while !res.last().map(|cosign| cosign.notable).unwrap_or(false) {
@@ -391,3 +480,30 @@ impl<D: Db> Cosigning<D> {
     res
   }
 }
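The loop in `intended_cosigns` is a drain-until-notable pattern: keep pulling intents off the channel, stopping once the last intent pulled is notable (or the channel empties). A generic sketch of that pattern (hypothetical `Intent` and `recv`, standing in for the channel's `try_recv`):

struct Intent {
  notable: bool,
}

fn drain_until_notable(mut recv: impl FnMut() -> Option<Intent>) -> Vec<Intent> {
  let mut res: Vec<Intent> = vec![];
  while !res.last().map(|intent| intent.notable).unwrap_or(false) {
    // Stop early if no further intents are queued
    let Some(intent) = recv() else { break };
    res.push(intent);
  }
  res
}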
+mod tests {
+  use super::*;
+  struct RNC;
+  impl RequestNotableCosigns for RNC {
+    /// The error type which may be encountered when requesting notable cosigns.
+    type Error = ();
+    /// Request the notable cosigns for this global session.
+    fn request_notable_cosigns(
+      &self,
+      global_session: [u8; 32],
+    ) -> impl Send + Future<Output = Result<(), Self::Error>> {
+      async move { Ok(()) }
+    }
+  }
+  #[tokio::test]
+  async fn test() {
+    let db: serai_db::MemDb = serai_db::MemDb::new();
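+    // Presumably a zeroed stand-in for the `Serai` handle so this compile-focused
+    // test typechecks; actually using the transmuted value would be undefined behavior.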
+    let serai = unsafe { core::mem::transmute(0u64) };
+    let request = RNC;
+    let tasks = vec![];
+    let _ = Cosigning::spawn(db, serai, request, tasks);
+    core::future::pending().await
+  }
+}


@@ -1,25 +0,0 @@
[package]
name = "serai-cosign-types"
version = "0.1.0"
description = "Evaluator of cosigns for the Serai network"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/cosign"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
rust-version = "1.85"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
serai-primitives = { path = "../../../substrate/primitives", default-features = false, features = ["std"] }


@@ -1,72 +0,0 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
-#![deny(missing_docs)]
-//! Types used when cosigning Serai. For more info, please see `serai-cosign`.
-use borsh::{BorshSerialize, BorshDeserialize};
-use serai_primitives::{BlockHash, crypto::Public, network_id::ExternalNetworkId};
-/// The schnorrkel context to use when signing a cosign.
-pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";
-/// An intended cosign.
-#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
-pub struct CosignIntent {
-  /// The global session this cosign is being performed under.
-  pub global_session: [u8; 32],
-  /// The number of the block to cosign.
-  pub block_number: u64,
-  /// The hash of the block to cosign.
-  pub block_hash: BlockHash,
-  /// If this cosign must be handled before further cosigns are.
-  pub notable: bool,
-}
-/// A cosign.
-#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
-pub struct Cosign {
-  /// The global session this cosign is being performed under.
-  pub global_session: [u8; 32],
-  /// The number of the block to cosign.
-  pub block_number: u64,
-  /// The hash of the block to cosign.
-  pub block_hash: BlockHash,
-  /// The actual cosigner.
-  pub cosigner: ExternalNetworkId,
-}
-impl CosignIntent {
-  /// Convert this into a `Cosign`.
-  pub fn into_cosign(self, cosigner: ExternalNetworkId) -> Cosign {
-    let CosignIntent { global_session, block_number, block_hash, notable: _ } = self;
-    Cosign { global_session, block_number, block_hash, cosigner }
-  }
-}
-impl Cosign {
-  /// The message to sign when signing this cosign.
-  ///
-  /// This must be signed with schnorrkel, the context set to `COSIGN_CONTEXT`.
-  pub fn signature_message(&self) -> Vec<u8> {
-    // We use a schnorrkel context to domain-separate this
-    borsh::to_vec(self).unwrap()
-  }
-}
-/// A signed cosign.
-#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
-pub struct SignedCosign {
-  /// The cosign.
-  pub cosign: Cosign,
-  /// The signature for the cosign.
-  pub signature: [u8; 64],
-}
-impl SignedCosign {
-  /// Verify a cosign's signature.
-  pub fn verify_signature(&self, signer: Public) -> bool {
-    let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
-    let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };
-    signer.verify_simple(COSIGN_CONTEXT, &self.cosign.signature_message(), &signature).is_ok()
-  }
-}


@@ -8,7 +8,7 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
 publish = false
-rust-version = "1.85"
+rust-version = "1.81"
 [package.metadata.docs.rs]
 all-features = true
@@ -22,7 +22,7 @@ borsh = { version = "1", default-features = false, features = ["std", "derive",
 serai-db = { path = "../../common/db", version = "0.1" }
-serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] }
+serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
 serai-cosign = { path = "../cosign" }
 tributary-sdk = { path = "../tributary-sdk" }


@@ -8,7 +8,7 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
 publish = false
-rust-version = "1.87"
+rust-version = "1.81"
 [package.metadata.docs.rs]
 all-features = true
@@ -23,19 +23,19 @@ async-trait = { version = "0.1", default-features = false }
 rand_core = { version = "0.6", default-features = false, features = ["std"] }
 zeroize = { version = "^1.5", default-features = false, features = ["std"] }
-blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }
+blake2 = { version = "0.10", default-features = false, features = ["std"] }
 schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
 hex = { version = "0.4", default-features = false, features = ["std"] }
 borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
-serai-client-serai = { path = "../../../substrate/client/serai", default-features = false }
+serai-client = { path = "../../../substrate/client", default-features = false, features = ["serai", "borsh"] }
 serai-cosign = { path = "../../cosign" }
 tributary-sdk = { path = "../../tributary-sdk" }
 futures-util = { version = "0.3", default-features = false, features = ["std"] }
 tokio = { version = "1", default-features = false, features = ["sync"] }
-libp2p = { version = "0.56", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] }
+libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] }
 log = { version = "0.4", default-features = false, features = ["std"] }
 serai-task = { path = "../../../common/task", version = "0.1" }


@@ -7,11 +7,12 @@ use rand_core::{RngCore, OsRng};
 use blake2::{Digest, Blake2s256};
 use schnorrkel::{Keypair, PublicKey, Signature};
-use serai_client_serai::abi::primitives::crypto::Public;
+use serai_client::primitives::PublicKey as Public;
 use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
 use libp2p::{
-  core::upgrade::{UpgradeInfo, InboundConnectionUpgrade, OutboundConnectionUpgrade},
+  core::UpgradeInfo,
+  InboundUpgrade, OutboundUpgrade,
   identity::{self, PeerId},
   noise,
 };
@@ -104,7 +105,7 @@ impl OnlyValidators {
       .verify_simple(PROTOCOL.as_bytes(), &msg, &sig)
       .map_err(|_| io::Error::other("invalid signature"))?;
-    Ok(peer_id_from_public(Public(public_key.to_bytes())))
+    Ok(peer_id_from_public(Public::from_raw(public_key.to_bytes())))
   }
 }
@@ -118,18 +119,12 @@ impl UpgradeInfo for OnlyValidators {
   }
 }
-impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> InboundConnectionUpgrade<S>
-  for OnlyValidators
-{
+impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> InboundUpgrade<S> for OnlyValidators {
   type Output = (PeerId, noise::Output<S>);
   type Error = io::Error;
   type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;
-  fn upgrade_inbound(
-    self,
-    socket: S,
-    info: <Self as UpgradeInfo>::Info,
-  ) -> <Self as InboundConnectionUpgrade<S>>::Future {
+  fn upgrade_inbound(self, socket: S, info: Self::Info) -> Self::Future {
     Box::pin(async move {
       let (dialer_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
         .unwrap()
@@ -152,18 +147,12 @@ impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> InboundConnectionUpgrad
   }
 }
-impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> OutboundConnectionUpgrade<S>
-  for OnlyValidators
-{
+impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> OutboundUpgrade<S> for OnlyValidators {
   type Output = (PeerId, noise::Output<S>);
   type Error = io::Error;
   type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;
-  fn upgrade_outbound(
-    self,
-    socket: S,
-    info: <Self as UpgradeInfo>::Info,
-  ) -> <Self as OutboundConnectionUpgrade<S>>::Future {
+  fn upgrade_outbound(self, socket: S, info: Self::Info) -> Self::Future {
     Box::pin(async move {
       let (listener_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
         .unwrap()


@@ -1,11 +1,11 @@
-use core::{future::Future, str::FromStr};
+use core::future::Future;
 use std::{sync::Arc, collections::HashSet};
 use rand_core::{RngCore, OsRng};
 use tokio::sync::mpsc;
-use serai_client_serai::{RpcError, Serai};
+use serai_client::{SeraiError, Serai};
 use libp2p::{
   core::multiaddr::{Protocol, Multiaddr},
@@ -50,7 +50,7 @@ impl ContinuallyRan for DialTask {
   const DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
   const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 10 * 60;
-  type Error = RpcError;
+  type Error = SeraiError;
   fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
@@ -94,13 +94,6 @@ impl ContinuallyRan for DialTask {
           usize::try_from(OsRng.next_u64() % u64::try_from(potential_peers.len()).unwrap())
             .unwrap();
         let randomly_selected_peer = potential_peers.swap_remove(index_to_dial);
-        let Ok(randomly_selected_peer) = libp2p::Multiaddr::from_str(&randomly_selected_peer)
-        else {
-          log::error!(
-            "peer from substrate wasn't a valid `Multiaddr`: {randomly_selected_peer}"
-          );
-          continue;
-        };
         log::info!("found peer from substrate: {randomly_selected_peer}");


@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
@@ -13,10 +13,9 @@ use rand_core::{RngCore, OsRng};
 use zeroize::Zeroizing;
 use schnorrkel::Keypair;
-use serai_client_serai::{
-  abi::primitives::{
-    crypto::Public, network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet,
-  },
+use serai_client::{
+  primitives::{NetworkId, PublicKey},
+  validator_sets::primitives::ValidatorSet,
   Serai,
 };
@@ -51,7 +50,7 @@ mod ping;
 /// The request-response messages and behavior
 mod reqres;
-use reqres::{InboundRequestId, Request, Response};
+use reqres::{RequestId, Request, Response};
 /// The gossip messages and behavior
 mod gossip;
@@ -67,7 +66,15 @@ use dial::DialTask;
 const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o')
+// usize::max, manually implemented, as max isn't a const fn
+const MAX_LIBP2P_MESSAGE_SIZE: usize =
+  if gossip::MAX_LIBP2P_GOSSIP_MESSAGE_SIZE > reqres::MAX_LIBP2P_REQRES_MESSAGE_SIZE {
+    gossip::MAX_LIBP2P_GOSSIP_MESSAGE_SIZE
+  } else {
+    reqres::MAX_LIBP2P_REQRES_MESSAGE_SIZE
+  };
-fn peer_id_from_public(public: Public) -> PeerId {
+fn peer_id_from_public(public: PublicKey) -> PeerId {
   // 0 represents the identity Multihash, that no hash was performed
   // It's an internal constant so we can't refer to the constant inside libp2p
   PeerId::from_multihash(Multihash::wrap(0, &public.0).unwrap()).unwrap()
@@ -105,7 +112,7 @@ impl serai_coordinator_p2p::Peer<'_> for Peer<'_> {
 #[derive(Clone)]
 struct Peers {
-  peers: Arc<RwLock<HashMap<ExternalNetworkId, HashSet<PeerId>>>>,
+  peers: Arc<RwLock<HashMap<NetworkId, HashSet<PeerId>>>>,
 }
 // Consider adding identify/kad/autonat/rendevous/(relay + dcutr). While we currently use the Serai
@@ -136,10 +143,9 @@ struct Libp2pInner {
   signed_cosigns: Mutex<mpsc::UnboundedReceiver<SignedCosign>>,
   signed_cosigns_send: mpsc::UnboundedSender<SignedCosign>,
-  heartbeat_requests:
-    Mutex<mpsc::UnboundedReceiver<(InboundRequestId, ExternalValidatorSet, [u8; 32])>>,
-  notable_cosign_requests: Mutex<mpsc::UnboundedReceiver<(InboundRequestId, [u8; 32])>>,
-  inbound_request_responses: mpsc::UnboundedSender<(InboundRequestId, Response)>,
+  heartbeat_requests: Mutex<mpsc::UnboundedReceiver<(RequestId, ValidatorSet, [u8; 32])>>,
+  notable_cosign_requests: Mutex<mpsc::UnboundedReceiver<(RequestId, [u8; 32])>>,
+  inbound_request_responses: mpsc::UnboundedSender<(RequestId, Response)>,
 }
 /// The libp2p-backed P2P implementation.
@@ -170,9 +176,19 @@ impl Libp2p {
       Ok(OnlyValidators { serai_key: serai_key.clone(), noise_keypair: noise_keypair.clone() })
     };
+    let new_yamux = || {
+      let mut config = yamux::Config::default();
+      // 1 MiB default + max message size
+      config.set_max_buffer_size((1024 * 1024) + MAX_LIBP2P_MESSAGE_SIZE);
+      // 256 KiB default + max message size
+      config
+        .set_receive_window_size(((256 * 1024) + MAX_LIBP2P_MESSAGE_SIZE).try_into().unwrap());
+      config
+    };
     let mut swarm = SwarmBuilder::with_existing_identity(identity::Keypair::generate_ed25519())
       .with_tokio()
-      .with_tcp(TcpConfig::default().nodelay(true), new_only_validators, yamux::Config::default)
+      .with_tcp(TcpConfig::default().nodelay(true), new_only_validators, new_yamux)
       .unwrap()
       .with_behaviour(|_| Behavior {
         allow_list: allow_block_list::Behaviour::default(),
@@ -314,7 +330,7 @@ impl serai_cosign::RequestNotableCosigns for Libp2p {
 impl serai_coordinator_p2p::P2p for Libp2p {
   type Peer<'a> = Peer<'a>;
-  fn peers(&self, network: ExternalNetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>> {
+  fn peers(&self, network: NetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>> {
     async move {
       let Some(peer_ids) = self.0.peers.peers.read().await.get(&network).cloned() else {
         return vec![];

@@ -10,7 +10,7 @@ use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
 use libp2p::request_response::{
   self, Codec as CodecTrait, Event as GenericEvent, Config, Behaviour, ProtocolSupport,
 };
-pub use request_response::{InboundRequestId, Message};
+pub use request_response::{RequestId, Message};
 use serai_cosign::SignedCosign;
@@ -129,6 +129,7 @@ pub(crate) type Event = GenericEvent<Request, Response>;
 pub(crate) type Behavior = Behaviour<Codec>;
 pub(crate) fn new_behavior() -> Behavior {
-  let config = Config::default().with_request_timeout(Duration::from_secs(5));
+  let mut config = Config::default();
+  config.set_request_timeout(Duration::from_secs(5));
   Behavior::new([(PROTOCOL, ProtocolSupport::Full)], config)
 }


@@ -6,7 +6,7 @@ use std::{
 use borsh::BorshDeserialize;
-use serai_client_serai::abi::primitives::validator_sets::ExternalValidatorSet;
+use serai_client::validator_sets::primitives::ValidatorSet;
 use tokio::sync::{mpsc, oneshot, RwLock};
@@ -17,7 +17,7 @@ use serai_cosign::SignedCosign;
 use futures_util::StreamExt;
 use libp2p::{
   identity::PeerId,
-  request_response::{InboundRequestId, OutboundRequestId, ResponseChannel},
+  request_response::{RequestId, ResponseChannel},
   swarm::{dial_opts::DialOpts, SwarmEvent, Swarm},
 };
@@ -65,12 +65,12 @@ pub(crate) struct SwarmTask {
   tributary_gossip: mpsc::UnboundedSender<([u8; 32], Vec<u8>)>,
   outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,
-  outbound_request_responses: HashMap<OutboundRequestId, oneshot::Sender<Response>>,
-  inbound_request_response_channels: HashMap<InboundRequestId, ResponseChannel<Response>>,
-  heartbeat_requests: mpsc::UnboundedSender<(InboundRequestId, ExternalValidatorSet, [u8; 32])>,
-  notable_cosign_requests: mpsc::UnboundedSender<(InboundRequestId, [u8; 32])>,
-  inbound_request_responses: mpsc::UnboundedReceiver<(InboundRequestId, Response)>,
+  outbound_request_responses: HashMap<RequestId, oneshot::Sender<Response>>,
+  inbound_request_response_channels: HashMap<RequestId, ResponseChannel<Response>>,
+  heartbeat_requests: mpsc::UnboundedSender<(RequestId, ValidatorSet, [u8; 32])>,
+  notable_cosign_requests: mpsc::UnboundedSender<(RequestId, [u8; 32])>,
+  inbound_request_responses: mpsc::UnboundedReceiver<(RequestId, Response)>,
 }
 impl SwarmTask {
@@ -92,8 +92,7 @@ impl SwarmTask {
           }
         }
         gossip::Event::Subscribed { .. } | gossip::Event::Unsubscribed { .. } => {}
-        gossip::Event::GossipsubNotSupported { peer_id } |
-        gossip::Event::SlowPeer { peer_id, .. } => {
+        gossip::Event::GossipsubNotSupported { peer_id } => {
           let _: Result<_, _> = self.swarm.disconnect_peer_id(peer_id);
         }
       }
@@ -223,20 +222,24 @@ impl SwarmTask {
       }
     }
-    SwarmEvent::Behaviour(event) => {
-      match event {
-        BehaviorEvent::AllowList(event) | BehaviorEvent::ConnectionLimits(event) => {
-          // This *is* an exhaustive match as these events are empty enums
-          match event {}
-        }
-        BehaviorEvent::Ping(ping::Event { peer: _, connection, result, }) => {
-          if result.is_err() {
-            self.swarm.close_connection(connection);
-          }
-        }
-        BehaviorEvent::Reqres(event) => self.handle_reqres(event),
-        BehaviorEvent::Gossip(event) => self.handle_gossip(event),
-      }
-    }
+    SwarmEvent::Behaviour(
+      BehaviorEvent::AllowList(event) | BehaviorEvent::ConnectionLimits(event)
+    ) => {
+      // This *is* an exhaustive match as these events are empty enums
+      match event {}
+    }
+    SwarmEvent::Behaviour(
+      BehaviorEvent::Ping(ping::Event { peer: _, connection, result, })
+    ) => {
+      if result.is_err() {
+        self.swarm.close_connection(connection);
+      }
+    }
+    SwarmEvent::Behaviour(BehaviorEvent::Reqres(event)) => {
+      self.handle_reqres(event)
+    }
+    SwarmEvent::Behaviour(BehaviorEvent::Gossip(event)) => {
+      self.handle_gossip(event)
+    }
     // We don't handle any of these
@@ -247,14 +250,7 @@ impl SwarmTask {
     SwarmEvent::ExpiredListenAddr { .. } |
     SwarmEvent::ListenerClosed { .. } |
     SwarmEvent::ListenerError { .. } |
-    SwarmEvent::Dialing { .. } |
-    SwarmEvent::NewExternalAddrCandidate { .. } |
-    SwarmEvent::ExternalAddrConfirmed { .. } |
-    SwarmEvent::ExternalAddrExpired { .. } |
-    SwarmEvent::NewExternalAddrOfPeer { .. } => {}
-    // Required as SwarmEvent is non-exhaustive
-    _ => log::warn!("unhandled SwarmEvent: {event:?}"),
+    SwarmEvent::Dialing { .. } => {}
   }
 }
@@ -325,9 +321,9 @@ impl SwarmTask {
     outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,
-    heartbeat_requests: mpsc::UnboundedSender<(InboundRequestId, ExternalValidatorSet, [u8; 32])>,
-    notable_cosign_requests: mpsc::UnboundedSender<(InboundRequestId, [u8; 32])>,
-    inbound_request_responses: mpsc::UnboundedReceiver<(InboundRequestId, Response)>,
+    heartbeat_requests: mpsc::UnboundedSender<(RequestId, ValidatorSet, [u8; 32])>,
+    notable_cosign_requests: mpsc::UnboundedSender<(RequestId, [u8; 32])>,
+    inbound_request_responses: mpsc::UnboundedReceiver<(RequestId, Response)>,
   ) {
     tokio::spawn(
       SwarmTask {


@@ -4,8 +4,7 @@ use std::{
   collections::{HashSet, HashMap},
 };
-use serai_client_serai::abi::primitives::{network_id::ExternalNetworkId, validator_sets::Session};
-use serai_client_serai::{RpcError, Serai};
+use serai_client::{primitives::NetworkId, validator_sets::primitives::Session, SeraiError, Serai};
 use serai_task::{Task, ContinuallyRan};
@@ -25,11 +24,11 @@ pub(crate) struct Validators {
   serai: Arc<Serai>,
   // A cache for which session we're populated with the validators of
-  sessions: HashMap<ExternalNetworkId, Session>,
+  sessions: HashMap<NetworkId, Session>,
   // The validators by network
-  by_network: HashMap<ExternalNetworkId, HashSet<PeerId>>,
+  by_network: HashMap<NetworkId, HashSet<PeerId>>,
   // The validators and their networks
-  validators: HashMap<PeerId, HashSet<ExternalNetworkId>>,
+  validators: HashMap<PeerId, HashSet<NetworkId>>,
   // The channel to send the changes down
   changes: mpsc::UnboundedSender<Changes>,
@@ -50,28 +49,23 @@ impl Validators {
   async fn session_changes(
     serai: impl Borrow<Serai>,
-    sessions: impl Borrow<HashMap<ExternalNetworkId, Session>>,
-  ) -> Result<Vec<(ExternalNetworkId, Session, HashSet<PeerId>)>, RpcError> {
-    /*
-      This uses the latest finalized block, not the latest cosigned block, which should be fine as
-      in the worst case, we'd connect to unexpected validators. They still shouldn't be able to
-      bypass the cosign protocol unless a historical global session was malicious, in which case
-      the cosign protocol already breaks.
-      Besides, we can't connect to historical validators, only the current validators.
-    */
-    let serai = serai.borrow().state().await?;
+    sessions: impl Borrow<HashMap<NetworkId, Session>>,
+  ) -> Result<Vec<(NetworkId, Session, HashSet<PeerId>)>, SeraiError> {
+    let temporal_serai = serai.borrow().as_of_latest_finalized_block().await?;
+    let temporal_serai = temporal_serai.validator_sets();
     let mut session_changes = vec![];
     {
       // FuturesUnordered can be bad practice as it'll cause timeouts if infrequently polled, but
       // we poll it till it yields all futures with the most minimal processing possible
       let mut futures = FuturesUnordered::new();
-      for network in ExternalNetworkId::all() {
+      for network in serai_client::primitives::NETWORKS {
+        if network == NetworkId::Serai {
+          continue;
+        }
         let sessions = sessions.borrow();
-        let serai = serai.borrow();
         futures.push(async move {
-          let session = match serai.current_session(network.into()).await {
+          let session = match temporal_serai.session(network).await {
             Ok(Some(session)) => session,
             Ok(None) => return Ok(None),
             Err(e) => return Err(e),
@@ -80,16 +74,12 @@ impl Validators {
           if sessions.get(&network) == Some(&session) {
             Ok(None)
           } else {
-            match serai.current_validators(network.into()).await {
-              Ok(Some(validators)) => Ok(Some((
+            match temporal_serai.active_network_validators(network).await {
+              Ok(validators) => Ok(Some((
                 network,
                 session,
-                validators
-                  .into_iter()
-                  .map(|validator| peer_id_from_public(validator.into()))
-                  .collect(),
+                validators.into_iter().map(peer_id_from_public).collect(),
               ))),
-              Ok(None) => panic!("network has session yet no validators"),
               Err(e) => Err(e),
             }
           }
@@ -107,7 +97,7 @@ impl Validators {
   fn incorporate_session_changes(
     &mut self,
-    session_changes: Vec<(ExternalNetworkId, Session, HashSet<PeerId>)>,
+    session_changes: Vec<(NetworkId, Session, HashSet<PeerId>)>,
   ) {
     let mut removed = HashSet::new();
     let mut added = HashSet::new();
@@ -156,17 +146,17 @@ impl Validators {
   }
   /// Update the view of the validators.
-  pub(crate) async fn update(&mut self) -> Result<(), RpcError> {
+  pub(crate) async fn update(&mut self) -> Result<(), SeraiError> {
     let session_changes = Self::session_changes(&*self.serai, &self.sessions).await?;
     self.incorporate_session_changes(session_changes);
     Ok(())
   }
-  pub(crate) fn by_network(&self) -> &HashMap<ExternalNetworkId, HashSet<PeerId>> {
+  pub(crate) fn by_network(&self) -> &HashMap<NetworkId, HashSet<PeerId>> {
     &self.by_network
   }
-  pub(crate) fn networks(&self, peer_id: &PeerId) -> Option<&HashSet<ExternalNetworkId>> {
+  pub(crate) fn networks(&self, peer_id: &PeerId) -> Option<&HashSet<NetworkId>> {
     self.validators.get(peer_id)
   }
 }
@@ -209,7 +199,7 @@ impl ContinuallyRan for UpdateValidatorsTask {
   const DELAY_BETWEEN_ITERATIONS: u64 = 60;
   const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
-  type Error = RpcError;
+  type Error = SeraiError;
   fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {


@@ -1,7 +1,7 @@
 use core::future::Future;
 use std::time::{Duration, SystemTime};
-use serai_primitives::validator_sets::{ExternalValidatorSet, KeyShares};
+use serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet};
 use futures_lite::FutureExt;
@@ -30,7 +30,7 @@ pub const MIN_BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;
 /// commit is `8 + (validators * 32) + (32 + (validators * 32))` (for the time, list of validators,
 /// and aggregate signature). Accordingly, this should be a safe over-estimate.
 pub const BATCH_SIZE_LIMIT: usize = MIN_BLOCKS_PER_BATCH *
-  (tributary_sdk::BLOCK_SIZE_LIMIT + 32 + ((KeyShares::MAX_PER_SET as usize) * 128));
+  (tributary_sdk::BLOCK_SIZE_LIMIT + 32 + ((MAX_KEY_SHARES_PER_SET as usize) * 128));
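As a worked instance of that over-estimate (illustrative numbers, not from this diff): a commit for n validators is 8 + (n * 32) + (32 + (n * 32)) = 40 + 64n bytes, so budgeting 128 bytes per key share comfortably covers it for any n of at least 1:

const fn commit_size(n: usize) -> usize {
  8 + (n * 32) + (32 + (n * 32))
}

fn main() {
  assert_eq!(commit_size(150), 40 + (64 * 150)); // 9_640 bytes
  assert!(commit_size(150) <= 150 * 128); // well under the 128-bytes-per-share budget
}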
 /// Sends a heartbeat to other validators on regular intervals informing them of our Tributary's
 /// tip.
@@ -38,7 +38,7 @@ pub const BATCH_SIZE_LIMIT: usize = MIN_BLOCKS_PER_BATCH *
 /// If the other validator has more blocks than we do, they're expected to inform us. This forms
 /// the sync protocol for our Tributaries.
 pub(crate) struct HeartbeatTask<TD: Db, Tx: TransactionTrait, P: P2p> {
-  pub(crate) set: ExternalValidatorSet,
+  pub(crate) set: ValidatorSet,
   pub(crate) tributary: Tributary<TD, Tx, P>,
   pub(crate) reader: TributaryReader<TD, Tx>,
   pub(crate) p2p: P,
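A minimal sketch (hypothetical types, interval, and transport; not the actual task) of the announce half of that sync protocol: periodically broadcast our tip, and let any peer with a longer chain respond with the blocks we're missing:

use std::time::Duration;

struct Heartbeat {
  latest_block_hash: [u8; 32],
}

async fn heartbeat_loop(our_tip: impl Fn() -> [u8; 32], broadcast: impl Fn(Heartbeat)) {
  loop {
    // Peers who see a tip behind theirs are expected to reply with the missing blocks
    broadcast(Heartbeat { latest_block_hash: our_tip() });
    tokio::time::sleep(Duration::from_secs(60)).await;
  }
}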


@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
@@ -7,7 +7,7 @@ use std::collections::HashMap;
 use borsh::{BorshSerialize, BorshDeserialize};
-use serai_primitives::{network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet};
+use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};
 use serai_db::Db;
 use tributary_sdk::{ReadWrite, TransactionTrait, Tributary, TributaryReader};
@@ -25,7 +25,7 @@ use crate::heartbeat::HeartbeatTask;
 #[derive(Clone, Copy, BorshSerialize, BorshDeserialize, Debug)]
 pub struct Heartbeat {
   /// The Tributary this is the heartbeat of.
-  pub set: ExternalValidatorSet,
+  pub set: ValidatorSet,
   /// The hash of the latest block added to the Tributary.
   pub latest_block_hash: [u8; 32],
 }
@@ -56,7 +56,7 @@ pub trait P2p:
   type Peer<'a>: Peer<'a>;
   /// Fetch the peers for this network.
-  fn peers(&self, network: ExternalNetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>>;
+  fn peers(&self, network: NetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>>;
   /// Broadcast a cosign.
   fn publish_cosign(&self, cosign: SignedCosign) -> impl Send + Future<Output = ()>;
@@ -131,13 +131,13 @@ fn handle_heartbeat<D: Db, T: TransactionTrait>(
 pub async fn run<TD: Db, Tx: TransactionTrait, P: P2p>(
   db: impl Db,
   p2p: P,
-  mut add_tributary: mpsc::UnboundedReceiver<(ExternalValidatorSet, Tributary<TD, Tx, P>)>,
-  mut retire_tributary: mpsc::UnboundedReceiver<ExternalValidatorSet>,
+  mut add_tributary: mpsc::UnboundedReceiver<(ValidatorSet, Tributary<TD, Tx, P>)>,
+  mut retire_tributary: mpsc::UnboundedReceiver<ValidatorSet>,
   send_cosigns: mpsc::UnboundedSender<SignedCosign>,
 ) {
-  let mut readers = HashMap::<ExternalValidatorSet, TributaryReader<TD, Tx>>::new();
+  let mut readers = HashMap::<ValidatorSet, TributaryReader<TD, Tx>>::new();
   let mut tributaries = HashMap::<[u8; 32], mpsc::UnboundedSender<Vec<u8>>>::new();
-  let mut heartbeat_tasks = HashMap::<ExternalValidatorSet, _>::new();
+  let mut heartbeat_tasks = HashMap::<ValidatorSet, _>::new();
   loop {
     tokio::select! {

@@ -3,12 +3,9 @@ use std::{path::Path, fs};
pub(crate) use serai_db::{Get, DbTxn, Db as DbTrait}; pub(crate) use serai_db::{Get, DbTxn, Db as DbTrait};
use serai_db::{create_db, db_channel}; use serai_db::{create_db, db_channel};
use dkg::Participant; use serai_client::{
primitives::NetworkId,
use serai_client_serai::abi::primitives::{ validator_sets::primitives::{Session, ValidatorSet},
crypto::KeyPair,
network_id::ExternalNetworkId,
validator_sets::{Session, ExternalValidatorSet},
}; };
use serai_cosign::SignedCosign; use serai_cosign::SignedCosign;
@@ -16,7 +13,7 @@ use serai_coordinator_substrate::NewSetInformation;
use serai_coordinator_tributary::Transaction; use serai_coordinator_tributary::Transaction;
#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))] #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
pub(crate) type Db = std::sync::Arc<serai_db::ParityDb>; pub(crate) type Db = serai_db::ParityDb;
#[cfg(feature = "rocksdb")] #[cfg(feature = "rocksdb")]
pub(crate) type Db = serai_db::RocksDB; pub(crate) type Db = serai_db::RocksDB;
@@ -44,21 +41,22 @@ pub(crate) fn coordinator_db() -> Db {
db(&format!("{root_path}/coordinator/db")) db(&format!("{root_path}/coordinator/db"))
} }
fn tributary_db_folder(set: ExternalValidatorSet) -> String { fn tributary_db_folder(set: ValidatorSet) -> String {
let root_path = serai_env::var("DB_PATH").expect("path to DB wasn't specified"); let root_path = serai_env::var("DB_PATH").expect("path to DB wasn't specified");
let network = match set.network { let network = match set.network {
ExternalNetworkId::Bitcoin => "Bitcoin", NetworkId::Serai => panic!("creating Tributary for the Serai network"),
ExternalNetworkId::Ethereum => "Ethereum", NetworkId::Bitcoin => "Bitcoin",
ExternalNetworkId::Monero => "Monero", NetworkId::Ethereum => "Ethereum",
NetworkId::Monero => "Monero",
}; };
format!("{root_path}/tributary-{network}-{}", set.session.0) format!("{root_path}/tributary-{network}-{}", set.session.0)
} }
pub(crate) fn tributary_db(set: ExternalValidatorSet) -> Db { pub(crate) fn tributary_db(set: ValidatorSet) -> Db {
db(&format!("{}/db", tributary_db_folder(set))) db(&format!("{}/db", tributary_db_folder(set)))
} }
pub(crate) fn prune_tributary_db(set: ExternalValidatorSet) { pub(crate) fn prune_tributary_db(set: ValidatorSet) {
log::info!("pruning data directory for tributary {set:?}"); log::info!("pruning data directory for tributary {set:?}");
let db = tributary_db_folder(set); let db = tributary_db_folder(set);
if fs::exists(&db).expect("couldn't check if tributary DB exists") { if fs::exists(&db).expect("couldn't check if tributary DB exists") {
@@ -73,15 +71,11 @@ create_db! {
// The latest Tributary to have been retired for a network // The latest Tributary to have been retired for a network
// Since Tributaries are retired sequentially, this is informative to if any Tributary has been // Since Tributaries are retired sequentially, this is informative to if any Tributary has been
// retired // retired
RetiredTributary: (network: ExternalNetworkId) -> Session, RetiredTributary: (network: NetworkId) -> Session,
// The last handled message from a Processor // The last handled message from a Processor
LastProcessorMessage: (network: ExternalNetworkId) -> u64, LastProcessorMessage: (network: NetworkId) -> u64,
// Cosigns we produced and tried to intake yet incurred an error while doing so // Cosigns we produced and tried to intake yet incurred an error while doing so
ErroneousCosigns: () -> Vec<SignedCosign>, ErroneousCosigns: () -> Vec<SignedCosign>,
// The keys to confirm and set on the Serai network
KeysToConfirm: (set: ExternalValidatorSet) -> KeyPair,
// Whether the key has been set on the Serai network
KeySet: (set: ExternalValidatorSet) -> (),
} }
} }
@@ -90,7 +84,7 @@ db_channel! {
// Cosigns we produced // Cosigns we produced
SignedCosigns: () -> SignedCosign, SignedCosigns: () -> SignedCosign,
// Tributaries to clean up upon reboot // Tributaries to clean up upon reboot
TributaryCleanup: () -> ExternalValidatorSet, TributaryCleanup: () -> ValidatorSet,
} }
} }
@@ -99,52 +93,21 @@ mod _internal_db {
db_channel! { db_channel! {
Coordinator { Coordinator {
// Tributary transactions to publish from the Processor messages // Tributary transactions to publish
TributaryTransactionsFromProcessorMessages: (set: ExternalValidatorSet) -> Transaction, TributaryTransactions: (set: ValidatorSet) -> Transaction,
// Tributary transactions to publish from the DKG confirmation task
TributaryTransactionsFromDkgConfirmation: (set: ExternalValidatorSet) -> Transaction,
// Participants to remove
RemoveParticipant: (set: ExternalValidatorSet) -> u16,
} }
} }
} }
pub(crate) struct TributaryTransactionsFromProcessorMessages; pub(crate) struct TributaryTransactions;
impl TributaryTransactionsFromProcessorMessages { impl TributaryTransactions {
pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, tx: &Transaction) { pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet, tx: &Transaction) {
// If this set has yet to be retired, send this transaction // If this set has yet to be retired, send this transaction
if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) { if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
_internal_db::TributaryTransactionsFromProcessorMessages::send(txn, set, tx); _internal_db::TributaryTransactions::send(txn, set, tx);
} }
} }
pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Transaction> { pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<Transaction> {
_internal_db::TributaryTransactionsFromProcessorMessages::try_recv(txn, set) _internal_db::TributaryTransactions::try_recv(txn, set)
}
}
pub(crate) struct TributaryTransactionsFromDkgConfirmation;
impl TributaryTransactionsFromDkgConfirmation {
pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, tx: &Transaction) {
// If this set has yet to be retired, send this transaction
if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
_internal_db::TributaryTransactionsFromDkgConfirmation::send(txn, set, tx);
}
}
pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Transaction> {
_internal_db::TributaryTransactionsFromDkgConfirmation::try_recv(txn, set)
}
}
pub(crate) struct RemoveParticipant;
impl RemoveParticipant {
pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, participant: Participant) {
// If this set has yet to be retired, send this transaction
if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
_internal_db::RemoveParticipant::send(txn, set, &u16::from(participant));
}
}
pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Participant> {
_internal_db::RemoveParticipant::try_recv(txn, set)
.map(|i| Participant::new(i).expect("sent invalid participant index for removal"))
} }
} }


@@ -1,441 +0,0 @@
use core::{ops::Deref, future::Future};
use std::{boxed::Box, collections::HashMap};
use zeroize::Zeroizing;
use rand_core::OsRng;
use ciphersuite::{group::GroupEncoding, *};
use dkg::{Participant, musig};
use frost_schnorrkel::{
frost::{curve::Ristretto, FrostError, sign::*},
Schnorrkel,
};
use serai_db::{DbTxn, Db as DbTrait};
#[rustfmt::skip]
use serai_client_serai::abi::primitives::{validator_sets::ExternalValidatorSet, address::SeraiAddress};
use serai_task::{DoesNotError, ContinuallyRan};
use serai_coordinator_substrate::{NewSetInformation, Keys};
use serai_coordinator_tributary::{Transaction, DkgConfirmationMessages};
use crate::{KeysToConfirm, KeySet, TributaryTransactionsFromDkgConfirmation};
fn schnorrkel() -> Schnorrkel {
Schnorrkel::new(b"substrate") // TODO: Pull the constant for this
}
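// Find our Participant index within `data` by matching our public key against the set's
// validators. Participant indices are 1-based positions into `set.validators`.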
fn our_i(
set: &NewSetInformation,
key: &Zeroizing<<Ristretto as WrappedGroup>::F>,
data: &HashMap<Participant, Vec<u8>>,
) -> Participant {
let public = SeraiAddress((Ristretto::generator() * key.deref()).to_bytes());
let mut our_i = None;
for participant in data.keys() {
let validator_index = usize::from(u16::from(*participant) - 1);
let (validator, _weight) = set.validators[validator_index];
if validator == public {
our_i = Some(*participant);
}
}
our_i.unwrap()
}
// Take a HashMap of participations with non-contiguous Participants and convert them to a
// contiguous sequence.
//
// The input data is expected to not include our own data, which also won't be in the output data.
//
// On success, returns the data keyed by the contiguous Participants. On error, yields the
// original Participant whose data failed to be transformed.
fn make_contiguous<T>(
our_i: Participant,
mut data: HashMap<Participant, Vec<u8>>,
transform: impl Fn(Vec<u8>) -> std::io::Result<T>,
) -> Result<HashMap<Participant, T>, Participant> {
assert!(!data.contains_key(&our_i));
let mut ordered_participants = data.keys().copied().collect::<Vec<_>>();
ordered_participants.sort_by_key(|participant| u16::from(*participant));
let mut our_i = Some(our_i);
let mut contiguous = HashMap::new();
let mut i = 1;
for participant in ordered_participants {
// If this is the first participant after our own index, increment to account for our index
if let Some(our_i_value) = our_i {
if u16::from(participant) > u16::from(our_i_value) {
i += 1;
our_i = None;
}
}
let contiguous_index = Participant::new(i).unwrap();
let data = match transform(data.remove(&participant).unwrap()) {
Ok(data) => data,
Err(_) => Err(participant)?,
};
contiguous.insert(contiguous_index, data);
i += 1;
}
Ok(contiguous)
}
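// A minimal illustration (hypothetical values, identity transform): with our_i = 5 and data
// from original participants {2, 9}, participant 2 maps to contiguous index 1, index 2 is
// reserved for our own slot, and participant 9 maps to contiguous index 3:
//
//   let data = HashMap::from([
//     (Participant::new(2).unwrap(), vec![0xaa]),
//     (Participant::new(9).unwrap(), vec![0xbb]),
//   ]);
//   let contiguous = make_contiguous(Participant::new(5).unwrap(), data, Ok).unwrap();
//   assert_eq!(contiguous[&Participant::new(1).unwrap()], vec![0xaa]);
//   assert_eq!(contiguous[&Participant::new(3).unwrap()], vec![0xbb]);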
fn handle_frost_error<T>(result: Result<T, FrostError>) -> Result<T, Participant> {
match &result {
Ok(_) => Ok(result.unwrap()),
Err(FrostError::InvalidPreprocess(participant) | FrostError::InvalidShare(participant)) => {
Err(*participant)
}
// All of these should be unreachable
Err(
FrostError::InternalError(_) |
FrostError::InvalidParticipant(_, _) |
FrostError::InvalidSigningSet(_) |
FrostError::InvalidParticipantQuantity(_, _) |
FrostError::DuplicatedParticipant(_) |
FrostError::MissingParticipant(_),
) => {
result.unwrap();
unreachable!("continued execution after unwrapping Result::Err");
}
}
}
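// The signer advances through two states: after publishing our preprocess, we hold the cached
// seed so the machine can be rebuilt once the actual MuSig signer set is known; after
// publishing our share, we hold the machine needed to complete the signature.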
#[rustfmt::skip]
enum Signer {
Preprocess { attempt: u32, seed: CachedPreprocess, preprocess: [u8; 64] },
Share {
attempt: u32,
musig_validators: Vec<SeraiAddress>,
share: [u8; 32],
machine: Box<AlgorithmSignatureMachine<Ristretto, Schnorrkel>>,
},
}
/// Performs the DKG Confirmation protocol.
pub(crate) struct ConfirmDkgTask<CD: DbTrait, TD: DbTrait> {
db: CD,
set: NewSetInformation,
tributary_db: TD,
key: Zeroizing<<Ristretto as WrappedGroup>::F>,
signer: Option<Signer>,
}
impl<CD: DbTrait, TD: DbTrait> ConfirmDkgTask<CD, TD> {
pub(crate) fn new(
db: CD,
set: NewSetInformation,
tributary_db: TD,
key: Zeroizing<<Ristretto as WrappedGroup>::F>,
) -> Self {
Self { db, set, tributary_db, key, signer: None }
}
fn slash(db: &mut CD, set: ExternalValidatorSet, validator: SeraiAddress) {
let mut txn = db.txn();
TributaryTransactionsFromDkgConfirmation::send(
&mut txn,
set,
&Transaction::RemoveParticipant { participant: validator, signed: Default::default() },
);
txn.commit();
}
fn preprocess(
db: &mut CD,
set: ExternalValidatorSet,
attempt: u32,
key: Zeroizing<<Ristretto as WrappedGroup>::F>,
signer: &mut Option<Signer>,
) {
// Perform the preprocess
let public_key = Ristretto::generator() * key.deref();
let (machine, preprocess) = AlgorithmMachine::new(
schnorrkel(),
// We use a 1-of-1 Musig here as we don't know who will actually be in this Musig yet
musig(ExternalValidatorSet::musig_context(&set), key, &[public_key]).unwrap(),
)
.preprocess(&mut OsRng);
// We take the preprocess so we can use it in a distinct machine with the actual Musig
// parameters
let seed = machine.cache();
let mut preprocess_bytes = [0u8; 64];
preprocess_bytes.copy_from_slice(&preprocess.serialize());
let preprocess = preprocess_bytes;
let mut txn = db.txn();
// If this attempt has already been preprocessed for, the Tributary will de-duplicate it
// This may mean the Tributary preprocess is distinct from ours, but we check for that later
TributaryTransactionsFromDkgConfirmation::send(
&mut txn,
set,
&Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed: Default::default() },
);
txn.commit();
*signer = Some(Signer::Preprocess { attempt, seed, preprocess });
}
}
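// A sketch of the cache-and-rehydrate pattern used by `preprocess` above and the Preprocesses
// handler below (`placeholder_keys`/`real_keys` stand in for the 1-of-1 and actual MuSig keys):
//
//   let (machine, preprocess) =
//     AlgorithmMachine::new(schnorrkel(), placeholder_keys).preprocess(&mut OsRng);
//   let seed = machine.cache();
//   // ... later, once the signer set is known ...
//   let (machine, rehydrated) = AlgorithmSignMachine::from_cache(schnorrkel(), real_keys, seed);
//   assert_eq!(preprocess.serialize(), rehydrated.serialize());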
impl<CD: DbTrait, TD: DbTrait> ContinuallyRan for ConfirmDkgTask<CD, TD> {
type Error = DoesNotError;
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {
let mut made_progress = false;
// If we were sent a key to set, create the signer for it
if self.signer.is_none() && KeysToConfirm::get(&self.db, self.set.set).is_some() {
// Create and publish the initial preprocess
Self::preprocess(&mut self.db, self.set.set, 0, self.key.clone(), &mut self.signer);
made_progress = true;
}
// If we have keys to confirm, handle all messages from the tributary
if let Some(key_pair) = KeysToConfirm::get(&self.db, self.set.set) {
// Handle all messages from the Tributary
loop {
let mut tributary_txn = self.tributary_db.txn();
let Some(msg) = DkgConfirmationMessages::try_recv(&mut tributary_txn, self.set.set)
else {
break;
};
match msg {
messages::sign::CoordinatorMessage::Reattempt {
id: messages::sign::SignId { attempt, .. },
} => {
// Create and publish the preprocess for the specified attempt
Self::preprocess(
&mut self.db,
self.set.set,
attempt,
self.key.clone(),
&mut self.signer,
);
}
messages::sign::CoordinatorMessage::Preprocesses {
id: messages::sign::SignId { attempt, .. },
mut preprocesses,
} => {
// Confirm the preprocess we're expected to sign with is the one we locally have
// It may be different if we rebooted and made a second preprocess for this attempt
let Some(Signer::Preprocess { attempt: our_attempt, seed, preprocess }) =
self.signer.take()
else {
// If this message is not expected, commit the txn to drop it and move on
// At some point, we'll get a Reattempt and reset
tributary_txn.commit();
break;
};
// Determine the MuSig key signed with
let musig_validators = {
let mut ordered_participants = preprocesses.keys().copied().collect::<Vec<_>>();
ordered_participants.sort_by_key(|participant| u16::from(*participant));
let mut res = vec![];
for participant in ordered_participants {
let (validator, _weight) =
self.set.validators[usize::from(u16::from(participant) - 1)];
res.push(validator);
}
res
};
let musig_public_keys = musig_validators
.iter()
.map(|key| {
Ristretto::read_G(&mut key.0.as_slice())
.expect("Serai validator had invalid public key")
})
.collect::<Vec<_>>();
let keys = musig(
ExternalValidatorSet::musig_context(&self.set.set),
self.key.clone(),
&musig_public_keys,
)
.unwrap();
// Rebuild the machine
let (machine, preprocess_from_cache) =
AlgorithmSignMachine::from_cache(schnorrkel(), keys, seed);
assert_eq!(preprocess.as_slice(), preprocess_from_cache.serialize().as_slice());
// Ensure this is a consistent signing session
let our_i = our_i(&self.set, &self.key, &preprocesses);
let consistent = (attempt == our_attempt) &&
(preprocesses.remove(&our_i).unwrap().as_slice() == preprocess.as_slice());
if !consistent {
tributary_txn.commit();
break;
}
// Reformat the preprocesses into the expected format for Musig
let preprocesses = match make_contiguous(our_i, preprocesses, |preprocess| {
machine.read_preprocess(&mut preprocess.as_slice())
}) {
Ok(preprocesses) => preprocesses,
// This yields the *original participant index*
Err(participant) => {
Self::slash(
&mut self.db,
self.set.set,
self.set.validators[usize::from(u16::from(participant) - 1)].0,
);
tributary_txn.commit();
break;
}
};
// Calculate our share
let (machine, share) = match handle_frost_error(machine.sign(
preprocesses,
&ExternalValidatorSet::set_keys_message(&self.set.set, &key_pair),
)) {
Ok((machine, share)) => (machine, share),
// This yields the *musig participant index*
Err(participant) => {
Self::slash(
&mut self.db,
self.set.set,
musig_validators[usize::from(u16::from(participant) - 1)],
);
tributary_txn.commit();
break;
}
};
// Send our share
let share = <[u8; 32]>::try_from(share.serialize()).unwrap();
let mut txn = self.db.txn();
TributaryTransactionsFromDkgConfirmation::send(
&mut txn,
self.set.set,
&Transaction::DkgConfirmationShare { attempt, share, signed: Default::default() },
);
txn.commit();
self.signer = Some(Signer::Share {
attempt,
musig_validators,
share,
machine: Box::new(machine),
});
}
messages::sign::CoordinatorMessage::Shares {
id: messages::sign::SignId { attempt, .. },
mut shares,
} => {
let Some(Signer::Share { attempt: our_attempt, musig_validators, share, machine }) =
self.signer.take()
else {
tributary_txn.commit();
break;
};
// Ensure this is a consistent signing session
let our_i = our_i(&self.set, &self.key, &shares);
let consistent = (attempt == our_attempt) &&
(shares.remove(&our_i).unwrap().as_slice() == share.as_slice());
if !consistent {
tributary_txn.commit();
break;
}
// Reformat the shares into the expected format for Musig
let shares = match make_contiguous(our_i, shares, |share| {
machine.read_share(&mut share.as_slice())
}) {
Ok(shares) => shares,
// This yields the *original participant index*
Err(participant) => {
Self::slash(
&mut self.db,
self.set.set,
self.set.validators[usize::from(u16::from(participant) - 1)].0,
);
tributary_txn.commit();
break;
}
};
match handle_frost_error(machine.complete(shares)) {
Ok(signature) => {
// Create the bitvec of the participants
let mut signature_participants;
{
use bitvec::prelude::*;
signature_participants = bitvec![u8, Lsb0; 0; 0];
let mut i = 0;
for (validator, _) in &self.set.validators {
if Some(validator) == musig_validators.get(i) {
signature_participants.push(true);
i += 1;
} else {
signature_participants.push(false);
}
}
}
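// For example, if the set's validators are [v1, v2, v3, v4] and musig_validators is [v2, v4],
// the bits pushed are [false, true, false, true]: a raw byte of 0b0000_1010 under Lsb0.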
// This is safe to call multiple times as it'll just change which *valid*
// signature to publish
let mut txn = self.db.txn();
Keys::set(
&mut txn,
self.set.set,
key_pair.clone(),
signature_participants,
signature.into(),
);
txn.commit();
}
// This yields the *musig participant index*
Err(participant) => {
Self::slash(
&mut self.db,
self.set.set,
musig_validators[usize::from(u16::from(participant) - 1)],
);
tributary_txn.commit();
break;
}
}
}
}
// Because we successfully handled this message, note we made progress
made_progress = true;
tributary_txn.commit();
}
}
// Check if the key has been set on Serai
if KeysToConfirm::get(&self.db, self.set.set).is_some() &&
KeySet::get(&self.db, self.set.set).is_some()
{
// Take the keys to confirm so we never instantiate the signer again
let mut txn = self.db.txn();
KeysToConfirm::take(&mut txn, self.set.set);
KeySet::take(&mut txn, self.set.set);
txn.commit();
// Drop our own signer
// The task won't die until the Tributary does, but now it'll never do anything again
self.signer = None;
made_progress = true;
}
Ok(made_progress)
}
}
}


@@ -4,24 +4,18 @@ use std::{sync::Arc, collections::HashMap, time::Instant};
use zeroize::{Zeroize, Zeroizing}; use zeroize::{Zeroize, Zeroizing};
use rand_core::{RngCore, OsRng}; use rand_core::{RngCore, OsRng};
use dalek_ff_group::Ristretto;
use ciphersuite::{ use ciphersuite::{
group::{ff::PrimeField, GroupEncoding}, group::{ff::PrimeField, GroupEncoding},
*, Ciphersuite, Ristretto,
}; };
use borsh::BorshDeserialize; use borsh::BorshDeserialize;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use serai_client_serai::{ use serai_client::{
abi::primitives::{ primitives::{NetworkId, PublicKey},
BlockHash, validator_sets::primitives::ValidatorSet,
crypto::{Public, Signature, ExternalKey, KeyPair},
network_id::ExternalNetworkId,
validator_sets::ExternalValidatorSet,
address::SeraiAddress,
},
Serai, Serai,
}; };
use message_queue::{Service, client::MessageQueue}; use message_queue::{Service, client::MessageQueue};
@@ -29,17 +23,13 @@ use message_queue::{Service, client::MessageQueue};
use serai_task::{Task, TaskHandle, ContinuallyRan}; use serai_task::{Task, TaskHandle, ContinuallyRan};
use serai_cosign::{Faulted, SignedCosign, Cosigning}; use serai_cosign::{Faulted, SignedCosign, Cosigning};
use serai_coordinator_substrate::{ use serai_coordinator_substrate::{CanonicalEventStream, EphemeralEventStream, SignSlashReport};
CanonicalEventStream, EphemeralEventStream, SignSlashReport, SetKeysTask, SignedBatches, use serai_coordinator_tributary::{Signed, Transaction, SubstrateBlockPlans};
PublishBatchTask, SlashReports, PublishSlashReportTask,
};
use serai_coordinator_tributary::{SigningProtocolRound, Signed, Transaction, SubstrateBlockPlans};
mod db; mod db;
use db::*; use db::*;
mod tributary; mod tributary;
mod dkg_confirmation;
mod substrate; mod substrate;
use substrate::SubstrateTask; use substrate::SubstrateTask;
@@ -66,7 +56,9 @@ async fn serai() -> Arc<Serai> {
let Ok(serai) = Serai::new(format!( let Ok(serai) = Serai::new(format!(
"http://{}:9944", "http://{}:9944",
serai_env::var("SERAI_HOSTNAME").expect("Serai hostname wasn't provided") serai_env::var("SERAI_HOSTNAME").expect("Serai hostname wasn't provided")
)) else { ))
.await
else {
log::error!("couldn't connect to the Serai node"); log::error!("couldn't connect to the Serai node");
tokio::time::sleep(delay).await; tokio::time::sleep(delay).await;
delay = (delay + SERAI_CONNECTION_DELAY).min(MAX_SERAI_CONNECTION_DELAY); delay = (delay + SERAI_CONNECTION_DELAY).min(MAX_SERAI_CONNECTION_DELAY);
@@ -153,24 +145,11 @@ fn spawn_cosigning<D: serai_db::Db>(
}); });
} }
async fn handle_network( async fn handle_processor_messages(
mut db: impl serai_db::Db, mut db: impl serai_db::Db,
message_queue: Arc<MessageQueue>, message_queue: Arc<MessageQueue>,
serai: Arc<Serai>, network: NetworkId,
network: ExternalNetworkId,
) { ) {
// Spawn the task to publish batches for this network
{
let (publish_batch_task_def, publish_batch_task) = Task::new();
tokio::spawn(
PublishBatchTask::new(db.clone(), serai.clone(), network)
.continually_run(publish_batch_task_def, vec![]),
);
// Forget its handle so it always runs in the background
core::mem::forget(publish_batch_task);
}
// Handle Processor messages
loop { loop {
let (msg_id, msg) = { let (msg_id, msg) = {
let msg = message_queue.next(Service::Processor(network)).await; let msg = message_queue.next(Service::Processor(network)).await;
@@ -200,8 +179,8 @@ async fn handle_network(
match msg { match msg {
messages::ProcessorMessage::KeyGen(msg) => match msg { messages::ProcessorMessage::KeyGen(msg) => match msg {
messages::key_gen::ProcessorMessage::Participation { session, participation } => { messages::key_gen::ProcessorMessage::Participation { session, participation } => {
let set = ExternalValidatorSet { network, session }; let set = ValidatorSet { network, session };
TributaryTransactionsFromProcessorMessages::send( TributaryTransactions::send(
&mut txn, &mut txn,
set, set,
&Transaction::DkgParticipation { participation, signed: Signed::default() }, &Transaction::DkgParticipation { participation, signed: Signed::default() },
@@ -211,91 +190,49 @@ async fn handle_network(
session, session,
substrate_key, substrate_key,
network_key, network_key,
} => { } => todo!("TODO Transaction::DkgConfirmationPreprocess"),
KeysToConfirm::set(
&mut txn,
ExternalValidatorSet { network, session },
&KeyPair(
Public(substrate_key),
ExternalKey(
network_key
.try_into()
.expect("generated a network key which exceeds the maximum key length"),
),
),
);
}
messages::key_gen::ProcessorMessage::Blame { session, participant } => { messages::key_gen::ProcessorMessage::Blame { session, participant } => {
RemoveParticipant::send(&mut txn, ExternalValidatorSet { network, session }, participant); let set = ValidatorSet { network, session };
TributaryTransactions::send(
&mut txn,
set,
&Transaction::RemoveParticipant {
participant: todo!("TODO"),
signed: Signed::default(),
},
);
} }
}, },
messages::ProcessorMessage::Sign(msg) => match msg { messages::ProcessorMessage::Sign(msg) => match msg {
messages::sign::ProcessorMessage::InvalidParticipant { session, participant } => { messages::sign::ProcessorMessage::InvalidParticipant { session, participant } => {
RemoveParticipant::send(&mut txn, ExternalValidatorSet { network, session }, participant); let set = ValidatorSet { network, session };
TributaryTransactions::send(
&mut txn,
set,
&Transaction::RemoveParticipant {
participant: todo!("TODO"),
signed: Signed::default(),
},
);
} }
messages::sign::ProcessorMessage::Preprocesses { id, preprocesses } => { messages::sign::ProcessorMessage::Preprocesses { id, preprocesses } => {
let set = ExternalValidatorSet { network, session: id.session }; todo!("TODO Transaction::Batch + Transaction::Sign")
if id.attempt == 0 {
// Batches are declared by their intent to be signed
if let messages::sign::VariantSignId::Batch(hash) = id.id {
TributaryTransactionsFromProcessorMessages::send(
&mut txn,
set,
&Transaction::Batch { hash },
);
}
}
TributaryTransactionsFromProcessorMessages::send(
&mut txn,
set,
&Transaction::Sign {
id: id.id,
attempt: id.attempt,
round: SigningProtocolRound::Preprocess,
data: preprocesses,
signed: Signed::default(),
},
);
}
messages::sign::ProcessorMessage::Shares { id, shares } => {
let set = ExternalValidatorSet { network, session: id.session };
TributaryTransactionsFromProcessorMessages::send(
&mut txn,
set,
&Transaction::Sign {
id: id.id,
attempt: id.attempt,
round: SigningProtocolRound::Share,
data: shares,
signed: Signed::default(),
},
);
} }
messages::sign::ProcessorMessage::Shares { id, shares } => todo!("TODO Transaction::Sign"),
}, },
messages::ProcessorMessage::Coordinator(msg) => match msg { messages::ProcessorMessage::Coordinator(msg) => match msg {
messages::coordinator::ProcessorMessage::CosignedBlock { cosign } => { messages::coordinator::ProcessorMessage::CosignedBlock { cosign } => {
SignedCosigns::send(&mut txn, &cosign); SignedCosigns::send(&mut txn, &cosign);
} }
messages::coordinator::ProcessorMessage::SignedBatch { batch } => { messages::coordinator::ProcessorMessage::SignedBatch { batch } => {
SignedBatches::send(&mut txn, &batch); todo!("TODO PublishBatchTask")
} }
messages::coordinator::ProcessorMessage::SignedSlashReport { messages::coordinator::ProcessorMessage::SignedSlashReport { session, signature } => {
session, todo!("TODO PublishSlashReportTask")
slash_report,
signature,
} => {
SlashReports::set(
&mut txn,
ExternalValidatorSet { network, session },
slash_report,
Signature(signature),
);
} }
}, },
messages::ProcessorMessage::Substrate(msg) => match msg { messages::ProcessorMessage::Substrate(msg) => match msg {
messages::substrate::ProcessorMessage::SubstrateBlockAck { block, plans } => { messages::substrate::ProcessorMessage::SubstrateBlockAck { block, plans } => {
let block = BlockHash(block);
let mut by_session = HashMap::new(); let mut by_session = HashMap::new();
for plan in plans { for plan in plans {
by_session by_session
@@ -304,9 +241,9 @@ async fn handle_network(
.push(plan.transaction_plan_id); .push(plan.transaction_plan_id);
} }
for (session, plans) in by_session { for (session, plans) in by_session {
let set = ExternalValidatorSet { network, session }; let set = ValidatorSet { network, session };
SubstrateBlockPlans::set(&mut txn, set, block, &plans); SubstrateBlockPlans::set(&mut txn, set, block, &plans);
TributaryTransactionsFromProcessorMessages::send( TributaryTransactions::send(
&mut txn, &mut txn,
set, set,
&Transaction::SubstrateBlock { hash: block }, &Transaction::SubstrateBlock { hash: block },
@@ -358,7 +295,7 @@ async fn main() {
let mut key_bytes = [0; 32]; let mut key_bytes = [0; 32];
key_bytes.copy_from_slice(&key_vec); key_bytes.copy_from_slice(&key_vec);
key_vec.zeroize(); key_vec.zeroize();
let key = Zeroizing::new(<Ristretto as WrappedGroup>::F::from_repr(key_bytes).unwrap()); let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::from_repr(key_bytes).unwrap());
key_bytes.zeroize(); key_bytes.zeroize();
key key
}; };
@@ -372,16 +309,10 @@ async fn main() {
// Cleanup all historic Tributaries // Cleanup all historic Tributaries
while let Some(to_cleanup) = TributaryCleanup::try_recv(&mut txn) { while let Some(to_cleanup) = TributaryCleanup::try_recv(&mut txn) {
prune_tributary_db(to_cleanup); prune_tributary_db(to_cleanup);
// Remove the keys to confirm for this network
KeysToConfirm::take(&mut txn, to_cleanup);
KeySet::take(&mut txn, to_cleanup);
// Drain the cosign intents created for this set // Drain the cosign intents created for this set
while !Cosigning::<Db>::intended_cosigns(&mut txn, to_cleanup).is_empty() {} while !Cosigning::<Db>::intended_cosigns(&mut txn, to_cleanup).is_empty() {}
// Drain the transactions to publish for this set // Drain the transactions to publish for this set
while TributaryTransactionsFromProcessorMessages::try_recv(&mut txn, to_cleanup).is_some() {} while TributaryTransactions::try_recv(&mut txn, to_cleanup).is_some() {}
while TributaryTransactionsFromDkgConfirmation::try_recv(&mut txn, to_cleanup).is_some() {}
// Drain the participants to remove for this set
while RemoveParticipant::try_recv(&mut txn, to_cleanup).is_some() {}
// Remove the SignSlashReport notification // Remove the SignSlashReport notification
SignSlashReport::try_recv(&mut txn, to_cleanup); SignSlashReport::try_recv(&mut txn, to_cleanup);
} }
@@ -445,7 +376,7 @@ async fn main() {
EphemeralEventStream::new( EphemeralEventStream::new(
db.clone(), db.clone(),
serai.clone(), serai.clone(),
SeraiAddress((<Ristretto as WrappedGroup>::generator() * serai_key.deref()).to_bytes()), PublicKey::from_raw((<Ristretto as Ciphersuite>::generator() * serai_key.deref()).to_bytes()),
) )
.continually_run(substrate_ephemeral_task_def, vec![substrate_task]), .continually_run(substrate_ephemeral_task_def, vec![substrate_task]),
); );
@@ -486,29 +417,12 @@ async fn main() {
.continually_run(substrate_task_def, vec![]), .continually_run(substrate_task_def, vec![]),
); );
// Handle each of the networks // Handle all of the Processors' messages
for network in ExternalNetworkId::all() { for network in serai_client::primitives::NETWORKS {
tokio::spawn(handle_network(db.clone(), message_queue.clone(), serai.clone(), network)); if network == NetworkId::Serai {
continue;
} }
tokio::spawn(handle_processor_messages(db.clone(), message_queue.clone(), network));
// Spawn the task to set keys
{
let (set_keys_task_def, set_keys_task) = Task::new();
tokio::spawn(
SetKeysTask::new(db.clone(), serai.clone()).continually_run(set_keys_task_def, vec![]),
);
// Forget its handle so it always runs in the background
core::mem::forget(set_keys_task);
}
// Spawn the task to publish slash reports
{
let (publish_slash_report_task_def, publish_slash_report_task) = Task::new();
tokio::spawn(
PublishSlashReportTask::new(db, serai).continually_run(publish_slash_report_task_def, vec![]),
);
// Always have this run in the background
core::mem::forget(publish_slash_report_task);
} }
// Run the spawned tasks ad-infinitum // Run the spawned tasks ad-infinitum


@@ -3,17 +3,13 @@ use std::sync::Arc;
use zeroize::Zeroizing; use zeroize::Zeroizing;
use ciphersuite::*; use ciphersuite::{Ciphersuite, Ristretto};
use dalek_ff_group::Ristretto;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use serai_db::{DbTxn, Db as DbTrait}; use serai_db::{DbTxn, Db as DbTrait};
use serai_client_serai::abi::primitives::{ use serai_client::validator_sets::primitives::{Session, ValidatorSet};
network_id::ExternalNetworkId,
validator_sets::{Session, ExternalValidatorSet},
};
use message_queue::{Service, Metadata, client::MessageQueue}; use message_queue::{Service, Metadata, client::MessageQueue};
use tributary_sdk::Tributary; use tributary_sdk::Tributary;
@@ -23,16 +19,16 @@ use serai_task::ContinuallyRan;
use serai_coordinator_tributary::Transaction; use serai_coordinator_tributary::Transaction;
use serai_coordinator_p2p::P2p; use serai_coordinator_p2p::P2p;
use crate::{Db, KeySet}; use crate::Db;
pub(crate) struct SubstrateTask<P: P2p> { pub(crate) struct SubstrateTask<P: P2p> {
pub(crate) serai_key: Zeroizing<<Ristretto as WrappedGroup>::F>, pub(crate) serai_key: Zeroizing<<Ristretto as Ciphersuite>::F>,
pub(crate) db: Db, pub(crate) db: Db,
pub(crate) message_queue: Arc<MessageQueue>, pub(crate) message_queue: Arc<MessageQueue>,
pub(crate) p2p: P, pub(crate) p2p: P,
pub(crate) p2p_add_tributary: pub(crate) p2p_add_tributary:
mpsc::UnboundedSender<(ExternalValidatorSet, Tributary<Db, Transaction, P>)>, mpsc::UnboundedSender<(ValidatorSet, Tributary<Db, Transaction, P>)>,
pub(crate) p2p_retire_tributary: mpsc::UnboundedSender<ExternalValidatorSet>, pub(crate) p2p_retire_tributary: mpsc::UnboundedSender<ValidatorSet>,
} }
impl<P: P2p> ContinuallyRan for SubstrateTask<P> { impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
@@ -42,7 +38,7 @@ impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
let mut made_progress = false; let mut made_progress = false;
// Handle the Canonical events // Handle the Canonical events
for network in ExternalNetworkId::all() { for network in serai_client::primitives::NETWORKS {
loop { loop {
let mut txn = self.db.txn(); let mut txn = self.db.txn();
let Some(msg) = serai_coordinator_substrate::Canonical::try_recv(&mut txn, network) let Some(msg) = serai_coordinator_substrate::Canonical::try_recv(&mut txn, network)
@@ -51,9 +47,8 @@ impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
}; };
match msg { match msg {
messages::substrate::CoordinatorMessage::SetKeys { session, .. } => { // TODO: Stop trying to confirm the DKG
KeySet::set(&mut txn, ExternalValidatorSet { network, session }, &()); messages::substrate::CoordinatorMessage::SetKeys { .. } => todo!("TODO"),
}
messages::substrate::CoordinatorMessage::SlashesReported { session } => { messages::substrate::CoordinatorMessage::SlashesReported { session } => {
let prior_retired = crate::db::RetiredTributary::get(&txn, network); let prior_retired = crate::db::RetiredTributary::get(&txn, network);
let next_to_be_retired = let next_to_be_retired =
@@ -62,7 +57,7 @@ impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
crate::db::RetiredTributary::set(&mut txn, network, &session); crate::db::RetiredTributary::set(&mut txn, network, &session);
self self
.p2p_retire_tributary .p2p_retire_tributary
.send(ExternalValidatorSet { network, session }) .send(ValidatorSet { network, session })
.expect("p2p retire_tributary channel dropped?"); .expect("p2p retire_tributary channel dropped?");
} }
messages::substrate::CoordinatorMessage::Block { .. } => {} messages::substrate::CoordinatorMessage::Block { .. } => {}
@@ -112,10 +107,7 @@ impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
*/ */
crate::db::TributaryCleanup::send( crate::db::TributaryCleanup::send(
&mut txn, &mut txn,
&ExternalValidatorSet { &ValidatorSet { network: new_set.set.network, session: Session(historic_session) },
network: new_set.set.network,
session: Session(historic_session),
},
); );
} }


@@ -4,14 +4,14 @@ use std::sync::Arc;
use zeroize::Zeroizing; use zeroize::Zeroizing;
use rand_core::OsRng; use rand_core::OsRng;
use blake2::{digest::typenum::U32, Digest, Blake2s}; use blake2::{digest::typenum::U32, Digest, Blake2s};
use ciphersuite::*; use ciphersuite::{Ciphersuite, Ristretto};
use dalek_ff_group::Ristretto;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel}; use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel};
use serai_client_serai::abi::primitives::validator_sets::ExternalValidatorSet; use scale::Encode;
use serai_client::validator_sets::primitives::ValidatorSet;
use tributary_sdk::{TransactionKind, TransactionError, ProvidedError, TransactionTrait, Tributary}; use tributary_sdk::{TransactionKind, TransactionError, ProvidedError, TransactionTrait, Tributary};
@@ -21,25 +21,14 @@ use message_queue::{Service, Metadata, client::MessageQueue};
use serai_cosign::{Faulted, CosignIntent, Cosigning}; use serai_cosign::{Faulted, CosignIntent, Cosigning};
use serai_coordinator_substrate::{NewSetInformation, SignSlashReport}; use serai_coordinator_substrate::{NewSetInformation, SignSlashReport};
use serai_coordinator_tributary::{ use serai_coordinator_tributary::{Transaction, ProcessorMessages, CosignIntents, ScanTributaryTask};
Topic, Transaction, ProcessorMessages, CosignIntents, RecognizedTopics, ScanTributaryTask,
};
use serai_coordinator_p2p::P2p; use serai_coordinator_p2p::P2p;
use crate::{ use crate::{Db, TributaryTransactions};
Db, TributaryTransactionsFromProcessorMessages, TributaryTransactionsFromDkgConfirmation,
RemoveParticipant, dkg_confirmation::ConfirmDkgTask,
};
create_db! {
Coordinator {
PublishOnRecognition: (set: ExternalValidatorSet, topic: Topic) -> Transaction,
}
}
db_channel! { db_channel! {
Coordinator { Coordinator {
PendingCosigns: (set: ExternalValidatorSet) -> CosignIntent, PendingCosigns: (set: ValidatorSet) -> CosignIntent,
} }
} }
@@ -48,7 +37,7 @@ db_channel! {
/// This is not a well-designed function. This is specific to the context in which its called, /// This is not a well-designed function. This is specific to the context in which its called,
/// within this file. It should only be considered an internal helper for this domain alone. /// within this file. It should only be considered an internal helper for this domain alone.
async fn provide_transaction<TD: DbTrait, P: P2p>( async fn provide_transaction<TD: DbTrait, P: P2p>(
set: ExternalValidatorSet, set: ValidatorSet,
tributary: &Tributary<TD, Transaction, P>, tributary: &Tributary<TD, Transaction, P>,
tx: Transaction, tx: Transaction,
) { ) {
@@ -67,7 +56,9 @@ async fn provide_transaction<TD: DbTrait, P: P2p>(
// advancing // advancing
Err(ProvidedError::LocalMismatchesOnChain) => loop { Err(ProvidedError::LocalMismatchesOnChain) => loop {
log::error!( log::error!(
"Tributary {set:?} was supposed to provide {tx:?} but peers disagree, halting Tributary", "Tributary {:?} was supposed to provide {:?} but peers disagree, halting Tributary",
set,
tx,
); );
// Print this every five minutes as this does need to be handled // Print this every five minutes as this does need to be handled
tokio::time::sleep(Duration::from_secs(5 * 60)).await; tokio::time::sleep(Duration::from_secs(5 * 60)).await;
@@ -156,102 +147,13 @@ impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan
} }
} }
#[must_use] /// Adds all of the transactions sent via `TributaryTransactions`.
async fn add_signed_unsigned_transaction<TD: DbTrait, P: P2p>(
tributary: &Tributary<TD, Transaction, P>,
key: &Zeroizing<<Ristretto as WrappedGroup>::F>,
mut tx: Transaction,
) -> bool {
// If this is a signed transaction, sign it
if matches!(tx.kind(), TransactionKind::Signed(_, _)) {
tx.sign(&mut OsRng, tributary.genesis(), key);
}
let res = tributary.add_transaction(tx.clone()).await;
match &res {
// Fresh publication, already published
Ok(true | false) => {}
Err(
TransactionError::TooLargeTransaction |
TransactionError::InvalidSigner |
TransactionError::InvalidSignature |
TransactionError::InvalidContent,
) => {
panic!("created an invalid transaction, tx: {tx:?}, err: {res:?}");
}
// InvalidNonce may indicate an out-of-order TX rather than an invalid one, but we only create
// nonce #n+1 after the TX with nonce #n is included on-chain, so within our context it's only
// valid if this transaction was already included on-chain
Err(TransactionError::InvalidNonce) => {
let TransactionKind::Signed(order, signed) = tx.kind() else {
panic!("non-Signed transaction had InvalidNonce");
};
let next_nonce = tributary
.next_nonce(&signed.signer, &order)
.await
.expect("signer who is a present validator didn't have a nonce");
assert!(next_nonce != signed.nonce);
// We're publishing an old transaction
if next_nonce > signed.nonce {
return true;
}
panic!("nonce in transaction wasn't contiguous with nonce on-chain");
}
// We've published too many transactions recently
Err(TransactionError::TooManyInMempool) => {
return false;
}
// This isn't a Provided transaction so this should never be hit
Err(TransactionError::ProvidedAddedToMempool) => unreachable!(),
}
true
}
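// For example, if our transaction has nonce 2 yet the chain reports a next nonce of 3, it was
// already included, so we report success. A next nonce of 2 wouldn't have yielded InvalidNonce,
// and anything lower means we created a non-contiguous nonce, which is a bug worth panicking on.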
async fn add_with_recognition_check<TD: DbTrait, P: P2p>(
set: ExternalValidatorSet,
tributary_db: &mut TD,
tributary: &Tributary<TD, Transaction, P>,
key: &Zeroizing<<Ristretto as WrappedGroup>::F>,
tx: Transaction,
) -> bool {
let kind = tx.kind();
match kind {
TransactionKind::Provided(_) => provide_transaction(set, tributary, tx).await,
TransactionKind::Unsigned | TransactionKind::Signed(_, _) => {
// If this is a transaction with signing data, check the topic is recognized before
// publishing
let topic = tx.topic();
let still_requires_recognition = if let Some(topic) = topic {
(topic.requires_recognition() && (!RecognizedTopics::recognized(tributary_db, set, topic)))
.then_some(topic)
} else {
None
};
if let Some(topic) = still_requires_recognition {
// Queue the transaction until the topic is recognized
// We use the Tributary DB for this so it's cleaned up when the Tributary DB is
let mut tributary_txn = tributary_db.txn();
PublishOnRecognition::set(&mut tributary_txn, set, topic, &tx);
tributary_txn.commit();
} else {
// Actually add the transaction
if !add_signed_unsigned_transaction(tributary, key, tx).await {
return false;
}
}
}
}
true
}
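// For example, a transaction for a topic the Tributary hasn't yet recognized is stashed in
// PublishOnRecognition, and the loop over RecognizedTopics in run_iteration below republishes
// it once the topic is recognized.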
/// Adds all of the transactions sent via `TributaryTransactionsFromProcessorMessages`.
pub(crate) struct AddTributaryTransactionsTask<CD: DbTrait, TD: DbTrait, P: P2p> { pub(crate) struct AddTributaryTransactionsTask<CD: DbTrait, TD: DbTrait, P: P2p> {
db: CD, db: CD,
tributary_db: TD, tributary_db: TD,
tributary: Tributary<TD, Transaction, P>, tributary: Tributary<TD, Transaction, P>,
set: NewSetInformation, set: ValidatorSet,
key: Zeroizing<<Ristretto as WrappedGroup>::F>, key: Zeroizing<<Ristretto as Ciphersuite>::F>,
} }
impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for AddTributaryTransactionsTask<CD, TD, P> { impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for AddTributaryTransactionsTask<CD, TD, P> {
type Error = DoesNotError; type Error = DoesNotError;
@@ -259,87 +161,49 @@ impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for AddTributaryTransactio
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> { fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move { async move {
let mut made_progress = false; let mut made_progress = false;
// Provide/add all transactions sent our way
loop { loop {
let mut txn = self.db.txn(); let mut txn = self.db.txn();
let Some(tx) = TributaryTransactionsFromDkgConfirmation::try_recv(&mut txn, self.set.set) let Some(mut tx) = TributaryTransactions::try_recv(&mut txn, self.set) else { break };
else {
break;
};
if !add_with_recognition_check( let kind = tx.kind();
self.set.set, match kind {
&mut self.tributary_db, TransactionKind::Provided(_) => provide_transaction(self.set, &self.tributary, tx).await,
&self.tributary, TransactionKind::Unsigned | TransactionKind::Signed(_, _) => {
&self.key, // If this is a signed transaction, sign it
tx, if matches!(kind, TransactionKind::Signed(_, _)) {
) tx.sign(&mut OsRng, self.tributary.genesis(), &self.key);
.await }
{
// Actually add the transaction
// TODO: If this is a preprocess, make sure the topic has been recognized
let res = self.tributary.add_transaction(tx.clone()).await;
match &res {
// Fresh publication, already published
Ok(true | false) => {}
Err(
TransactionError::TooLargeTransaction |
TransactionError::InvalidSigner |
TransactionError::InvalidNonce |
TransactionError::InvalidSignature |
TransactionError::InvalidContent,
) => {
panic!("created an invalid transaction, tx: {tx:?}, err: {res:?}");
}
// We've published too many transactions recently
// Drop this txn to try to publish it again later on a future iteration
Err(TransactionError::TooManyInMempool) => {
drop(txn);
break; break;
} }
// This isn't a Provided transaction so this should never be hit
Err(TransactionError::ProvidedAddedToMempool) => unreachable!(),
}
}
}
made_progress = true; made_progress = true;
txn.commit(); txn.commit();
} }
loop {
let mut txn = self.db.txn();
let Some(tx) = TributaryTransactionsFromProcessorMessages::try_recv(&mut txn, self.set.set)
else {
break;
};
if !add_with_recognition_check(
self.set.set,
&mut self.tributary_db,
&self.tributary,
&self.key,
tx,
)
.await
{
break;
}
made_progress = true;
txn.commit();
}
// Provide/add all transactions due to newly recognized topics
loop {
let mut tributary_txn = self.tributary_db.txn();
let Some(topic) =
RecognizedTopics::try_recv_topic_requiring_recognition(&mut tributary_txn, self.set.set)
else {
break;
};
if let Some(tx) = PublishOnRecognition::take(&mut tributary_txn, self.set.set, topic) {
if !add_signed_unsigned_transaction(&self.tributary, &self.key, tx).await {
break;
}
}
made_progress = true;
tributary_txn.commit();
}
// Publish any participant removals
loop {
let mut txn = self.db.txn();
let Some(participant) = RemoveParticipant::try_recv(&mut txn, self.set.set) else { break };
let tx = Transaction::RemoveParticipant {
participant: self.set.participant_indexes_reverse_lookup[&participant],
signed: Default::default(),
};
if !add_signed_unsigned_transaction(&self.tributary, &self.key, tx).await {
break;
}
made_progress = true;
txn.commit();
}
Ok(made_progress) Ok(made_progress)
} }
} }
@@ -348,7 +212,7 @@ impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for AddTributaryTransactio
/// Takes the messages from ScanTributaryTask and publishes them to the message-queue. /// Takes the messages from ScanTributaryTask and publishes them to the message-queue.
pub(crate) struct TributaryProcessorMessagesTask<TD: DbTrait> { pub(crate) struct TributaryProcessorMessagesTask<TD: DbTrait> {
tributary_db: TD, tributary_db: TD,
set: ExternalValidatorSet, set: ValidatorSet,
message_queue: Arc<MessageQueue>, message_queue: Arc<MessageQueue>,
} }
impl<TD: DbTrait> ContinuallyRan for TributaryProcessorMessagesTask<TD> { impl<TD: DbTrait> ContinuallyRan for TributaryProcessorMessagesTask<TD> {
@@ -381,7 +245,7 @@ pub(crate) struct SignSlashReportTask<CD: DbTrait, TD: DbTrait, P: P2p> {
tributary_db: TD, tributary_db: TD,
tributary: Tributary<TD, Transaction, P>, tributary: Tributary<TD, Transaction, P>,
set: NewSetInformation, set: NewSetInformation,
key: Zeroizing<<Ristretto as WrappedGroup>::F>, key: Zeroizing<<Ristretto as Ciphersuite>::F>,
} }
impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for SignSlashReportTask<CD, TD, P> { impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for SignSlashReportTask<CD, TD, P> {
type Error = DoesNotError; type Error = DoesNotError;
@@ -428,7 +292,7 @@ impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for SignSlashReportTask<CD
/// Run the scan task whenever the Tributary adds a new block. /// Run the scan task whenever the Tributary adds a new block.
async fn scan_on_new_block<CD: DbTrait, TD: DbTrait, P: P2p>( async fn scan_on_new_block<CD: DbTrait, TD: DbTrait, P: P2p>(
db: CD, db: CD,
set: ExternalValidatorSet, set: ValidatorSet,
tributary: Tributary<TD, Transaction, P>, tributary: Tributary<TD, Transaction, P>,
scan_tributary_task: TaskHandle, scan_tributary_task: TaskHandle,
tasks_to_keep_alive: Vec<TaskHandle>, tasks_to_keep_alive: Vec<TaskHandle>,
@@ -459,17 +323,15 @@ async fn scan_on_new_block<CD: DbTrait, TD: DbTrait, P: P2p>(
/// - Spawn the ScanTributaryTask /// - Spawn the ScanTributaryTask
/// - Spawn the ProvideCosignCosignedTransactionsTask /// - Spawn the ProvideCosignCosignedTransactionsTask
/// - Spawn the TributaryProcessorMessagesTask /// - Spawn the TributaryProcessorMessagesTask
/// - Spawn the AddTributaryTransactionsTask
/// - Spawn the ConfirmDkgTask
/// - Spawn the SignSlashReportTask /// - Spawn the SignSlashReportTask
/// - Iterate the scan task whenever a new block occurs (not just on the standard interval) /// - Iterate the scan task whenever a new block occurs (not just on the standard interval)
pub(crate) async fn spawn_tributary<P: P2p>( pub(crate) async fn spawn_tributary<P: P2p>(
db: Db, db: Db,
message_queue: Arc<MessageQueue>, message_queue: Arc<MessageQueue>,
p2p: P, p2p: P,
p2p_add_tributary: &mpsc::UnboundedSender<(ExternalValidatorSet, Tributary<Db, Transaction, P>)>, p2p_add_tributary: &mpsc::UnboundedSender<(ValidatorSet, Tributary<Db, Transaction, P>)>,
set: NewSetInformation, set: NewSetInformation,
serai_key: Zeroizing<<Ristretto as WrappedGroup>::F>, serai_key: Zeroizing<<Ristretto as Ciphersuite>::F>,
) { ) {
// Don't spawn retired Tributaries // Don't spawn retired Tributaries
if crate::db::RetiredTributary::get(&db, set.set.network).map(|session| session.0) >= if crate::db::RetiredTributary::get(&db, set.set.network).map(|session| session.0) >=
@@ -478,8 +340,7 @@ pub(crate) async fn spawn_tributary<P: P2p>(
return; return;
} }
let genesis = let genesis = <[u8; 32]>::from(Blake2s::<U32>::digest((set.serai_block, set.set).encode()));
<[u8; 32]>::from(Blake2s::<U32>::digest(borsh::to_vec(&(set.serai_block, set.set)).unwrap()));
// Since the Serai block will be finalized, then cosigned, before we handle this, this time will // Since the Serai block will be finalized, then cosigned, before we handle this, this time will
// be a couple of minutes stale. While the Tributary will still function with a start time in the // be a couple of minutes stale. While the Tributary will still function with a start time in the
@@ -490,7 +351,7 @@ pub(crate) async fn spawn_tributary<P: P2p>(
let mut tributary_validators = Vec::with_capacity(set.validators.len()); let mut tributary_validators = Vec::with_capacity(set.validators.len());
for (validator, weight) in set.validators.iter().copied() { for (validator, weight) in set.validators.iter().copied() {
let validator_key = <Ristretto as GroupIo>::read_G(&mut validator.0.as_slice()) let validator_key = <Ristretto as Ciphersuite>::read_G(&mut validator.0.as_slice())
.expect("Serai validator had an invalid public key"); .expect("Serai validator had an invalid public key");
let weight = u64::from(weight); let weight = u64::from(weight);
tributary_validators.push((validator_key, weight)); tributary_validators.push((validator_key, weight));
@@ -542,45 +403,38 @@ pub(crate) async fn spawn_tributary<P: P2p>(
// Spawn the scan task // Spawn the scan task
let (scan_tributary_task_def, scan_tributary_task) = Task::new(); let (scan_tributary_task_def, scan_tributary_task) = Task::new();
tokio::spawn( tokio::spawn(
ScanTributaryTask::<_, P>::new(tributary_db.clone(), set.clone(), reader) ScanTributaryTask::<_, P>::new(tributary_db.clone(), &set, reader)
// This is the only handle for this TributaryProcessorMessagesTask, so when this task is // This is the only handle for this TributaryProcessorMessagesTask, so when this task is
// dropped, it will be too // dropped, it will be too
.continually_run(scan_tributary_task_def, vec![scan_tributary_messages_task]), .continually_run(scan_tributary_task_def, vec![scan_tributary_messages_task]),
); );
// Spawn the add transactions task
let (add_tributary_transactions_task_def, add_tributary_transactions_task) = Task::new();
tokio::spawn(
(AddTributaryTransactionsTask {
db: db.clone(),
tributary_db: tributary_db.clone(),
tributary: tributary.clone(),
set: set.clone(),
key: serai_key.clone(),
})
.continually_run(add_tributary_transactions_task_def, vec![]),
);
// Spawn the task to confirm the DKG result
let (confirm_dkg_task_def, confirm_dkg_task) = Task::new();
tokio::spawn(
ConfirmDkgTask::new(db.clone(), set.clone(), tributary_db.clone(), serai_key.clone())
.continually_run(confirm_dkg_task_def, vec![add_tributary_transactions_task]),
);
// Spawn the sign slash report task // Spawn the sign slash report task
let (sign_slash_report_task_def, sign_slash_report_task) = Task::new(); let (sign_slash_report_task_def, sign_slash_report_task) = Task::new();
tokio::spawn( tokio::spawn(
(SignSlashReportTask { (SignSlashReportTask {
db: db.clone(), db: db.clone(),
tributary_db, tributary_db: tributary_db.clone(),
tributary: tributary.clone(), tributary: tributary.clone(),
set: set.clone(), set: set.clone(),
key: serai_key, key: serai_key.clone(),
}) })
.continually_run(sign_slash_report_task_def, vec![]), .continually_run(sign_slash_report_task_def, vec![]),
); );
// Spawn the add transactions task
let (add_tributary_transactions_task_def, add_tributary_transactions_task) = Task::new();
tokio::spawn(
(AddTributaryTransactionsTask {
db: db.clone(),
tributary_db,
tributary: tributary.clone(),
set: set.set,
key: serai_key,
})
.continually_run(add_tributary_transactions_task_def, vec![]),
);
// Whenever a new block occurs, immediately run the scan task // Whenever a new block occurs, immediately run the scan task
// This function also preserves the ProvideCosignCosignedTransactionsTask handle until the // This function also preserves the ProvideCosignCosignedTransactionsTask handle until the
// Tributary is retired, ensuring it isn't dropped prematurely and that the task doesn't run ad // Tributary is retired, ensuring it isn't dropped prematurely and that the task doesn't run ad
@@ -590,6 +444,10 @@ pub(crate) async fn spawn_tributary<P: P2p>(
set.set, set.set,
tributary, tributary,
scan_tributary_task, scan_tributary_task,
vec![provide_cosign_cosigned_transactions_task, confirm_dkg_task, sign_slash_report_task], vec![
provide_cosign_cosigned_transactions_task,
sign_slash_report_task,
add_tributary_transactions_task,
],
)); ));
} }


@@ -8,7 +8,7 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = [] keywords = []
edition = "2021" edition = "2021"
publish = false publish = false
rust-version = "1.85" rust-version = "1.81"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true
@@ -20,15 +20,14 @@ workspace = true
[dependencies] [dependencies]
bitvec = { version = "1", default-features = false, features = ["std"] } bitvec = { version = "1", default-features = false, features = ["std"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
serai-client = { path = "../../substrate/client", version = "0.1", default-features = false, features = ["serai", "borsh"] }
dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] }
serai-client-serai = { path = "../../substrate/client/serai", default-features = false }
log = { version = "0.4", default-features = false, features = ["std"] } log = { version = "0.4", default-features = false, features = ["std"] }
futures = { version = "0.3", default-features = false, features = ["std"] } futures = { version = "0.3", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false }
serai-db = { path = "../../common/db", version = "0.1.1" } serai-db = { path = "../../common/db", version = "0.1.1" }
serai-task = { path = "../../common/task", version = "0.1" } serai-task = { path = "../../common/task", version = "0.1" }


@@ -1,6 +1,6 @@
AGPL-3.0-only license AGPL-3.0-only license
Copyright (c) 2023-2025 Luke Parker Copyright (c) 2023-2024 Luke Parker
This program is free software: you can redistribute it and/or modify This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as it under the terms of the GNU Affero General Public License Version 3 as


@@ -3,13 +3,7 @@ use std::sync::Arc;
use futures::stream::{StreamExt, FuturesOrdered}; use futures::stream::{StreamExt, FuturesOrdered};
use serai_client_serai::{ use serai_client::Serai;
abi::{
self,
primitives::{network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet},
},
Serai,
};
use messages::substrate::{InInstructionResult, ExecutedBatch, CoordinatorMessage}; use messages::substrate::{InInstructionResult, ExecutedBatch, CoordinatorMessage};
@@ -21,7 +15,6 @@ use serai_cosign::Cosigning;
create_db!( create_db!(
CoordinatorSubstrateCanonical { CoordinatorSubstrateCanonical {
NextBlock: () -> u64, NextBlock: () -> u64,
LastIndexedBatchId: (network: ExternalNetworkId) -> u32,
} }
); );
@@ -52,10 +45,10 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
// These are all the events which generate canonical messages // These are all the events which generate canonical messages
struct CanonicalEvents { struct CanonicalEvents {
time: u64, time: u64,
set_keys_events: Vec<abi::validator_sets::Event>, key_gen_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
slash_report_events: Vec<abi::validator_sets::Event>, set_retired_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
-      batch_events: Vec<abi::in_instructions::Event>,
-      burn_events: Vec<abi::coins::Event>,
+      batch_events: Vec<serai_client::in_instructions::InInstructionsEvent>,
+      burn_events: Vec<serai_client::coins::CoinsEvent>,
     }

     // For a cosigned block, fetch all relevant events
@@ -73,24 +66,40 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
         }
         Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()),
       };
-      let events = serai.events(block_hash).await.map_err(|e| format!("{e}"))?;
-      let set_keys_events = events.validator_sets().set_keys_events().cloned().collect();
-      let slash_report_events =
-        events.validator_sets().slash_report_events().cloned().collect();
-      let batch_events = events.in_instructions().batch_events().cloned().collect();
-      let burn_events = events.coins().burn_with_instruction_events().cloned().collect();
-      let Some(block) = serai.block(block_hash).await.map_err(|e| format!("{e:?}"))? else {
+      let temporal_serai = serai.as_of(block_hash);
+      let temporal_serai_validators = temporal_serai.validator_sets();
+      let temporal_serai_instructions = temporal_serai.in_instructions();
+      let temporal_serai_coins = temporal_serai.coins();
+      let (block, key_gen_events, set_retired_events, batch_events, burn_events) =
+        tokio::try_join!(
+          serai.block(block_hash),
+          temporal_serai_validators.key_gen_events(),
+          temporal_serai_validators.set_retired_events(),
+          temporal_serai_instructions.batch_events(),
+          temporal_serai_coins.burn_with_instruction_events(),
+        )
+        .map_err(|e| format!("{e:?}"))?;
+      let Some(block) = block else {
         Err(format!("Serai node didn't have cosigned block #{block_number}"))?
       };
-      // We use time in seconds, not milliseconds, here
-      let time = block.header.unix_time_in_millis() / 1000;
+      let time = if block_number == 0 {
+        block.time().unwrap_or(0)
+      } else {
+        // Serai's block time is in milliseconds
+        block
+          .time()
+          .ok_or_else(|| "non-genesis Serai block didn't have a time".to_string())? /
+          1000
+      };
       Ok((
         block_number,
         CanonicalEvents {
           time,
-          set_keys_events,
-          slash_report_events,
+          key_gen_events,
+          set_retired_events,
           batch_events,
           burn_events,
         },
@@ -122,9 +131,10 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
       let mut txn = self.db.txn();
-      for set_keys in block.set_keys_events {
-        let abi::validator_sets::Event::SetKeys { set, key_pair } = &set_keys else {
-          panic!("`SetKeys` event wasn't a `SetKeys` event: {set_keys:?}");
+      for key_gen in block.key_gen_events {
+        let serai_client::validator_sets::ValidatorSetsEvent::KeyGen { set, key_pair } = &key_gen
+        else {
+          panic!("KeyGen event wasn't a KeyGen event: {key_gen:?}");
         };
         crate::Canonical::send(
           &mut txn,
@@ -137,9 +147,10 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
         );
       }
-      for slash_report in block.slash_report_events {
-        let abi::validator_sets::Event::SlashReport { set } = &slash_report else {
-          panic!("`SlashReport` event wasn't a `SlashReport` event: {slash_report:?}");
+      for set_retired in block.set_retired_events {
+        let serai_client::validator_sets::ValidatorSetsEvent::SetRetired { set } = &set_retired
+        else {
+          panic!("SetRetired event wasn't a SetRetired event: {set_retired:?}");
         };
         crate::Canonical::send(
           &mut txn,
@@ -148,12 +159,10 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
         );
       }
-      for network in ExternalNetworkId::all() {
+      for network in serai_client::primitives::NETWORKS {
         let mut batch = None;
         for this_batch in &block.batch_events {
-          // Only irrefutable as this is the only member of the enum at this time
-          #[expect(irrefutable_let_patterns)]
-          let abi::in_instructions::Event::Batch {
+          let serai_client::in_instructions::InInstructionsEvent::Batch {
             network: batch_network,
             publishing_session,
             id,
@@ -171,7 +180,7 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
             batch = Some(ExecutedBatch {
               id: *id,
               publisher: *publishing_session,
-              external_network_block_hash: external_network_block_hash.0,
+              external_network_block_hash: *external_network_block_hash,
               in_instructions_hash: *in_instructions_hash,
               in_instruction_results: in_instruction_results
                 .iter()
@@ -184,20 +193,15 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
                 })
                 .collect(),
             });
-            if LastIndexedBatchId::get(&txn, network) != id.checked_sub(1) {
-              panic!(
-                "next batch from Serai's ID was not an increment of the last indexed batch's ID"
-              );
-            }
-            LastIndexedBatchId::set(&mut txn, network, id);
           }
         }
         let mut burns = vec![];
         for burn in &block.burn_events {
-          let abi::coins::Event::BurnWithInstruction { from: _, instruction } = &burn else {
-            panic!("BurnWithInstruction event wasn't a BurnWithInstruction event: {burn:?}");
+          let serai_client::coins::CoinsEvent::BurnWithInstruction { from: _, instruction } =
+            &burn
+          else {
+            panic!("Burn event wasn't a Burn.in event: {burn:?}");
           };
           if instruction.balance.coin.network() == network {
            burns.push(instruction.clone());
@@ -218,7 +222,3 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
       }
     }
   }
-
-pub(crate) fn last_indexed_batch_id(txn: &impl DbTxn, network: ExternalNetworkId) -> Option<u32> {
-  LastIndexedBatchId::get(txn, network)
-}
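Editor's note: the replacement code above fetches the block and all of its events concurrently via `tokio::try_join!`. A minimal, self-contained sketch of that pattern follows; `fetch_block` and `fetch_events` are hypothetical stand-ins, and it assumes the `tokio` crate with the `macros` and `rt` features enabled.

```rust
// Hypothetical stand-ins for the RPC getters joined in the diff above
async fn fetch_block(number: u64) -> Result<String, String> {
  Ok(format!("block {number}"))
}

async fn fetch_events(number: u64) -> Result<Vec<String>, String> {
  Ok(vec![format!("event in block {number}")])
}

#[tokio::main]
async fn main() -> Result<(), String> {
  // Both futures are polled concurrently; the first `Err` aborts the whole join
  let (block, events) = tokio::try_join!(fetch_block(5), fetch_events(5))?;
  println!("{block}: {events:?}");
  Ok(())
}
```

The join short-circuits on the first error, which is why the diff maps a single error over the whole tuple instead of handling each call separately.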

@@ -3,14 +3,9 @@ use std::sync::Arc;
 use futures::stream::{StreamExt, FuturesOrdered};

-use serai_client_serai::{
-  abi::primitives::{
-    BlockHash,
-    crypto::EmbeddedEllipticCurveKeys as EmbeddedEllipticCurveKeysStruct,
-    network_id::ExternalNetworkId,
-    validator_sets::{KeyShares, ExternalValidatorSet},
-    address::SeraiAddress,
-  },
+use serai_client::{
+  primitives::{PublicKey, NetworkId, EmbeddedEllipticCurve},
+  validator_sets::primitives::MAX_KEY_SHARES_PER_SET,
   Serai,
 };
@@ -24,10 +19,6 @@ use crate::NewSetInformation;
 create_db!(
   CoordinatorSubstrateEphemeral {
     NextBlock: () -> u64,
-    EmbeddedEllipticCurveKeys: (
-      network: ExternalNetworkId,
-      validator: SeraiAddress
-    ) -> EmbeddedEllipticCurveKeysStruct,
   }
 );
@@ -35,14 +26,14 @@ create_db!(
 pub struct EphemeralEventStream<D: Db> {
   db: D,
   serai: Arc<Serai>,
-  validator: SeraiAddress,
+  validator: PublicKey,
 }

 impl<D: Db> EphemeralEventStream<D> {
   /// Create a new ephemeral event stream.
   ///
   /// Only one of these may exist over the provided database.
-  pub fn new(db: D, serai: Arc<Serai>, validator: SeraiAddress) -> Self {
+  pub fn new(db: D, serai: Arc<Serai>, validator: PublicKey) -> Self {
     Self { db, serai, validator }
   }
 }
@@ -58,11 +49,10 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
       // These are all the events which generate canonical messages
       struct EphemeralEvents {
-        block_hash: BlockHash,
+        block_hash: [u8; 32],
         time: u64,
-        embedded_elliptic_curve_keys_events: Vec<serai_client_serai::abi::validator_sets::Event>,
-        set_decided_events: Vec<serai_client_serai::abi::validator_sets::Event>,
-        accepted_handover_events: Vec<serai_client_serai::abi::validator_sets::Event>,
+        new_set_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
+        accepted_handover_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
       }

       // For a cosigned block, fetch all relevant events
@@ -81,31 +71,31 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
         Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()),
       };
-      let events = serai.events(block_hash).await.map_err(|e| format!("{e}"))?;
-      let embedded_elliptic_curve_keys_events = events
-        .validator_sets()
-        .set_embedded_elliptic_curve_keys_events()
-        .cloned()
-        .collect::<Vec<_>>();
-      let set_decided_events =
-        events.validator_sets().set_decided_events().cloned().collect::<Vec<_>>();
-      let accepted_handover_events =
-        events.validator_sets().accepted_handover_events().cloned().collect::<Vec<_>>();
-      let Some(block) = serai.block(block_hash).await.map_err(|e| format!("{e:?}"))? else {
+      let temporal_serai = serai.as_of(block_hash);
+      let temporal_serai_validators = temporal_serai.validator_sets();
+      let (block, new_set_events, accepted_handover_events) = tokio::try_join!(
+        serai.block(block_hash),
+        temporal_serai_validators.new_set_events(),
+        temporal_serai_validators.accepted_handover_events(),
+      )
+      .map_err(|e| format!("{e:?}"))?;
+      let Some(block) = block else {
         Err(format!("Serai node didn't have cosigned block #{block_number}"))?
       };
-      // We use time in seconds, not milliseconds, here
-      let time = block.header.unix_time_in_millis() / 1000;
+      let time = if block_number == 0 {
+        block.time().unwrap_or(0)
+      } else {
+        // Serai's block time is in milliseconds
+        block
+          .time()
+          .ok_or_else(|| "non-genesis Serai block didn't have a time".to_string())? /
+          1000
+      };
       Ok((
         block_number,
-        EphemeralEvents {
-          block_hash,
-          time,
-          embedded_elliptic_curve_keys_events,
-          set_decided_events,
-          accepted_handover_events,
-        },
+        EphemeralEvents { block_hash, time, new_set_events, accepted_handover_events },
       ))
     }
   }
@@ -136,102 +126,118 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
       let mut txn = self.db.txn();
-      for event in block.embedded_elliptic_curve_keys_events {
-        let serai_client_serai::abi::validator_sets::Event::SetEmbeddedEllipticCurveKeys {
-          validator,
-          keys,
-        } = &event
-        else {
-          panic!(
-            "{}: {event:?}",
-            "`SetEmbeddedEllipticCurveKeys` event wasn't a `SetEmbeddedEllipticCurveKeys` event"
-          );
-        };
-        EmbeddedEllipticCurveKeys::set(&mut txn, keys.network(), *validator, keys);
-      }
-      for set_decided in block.set_decided_events {
-        let serai_client_serai::abi::validator_sets::Event::SetDecided { set, validators } =
-          &set_decided
-        else {
-          panic!("`SetDecided` event wasn't a `SetDecided` event: {set_decided:?}");
+      for new_set in block.new_set_events {
+        let serai_client::validator_sets::ValidatorSetsEvent::NewSet { set } = &new_set else {
+          panic!("NewSet event wasn't a NewSet event: {new_set:?}");
         };

         // We only coordinate over external networks
-        let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
-        let validators =
-          validators.iter().map(|(validator, weight)| (*validator, weight.0)).collect::<Vec<_>>();
+        if set.network == NetworkId::Serai {
+          continue;
+        }

+        let serai = self.serai.as_of(block.block_hash);
+        let serai = serai.validator_sets();
+        let Some(validators) =
+          serai.participants(set.network).await.map_err(|e| format!("{e:?}"))?
+        else {
+          Err(format!(
+            "block #{block_number} declared a new set but didn't have the participants"
+          ))?
+        };
         let in_set = validators.iter().any(|(validator, _)| *validator == self.validator);
         if in_set {
           if u16::try_from(validators.len()).is_err() {
             Err("more than u16::MAX validators sent")?;
           }
-          let Ok(validators) = validators
-            .into_iter()
-            .map(|(validator, weight)| u16::try_from(weight).map(|weight| (validator, weight)))
-            .collect::<Result<Vec<_>, _>>()
-          else {
-            Err("validator's weight exceeded u16::MAX".to_string())?
-          };

           // Do the summation in u32 so we don't risk a u16 overflow
           let total_weight = validators.iter().map(|(_, weight)| u32::from(*weight)).sum::<u32>();
-          if total_weight > u32::from(KeyShares::MAX_PER_SET) {
+          if total_weight > u32::from(MAX_KEY_SHARES_PER_SET) {
             Err(format!(
-              "{set:?} has {total_weight} key shares when the max is {}",
-              KeyShares::MAX_PER_SET
+              "{set:?} has {total_weight} key shares when the max is {MAX_KEY_SHARES_PER_SET}"
             ))?;
           }
-          let total_weight = u16::try_from(total_weight)
-            .expect("value smaller than `u16` constant but doesn't fit in `u16`");
+          let total_weight = u16::try_from(total_weight).unwrap();

           // Fetch all of the validators' embedded elliptic curve keys
+          let mut embedded_elliptic_curve_keys = FuturesOrdered::new();
+          for (validator, _) in &validators {
+            let validator = *validator;
+            // try_join doesn't return a future so we need to wrap it in this additional async
+            // block
+            embedded_elliptic_curve_keys.push_back(async move {
+              tokio::try_join!(
+                // One future to fetch the substrate embedded key
+                serai
+                  .embedded_elliptic_curve_key(validator, EmbeddedEllipticCurve::Embedwards25519),
+                // One future to fetch the external embedded key, if there is a distinct curve
+                async {
+                  // `embedded_elliptic_curves` is documented to have the second entry be the
+                  // network-specific curve (if it exists and is distinct from Embedwards25519)
+                  if let Some(curve) = set.network.embedded_elliptic_curves().get(1) {
+                    serai.embedded_elliptic_curve_key(validator, *curve).await.map(Some)
+                  } else {
+                    Ok(None)
+                  }
+                }
+              )
+              .map(|(substrate_embedded_key, external_embedded_key)| {
+                (validator, substrate_embedded_key, external_embedded_key)
+              })
+            });
+          }
           let mut evrf_public_keys = Vec::with_capacity(usize::from(total_weight));
           for (validator, weight) in &validators {
-            let keys = match EmbeddedEllipticCurveKeys::get(&txn, set.network, *validator)
-              .expect("selected validator lacked embedded elliptic curve keys")
-            {
-              EmbeddedEllipticCurveKeysStruct::Bitcoin(substrate, external) => {
-                assert_eq!(set.network, ExternalNetworkId::Bitcoin);
-                (substrate, external.to_vec())
-              }
-              EmbeddedEllipticCurveKeysStruct::Ethereum(substrate, external) => {
-                assert_eq!(set.network, ExternalNetworkId::Ethereum);
-                (substrate, external.to_vec())
-              }
-              EmbeddedEllipticCurveKeysStruct::Monero(substrate) => {
-                assert_eq!(set.network, ExternalNetworkId::Monero);
-                (substrate, substrate.to_vec())
-              }
-            };
-            for _ in 0 .. *weight {
-              evrf_public_keys.push(keys.clone());
-            }
-          }
+            let (future_validator, substrate_embedded_key, external_embedded_key) =
+              embedded_elliptic_curve_keys.next().await.unwrap().map_err(|e| format!("{e:?}"))?;
+            assert_eq!(*validator, future_validator);
+            let external_embedded_key =
+              external_embedded_key.unwrap_or(substrate_embedded_key.clone());
+            match (substrate_embedded_key, external_embedded_key) {
+              (Some(substrate_embedded_key), Some(external_embedded_key)) => {
+                let substrate_embedded_key = <[u8; 32]>::try_from(substrate_embedded_key)
+                  .map_err(|_| "Embedwards25519 key wasn't 32 bytes".to_string())?;
+                for _ in 0 .. *weight {
+                  evrf_public_keys.push((substrate_embedded_key, external_embedded_key.clone()));
+                }
+              }
+              _ => Err("NewSet with validator missing an embedded key".to_string())?,
+            }
+          }

-          let mut new_set = NewSetInformation {
-            set,
-            serai_block: block.block_hash.0,
-            declaration_time: block.time,
-            // TODO: This should be inlined into the Processor's key gen code
-            // It's legacy from when we removed participants from the key gen
-            threshold: ((total_weight * 2) / 3) + 1,
-            validators,
-            evrf_public_keys,
-            participant_indexes: Default::default(),
-            participant_indexes_reverse_lookup: Default::default(),
-          };
-          // These aren't serialized, and we immediately serialize and drop this, so this isn't
-          // necessary. It's just good practice not have this be dirty
-          new_set.init_participant_indexes();
-          crate::NewSet::send(&mut txn, &new_set);
+          crate::NewSet::send(
+            &mut txn,
+            &NewSetInformation {
+              set: *set,
+              serai_block: block.block_hash,
+              declaration_time: block.time,
+              // TODO: Why do we have this as an explicit field here?
+              // Shouldn't thiis be inlined into the Processor's key gen code, where it's used?
+              threshold: ((total_weight * 2) / 3) + 1,
+              // TODO: Why are `validators` and `evrf_public_keys` two separate fields?
+              validators,
+              evrf_public_keys,
+            },
+          );
         }
       }

       for accepted_handover in block.accepted_handover_events {
-        let serai_client_serai::abi::validator_sets::Event::AcceptedHandover { set } =
+        let serai_client::validator_sets::ValidatorSetsEvent::AcceptedHandover { set } =
           &accepted_handover
         else {
           panic!("AcceptedHandover event wasn't a AcceptedHandover event: {accepted_handover:?}");
         };
-        let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
-        crate::SignSlashReport::send(&mut txn, set);
+        crate::SignSlashReport::send(&mut txn, *set);
       }
       txn.commit();
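Editor's note: the new code above pushes one lookup per validator into a `FuturesOrdered` so results come back in submission order and can be zipped against the input list. A minimal sketch of that pattern, with a hypothetical `lookup_key` function and assuming the `futures` and `tokio` crates:

```rust
use futures::stream::{StreamExt, FuturesOrdered};

// Hypothetical per-validator lookup standing in for the embedded-key RPC call
async fn lookup_key(validator: u8) -> Result<[u8; 32], String> {
  Ok([validator; 32])
}

#[tokio::main]
async fn main() -> Result<(), String> {
  let validators = [1u8, 2, 3];
  let mut keys = FuturesOrdered::new();
  for validator in validators {
    keys.push_back(lookup_key(validator));
  }
  // `next().await` yields results in push order, not completion order
  for validator in validators {
    let key = keys.next().await.unwrap()?;
    assert_eq!(key[0], validator);
  }
  Ok(())
}
```

Preserving order is what lets the diff assert `assert_eq!(*validator, future_validator)` when draining the stream.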

@@ -1,21 +1,14 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]

-use std::collections::HashMap;
-
-use borsh::{BorshSerialize, BorshDeserialize};
-
-use dkg::Participant;
-
-use serai_client_serai::abi::{
-  primitives::{
-    network_id::ExternalNetworkId,
-    validator_sets::{Session, ExternalValidatorSet, SlashReport},
-    crypto::{Signature, KeyPair},
-    address::SeraiAddress,
-    instructions::SignedBatch,
-  },
+use scale::{Encode, Decode};
+
+use borsh::{io, BorshSerialize, BorshDeserialize};
+
+use serai_client::{
+  primitives::{NetworkId, PublicKey, Signature, SeraiAddress},
+  validator_sets::primitives::{Session, ValidatorSet, KeyPair},
+  in_instructions::primitives::SignedBatch,
   Transaction,
 };
@@ -23,7 +16,6 @@ use serai_db::*;
 mod canonical;
 pub use canonical::CanonicalEventStream;
-use canonical::last_indexed_batch_id;

 mod ephemeral;
 pub use ephemeral::EphemeralEventStream;
@@ -34,50 +26,39 @@ pub use publish_batch::PublishBatchTask;
 mod publish_slash_report;
 pub use publish_slash_report::PublishSlashReportTask;

+fn borsh_serialize_validators<W: io::Write>(
+  validators: &Vec<(PublicKey, u16)>,
+  writer: &mut W,
+) -> Result<(), io::Error> {
+  // This doesn't use `encode_to` as `encode_to` panics if the writer returns an error
+  writer.write_all(&validators.encode())
+}
+
+fn borsh_deserialize_validators<R: io::Read>(
+  reader: &mut R,
+) -> Result<Vec<(PublicKey, u16)>, io::Error> {
+  Decode::decode(&mut scale::IoReader(reader)).map_err(io::Error::other)
+}
+
 /// The information for a new set.
 #[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
-#[borsh(init = init_participant_indexes)]
 pub struct NewSetInformation {
   /// The set.
-  pub set: ExternalValidatorSet,
+  pub set: ValidatorSet,
   /// The Serai block which declared it.
   pub serai_block: [u8; 32],
-  /// The time of the block which declared it, in seconds since the epoch.
+  /// The time of the block which declared it, in seconds.
   pub declaration_time: u64,
   /// The threshold to use.
   pub threshold: u16,
   /// The validators, with the amount of key shares they have.
-  pub validators: Vec<(SeraiAddress, u16)>,
+  #[borsh(
+    serialize_with = "borsh_serialize_validators",
+    deserialize_with = "borsh_deserialize_validators"
+  )]
+  pub validators: Vec<(PublicKey, u16)>,
   /// The eVRF public keys.
-  ///
-  /// This will have the necessary copies of the keys proper for each validator's weight,
-  /// accordingly syncing up with `participant_indexes`.
   pub evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
-  /// The participant indexes, indexed by their validator.
-  #[borsh(skip)]
-  pub participant_indexes: HashMap<SeraiAddress, Vec<Participant>>,
-  /// The validators, indexed by their participant indexes.
-  #[borsh(skip)]
-  pub participant_indexes_reverse_lookup: HashMap<Participant, SeraiAddress>,
-}
-
-impl NewSetInformation {
-  fn init_participant_indexes(&mut self) {
-    let mut next_i = 1;
-    self.participant_indexes = HashMap::with_capacity(self.validators.len());
-    self.participant_indexes_reverse_lookup = HashMap::with_capacity(self.validators.len());
-    for (validator, weight) in &self.validators {
-      let mut these_is = Vec::with_capacity((*weight).into());
-      for _ in 0 .. *weight {
-        let this_i = Participant::new(next_i).unwrap();
-        next_i += 1;
-        these_is.push(this_i);
-        self.participant_indexes_reverse_lookup.insert(this_i, *validator);
-      }
-      self.participant_indexes.insert(*validator, these_is);
-    }
-  }
 }

 mod _public_db {
@@ -86,24 +67,24 @@ mod _public_db {
   db_channel!(
     CoordinatorSubstrate {
       // Canonical messages to send to the processor
-      Canonical: (network: ExternalNetworkId) -> messages::substrate::CoordinatorMessage,
+      Canonical: (network: NetworkId) -> messages::substrate::CoordinatorMessage,
       // Relevant new set, from an ephemeral event stream
       NewSet: () -> NewSetInformation,
       // Potentially relevant sign slash report, from an ephemeral event stream
-      SignSlashReport: (set: ExternalValidatorSet) -> (),
+      SignSlashReport: (set: ValidatorSet) -> (),
       // Signed batches to publish onto the Serai network
-      SignedBatches: (network: ExternalNetworkId) -> SignedBatch,
+      SignedBatches: (network: NetworkId) -> SignedBatch,
     }
   );

   create_db!(
     CoordinatorSubstrate {
       // Keys to set on the Serai network
-      Keys: (network: ExternalNetworkId) -> (Session, Transaction),
+      Keys: (network: NetworkId) -> (Session, Vec<u8>),
       // Slash reports to publish onto the Serai network
-      SlashReports: (network: ExternalNetworkId) -> (Session, Transaction),
+      SlashReports: (network: NetworkId) -> (Session, Vec<u8>),
     }
   );
 }
@@ -113,7 +94,7 @@ pub struct Canonical;
 impl Canonical {
   pub(crate) fn send(
     txn: &mut impl DbTxn,
-    network: ExternalNetworkId,
+    network: NetworkId,
     msg: &messages::substrate::CoordinatorMessage,
   ) {
     _public_db::Canonical::send(txn, network, msg);
@@ -121,7 +102,7 @@ impl Canonical {
   /// Try to receive a canonical event, returning `None` if there is none to receive.
   pub fn try_recv(
     txn: &mut impl DbTxn,
-    network: ExternalNetworkId,
+    network: NetworkId,
   ) -> Option<messages::substrate::CoordinatorMessage> {
     _public_db::Canonical::try_recv(txn, network)
   }
@@ -145,12 +126,12 @@ impl NewSet {
 /// notifications for all relevant validator sets will be included.
 pub struct SignSlashReport;
 impl SignSlashReport {
-  pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet) {
+  pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet) {
     _public_db::SignSlashReport::send(txn, set, &());
   }
   /// Try to receive a notification to sign a slash report, returning `None` if there is none to
   /// receive.
-  pub fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<()> {
+  pub fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<()> {
     _public_db::SignSlashReport::try_recv(txn, set)
   }
 }
@@ -164,7 +145,7 @@ impl Keys {
   /// reported at once.
   pub fn set(
     txn: &mut impl DbTxn,
-    set: ExternalValidatorSet,
+    set: ValidatorSet,
     key_pair: KeyPair,
     signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
     signature: Signature,
@@ -176,19 +157,17 @@ impl Keys {
       }
     }

-    let tx = serai_client_serai::ValidatorSets::set_keys(
+    let tx = serai_client::validator_sets::SeraiValidatorSets::set_keys(
       set.network,
       key_pair,
       signature_participants,
       signature,
     );
-    _public_db::Keys::set(txn, set.network, &(set.session, tx));
+    _public_db::Keys::set(txn, set.network, &(set.session, tx.encode()));
   }
-  pub(crate) fn take(
-    txn: &mut impl DbTxn,
-    network: ExternalNetworkId,
-  ) -> Option<(Session, Transaction)> {
-    _public_db::Keys::take(txn, network)
+  pub(crate) fn take(txn: &mut impl DbTxn, network: NetworkId) -> Option<(Session, Transaction)> {
+    let (session, tx) = _public_db::Keys::take(txn, network)?;
+    Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
   }
 }
@@ -196,14 +175,20 @@ impl Keys {
 pub struct SignedBatches;
 impl SignedBatches {
   /// Send a `SignedBatch` to publish onto Serai.
+  ///
+  /// These will be published sequentially. Out-of-order sending risks hanging the task.
   pub fn send(txn: &mut impl DbTxn, batch: &SignedBatch) {
-    _public_db::SignedBatches::send(txn, batch.batch.network(), batch);
+    _public_db::SignedBatches::send(txn, batch.batch.network, batch);
   }
-  pub(crate) fn try_recv(txn: &mut impl DbTxn, network: ExternalNetworkId) -> Option<SignedBatch> {
+  pub(crate) fn try_recv(txn: &mut impl DbTxn, network: NetworkId) -> Option<SignedBatch> {
     _public_db::SignedBatches::try_recv(txn, network)
   }
 }

+/// The slash report was invalid.
+#[derive(Debug)]
+pub struct InvalidSlashReport;
+
 /// The slash reports to publish onto Serai.
 pub struct SlashReports;
 impl SlashReports {
@@ -211,27 +196,33 @@ impl SlashReports {
   ///
   /// This only saves the most recent slashes as only a single session is eligible to have its
   /// slashes reported at once.
+  ///
+  /// Returns Err if the slashes are invalid. Returns Ok if the slashes weren't detected as
+  /// invalid. Slashes may be considered invalid by the Serai blockchain later even if not detected
+  /// as invalid here.
   pub fn set(
     txn: &mut impl DbTxn,
-    set: ExternalValidatorSet,
-    slash_report: SlashReport,
+    set: ValidatorSet,
+    slashes: Vec<(SeraiAddress, u32)>,
     signature: Signature,
-  ) {
+  ) -> Result<(), InvalidSlashReport> {
     // If we have a more recent slash report, don't write this historic one
     if let Some((existing_session, _)) = _public_db::SlashReports::get(txn, set.network) {
       if existing_session.0 >= set.session.0 {
-        return;
+        return Ok(());
       }
     }

-    let tx =
-      serai_client_serai::ValidatorSets::report_slashes(set.network, slash_report, signature);
-    _public_db::SlashReports::set(txn, set.network, &(set.session, tx));
+    let tx = serai_client::validator_sets::SeraiValidatorSets::report_slashes(
+      set.network,
+      slashes.try_into().map_err(|_| InvalidSlashReport)?,
+      signature,
+    );
+    _public_db::SlashReports::set(txn, set.network, &(set.session, tx.encode()));
+    Ok(())
   }
-  pub(crate) fn take(
-    txn: &mut impl DbTxn,
-    network: ExternalNetworkId,
-  ) -> Option<(Session, Transaction)> {
-    _public_db::SlashReports::take(txn, network)
+  pub(crate) fn take(txn: &mut impl DbTxn, network: NetworkId) -> Option<(Session, Transaction)> {
    let (session, tx) = _public_db::SlashReports::take(txn, network)?;
    Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
   }
 }
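Editor's note: the new `NewSetInformation` bridges two codecs, storing a field via SCALE encoding inside a borsh-serialized struct using `#[borsh(serialize_with, deserialize_with)]`. A simplified, self-contained sketch of that bridge follows; the `Wrapper` type is hypothetical, and it assumes `borsh` 1.x plus `parity-scale-codec` renamed to `scale`, as in this repository's Cargo.tomls.

```rust
use borsh::{io, BorshSerialize, BorshDeserialize};
use scale::{Encode, Decode};

// Encode the value with SCALE, then hand the bytes to borsh's writer.
// This avoids `encode_to`, which panics if the writer returns an error.
fn serialize_scale<T: Encode, W: io::Write>(value: &T, writer: &mut W) -> Result<(), io::Error> {
  writer.write_all(&value.encode())
}

// Decode with SCALE directly from borsh's reader, mapping the codec error into io::Error
fn deserialize_scale<T: Decode, R: io::Read>(reader: &mut R) -> Result<T, io::Error> {
  T::decode(&mut scale::IoReader(reader)).map_err(io::Error::other)
}

#[derive(BorshSerialize, BorshDeserialize)]
struct Wrapper {
  #[borsh(serialize_with = "serialize_scale", deserialize_with = "deserialize_scale")]
  inner: Vec<(u32, u16)>,
}

fn main() {
  let wrapper = Wrapper { inner: vec![(1, 2), (3, 4)] };
  let bytes = borsh::to_vec(&wrapper).unwrap();
  let decoded: Wrapper = borsh::from_slice(&bytes).unwrap();
  assert_eq!(decoded.inner, wrapper.inner);
}
```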

@@ -1,85 +1,65 @@
 use core::future::Future;
 use std::sync::Arc;

-use serai_client_serai::{
-  abi::primitives::{network_id::ExternalNetworkId, instructions::SignedBatch},
-  RpcError, Serai,
-};
-
-use serai_db::{Get, DbTxn, Db, create_db};
+use serai_db::{DbTxn, Db};
+
+use serai_client::{primitives::NetworkId, SeraiError, Serai};

 use serai_task::ContinuallyRan;

 use crate::SignedBatches;

-create_db!(
-  CoordinatorSubstrate {
-    LastPublishedBatch: (network: ExternalNetworkId) -> u32,
-    BatchesToPublish: (network: ExternalNetworkId, batch: u32) -> SignedBatch,
-  }
-);
-
 /// Publish `SignedBatch`s from `SignedBatches` onto Serai.
 pub struct PublishBatchTask<D: Db> {
   db: D,
   serai: Arc<Serai>,
-  network: ExternalNetworkId,
+  network: NetworkId,
 }

 impl<D: Db> PublishBatchTask<D> {
   /// Create a task to publish `SignedBatch`s onto Serai.
-  pub fn new(db: D, serai: Arc<Serai>, network: ExternalNetworkId) -> Self {
-    Self { db, serai, network }
+  ///
+  /// Returns None if `network == NetworkId::Serai`.
+  // TODO: ExternalNetworkId
+  pub fn new(db: D, serai: Arc<Serai>, network: NetworkId) -> Option<Self> {
+    if network == NetworkId::Serai {
+      None?
+    };
+    Some(Self { db, serai, network })
   }
 }

 impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
-  type Error = RpcError;
+  type Error = SeraiError;

   fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
-      // Read from SignedBatches, which is sequential, into our own mapping
+      let mut made_progress = false;
       loop {
         let mut txn = self.db.txn();
         let Some(batch) = SignedBatches::try_recv(&mut txn, self.network) else {
-          // No batch to publish at this time
           break;
         };

-        // If this is a Batch not yet published, save it into our unordered mapping
-        if LastPublishedBatch::get(&txn, self.network) < Some(batch.batch.id()) {
-          BatchesToPublish::set(&mut txn, self.network, batch.batch.id(), &batch);
-        }
-
-        txn.commit();
-      }
-
-      // Synchronize our last published batch with the Serai network's
-      let next_to_publish = {
-        let mut txn = self.db.txn();
-        let last_batch = crate::last_indexed_batch_id(&txn, self.network);
-        let mut our_last_batch = LastPublishedBatch::get(&txn, self.network);
-        while our_last_batch < last_batch {
-          let next_batch = our_last_batch.map(|batch| batch + 1).unwrap_or(0);
-          // Clean up the Batch to publish since it's already been published
-          BatchesToPublish::take(&mut txn, self.network, next_batch);
-          our_last_batch = Some(next_batch);
-        }
-        if let Some(last_batch) = our_last_batch {
-          LastPublishedBatch::set(&mut txn, self.network, &last_batch);
-        }
-        txn.commit();
-        last_batch.map(|batch| batch + 1).unwrap_or(0)
-      };
-
-      let made_progress =
-        if let Some(batch) = BatchesToPublish::get(&self.db, self.network, next_to_publish) {
+        // Publish this Batch if it hasn't already been published
+        let serai = self.serai.as_of_latest_finalized_block().await?;
+        let last_batch = serai.in_instructions().last_batch_for_network(self.network).await?;
+        if last_batch < Some(batch.batch.id) {
+          // This stream of Batches *should* be sequential within the larger context of the Serai
+          // coordinator. In this library, we use a more relaxed definition and don't assert
+          // sequence. This does risk hanging the task, if Batch #n+1 is sent before Batch #n, but
+          // that is a documented fault of the `SignedBatches` API.
           self
             .serai
-            .publish_transaction(&serai_client_serai::InInstructions::execute_batch(batch))
+            .publish(&serai_client::in_instructions::SeraiInInstructions::execute_batch(batch))
             .await?;
-          true
-        } else {
-          false
-        };
+        }
+
+        txn.commit();
+        made_progress = true;
+      }
       Ok(made_progress)
     }
   }
 }

@@ -3,10 +3,7 @@ use std::sync::Arc;
 use serai_db::{DbTxn, Db};

-use serai_client_serai::{
-  abi::primitives::{network_id::ExternalNetworkId, validator_sets::Session},
-  Serai,
-};
+use serai_client::{primitives::NetworkId, validator_sets::primitives::Session, Serai};

 use serai_task::ContinuallyRan;
@@ -25,29 +22,37 @@ impl<D: Db> PublishSlashReportTask<D> {
   }
 }

-impl<D: Db> PublishSlashReportTask<D> {
-  // Returns if a slash report was successfully published
-  async fn publish(&mut self, network: ExternalNetworkId) -> Result<bool, String> {
-    let mut txn = self.db.txn();
-    let Some((session, slash_report)) = SlashReports::take(&mut txn, network) else {
-      // No slash report to publish
-      return Ok(false);
-    };
-
-    // This uses the latest finalized block, not the latest cosigned block, which should be
-    // fine as in the worst case, the only impact is no longer attempting TX publication
-    let serai = self.serai.state().await.map_err(|e| format!("{e:?}"))?;
-    let session_after_slash_report = Session(session.0 + 1);
-    let current_session =
-      serai.current_session(network.into()).await.map_err(|e| format!("{e:?}"))?;
-    let current_session = current_session.map(|session| session.0);
-    // Only attempt to publish the slash report for session #n while session #n+1 is still
-    // active
-    let session_after_slash_report_retired = current_session > Some(session_after_slash_report.0);
-    if session_after_slash_report_retired {
-      // Commit the txn to drain this slash report from the database and not try it again later
-      txn.commit();
-      return Ok(false);
-    }
+impl<D: Db> ContinuallyRan for PublishSlashReportTask<D> {
+  type Error = String;
+
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
+    async move {
+      let mut made_progress = false;
+      for network in serai_client::primitives::NETWORKS {
+        if network == NetworkId::Serai {
+          continue;
+        };
+        let mut txn = self.db.txn();
+        let Some((session, slash_report)) = SlashReports::take(&mut txn, network) else {
+          // No slash report to publish
+          continue;
+        };
+
+        let serai =
+          self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
+        let serai = serai.validator_sets();
+        let session_after_slash_report = Session(session.0 + 1);
+        let current_session = serai.session(network).await.map_err(|e| format!("{e:?}"))?;
+        let current_session = current_session.map(|session| session.0);
+        // Only attempt to publish the slash report for session #n while session #n+1 is still
+        // active
+        let session_after_slash_report_retired =
+          current_session > Some(session_after_slash_report.0);
+        if session_after_slash_report_retired {
+          // Commit the txn to drain this slash report from the database and not try it again later
+          txn.commit();
+          continue;
+        }

         if Some(session_after_slash_report.0) != current_session {
@@ -58,45 +63,26 @@ impl<D: Db> PublishSlashReportTask<D> {
-    }
-
-    // If this session which should publish a slash report already has, move on
-    if !serai.pending_slash_report(network).await.map_err(|e| format!("{e:?}"))? {
-      txn.commit();
-      return Ok(false);
-    };
-
-    // Since this slash report is still pending, publish it
-    match self.serai.publish_transaction(&slash_report).await {
-      Ok(()) => {
-        txn.commit();
-        Ok(true)
-      }
-      // This could be specific to this TX (such as an already in mempool error) and it may be
-      // worthwhile to continue iteration with the other pending slash reports. We assume this
-      // error ephemeral and that the latency incurred for this ephemeral error to resolve is
-      // miniscule compared to the window available to publish the slash report. That makes
-      // this a non-issue.
-      Err(e) => Err(format!("couldn't publish slash report transaction: {e:?}")),
-    }
-  }
-}
-
-impl<D: Db> ContinuallyRan for PublishSlashReportTask<D> {
-  type Error = String;
-
-  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
-    async move {
-      let mut made_progress = false;
-      let mut error = None;
-      for network in ExternalNetworkId::all() {
-        let network_res = self.publish(network).await;
-        // We made progress if any network successfully published their slash report
-        made_progress |= network_res == Ok(true);
-        // We want to yield the first error *after* attempting for every network
-        error = error.or(network_res.err());
-      }
-      // Yield the error
-      if let Some(error) = error {
-        Err(error)?
-      }
+        }
+
+        // If this session which should publish a slash report already has, move on
+        let key_pending_slash_report =
+          serai.key_pending_slash_report(network).await.map_err(|e| format!("{e:?}"))?;
+        if key_pending_slash_report.is_none() {
+          txn.commit();
+          continue;
+        };
+
+        match self.serai.publish(&slash_report).await {
+          Ok(()) => {
+            txn.commit();
+            made_progress = true;
+          }
+          // This could be specific to this TX (such as an already in mempool error) and it may be
+          // worthwhile to continue iteration with the other pending slash reports. We assume this
+          // error ephemeral and that the latency incurred for this ephemeral error to resolve is
+          // miniscule compared to the window available to publish the slash report. That makes
+          // this a non-issue.
+          Err(e) => Err(format!("couldn't publish slash report transaction: {e:?}"))?,
+        }
+      }
       Ok(made_progress)
     }
   }
 }
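Editor's note: the removed implementation above tried every network before surfacing an error, accumulating the first failure while still recording whether any network made progress. A minimal sketch of that accumulate-then-yield pattern, with a hypothetical `publish` function:

```rust
// Hypothetical per-network publish; network 2 always fails, network 1 makes progress
fn publish(network: u8) -> Result<bool, String> {
  if network == 2 { Err("network 2 failed".to_string()) } else { Ok(network == 1) }
}

fn run_iteration() -> Result<bool, String> {
  let mut made_progress = false;
  let mut error = None;
  for network in [1u8, 2, 3] {
    let res = publish(network);
    // Progress is tracked even if a later network errors
    made_progress |= res == Ok(true);
    // Keep only the first error, yielded after every network was attempted
    error = error.or(res.err());
  }
  if let Some(error) = error {
    return Err(error);
  }
  Ok(made_progress)
}

fn main() {
  // All three networks were attempted before the error surfaced
  assert_eq!(run_iteration(), Err("network 2 failed".to_string()));
}
```

The replacement code instead propagates the first error immediately with `?`, trading that per-network isolation for simpler control flow.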

@@ -3,10 +3,7 @@ use std::sync::Arc;
 use serai_db::{DbTxn, Db};

-use serai_client_serai::{
-  abi::primitives::{network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet},
-  Serai,
-};
+use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet, Serai};

 use serai_task::ContinuallyRan;
@@ -31,18 +28,21 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {
   fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
       let mut made_progress = false;
-      for network in ExternalNetworkId::all() {
+      for network in serai_client::primitives::NETWORKS {
+        if network == NetworkId::Serai {
+          continue;
+        };
         let mut txn = self.db.txn();
         let Some((session, keys)) = Keys::take(&mut txn, network) else {
           // No keys to set
           continue;
         };

-        // This uses the latest finalized block, not the latest cosigned block, which should be
-        // fine as in the worst case, the only impact is no longer attempting TX publication
-        let serai = self.serai.state().await.map_err(|e| format!("{e:?}"))?;
-        let current_session =
-          serai.current_session(network.into()).await.map_err(|e| format!("{e:?}"))?;
+        let serai =
+          self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
+        let serai = serai.validator_sets();
+        let current_session = serai.session(network).await.map_err(|e| format!("{e:?}"))?;
         let current_session = current_session.map(|session| session.0);
         // Only attempt to set these keys if this isn't a retired session
         if Some(session.0) < current_session {
@@ -60,7 +60,7 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {
         // If this session already has had its keys set, move on
         if serai
-          .keys(ExternalValidatorSet { network, session })
+          .keys(ValidatorSet { network, session })
           .await
           .map_err(|e| format!("{e:?}"))?
           .is_some()
@@ -69,7 +69,7 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {
           continue;
         };

-        match self.serai.publish_transaction(&keys).await {
+        match self.serai.publish(&keys).await {
           Ok(()) => {
             txn.commit();
             made_progress = true;

@@ -6,7 +6,7 @@ license = "AGPL-3.0-only"
 repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tributary-sdk"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 edition = "2021"
-rust-version = "1.85"
+rust-version = "1.81"

 [package.metadata.docs.rs]
 all-features = true
@@ -24,19 +24,18 @@ zeroize = { version = "^1.5", default-features = false, features = ["std"] }
 rand = { version = "0.8", default-features = false, features = ["std"] }
 rand_chacha = { version = "0.3", default-features = false, features = ["std"] }

-blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }
+blake2 = { version = "0.10", default-features = false, features = ["std"] }

 transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["std", "recommended"] }
-ciphersuite = { path = "../../crypto/ciphersuite", version = "0.4", default-features = false, features = ["std"] }
-dalek-ff-group = { path = "../../crypto/dalek-ff-group", default-features = false, features = ["std"] }
-schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", version = "0.5", default-features = false, features = ["std", "aggregate"] }
+ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", version = "0.4", default-features = false, features = ["std", "ristretto"] }
+schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", version = "0.5", default-features = false, features = ["std"] }

 hex = { version = "0.4", default-features = false, features = ["std"] }
 log = { version = "0.4", default-features = false, features = ["std"] }

 serai-db = { path = "../../common/db", version = "0.1" }

-borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
+scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }

 futures-util = { version = "0.3", default-features = false, features = ["std", "sink", "channel"] }
 futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }
 tendermint = { package = "tendermint-machine", path = "./tendermint", version = "0.2" }

@@ -1,6 +1,6 @@
 AGPL-3.0-only license

-Copyright (c) 2023-2025 Luke Parker
+Copyright (c) 2023 Luke Parker

 This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU Affero General Public License Version 3 as

@@ -1,11 +1,10 @@
 use std::collections::{VecDeque, HashSet};

-use dalek_ff_group::Ristretto;
-use ciphersuite::{group::GroupEncoding, *};
+use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};

 use serai_db::{Get, DbTxn, Db};

-use borsh::BorshDeserialize;
+use scale::Decode;

 use tendermint::ext::{Network, Commit};
@@ -21,7 +20,7 @@ pub(crate) struct Blockchain<D: Db, T: TransactionTrait> {
   block_number: u64,
   tip: [u8; 32],

-  participants: HashSet<[u8; 32]>,
+  participants: HashSet<<Ristretto as Ciphersuite>::G>,

   provided: ProvidedTransactions<D, T>,
   mempool: Mempool<D, T>,
@@ -56,28 +55,25 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
   }

   fn next_nonce_key(
     genesis: &[u8; 32],
-    signer: &<Ristretto as WrappedGroup>::G,
+    signer: &<Ristretto as Ciphersuite>::G,
     order: &[u8],
   ) -> Vec<u8> {
     D::key(
       b"tributary_blockchain",
       b"next_nonce",
-      [genesis.as_slice(), signer.to_bytes().as_slice(), order].concat(),
+      [genesis.as_ref(), signer.to_bytes().as_ref(), order].concat(),
     )
   }

   pub(crate) fn new(
     db: D,
     genesis: [u8; 32],
-    participants: &[<Ristretto as WrappedGroup>::G],
+    participants: &[<Ristretto as Ciphersuite>::G],
   ) -> Self {
     let mut res = Self {
       db: Some(db.clone()),
       genesis,
-      participants: participants
-        .iter()
-        .map(<<Ristretto as WrappedGroup>::G as GroupEncoding>::to_bytes)
-        .collect(),
+      participants: participants.iter().copied().collect(),

       block_number: 0,
       tip: genesis,
@@ -109,7 +105,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
   pub(crate) fn block_from_db(db: &D, genesis: [u8; 32], block: &[u8; 32]) -> Option<Block<T>> {
     db.get(Self::block_key(&genesis, block))
-      .map(|bytes| Block::<T>::read::<&[u8]>(&mut bytes.as_slice()).unwrap())
+      .map(|bytes| Block::<T>::read::<&[u8]>(&mut bytes.as_ref()).unwrap())
   }

   pub(crate) fn commit_from_db(db: &D, genesis: [u8; 32], block: &[u8; 32]) -> Option<Vec<u8>> {
@@ -169,14 +165,14 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
       // we must have a commit per valid hash
       let commit = Self::commit_from_db(db, genesis, &hash).unwrap();
       // commit has to be valid if it is coming from our db
-      Some(Commit::<N::SignatureScheme>::deserialize_reader(&mut commit.as_slice()).unwrap())
+      Some(Commit::<N::SignatureScheme>::decode(&mut commit.as_ref()).unwrap())
     };
     let unsigned_in_chain =
       |hash: [u8; 32]| db.get(Self::unsigned_included_key(&self.genesis, &hash)).is_some();

     self.mempool.add::<N, _>(
       |signer, order| {
-        if self.participants.contains(&signer.to_bytes()) {
+        if self.participants.contains(&signer) {
           Some(
             db.get(Self::next_nonce_key(&self.genesis, &signer, &order))
               .map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())),
@@ -199,13 +195,13 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
   pub(crate) fn next_nonce(
     &self,
-    signer: &<Ristretto as WrappedGroup>::G,
+    signer: &<Ristretto as Ciphersuite>::G,
     order: &[u8],
   ) -> Option<u32> {
     if let Some(next_nonce) = self.mempool.next_nonce_in_mempool(signer, order.to_vec()) {
       return Some(next_nonce);
     }
-    if self.participants.contains(&signer.to_bytes()) {
+    if self.participants.contains(signer) {
       Some(
         self
           .db
@@ -244,7 +240,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
     let commit = |block: u64| -> Option<Commit<N::SignatureScheme>> {
       let commit = self.commit_by_block_number(block)?;
       // commit has to be valid if it is coming from our db
-      Some(Commit::<N::SignatureScheme>::deserialize_reader(&mut commit.as_slice()).unwrap())
+      Some(Commit::<N::SignatureScheme>::decode(&mut commit.as_ref()).unwrap())
     };

     let mut txn_db = db.clone();
@@ -254,7 +250,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
       self.tip,
       self.provided.transactions.clone(),
       &mut |signer, order| {
-        if self.participants.contains(&signer.to_bytes()) {
+        if self.participants.contains(signer) {
           let key = Self::next_nonce_key(&self.genesis, signer, order);
           let next = txn
             .get(&key)
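Editor's note: this file's change swaps the participant set from compressed 32-byte encodings to the group elements themselves, so membership checks no longer re-encode the signer. A minimal sketch of the two lookup styles, with a hypothetical `Point` type standing in for the Ristretto group element:

```rust
use std::collections::HashSet;

// Hypothetical stand-in for a curve point; keying a HashSet by it requires Eq + Hash
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct Point([u8; 32]);

impl Point {
  fn to_bytes(self) -> [u8; 32] {
    self.0
  }
}

fn main() {
  let signer = Point([7; 32]);
  // Old style: store the encodings, re-encoding the signer on every lookup
  let by_bytes: HashSet<[u8; 32]> = [signer.to_bytes()].into();
  assert!(by_bytes.contains(&signer.to_bytes()));
  // New style: store the elements themselves and compare directly
  let by_point: HashSet<Point> = [signer].into();
  assert!(by_point.contains(&signer));
}
```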

Some files were not shown because too many files have changed in this diff.