1 commit

Author: Luke Parker
SHA1: e7e8fd6388
Message: Move ff-group-tests to ff 0.14.0-pre.0
Date: 2025-07-12 03:32:40 -04:00

871 changed files with 53612 additions and 24571 deletions

.github/LICENSE

@@ -1,6 +1,6 @@
 MIT License
-Copyright (c) 2022-2025 Luke Parker
+Copyright (c) 2022-2023 Luke Parker
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal


@@ -5,14 +5,14 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: "30.0"
+    default: "27.0"
 runs:
   using: "composite"
   steps:
     - name: Bitcoin Daemon Cache
       id: cache-bitcoind
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
+      uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
       with:
         path: bitcoin.tar.gz
         key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}


@@ -7,20 +7,13 @@ runs:
     - name: Remove unused packages
       shell: bash
       run: |
-        # Ensure the repositories are synced
-        sudo apt update -y
-        # Actually perform the removals
-        sudo apt remove -y "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
+        sudo apt remove -y "*msbuild*" "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
         sudo apt remove -y "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*"
         sudo apt remove -y "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*"
-        sudo apt remove -y --allow-remove-essential -f shim-signed *python3*
-        # This removal command requires the prior removals due to unmet dependencies otherwise
         sudo apt remove -y "*qemu*" "*sql*" "*texinfo*" "*imagemagick*"
-        # Reinstall python3 as a general dependency of a functional operating system
-        sudo apt install -y python3 --fix-missing
+        sudo apt autoremove -y
+        sudo apt clean
+        docker system prune -a --volumes
       if: runner.os == 'Linux'
     - name: Remove unused packages
@@ -38,48 +31,19 @@ runs:
       shell: bash
       run: |
         if [ "$RUNNER_OS" == "Linux" ]; then
-          sudo apt install -y ca-certificates protobuf-compiler libclang-dev
+          sudo apt install -y ca-certificates protobuf-compiler
         elif [ "$RUNNER_OS" == "Windows" ]; then
           choco install protoc
         elif [ "$RUNNER_OS" == "macOS" ]; then
-          brew install protobuf llvm
-          HOMEBREW_ROOT_PATH=/opt/homebrew # Apple Silicon
-          if [ $(uname -m) = "x86_64" ]; then HOMEBREW_ROOT_PATH=/usr/local; fi # Intel
-          ls $HOMEBREW_ROOT_PATH/opt/llvm/lib | grep "libclang.dylib" # Make sure this installed `libclang`
-          echo "DYLD_LIBRARY_PATH=$HOMEBREW_ROOT_PATH/opt/llvm/lib:$DYLD_LIBRARY_PATH" >> "$GITHUB_ENV"
+          brew install protobuf
         fi
     - name: Install solc
       shell: bash
       run: |
-        cargo +1.91 install svm-rs --version =0.5.19
-        svm install 0.8.29
-        svm use 0.8.29
+        cargo install svm-rs
+        svm install 0.8.26
+        svm use 0.8.26
-    - name: Remove preinstalled Docker
-      shell: bash
-      run: |
-        docker system prune -a --volumes
-        sudo apt remove -y *docker*
-        # Install uidmap which will be required for the explicitly installed Docker
-        sudo apt install uidmap
-      if: runner.os == 'Linux'
-    - name: Update system dependencies
-      shell: bash
-      run: |
-        sudo apt update -y
-        sudo apt upgrade -y
-        sudo apt autoremove -y
-        sudo apt clean
-      if: runner.os == 'Linux'
-    - name: Install rootless Docker
-      uses: docker/setup-docker-action@b60f85385d03ac8acfca6d9996982511d8620a19
-      with:
-        rootless: true
-        set-host: true
-      if: runner.os == 'Linux'
     # - name: Cache Rust
     #   uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43


@@ -5,14 +5,14 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: v0.18.4.3
+    default: v0.18.3.4
 runs:
   using: "composite"
   steps:
     - name: Monero Wallet RPC Cache
       id: cache-monero-wallet-rpc
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
+      uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
       with:
         path: monero-wallet-rpc
         key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}


@@ -5,14 +5,14 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: v0.18.4.3
+    default: v0.18.3.4
 runs:
   using: "composite"
   steps:
     - name: Monero Daemon Cache
       id: cache-monerod
-      uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
+      uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
       with:
         path: /usr/bin/monerod
         key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}


@@ -5,12 +5,12 @@ inputs:
   monero-version:
     description: "Monero version to download and run as a regtest node"
     required: false
-    default: v0.18.4.3
+    default: v0.18.3.4
   bitcoin-version:
     description: "Bitcoin version to download and run as a regtest node"
     required: false
-    default: "30.0"
+    default: "27.1"
 runs:
   using: "composite"


@@ -1 +1 @@
-nightly-2025-11-11
+nightly-2025-02-01
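This pin is what the other workflows read back out into a step output (see the deleted `Get nightly version to use` step in the no-std workflow further down). A minimal local equivalent, assuming a checkout at the repository root — a sketch, not part of the diff:

    # Read the pinned nightly (e.g. nightly-2025-02-01) out of the repository
    version=$(cat .github/nightly-version)
    # Install it with a minimal profile, then invoke cargo through that toolchain
    rustup toolchain install "$version" --profile minimal
    cargo +"$version" --version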


@@ -32,17 +32,13 @@ jobs:
           -p dalek-ff-group \
           -p minimal-ed448 \
           -p ciphersuite \
-          -p ciphersuite-kp256 \
           -p multiexp \
           -p schnorr-signatures \
-          -p prime-field \
-          -p short-weierstrass \
-          -p secq256k1 \
-          -p embedwards25519 \
+          -p dleq \
+          -p generalized-bulletproofs \
+          -p generalized-bulletproofs-circuit-abstraction \
+          -p ec-divisors \
+          -p generalized-bulletproofs-ec-gadgets \
           -p dkg \
-          -p dkg-recovery \
-          -p dkg-dealer \
-          -p dkg-musig \
-          -p dkg-evrf \
           -p modular-frost \
           -p frost-schnorrkel


@@ -12,13 +12,13 @@ jobs:
       - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Advisory Cache
-        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
+        uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
         with:
           path: ~/.cargo/advisory-db
           key: rust-advisory-db
       - name: Install cargo deny
-        run: cargo +1.91 install cargo-deny --version =0.18.5
+        run: cargo install --locked cargo-deny
       - name: Run cargo deny
-        run: cargo deny -L error --all-features check --hide-inclusion-graph
+        run: cargo deny -L error --all-features check


@@ -11,7 +11,7 @@ jobs:
   clippy:
     strategy:
       matrix:
-        os: [ubuntu-latest, macos-15-intel, macos-latest, windows-latest]
+        os: [ubuntu-latest, macos-13, macos-14, windows-latest]
     runs-on: ${{ matrix.os }}
     steps:
@@ -26,7 +26,7 @@ jobs:
         uses: ./.github/actions/build-dependencies
       - name: Install nightly rust
-        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c clippy
+        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32-unknown-unknown -c clippy
       - name: Run Clippy
         run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module
@@ -46,16 +46,16 @@ jobs:
       - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Advisory Cache
-        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
+        uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
         with:
           path: ~/.cargo/advisory-db
           key: rust-advisory-db
       - name: Install cargo deny
-        run: cargo +1.91 install cargo-deny --version =0.18.5
+        run: cargo install --locked cargo-deny
       - name: Run cargo deny
-        run: cargo deny -L error --all-features check --hide-inclusion-graph
+        run: cargo deny -L error --all-features check
   fmt:
     runs-on: ubuntu-latest
@@ -88,114 +88,19 @@ jobs:
       - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Verify all dependencies are in use
         run: |
-          cargo +1.91 install cargo-machete --version =0.9.1
-          cargo +1.91 machete
+          cargo install cargo-machete
+          cargo machete
-  msrv:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
-      - name: Verify claimed `rust-version`
-        shell: bash
-        run: |
-          cargo +1.91 install cargo-msrv --version =0.18.4
-          function check_msrv {
-            # We `cd` into the directory passed as the first argument, but will return to the
-            # directory called from.
-            return_to=$(pwd)
-            echo "Checking $1"
-            cd $1
-            # We then find the existing `rust-version` using `grep` (for the right line) and then a
-            # regex (to strip to just the major and minor version).
-            existing=$(cat ./Cargo.toml | grep "rust-version" | grep -Eo "[0-9]+\.[0-9]+")
-            # We then backup the `Cargo.toml`, allowing us to restore it after, saving time on future
-            # MSRV checks (as they'll benefit from immediately exiting if the queried version is less
-            # than the declared MSRV).
-            mv ./Cargo.toml ./Cargo.toml.bak
-            # We then use an inverted (`-v`) grep to remove the existing `rust-version` from the
-            # `Cargo.toml`, as required because else earlier versions of Rust won't even attempt to
-            # compile this crate.
-            cat ./Cargo.toml.bak | grep -v "rust-version" > Cargo.toml
-            # We then find the actual `rust-version` using `cargo-msrv` (again stripping to just the
-            # major and minor version).
-            actual=$(cargo msrv find --output-format minimal | grep -Eo "^[0-9]+\.[0-9]+")
-            # Finally, we compare the two.
-            echo "Declared rust-version: $existing"
-            echo "Actual rust-version: $actual"
-            [ $existing == $actual ]
-            result=$?
-            # Restore the original `Cargo.toml`.
-            rm Cargo.toml
-            mv ./Cargo.toml.bak ./Cargo.toml
-            # Return to the directory called from and return the result.
-            cd $return_to
-            return $result
-          }
-          # Check each member of the workspace
-          function check_workspace {
-            # Get the members array from the workspace's `Cargo.toml`
-            cargo_toml_lines=$(cat ./Cargo.toml | wc -l)
-            # Keep all lines after the start of the array, then keep all lines before the next "]"
-            members=$(cat Cargo.toml | grep "members\ \=\ \[" -m1 -A$cargo_toml_lines | grep "]" -m1 -B$cargo_toml_lines)
-            # Parse out any comments, whitespace, including comments post-fixed on the same line as an entry
-            # We accomplish the latter by pruning all characters after the entry's ","
-            members=$(echo "$members" | grep -Ev "^[[:space:]]*(#|$)" | awk -F',' '{print $1","}')
-            # Replace the first line, which was "members = [" and is now "members = [,", with "["
-            members=$(echo "$members" | sed "1s/.*/\[/")
-            # Correct the last line, which was malleated to "],"
-            members=$(echo "$members" | sed "$(echo "$members" | wc -l)s/\]\,/\]/")
-            # Don't check the following
-            # Most of these are binaries, with the exception of the Substrate runtime which has a
-            # bespoke build pipeline
-            members=$(echo "$members" | grep -v "networks/ethereum/relayer\"")
-            members=$(echo "$members" | grep -v "message-queue\"")
-            members=$(echo "$members" | grep -v "processor/bin\"")
-            members=$(echo "$members" | grep -v "processor/bitcoin\"")
-            members=$(echo "$members" | grep -v "processor/ethereum\"")
-            members=$(echo "$members" | grep -v "processor/monero\"")
-            members=$(echo "$members" | grep -v "coordinator\"")
-            members=$(echo "$members" | grep -v "substrate/runtime\"")
-            members=$(echo "$members" | grep -v "substrate/node\"")
-            members=$(echo "$members" | grep -v "orchestration\"")
-            # Don't check the tests
-            members=$(echo "$members" | grep -v "mini\"")
-            members=$(echo "$members" | grep -v "tests/")
-            # Remove the trailing comma by replacing the last line's "," with ""
-            members=$(echo "$members" | sed "$(($(echo "$members" | wc -l) - 1))s/\,//")
-            echo $members | jq -r ".[]" | while read -r member; do
-              check_msrv $member
-              correct=$?
-              if [ $correct -ne 0 ]; then
-                return $correct
-              fi
-            done
-          }
-          check_workspace
   slither:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
-      - name: Build Dependencies
-        uses: ./.github/actions/build-dependencies
       - name: Slither
         run: |
-          python3 -m pip install solc-select
-          solc-select install 0.8.26
-          solc-select use 0.8.26
           python3 -m pip install slither-analyzer
           slither --include-paths ./networks/ethereum/schnorr/contracts/Schnorr.sol
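The removed `msrv` job's core trick — drop the declared `rust-version`, let `cargo-msrv` search for the real minimum, then compare the two — can be spot-checked locally for a single crate. A minimal sketch, assuming `cargo-msrv` is installed and using `common/db` as an arbitrary example crate:

    cd common/db
    # The declared MSRV, stripped to major.minor
    declared=$(grep "rust-version" Cargo.toml | grep -Eo "[0-9]+\.[0-9]+")
    # Drop `rust-version` so older toolchains will still attempt the build
    mv Cargo.toml Cargo.toml.bak
    grep -v "rust-version" Cargo.toml.bak > Cargo.toml
    # The MSRV cargo-msrv actually finds, stripped the same way
    actual=$(cargo msrv find --output-format minimal | grep -Eo "^[0-9]+\.[0-9]+")
    mv Cargo.toml.bak Cargo.toml
    [ "$declared" == "$actual" ] || echo "MSRV mismatch: declared $declared, found $actual"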

.github/workflows/monero-tests.yaml (new file)

@@ -0,0 +1,72 @@
name: Monero Tests

on:
  push:
    branches:
      - develop
    paths:
      - "networks/monero/**"
      - "processor/**"
  pull_request:
    paths:
      - "networks/monero/**"
      - "processor/**"
  workflow_dispatch:

jobs:
  # Only run these once since they will be consistent regardless of any node
  unit-tests:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
      - name: Test Dependencies
        uses: ./.github/actions/test-dependencies
      - name: Run Unit Tests Without Features
        run: |
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-io --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-generators --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-primitives --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-mlsag --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-clsag --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-borromean --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-bulletproofs --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-rpc --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-address --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --lib
      # Doesn't run unit tests with features as the tests workflow will

  integration-tests:
    runs-on: ubuntu-latest
    # Test against all supported protocol versions
    strategy:
      matrix:
        version: [v0.17.3.2, v0.18.3.4]
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
      - name: Test Dependencies
        uses: ./.github/actions/test-dependencies
        with:
          monero-version: ${{ matrix.version }}
      - name: Run Integration Tests Without Features
        run: |
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --test '*'
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --test '*'
      - name: Run Integration Tests
        # Don't run if the tests workflow also will
        if: ${{ matrix.version != 'v0.18.3.4' }}
        run: |
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --all-features --test '*'
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --all-features --test '*'
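The split between the two jobs rests on cargo's test-target selection: `--lib` runs only a crate's unit tests, while `--test '*'` runs every integration-test target (which here expects the regtest node the test-dependencies action sets up). For a single crate, the two invocations used above are:

    # Unit tests only (no node required)
    GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --lib
    # Integration tests only (expects a running regtest node)
    GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --test '*'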

.github/workflows/msrv.yml (new file)

@@ -0,0 +1,259 @@
name: Weekly MSRV Check
on:
schedule:
- cron: "0 0 * * 0"
workflow_dispatch:
jobs:
msrv-common:
name: Run cargo msrv on common
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install cargo msrv
run: cargo install --locked cargo-msrv
- name: Run cargo msrv on common
run: |
cargo msrv verify --manifest-path common/zalloc/Cargo.toml
cargo msrv verify --manifest-path common/std-shims/Cargo.toml
cargo msrv verify --manifest-path common/env/Cargo.toml
cargo msrv verify --manifest-path common/db/Cargo.toml
cargo msrv verify --manifest-path common/task/Cargo.toml
cargo msrv verify --manifest-path common/request/Cargo.toml
cargo msrv verify --manifest-path common/patchable-async-sleep/Cargo.toml
msrv-crypto:
name: Run cargo msrv on crypto
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install cargo msrv
run: cargo install --locked cargo-msrv
- name: Run cargo msrv on crypto
run: |
cargo msrv verify --manifest-path crypto/transcript/Cargo.toml
cargo msrv verify --manifest-path crypto/ff-group-tests/Cargo.toml
cargo msrv verify --manifest-path crypto/dalek-ff-group/Cargo.toml
cargo msrv verify --manifest-path crypto/ed448/Cargo.toml
cargo msrv verify --manifest-path crypto/multiexp/Cargo.toml
cargo msrv verify --manifest-path crypto/dleq/Cargo.toml
cargo msrv verify --manifest-path crypto/ciphersuite/Cargo.toml
cargo msrv verify --manifest-path crypto/schnorr/Cargo.toml
cargo msrv verify --manifest-path crypto/evrf/generalized-bulletproofs/Cargo.toml
cargo msrv verify --manifest-path crypto/evrf/circuit-abstraction/Cargo.toml
cargo msrv verify --manifest-path crypto/evrf/divisors/Cargo.toml
cargo msrv verify --manifest-path crypto/evrf/ec-gadgets/Cargo.toml
cargo msrv verify --manifest-path crypto/evrf/embedwards25519/Cargo.toml
cargo msrv verify --manifest-path crypto/evrf/secq256k1/Cargo.toml
cargo msrv verify --manifest-path crypto/dkg/Cargo.toml
cargo msrv verify --manifest-path crypto/frost/Cargo.toml
cargo msrv verify --manifest-path crypto/schnorrkel/Cargo.toml
msrv-networks:
name: Run cargo msrv on networks
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install cargo msrv
run: cargo install --locked cargo-msrv
- name: Run cargo msrv on networks
run: |
cargo msrv verify --manifest-path networks/bitcoin/Cargo.toml
cargo msrv verify --manifest-path networks/ethereum/build-contracts/Cargo.toml
cargo msrv verify --manifest-path networks/ethereum/schnorr/Cargo.toml
cargo msrv verify --manifest-path networks/ethereum/alloy-simple-request-transport/Cargo.toml
cargo msrv verify --manifest-path networks/ethereum/relayer/Cargo.toml --features parity-db
cargo msrv verify --manifest-path networks/monero/io/Cargo.toml
cargo msrv verify --manifest-path networks/monero/generators/Cargo.toml
cargo msrv verify --manifest-path networks/monero/primitives/Cargo.toml
cargo msrv verify --manifest-path networks/monero/ringct/mlsag/Cargo.toml
cargo msrv verify --manifest-path networks/monero/ringct/clsag/Cargo.toml
cargo msrv verify --manifest-path networks/monero/ringct/borromean/Cargo.toml
cargo msrv verify --manifest-path networks/monero/ringct/bulletproofs/Cargo.toml
cargo msrv verify --manifest-path networks/monero/Cargo.toml
cargo msrv verify --manifest-path networks/monero/rpc/Cargo.toml
cargo msrv verify --manifest-path networks/monero/rpc/simple-request/Cargo.toml
cargo msrv verify --manifest-path networks/monero/wallet/address/Cargo.toml
cargo msrv verify --manifest-path networks/monero/wallet/Cargo.toml
cargo msrv verify --manifest-path networks/monero/verify-chain/Cargo.toml
msrv-message-queue:
name: Run cargo msrv on message-queue
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install cargo msrv
run: cargo install --locked cargo-msrv
- name: Run cargo msrv on message-queue
run: |
cargo msrv verify --manifest-path message-queue/Cargo.toml --features parity-db
msrv-processor:
name: Run cargo msrv on processor
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install cargo msrv
run: cargo install --locked cargo-msrv
- name: Run cargo msrv on processor
run: |
cargo msrv verify --manifest-path processor/view-keys/Cargo.toml
cargo msrv verify --manifest-path processor/primitives/Cargo.toml
cargo msrv verify --manifest-path processor/messages/Cargo.toml
cargo msrv verify --manifest-path processor/scanner/Cargo.toml
cargo msrv verify --manifest-path processor/scheduler/primitives/Cargo.toml
cargo msrv verify --manifest-path processor/scheduler/smart-contract/Cargo.toml
cargo msrv verify --manifest-path processor/scheduler/utxo/primitives/Cargo.toml
cargo msrv verify --manifest-path processor/scheduler/utxo/standard/Cargo.toml
cargo msrv verify --manifest-path processor/scheduler/utxo/transaction-chaining/Cargo.toml
cargo msrv verify --manifest-path processor/key-gen/Cargo.toml
cargo msrv verify --manifest-path processor/frost-attempt-manager/Cargo.toml
cargo msrv verify --manifest-path processor/signers/Cargo.toml
cargo msrv verify --manifest-path processor/bin/Cargo.toml --features parity-db
cargo msrv verify --manifest-path processor/bitcoin/Cargo.toml
cargo msrv verify --manifest-path processor/ethereum/primitives/Cargo.toml
cargo msrv verify --manifest-path processor/ethereum/test-primitives/Cargo.toml
cargo msrv verify --manifest-path processor/ethereum/erc20/Cargo.toml
cargo msrv verify --manifest-path processor/ethereum/deployer/Cargo.toml
cargo msrv verify --manifest-path processor/ethereum/router/Cargo.toml
cargo msrv verify --manifest-path processor/ethereum/Cargo.toml
cargo msrv verify --manifest-path processor/monero/Cargo.toml
msrv-coordinator:
name: Run cargo msrv on coordinator
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install cargo msrv
run: cargo install --locked cargo-msrv
- name: Run cargo msrv on coordinator
run: |
cargo msrv verify --manifest-path coordinator/tributary-sdk/tendermint/Cargo.toml
cargo msrv verify --manifest-path coordinator/tributary-sdk/Cargo.toml
cargo msrv verify --manifest-path coordinator/cosign/Cargo.toml
cargo msrv verify --manifest-path coordinator/substrate/Cargo.toml
cargo msrv verify --manifest-path coordinator/tributary/Cargo.toml
cargo msrv verify --manifest-path coordinator/p2p/Cargo.toml
cargo msrv verify --manifest-path coordinator/p2p/libp2p/Cargo.toml
cargo msrv verify --manifest-path coordinator/Cargo.toml
msrv-substrate:
name: Run cargo msrv on substrate
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install cargo msrv
run: cargo install --locked cargo-msrv
- name: Run cargo msrv on substrate
run: |
cargo msrv verify --manifest-path substrate/primitives/Cargo.toml
cargo msrv verify --manifest-path substrate/coins/primitives/Cargo.toml
cargo msrv verify --manifest-path substrate/coins/pallet/Cargo.toml
cargo msrv verify --manifest-path substrate/dex/pallet/Cargo.toml
cargo msrv verify --manifest-path substrate/economic-security/pallet/Cargo.toml
cargo msrv verify --manifest-path substrate/genesis-liquidity/primitives/Cargo.toml
cargo msrv verify --manifest-path substrate/genesis-liquidity/pallet/Cargo.toml
cargo msrv verify --manifest-path substrate/in-instructions/primitives/Cargo.toml
cargo msrv verify --manifest-path substrate/in-instructions/pallet/Cargo.toml
cargo msrv verify --manifest-path substrate/validator-sets/pallet/Cargo.toml
cargo msrv verify --manifest-path substrate/validator-sets/primitives/Cargo.toml
cargo msrv verify --manifest-path substrate/emissions/primitives/Cargo.toml
cargo msrv verify --manifest-path substrate/emissions/pallet/Cargo.toml
cargo msrv verify --manifest-path substrate/signals/primitives/Cargo.toml
cargo msrv verify --manifest-path substrate/signals/pallet/Cargo.toml
cargo msrv verify --manifest-path substrate/abi/Cargo.toml
cargo msrv verify --manifest-path substrate/client/Cargo.toml
cargo msrv verify --manifest-path substrate/runtime/Cargo.toml
cargo msrv verify --manifest-path substrate/node/Cargo.toml
msrv-orchestration:
name: Run cargo msrv on orchestration
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install cargo msrv
run: cargo install --locked cargo-msrv
- name: Run cargo msrv on message-queue
run: |
cargo msrv verify --manifest-path orchestration/Cargo.toml
msrv-mini:
name: Run cargo msrv on mini
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install cargo msrv
run: cargo install --locked cargo-msrv
- name: Run cargo msrv on mini
run: |
cargo msrv verify --manifest-path mini/Cargo.toml
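Every job here relies on `cargo msrv verify`, which checks that a crate actually builds with the `rust-version` declared in its manifest (unlike `cargo msrv find`, used by the deleted lint job above, which searches for the true minimum). A local spot-check of a single crate — a sketch, with `common/db` chosen arbitrarily:

    cargo install --locked cargo-msrv
    # Fails if the crate does not compile with its declared rust-version
    cargo msrv verify --manifest-path common/db/Cargo.toml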


@@ -34,3 +34,16 @@ jobs:
           -p ethereum-schnorr-contract \
           -p alloy-simple-request-transport \
           -p serai-ethereum-relayer \
+          -p monero-io \
+          -p monero-generators \
+          -p monero-primitives \
+          -p monero-mlsag \
+          -p monero-clsag \
+          -p monero-borromean \
+          -p monero-bulletproofs \
+          -p monero-serai \
+          -p monero-rpc \
+          -p monero-simple-request-rpc \
+          -p monero-address \
+          -p monero-wallet \
+          -p monero-serai-verify-chain


@@ -28,18 +28,8 @@ jobs:
       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies
-      - name: Get nightly version to use
-        id: nightly
-        shell: bash
-        run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT
       - name: Install RISC-V Toolchain
-        run: |
-          sudo apt update
-          sudo apt install -y gcc-riscv64-unknown-elf gcc-multilib
-          rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal --component rust-src --target riscv32imac-unknown-none-elf
+        run: sudo apt update && sudo apt install -y gcc-riscv64-unknown-elf gcc-multilib && rustup target add riscv32imac-unknown-none-elf
       - name: Verify no-std builds
-        run: |
-          CFLAGS=-I/usr/include cargo +${{ steps.nightly.outputs.version }} build --target riscv32imac-unknown-none-elf -Z build-std=core -p serai-no-std-tests
-          CFLAGS=-I/usr/include cargo +${{ steps.nightly.outputs.version }} build --target riscv32imac-unknown-none-elf -Z build-std=core,alloc -p serai-no-std-tests --features "alloc"
+        run: CFLAGS=-I/usr/include cargo build --target riscv32imac-unknown-none-elf -p serai-no-std-tests


@@ -46,16 +46,16 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+        uses: actions/checkout@v3
       - name: Setup Ruby
-        uses: ruby/setup-ruby@44511735964dcb71245e7e55f72539531f7bc0eb
+        uses: ruby/setup-ruby@v1
         with:
           bundler-cache: true
           cache-version: 0
           working-directory: "${{ github.workspace }}/docs"
       - name: Setup Pages
         id: pages
-        uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b
+        uses: actions/configure-pages@v3
       - name: Build with Jekyll
         run: cd ${{ github.workspace }}/docs && bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
         env:
@@ -69,12 +69,12 @@ jobs:
         uses: ./.github/actions/build-dependencies
       - name: Build Rust docs
         run: |
-          rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-docs
-          RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --no-deps --all-features
+          rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32-unknown-unknown -c rust-docs
+          RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --all-features
           mv target/doc docs/_site/rust
       - name: Upload artifact
-        uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b
+        uses: actions/upload-pages-artifact@v3
         with:
           path: "docs/_site/"
@@ -88,4 +88,4 @@ jobs:
     steps:
       - name: Deploy to GitHub Pages
         id: deployment
-        uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e
+        uses: actions/deploy-pages@v4


@@ -61,7 +61,6 @@ jobs:
           -p serai-monero-processor \
           -p tendermint-machine \
           -p tributary-sdk \
-          -p serai-cosign-types \
           -p serai-cosign \
           -p serai-coordinator-substrate \
           -p serai-coordinator-tributary \
@@ -83,20 +82,23 @@ jobs:
         run: |
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
             -p serai-primitives \
-            -p serai-abi \
-            -p substrate-median \
-            -p serai-core-pallet \
+            -p serai-coins-primitives \
             -p serai-coins-pallet \
-            -p serai-validator-sets-pallet \
-            -p serai-signals-pallet \
             -p serai-dex-pallet \
+            -p serai-validator-sets-primitives \
+            -p serai-validator-sets-pallet \
+            -p serai-genesis-liquidity-primitives \
             -p serai-genesis-liquidity-pallet \
-            -p serai-economic-security-pallet \
+            -p serai-emissions-primitives \
             -p serai-emissions-pallet \
+            -p serai-economic-security-pallet \
+            -p serai-in-instructions-primitives \
             -p serai-in-instructions-pallet \
+            -p serai-signals-primitives \
+            -p serai-signals-pallet \
+            -p serai-abi \
             -p serai-runtime \
-            -p serai-node \
-            -p serai-substrate-tests
+            -p serai-node
   test-serai-client:
     runs-on: ubuntu-latest
@@ -107,9 +109,4 @@ jobs:
         uses: ./.github/actions/build-dependencies
       - name: Run Tests
-        run: |
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-serai
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-bitcoin
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-ethereum
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-monero
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client
+        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client

.gitignore

@@ -1,13 +1,7 @@
 target
-
-# Don't commit any `Cargo.lock` which aren't the workspace's
-Cargo.lock
-!/Cargo.lock
-
-# Don't commit any `Dockerfile`, as they're auto-generated, except the only one which isn't
 Dockerfile
-Dockerfile.fast-epoch
 !orchestration/runtime/Dockerfile
 .test-logs
 .vscode

Cargo.lock (generated)

File diff suppressed because it is too large.


@@ -1,6 +1,20 @@
 [workspace]
 resolver = "2"
 members = [
+  # Version patches
+  "patches/parking_lot_core",
+  "patches/parking_lot",
+  "patches/zstd",
+  "patches/rocksdb",
+
+  # std patches
+  "patches/matches",
+  "patches/is-terminal",
+
+  # Rewrites/redirects
+  "patches/option-ext",
+  "patches/directories-next",
+
   "common/std-shims",
   "common/zalloc",
   "common/patchable-async-sleep",
@@ -15,21 +29,19 @@ members = [
   "crypto/dalek-ff-group",
   "crypto/ed448",
   "crypto/ciphersuite",
-  "crypto/ciphersuite/kp256",
   "crypto/multiexp",
   "crypto/schnorr",
-  "crypto/prime-field",
-  "crypto/short-weierstrass",
-  "crypto/secq256k1",
-  "crypto/embedwards25519",
+  "crypto/dleq",
+  "crypto/evrf/secq256k1",
+  "crypto/evrf/embedwards25519",
+  "crypto/evrf/generalized-bulletproofs",
+  "crypto/evrf/circuit-abstraction",
+  "crypto/evrf/divisors",
+  "crypto/evrf/ec-gadgets",
   "crypto/dkg",
-  "crypto/dkg/recovery",
-  "crypto/dkg/dealer",
-  "crypto/dkg/musig",
-  "crypto/dkg/evrf",
   "crypto/frost",
   "crypto/schnorrkel",
@@ -40,6 +52,20 @@ members = [
   "networks/ethereum/alloy-simple-request-transport",
   "networks/ethereum/relayer",
+
+  "networks/monero/io",
+  "networks/monero/generators",
+  "networks/monero/primitives",
+  "networks/monero/ringct/mlsag",
+  "networks/monero/ringct/clsag",
+  "networks/monero/ringct/borromean",
+  "networks/monero/ringct/bulletproofs",
+  "networks/monero",
+  "networks/monero/rpc",
+  "networks/monero/rpc/simple-request",
+  "networks/monero/wallet/address",
+  "networks/monero/wallet",
+  "networks/monero/verify-chain",
   "message-queue",
   "processor/messages",
@@ -62,14 +88,13 @@ members = [
   "processor/ethereum/primitives",
   "processor/ethereum/test-primitives",
   "processor/ethereum/deployer",
-  "processor/ethereum/erc20",
   "processor/ethereum/router",
+  "processor/ethereum/erc20",
   "processor/ethereum",
   "processor/monero",
   "coordinator/tributary-sdk/tendermint",
   "coordinator/tributary-sdk",
-  "coordinator/cosign/types",
   "coordinator/cosign",
   "coordinator/substrate",
   "coordinator/tributary",
@@ -78,27 +103,34 @@ members = [
   "coordinator",
   "substrate/primitives",
+  "substrate/coins/primitives",
+  "substrate/coins/pallet",
+  "substrate/dex/pallet",
+  "substrate/validator-sets/primitives",
+  "substrate/validator-sets/pallet",
+  "substrate/genesis-liquidity/primitives",
+  "substrate/genesis-liquidity/pallet",
+  "substrate/emissions/primitives",
+  "substrate/emissions/pallet",
+  "substrate/economic-security/pallet",
+  "substrate/in-instructions/primitives",
+  "substrate/in-instructions/pallet",
+  "substrate/signals/primitives",
+  "substrate/signals/pallet",
   "substrate/abi",
-  "substrate/median",
-  "substrate/core",
-  "substrate/coins",
-  "substrate/validator-sets",
-  "substrate/signals",
-  "substrate/dex",
-  "substrate/genesis-liquidity",
-  "substrate/economic-security",
-  "substrate/emissions",
-  "substrate/in-instructions",
   "substrate/runtime",
   "substrate/node",
-  "substrate/client/serai",
-  "substrate/client/bitcoin",
-  "substrate/client/ethereum",
-  "substrate/client/monero",
   "substrate/client",
   "orchestration",
@@ -111,93 +143,60 @@ members = [
   "tests/message-queue",
   # TODO "tests/processor",
   # TODO "tests/coordinator",
-  "tests/substrate",
   # TODO "tests/full-stack",
   "tests/reproducible-runtime",
 ]
-[profile.dev.package]
 # Always compile Monero (and a variety of dependencies) with optimizations due
 # to the extensive operations required for Bulletproofs
+[profile.dev.package]
 subtle = { opt-level = 3 }
-sha3 = { opt-level = 3 }
-blake2 = { opt-level = 3 }
 ff = { opt-level = 3 }
 group = { opt-level = 3 }
 crypto-bigint = { opt-level = 3 }
+secp256k1 = { opt-level = 3 }
 curve25519-dalek = { opt-level = 3 }
 dalek-ff-group = { opt-level = 3 }
-minimal-ed448 = { opt-level = 3 }
 multiexp = { opt-level = 3 }
-monero-io = { opt-level = 3 }
-monero-primitives = { opt-level = 3 }
-monero-ed25519 = { opt-level = 3 }
-monero-mlsag = { opt-level = 3 }
-monero-clsag = { opt-level = 3 }
-monero-borromean = { opt-level = 3 }
-monero-bulletproofs-generators = { opt-level = 3 }
-monero-bulletproofs = { opt-level = 3 }
-monero-oxide = { opt-level = 3 }
-# Always compile the eVRF DKG tree with optimizations as well
-secp256k1 = { opt-level = 3 }
 secq256k1 = { opt-level = 3 }
 embedwards25519 = { opt-level = 3 }
 generalized-bulletproofs = { opt-level = 3 }
 generalized-bulletproofs-circuit-abstraction = { opt-level = 3 }
+ec-divisors = { opt-level = 3 }
 generalized-bulletproofs-ec-gadgets = { opt-level = 3 }
+dkg = { opt-level = 3 }
-# revm also effectively requires being built with optimizations
-revm = { opt-level = 3 }
-revm-bytecode = { opt-level = 3 }
-revm-context = { opt-level = 3 }
-revm-context-interface = { opt-level = 3 }
-revm-database = { opt-level = 3 }
-revm-database-interface = { opt-level = 3 }
-revm-handler = { opt-level = 3 }
-revm-inspector = { opt-level = 3 }
-revm-interpreter = { opt-level = 3 }
-revm-precompile = { opt-level = 3 }
-revm-primitives = { opt-level = 3 }
-revm-state = { opt-level = 3 }
+
+monero-generators = { opt-level = 3 }
+monero-borromean = { opt-level = 3 }
+monero-bulletproofs = { opt-level = 3 }
+monero-mlsag = { opt-level = 3 }
+monero-clsag = { opt-level = 3 }

 [profile.release]
 panic = "unwind"
-overflow-checks = true

 [patch.crates-io]
-# Point to empty crates for crates unused within our tree
-ark-ff-3 = { package = "ark-ff", path = "patches/ethereum/ark-ff-0.3" }
-ark-ff-4 = { package = "ark-ff", path = "patches/ethereum/ark-ff-0.4" }
-c-kzg = { path = "patches/ethereum/c-kzg" }
-secp256k1-30 = { package = "secp256k1", path = "patches/ethereum/secp256k1-30" }
-# Dependencies from monero-oxide which originate from within our own tree, potentially shimmed to account for deviations since publishing
-std-shims = { path = "patches/std-shims" }
-simple-request = { path = "patches/simple-request" }
-multiexp = { path = "crypto/multiexp" }
-flexible-transcript = { path = "crypto/transcript" }
-ciphersuite = { path = "patches/ciphersuite" }
-dalek-ff-group = { path = "patches/dalek-ff-group" }
-minimal-ed448 = { path = "crypto/ed448" }
-modular-frost = { path = "crypto/frost" }
-# Patch due to `std` now including the required functionality
-is_terminal_polyfill = { path = "./patches/is_terminal_polyfill" }
-# This has a non-deprecated `std` alternative since Rust's 2024 edition
-home = { path = "patches/home" }
-# Updates to the latest version
-darling = { path = "patches/darling" }
-thiserror = { path = "patches/thiserror" }
 # https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
 lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }
+
+parking_lot_core = { path = "patches/parking_lot_core" }
+parking_lot = { path = "patches/parking_lot" }
+
+# wasmtime pulls in an old version for this
+zstd = { path = "patches/zstd" }
+
+# Needed for WAL compression
+rocksdb = { path = "patches/rocksdb" }
+
+# is-terminal now has an std-based solution with an equivalent API
+is-terminal = { path = "patches/is-terminal" }
+# So does matches
+matches = { path = "patches/matches" }

 # directories-next was created because directories was unmaintained
 # directories-next is now unmaintained while directories is maintained
 # The directories author pulls in ridiculously pointless crates and prefers
@@ -206,19 +205,7 @@ lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev
 option-ext = { path = "patches/option-ext" }
 directories-next = { path = "patches/directories-next" }
-# Patch from a fork back to upstream
-parity-bip39 = { path = "patches/parity-bip39" }
-# Patch to include `FromUniformBytes<64>` over `Scalar`
-k256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" }
-p256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" }
-# `jemalloc` conflicts with `mimalloc`, so patch to a `rocksdb` which never uses `jemalloc`
-librocksdb-sys = { path = "patches/librocksdb-sys" }

 [workspace.lints.clippy]
-incompatible_msrv = "allow" # Manually verified with a GitHub workflow
-manual_is_multiple_of = "allow"
 unwrap_or_default = "allow"
 map_unwrap_or = "allow"
 needless_continue = "allow"
@@ -260,7 +247,7 @@ redundant_closure_for_method_calls = "deny"
 redundant_else = "deny"
 string_add_assign = "deny"
 string_slice = "deny"
-unchecked_time_subtraction = "deny"
+unchecked_duration_subtraction = "deny"
 uninlined_format_args = "deny"
 unnecessary_box_returns = "deny"
 unnecessary_join = "deny"
@@ -269,6 +256,3 @@ unnested_or_patterns = "deny"
 unused_async = "deny"
 unused_self = "deny"
 zero_sized_map_values = "deny"
-
-[workspace.lints.rust]
-unused = "allow" # TODO: https://github.com/rust-lang/rust/issues/147648
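The `[patch.crates-io]` entries above redirect crates.io dependencies to in-tree copies. One way to confirm a patch actually took effect — a sketch using `directories-next`, which both sides of this diff patch — is to re-resolve it and ask cargo where it now comes from:

    # Re-resolve the patched crate, then show the reverse dependency tree,
    # which will list the local path the patch points at
    cargo update -p directories-next
    cargo tree -i directories-next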


@@ -59,6 +59,7 @@ issued at the discretion of the Immunefi program managers.
 - [Website](https://serai.exchange/): https://serai.exchange/
 - [Immunefi](https://immunefi.com/bounty/serai/): https://immunefi.com/bounty/serai/
 - [Twitter](https://twitter.com/SeraiDEX): https://twitter.com/SeraiDEX
+- [Mastodon](https://cryptodon.lol/@serai): https://cryptodon.lol/@serai
 - [Discord](https://discord.gg/mpEUtJR3vz): https://discord.gg/mpEUtJR3vz
 - [Matrix](https://matrix.to/#/#serai:matrix.org): https://matrix.to/#/#serai:matrix.org
 - [Reddit](https://www.reddit.com/r/SeraiDEX/): https://www.reddit.com/r/SeraiDEX/


@@ -0,0 +1,427 @@
Attribution-ShareAlike 4.0 International
=======================================================================
Creative Commons Corporation ("Creative Commons") is not a law firm and
does not provide legal services or legal advice. Distribution of
Creative Commons public licenses does not create a lawyer-client or
other relationship. Creative Commons makes its licenses and related
information available on an "as-is" basis. Creative Commons gives no
warranties regarding its licenses, any material licensed under their
terms and conditions, or any related information. Creative Commons
disclaims all liability for damages resulting from their use to the
fullest extent possible.
Using Creative Commons Public Licenses
Creative Commons public licenses provide a standard set of terms and
conditions that creators and other rights holders may use to share
original works of authorship and other material subject to copyright
and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.
Considerations for licensors: Our public licenses are
intended for use by those authorized to give the public
permission to use material in ways otherwise restricted by
copyright and certain other rights. Our licenses are
irrevocable. Licensors should read and understand the terms
and conditions of the license they choose before applying it.
Licensors should also secure all rights necessary before
applying our licenses so that the public can reuse the
material as expected. Licensors should clearly mark any
material not subject to the license. This includes other CC-
licensed material, or material used under an exception or
limitation to copyright. More considerations for licensors:
wiki.creativecommons.org/Considerations_for_licensors
Considerations for the public: By using one of our public
licenses, a licensor grants the public permission to use the
licensed material under specified terms and conditions. If
the licensor's permission is not necessary for any reason--for
example, because of any applicable exception or limitation to
copyright--then that use is not regulated by the license. Our
licenses grant only permissions under copyright and certain
other rights that a licensor has authority to grant. Use of
the licensed material may still be restricted for other
reasons, including because others have copyright or other
rights in the material. A licensor may make special requests,
such as asking that all changes be marked or described.
Although not required by our licenses, you are encouraged to
respect those requests where reasonable. More considerations
for the public:
wiki.creativecommons.org/Considerations_for_licensees
=======================================================================
Creative Commons Attribution-ShareAlike 4.0 International Public
License
By exercising the Licensed Rights (defined below), You accept and agree
to be bound by the terms and conditions of this Creative Commons
Attribution-ShareAlike 4.0 International Public License ("Public
License"). To the extent this Public License may be interpreted as a
contract, You are granted the Licensed Rights in consideration of Your
acceptance of these terms and conditions, and the Licensor grants You
such rights in consideration of benefits the Licensor receives from
making the Licensed Material available under these terms and
conditions.
Section 1 -- Definitions.
a. Adapted Material means material subject to Copyright and Similar
Rights that is derived from or based upon the Licensed Material
and in which the Licensed Material is translated, altered,
arranged, transformed, or otherwise modified in a manner requiring
permission under the Copyright and Similar Rights held by the
Licensor. For purposes of this Public License, where the Licensed
Material is a musical work, performance, or sound recording,
Adapted Material is always produced where the Licensed Material is
synched in timed relation with a moving image.
b. Adapter's License means the license You apply to Your Copyright
and Similar Rights in Your contributions to Adapted Material in
accordance with the terms and conditions of this Public License.
c. BY-SA Compatible License means a license listed at
creativecommons.org/compatiblelicenses, approved by Creative
Commons as essentially the equivalent of this Public License.
d. Copyright and Similar Rights means copyright and/or similar rights
closely related to copyright including, without limitation,
performance, broadcast, sound recording, and Sui Generis Database
Rights, without regard to how the rights are labeled or
categorized. For purposes of this Public License, the rights
specified in Section 2(b)(1)-(2) are not Copyright and Similar
Rights.
e. Effective Technological Measures means those measures that, in the
absence of proper authority, may not be circumvented under laws
fulfilling obligations under Article 11 of the WIPO Copyright
Treaty adopted on December 20, 1996, and/or similar international
agreements.
f. Exceptions and Limitations means fair use, fair dealing, and/or
any other exception or limitation to Copyright and Similar Rights
that applies to Your use of the Licensed Material.
g. License Elements means the license attributes listed in the name
of a Creative Commons Public License. The License Elements of this
Public License are Attribution and ShareAlike.
h. Licensed Material means the artistic or literary work, database,
or other material to which the Licensor applied this Public
License.
i. Licensed Rights means the rights granted to You subject to the
terms and conditions of this Public License, which are limited to
all Copyright and Similar Rights that apply to Your use of the
Licensed Material and that the Licensor has authority to license.
j. Licensor means the individual(s) or entity(ies) granting rights
under this Public License.
k. Share means to provide material to the public by any means or
process that requires permission under the Licensed Rights, such
as reproduction, public display, public performance, distribution,
dissemination, communication, or importation, and to make material
available to the public including in ways that members of the
public may access the material from a place and at a time
individually chosen by them.
l. Sui Generis Database Rights means rights other than copyright
resulting from Directive 96/9/EC of the European Parliament and of
the Council of 11 March 1996 on the legal protection of databases,
as amended and/or succeeded, as well as other essentially
equivalent rights anywhere in the world.
m. You means the individual or entity exercising the Licensed Rights
under this Public License. Your has a corresponding meaning.
Section 2 -- Scope.
a. License grant.
1. Subject to the terms and conditions of this Public License,
the Licensor hereby grants You a worldwide, royalty-free,
non-sublicensable, non-exclusive, irrevocable license to
exercise the Licensed Rights in the Licensed Material to:
a. reproduce and Share the Licensed Material, in whole or
in part; and
b. produce, reproduce, and Share Adapted Material.
2. Exceptions and Limitations. For the avoidance of doubt, where
Exceptions and Limitations apply to Your use, this Public
License does not apply, and You do not need to comply with
its terms and conditions.
3. Term. The term of this Public License is specified in Section
6(a).
4. Media and formats; technical modifications allowed. The
Licensor authorizes You to exercise the Licensed Rights in
all media and formats whether now known or hereafter created,
and to make technical modifications necessary to do so. The
Licensor waives and/or agrees not to assert any right or
authority to forbid You from making technical modifications
necessary to exercise the Licensed Rights, including
technical modifications necessary to circumvent Effective
Technological Measures. For purposes of this Public License,
simply making modifications authorized by this Section 2(a)
(4) never produces Adapted Material.
5. Downstream recipients.
a. Offer from the Licensor -- Licensed Material. Every
recipient of the Licensed Material automatically
receives an offer from the Licensor to exercise the
Licensed Rights under the terms and conditions of this
Public License.
b. Additional offer from the Licensor -- Adapted Material.
Every recipient of Adapted Material from You
automatically receives an offer from the Licensor to
exercise the Licensed Rights in the Adapted Material
under the conditions of the Adapter's License You apply.
c. No downstream restrictions. You may not offer or impose
any additional or different terms or conditions on, or
apply any Effective Technological Measures to, the
Licensed Material if doing so restricts exercise of the
Licensed Rights by any recipient of the Licensed
Material.
6. No endorsement. Nothing in this Public License constitutes or
may be construed as permission to assert or imply that You
are, or that Your use of the Licensed Material is, connected
with, or sponsored, endorsed, or granted official status by,
the Licensor or others designated to receive attribution as
provided in Section 3(a)(1)(A)(i).
b. Other rights.
1. Moral rights, such as the right of integrity, are not
licensed under this Public License, nor are publicity,
privacy, and/or other similar personality rights; however, to
the extent possible, the Licensor waives and/or agrees not to
assert any such rights held by the Licensor to the limited
extent necessary to allow You to exercise the Licensed
Rights, but not otherwise.
2. Patent and trademark rights are not licensed under this
Public License.
3. To the extent possible, the Licensor waives any right to
collect royalties from You for the exercise of the Licensed
Rights, whether directly or through a collecting society
under any voluntary or waivable statutory or compulsory
licensing scheme. In all other cases the Licensor expressly
reserves any right to collect such royalties.
Section 3 -- License Conditions.
Your exercise of the Licensed Rights is expressly made subject to the
following conditions.
a. Attribution.
1. If You Share the Licensed Material (including in modified
form), You must:
a. retain the following if it is supplied by the Licensor
with the Licensed Material:
i. identification of the creator(s) of the Licensed
Material and any others designated to receive
attribution, in any reasonable manner requested by
the Licensor (including by pseudonym if
designated);
ii. a copyright notice;
iii. a notice that refers to this Public License;
iv. a notice that refers to the disclaimer of
warranties;
v. a URI or hyperlink to the Licensed Material to the
extent reasonably practicable;
b. indicate if You modified the Licensed Material and
retain an indication of any previous modifications; and
c. indicate the Licensed Material is licensed under this
Public License, and include the text of, or the URI or
hyperlink to, this Public License.
2. You may satisfy the conditions in Section 3(a)(1) in any
reasonable manner based on the medium, means, and context in
which You Share the Licensed Material. For example, it may be
reasonable to satisfy the conditions by providing a URI or
hyperlink to a resource that includes the required
information.
3. If requested by the Licensor, You must remove any of the
information required by Section 3(a)(1)(A) to the extent
reasonably practicable.
b. ShareAlike.
In addition to the conditions in Section 3(a), if You Share
Adapted Material You produce, the following conditions also apply.
1. The Adapter's License You apply must be a Creative Commons
license with the same License Elements, this version or
later, or a BY-SA Compatible License.
2. You must include the text of, or the URI or hyperlink to, the
Adapter's License You apply. You may satisfy this condition
in any reasonable manner based on the medium, means, and
context in which You Share Adapted Material.
3. You may not offer or impose any additional or different terms
or conditions on, or apply any Effective Technological
Measures to, Adapted Material that restrict exercise of the
rights granted under the Adapter's License You apply.
Section 4 -- Sui Generis Database Rights.
Where the Licensed Rights include Sui Generis Database Rights that
apply to Your use of the Licensed Material:
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
to extract, reuse, reproduce, and Share all or a substantial
portion of the contents of the database;
b. if You include all or a substantial portion of the database
contents in a database in which You have Sui Generis Database
Rights, then the database in which You have Sui Generis Database
Rights (but not its individual contents) is Adapted Material,
including for purposes of Section 3(b); and
c. You must comply with the conditions in Section 3(a) if You Share
all or a substantial portion of the contents of the database.
For the avoidance of doubt, this Section 4 supplements and does not
replace Your obligations under this Public License where the Licensed
Rights include other Copyright and Similar Rights.
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
c. The disclaimer of warranties and limitation of liability provided
above shall be interpreted in a manner that, to the extent
possible, most closely approximates an absolute disclaimer and
waiver of all liability.
Section 6 -- Term and Termination.
a. This Public License applies for the term of the Copyright and
Similar Rights licensed here. However, if You fail to comply with
this Public License, then Your rights under this Public License
terminate automatically.
b. Where Your right to use the Licensed Material has terminated under
Section 6(a), it reinstates:
1. automatically as of the date the violation is cured, provided
it is cured within 30 days of Your discovery of the
violation; or
2. upon express reinstatement by the Licensor.
For the avoidance of doubt, this Section 6(b) does not affect any
right the Licensor may have to seek remedies for Your violations
of this Public License.
c. For the avoidance of doubt, the Licensor may also offer the
Licensed Material under separate terms or conditions or stop
distributing the Licensed Material at any time; however, doing so
will not terminate this Public License.
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
License.
Section 7 -- Other Terms and Conditions.
a. The Licensor shall not be bound by any additional or different
terms or conditions communicated by You unless expressly agreed.
b. Any arrangements, understandings, or agreements regarding the
Licensed Material not stated herein are separate from and
independent of the terms and conditions of this Public License.
Section 8 -- Interpretation.
a. For the avoidance of doubt, this Public License does not, and
shall not be interpreted to, reduce, limit, restrict, or impose
conditions on any use of the Licensed Material that could lawfully
be made without permission under this Public License.
b. To the extent possible, if any provision of this Public License is
deemed unenforceable, it shall be automatically reformed to the
minimum extent necessary to make it enforceable. If the provision
cannot be reformed, it shall be severed from this Public License
without affecting the enforceability of the remaining terms and
conditions.
c. No term or condition of this Public License will be waived and no
failure to comply consented to unless expressly agreed to by the
Licensor.
d. Nothing in this Public License constitutes or may be interpreted
as a limitation upon, or waiver of, any privileges and immunities
that apply to the Licensor or You, including from the legal
processes of any jurisdiction or authority.
=======================================================================
Creative Commons is not a party to its public
licenses. Notwithstanding, Creative Commons may elect to apply one of
its public licenses to material it publishes and in those instances
will be considered the “Licensor.” The text of the Creative Commons
public licenses is dedicated to the public domain under the CC0 Public
Domain Dedication. Except for the limited purpose of indicating that
material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the
public licenses.
Creative Commons may be contacted at creativecommons.org.
View File
@@ -11,4 +11,4 @@ It is encompassing up to commit 4e0c58464fc4673623938335f06e2e9ea96ca8dd.
Please see Please see
https://github.com/trailofbits/publications/blob/30c4fa3ebf39ff8e4d23ba9567344ec9691697b5/reviews/2025-04-serai-dex-security-review.pdf https://github.com/trailofbits/publications/blob/30c4fa3ebf39ff8e4d23ba9567344ec9691697b5/reviews/2025-04-serai-dex-security-review.pdf
for the actual report. for provenance.
View File
@@ -1,50 +0,0 @@
# eVRF DKG
In 2024, the [eVRF paper](https://eprint.iacr.org/2024/397) was published to
the IACR preprint server. Within it was a one-round unbiased DKG and a
one-round unbiased threshold DKG. Unfortunately, both simply describe
communication of the secret shares as 'Alice sends $s_b$ to Bob'. This causes,
in practice, the need for an additional round of communication to occur where
all participants confirm they received their secret shares.
Within Serai, it was posited to use the same premises as the DDH eVRF itself to
achieve a verifiable encryption scheme. This allows the secret shares to be
posted to any 'bulletin board' (such as a blockchain) and for all observers to
confirm:
- A participant participated
- The secret shares sent can be received by the intended recipient so long as
they can access the bulletin board
Additionally, Serai desired a robust scheme (albeit with a biased key as the
output, which is fine for our purposes). Accordingly, our implementation
instantiates the threshold eVRF DKG from the eVRF paper, with our own proposal
for verifiable encryption, with the caller allowed to decide the set of
participants. They may (as sketched below):
- Select everyone, collapsing to the non-threshold unbiased DKG from the eVRF
paper
- Select a pre-determined set, collapsing to the threshold unbiased DKG from
the eVRF paper
- Select a post-determined set (with any solution for the Common Subset
problem), achieving a robust threshold biased DKG
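To make these modes concrete, the following is a minimal sketch; the names are
hypothetical and do not reflect this crate's actual API.

```rust
/// A hypothetical model of the participant-selection modes described above.
#[allow(dead_code)]
enum ParticipantSelection {
  /// Select everyone: the eVRF paper's non-threshold unbiased DKG.
  Everyone,
  /// Select a pre-determined set of participant indices: the paper's
  /// threshold unbiased DKG.
  PreDetermined(Vec<u16>),
  /// Select a post-determined set (via any solution to the Common Subset
  /// problem): a robust threshold DKG, at the cost of a biased key.
  PostDetermined,
}
```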
Note that the eVRF paper proposes using the eVRF to sample coefficients, yet
this is unnecessary when the resulting key will be biased. Any proof of
knowledge for the coefficients, as necessary for their extraction within the
security proofs, would be sufficient.
MAGIC Grants contracted HashCloak to formalize Serai's proposal for a DKG and
provide proofs for its security. This resulted in
[this paper](<./Security Proofs.pdf>).
Our implementation itself is then built on top of the audited
[`generalized-bulletproofs`](https://github.com/kayabaNerve/monero-oxide/tree/generalized-bulletproofs/audits/crypto/generalized-bulletproofs)
and
[`generalized-bulletproofs-ec-gadgets`](https://github.com/monero-oxide/monero-oxide/tree/fcmp%2B%2B/audits/fcmps).
Note we do not use the originally premised DDH eVRF, but rather the one
premised on elliptic curve divisors, the methodology of which is commented on
[here](https://github.com/monero-oxide/monero-oxide/tree/fcmp%2B%2B/audits/divisors).
Our implementation itself is, however, unaudited at this time.
View File
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/db"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = [] keywords = []
edition = "2021" edition = "2021"
rust-version = "1.77" rust-version = "1.71"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true
@@ -17,8 +17,8 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true workspace = true
[dependencies] [dependencies]
parity-db = { version = "0.5", default-features = false, features = ["arc"], optional = true } parity-db = { version = "0.4", default-features = false, optional = true }
rocksdb = { version = "0.24", default-features = false, features = ["zstd"], optional = true } rocksdb = { version = "0.23", default-features = false, features = ["zstd"], optional = true }
[features] [features]
parity-db = ["dep:parity-db"] parity-db = ["dep:parity-db"]
View File
@@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2022-2025 Luke Parker Copyright (c) 2022-2023 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal
View File
@@ -15,7 +15,7 @@ pub fn serai_db_key(
/// ///
/// Creates a unit struct and a default implementation for the `key`, `get`, and `set`. The macro /// Creates a unit struct and a default implementation for the `key`, `get`, and `set`. The macro
/// uses a syntax similar to defining a function. Parameters are concatenated to produce a key, /// uses a syntax similar to defining a function. Parameters are concatenated to produce a key,
/// they must be `borsh` serializable. The return type is used to auto (de)serialize the database /// they must be `scale` encodable. The return type is used to auto encode and decode the database
/// value bytes using `borsh`. /// value bytes using `borsh`.
/// ///
/// # Arguments /// # Arguments
@@ -54,10 +54,11 @@ macro_rules! create_db {
)?; )?;
impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? { impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? {
pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> { pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> {
use scale::Encode;
$crate::serai_db_key( $crate::serai_db_key(
stringify!($db_name).as_bytes(), stringify!($db_name).as_bytes(),
stringify!($field_name).as_bytes(), stringify!($field_name).as_bytes(),
&borsh::to_vec(&($($arg),*)).unwrap(), ($($arg),*).encode()
) )
} }
pub(crate) fn set( pub(crate) fn set(
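To make the documented syntax concrete, here is a hedged sketch modeled on the
`create_db!` usages appearing later in this diff; `CoordinatorDb` and
`LastBlock` are hypothetical names.

```rust
// Hypothetical example of `create_db!`. Each argument is borsh-serialized and
// concatenated into the key; the return type is borsh-(de)serialized as the
// database value.
create_db!(
  CoordinatorDb {
    LastBlock: (network: u8) -> u64,
  }
);
```

A caller would then write with `LastBlock::set(&mut txn, network, &block_number)`
and read with `LastBlock::get(&txn, network)`, mirroring the
`SubstrateBlockHash` usage later in this diff.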
View File
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/env"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = [] keywords = []
edition = "2021" edition = "2021"
rust-version = "1.64" rust-version = "1.71"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true
2
common/env/LICENSE vendored
View File
@@ -1,6 +1,6 @@
AGPL-3.0-only license AGPL-3.0-only license
Copyright (c) 2023-2025 Luke Parker Copyright (c) 2023 Luke Parker
This program is free software: you can redistribute it and/or modify This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as it under the terms of the GNU Affero General Public License Version 3 as
View File
@@ -1,5 +1,5 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
// Obtain a variable from the Serai environment/secret store. // Obtain a variable from the Serai environment/secret store.
pub fn var(variable: &str) -> Option<String> { pub fn var(variable: &str) -> Option<String> {
View File
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/patchable-a
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["async", "sleep", "tokio", "smol", "async-std"] keywords = ["async", "sleep", "tokio", "smol", "async-std"]
edition = "2021" edition = "2021"
rust-version = "1.70" rust-version = "1.71"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true
View File
@@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2024-2025 Luke Parker Copyright (c) 2024 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal
View File
@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
#![deny(missing_docs)] #![deny(missing_docs)]
View File
@@ -1,9 +1,9 @@
[package] [package]
name = "simple-request" name = "simple-request"
version = "0.3.0" version = "0.1.0"
description = "A simple HTTP(S) request library" description = "A simple HTTP(S) request library"
license = "MIT" license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/request" repository = "https://github.com/serai-dex/serai/tree/develop/common/simple-request"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["http", "https", "async", "request", "ssl"] keywords = ["http", "https", "async", "request", "ssl"]
edition = "2021" edition = "2021"
@@ -19,10 +19,9 @@ workspace = true
[dependencies] [dependencies]
tower-service = { version = "0.3", default-features = false } tower-service = { version = "0.3", default-features = false }
hyper = { version = "1", default-features = false, features = ["http1", "client"] } hyper = { version = "1", default-features = false, features = ["http1", "client"] }
hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy"] } hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy", "tokio"] }
http-body-util = { version = "0.1", default-features = false } http-body-util = { version = "0.1", default-features = false }
futures-util = { version = "0.3", default-features = false, features = ["std"] } tokio = { version = "1", default-features = false }
tokio = { version = "1", default-features = false, features = ["sync"] }
hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true } hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }
@@ -30,8 +29,6 @@ zeroize = { version = "1", optional = true }
base64ct = { version = "1", features = ["alloc"], optional = true } base64ct = { version = "1", features = ["alloc"], optional = true }
[features] [features]
tokio = ["hyper-util/tokio"] tls = ["hyper-rustls"]
tls = ["tokio", "hyper-rustls"]
webpki-roots = ["tls", "hyper-rustls/webpki-roots"]
basic-auth = ["zeroize", "base64ct"] basic-auth = ["zeroize", "base64ct"]
default = ["tls"] default = ["tls"]
View File
@@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2023-2025 Luke Parker Copyright (c) 2023 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal
View File
@@ -1,20 +1,19 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
use core::{pin::Pin, future::Future};
use std::sync::Arc; use std::sync::Arc;
use futures_util::FutureExt; use tokio::sync::Mutex;
use ::tokio::sync::Mutex;
use tower_service::Service as TowerService; use tower_service::Service as TowerService;
use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest, rt::Executor};
pub use hyper;
use hyper_util::client::legacy::{Client as HyperClient, connect::HttpConnector};
#[cfg(feature = "tls")] #[cfg(feature = "tls")]
use hyper_rustls::{HttpsConnectorBuilder, HttpsConnector}; use hyper_rustls::{HttpsConnectorBuilder, HttpsConnector};
use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest};
use hyper_util::{
rt::tokio::TokioExecutor,
client::legacy::{Client as HyperClient, connect::HttpConnector},
};
pub use hyper;
mod request; mod request;
pub use request::*; pub use request::*;
@@ -38,86 +37,52 @@ type Connector = HttpConnector;
type Connector = HttpsConnector<HttpConnector>; type Connector = HttpsConnector<HttpConnector>;
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
enum Connection< enum Connection {
E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
> {
ConnectionPool(HyperClient<Connector, Full<Bytes>>), ConnectionPool(HyperClient<Connector, Full<Bytes>>),
Connection { Connection {
executor: E,
connector: Connector, connector: Connector,
host: Uri, host: Uri,
connection: Arc<Mutex<Option<SendRequest<Full<Bytes>>>>>, connection: Arc<Mutex<Option<SendRequest<Full<Bytes>>>>>,
}, },
} }
/// An HTTP client.
///
/// `tls` is only guaranteed to work when using the `tokio` executor. Instantiating a client when
/// the `tls` feature is active without using the `tokio` executor will cause errors.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct Client< pub struct Client {
E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>, connection: Connection,
> {
connection: Connection<E>,
} }
impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>> impl Client {
Client<E> fn connector() -> Connector {
{
#[allow(clippy::unnecessary_wraps)]
fn connector() -> Result<Connector, Error> {
let mut res = HttpConnector::new(); let mut res = HttpConnector::new();
res.set_keepalive(Some(core::time::Duration::from_secs(60))); res.set_keepalive(Some(core::time::Duration::from_secs(60)));
res.set_nodelay(true); res.set_nodelay(true);
res.set_reuse_address(true); res.set_reuse_address(true);
#[cfg(feature = "tls")]
if core::any::TypeId::of::<E>() !=
core::any::TypeId::of::<hyper_util::rt::tokio::TokioExecutor>()
{
Err(Error::ConnectionError(
"`tls` feature enabled but not using the `tokio` executor".into(),
))?;
}
#[cfg(feature = "tls")] #[cfg(feature = "tls")]
res.enforce_http(false); res.enforce_http(false);
#[cfg(feature = "tls")] #[cfg(feature = "tls")]
let https = HttpsConnectorBuilder::new().with_native_roots(); let res = HttpsConnectorBuilder::new()
#[cfg(all(feature = "tls", not(feature = "webpki-roots")))] .with_native_roots()
let https = https.map_err(|e| { .expect("couldn't fetch system's SSL roots")
Error::ConnectionError( .https_or_http()
format!("couldn't load system's SSL root certificates and webpki-roots unavilable: {e:?}") .enable_http1()
.into(), .wrap_connector(res);
) res
})?;
// Fallback to `webpki-roots` if present
#[cfg(all(feature = "tls", feature = "webpki-roots"))]
let https = https.unwrap_or(HttpsConnectorBuilder::new().with_webpki_roots());
#[cfg(feature = "tls")]
let res = https.https_or_http().enable_http1().wrap_connector(res);
Ok(res)
} }
pub fn with_executor_and_connection_pool(executor: E) -> Result<Client<E>, Error> { pub fn with_connection_pool() -> Client {
Ok(Client { Client {
connection: Connection::ConnectionPool( connection: Connection::ConnectionPool(
HyperClient::builder(executor) HyperClient::builder(TokioExecutor::new())
.pool_idle_timeout(core::time::Duration::from_secs(60)) .pool_idle_timeout(core::time::Duration::from_secs(60))
.build(Self::connector()?), .build(Self::connector()),
), ),
}) }
} }
pub fn with_executor_and_without_connection_pool( pub fn without_connection_pool(host: &str) -> Result<Client, Error> {
executor: E,
host: &str,
) -> Result<Client<E>, Error> {
Ok(Client { Ok(Client {
connection: Connection::Connection { connection: Connection::Connection {
executor, connector: Self::connector(),
connector: Self::connector()?,
host: { host: {
let uri: Uri = host.parse().map_err(|_| Error::InvalidUri)?; let uri: Uri = host.parse().map_err(|_| Error::InvalidUri)?;
if uri.host().is_none() { if uri.host().is_none() {
@@ -130,9 +95,9 @@ impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Outpu
}) })
} }
pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_, E>, Error> { pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_>, Error> {
let request: Request = request.into(); let request: Request = request.into();
let Request { mut request, response_size_limit } = request; let mut request = request.0;
if let Some(header_host) = request.headers().get(hyper::header::HOST) { if let Some(header_host) = request.headers().get(hyper::header::HOST) {
match &self.connection { match &self.connection {
Connection::ConnectionPool(_) => {} Connection::ConnectionPool(_) => {}
@@ -166,7 +131,7 @@ impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Outpu
Connection::ConnectionPool(client) => { Connection::ConnectionPool(client) => {
client.request(request).await.map_err(Error::HyperUtil)? client.request(request).await.map_err(Error::HyperUtil)?
} }
Connection::Connection { executor, connector, host, connection } => { Connection::Connection { connector, host, connection } => {
let mut connection_lock = connection.lock().await; let mut connection_lock = connection.lock().await;
// If there's not a connection... // If there's not a connection...
@@ -178,46 +143,28 @@ impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Outpu
let call_res = call_res.map_err(Error::ConnectionError); let call_res = call_res.map_err(Error::ConnectionError);
let (requester, connection) = let (requester, connection) =
hyper::client::conn::http1::handshake(call_res?).await.map_err(Error::Hyper)?; hyper::client::conn::http1::handshake(call_res?).await.map_err(Error::Hyper)?;
// This task will die when we drop the requester // This will die when we drop the requester, so we don't need to track an AbortHandle
executor.execute(Box::pin(connection.map(|_| ()))); // for it
tokio::spawn(connection);
*connection_lock = Some(requester); *connection_lock = Some(requester);
} }
let connection = connection_lock.as_mut().expect("lock over the connection was poisoned"); let connection = connection_lock.as_mut().unwrap();
let mut err = connection.ready().await.err(); let mut err = connection.ready().await.err();
if err.is_none() { if err.is_none() {
// Send the request // Send the request
let response = connection.send_request(request).await; let res = connection.send_request(request).await;
if let Ok(response) = response { if let Ok(res) = res {
return Ok(Response { response, size_limit: response_size_limit, client: self }); return Ok(Response(res, self));
} }
err = response.err(); err = res.err();
} }
// Since this connection has been put into an error state, drop it // Since this connection has been put into an error state, drop it
*connection_lock = None; *connection_lock = None;
Err(Error::Hyper(err.expect("only here if `err` is some yet no error")))? Err(Error::Hyper(err.unwrap()))?
} }
}; };
Ok(Response { response, size_limit: response_size_limit, client: self }) Ok(Response(response, self))
} }
} }
#[cfg(feature = "tokio")]
mod tokio {
use hyper_util::rt::tokio::TokioExecutor;
use super::*;
pub type TokioClient = Client<TokioExecutor>;
impl Client<TokioExecutor> {
pub fn with_connection_pool() -> Result<Self, Error> {
Self::with_executor_and_connection_pool(TokioExecutor::new())
}
pub fn without_connection_pool(host: &str) -> Result<Self, Error> {
Self::with_executor_and_without_connection_pool(TokioExecutor::new(), host)
}
}
}
#[cfg(feature = "tokio")]
pub use tokio::TokioClient;
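As a hedged usage sketch of the reworked client (assuming the default `tls`
feature, which now implies `tokio`, and that the example URL is reachable):

```rust
use simple_request::{hyper, Full, Request, TokioClient};

#[tokio::main]
async fn main() {
  // `TokioClient` is the `Client<TokioExecutor>` alias defined above.
  let client = TokioClient::with_connection_pool().expect("failed to build client");
  // Requests are built via the re-exported `hyper` types, then converted with
  // the `From<hyper::Request<Full<Bytes>>>` impl in the `request` module.
  let request: Request = hyper::Request::builder()
    .uri("http://example.com/")
    .body(Full::new(hyper::body::Bytes::new()))
    .unwrap()
    .into();
  let response = client.request(request).await.expect("request failed");
  println!("status: {}", response.status());
}
```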
View File
@@ -7,15 +7,11 @@ pub use http_body_util::Full;
use crate::Error; use crate::Error;
#[derive(Debug)] #[derive(Debug)]
pub struct Request { pub struct Request(pub(crate) hyper::Request<Full<Bytes>>);
pub(crate) request: hyper::Request<Full<Bytes>>,
pub(crate) response_size_limit: Option<usize>,
}
impl Request { impl Request {
#[cfg(feature = "basic-auth")] #[cfg(feature = "basic-auth")]
fn username_password_from_uri(&self) -> Result<(String, String), Error> { fn username_password_from_uri(&self) -> Result<(String, String), Error> {
if let Some(authority) = self.request.uri().authority() { if let Some(authority) = self.0.uri().authority() {
let authority = authority.as_str(); let authority = authority.as_str();
if authority.contains('@') { if authority.contains('@') {
// Decode the username and password from the URI // Decode the username and password from the URI
@@ -40,10 +36,9 @@ impl Request {
let mut formatted = format!("{username}:{password}"); let mut formatted = format!("{username}:{password}");
let mut encoded = Base64::encode_string(formatted.as_bytes()); let mut encoded = Base64::encode_string(formatted.as_bytes());
formatted.zeroize(); formatted.zeroize();
self.request.headers_mut().insert( self.0.headers_mut().insert(
hyper::header::AUTHORIZATION, hyper::header::AUTHORIZATION,
HeaderValue::from_str(&format!("Basic {encoded}")) HeaderValue::from_str(&format!("Basic {encoded}")).unwrap(),
.expect("couldn't form header from base64-encoded string"),
); );
encoded.zeroize(); encoded.zeroize();
} }
@@ -64,17 +59,9 @@ impl Request {
pub fn with_basic_auth(&mut self) { pub fn with_basic_auth(&mut self) {
let _ = self.basic_auth_from_uri(); let _ = self.basic_auth_from_uri();
} }
/// Set a size limit for the response.
///
/// This may be exceeded by a single HTTP frame and accordingly isn't perfect.
pub fn set_response_size_limit(&mut self, response_size_limit: Option<usize>) {
self.response_size_limit = response_size_limit;
}
} }
impl From<hyper::Request<Full<Bytes>>> for Request { impl From<hyper::Request<Full<Bytes>>> for Request {
fn from(request: hyper::Request<Full<Bytes>>) -> Request { fn from(request: hyper::Request<Full<Bytes>>) -> Request {
Request { request, response_size_limit: None } Request(request)
} }
} }
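A brief, hedged sketch of the new response-size knob; `with_limit` is a
hypothetical helper, not part of the crate.

```rust
use simple_request::{hyper, Full, Request};

// Hypothetical helper: wrap a hyper request and cap its response at 1 MiB.
// As documented above, the limit may be exceeded by a single HTTP frame.
fn with_limit(req: hyper::Request<Full<hyper::body::Bytes>>) -> Request {
  let mut request: Request = req.into();
  request.set_response_size_limit(Some(1 << 20));
  request
}
```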
View File
@@ -1,54 +1,24 @@
use core::{pin::Pin, future::Future};
use std::io;
use hyper::{ use hyper::{
StatusCode, StatusCode,
header::{HeaderValue, HeaderMap}, header::{HeaderValue, HeaderMap},
body::Incoming, body::{Buf, Incoming},
rt::Executor,
}; };
use http_body_util::BodyExt; use http_body_util::BodyExt;
use futures_util::{Stream, StreamExt};
use crate::{Client, Error}; use crate::{Client, Error};
// Borrows the client so its async task lives as long as this response exists. // Borrows the client so its async task lives as long as this response exists.
#[allow(dead_code)] #[allow(dead_code)]
#[derive(Debug)] #[derive(Debug)]
pub struct Response< pub struct Response<'a>(pub(crate) hyper::Response<Incoming>, pub(crate) &'a Client);
'a, impl Response<'_> {
E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
> {
pub(crate) response: hyper::Response<Incoming>,
pub(crate) size_limit: Option<usize>,
pub(crate) client: &'a Client<E>,
}
impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>>
Response<'_, E>
{
pub fn status(&self) -> StatusCode { pub fn status(&self) -> StatusCode {
self.response.status() self.0.status()
} }
pub fn headers(&self) -> &HeaderMap<HeaderValue> { pub fn headers(&self) -> &HeaderMap<HeaderValue> {
self.response.headers() self.0.headers()
} }
pub async fn body(self) -> Result<impl std::io::Read, Error> { pub async fn body(self) -> Result<impl std::io::Read, Error> {
let mut body = self.response.into_body().into_data_stream(); Ok(self.0.into_body().collect().await.map_err(Error::Hyper)?.aggregate().reader())
let mut res: Vec<u8> = vec![];
loop {
if let Some(size_limit) = self.size_limit {
let (lower, upper) = body.size_hint();
if res.len().wrapping_add(upper.unwrap_or(lower)) > size_limit.min(usize::MAX - 1) {
Err(Error::ConnectionError("response exceeded size limit".into()))?;
}
}
let Some(part) = body.next().await else { break };
let part = part.map_err(Error::Hyper)?;
res.extend(part.as_ref());
}
Ok(io::Cursor::new(res))
} }
} }
View File
@@ -1,13 +1,13 @@
[package] [package]
name = "std-shims" name = "std-shims"
version = "0.1.5" version = "0.1.1"
description = "A series of std shims to make alloc more feasible" description = "A series of std shims to make alloc more feasible"
license = "MIT" license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims" repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["nostd", "no_std", "alloc", "io"] keywords = ["nostd", "no_std", "alloc", "io"]
edition = "2021" edition = "2021"
rust-version = "1.65" rust-version = "1.80"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true
@@ -17,11 +17,9 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true workspace = true
[dependencies] [dependencies]
rustversion = { version = "1", default-features = false } spin = { version = "0.9", default-features = false, features = ["use_ticket_mutex", "lazy"] }
spin = { version = "0.10", default-features = false, features = ["use_ticket_mutex", "fair_mutex", "once", "lazy"] } hashbrown = { version = "0.15", default-features = false, features = ["default-hasher", "inline-more"] }
hashbrown = { version = "0.16", default-features = false, features = ["default-hasher", "inline-more"], optional = true }
[features] [features]
alloc = ["hashbrown"] std = []
std = ["alloc", "spin/std"]
default = ["std"] default = ["std"]
View File
@@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2023-2025 Luke Parker Copyright (c) 2023 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal
View File
@@ -1,28 +1,6 @@
# `std` shims # std shims
`std-shims` is a Rust crate with two purposes: A crate which passes through to std when the default `std` feature is enabled,
- Expand the functionality of `core` and `alloc` yet provides a series of shims when it isn't.
- Polyfill functionality only available on newer versions of Rust
The goal is to make supporting no-`std` environments, and older versions of `HashSet` and `HashMap` are provided via `hashbrown`.
Rust, as simple as possible. For most use cases, replacing `std::` with
`std_shims::` and adding `use std_shims::prelude::*` is sufficient to take full
advantage of `std-shims`.
# API Surface
`std-shims` aims only to publicly expose items _mutually available_ between
`alloc` (with extra dependencies) and `std`. Items exclusive to `std`, with
no shims available, will not be exported by `std-shims`.
# Dependencies
`HashSet` and `HashMap` are provided via `hashbrown`. Synchronization
primitives are provided via `spin` (avoiding a requirement on
`critical-section`). Sections of `std::io` are independently matched where
possible. `rustversion` is used to detect when to provide polyfills.
# Disclaimer
No guarantee of one-to-one parity is provided. The shims provided aim to be
sufficient for the average case. Pull requests are _welcome_.
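A minimal sketch of the drop-in usage described above; depending on the enabled
features, `HashMap` and the prelude's items resolve to `std` or to their shims.

```rust
use std_shims::prelude::*;
use std_shims::collections::HashMap;

// Counts byte occurrences, portable across `std` and no-`std` builds.
fn histogram(bytes: &[u8]) -> HashMap<u8, u32> {
  let mut counts = HashMap::new();
  for byte in bytes {
    *counts.entry(*byte).or_insert(0) += 1;
  }
  counts
}
```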
View File
@@ -1,7 +1,7 @@
#[cfg(all(feature = "alloc", not(feature = "std")))]
pub use extern_alloc::collections::*;
#[cfg(all(feature = "alloc", not(feature = "std")))]
pub use hashbrown::{HashSet, HashMap};
#[cfg(feature = "std")] #[cfg(feature = "std")]
pub use std::collections::*; pub use std::collections::*;
#[cfg(not(feature = "std"))]
pub use alloc::collections::*;
#[cfg(not(feature = "std"))]
pub use hashbrown::{HashSet, HashMap};
View File
@@ -1,74 +1,42 @@
#[cfg(feature = "std")]
pub use std::io::*;
#[cfg(not(feature = "std"))] #[cfg(not(feature = "std"))]
mod shims { mod shims {
use core::fmt::{self, Debug, Display, Formatter}; use core::fmt::{Debug, Formatter};
#[cfg(feature = "alloc")] use alloc::{boxed::Box, vec::Vec};
use extern_alloc::{boxed::Box, vec::Vec};
use crate::error::Error as CoreError;
/// The kind of error.
#[derive(Clone, Copy, PartialEq, Eq, Debug)] #[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum ErrorKind { pub enum ErrorKind {
UnexpectedEof, UnexpectedEof,
Other, Other,
} }
/// An error.
#[derive(Debug)]
pub struct Error { pub struct Error {
kind: ErrorKind, kind: ErrorKind,
#[cfg(feature = "alloc")] error: Box<dyn Send + Sync>,
error: Box<dyn Send + Sync + CoreError>,
} }
impl Display for Error { impl Debug for Error {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { fn fmt(&self, fmt: &mut Formatter<'_>) -> core::result::Result<(), core::fmt::Error> {
<Self as Debug>::fmt(self, f) fmt.debug_struct("Error").field("kind", &self.kind).finish_non_exhaustive()
} }
} }
impl CoreError for Error {}
#[cfg(not(feature = "alloc"))]
pub trait IntoBoxSendSyncError {}
#[cfg(not(feature = "alloc"))]
impl<I> IntoBoxSendSyncError for I {}
#[cfg(feature = "alloc")]
pub trait IntoBoxSendSyncError: Into<Box<dyn Send + Sync + CoreError>> {}
#[cfg(feature = "alloc")]
impl<I: Into<Box<dyn Send + Sync + CoreError>>> IntoBoxSendSyncError for I {}
impl Error { impl Error {
/// Create a new error. pub fn new<E: 'static + Send + Sync>(kind: ErrorKind, error: E) -> Error {
/// Error { kind, error: Box::new(error) }
/// The error object itself is silently dropped when `alloc` is not enabled.
#[allow(unused)]
pub fn new<E: 'static + IntoBoxSendSyncError>(kind: ErrorKind, error: E) -> Error {
#[cfg(not(feature = "alloc"))]
let res = Error { kind };
#[cfg(feature = "alloc")]
let res = Error { kind, error: error.into() };
res
} }
/// Create a new error with `io::ErrorKind::Other` as its kind. pub fn other<E: 'static + Send + Sync>(error: E) -> Error {
/// Error { kind: ErrorKind::Other, error: Box::new(error) }
/// The error object itself is silently dropped when `alloc` is not enabled.
#[allow(unused)]
pub fn other<E: 'static + IntoBoxSendSyncError>(error: E) -> Error {
#[cfg(not(feature = "alloc"))]
let res = Error { kind: ErrorKind::Other };
#[cfg(feature = "alloc")]
let res = Error { kind: ErrorKind::Other, error: error.into() };
res
} }
/// The kind of error.
pub fn kind(&self) -> ErrorKind { pub fn kind(&self) -> ErrorKind {
self.kind self.kind
} }
/// Retrieve the inner error. pub fn into_inner(self) -> Option<Box<dyn Send + Sync>> {
#[cfg(feature = "alloc")]
pub fn into_inner(self) -> Option<Box<dyn Send + Sync + CoreError>> {
Some(self.error) Some(self.error)
} }
} }
@@ -96,12 +64,6 @@ mod shims {
} }
} }
impl<R: Read> Read for &mut R {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
R::read(*self, buf)
}
}
pub trait BufRead: Read { pub trait BufRead: Read {
fn fill_buf(&mut self) -> Result<&[u8]>; fn fill_buf(&mut self) -> Result<&[u8]>;
fn consume(&mut self, amt: usize); fn consume(&mut self, amt: usize);
@@ -126,7 +88,6 @@ mod shims {
} }
} }
#[cfg(feature = "alloc")]
impl Write for Vec<u8> { impl Write for Vec<u8> {
fn write(&mut self, buf: &[u8]) -> Result<usize> { fn write(&mut self, buf: &[u8]) -> Result<usize> {
self.extend(buf); self.extend(buf);
@@ -134,8 +95,6 @@ mod shims {
} }
} }
} }
#[cfg(not(feature = "std"))] #[cfg(not(feature = "std"))]
pub use shims::*; pub use shims::*;
#[cfg(feature = "std")]
pub use std::io::{ErrorKind, Error, Result, Read, BufRead, Write};
View File
@@ -1,102 +1,13 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), no_std)]
#[cfg(not(feature = "alloc"))] pub extern crate alloc;
pub use core::*;
#[cfg(not(feature = "alloc"))]
pub use core::{alloc, borrow, ffi, fmt, slice, str, task};
#[cfg(not(feature = "std"))]
#[rustversion::before(1.81)]
pub mod error {
use core::fmt::{Debug, Display};
pub trait Error: Debug + Display {}
}
#[cfg(not(feature = "std"))]
#[rustversion::since(1.81)]
pub use core::error;
#[cfg(feature = "alloc")]
extern crate alloc as extern_alloc;
#[cfg(all(feature = "alloc", not(feature = "std")))]
pub use extern_alloc::{alloc, borrow, boxed, ffi, fmt, rc, slice, str, string, task, vec, format};
#[cfg(feature = "std")]
pub use std::{alloc, borrow, boxed, error, ffi, fmt, rc, slice, str, string, task, vec, format};
pub mod sync;
pub mod collections; pub mod collections;
pub mod io; pub mod io;
pub mod sync;
pub mod prelude { pub use alloc::vec;
// Shim the `std` prelude pub use alloc::str;
#[cfg(feature = "alloc")] pub use alloc::string;
pub use extern_alloc::{
format, vec,
borrow::ToOwned,
boxed::Box,
vec::Vec,
string::{String, ToString},
};
// Shim `div_ceil`
#[rustversion::before(1.73)]
#[doc(hidden)]
pub trait StdShimsDivCeil {
fn div_ceil(self, rhs: Self) -> Self;
}
#[rustversion::before(1.73)]
mod impl_divceil {
use super::StdShimsDivCeil;
impl StdShimsDivCeil for u8 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for u16 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for u32 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for u64 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for u128 {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
impl StdShimsDivCeil for usize {
fn div_ceil(self, rhs: Self) -> Self {
(self + (rhs - 1)) / rhs
}
}
}
// Shim `io::Error::other`
#[cfg(feature = "std")]
#[rustversion::before(1.74)]
#[doc(hidden)]
pub trait StdShimsIoErrorOther {
fn other<E>(error: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>;
}
#[cfg(feature = "std")]
#[rustversion::before(1.74)]
impl StdShimsIoErrorOther for std::io::Error {
fn other<E>(error: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
std::io::Error::new(std::io::ErrorKind::Other, error)
}
}
}
View File
@@ -1,28 +1,19 @@
pub use core::sync::atomic; pub use core::sync::*;
#[cfg(all(feature = "alloc", not(feature = "std")))] pub use alloc::sync::*;
pub use extern_alloc::sync::{Arc, Weak};
#[cfg(feature = "std")]
pub use std::sync::{Arc, Weak};
mod mutex_shim { mod mutex_shim {
#[cfg(not(feature = "std"))]
pub use spin::{Mutex, MutexGuard};
#[cfg(feature = "std")] #[cfg(feature = "std")]
pub use std::sync::{Mutex, MutexGuard}; pub use std::sync::*;
#[cfg(not(feature = "std"))]
pub use spin::*;
/// A shimmed `Mutex` with an API mutual to `spin` and `std`.
#[derive(Default, Debug)] #[derive(Default, Debug)]
pub struct ShimMutex<T>(Mutex<T>); pub struct ShimMutex<T>(Mutex<T>);
impl<T> ShimMutex<T> { impl<T> ShimMutex<T> {
/// Construct a new `Mutex`.
pub const fn new(value: T) -> Self { pub const fn new(value: T) -> Self {
Self(Mutex::new(value)) Self(Mutex::new(value))
} }
/// Acquire a lock on the contents of the `Mutex`.
///
/// On no-`std` environments, this may spin until the lock is acquired. On `std` environments,
/// this may panic if the `Mutex` was poisoned.
pub fn lock(&self) -> MutexGuard<'_, T> { pub fn lock(&self) -> MutexGuard<'_, T> {
#[cfg(feature = "std")] #[cfg(feature = "std")]
let res = self.0.lock().unwrap(); let res = self.0.lock().unwrap();
@@ -34,12 +25,7 @@ mod mutex_shim {
} }
pub use mutex_shim::{ShimMutex as Mutex, MutexGuard}; pub use mutex_shim::{ShimMutex as Mutex, MutexGuard};
#[rustversion::before(1.80)]
pub use spin::Lazy as LazyLock;
#[rustversion::since(1.80)]
#[cfg(not(feature = "std"))]
pub use spin::Lazy as LazyLock;
#[rustversion::since(1.80)]
#[cfg(feature = "std")] #[cfg(feature = "std")]
pub use std::sync::LazyLock; pub use std::sync::LazyLock;
#[cfg(not(feature = "std"))]
pub use spin::Lazy as LazyLock;
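As a hedged sketch of the shimmed `Mutex` (which, per the documentation above,
may spin on no-`std` and may panic on poisoning with `std`):

```rust
use std_shims::sync::Mutex;

// `new` is `const` on both backends, so statics work everywhere.
static COUNTER: Mutex<u64> = Mutex::new(0);

fn increment() -> u64 {
  let mut guard = COUNTER.lock();
  *guard += 1;
  *guard
}
```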
View File
@@ -1,6 +1,6 @@
AGPL-3.0-only license AGPL-3.0-only license
Copyright (c) 2022-2025 Luke Parker Copyright (c) 2022-2024 Luke Parker
This program is free software: you can redistribute it and/or modify This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as it under the terms of the GNU Affero General Public License Version 3 as
View File
@@ -1,4 +1,4 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
#![deny(missing_docs)] #![deny(missing_docs)]
View File
@@ -7,9 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/zalloc"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = [] keywords = []
edition = "2021" edition = "2021"
# This must be specified with the patch version, else Rust believes `1.77` < `1.77.0` and will rust-version = "1.77"
# refuse to compile due to relying on versions introduced with `1.77.0`
rust-version = "1.77.0"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true
View File
@@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2022-2025 Luke Parker Copyright (c) 2022-2023 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal
View File
@@ -1,5 +1,5 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))] #![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))]
//! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation. //! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation.

View File
keywords = [] keywords = []
edition = "2021" edition = "2021"
publish = false publish = false
rust-version = "1.81"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true
@@ -21,16 +22,15 @@ zeroize = { version = "^1.5", default-features = false, features = ["std"] }
bitvec = { version = "1", default-features = false, features = ["std"] } bitvec = { version = "1", default-features = false, features = ["std"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false, features = ["std"] }
blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] } blake2 = { version = "0.10", default-features = false, features = ["std"] }
schnorrkel = { version = "0.11", default-features = false, features = ["std"] } schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"] } ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] }
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] } dkg = { path = "../crypto/dkg", default-features = false, features = ["std"] }
dkg = { package = "dkg-musig", path = "../crypto/dkg/musig", default-features = false, features = ["std"] }
frost = { package = "modular-frost", path = "../crypto/frost" }
frost-schnorrkel = { path = "../crypto/schnorrkel" } frost-schnorrkel = { path = "../crypto/schnorrkel" }
hex = { version = "0.4", default-features = false, features = ["std"] } hex = { version = "0.4", default-features = false, features = ["std"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
zalloc = { path = "../common/zalloc" } zalloc = { path = "../common/zalloc" }
@@ -42,7 +42,7 @@ messages = { package = "serai-processor-messages", path = "../processor/messages
message-queue = { package = "serai-message-queue", path = "../message-queue" } message-queue = { package = "serai-message-queue", path = "../message-queue" }
tributary-sdk = { path = "./tributary-sdk" } tributary-sdk = { path = "./tributary-sdk" }
serai-client-serai = { path = "../substrate/client/serai", default-features = false } serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }
log = { version = "0.4", default-features = false, features = ["std"] } log = { version = "0.4", default-features = false, features = ["std"] }
env_logger = { version = "0.10", default-features = false, features = ["humantime"] } env_logger = { version = "0.10", default-features = false, features = ["humantime"] }
View File
@@ -8,7 +8,7 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = [] keywords = []
edition = "2021" edition = "2021"
publish = false publish = false
rust-version = "1.85" rust-version = "1.81"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true
@@ -18,10 +18,12 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true workspace = true
[dependencies] [dependencies]
blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] } blake2 = { version = "0.10", default-features = false, features = ["std"] }
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
serai-client-serai = { path = "../../substrate/client/serai", default-features = false } serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
log = { version = "0.4", default-features = false, features = ["std"] } log = { version = "0.4", default-features = false, features = ["std"] }
@@ -29,5 +31,3 @@ tokio = { version = "1", default-features = false }
serai-db = { path = "../../common/db", version = "0.1.1" } serai-db = { path = "../../common/db", version = "0.1.1" }
serai-task = { path = "../../common/task", version = "0.1" } serai-task = { path = "../../common/task", version = "0.1" }
serai-cosign-types = { path = "./types" }

View File
AGPL-3.0-only license AGPL-3.0-only license
Copyright (c) 2023-2025 Luke Parker Copyright (c) 2023-2024 Luke Parker
This program is free software: you can redistribute it and/or modify This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as it under the terms of the GNU Affero General Public License Version 3 as

View File
use core::future::Future; use core::future::Future;
use std::{sync::Arc, collections::HashMap}; use std::{sync::Arc, collections::HashMap};
use blake2::{Digest, Blake2b256}; use serai_client::{
primitives::{SeraiAddress, Amount},
use serai_client_serai::{ validator_sets::primitives::ExternalValidatorSet,
abi::{ Serai,
primitives::{
network_id::{ExternalNetworkId, NetworkId},
balance::Amount,
crypto::Public,
validator_sets::{Session, ExternalValidatorSet},
address::SeraiAddress,
merkle::IncrementalUnbalancedMerkleTree,
},
validator_sets::Event,
},
Serai, Events,
}; };
use serai_db::*; use serai_db::*;
@@ -23,20 +12,9 @@ use serai_task::ContinuallyRan;
use crate::*; use crate::*;
#[derive(BorshSerialize, BorshDeserialize)]
struct Set {
session: Session,
key: Public,
stake: Amount,
}
create_db!( create_db!(
CosignIntend { CosignIntend {
ScanCosignFrom: () -> u64, ScanCosignFrom: () -> u64,
BuildsUpon: () -> IncrementalUnbalancedMerkleTree,
Stakes: (network: ExternalNetworkId, validator: SeraiAddress) -> Amount,
Validators: (set: ExternalValidatorSet) -> Vec<SeraiAddress>,
LatestSet: (network: ExternalNetworkId) -> Set,
} }
); );
@@ -57,38 +35,23 @@ db_channel! {
async fn block_has_events_justifying_a_cosign( async fn block_has_events_justifying_a_cosign(
serai: &Serai, serai: &Serai,
block_number: u64, block_number: u64,
) -> Result<(Block, Events, HasEvents), String> { ) -> Result<(Block, HasEvents), String> {
let block = serai let block = serai
.block_by_number(block_number) .finalized_block_by_number(block_number)
.await .await
.map_err(|e| format!("{e:?}"))? .map_err(|e| format!("{e:?}"))?
.ok_or_else(|| "couldn't get block which should've been finalized".to_string())?; .ok_or_else(|| "couldn't get block which should've been finalized".to_string())?;
let events = serai.events(block.header.hash()).await.map_err(|e| format!("{e:?}"))?; let serai = serai.as_of(block.hash());
if events.validator_sets().set_keys_events().next().is_some() { if !serai.validator_sets().key_gen_events().await.map_err(|e| format!("{e:?}"))?.is_empty() {
return Ok((block, events, HasEvents::Notable)); return Ok((block, HasEvents::Notable));
} }
if events.coins().burn_with_instruction_events().next().is_some() { if !serai.coins().burn_with_instruction_events().await.map_err(|e| format!("{e:?}"))?.is_empty() {
return Ok((block, events, HasEvents::NonNotable)); return Ok((block, HasEvents::NonNotable));
} }
Ok((block, events, HasEvents::No)) Ok((block, HasEvents::No))
}
// Fetch the `ExternalValidatorSet`s, and their associated keys, used for cosigning as of this
// block.
fn cosigning_sets(getter: &impl Get) -> Vec<(ExternalValidatorSet, Public, Amount)> {
let mut sets = vec![];
for network in ExternalNetworkId::all() {
let Some(Set { session, key, stake }) = LatestSet::get(getter, network) else {
// If this network doesn't have usable keys, move on
continue;
};
sets.push((ExternalValidatorSet { network, session }, key, stake));
}
sets
} }
/// A task to determine which blocks we should intend to cosign. /// A task to determine which blocks we should intend to cosign.
@@ -104,108 +67,56 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
async move { async move {
let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1); let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1);
let latest_block_number = let latest_block_number =
self.serai.latest_finalized_block_number().await.map_err(|e| format!("{e:?}"))?; self.serai.latest_finalized_block().await.map_err(|e| format!("{e:?}"))?.number();
for block_number in start_block_number ..= latest_block_number { for block_number in start_block_number ..= latest_block_number {
let mut txn = self.db.txn(); let mut txn = self.db.txn();
let (block, events, mut has_events) = let (block, mut has_events) =
block_has_events_justifying_a_cosign(&self.serai, block_number) block_has_events_justifying_a_cosign(&self.serai, block_number)
.await .await
.map_err(|e| format!("{e:?}"))?; .map_err(|e| format!("{e:?}"))?;
let mut builds_upon =
BuildsUpon::get(&txn).unwrap_or(IncrementalUnbalancedMerkleTree::new());
       // Check we are indexing a linear chain
-      if block.header.builds_upon() !=
-        builds_upon.clone().calculate(serai_client_serai::abi::BLOCK_HEADER_BRANCH_TAG)
+      if (block_number > 1) &&
+        (<[u8; 32]>::from(block.header.parent_hash) !=
+          SubstrateBlockHash::get(&txn, block_number - 1)
+            .expect("indexing a block but haven't indexed its parent"))
       {
         Err(format!(
           "node's block #{block_number} doesn't build upon the block #{} prior indexed",
           block_number - 1
         ))?;
       }
-      let block_hash = block.header.hash();
+      let block_hash = block.hash();
       SubstrateBlockHash::set(&mut txn, block_number, &block_hash);
-      builds_upon.append(
-        serai_client_serai::abi::BLOCK_HEADER_BRANCH_TAG,
-        Blake2b256::new_with_prefix([serai_client_serai::abi::BLOCK_HEADER_LEAF_TAG])
-          .chain_update(block_hash.0)
-          .finalize()
-          .into(),
-      );
-      BuildsUpon::set(&mut txn, &builds_upon);
-      // Update the stakes
-      for event in events.validator_sets().allocation_events() {
-        let Event::Allocation { validator, network, amount } = event else {
-          panic!("event from `allocation_events` wasn't `Event::Allocation`")
-        };
-        let Ok(network) = ExternalNetworkId::try_from(*network) else { continue };
-        let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0));
-        Stakes::set(&mut txn, network, *validator, &Amount(existing.0 + amount.0));
-      }
-      for event in events.validator_sets().deallocation_events() {
-        let Event::Deallocation { validator, network, amount, timeline: _ } = event else {
-          panic!("event from `deallocation_events` wasn't `Event::Deallocation`")
-        };
-        let Ok(network) = ExternalNetworkId::try_from(*network) else { continue };
-        let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0));
-        Stakes::set(&mut txn, network, *validator, &Amount(existing.0 - amount.0));
-      }
-      // Handle decided sets
-      for event in events.validator_sets().set_decided_events() {
-        let Event::SetDecided { set, validators } = event else {
-          panic!("event from `set_decided_events` wasn't `Event::SetDecided`")
-        };
-        let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
-        Validators::set(
-          &mut txn,
-          set,
-          &validators.iter().map(|(validator, _key_shares)| *validator).collect(),
-        );
-      }
-      // Handle declarations of the latest set
-      for event in events.validator_sets().set_keys_events() {
-        let Event::SetKeys { set, key_pair } = event else {
-          panic!("event from `set_keys_events` wasn't `Event::SetKeys`")
-        };
-        let mut stake = 0;
-        for validator in
-          Validators::take(&mut txn, *set).expect("set which wasn't decided set keys")
-        {
-          stake += Stakes::get(&txn, set.network, validator).unwrap_or(Amount(0)).0;
-        }
-        LatestSet::set(
-          &mut txn,
-          set.network,
-          &Set { session: set.session, key: key_pair.0, stake: Amount(stake) },
-        );
-      }
       let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn);
       // If this is notable, it creates a new global session, which we index into the database
       // now
       if has_events == HasEvents::Notable {
-        let sets_and_keys_and_stakes = cosigning_sets(&txn);
-        let global_session = GlobalSession::id(
-          sets_and_keys_and_stakes.iter().map(|(set, _key, _stake)| *set).collect(),
-        );
-        let mut sets = Vec::with_capacity(sets_and_keys_and_stakes.len());
-        let mut keys = HashMap::with_capacity(sets_and_keys_and_stakes.len());
-        let mut stakes = HashMap::with_capacity(sets_and_keys_and_stakes.len());
+        let serai = self.serai.as_of(block_hash);
+        let sets_and_keys = cosigning_sets(&serai).await?;
+        let global_session =
+          GlobalSession::id(sets_and_keys.iter().map(|(set, _key)| *set).collect());
+        let mut sets = Vec::with_capacity(sets_and_keys.len());
+        let mut keys = HashMap::with_capacity(sets_and_keys.len());
+        let mut stakes = HashMap::with_capacity(sets_and_keys.len());
         let mut total_stake = 0;
-        for (set, key, stake) in sets_and_keys_and_stakes {
-          sets.push(set);
-          keys.insert(set.network, key);
-          stakes.insert(set.network, stake.0);
-          total_stake += stake.0;
+        for (set, key) in &sets_and_keys {
+          sets.push(*set);
+          keys.insert(set.network, SeraiAddress::from(*key));
+          let stake = serai
+            .validator_sets()
+            .total_allocated_stake(set.network.into())
+            .await
+            .map_err(|e| format!("{e:?}"))?
+            .unwrap_or(Amount(0))
+            .0;
+          stakes.insert(set.network, stake);
+          total_stake += stake;
         }
         if total_stake == 0 {
           Err(format!("cosigning sets for block #{block_number} had 0 stake in total"))?;
@@ -244,7 +155,7 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
       // Tell each set of their expectation to cosign this block
       for set in global_session_info.sets {
-        log::debug!("{set:?} will be cosigning block #{block_number}");
+        log::debug!("{:?} will be cosigning block #{block_number}", set);
         IntendedCosigns::send(
           &mut txn,
           set,


@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
@@ -7,27 +7,18 @@ use std::{sync::Arc, collections::HashMap, time::Instant};
 use blake2::{Digest, Blake2s256};
+use scale::{Encode, Decode};
 use borsh::{BorshSerialize, BorshDeserialize};
-use serai_client_serai::{
-  abi::{
-    primitives::{
-      BlockHash,
-      crypto::{Public, KeyPair},
-      network_id::ExternalNetworkId,
-      validator_sets::{Session, ExternalValidatorSet},
-      address::SeraiAddress,
-    },
-    Block,
-  },
-  Serai, State,
-};
+use serai_client::{
+  primitives::{ExternalNetworkId, SeraiAddress},
+  validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair},
+  Public, Block, Serai, TemporalSerai,
+};
 use serai_db::*;
 use serai_task::*;
-pub use serai_cosign_types::*;
 /// The cosigns which are intended to be performed.
 mod intend;
 /// The evaluator of the cosigns.
@@ -37,6 +28,9 @@ mod delay;
 pub use delay::BROADCAST_FREQUENCY;
 use delay::LatestCosignedBlockNumber;
+/// The schnorrkel context to use when signing a cosign.
+pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";
 /// A 'global session', defined as all validator sets used for cosigning at a given moment.
 ///
 /// We evaluate cosign faults within a global session. This ensures even if cosigners cosign
@@ -59,7 +53,7 @@ use delay::LatestCosignedBlockNumber;
 pub(crate) struct GlobalSession {
   pub(crate) start_block_number: u64,
   pub(crate) sets: Vec<ExternalValidatorSet>,
-  pub(crate) keys: HashMap<ExternalNetworkId, Public>,
+  pub(crate) keys: HashMap<ExternalNetworkId, SeraiAddress>,
   pub(crate) stakes: HashMap<ExternalNetworkId, u64>,
   pub(crate) total_stake: u64,
 }
@@ -84,12 +78,74 @@ enum HasEvents {
   No,
 }
+/// An intended cosign.
+#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
+pub struct CosignIntent {
+  /// The global session this cosign is being performed under.
+  pub global_session: [u8; 32],
+  /// The number of the block to cosign.
+  pub block_number: u64,
+  /// The hash of the block to cosign.
+  pub block_hash: [u8; 32],
+  /// If this cosign must be handled before further cosigns are.
+  pub notable: bool,
+}
+/// A cosign.
+#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)]
+pub struct Cosign {
+  /// The global session this cosign is being performed under.
+  pub global_session: [u8; 32],
+  /// The number of the block to cosign.
+  pub block_number: u64,
+  /// The hash of the block to cosign.
+  pub block_hash: [u8; 32],
+  /// The actual cosigner.
+  pub cosigner: ExternalNetworkId,
+}
+impl CosignIntent {
+  /// Convert this into a `Cosign`.
+  pub fn into_cosign(self, cosigner: ExternalNetworkId) -> Cosign {
+    let CosignIntent { global_session, block_number, block_hash, notable: _ } = self;
+    Cosign { global_session, block_number, block_hash, cosigner }
+  }
+}
+impl Cosign {
+  /// The message to sign for this cosign.
+  ///
+  /// This must be signed with schnorrkel, the context set to `COSIGN_CONTEXT`.
+  pub fn signature_message(&self) -> Vec<u8> {
+    // We use a schnorrkel context to domain-separate this
+    self.encode()
+  }
+}
+/// A signed cosign.
+#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
+pub struct SignedCosign {
+  /// The cosign.
+  pub cosign: Cosign,
+  /// The signature for the cosign.
+  pub signature: [u8; 64],
+}
+impl SignedCosign {
+  fn verify_signature(&self, signer: serai_client::Public) -> bool {
+    let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
+    let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };
+    signer.verify_simple(COSIGN_CONTEXT, &self.cosign.signature_message(), &signature).is_ok()
+  }
+}
 create_db! {
   Cosign {
     // The following are populated by the intend task and used throughout the library
    // An index of Substrate blocks
-    SubstrateBlockHash: (block_number: u64) -> BlockHash,
+    SubstrateBlockHash: (block_number: u64) -> [u8; 32],
    // A mapping from a global session's ID to its relevant information.
    GlobalSessions: (global_session: [u8; 32]) -> GlobalSession,
    // The last block to be cosigned by a global session.
@@ -121,6 +177,60 @@
   }
 }
+/// Fetch the keys used for cosigning by a specific network.
+async fn keys_for_network(
+  serai: &TemporalSerai<'_>,
+  network: ExternalNetworkId,
+) -> Result<Option<(Session, KeyPair)>, String> {
+  let Some(latest_session) =
+    serai.validator_sets().session(network.into()).await.map_err(|e| format!("{e:?}"))?
+  else {
+    // If this network hasn't had a session declared, move on
+    return Ok(None);
+  };
+  // Get the keys for the latest session
+  if let Some(keys) = serai
+    .validator_sets()
+    .keys(ExternalValidatorSet { network, session: latest_session })
+    .await
+    .map_err(|e| format!("{e:?}"))?
+  {
+    return Ok(Some((latest_session, keys)));
+  }
+  // If the latest session has yet to set keys, use the prior session
+  if let Some(prior_session) = latest_session.0.checked_sub(1).map(Session) {
+    if let Some(keys) = serai
+      .validator_sets()
+      .keys(ExternalValidatorSet { network, session: prior_session })
+      .await
+      .map_err(|e| format!("{e:?}"))?
+    {
+      return Ok(Some((prior_session, keys)));
+    }
+  }
+  Ok(None)
+}
+/// Fetch the `ExternalValidatorSet`s, and their associated keys, used for cosigning as of this
+/// block.
+async fn cosigning_sets(
+  serai: &TemporalSerai<'_>,
+) -> Result<Vec<(ExternalValidatorSet, Public)>, String> {
+  let mut sets = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
+  for network in serai_client::primitives::EXTERNAL_NETWORKS {
+    let Some((session, keys)) = keys_for_network(serai, network).await? else {
+      // If this network doesn't have usable keys, move on
+      continue;
+    };
+    sets.push((ExternalValidatorSet { network, session }, keys.0));
+  }
+  Ok(sets)
+}
 /// An object usable to request notable cosigns for a block.
 pub trait RequestNotableCosigns: 'static + Send {
   /// The error type which may be encountered when requesting notable cosigns.
@@ -221,10 +331,7 @@ impl<D: Db> Cosigning<D> {
   }
   /// Fetch a cosigned Substrate block's hash by its block number.
-  pub fn cosigned_block(
-    getter: &impl Get,
-    block_number: u64,
-  ) -> Result<Option<BlockHash>, Faulted> {
+  pub fn cosigned_block(getter: &impl Get, block_number: u64) -> Result<Option<[u8; 32]>, Faulted> {
     if block_number > Self::latest_cosigned_block_number(getter)? {
       return Ok(None);
     }
@@ -239,8 +346,8 @@ impl<D: Db> Cosigning<D> {
   /// If this global session hasn't produced any notable cosigns, this will return the latest
   /// cosigns for this session.
   pub fn notable_cosigns(getter: &impl Get, global_session: [u8; 32]) -> Vec<SignedCosign> {
-    let mut cosigns = vec![];
-    for network in ExternalNetworkId::all() {
+    let mut cosigns = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
+    for network in serai_client::primitives::EXTERNAL_NETWORKS {
      if let Some(cosign) = NetworksLatestCosignedBlock::get(getter, global_session, network) {
        cosigns.push(cosign);
      }
@@ -257,7 +364,7 @@ impl<D: Db> Cosigning<D> {
    let mut cosigns = Faults::get(&self.db, faulted).expect("faulted with no faults");
    // Also include all of our recognized-as-honest cosigns in an attempt to induce fault
    // identification in those who see the faulty cosigns as honest
-    for network in ExternalNetworkId::all() {
+    for network in serai_client::primitives::EXTERNAL_NETWORKS {
      if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, faulted, network) {
        if cosign.cosign.global_session == faulted {
          cosigns.push(cosign);
@@ -269,8 +376,8 @@ impl<D: Db> Cosigning<D> {
    let Some(global_session) = evaluator::currently_evaluated_global_session(&self.db) else {
      return vec![];
    };
-    let mut cosigns = vec![];
-    for network in ExternalNetworkId::all() {
+    let mut cosigns = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
+    for network in serai_client::primitives::EXTERNAL_NETWORKS {
      if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, global_session, network) {
        cosigns.push(cosign);
      }
@@ -325,8 +432,13 @@ impl<D: Db> Cosigning<D> {
    // Check the cosign's signature
    {
-      let key =
-        *global_session.keys.get(&network).ok_or(IntakeCosignError::NonParticipatingNetwork)?;
+      let key = Public::from({
+        let Some(key) = global_session.keys.get(&network) else {
+          Err(IntakeCosignError::NonParticipatingNetwork)?
+        };
+        *key
+      });
      if !signed_cosign.verify_signature(key) {
        Err(IntakeCosignError::InvalidSignature)?;
      }
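For context on the verification path above: a cosign's signature is a plain schnorrkel signature over `Cosign::signature_message()`, domain-separated by `COSIGN_CONTEXT`. A minimal round-trip sketch follows, using the schnorrkel 0.11 API this file already depends on; the field values and the `ExternalNetworkId::Bitcoin` variant are illustrative assumptions, not taken from this diff.

use rand_core::OsRng;
use schnorrkel::Keypair;

// `Cosign`, `ExternalNetworkId`, and `COSIGN_CONTEXT` are the items defined above.
fn cosign_signature_round_trip() {
  // Hypothetical cosign; real values come from the intend task.
  let cosign = Cosign {
    global_session: [0; 32],
    block_number: 1,
    block_hash: [0; 32],
    cosigner: ExternalNetworkId::Bitcoin,
  };

  // Sign the cosign's message under the shared schnorrkel context...
  let keypair = Keypair::generate_with(OsRng);
  let signature = keypair.sign_simple(COSIGN_CONTEXT, &cosign.signature_message());

  // ...and verify it against the signer's public key, as `verify_signature` does
  assert!(keypair
    .public
    .verify_simple(COSIGN_CONTEXT, &cosign.signature_message(), &signature)
    .is_ok());
}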


@@ -1,25 +0,0 @@
-[package]
-name = "serai-cosign-types"
-version = "0.1.0"
-description = "Evaluator of cosigns for the Serai network"
-license = "AGPL-3.0-only"
-repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/cosign"
-authors = ["Luke Parker <lukeparker5132@gmail.com>"]
-keywords = []
-edition = "2021"
-publish = false
-rust-version = "1.85"
-[package.metadata.docs.rs]
-all-features = true
-rustdoc-args = ["--cfg", "docsrs"]
-[lints]
-workspace = true
-[dependencies]
-schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
-borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
-serai-primitives = { path = "../../../substrate/primitives", default-features = false, features = ["std"] }


@@ -1,72 +0,0 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
-#![deny(missing_docs)]
-//! Types used when cosigning Serai. For more info, please see `serai-cosign`.
-use borsh::{BorshSerialize, BorshDeserialize};
-use serai_primitives::{BlockHash, crypto::Public, network_id::ExternalNetworkId};
-/// The schnorrkel context to use when signing a cosign.
-pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";
-/// An intended cosign.
-#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
-pub struct CosignIntent {
-  /// The global session this cosign is being performed under.
-  pub global_session: [u8; 32],
-  /// The number of the block to cosign.
-  pub block_number: u64,
-  /// The hash of the block to cosign.
-  pub block_hash: BlockHash,
-  /// If this cosign must be handled before further cosigns are.
-  pub notable: bool,
-}
-/// A cosign.
-#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
-pub struct Cosign {
-  /// The global session this cosign is being performed under.
-  pub global_session: [u8; 32],
-  /// The number of the block to cosign.
-  pub block_number: u64,
-  /// The hash of the block to cosign.
-  pub block_hash: BlockHash,
-  /// The actual cosigner.
-  pub cosigner: ExternalNetworkId,
-}
-impl CosignIntent {
-  /// Convert this into a `Cosign`.
-  pub fn into_cosign(self, cosigner: ExternalNetworkId) -> Cosign {
-    let CosignIntent { global_session, block_number, block_hash, notable: _ } = self;
-    Cosign { global_session, block_number, block_hash, cosigner }
-  }
-}
-impl Cosign {
-  /// The message to sign for this cosign.
-  ///
-  /// This must be signed with schnorrkel, the context set to `COSIGN_CONTEXT`.
-  pub fn signature_message(&self) -> Vec<u8> {
-    // We use a schnorrkel context to domain-separate this
-    borsh::to_vec(self).unwrap()
-  }
-}
-/// A signed cosign.
-#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
-pub struct SignedCosign {
-  /// The cosign.
-  pub cosign: Cosign,
-  /// The signature for the cosign.
-  pub signature: [u8; 64],
-}
-impl SignedCosign {
-  /// Verify a cosign's signature.
-  pub fn verify_signature(&self, signer: Public) -> bool {
-    let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
-    let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };
-    signer.verify_simple(COSIGN_CONTEXT, &self.cosign.signature_message(), &signature).is_ok()
-  }
-}


@@ -8,7 +8,7 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
 publish = false
-rust-version = "1.85"
+rust-version = "1.81"
 [package.metadata.docs.rs]
 all-features = true
@@ -22,7 +22,7 @@ borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
 serai-db = { path = "../../common/db", version = "0.1" }
-serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] }
+serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
 serai-cosign = { path = "../cosign" }
 tributary-sdk = { path = "../tributary-sdk" }


@@ -8,7 +8,7 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
 publish = false
-rust-version = "1.87"
+rust-version = "1.81"
 [package.metadata.docs.rs]
 all-features = true
@@ -23,19 +23,19 @@ async-trait = { version = "0.1", default-features = false }
 rand_core = { version = "0.6", default-features = false, features = ["std"] }
 zeroize = { version = "^1.5", default-features = false, features = ["std"] }
-blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }
+blake2 = { version = "0.10", default-features = false, features = ["std"] }
 schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
 hex = { version = "0.4", default-features = false, features = ["std"] }
 borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
-serai-client-serai = { path = "../../../substrate/client/serai", default-features = false }
+serai-client = { path = "../../../substrate/client", default-features = false, features = ["serai", "borsh"] }
 serai-cosign = { path = "../../cosign" }
 tributary-sdk = { path = "../../tributary-sdk" }
 futures-util = { version = "0.3", default-features = false, features = ["std"] }
 tokio = { version = "1", default-features = false, features = ["sync"] }
-libp2p = { version = "0.56", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] }
+libp2p = { version = "0.54", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] }
 log = { version = "0.4", default-features = false, features = ["std"] }
 serai-task = { path = "../../../common/task", version = "0.1" }


@@ -7,7 +7,7 @@ use rand_core::{RngCore, OsRng};
 use blake2::{Digest, Blake2s256};
 use schnorrkel::{Keypair, PublicKey, Signature};
-use serai_client_serai::abi::primitives::crypto::Public;
+use serai_client::primitives::PublicKey as Public;
 use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
 use libp2p::{
@@ -104,7 +104,7 @@ impl OnlyValidators {
       .verify_simple(PROTOCOL.as_bytes(), &msg, &sig)
       .map_err(|_| io::Error::other("invalid signature"))?;
-    Ok(peer_id_from_public(Public(public_key.to_bytes())))
+    Ok(peer_id_from_public(Public::from_raw(public_key.to_bytes())))
   }
 }


@@ -1,11 +1,11 @@
-use core::{future::Future, str::FromStr};
+use core::future::Future;
 use std::{sync::Arc, collections::HashSet};
 use rand_core::{RngCore, OsRng};
 use tokio::sync::mpsc;
-use serai_client_serai::{RpcError, Serai};
+use serai_client::{SeraiError, Serai};
 use libp2p::{
   core::multiaddr::{Protocol, Multiaddr},
@@ -50,7 +50,7 @@ impl ContinuallyRan for DialTask {
   const DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
   const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 10 * 60;
-  type Error = RpcError;
+  type Error = SeraiError;
   fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
@@ -94,13 +94,6 @@ impl ContinuallyRan for DialTask {
         usize::try_from(OsRng.next_u64() % u64::try_from(potential_peers.len()).unwrap())
           .unwrap();
       let randomly_selected_peer = potential_peers.swap_remove(index_to_dial);
-      let Ok(randomly_selected_peer) = libp2p::Multiaddr::from_str(&randomly_selected_peer)
-      else {
-        log::error!(
-          "peer from substrate wasn't a valid `Multiaddr`: {randomly_selected_peer}"
-        );
-        continue;
-      };
       log::info!("found peer from substrate: {randomly_selected_peer}");


@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
@@ -13,10 +13,9 @@ use rand_core::{RngCore, OsRng};
 use zeroize::Zeroizing;
 use schnorrkel::Keypair;
-use serai_client_serai::{
-  abi::primitives::{
-    crypto::Public, network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet,
-  },
+use serai_client::{
+  primitives::{ExternalNetworkId, PublicKey},
+  validator_sets::primitives::ExternalValidatorSet,
   Serai,
 };
@@ -67,7 +66,7 @@ use dial::DialTask;
 const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o')
-fn peer_id_from_public(public: Public) -> PeerId {
+fn peer_id_from_public(public: PublicKey) -> PeerId {
   // 0 represents the identity Multihash, that no hash was performed
   // It's an internal constant so we can't refer to the constant inside libp2p
   PeerId::from_multihash(Multihash::wrap(0, &public.0).unwrap()).unwrap()
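The identity-multihash trick above can be exercised on its own: wrapping bytes with hash code 0 stores them verbatim rather than hashing them, so the validator's 32-byte public key becomes its libp2p `PeerId` directly. A standalone sketch follows; the exact import paths for `Multihash` and `PeerId` are assumptions based on this file's libp2p dependency, and the `<64>` size parameter matches what `PeerId::from_multihash` accepts.

use libp2p::{multihash::Multihash, PeerId};

fn peer_id_from_key_bytes(public: [u8; 32]) -> PeerId {
  // Code 0 is the identity multihash: the 32 bytes are embedded as-is, not hashed
  let multihash: Multihash<64> = Multihash::wrap(0, &public).expect("32 bytes fit in 64");
  PeerId::from_multihash(multihash).expect("identity multihashes are valid PeerIds")
}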


@@ -6,7 +6,7 @@ use std::{
 use borsh::BorshDeserialize;
-use serai_client_serai::abi::primitives::validator_sets::ExternalValidatorSet;
+use serai_client::validator_sets::primitives::ExternalValidatorSet;
 use tokio::sync::{mpsc, oneshot, RwLock};
@@ -92,8 +92,7 @@ impl SwarmTask {
           }
         }
         gossip::Event::Subscribed { .. } | gossip::Event::Unsubscribed { .. } => {}
-        gossip::Event::GossipsubNotSupported { peer_id } |
-        gossip::Event::SlowPeer { peer_id, .. } => {
+        gossip::Event::GossipsubNotSupported { peer_id } => {
           let _: Result<_, _> = self.swarm.disconnect_peer_id(peer_id);
         }
       }


@@ -4,8 +4,9 @@ use std::{
   collections::{HashSet, HashMap},
 };
-use serai_client_serai::abi::primitives::{network_id::ExternalNetworkId, validator_sets::Session};
-use serai_client_serai::{RpcError, Serai};
+use serai_client::{
+  primitives::ExternalNetworkId, validator_sets::primitives::Session, SeraiError, Serai,
+};
 use serai_task::{Task, ContinuallyRan};
@@ -51,7 +52,7 @@ impl Validators {
   async fn session_changes(
     serai: impl Borrow<Serai>,
     sessions: impl Borrow<HashMap<ExternalNetworkId, Session>>,
-  ) -> Result<Vec<(ExternalNetworkId, Session, HashSet<PeerId>)>, RpcError> {
+  ) -> Result<Vec<(ExternalNetworkId, Session, HashSet<PeerId>)>, SeraiError> {
    /*
      This uses the latest finalized block, not the latest cosigned block, which should be fine as
      in the worst case, we'd connect to unexpected validators. They still shouldn't be able to
@@ -60,18 +61,18 @@ impl Validators {
      Besides, we can't connect to historical validators, only the current validators.
    */
-    let serai = serai.borrow().state().await?;
+    let temporal_serai = serai.borrow().as_of_latest_finalized_block().await?;
+    let temporal_serai = temporal_serai.validator_sets();
    let mut session_changes = vec![];
    {
      // FuturesUnordered can be bad practice as it'll cause timeouts if infrequently polled, but
      // we poll it till it yields all futures with the most minimal processing possible
      let mut futures = FuturesUnordered::new();
-      for network in ExternalNetworkId::all() {
+      for network in serai_client::primitives::EXTERNAL_NETWORKS {
        let sessions = sessions.borrow();
-        let serai = serai.borrow();
        futures.push(async move {
-          let session = match serai.current_session(network.into()).await {
+          let session = match temporal_serai.session(network.into()).await {
            Ok(Some(session)) => session,
            Ok(None) => return Ok(None),
            Err(e) => return Err(e),
@@ -80,16 +81,12 @@ impl Validators {
          if sessions.get(&network) == Some(&session) {
            Ok(None)
          } else {
-            match serai.current_validators(network.into()).await {
-              Ok(Some(validators)) => Ok(Some((
+            match temporal_serai.active_network_validators(network.into()).await {
+              Ok(validators) => Ok(Some((
                network,
                session,
-                validators
-                  .into_iter()
-                  .map(|validator| peer_id_from_public(validator.into()))
-                  .collect(),
+                validators.into_iter().map(peer_id_from_public).collect(),
              ))),
-              Ok(None) => panic!("network has session yet no validators"),
              Err(e) => Err(e),
            }
          }
@@ -156,7 +153,7 @@ impl Validators {
   }
   /// Update the view of the validators.
-  pub(crate) async fn update(&mut self) -> Result<(), RpcError> {
+  pub(crate) async fn update(&mut self) -> Result<(), SeraiError> {
     let session_changes = Self::session_changes(&*self.serai, &self.sessions).await?;
     self.incorporate_session_changes(session_changes);
     Ok(())
@@ -209,7 +206,7 @@ impl ContinuallyRan for UpdateValidatorsTask {
   const DELAY_BETWEEN_ITERATIONS: u64 = 60;
   const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
-  type Error = RpcError;
+  type Error = SeraiError;
   fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
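The comment in `session_changes` above flags the main pitfall of `FuturesUnordered`: it only makes progress while polled, so it's pushed to and then drained promptly. A minimal, self-contained sketch of that drain pattern (trivial placeholder futures; the real code awaits RPC calls instead):

use futures_util::{stream::FuturesUnordered, StreamExt};

#[tokio::main]
async fn main() {
  let futures = FuturesUnordered::new();
  for i in 0u64 .. 3 {
    // Each future does minimal work, so polling to completion is cheap
    futures.push(async move { i * 2 });
  }
  // Poll until every future has yielded, avoiding the stale-future pitfall
  let results: Vec<u64> = futures.collect().await;
  assert_eq!(results.len(), 3);
}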


@@ -1,7 +1,7 @@
 use core::future::Future;
 use std::time::{Duration, SystemTime};
-use serai_primitives::validator_sets::{ExternalValidatorSet, KeyShares};
+use serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet};
 use futures_lite::FutureExt;
@@ -30,7 +30,7 @@ pub const MIN_BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;
 /// commit is `8 + (validators * 32) + (32 + (validators * 32))` (for the time, list of validators,
 /// and aggregate signature). Accordingly, this should be a safe over-estimate.
 pub const BATCH_SIZE_LIMIT: usize = MIN_BLOCKS_PER_BATCH *
-  (tributary_sdk::BLOCK_SIZE_LIMIT + 32 + ((KeyShares::MAX_PER_SET as usize) * 128));
+  (tributary_sdk::BLOCK_SIZE_LIMIT + 32 + ((MAX_KEY_SHARES_PER_SET as usize) * 128));
 /// Sends a heartbeat to other validators on regular intervals informing them of our Tributary's
 /// tip.


@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
@@ -7,7 +7,7 @@ use std::collections::HashMap;
 use borsh::{BorshSerialize, BorshDeserialize};
-use serai_primitives::{network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet};
+use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet};
 use serai_db::Db;
 use tributary_sdk::{ReadWrite, TransactionTrait, Tributary, TributaryReader};


@@ -5,10 +5,9 @@ use serai_db::{create_db, db_channel};
 use dkg::Participant;
-use serai_client_serai::abi::primitives::{
-  crypto::KeyPair,
-  network_id::ExternalNetworkId,
-  validator_sets::{Session, ExternalValidatorSet},
+use serai_client::{
+  primitives::ExternalNetworkId,
+  validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair},
 };
 use serai_cosign::SignedCosign;
@@ -104,7 +103,7 @@ mod _internal_db {
     // Tributary transactions to publish from the DKG confirmation task
     TributaryTransactionsFromDkgConfirmation: (set: ExternalValidatorSet) -> Transaction,
     // Participants to remove
-    RemoveParticipant: (set: ExternalValidatorSet) -> u16,
+    RemoveParticipant: (set: ExternalValidatorSet) -> Participant,
   }
 }
@@ -140,11 +139,10 @@ impl RemoveParticipant {
   pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, participant: Participant) {
     // If this set has yet to be retired, send this transaction
     if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
-      _internal_db::RemoveParticipant::send(txn, set, &u16::from(participant));
+      _internal_db::RemoveParticipant::send(txn, set, &participant);
     }
   }
   pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Participant> {
     _internal_db::RemoveParticipant::try_recv(txn, set)
-      .map(|i| Participant::new(i).expect("sent invalid participant index for removal"))
   }
 }


@@ -3,17 +3,22 @@ use std::{boxed::Box, collections::HashMap};
 use zeroize::Zeroizing;
 use rand_core::OsRng;
-use ciphersuite::{group::GroupEncoding, *};
-use dkg::{Participant, musig};
+use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
 use frost_schnorrkel::{
-  frost::{curve::Ristretto, FrostError, sign::*},
+  frost::{
+    dkg::{Participant, musig::musig},
+    FrostError,
+    sign::*,
+  },
   Schnorrkel,
 };
 use serai_db::{DbTxn, Db as DbTrait};
-#[rustfmt::skip]
-use serai_client_serai::abi::primitives::{validator_sets::ExternalValidatorSet, address::SeraiAddress};
+use serai_client::{
+  primitives::SeraiAddress,
+  validator_sets::primitives::{ExternalValidatorSet, musig_context, set_keys_message},
+};
 use serai_task::{DoesNotError, ContinuallyRan};
@@ -28,7 +33,7 @@ fn schnorrkel() -> Schnorrkel {
 fn our_i(
   set: &NewSetInformation,
-  key: &Zeroizing<<Ristretto as WrappedGroup>::F>,
+  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   data: &HashMap<Participant, Vec<u8>>,
 ) -> Participant {
   let public = SeraiAddress((Ristretto::generator() * key.deref()).to_bytes());
@@ -122,7 +127,7 @@ pub(crate) struct ConfirmDkgTask<CD: DbTrait, TD: DbTrait> {
   set: NewSetInformation,
   tributary_db: TD,
-  key: Zeroizing<<Ristretto as WrappedGroup>::F>,
+  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
   signer: Option<Signer>,
 }
@@ -131,7 +136,7 @@ impl<CD: DbTrait, TD: DbTrait> ConfirmDkgTask<CD, TD> {
     db: CD,
     set: NewSetInformation,
     tributary_db: TD,
-    key: Zeroizing<<Ristretto as WrappedGroup>::F>,
+    key: Zeroizing<<Ristretto as Ciphersuite>::F>,
   ) -> Self {
     Self { db, set, tributary_db, key, signer: None }
   }
@@ -150,15 +155,16 @@ impl<CD: DbTrait, TD: DbTrait> ConfirmDkgTask<CD, TD> {
     db: &mut CD,
     set: ExternalValidatorSet,
     attempt: u32,
-    key: Zeroizing<<Ristretto as WrappedGroup>::F>,
+    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
     signer: &mut Option<Signer>,
   ) {
     // Perform the preprocess
-    let public_key = Ristretto::generator() * key.deref();
     let (machine, preprocess) = AlgorithmMachine::new(
       schnorrkel(),
       // We use a 1-of-1 Musig here as we don't know who will actually be in this Musig yet
-      musig(ExternalValidatorSet::musig_context(&set), key, &[public_key]).unwrap(),
+      musig(&musig_context(set.into()), key, &[Ristretto::generator() * key.deref()])
+        .unwrap()
+        .into(),
     )
     .preprocess(&mut OsRng);
     // We take the preprocess so we can use it in a distinct machine with the actual Musig
@@ -193,7 +199,7 @@ impl<CD: DbTrait, TD: DbTrait> ContinuallyRan for ConfirmDkgTask<CD, TD> {
       // If we were sent a key to set, create the signer for it
       if self.signer.is_none() && KeysToConfirm::get(&self.db, self.set.set).is_some() {
         // Create and publish the initial preprocess
-        Self::preprocess(&mut self.db, self.set.set, 0, self.key.clone(), &mut self.signer);
+        Self::preprocess(&mut self.db, self.set.set, 0, &self.key, &mut self.signer);
         made_progress = true;
       }
@@ -213,13 +219,7 @@ impl<CD: DbTrait, TD: DbTrait> ContinuallyRan for ConfirmDkgTask<CD, TD> {
             id: messages::sign::SignId { attempt, .. },
           } => {
             // Create and publish the preprocess for the specified attempt
-            Self::preprocess(
-              &mut self.db,
-              self.set.set,
-              attempt,
-              self.key.clone(),
-              &mut self.signer,
-            );
+            Self::preprocess(&mut self.db, self.set.set, attempt, &self.key, &mut self.signer);
           }
           messages::sign::CoordinatorMessage::Preprocesses {
             id: messages::sign::SignId { attempt, .. },
@@ -258,12 +258,9 @@ impl<CD: DbTrait, TD: DbTrait> ContinuallyRan for ConfirmDkgTask<CD, TD> {
              })
              .collect::<Vec<_>>();
-            let keys = musig(
-              ExternalValidatorSet::musig_context(&self.set.set),
-              self.key.clone(),
-              &musig_public_keys,
-            )
-            .unwrap();
+            let keys = musig(&musig_context(self.set.set.into()), &self.key, &musig_public_keys)
+              .unwrap()
+              .into();
            // Rebuild the machine
            let (machine, preprocess_from_cache) =
@@ -297,10 +294,9 @@ impl<CD: DbTrait, TD: DbTrait> ContinuallyRan for ConfirmDkgTask<CD, TD> {
            };
            // Calculate our share
-            let (machine, share) = match handle_frost_error(machine.sign(
-              preprocesses,
-              &ExternalValidatorSet::set_keys_message(&self.set.set, &key_pair),
-            )) {
+            let (machine, share) = match handle_frost_error(
+              machine.sign(preprocesses, &set_keys_message(&self.set.set, &key_pair)),
+            ) {
              Ok((machine, share)) => (machine, share),
              // This yields the *musig participant index*
              Err(participant) => {


@@ -4,24 +4,18 @@ use std::{sync::Arc, collections::HashMap, time::Instant};
 use zeroize::{Zeroize, Zeroizing};
 use rand_core::{RngCore, OsRng};
-use dalek_ff_group::Ristretto;
 use ciphersuite::{
   group::{ff::PrimeField, GroupEncoding},
-  *,
+  Ciphersuite, Ristretto,
 };
 use borsh::BorshDeserialize;
 use tokio::sync::mpsc;
-use serai_client_serai::{
-  abi::primitives::{
-    BlockHash,
-    crypto::{Public, Signature, ExternalKey, KeyPair},
-    network_id::ExternalNetworkId,
-    validator_sets::ExternalValidatorSet,
-    address::SeraiAddress,
-  },
+use serai_client::{
+  primitives::{ExternalNetworkId, PublicKey, SeraiAddress, Signature},
+  validator_sets::primitives::{ExternalValidatorSet, KeyPair},
   Serai,
 };
 use message_queue::{Service, client::MessageQueue};
@@ -66,7 +60,9 @@ async fn serai() -> Arc<Serai> {
     let Ok(serai) = Serai::new(format!(
       "http://{}:9944",
       serai_env::var("SERAI_HOSTNAME").expect("Serai hostname wasn't provided")
-    )) else {
+    ))
+    .await
+    else {
       log::error!("couldn't connect to the Serai node");
       tokio::time::sleep(delay).await;
       delay = (delay + SERAI_CONNECTION_DELAY).min(MAX_SERAI_CONNECTION_DELAY);
@@ -216,12 +212,10 @@ async fn handle_network(
         &mut txn,
         ExternalValidatorSet { network, session },
         &KeyPair(
-          Public(substrate_key),
-          ExternalKey(
-            network_key
-              .try_into()
-              .expect("generated a network key which exceeds the maximum key length"),
-          ),
+          PublicKey::from_raw(substrate_key),
+          network_key
+            .try_into()
+            .expect("generated a network key which exceeds the maximum key length"),
         ),
       );
     }
@@ -295,7 +289,6 @@ async fn handle_network(
     },
     messages::ProcessorMessage::Substrate(msg) => match msg {
       messages::substrate::ProcessorMessage::SubstrateBlockAck { block, plans } => {
-        let block = BlockHash(block);
         let mut by_session = HashMap::new();
         for plan in plans {
           by_session
@@ -358,7 +351,7 @@ async fn main() {
     let mut key_bytes = [0; 32];
     key_bytes.copy_from_slice(&key_vec);
     key_vec.zeroize();
-    let key = Zeroizing::new(<Ristretto as WrappedGroup>::F::from_repr(key_bytes).unwrap());
+    let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::from_repr(key_bytes).unwrap());
     key_bytes.zeroize();
     key
   };
@@ -445,7 +438,7 @@ async fn main() {
     EphemeralEventStream::new(
       db.clone(),
      serai.clone(),
-      SeraiAddress((<Ristretto as WrappedGroup>::generator() * serai_key.deref()).to_bytes()),
+      SeraiAddress((<Ristretto as Ciphersuite>::generator() * serai_key.deref()).to_bytes()),
    )
    .continually_run(substrate_ephemeral_task_def, vec![substrate_task]),
  );
@@ -487,7 +480,7 @@ async fn main() {
  );
  // Handle each of the networks
-  for network in ExternalNetworkId::all() {
+  for network in serai_client::primitives::EXTERNAL_NETWORKS {
    tokio::spawn(handle_network(db.clone(), message_queue.clone(), serai.clone(), network));
  }
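The key-loading block in `main` above pairs `Zeroizing` with explicit `zeroize()` calls on intermediate buffers, so the raw scalar bytes don't linger on the stack after parsing. A standalone sketch of the same pattern, under the old side's `Ciphersuite` naming (the 32-byte array stands in for the env-provided key):

use zeroize::{Zeroize, Zeroizing};
use ciphersuite::{group::ff::PrimeField, Ciphersuite, Ristretto};

fn load_key(mut key_bytes: [u8; 32]) -> Zeroizing<<Ristretto as Ciphersuite>::F> {
  // Parse the scalar, then wipe the raw bytes so only the guarded copy remains
  let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::from_repr(key_bytes).unwrap());
  key_bytes.zeroize();
  key
}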


@@ -3,17 +3,13 @@ use std::sync::Arc;
 use zeroize::Zeroizing;
-use ciphersuite::*;
-use dalek_ff_group::Ristretto;
+use ciphersuite::{Ciphersuite, Ristretto};
 use tokio::sync::mpsc;
 use serai_db::{DbTxn, Db as DbTrait};
-use serai_client_serai::abi::primitives::{
-  network_id::ExternalNetworkId,
-  validator_sets::{Session, ExternalValidatorSet},
-};
+use serai_client::validator_sets::primitives::{Session, ExternalValidatorSet};
 use message_queue::{Service, Metadata, client::MessageQueue};
 use tributary_sdk::Tributary;
@@ -26,7 +22,7 @@ use serai_coordinator_p2p::P2p;
 use crate::{Db, KeySet};
 pub(crate) struct SubstrateTask<P: P2p> {
-  pub(crate) serai_key: Zeroizing<<Ristretto as WrappedGroup>::F>,
+  pub(crate) serai_key: Zeroizing<<Ristretto as Ciphersuite>::F>,
   pub(crate) db: Db,
   pub(crate) message_queue: Arc<MessageQueue>,
   pub(crate) p2p: P,
@@ -42,7 +38,7 @@ impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
       let mut made_progress = false;
       // Handle the Canonical events
-      for network in ExternalNetworkId::all() {
+      for network in serai_client::primitives::EXTERNAL_NETWORKS {
         loop {
           let mut txn = self.db.txn();
           let Some(msg) = serai_coordinator_substrate::Canonical::try_recv(&mut txn, network)


@@ -4,14 +4,14 @@ use std::sync::Arc;
 use zeroize::Zeroizing;
 use rand_core::OsRng;
 use blake2::{digest::typenum::U32, Digest, Blake2s};
-use ciphersuite::*;
-use dalek_ff_group::Ristretto;
+use ciphersuite::{Ciphersuite, Ristretto};
 use tokio::sync::mpsc;
 use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel};
-use serai_client_serai::abi::primitives::validator_sets::ExternalValidatorSet;
+use scale::Encode;
+use serai_client::validator_sets::primitives::ExternalValidatorSet;
 use tributary_sdk::{TransactionKind, TransactionError, ProvidedError, TransactionTrait, Tributary};
@@ -67,7 +67,9 @@ async fn provide_transaction<TD: DbTrait, P: P2p>(
     // advancing
     Err(ProvidedError::LocalMismatchesOnChain) => loop {
       log::error!(
-        "Tributary {set:?} was supposed to provide {tx:?} but peers disagree, halting Tributary",
+        "Tributary {:?} was supposed to provide {:?} but peers disagree, halting Tributary",
+        set,
+        tx,
       );
       // Print this every five minutes as this does need to be handled
       tokio::time::sleep(Duration::from_secs(5 * 60)).await;
@@ -159,7 +161,7 @@ impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan
 #[must_use]
 async fn add_signed_unsigned_transaction<TD: DbTrait, P: P2p>(
   tributary: &Tributary<TD, Transaction, P>,
-  key: &Zeroizing<<Ristretto as WrappedGroup>::F>,
+  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   mut tx: Transaction,
 ) -> bool {
   // If this is a signed transaction, sign it
@@ -212,7 +214,7 @@ async fn add_with_recognition_check<TD: DbTrait, P: P2p>(
   set: ExternalValidatorSet,
   tributary_db: &mut TD,
   tributary: &Tributary<TD, Transaction, P>,
-  key: &Zeroizing<<Ristretto as WrappedGroup>::F>,
+  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   tx: Transaction,
 ) -> bool {
   let kind = tx.kind();
@@ -251,7 +253,7 @@ pub(crate) struct AddTributaryTransactionsTask<CD: DbTrait, TD: DbTrait, P: P2p>
   tributary_db: TD,
   tributary: Tributary<TD, Transaction, P>,
   set: NewSetInformation,
-  key: Zeroizing<<Ristretto as WrappedGroup>::F>,
+  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
 }
 impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for AddTributaryTransactionsTask<CD, TD, P> {
   type Error = DoesNotError;
@@ -381,7 +383,7 @@ pub(crate) struct SignSlashReportTask<CD: DbTrait, TD: DbTrait, P: P2p> {
   tributary_db: TD,
   tributary: Tributary<TD, Transaction, P>,
   set: NewSetInformation,
-  key: Zeroizing<<Ristretto as WrappedGroup>::F>,
+  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
 }
 impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for SignSlashReportTask<CD, TD, P> {
   type Error = DoesNotError;
@@ -469,7 +471,7 @@ pub(crate) async fn spawn_tributary<P: P2p>(
   p2p: P,
   p2p_add_tributary: &mpsc::UnboundedSender<(ExternalValidatorSet, Tributary<Db, Transaction, P>)>,
   set: NewSetInformation,
-  serai_key: Zeroizing<<Ristretto as WrappedGroup>::F>,
+  serai_key: Zeroizing<<Ristretto as Ciphersuite>::F>,
 ) {
   // Don't spawn retired Tributaries
   if crate::db::RetiredTributary::get(&db, set.set.network).map(|session| session.0) >=
@@ -478,8 +480,7 @@ pub(crate) async fn spawn_tributary<P: P2p>(
     return;
   }
-  let genesis =
-    <[u8; 32]>::from(Blake2s::<U32>::digest(borsh::to_vec(&(set.serai_block, set.set)).unwrap()));
+  let genesis = <[u8; 32]>::from(Blake2s::<U32>::digest((set.serai_block, set.set).encode()));
   // Since the Serai block will be finalized, then cosigned, before we handle this, this time will
   // be a couple of minutes stale. While the Tributary will still function with a start time in the
@@ -490,7 +491,7 @@ pub(crate) async fn spawn_tributary<P: P2p>(
   let mut tributary_validators = Vec::with_capacity(set.validators.len());
   for (validator, weight) in set.validators.iter().copied() {
-    let validator_key = <Ristretto as GroupIo>::read_G(&mut validator.0.as_slice())
+    let validator_key = <Ristretto as Ciphersuite>::read_G(&mut validator.0.as_slice())
       .expect("Serai validator had an invalid public key");
     let weight = u64::from(weight);
     tributary_validators.push((validator_key, weight));
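Both sides of the genesis change above derive the Tributary genesis as a Blake2s-256 digest of the encoded `(serai_block, set)` pair; they differ only in whether the encoding is borsh or SCALE. The digest step itself, isolated as a standalone sketch (the caller supplies whichever encoding is in use):

use blake2::{digest::typenum::U32, Blake2s, Digest};

fn genesis_from_encoding(encoded: &[u8]) -> [u8; 32] {
  // Blake2s parameterized to a 32-byte output, as in `spawn_tributary`
  <[u8; 32]>::from(Blake2s::<U32>::digest(encoded))
}

Since the encoding is deterministic on both sides, every validator derives the same genesis for a given `(serai_block, set)`, which is what lets the Tributary bootstrap without further coordination.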


@@ -8,7 +8,7 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
 publish = false
-rust-version = "1.85"
+rust-version = "1.81"
 [package.metadata.docs.rs]
 all-features = true
@@ -20,15 +20,17 @@ workspace = true
 [dependencies]
 bitvec = { version = "1", default-features = false, features = ["std"] }
+scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
 borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
 dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] }
-serai-client-serai = { path = "../../substrate/client/serai", default-features = false }
+serai-client = { path = "../../substrate/client", version = "0.1", default-features = false, features = ["serai", "borsh"] }
 log = { version = "0.4", default-features = false, features = ["std"] }
 futures = { version = "0.3", default-features = false, features = ["std"] }
+tokio = { version = "1", default-features = false }
 serai-db = { path = "../../common/db", version = "0.1.1" }
 serai-task = { path = "../../common/task", version = "0.1" }


@@ -1,6 +1,6 @@
 AGPL-3.0-only license
-Copyright (c) 2023-2025 Luke Parker
+Copyright (c) 2023-2024 Luke Parker
 This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU Affero General Public License Version 3 as


@@ -3,13 +3,7 @@ use std::sync::Arc;
use futures::stream::{StreamExt, FuturesOrdered}; use futures::stream::{StreamExt, FuturesOrdered};
use serai_client_serai::{ use serai_client::{validator_sets::primitives::ExternalValidatorSet, Serai};
abi::{
self,
primitives::{network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet},
},
Serai,
};
use messages::substrate::{InInstructionResult, ExecutedBatch, CoordinatorMessage}; use messages::substrate::{InInstructionResult, ExecutedBatch, CoordinatorMessage};
@@ -21,7 +15,6 @@ use serai_cosign::Cosigning;
create_db!( create_db!(
CoordinatorSubstrateCanonical { CoordinatorSubstrateCanonical {
NextBlock: () -> u64, NextBlock: () -> u64,
LastIndexedBatchId: (network: ExternalNetworkId) -> u32,
} }
); );
@@ -52,10 +45,10 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
// These are all the events which generate canonical messages // These are all the events which generate canonical messages
struct CanonicalEvents { struct CanonicalEvents {
time: u64, time: u64,
set_keys_events: Vec<abi::validator_sets::Event>, key_gen_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
slash_report_events: Vec<abi::validator_sets::Event>, set_retired_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
batch_events: Vec<abi::in_instructions::Event>, batch_events: Vec<serai_client::in_instructions::InInstructionsEvent>,
burn_events: Vec<abi::coins::Event>, burn_events: Vec<serai_client::coins::CoinsEvent>,
} }
// For a cosigned block, fetch all relevant events // For a cosigned block, fetch all relevant events
@@ -73,24 +66,40 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
} }
Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()), Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()),
}; };
let events = serai.events(block_hash).await.map_err(|e| format!("{e}"))?; let temporal_serai = serai.as_of(block_hash);
let set_keys_events = events.validator_sets().set_keys_events().cloned().collect(); let temporal_serai_validators = temporal_serai.validator_sets();
let slash_report_events = let temporal_serai_instructions = temporal_serai.in_instructions();
events.validator_sets().slash_report_events().cloned().collect(); let temporal_serai_coins = temporal_serai.coins();
let batch_events = events.in_instructions().batch_events().cloned().collect();
let burn_events = events.coins().burn_with_instruction_events().cloned().collect(); let (block, key_gen_events, set_retired_events, batch_events, burn_events) =
let Some(block) = serai.block(block_hash).await.map_err(|e| format!("{e:?}"))? else { tokio::try_join!(
serai.block(block_hash),
temporal_serai_validators.key_gen_events(),
temporal_serai_validators.set_retired_events(),
temporal_serai_instructions.batch_events(),
temporal_serai_coins.burn_with_instruction_events(),
)
.map_err(|e| format!("{e:?}"))?;
let Some(block) = block else {
Err(format!("Serai node didn't have cosigned block #{block_number}"))? Err(format!("Serai node didn't have cosigned block #{block_number}"))?
}; };
// We use time in seconds, not milliseconds, here let time = if block_number == 0 {
let time = block.header.unix_time_in_millis() / 1000; block.time().unwrap_or(0)
} else {
// Serai's block time is in milliseconds
block
.time()
.ok_or_else(|| "non-genesis Serai block didn't have a time".to_string())? /
1000
};
Ok(( Ok((
block_number, block_number,
CanonicalEvents { CanonicalEvents {
time, time,
set_keys_events, key_gen_events,
slash_report_events, set_retired_events,
batch_events, batch_events,
burn_events, burn_events,
}, },
@@ -122,9 +131,10 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
let mut txn = self.db.txn(); let mut txn = self.db.txn();
for set_keys in block.set_keys_events { for key_gen in block.key_gen_events {
let abi::validator_sets::Event::SetKeys { set, key_pair } = &set_keys else { let serai_client::validator_sets::ValidatorSetsEvent::KeyGen { set, key_pair } = &key_gen
panic!("`SetKeys` event wasn't a `SetKeys` event: {set_keys:?}"); else {
panic!("KeyGen event wasn't a KeyGen event: {key_gen:?}");
}; };
crate::Canonical::send( crate::Canonical::send(
&mut txn, &mut txn,
@@ -137,10 +147,12 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
); );
} }
for slash_report in block.slash_report_events { for set_retired in block.set_retired_events {
let abi::validator_sets::Event::SlashReport { set } = &slash_report else { let serai_client::validator_sets::ValidatorSetsEvent::SetRetired { set } = &set_retired
panic!("`SlashReport` event wasn't a `SlashReport` event: {slash_report:?}"); else {
panic!("SetRetired event wasn't a SetRetired event: {set_retired:?}");
}; };
let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
crate::Canonical::send( crate::Canonical::send(
&mut txn, &mut txn,
set.network, set.network,
@@ -148,12 +160,10 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
); );
} }
for network in ExternalNetworkId::all() { for network in serai_client::primitives::EXTERNAL_NETWORKS {
let mut batch = None; let mut batch = None;
for this_batch in &block.batch_events { for this_batch in &block.batch_events {
// Only irrefutable as this is the only member of the enum at this time let serai_client::in_instructions::InInstructionsEvent::Batch {
#[expect(irrefutable_let_patterns)]
let abi::in_instructions::Event::Batch {
network: batch_network, network: batch_network,
publishing_session, publishing_session,
id, id,
@@ -184,19 +194,14 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
}) })
.collect(), .collect(),
}); });
if LastIndexedBatchId::get(&txn, network) != id.checked_sub(1) {
panic!(
"next batch from Serai's ID was not an increment of the last indexed batch's ID"
);
}
LastIndexedBatchId::set(&mut txn, network, id);
} }
} }
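
The continuity check above enforces that indexed batch IDs increment by exactly one, using `checked_sub` so that batch 0 pairs with the `None` (nothing-indexed-yet) state. A self-contained sketch of that invariant:

/// Panics unless `id` directly follows the last indexed batch ID, where
/// `None` means nothing has been indexed yet (so `id` must be 0).
fn assert_sequential(last_indexed: Option<u32>, id: u32) {
    // `id.checked_sub(1)` is None exactly when `id == 0`, matching the
    // nothing-indexed-yet state.
    if last_indexed != id.checked_sub(1) {
        panic!("next batch from Serai's ID was not an increment of the last indexed batch's ID");
    }
}

fn main() {
    assert_sequential(None, 0);    // the very first batch
    assert_sequential(Some(6), 7); // a strict increment
    // assert_sequential(Some(6), 8) would panic: a batch was skipped
}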
let mut burns = vec![]; let mut burns = vec![];
for burn in &block.burn_events { for burn in &block.burn_events {
let abi::coins::Event::BurnWithInstruction { from: _, instruction } = &burn else { let serai_client::coins::CoinsEvent::BurnWithInstruction { from: _, instruction } =
&burn
else {
panic!("BurnWithInstruction event wasn't a BurnWithInstruction event: {burn:?}"); panic!("BurnWithInstruction event wasn't a BurnWithInstruction event: {burn:?}");
}; };
if instruction.balance.coin.network() == network { if instruction.balance.coin.network() == network {
@@ -218,7 +223,3 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
} }
} }
} }
pub(crate) fn last_indexed_batch_id(txn: &impl DbTxn, network: ExternalNetworkId) -> Option<u32> {
LastIndexedBatchId::get(txn, network)
}

View File

@@ -3,14 +3,9 @@ use std::sync::Arc;
use futures::stream::{StreamExt, FuturesOrdered}; use futures::stream::{StreamExt, FuturesOrdered};
use serai_client_serai::{ use serai_client::{
abi::primitives::{ primitives::{SeraiAddress, EmbeddedEllipticCurve},
BlockHash, validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet},
crypto::EmbeddedEllipticCurveKeys as EmbeddedEllipticCurveKeysStruct,
network_id::ExternalNetworkId,
validator_sets::{KeyShares, ExternalValidatorSet},
address::SeraiAddress,
},
Serai, Serai,
}; };
@@ -24,10 +19,6 @@ use crate::NewSetInformation;
create_db!( create_db!(
CoordinatorSubstrateEphemeral { CoordinatorSubstrateEphemeral {
NextBlock: () -> u64, NextBlock: () -> u64,
EmbeddedEllipticCurveKeys: (
network: ExternalNetworkId,
validator: SeraiAddress
) -> EmbeddedEllipticCurveKeysStruct,
} }
); );
@@ -58,11 +49,10 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
// These are all the events which generate canonical messages // These are all the events which generate canonical messages
struct EphemeralEvents { struct EphemeralEvents {
block_hash: BlockHash, block_hash: [u8; 32],
time: u64, time: u64,
embedded_elliptic_curve_keys_events: Vec<serai_client_serai::abi::validator_sets::Event>, new_set_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
set_decided_events: Vec<serai_client_serai::abi::validator_sets::Event>, accepted_handover_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
accepted_handover_events: Vec<serai_client_serai::abi::validator_sets::Event>,
} }
// For a cosigned block, fetch all relevant events // For a cosigned block, fetch all relevant events
@@ -81,31 +71,31 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()), Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()),
}; };
let events = serai.events(block_hash).await.map_err(|e| format!("{e}"))?; let temporal_serai = serai.as_of(block_hash);
let embedded_elliptic_curve_keys_events = events let temporal_serai_validators = temporal_serai.validator_sets();
.validator_sets() let (block, new_set_events, accepted_handover_events) = tokio::try_join!(
.set_embedded_elliptic_curve_keys_events() serai.block(block_hash),
.cloned() temporal_serai_validators.new_set_events(),
.collect::<Vec<_>>(); temporal_serai_validators.accepted_handover_events(),
let set_decided_events = )
events.validator_sets().set_decided_events().cloned().collect::<Vec<_>>(); .map_err(|e| format!("{e:?}"))?;
let accepted_handover_events = let Some(block) = block else {
events.validator_sets().accepted_handover_events().cloned().collect::<Vec<_>>();
let Some(block) = serai.block(block_hash).await.map_err(|e| format!("{e:?}"))? else {
Err(format!("Serai node didn't have cosigned block #{block_number}"))? Err(format!("Serai node didn't have cosigned block #{block_number}"))?
}; };
// We use time in seconds, not milliseconds, here let time = if block_number == 0 {
let time = block.header.unix_time_in_millis() / 1000; block.time().unwrap_or(0)
} else {
// Serai's block time is in milliseconds
block
.time()
.ok_or_else(|| "non-genesis Serai block didn't have a time".to_string())? /
1000
};
Ok(( Ok((
block_number, block_number,
EphemeralEvents { EphemeralEvents { block_hash, time, new_set_events, accepted_handover_events },
block_hash,
time,
embedded_elliptic_curve_keys_events,
set_decided_events,
accepted_handover_events,
},
)) ))
} }
} }
@@ -136,82 +126,105 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
let mut txn = self.db.txn(); let mut txn = self.db.txn();
for event in block.embedded_elliptic_curve_keys_events { for new_set in block.new_set_events {
let serai_client_serai::abi::validator_sets::Event::SetEmbeddedEllipticCurveKeys { let serai_client::validator_sets::ValidatorSetsEvent::NewSet { set } = &new_set else {
validator, panic!("NewSet event wasn't a NewSet event: {new_set:?}");
keys,
} = &event
else {
panic!(
"{}: {event:?}",
"`SetEmbeddedEllipticCurveKeys` event wasn't a `SetEmbeddedEllipticCurveKeys` event"
);
}; };
EmbeddedEllipticCurveKeys::set(&mut txn, keys.network(), *validator, keys);
}
for set_decided in block.set_decided_events {
let serai_client_serai::abi::validator_sets::Event::SetDecided { set, validators } =
&set_decided
else {
panic!("`SetDecided` event wasn't a `SetDecided` event: {set_decided:?}");
};
// We only coordinate over external networks // We only coordinate over external networks
let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue }; let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
let validators =
validators.iter().map(|(validator, weight)| (*validator, weight.0)).collect::<Vec<_>>();
let serai = self.serai.as_of(block.block_hash);
let serai = serai.validator_sets();
let Some(validators) =
serai.participants(set.network.into()).await.map_err(|e| format!("{e:?}"))?
else {
Err(format!(
"block #{block_number} declared a new set but didn't have the participants"
))?
};
let validators = validators
.into_iter()
.map(|(validator, weight)| (SeraiAddress::from(validator), weight))
.collect::<Vec<_>>();
let in_set = validators.iter().any(|(validator, _)| *validator == self.validator); let in_set = validators.iter().any(|(validator, _)| *validator == self.validator);
if in_set { if in_set {
if u16::try_from(validators.len()).is_err() { if u16::try_from(validators.len()).is_err() {
Err("more than u16::MAX validators sent")?; Err("more than u16::MAX validators sent")?;
} }
let Ok(validators) = validators
.into_iter()
.map(|(validator, weight)| u16::try_from(weight).map(|weight| (validator, weight)))
.collect::<Result<Vec<_>, _>>()
else {
Err("validator's weight exceeded u16::MAX".to_string())?
};
// Do the summation in u32 so we don't risk a u16 overflow // Do the summation in u32 so we don't risk a u16 overflow
let total_weight = validators.iter().map(|(_, weight)| u32::from(*weight)).sum::<u32>(); let total_weight = validators.iter().map(|(_, weight)| u32::from(*weight)).sum::<u32>();
if total_weight > u32::from(KeyShares::MAX_PER_SET) { if total_weight > u32::from(MAX_KEY_SHARES_PER_SET) {
Err(format!( Err(format!(
"{set:?} has {total_weight} key shares when the max is {}", "{set:?} has {total_weight} key shares when the max is {MAX_KEY_SHARES_PER_SET}"
KeyShares::MAX_PER_SET
))?; ))?;
} }
let total_weight = u16::try_from(total_weight) let total_weight = u16::try_from(total_weight).unwrap();
.expect("value smaller than `u16` constant but doesn't fit in `u16`");
// Fetch all of the validators' embedded elliptic curve keys // Fetch all of the validators' embedded elliptic curve keys
let mut embedded_elliptic_curve_keys = FuturesOrdered::new();
for (validator, _) in &validators {
let validator = *validator;
// try_join doesn't return a future so we need to wrap it in this additional async
// block
embedded_elliptic_curve_keys.push_back(async move {
tokio::try_join!(
// One future to fetch the substrate embedded key
serai.embedded_elliptic_curve_key(
validator.into(),
EmbeddedEllipticCurve::Embedwards25519
),
// One future to fetch the external embedded key, if there is a distinct curve
async {
// `embedded_elliptic_curves` is documented to have the second entry be the
// network-specific curve (if it exists and is distinct from Embedwards25519)
if let Some(curve) = set.network.embedded_elliptic_curves().get(1) {
serai.embedded_elliptic_curve_key(validator.into(), *curve).await.map(Some)
} else {
Ok(None)
}
}
)
.map(|(substrate_embedded_key, external_embedded_key)| {
(validator, substrate_embedded_key, external_embedded_key)
})
});
}
let mut evrf_public_keys = Vec::with_capacity(usize::from(total_weight)); let mut evrf_public_keys = Vec::with_capacity(usize::from(total_weight));
for (validator, weight) in &validators { for (validator, weight) in &validators {
let keys = match EmbeddedEllipticCurveKeys::get(&txn, set.network, *validator) let (future_validator, substrate_embedded_key, external_embedded_key) =
.expect("selected validator lacked embedded elliptic curve keys") embedded_elliptic_curve_keys.next().await.unwrap().map_err(|e| format!("{e:?}"))?;
{ assert_eq!(*validator, future_validator);
EmbeddedEllipticCurveKeysStruct::Bitcoin(substrate, external) => { let external_embedded_key =
assert_eq!(set.network, ExternalNetworkId::Bitcoin); external_embedded_key.unwrap_or(substrate_embedded_key.clone());
(substrate, external.to_vec()) match (substrate_embedded_key, external_embedded_key) {
(Some(substrate_embedded_key), Some(external_embedded_key)) => {
let substrate_embedded_key = <[u8; 32]>::try_from(substrate_embedded_key)
.map_err(|_| "Embedwards25519 key wasn't 32 bytes".to_string())?;
for _ in 0 .. *weight {
evrf_public_keys.push((substrate_embedded_key, external_embedded_key.clone()));
}
} }
EmbeddedEllipticCurveKeysStruct::Ethereum(substrate, external) => { _ => Err("NewSet with validator missing an embedded key".to_string())?,
assert_eq!(set.network, ExternalNetworkId::Ethereum);
(substrate, external.to_vec())
}
EmbeddedEllipticCurveKeysStruct::Monero(substrate) => {
assert_eq!(set.network, ExternalNetworkId::Monero);
(substrate, substrate.to_vec())
}
};
for _ in 0 .. *weight {
evrf_public_keys.push(keys.clone());
} }
} }
let mut new_set = NewSetInformation { let mut new_set = NewSetInformation {
set, set,
serai_block: block.block_hash.0, serai_block: block.block_hash,
declaration_time: block.time, declaration_time: block.time,
// TODO: This should be inlined into the Processor's key gen code // TODO: This should be inlined into the Processor's key gen code
// It's legacy from when we removed participants from the key gen // It's legacy from when we removed participants from the key gen
threshold: ((total_weight * 2) / 3) + 1, threshold: ((total_weight * 2) / 3) + 1,
// TODO: Why are `validators` and `evrf_public_keys` two separate fields?
validators, validators,
evrf_public_keys, evrf_public_keys,
participant_indexes: Default::default(), participant_indexes: Default::default(),
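
`FuturesOrdered` is what lets the per-validator key fetches run concurrently while still completing in push order, so the consuming loop can pair each result with its validator positionally (hence the `assert_eq!` on the returned validator). A minimal sketch of that discipline, with a stand-in fetch instead of the real RPC:

use futures::stream::{FuturesOrdered, StreamExt};

async fn fetch_key(validator: u8) -> Result<(u8, [u8; 32]), String> {
    // Stand-in for the RPC fetching a validator's embedded keys.
    Ok((validator, [validator; 32]))
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let validators = [1u8, 2, 3];

    let mut keys = FuturesOrdered::new();
    for validator in validators {
        // The futures run concurrently yet complete in push order.
        keys.push_back(fetch_key(validator));
    }

    for validator in validators {
        let (fetched_for, key) = keys.next().await.unwrap()?;
        // Push order is preserved, mirroring the assert_eq! above.
        assert_eq!(validator, fetched_for);
        println!("validator {validator} -> key byte {}", key[0]);
    }
    Ok(())
}

The `((total_weight * 2) / 3) + 1` threshold above is the usual BFT bound: with `total_weight = 10` it yields 7, the smallest share count strictly greater than two thirds.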
@@ -225,7 +238,7 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
} }
for accepted_handover in block.accepted_handover_events { for accepted_handover in block.accepted_handover_events {
let serai_client_serai::abi::validator_sets::Event::AcceptedHandover { set } = let serai_client::validator_sets::ValidatorSetsEvent::AcceptedHandover { set } =
&accepted_handover &accepted_handover
else { else {
panic!("AcceptedHandover event wasn't a AcceptedHandover event: {accepted_handover:?}"); panic!("AcceptedHandover event wasn't a AcceptedHandover event: {accepted_handover:?}");

View File

@@ -1,21 +1,18 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
#![deny(missing_docs)] #![deny(missing_docs)]
use std::collections::HashMap; use std::collections::HashMap;
use scale::{Encode, Decode};
use borsh::{BorshSerialize, BorshDeserialize}; use borsh::{BorshSerialize, BorshDeserialize};
use dkg::Participant; use dkg::Participant;
use serai_client_serai::abi::{ use serai_client::{
primitives::{ primitives::{ExternalNetworkId, SeraiAddress, Signature},
network_id::ExternalNetworkId, validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair, SlashReport},
validator_sets::{Session, ExternalValidatorSet, SlashReport}, in_instructions::primitives::SignedBatch,
crypto::{Signature, KeyPair},
address::SeraiAddress,
instructions::SignedBatch,
},
Transaction, Transaction,
}; };
@@ -23,7 +20,6 @@ use serai_db::*;
mod canonical; mod canonical;
pub use canonical::CanonicalEventStream; pub use canonical::CanonicalEventStream;
use canonical::last_indexed_batch_id;
mod ephemeral; mod ephemeral;
pub use ephemeral::EphemeralEventStream; pub use ephemeral::EphemeralEventStream;
@@ -42,7 +38,7 @@ pub struct NewSetInformation {
pub set: ExternalValidatorSet, pub set: ExternalValidatorSet,
/// The Serai block which declared it. /// The Serai block which declared it.
pub serai_block: [u8; 32], pub serai_block: [u8; 32],
/// The time of the block which declared it, in seconds since the epoch. /// The time of the block which declared it, in seconds.
pub declaration_time: u64, pub declaration_time: u64,
/// The threshold to use. /// The threshold to use.
pub threshold: u16, pub threshold: u16,
@@ -101,9 +97,9 @@ mod _public_db {
create_db!( create_db!(
CoordinatorSubstrate { CoordinatorSubstrate {
// Keys to set on the Serai network // Keys to set on the Serai network
Keys: (network: ExternalNetworkId) -> (Session, Transaction), Keys: (network: ExternalNetworkId) -> (Session, Vec<u8>),
// Slash reports to publish onto the Serai network // Slash reports to publish onto the Serai network
SlashReports: (network: ExternalNetworkId) -> (Session, Transaction), SlashReports: (network: ExternalNetworkId) -> (Session, Vec<u8>),
} }
); );
} }
@@ -176,19 +172,20 @@ impl Keys {
} }
} }
let tx = serai_client_serai::ValidatorSets::set_keys( let tx = serai_client::validator_sets::SeraiValidatorSets::set_keys(
set.network, set.network,
key_pair, key_pair,
signature_participants, signature_participants,
signature, signature,
); );
_public_db::Keys::set(txn, set.network, &(set.session, tx)); _public_db::Keys::set(txn, set.network, &(set.session, tx.encode()));
} }
pub(crate) fn take( pub(crate) fn take(
txn: &mut impl DbTxn, txn: &mut impl DbTxn,
network: ExternalNetworkId, network: ExternalNetworkId,
) -> Option<(Session, Transaction)> { ) -> Option<(Session, Transaction)> {
_public_db::Keys::take(txn, network) let (session, tx) = _public_db::Keys::take(txn, network)?;
Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
} }
} }
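
One side of this hunk stores the transaction as a SCALE-encoded `Vec<u8>` and decodes it again in `take`, trusting its own prior writes (hence the `unwrap()`). A minimal sketch of that round trip, assuming `parity-scale-codec` with the `derive` feature and a toy `Tx` type:

use parity_scale_codec::{Encode, Decode};

#[derive(Encode, Decode, PartialEq, Debug)]
struct Tx { nonce: u32, payload: Vec<u8> }

fn main() {
    let tx = Tx { nonce: 7, payload: vec![1, 2, 3] };

    // What the `set` side persists: an opaque byte blob.
    let stored: Vec<u8> = tx.encode();

    // What the `take` side does: decode, trusting our own writes.
    let decoded = Tx::decode(&mut stored.as_slice()).unwrap();
    assert_eq!(tx, decoded);
}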
@@ -197,7 +194,7 @@ pub struct SignedBatches;
impl SignedBatches { impl SignedBatches {
/// Send a `SignedBatch` to publish onto Serai. /// Send a `SignedBatch` to publish onto Serai.
pub fn send(txn: &mut impl DbTxn, batch: &SignedBatch) { pub fn send(txn: &mut impl DbTxn, batch: &SignedBatch) {
_public_db::SignedBatches::send(txn, batch.batch.network(), batch); _public_db::SignedBatches::send(txn, batch.batch.network, batch);
} }
pub(crate) fn try_recv(txn: &mut impl DbTxn, network: ExternalNetworkId) -> Option<SignedBatch> { pub(crate) fn try_recv(txn: &mut impl DbTxn, network: ExternalNetworkId) -> Option<SignedBatch> {
_public_db::SignedBatches::try_recv(txn, network) _public_db::SignedBatches::try_recv(txn, network)
@@ -224,14 +221,18 @@ impl SlashReports {
} }
} }
let tx = let tx = serai_client::validator_sets::SeraiValidatorSets::report_slashes(
serai_client_serai::ValidatorSets::report_slashes(set.network, slash_report, signature); set.network,
_public_db::SlashReports::set(txn, set.network, &(set.session, tx)); slash_report,
signature,
);
_public_db::SlashReports::set(txn, set.network, &(set.session, tx.encode()));
} }
pub(crate) fn take( pub(crate) fn take(
txn: &mut impl DbTxn, txn: &mut impl DbTxn,
network: ExternalNetworkId, network: ExternalNetworkId,
) -> Option<(Session, Transaction)> { ) -> Option<(Session, Transaction)> {
_public_db::SlashReports::take(txn, network) let (session, tx) = _public_db::SlashReports::take(txn, network)?;
Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
} }
} }

View File

@@ -1,10 +1,8 @@
use core::future::Future; use core::future::Future;
use std::sync::Arc; use std::sync::Arc;
use serai_client_serai::{ #[rustfmt::skip]
abi::primitives::{network_id::ExternalNetworkId, instructions::SignedBatch}, use serai_client::{primitives::ExternalNetworkId, in_instructions::primitives::SignedBatch, SeraiError, Serai};
RpcError, Serai,
};
use serai_db::{Get, DbTxn, Db, create_db}; use serai_db::{Get, DbTxn, Db, create_db};
use serai_task::ContinuallyRan; use serai_task::ContinuallyRan;
@@ -33,7 +31,7 @@ impl<D: Db> PublishBatchTask<D> {
} }
impl<D: Db> ContinuallyRan for PublishBatchTask<D> { impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
type Error = RpcError; type Error = SeraiError;
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> { fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move { async move {
@@ -45,8 +43,8 @@ impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
}; };
// If this is a Batch not yet published, save it into our unordered mapping // If this is a Batch not yet published, save it into our unordered mapping
if LastPublishedBatch::get(&txn, self.network) < Some(batch.batch.id()) { if LastPublishedBatch::get(&txn, self.network) < Some(batch.batch.id) {
BatchesToPublish::set(&mut txn, self.network, batch.batch.id(), &batch); BatchesToPublish::set(&mut txn, self.network, batch.batch.id, &batch);
} }
txn.commit(); txn.commit();
@@ -54,8 +52,12 @@ impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
// Synchronize our last published batch with the Serai network's // Synchronize our last published batch with the Serai network's
let next_to_publish = { let next_to_publish = {
// This uses the latest finalized block, not the latest cosigned block, which should be
// fine as in the worst case, the only impact is no longer attempting TX publication
let serai = self.serai.as_of_latest_finalized_block().await?;
let last_batch = serai.in_instructions().last_batch_for_network(self.network).await?;
let mut txn = self.db.txn(); let mut txn = self.db.txn();
let last_batch = crate::last_indexed_batch_id(&txn, self.network);
let mut our_last_batch = LastPublishedBatch::get(&txn, self.network); let mut our_last_batch = LastPublishedBatch::get(&txn, self.network);
while our_last_batch < last_batch { while our_last_batch < last_batch {
let next_batch = our_last_batch.map(|batch| batch + 1).unwrap_or(0); let next_batch = our_last_batch.map(|batch| batch + 1).unwrap_or(0);
@@ -66,7 +68,6 @@ impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
if let Some(last_batch) = our_last_batch { if let Some(last_batch) = our_last_batch {
LastPublishedBatch::set(&mut txn, self.network, &last_batch); LastPublishedBatch::set(&mut txn, self.network, &last_batch);
} }
txn.commit();
last_batch.map(|batch| batch + 1).unwrap_or(0) last_batch.map(|batch| batch + 1).unwrap_or(0)
}; };
@@ -74,7 +75,7 @@ impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
if let Some(batch) = BatchesToPublish::get(&self.db, self.network, next_to_publish) { if let Some(batch) = BatchesToPublish::get(&self.db, self.network, next_to_publish) {
self self
.serai .serai
.publish_transaction(&serai_client_serai::InInstructions::execute_batch(batch)) .publish(&serai_client::in_instructions::SeraiInInstructions::execute_batch(batch))
.await?; .await?;
true true
} else { } else {
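
Publication stays strictly sequential here: batches arrive unordered into a keyed mapping, and each iteration publishes only `last_published + 1` if it is already on hand. A sketch of that queue discipline, with a plain `HashMap` standing in for the database:

use std::collections::HashMap;

/// Publish the next batch in sequence, if we already hold it.
/// Returns true when progress was made (mirroring `run_iteration`).
fn publish_next(
    to_publish: &mut HashMap<u32, Vec<u8>>,
    last_published: &mut Option<u32>,
) -> bool {
    let next = last_published.map(|id| id + 1).unwrap_or(0);
    if let Some(batch) = to_publish.remove(&next) {
        // Stand-in for the actual transaction publication.
        println!("publishing batch #{next} ({} bytes)", batch.len());
        *last_published = Some(next);
        true
    } else {
        false
    }
}

fn main() {
    let mut pending = HashMap::from([(0u32, vec![0u8]), (2u32, vec![2u8])]);
    let mut last: Option<u32> = None;
    assert!(publish_next(&mut pending, &mut last));  // publishes #0
    assert!(!publish_next(&mut pending, &mut last)); // #1 not yet received
}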

View File

@@ -3,10 +3,7 @@ use std::sync::Arc;
use serai_db::{DbTxn, Db}; use serai_db::{DbTxn, Db};
use serai_client_serai::{ use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::Session, Serai};
abi::primitives::{network_id::ExternalNetworkId, validator_sets::Session},
Serai,
};
use serai_task::ContinuallyRan; use serai_task::ContinuallyRan;
@@ -36,10 +33,10 @@ impl<D: Db> PublishSlashReportTask<D> {
// This uses the latest finalized block, not the latest cosigned block, which should be // This uses the latest finalized block, not the latest cosigned block, which should be
// fine as in the worst case, the only impact is no longer attempting TX publication // fine as in the worst case, the only impact is no longer attempting TX publication
let serai = self.serai.state().await.map_err(|e| format!("{e:?}"))?; let serai = self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
let serai = serai.validator_sets();
let session_after_slash_report = Session(session.0 + 1); let session_after_slash_report = Session(session.0 + 1);
let current_session = let current_session = serai.session(network.into()).await.map_err(|e| format!("{e:?}"))?;
serai.current_session(network.into()).await.map_err(|e| format!("{e:?}"))?;
let current_session = current_session.map(|session| session.0); let current_session = current_session.map(|session| session.0);
// Only attempt to publish the slash report for session #n while session #n+1 is still // Only attempt to publish the slash report for session #n while session #n+1 is still
// active // active
@@ -58,13 +55,14 @@ impl<D: Db> PublishSlashReportTask<D> {
} }
// If this session which should publish a slash report already has, move on // If this session which should publish a slash report already has, move on
if !serai.pending_slash_report(network).await.map_err(|e| format!("{e:?}"))? { let key_pending_slash_report =
serai.key_pending_slash_report(network).await.map_err(|e| format!("{e:?}"))?;
if key_pending_slash_report.is_none() {
txn.commit(); txn.commit();
return Ok(false); return Ok(false);
}; };
// Since this slash report is still pending, publish it match self.serai.publish(&slash_report).await {
match self.serai.publish_transaction(&slash_report).await {
Ok(()) => { Ok(()) => {
txn.commit(); txn.commit();
Ok(true) Ok(true)
@@ -86,7 +84,7 @@ impl<D: Db> ContinuallyRan for PublishSlashReportTask<D> {
async move { async move {
let mut made_progress = false; let mut made_progress = false;
let mut error = None; let mut error = None;
for network in ExternalNetworkId::all() { for network in serai_client::primitives::EXTERNAL_NETWORKS {
let network_res = self.publish(network).await; let network_res = self.publish(network).await;
// We made progress if any network successfully published their slash report // We made progress if any network successfully published their slash report
made_progress |= network_res == Ok(true); made_progress |= network_res == Ok(true);
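
The loop above tries every external network before surfacing a failure, recording progress if any network succeeded and keeping only the first error. A sketch of that accumulate-then-report shape, with string stand-ins for the network IDs and the publish call:

fn publish_for(network: &str) -> Result<bool, String> {
    // Stand-in for the per-network publish attempt.
    if network == "Monero" { Err("node unreachable".into()) } else { Ok(true) }
}

fn main() -> Result<(), String> {
    let mut made_progress = false;
    let mut error = None;
    for network in ["Bitcoin", "Ethereum", "Monero"] {
        let res = publish_for(network);
        // Progress on any network counts, even if another errors.
        made_progress |= res == Ok(true);
        if let Err(e) = res {
            // Keep only the first error; later networks still get their turn.
            error.get_or_insert(e);
        }
    }
    match error {
        Some(e) => Err(e),
        None => { assert!(made_progress); Ok(()) }
    }
}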

View File

@@ -3,10 +3,7 @@ use std::sync::Arc;
use serai_db::{DbTxn, Db}; use serai_db::{DbTxn, Db};
use serai_client_serai::{ use serai_client::{validator_sets::primitives::ExternalValidatorSet, Serai};
abi::primitives::{network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet},
Serai,
};
use serai_task::ContinuallyRan; use serai_task::ContinuallyRan;
@@ -31,7 +28,7 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> { fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move { async move {
let mut made_progress = false; let mut made_progress = false;
for network in ExternalNetworkId::all() { for network in serai_client::primitives::EXTERNAL_NETWORKS {
let mut txn = self.db.txn(); let mut txn = self.db.txn();
let Some((session, keys)) = Keys::take(&mut txn, network) else { let Some((session, keys)) = Keys::take(&mut txn, network) else {
// No keys to set // No keys to set
@@ -40,9 +37,10 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {
// This uses the latest finalized block, not the latest cosigned block, which should be // This uses the latest finalized block, not the latest cosigned block, which should be
// fine as in the worst case, the only impact is no longer attempting TX publication // fine as in the worst case, the only impact is no longer attempting TX publication
let serai = self.serai.state().await.map_err(|e| format!("{e:?}"))?; let serai =
let current_session = self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
serai.current_session(network.into()).await.map_err(|e| format!("{e:?}"))?; let serai = serai.validator_sets();
let current_session = serai.session(network.into()).await.map_err(|e| format!("{e:?}"))?;
let current_session = current_session.map(|session| session.0); let current_session = current_session.map(|session| session.0);
// Only attempt to set these keys if this isn't a retired session // Only attempt to set these keys if this isn't a retired session
if Some(session.0) < current_session { if Some(session.0) < current_session {
@@ -69,7 +67,7 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {
continue; continue;
}; };
match self.serai.publish_transaction(&keys).await { match self.serai.publish(&keys).await {
Ok(()) => { Ok(()) => {
txn.commit(); txn.commit();
made_progress = true; made_progress = true;
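
The `Some(session.0) < current_session` guard above relies on `Option`'s derived ordering, in which `None` sorts before every `Some`, so an unknown current session can never classify keys as retired. A tiny demonstration:

fn main() {
    let session = 3u32;
    // `None < Some(_)`: if the node doesn't report a current session,
    // the retirement check stays false and the keys are still published.
    assert!(!(Some(session) < None::<u32>));
    // A strictly later current session means `session` is retired.
    assert!(Some(session) < Some(4));
    assert!(!(Some(session) < Some(3)));
}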

View File

@@ -6,7 +6,7 @@ license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tributary-sdk" repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tributary-sdk"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021" edition = "2021"
rust-version = "1.85" rust-version = "1.81"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true
@@ -24,19 +24,18 @@ zeroize = { version = "^1.5", default-features = false, features = ["std"] }
rand = { version = "0.8", default-features = false, features = ["std"] } rand = { version = "0.8", default-features = false, features = ["std"] }
rand_chacha = { version = "0.3", default-features = false, features = ["std"] } rand_chacha = { version = "0.3", default-features = false, features = ["std"] }
blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] } blake2 = { version = "0.10", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["std", "recommended"] } transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["std", "recommended"] }
ciphersuite = { path = "../../crypto/ciphersuite", version = "0.4", default-features = false, features = ["std"] } ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", version = "0.4", default-features = false, features = ["std", "ristretto"] }
dalek-ff-group = { path = "../../crypto/dalek-ff-group", default-features = false, features = ["std"] } schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", version = "0.5", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", version = "0.5", default-features = false, features = ["std", "aggregate"] }
hex = { version = "0.4", default-features = false, features = ["std"] } hex = { version = "0.4", default-features = false, features = ["std"] }
log = { version = "0.4", default-features = false, features = ["std"] } log = { version = "0.4", default-features = false, features = ["std"] }
serai-db = { path = "../../common/db", version = "0.1" } serai-db = { path = "../../common/db", version = "0.1" }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
futures-util = { version = "0.3", default-features = false, features = ["std", "sink", "channel"] } futures-util = { version = "0.3", default-features = false, features = ["std", "sink", "channel"] }
futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] } futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }
tendermint = { package = "tendermint-machine", path = "./tendermint", version = "0.2" } tendermint = { package = "tendermint-machine", path = "./tendermint", version = "0.2" }

View File

@@ -1,6 +1,6 @@
AGPL-3.0-only license AGPL-3.0-only license
Copyright (c) 2023-2025 Luke Parker Copyright (c) 2023 Luke Parker
This program is free software: you can redistribute it and/or modify This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as it under the terms of the GNU Affero General Public License Version 3 as

View File

@@ -1,11 +1,10 @@
use std::collections::{VecDeque, HashSet}; use std::collections::{VecDeque, HashSet};
use dalek_ff_group::Ristretto; use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use ciphersuite::{group::GroupEncoding, *};
use serai_db::{Get, DbTxn, Db}; use serai_db::{Get, DbTxn, Db};
use borsh::BorshDeserialize; use scale::Decode;
use tendermint::ext::{Network, Commit}; use tendermint::ext::{Network, Commit};
@@ -21,7 +20,7 @@ pub(crate) struct Blockchain<D: Db, T: TransactionTrait> {
block_number: u64, block_number: u64,
tip: [u8; 32], tip: [u8; 32],
participants: HashSet<[u8; 32]>, participants: HashSet<<Ristretto as Ciphersuite>::G>,
provided: ProvidedTransactions<D, T>, provided: ProvidedTransactions<D, T>,
mempool: Mempool<D, T>, mempool: Mempool<D, T>,
@@ -56,28 +55,25 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
} }
fn next_nonce_key( fn next_nonce_key(
genesis: &[u8; 32], genesis: &[u8; 32],
signer: &<Ristretto as WrappedGroup>::G, signer: &<Ristretto as Ciphersuite>::G,
order: &[u8], order: &[u8],
) -> Vec<u8> { ) -> Vec<u8> {
D::key( D::key(
b"tributary_blockchain", b"tributary_blockchain",
b"next_nonce", b"next_nonce",
[genesis.as_slice(), signer.to_bytes().as_slice(), order].concat(), [genesis.as_ref(), signer.to_bytes().as_ref(), order].concat(),
) )
} }
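
`next_nonce_key` derives one flat database key per (genesis, signer, order) triple by concatenating a domain tag with the raw bytes; because genesis and the encoded signer are fixed-width (32 bytes each), the variable-length `order` suffix stays unambiguous. A minimal sketch of the scheme (`db_key` is an illustrative helper, not the repo's `D::key`):

fn db_key(domain: &[u8], item: &[u8], parts: &[&[u8]]) -> Vec<u8> {
    let mut key = Vec::new();
    key.extend_from_slice(domain);
    key.extend_from_slice(item);
    for part in parts {
        key.extend_from_slice(part);
    }
    key
}

fn main() {
    let genesis = [0u8; 32];
    let signer = [1u8; 32]; // the compressed point's bytes in the real code
    let parts: [&[u8]; 3] = [&genesis, &signer, b"order"];
    let key = db_key(b"tributary_blockchain", b"next_nonce", &parts);
    assert!(key.starts_with(b"tributary_blockchain"));
    assert_eq!(key.len(), 20 + 10 + 32 + 32 + 5);
}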
pub(crate) fn new( pub(crate) fn new(
db: D, db: D,
genesis: [u8; 32], genesis: [u8; 32],
participants: &[<Ristretto as WrappedGroup>::G], participants: &[<Ristretto as Ciphersuite>::G],
) -> Self { ) -> Self {
let mut res = Self { let mut res = Self {
db: Some(db.clone()), db: Some(db.clone()),
genesis, genesis,
participants: participants participants: participants.iter().copied().collect(),
.iter()
.map(<<Ristretto as WrappedGroup>::G as GroupEncoding>::to_bytes)
.collect(),
block_number: 0, block_number: 0,
tip: genesis, tip: genesis,
@@ -109,7 +105,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
pub(crate) fn block_from_db(db: &D, genesis: [u8; 32], block: &[u8; 32]) -> Option<Block<T>> { pub(crate) fn block_from_db(db: &D, genesis: [u8; 32], block: &[u8; 32]) -> Option<Block<T>> {
db.get(Self::block_key(&genesis, block)) db.get(Self::block_key(&genesis, block))
.map(|bytes| Block::<T>::read::<&[u8]>(&mut bytes.as_slice()).unwrap()) .map(|bytes| Block::<T>::read::<&[u8]>(&mut bytes.as_ref()).unwrap())
} }
pub(crate) fn commit_from_db(db: &D, genesis: [u8; 32], block: &[u8; 32]) -> Option<Vec<u8>> { pub(crate) fn commit_from_db(db: &D, genesis: [u8; 32], block: &[u8; 32]) -> Option<Vec<u8>> {
@@ -169,14 +165,14 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
// we must have a commit per valid hash // we must have a commit per valid hash
let commit = Self::commit_from_db(db, genesis, &hash).unwrap(); let commit = Self::commit_from_db(db, genesis, &hash).unwrap();
// commit has to be valid if it is coming from our db // commit has to be valid if it is coming from our db
Some(Commit::<N::SignatureScheme>::deserialize_reader(&mut commit.as_slice()).unwrap()) Some(Commit::<N::SignatureScheme>::decode(&mut commit.as_ref()).unwrap())
}; };
let unsigned_in_chain = let unsigned_in_chain =
|hash: [u8; 32]| db.get(Self::unsigned_included_key(&self.genesis, &hash)).is_some(); |hash: [u8; 32]| db.get(Self::unsigned_included_key(&self.genesis, &hash)).is_some();
self.mempool.add::<N, _>( self.mempool.add::<N, _>(
|signer, order| { |signer, order| {
if self.participants.contains(&signer.to_bytes()) { if self.participants.contains(&signer) {
Some( Some(
db.get(Self::next_nonce_key(&self.genesis, &signer, &order)) db.get(Self::next_nonce_key(&self.genesis, &signer, &order))
.map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())), .map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())),
@@ -199,13 +195,13 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
pub(crate) fn next_nonce( pub(crate) fn next_nonce(
&self, &self,
signer: &<Ristretto as WrappedGroup>::G, signer: &<Ristretto as Ciphersuite>::G,
order: &[u8], order: &[u8],
) -> Option<u32> { ) -> Option<u32> {
if let Some(next_nonce) = self.mempool.next_nonce_in_mempool(signer, order.to_vec()) { if let Some(next_nonce) = self.mempool.next_nonce_in_mempool(signer, order.to_vec()) {
return Some(next_nonce); return Some(next_nonce);
} }
if self.participants.contains(&signer.to_bytes()) { if self.participants.contains(signer) {
Some( Some(
self self
.db .db
@@ -244,7 +240,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
let commit = |block: u64| -> Option<Commit<N::SignatureScheme>> { let commit = |block: u64| -> Option<Commit<N::SignatureScheme>> {
let commit = self.commit_by_block_number(block)?; let commit = self.commit_by_block_number(block)?;
// commit has to be valid if it is coming from our db // commit has to be valid if it is coming from our db
Some(Commit::<N::SignatureScheme>::deserialize_reader(&mut commit.as_slice()).unwrap()) Some(Commit::<N::SignatureScheme>::decode(&mut commit.as_ref()).unwrap())
}; };
let mut txn_db = db.clone(); let mut txn_db = db.clone();
@@ -254,7 +250,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
self.tip, self.tip,
self.provided.transactions.clone(), self.provided.transactions.clone(),
&mut |signer, order| { &mut |signer, order| {
if self.participants.contains(&signer.to_bytes()) { if self.participants.contains(signer) {
let key = Self::next_nonce_key(&self.genesis, signer, order); let key = Self::next_nonce_key(&self.genesis, signer, order);
let next = txn let next = txn
.get(&key) .get(&key)

View File

@@ -3,11 +3,9 @@ use std::{sync::Arc, io};
use zeroize::Zeroizing; use zeroize::Zeroizing;
use borsh::BorshDeserialize; use ciphersuite::{Ciphersuite, Ristretto};
use ciphersuite::*;
use dalek_ff_group::Ristretto;
use scale::Decode;
use futures_channel::mpsc::UnboundedReceiver; use futures_channel::mpsc::UnboundedReceiver;
use futures_util::{StreamExt, SinkExt}; use futures_util::{StreamExt, SinkExt};
use ::tendermint::{ use ::tendermint::{
@@ -163,8 +161,8 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
db: D, db: D,
genesis: [u8; 32], genesis: [u8; 32],
start_time: u64, start_time: u64,
key: Zeroizing<<Ristretto as WrappedGroup>::F>, key: Zeroizing<<Ristretto as Ciphersuite>::F>,
validators: Vec<(<Ristretto as WrappedGroup>::G, u64)>, validators: Vec<(<Ristretto as Ciphersuite>::G, u64)>,
p2p: P, p2p: P,
) -> Option<Self> { ) -> Option<Self> {
log::info!("new Tributary with genesis {}", hex::encode(genesis)); log::info!("new Tributary with genesis {}", hex::encode(genesis));
@@ -178,7 +176,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
let block_number = BlockNumber(blockchain.block_number()); let block_number = BlockNumber(blockchain.block_number());
let start_time = if let Some(commit) = blockchain.commit(&blockchain.tip()) { let start_time = if let Some(commit) = blockchain.commit(&blockchain.tip()) {
Commit::<Validators>::deserialize_reader(&mut commit.as_slice()).unwrap().end_time Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time
} else { } else {
start_time start_time
}; };
@@ -236,7 +234,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
pub async fn next_nonce( pub async fn next_nonce(
&self, &self,
signer: &<Ristretto as WrappedGroup>::G, signer: &<Ristretto as Ciphersuite>::G,
order: &[u8], order: &[u8],
) -> Option<u32> { ) -> Option<u32> {
self.network.blockchain.read().await.next_nonce(signer, order) self.network.blockchain.read().await.next_nonce(signer, order)
@@ -277,8 +275,8 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
} }
let block = TendermintBlock(block.serialize()); let block = TendermintBlock(block.serialize());
let mut commit_ref = commit.as_slice(); let mut commit_ref = commit.as_ref();
let Ok(commit) = Commit::<Arc<Validators>>::deserialize_reader(&mut commit_ref) else { let Ok(commit) = Commit::<Arc<Validators>>::decode(&mut commit_ref) else {
log::error!("sent an invalidly serialized commit"); log::error!("sent an invalidly serialized commit");
return false; return false;
}; };
@@ -328,7 +326,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
Some(&TENDERMINT_MESSAGE) => { Some(&TENDERMINT_MESSAGE) => {
let Ok(msg) = let Ok(msg) =
SignedMessageFor::<TendermintNetwork<D, T, P>>::deserialize_reader(&mut &msg[1 ..]) SignedMessageFor::<TendermintNetwork<D, T, P>>::decode::<&[u8]>(&mut &msg[1 ..])
else { else {
log::error!("received invalid tendermint message"); log::error!("received invalid tendermint message");
return false; return false;
@@ -368,17 +366,15 @@ impl<D: Db, T: TransactionTrait> TributaryReader<D, T> {
Blockchain::<D, T>::commit_from_db(&self.0, self.1, hash) Blockchain::<D, T>::commit_from_db(&self.0, self.1, hash)
} }
pub fn parsed_commit(&self, hash: &[u8; 32]) -> Option<Commit<Validators>> { pub fn parsed_commit(&self, hash: &[u8; 32]) -> Option<Commit<Validators>> {
self self.commit(hash).map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap())
.commit(hash)
.map(|commit| Commit::<Validators>::deserialize_reader(&mut commit.as_slice()).unwrap())
} }
pub fn block_after(&self, hash: &[u8; 32]) -> Option<[u8; 32]> { pub fn block_after(&self, hash: &[u8; 32]) -> Option<[u8; 32]> {
Blockchain::<D, T>::block_after(&self.0, self.1, hash) Blockchain::<D, T>::block_after(&self.0, self.1, hash)
} }
pub fn time_of_block(&self, hash: &[u8; 32]) -> Option<u64> { pub fn time_of_block(&self, hash: &[u8; 32]) -> Option<u64> {
self.commit(hash).map(|commit| { self
Commit::<Validators>::deserialize_reader(&mut commit.as_slice()).unwrap().end_time .commit(hash)
}) .map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time)
} }
pub fn locally_provided_txs_in_block(&self, hash: &[u8; 32], order: &str) -> bool { pub fn locally_provided_txs_in_block(&self, hash: &[u8; 32], order: &str) -> bool {

View File

@@ -1,7 +1,6 @@
use std::collections::HashMap; use std::collections::HashMap;
use dalek_ff_group::Ristretto; use ciphersuite::{Ciphersuite, Ristretto};
use ciphersuite::{group::GroupEncoding, *};
use serai_db::{DbTxn, Db}; use serai_db::{DbTxn, Db};
@@ -21,9 +20,9 @@ pub(crate) struct Mempool<D: Db, T: TransactionTrait> {
db: D, db: D,
genesis: [u8; 32], genesis: [u8; 32],
last_nonce_in_mempool: HashMap<([u8; 32], Vec<u8>), u32>, last_nonce_in_mempool: HashMap<(<Ristretto as Ciphersuite>::G, Vec<u8>), u32>,
txs: HashMap<[u8; 32], Transaction<T>>, txs: HashMap<[u8; 32], Transaction<T>>,
txs_per_signer: HashMap<[u8; 32], u32>, txs_per_signer: HashMap<<Ristretto as Ciphersuite>::G, u32>,
} }
impl<D: Db, T: TransactionTrait> Mempool<D, T> { impl<D: Db, T: TransactionTrait> Mempool<D, T> {
@@ -82,7 +81,6 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
} }
Transaction::Application(tx) => match tx.kind() { Transaction::Application(tx) => match tx.kind() {
TransactionKind::Signed(order, Signed { signer, nonce, .. }) => { TransactionKind::Signed(order, Signed { signer, nonce, .. }) => {
let signer = signer.to_bytes();
let amount = *res.txs_per_signer.get(&signer).unwrap_or(&0) + 1; let amount = *res.txs_per_signer.get(&signer).unwrap_or(&0) + 1;
res.txs_per_signer.insert(signer, amount); res.txs_per_signer.insert(signer, amount);
@@ -108,7 +106,7 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
// Returns Ok(true) if new, Ok(false) if it's an already-present unsigned, or the error. // Returns Ok(true) if new, Ok(false) if it's an already-present unsigned, or the error.
pub(crate) fn add< pub(crate) fn add<
N: Network, N: Network,
F: FnOnce(<Ristretto as WrappedGroup>::G, Vec<u8>) -> Option<u32>, F: FnOnce(<Ristretto as Ciphersuite>::G, Vec<u8>) -> Option<u32>,
>( >(
&mut self, &mut self,
blockchain_next_nonce: F, blockchain_next_nonce: F,
@@ -141,8 +139,6 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
}; };
let mut next_nonce = blockchain_next_nonce; let mut next_nonce = blockchain_next_nonce;
let signer = signer.to_bytes();
if let Some(mempool_last_nonce) = if let Some(mempool_last_nonce) =
self.last_nonce_in_mempool.get(&(signer, order.clone())) self.last_nonce_in_mempool.get(&(signer, order.clone()))
{ {
@@ -182,10 +178,10 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
// Returns None if the mempool doesn't have a nonce tracked. // Returns None if the mempool doesn't have a nonce tracked.
pub(crate) fn next_nonce_in_mempool( pub(crate) fn next_nonce_in_mempool(
&self, &self,
signer: &<Ristretto as WrappedGroup>::G, signer: &<Ristretto as Ciphersuite>::G,
order: Vec<u8>, order: Vec<u8>,
) -> Option<u32> { ) -> Option<u32> {
self.last_nonce_in_mempool.get(&(signer.to_bytes(), order)).copied().map(|nonce| nonce + 1) self.last_nonce_in_mempool.get(&(*signer, order)).copied().map(|nonce| nonce + 1)
} }
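
The mempool's nonce tracking is just a map from (signer, order) to the last nonce observed, so the next usable nonce is `last + 1` and a missing entry defers to the blockchain. A sketch using the byte-array signer representation from one side of this hunk:

use std::collections::HashMap;

fn main() {
    // Mirrors `last_nonce_in_mempool`: keyed by (signer, order), holding the
    // last nonce seen, so the next usable nonce is `last + 1`.
    let mut last_nonce: HashMap<([u8; 32], Vec<u8>), u32> = HashMap::new();
    let signer = [7u8; 32];
    last_nonce.insert((signer, vec![]), 4);

    let next = last_nonce.get(&(signer, vec![])).copied().map(|nonce| nonce + 1);
    assert_eq!(next, Some(5));

    // An untracked signer yields None; the caller falls back to the
    // blockchain's next nonce.
    assert_eq!(last_nonce.get(&([8u8; 32], vec![])), None);
}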
/// Get transactions to include in a block. /// Get transactions to include in a block.
@@ -246,8 +242,6 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
if let Some(tx) = self.txs.remove(tx) { if let Some(tx) = self.txs.remove(tx) {
if let TransactionKind::Signed(order, Signed { signer, nonce, .. }) = tx.kind() { if let TransactionKind::Signed(order, Signed { signer, nonce, .. }) = tx.kind() {
let signer = signer.to_bytes();
let amount = *self.txs_per_signer.get(&signer).unwrap() - 1; let amount = *self.txs_per_signer.get(&signer).unwrap() - 1;
self.txs_per_signer.insert(signer, amount); self.txs_per_signer.insert(signer, amount);

View File

@@ -10,10 +10,12 @@ use rand_chacha::ChaCha12Rng;
use transcript::{Transcript, RecommendedTranscript}; use transcript::{Transcript, RecommendedTranscript};
use ciphersuite::{ use ciphersuite::{
group::{ff::PrimeField, GroupEncoding}, group::{
*, GroupEncoding,
ff::{Field, PrimeField},
},
Ciphersuite, Ristretto,
}; };
use dalek_ff_group::Ristretto;
use schnorr::{ use schnorr::{
SchnorrSignature, SchnorrSignature,
aggregate::{SchnorrAggregator, SchnorrAggregate}, aggregate::{SchnorrAggregator, SchnorrAggregate},
@@ -21,7 +23,7 @@ use schnorr::{
use serai_db::Db; use serai_db::Db;
use borsh::{BorshSerialize, BorshDeserialize}; use scale::{Encode, Decode};
use tendermint::{ use tendermint::{
SignedMessageFor, SignedMessageFor,
ext::{ ext::{
@@ -48,26 +50,24 @@ fn challenge(
key: [u8; 32], key: [u8; 32],
nonce: &[u8], nonce: &[u8],
msg: &[u8], msg: &[u8],
) -> <Ristretto as WrappedGroup>::F { ) -> <Ristretto as Ciphersuite>::F {
let mut transcript = RecommendedTranscript::new(b"Tributary Chain Tendermint Message"); let mut transcript = RecommendedTranscript::new(b"Tributary Chain Tendermint Message");
transcript.append_message(b"genesis", genesis); transcript.append_message(b"genesis", genesis);
transcript.append_message(b"key", key); transcript.append_message(b"key", key);
transcript.append_message(b"nonce", nonce); transcript.append_message(b"nonce", nonce);
transcript.append_message(b"message", msg); transcript.append_message(b"message", msg);
<Ristretto as WrappedGroup>::F::from_bytes_mod_order_wide( <Ristretto as Ciphersuite>::F::from_bytes_mod_order_wide(&transcript.challenge(b"schnorr").into())
&transcript.challenge(b"schnorr").into(),
)
} }
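
The `challenge` function is a Fiat-Shamir style derivation: every contextual input is appended to a domain-separated transcript, and a wide (64-byte) output is reduced modulo the group order to avoid bias. A sketch of the same shape using `merlin` and `curve25519-dalek` as stand-ins for the repo's flexible-transcript and Ristretto wrapper:

use curve25519_dalek::scalar::Scalar;
use merlin::Transcript;

/// Derive a challenge scalar bound to every contextual input.
fn challenge(genesis: [u8; 32], key: [u8; 32], nonce: &[u8], msg: &[u8]) -> Scalar {
    let mut transcript = Transcript::new(b"Tributary Chain Tendermint Message");
    transcript.append_message(b"genesis", &genesis);
    transcript.append_message(b"key", &key);
    transcript.append_message(b"nonce", nonce);
    transcript.append_message(b"message", msg);

    // A 64-byte challenge reduced mod the group order avoids bias.
    let mut wide = [0u8; 64];
    transcript.challenge_bytes(b"schnorr", &mut wide);
    Scalar::from_bytes_mod_order_wide(&wide)
}

fn main() {
    let a = challenge([0; 32], [1; 32], b"nonce", b"msg");
    let b = challenge([0; 32], [1; 32], b"nonce", b"msg2");
    assert_ne!(a, b); // any input change changes the challenge
}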
#[derive(Clone, PartialEq, Eq, Debug)] #[derive(Clone, PartialEq, Eq, Debug)]
pub struct Signer { pub struct Signer {
genesis: [u8; 32], genesis: [u8; 32],
key: Zeroizing<<Ristretto as WrappedGroup>::F>, key: Zeroizing<<Ristretto as Ciphersuite>::F>,
} }
impl Signer { impl Signer {
pub(crate) fn new(genesis: [u8; 32], key: Zeroizing<<Ristretto as WrappedGroup>::F>) -> Signer { pub(crate) fn new(genesis: [u8; 32], key: Zeroizing<<Ristretto as Ciphersuite>::F>) -> Signer {
Signer { genesis, key } Signer { genesis, key }
} }
} }
@@ -100,10 +100,10 @@ impl SignerTrait for Signer {
assert_eq!(nonce_ref, [0; 64].as_ref()); assert_eq!(nonce_ref, [0; 64].as_ref());
let nonce = let nonce =
Zeroizing::new(<Ristretto as WrappedGroup>::F::from_bytes_mod_order_wide(&nonce_arr)); Zeroizing::new(<Ristretto as Ciphersuite>::F::from_bytes_mod_order_wide(&nonce_arr));
nonce_arr.zeroize(); nonce_arr.zeroize();
assert!(!bool::from(nonce.ct_eq(&<Ristretto as WrappedGroup>::F::ZERO))); assert!(!bool::from(nonce.ct_eq(&<Ristretto as Ciphersuite>::F::ZERO)));
let challenge = challenge( let challenge = challenge(
self.genesis, self.genesis,
@@ -132,7 +132,7 @@ pub struct Validators {
impl Validators { impl Validators {
pub(crate) fn new( pub(crate) fn new(
genesis: [u8; 32], genesis: [u8; 32],
validators: Vec<(<Ristretto as WrappedGroup>::G, u64)>, validators: Vec<(<Ristretto as Ciphersuite>::G, u64)>,
) -> Option<Validators> { ) -> Option<Validators> {
let mut total_weight = 0; let mut total_weight = 0;
let mut weights = HashMap::new(); let mut weights = HashMap::new();
@@ -163,6 +163,7 @@ impl SignatureScheme for Validators {
type AggregateSignature = Vec<u8>; type AggregateSignature = Vec<u8>;
type Signer = Arc<Signer>; type Signer = Arc<Signer>;
#[must_use]
fn verify(&self, validator: Self::ValidatorId, msg: &[u8], sig: &Self::Signature) -> bool { fn verify(&self, validator: Self::ValidatorId, msg: &[u8], sig: &Self::Signature) -> bool {
if !self.weights.contains_key(&validator) { if !self.weights.contains_key(&validator) {
return false; return false;
@@ -195,6 +196,7 @@ impl SignatureScheme for Validators {
aggregate.serialize() aggregate.serialize()
} }
#[must_use]
fn verify_aggregate( fn verify_aggregate(
&self, &self,
signers: &[Self::ValidatorId], signers: &[Self::ValidatorId],
@@ -219,7 +221,7 @@ impl SignatureScheme for Validators {
signers signers
.iter() .iter()
.zip(challenges) .zip(challenges)
.map(|(s, c)| (<Ristretto as GroupIo>::read_G(&mut s.as_slice()).unwrap(), c)) .map(|(s, c)| (<Ristretto as Ciphersuite>::read_G(&mut s.as_slice()).unwrap(), c))
.collect::<Vec<_>>() .collect::<Vec<_>>()
.as_slice(), .as_slice(),
) )
@@ -248,7 +250,7 @@ impl Weights for Validators {
} }
} }
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
pub struct TendermintBlock(pub Vec<u8>); pub struct TendermintBlock(pub Vec<u8>);
impl BlockTrait for TendermintBlock { impl BlockTrait for TendermintBlock {
type Id = [u8; 32]; type Id = [u8; 32];
@@ -300,7 +302,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
fn broadcast(&mut self, msg: SignedMessageFor<Self>) -> impl Send + Future<Output = ()> { fn broadcast(&mut self, msg: SignedMessageFor<Self>) -> impl Send + Future<Output = ()> {
async move { async move {
let mut to_broadcast = vec![TENDERMINT_MESSAGE]; let mut to_broadcast = vec![TENDERMINT_MESSAGE];
msg.serialize(&mut to_broadcast).unwrap(); to_broadcast.extend(msg.encode());
self.p2p.broadcast(self.genesis, to_broadcast).await self.p2p.broadcast(self.genesis, to_broadcast).await
} }
} }
@@ -390,7 +392,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
return invalid_block(); return invalid_block();
}; };
let encoded_commit = borsh::to_vec(&commit).unwrap(); let encoded_commit = commit.encode();
loop { loop {
let block_res = self.blockchain.write().await.add_block::<Self>( let block_res = self.blockchain.write().await.add_block::<Self>(
&block, &block,

View File

@@ -1,11 +1,10 @@
use std::io; use std::io;
use borsh::BorshDeserialize; use scale::{Encode, Decode, IoReader};
use blake2::{Digest, Blake2s256}; use blake2::{Digest, Blake2s256};
use dalek_ff_group::Ristretto; use ciphersuite::{Ciphersuite, Ristretto};
use ciphersuite::*;
use crate::{ use crate::{
transaction::{Transaction, TransactionKind, TransactionError}, transaction::{Transaction, TransactionKind, TransactionError},
@@ -27,14 +26,14 @@ pub enum TendermintTx {
impl ReadWrite for TendermintTx { impl ReadWrite for TendermintTx {
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> { fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
Evidence::deserialize_reader(reader) Evidence::decode(&mut IoReader(reader))
.map(TendermintTx::SlashEvidence) .map(TendermintTx::SlashEvidence)
.map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "invalid evidence format")) .map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "invalid evidence format"))
} }
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> { fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
match self { match self {
TendermintTx::SlashEvidence(ev) => writer.write_all(&borsh::to_vec(&ev).unwrap()), TendermintTx::SlashEvidence(ev) => writer.write_all(&ev.encode()),
} }
} }
} }
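
The SCALE variant of `ReadWrite` above adapts an `io::Read` into the codec's `Input` trait via `IoReader`, and writes by emitting `encode()` bytes. A self-contained sketch, assuming `parity-scale-codec` with the `derive` feature and a toy `Evidence` type:

use std::io;
use parity_scale_codec::{Decode, Encode, IoReader};

#[derive(Encode, Decode, Debug, PartialEq)]
struct Evidence(Vec<u8>);

fn read_evidence<R: io::Read>(reader: &mut R) -> io::Result<Evidence> {
    // IoReader adapts io::Read into SCALE's Input trait.
    Evidence::decode(&mut IoReader(reader))
        .map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "invalid evidence format"))
}

fn write_evidence<W: io::Write>(evidence: &Evidence, writer: &mut W) -> io::Result<()> {
    writer.write_all(&evidence.encode())
}

fn main() -> io::Result<()> {
    let evidence = Evidence(vec![1, 2, 3]);
    let mut buf = vec![];
    write_evidence(&evidence, &mut buf)?;
    assert_eq!(read_evidence(&mut buf.as_slice())?, evidence);
    Ok(())
}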
@@ -50,7 +49,7 @@ impl Transaction for TendermintTx {
Blake2s256::digest(self.serialize()).into() Blake2s256::digest(self.serialize()).into()
} }
fn sig_hash(&self, _genesis: [u8; 32]) -> <Ristretto as WrappedGroup>::F { fn sig_hash(&self, _genesis: [u8; 32]) -> <Ristretto as Ciphersuite>::F {
match self { match self {
TendermintTx::SlashEvidence(_) => panic!("sig_hash called on slash evidence transaction"), TendermintTx::SlashEvidence(_) => panic!("sig_hash called on slash evidence transaction"),
} }

View File

@@ -1,9 +1,10 @@
use std::{sync::Arc, io, collections::HashMap, fmt::Debug}; use std::{sync::Arc, io, collections::HashMap, fmt::Debug};
use blake2::{Digest, Blake2s256}; use blake2::{Digest, Blake2s256};
use ciphersuite::{
use dalek_ff_group::Ristretto; group::{ff::Field, Group},
use ciphersuite::{group::Group, *}; Ciphersuite, Ristretto,
};
use schnorr::SchnorrSignature; use schnorr::SchnorrSignature;
use serai_db::MemDb; use serai_db::MemDb;
@@ -29,11 +30,11 @@ impl NonceTransaction {
nonce, nonce,
distinguisher, distinguisher,
Signed { Signed {
signer: <Ristretto as WrappedGroup>::G::identity(), signer: <Ristretto as Ciphersuite>::G::identity(),
nonce, nonce,
signature: SchnorrSignature::<Ristretto> { signature: SchnorrSignature::<Ristretto> {
R: <Ristretto as WrappedGroup>::G::identity(), R: <Ristretto as Ciphersuite>::G::identity(),
s: <Ristretto as WrappedGroup>::F::ZERO, s: <Ristretto as Ciphersuite>::F::ZERO,
}, },
}, },
) )

View File

@@ -10,8 +10,7 @@ use rand::rngs::OsRng;
use blake2::{Digest, Blake2s256}; use blake2::{Digest, Blake2s256};
use dalek_ff_group::Ristretto; use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
use ciphersuite::*;
use serai_db::{DbTxn, Db, MemDb}; use serai_db::{DbTxn, Db, MemDb};
@@ -31,7 +30,7 @@ type N = TendermintNetwork<MemDb, SignedTransaction, DummyP2p>;
fn new_blockchain<T: TransactionTrait>( fn new_blockchain<T: TransactionTrait>(
genesis: [u8; 32], genesis: [u8; 32],
participants: &[<Ristretto as WrappedGroup>::G], participants: &[<Ristretto as Ciphersuite>::G],
) -> (MemDb, Blockchain<MemDb, T>) { ) -> (MemDb, Blockchain<MemDb, T>) {
let db = MemDb::new(); let db = MemDb::new();
let blockchain = Blockchain::new(db.clone(), genesis, participants); let blockchain = Blockchain::new(db.clone(), genesis, participants);
@@ -82,7 +81,7 @@ fn invalid_block() {
assert!(blockchain.verify_block::<N>(&block, &validators, false).is_err()); assert!(blockchain.verify_block::<N>(&block, &validators, false).is_err());
} }
let key = Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut OsRng)); let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
let tx = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 0); let tx = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 0);
// Not a participant // Not a participant
@@ -134,7 +133,7 @@ fn invalid_block() {
blockchain.verify_block::<N>(&block, &validators, false).unwrap(); blockchain.verify_block::<N>(&block, &validators, false).unwrap();
match &mut block.transactions[0] { match &mut block.transactions[0] {
Transaction::Application(tx) => { Transaction::Application(tx) => {
tx.1.signature.s += <Ristretto as WrappedGroup>::F::ONE; tx.1.signature.s += <Ristretto as Ciphersuite>::F::ONE;
} }
_ => panic!("non-signed tx found"), _ => panic!("non-signed tx found"),
} }
@@ -150,7 +149,7 @@ fn invalid_block() {
fn signed_transaction() { fn signed_transaction() {
let genesis = new_genesis(); let genesis = new_genesis();
let validators = Arc::new(Validators::new(genesis, vec![]).unwrap()); let validators = Arc::new(Validators::new(genesis, vec![]).unwrap());
let key = Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut OsRng)); let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
let tx = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 0); let tx = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 0);
let signer = tx.1.signer; let signer = tx.1.signer;
@@ -339,7 +338,7 @@ fn provided_transaction() {
#[tokio::test] #[tokio::test]
async fn tendermint_evidence_tx() { async fn tendermint_evidence_tx() {
let genesis = new_genesis(); let genesis = new_genesis();
let key = Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut OsRng)); let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
let signer = Signer::new(genesis, key.clone()); let signer = Signer::new(genesis, key.clone());
let signer_id = Ristretto::generator() * key.deref(); let signer_id = Ristretto::generator() * key.deref();
let validators = Arc::new(Validators::new(genesis, vec![(signer_id, 1)]).unwrap()); let validators = Arc::new(Validators::new(genesis, vec![(signer_id, 1)]).unwrap());
@@ -379,7 +378,7 @@ async fn tendermint_evidence_tx() {
let mut mempool: Vec<Transaction<SignedTransaction>> = vec![]; let mut mempool: Vec<Transaction<SignedTransaction>> = vec![];
let mut signers = vec![]; let mut signers = vec![];
for _ in 0 .. 5 { for _ in 0 .. 5 {
let key = Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut OsRng)); let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
let signer = Signer::new(genesis, key.clone()); let signer = Signer::new(genesis, key.clone());
let signer_id = Ristretto::generator() * key.deref(); let signer_id = Ristretto::generator() * key.deref();
signers.push((signer_id, 1)); signers.push((signer_id, 1));
@@ -446,7 +445,7 @@ async fn block_tx_ordering() {
} }
let genesis = new_genesis(); let genesis = new_genesis();
let key = Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut OsRng)); let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
// signer // signer
let signer = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 0).1.signer; let signer = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 0).1.signer;

View File

@@ -3,8 +3,7 @@ use std::{sync::Arc, collections::HashMap};
 use zeroize::Zeroizing;
 use rand::{RngCore, rngs::OsRng};

-use dalek_ff_group::Ristretto;
-use ciphersuite::*;
+use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};

 use tendermint::ext::Commit;
@@ -33,7 +32,7 @@ async fn mempool_addition() {
     Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })
   };
   let unsigned_in_chain = |_: [u8; 32]| false;
-  let key = Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut OsRng));
+  let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
   let first_tx = signed_transaction(&mut OsRng, genesis, &key, 0);
   let signer = first_tx.1.signer;
@@ -125,7 +124,7 @@ async fn mempool_addition() {
   // If the mempool doesn't have a nonce for an account, it should successfully use the
   // blockchain's
-  let second_key = Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut OsRng));
+  let second_key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
   let tx = signed_transaction(&mut OsRng, genesis, &second_key, 2);
   let second_signer = tx.1.signer;
   assert_eq!(mempool.next_nonce_in_mempool(&second_signer, vec![]), None);
@@ -165,7 +164,7 @@ fn too_many_mempool() {
     Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })
   };
   let unsigned_in_chain = |_: [u8; 32]| false;
-  let key = Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut OsRng));
+  let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
   // We should be able to add transactions up to the limit
   for i in 0 .. ACCOUNT_MEMPOOL_LIMIT {
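The assertions above exercise per-account nonce tracking. A deliberately toy model of that bookkeeping, where `Mempool` and its simplified `next_nonce_in_mempool` are illustrative stand-ins rather than the crate's actual types (the real method takes an extra argument and defers to the blockchain's nonce when it has no entry):

use std::collections::HashMap;

struct Mempool {
  // Next expected nonce, keyed by a signer identifier.
  next_nonces: HashMap<[u8; 32], u32>,
}

impl Mempool {
  fn next_nonce_in_mempool(&self, signer: &[u8; 32]) -> Option<u32> {
    self.next_nonces.get(signer).copied()
  }

  fn add(&mut self, signer: [u8; 32], nonce: u32) {
    self.next_nonces.insert(signer, nonce + 1);
  }
}

fn main() {
  let mut mempool = Mempool { next_nonces: HashMap::new() };
  // No entry yet, so the caller falls back to the blockchain's nonce.
  assert_eq!(mempool.next_nonce_in_mempool(&[0; 32]), None);
  mempool.add([0; 32], 0);
  assert_eq!(mempool.next_nonce_in_mempool(&[0; 32]), Some(1));
}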


@@ -6,10 +6,14 @@ use rand::{RngCore, CryptoRng, rngs::OsRng};
 use blake2::{Digest, Blake2s256};

-use dalek_ff_group::Ristretto;
-use ciphersuite::*;
+use ciphersuite::{
+  group::{ff::Field, Group},
+  Ciphersuite, Ristretto,
+};
 use schnorr::SchnorrSignature;
+use scale::Encode;

 use ::tendermint::{
   ext::{Network, Signer as SignerTrait, SignatureScheme, BlockNumber, RoundNumber},
   SignedMessageFor, DataFor, Message, SignedMessage, Data, Evidence,
@@ -29,11 +33,11 @@ mod tendermint;
 pub fn random_signed<R: RngCore + CryptoRng>(rng: &mut R) -> Signed {
   Signed {
-    signer: <Ristretto as WrappedGroup>::G::random(&mut *rng),
+    signer: <Ristretto as Ciphersuite>::G::random(&mut *rng),
     nonce: u32::try_from(rng.next_u64() >> 32 >> 1).unwrap(),
     signature: SchnorrSignature::<Ristretto> {
-      R: <Ristretto as WrappedGroup>::G::random(&mut *rng),
-      s: <Ristretto as WrappedGroup>::F::random(rng),
+      R: <Ristretto as Ciphersuite>::G::random(&mut *rng),
+      s: <Ristretto as Ciphersuite>::F::random(rng),
     },
   }
 }
@@ -132,18 +136,18 @@ impl Transaction for SignedTransaction {
 pub fn signed_transaction<R: RngCore + CryptoRng>(
   rng: &mut R,
   genesis: [u8; 32],
-  key: &Zeroizing<<Ristretto as WrappedGroup>::F>,
+  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   nonce: u32,
 ) -> SignedTransaction {
   let mut data = vec![0; 512];
   rng.fill_bytes(&mut data);
-  let signer = <Ristretto as WrappedGroup>::generator() * **key;
+  let signer = <Ristretto as Ciphersuite>::generator() * **key;
   let mut tx =
     SignedTransaction(data, Signed { signer, nonce, signature: random_signed(rng).signature });
-  let sig_nonce = Zeroizing::new(<Ristretto as WrappedGroup>::F::random(rng));
+  let sig_nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(rng));
   tx.1.signature.R = Ristretto::generator() * sig_nonce.deref();
   tx.1.signature = SchnorrSignature::sign(key, sig_nonce, tx.sig_hash(genesis));
@@ -158,7 +162,7 @@ pub fn random_signed_transaction<R: RngCore + CryptoRng>(
   let mut genesis = [0; 32];
   rng.fill_bytes(&mut genesis);
-  let key = Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut *rng));
+  let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut *rng));
   // Shift over an additional bit to ensure it won't overflow when incremented
   let nonce = u32::try_from(rng.next_u64() >> 32 >> 1).unwrap();
@@ -175,11 +179,12 @@ pub async fn tendermint_meta() -> ([u8; 32], Signer, [u8; 32], Arc<Validators>)
   // signer
   let genesis = new_genesis();
   let signer =
-    Signer::new(genesis, Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut OsRng)));
+    Signer::new(genesis, Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng)));
   let validator_id = signer.validator_id().await.unwrap();
   // schema
-  let signer_pub = <Ristretto as GroupIo>::read_G::<&[u8]>(&mut validator_id.as_slice()).unwrap();
+  let signer_pub =
+    <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut validator_id.as_slice()).unwrap();
   let validators = Arc::new(Validators::new(genesis, vec![(signer_pub, 1)]).unwrap());
   (genesis, signer, validator_id, validators)
@@ -198,7 +203,7 @@ pub async fn signed_from_data<N: Network>(
     round: RoundNumber(round_number),
     data,
   };
-  let sig = signer.sign(&borsh::to_vec(&msg).unwrap()).await;
+  let sig = signer.sign(&msg.encode()).await;
   SignedMessage { msg, sig }
 }
@@ -211,5 +216,5 @@ pub async fn random_evidence_tx<N: Network>(
   let data = Data::Proposal(Some(RoundNumber(0)), b);
   let signer_id = signer.validator_id().await.unwrap();
   let signed = signed_from_data::<N>(signer, signer_id, 0, 0, data).await;
-  TendermintTx::SlashEvidence(Evidence::InvalidValidRound(borsh::to_vec(&signed).unwrap()))
+  TendermintTx::SlashEvidence(Evidence::InvalidValidRound(signed.encode()))
 }
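`signed_transaction` above follows the standard Schnorr flow: commit to a fresh nonce, derive a challenge, then sign. That flow in isolation, assuming the `sign`/`verify` signatures of the `schnorr` crate as used above; the random `challenge` is a stand-in for `tx.sig_hash(genesis)`:

use core::ops::Deref;

use zeroize::Zeroizing;
use rand::rngs::OsRng;
use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
use schnorr::SchnorrSignature;

fn main() {
  let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
  let public = Ristretto::generator() * key.deref();

  // The nonce must be unique per signature or the private key leaks.
  let nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
  // Placeholder for the domain-separated sig_hash the tests actually sign.
  let challenge = <Ristretto as Ciphersuite>::F::random(&mut OsRng);

  let sig = SchnorrSignature::<Ristretto>::sign(&key, nonce, challenge);
  assert!(sig.verify(public, challenge));
}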


@@ -2,8 +2,7 @@ use rand::rngs::OsRng;
 use blake2::{Digest, Blake2s256};

-use dalek_ff_group::Ristretto;
-use ciphersuite::*;
+use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};

 use crate::{
   ReadWrite,
@@ -69,7 +68,7 @@ fn signed_transaction() {
   }
   {
     let mut tx = tx.clone();
-    tx.1.signature.s += <Ristretto as WrappedGroup>::F::ONE;
+    tx.1.signature.s += <Ristretto as Ciphersuite>::F::ONE;
     assert!(verify_transaction(&tx, genesis, &mut |_, _| Some(tx.1.nonce)).is_err());
   }


@@ -3,8 +3,9 @@ use std::sync::Arc;
 use zeroize::Zeroizing;
 use rand::{RngCore, rngs::OsRng};

-use dalek_ff_group::Ristretto;
-use ciphersuite::*;
+use ciphersuite::{Ristretto, Ciphersuite, group::ff::Field};
+
+use scale::Encode;

 use tendermint::{
   time::CanonicalInstant,
@@ -50,10 +51,7 @@ async fn invalid_valid_round() {
     async move {
       let data = Data::Proposal(valid_round, TendermintBlock(vec![]));
       let signed = signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, data).await;
-      (
-        signed.clone(),
-        TendermintTx::SlashEvidence(Evidence::InvalidValidRound(borsh::to_vec(&signed).unwrap())),
-      )
+      (signed.clone(), TendermintTx::SlashEvidence(Evidence::InvalidValidRound(signed.encode())))
     }
   };
@@ -71,8 +69,7 @@ async fn invalid_valid_round() {
   let mut random_sig = [0u8; 64];
   OsRng.fill_bytes(&mut random_sig);
   signed.sig = random_sig;
-  let tx =
-    TendermintTx::SlashEvidence(Evidence::InvalidValidRound(borsh::to_vec(&signed).unwrap()));
+  let tx = TendermintTx::SlashEvidence(Evidence::InvalidValidRound(signed.encode()));
   // should fail
   assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());
@@ -92,10 +89,7 @@ async fn invalid_precommit_signature() {
       let signed =
         signed_from_data::<N>(signer.clone().into(), signer_id, 1, 0, Data::Precommit(precommit))
           .await;
-      (
-        signed.clone(),
-        TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(borsh::to_vec(&signed).unwrap())),
-      )
+      (signed.clone(), TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(signed.encode())))
     }
   };
@@ -125,8 +119,7 @@ async fn invalid_precommit_signature() {
     let mut random_sig = [0u8; 64];
     OsRng.fill_bytes(&mut random_sig);
     signed.sig = random_sig;
-    let tx =
-      TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(borsh::to_vec(&signed).unwrap()));
+    let tx = TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(signed.encode()));
     assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());
   }
 }
@@ -144,32 +137,24 @@ async fn evidence_with_prevote() {
     // it should fail for all reasons.
     let mut txs = vec![];
     txs.push(TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(
-      borsh::to_vec(
-        &signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
-          .await,
-      )
-      .unwrap(),
+      signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
+        .await
+        .encode(),
     )));
     txs.push(TendermintTx::SlashEvidence(Evidence::InvalidValidRound(
-      borsh::to_vec(
-        &signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
-          .await,
-      )
-      .unwrap(),
+      signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
+        .await
+        .encode(),
    )));
     // Since these require a second message, provide this one again
     // ConflictingMessages can be fired for actually conflicting Prevotes however
     txs.push(TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      borsh::to_vec(
-        &signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
-          .await,
-      )
-      .unwrap(),
-      borsh::to_vec(
-        &signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
-          .await,
-      )
-      .unwrap(),
+      signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
+        .await
+        .encode(),
+      signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
+        .await
+        .encode(),
     )));
     txs
   }
@@ -203,16 +188,16 @@ async fn conflicting_msgs_evidence_tx() {
     // non-conflicting data should fail
     let signed_1 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![0x11]))).await;
     let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      borsh::to_vec(&signed_1).unwrap(),
-      borsh::to_vec(&signed_1).unwrap(),
+      signed_1.encode(),
+      signed_1.encode(),
     ));
     assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());
     // conflicting data should pass
     let signed_2 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![0x22]))).await;
     let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      borsh::to_vec(&signed_1).unwrap(),
-      borsh::to_vec(&signed_2).unwrap(),
+      signed_1.encode(),
+      signed_2.encode(),
     ));
     verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap();
@@ -220,16 +205,16 @@ async fn conflicting_msgs_evidence_tx() {
     // (except for Precommit)
     let signed_2 = signed_for_b_r(0, 1, Data::Proposal(None, TendermintBlock(vec![0x22]))).await;
     let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      borsh::to_vec(&signed_1).unwrap(),
-      borsh::to_vec(&signed_2).unwrap(),
+      signed_1.encode(),
+      signed_2.encode(),
     ));
     verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap_err();
     // Proposals for different block numbers should also fail as evidence
     let signed_2 = signed_for_b_r(1, 0, Data::Proposal(None, TendermintBlock(vec![0x22]))).await;
     let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      borsh::to_vec(&signed_1).unwrap(),
-      borsh::to_vec(&signed_2).unwrap(),
+      signed_1.encode(),
+      signed_2.encode(),
     ));
     verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap_err();
   }
@@ -239,16 +224,16 @@ async fn conflicting_msgs_evidence_tx() {
     // non-conflicting data should fail
     let signed_1 = signed_for_b_r(0, 0, Data::Prevote(Some([0x11; 32]))).await;
     let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      borsh::to_vec(&signed_1).unwrap(),
-      borsh::to_vec(&signed_1).unwrap(),
+      signed_1.encode(),
+      signed_1.encode(),
     ));
     assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());
     // conflicting data should pass
     let signed_2 = signed_for_b_r(0, 0, Data::Prevote(Some([0x22; 32]))).await;
     let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      borsh::to_vec(&signed_1).unwrap(),
-      borsh::to_vec(&signed_2).unwrap(),
+      signed_1.encode(),
+      signed_2.encode(),
     ));
     verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap();
@@ -256,16 +241,16 @@ async fn conflicting_msgs_evidence_tx() {
     // (except for Precommit)
     let signed_2 = signed_for_b_r(0, 1, Data::Prevote(Some([0x22; 32]))).await;
     let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      borsh::to_vec(&signed_1).unwrap(),
-      borsh::to_vec(&signed_2).unwrap(),
+      signed_1.encode(),
+      signed_2.encode(),
     ));
     verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap_err();
     // Proposals for different block numbers should also fail as evidence
     let signed_2 = signed_for_b_r(1, 0, Data::Prevote(Some([0x22; 32]))).await;
     let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      borsh::to_vec(&signed_1).unwrap(),
-      borsh::to_vec(&signed_2).unwrap(),
+      signed_1.encode(),
+      signed_2.encode(),
     ));
     verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap_err();
   }
@@ -275,7 +260,7 @@ async fn conflicting_msgs_evidence_tx() {
     let signed_1 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![0x11]))).await;
     let signer_2 =
-      Signer::new(genesis, Zeroizing::new(<Ristretto as WrappedGroup>::F::random(&mut OsRng)));
+      Signer::new(genesis, Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng)));
     let signed_id_2 = signer_2.validator_id().await.unwrap();
     let signed_2 = signed_from_data::<N>(
       signer_2.into(),
@@ -287,14 +272,15 @@ async fn conflicting_msgs_evidence_tx() {
     .await;
     let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      borsh::to_vec(&signed_1).unwrap(),
-      borsh::to_vec(&signed_2).unwrap(),
+      signed_1.encode(),
+      signed_2.encode(),
     ));
     // update schema so that we don't fail due to invalid signature
-    let signer_pub = <Ristretto as GroupIo>::read_G::<&[u8]>(&mut signer_id.as_slice()).unwrap();
+    let signer_pub =
+      <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut signer_id.as_slice()).unwrap();
     let signer_pub_2 =
-      <Ristretto as GroupIo>::read_G::<&[u8]>(&mut signed_id_2.as_slice()).unwrap();
+      <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut signed_id_2.as_slice()).unwrap();
     let validators =
       Arc::new(Validators::new(genesis, vec![(signer_pub, 1), (signer_pub_2, 1)]).unwrap());
@@ -306,8 +292,8 @@ async fn conflicting_msgs_evidence_tx() {
     let signed_1 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![]))).await;
     let signed_2 = signed_for_b_r(0, 0, Data::Prevote(None)).await;
     let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      borsh::to_vec(&signed_1).unwrap(),
-      borsh::to_vec(&signed_2).unwrap(),
+      signed_1.encode(),
+      signed_2.encode(),
     ));
     assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());
   }
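Common to every case above: evidence built from the same encoding twice must be rejected, while genuinely differing messages may pass. The core property as a toy check; `conflicting` is a hypothetical helper for illustration, not the crate's `verify_tendermint_tx`:

// A real implementation also decodes both messages and checks they share a
// signer, block number, and round while differing in content.
fn conflicting(first: &[u8], second: &[u8]) -> bool {
  first != second
}

fn main() {
  let a = vec![0x11];
  let b = vec![0x22];
  assert!(!conflicting(&a, &a)); // the same message twice is not evidence
  assert!(conflicting(&a, &b)); // differing messages are potential evidence
}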


@@ -8,9 +8,8 @@ use blake2::{Digest, Blake2b512};
 use ciphersuite::{
   group::{Group, GroupEncoding},
-  *,
+  Ciphersuite, Ristretto,
 };
-use dalek_ff_group::Ristretto;
 use schnorr::SchnorrSignature;

 use crate::{TRANSACTION_SIZE_LIMIT, ReadWrite};
@@ -43,7 +42,7 @@ pub enum TransactionError {
 /// Data for a signed transaction.
 #[derive(Clone, PartialEq, Eq, Debug)]
 pub struct Signed {
-  pub signer: <Ristretto as WrappedGroup>::G,
+  pub signer: <Ristretto as Ciphersuite>::G,
   pub nonce: u32,
   pub signature: SchnorrSignature<Ristretto>,
 }
@@ -160,10 +159,10 @@ pub trait Transaction: 'static + Send + Sync + Clone + Eq + Debug + ReadWrite {
   /// Do not override this unless you know what you're doing.
   ///
   /// Panics if called on non-signed transactions.
-  fn sig_hash(&self, genesis: [u8; 32]) -> <Ristretto as WrappedGroup>::F {
+  fn sig_hash(&self, genesis: [u8; 32]) -> <Ristretto as Ciphersuite>::F {
     match self.kind() {
       TransactionKind::Signed(order, Signed { signature, .. }) => {
-        <Ristretto as WrappedGroup>::F::from_bytes_mod_order_wide(
+        <Ristretto as Ciphersuite>::F::from_bytes_mod_order_wide(
           &Blake2b512::digest(
             [
               b"Tributary Signed Transaction",
@@ -182,8 +181,8 @@ pub trait Transaction: 'static + Send + Sync + Clone + Eq + Debug + ReadWrite {
   }
 }

-pub trait GAIN: FnMut(&<Ristretto as WrappedGroup>::G, &[u8]) -> Option<u32> {}
-impl<F: FnMut(&<Ristretto as WrappedGroup>::G, &[u8]) -> Option<u32>> GAIN for F {}
+pub trait GAIN: FnMut(&<Ristretto as Ciphersuite>::G, &[u8]) -> Option<u32> {}
+impl<F: FnMut(&<Ristretto as Ciphersuite>::G, &[u8]) -> Option<u32>> GAIN for F {}

 pub(crate) fn verify_transaction<F: GAIN, T: Transaction>(
   tx: &T,
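The `sig_hash` body above hashes a domain tag, the genesis hash, and the transaction's fields, then wide-reduces the digest into the scalar field. A standalone sketch of that derivation, assuming the `Ciphersuite`-era API shown on the `+` side; `msg` is a placeholder for the transaction's actual serialized fields:

use blake2::{Digest, Blake2b512};
use ciphersuite::{Ciphersuite, Ristretto};

// Reduce 64 bytes of Blake2b output modulo the scalar field's order,
// which avoids the bias a 32-byte reduction would introduce.
fn challenge(genesis: [u8; 32], msg: &[u8]) -> <Ristretto as Ciphersuite>::F {
  <Ristretto as Ciphersuite>::F::from_bytes_mod_order_wide(
    &Blake2b512::digest(
      [b"Tributary Signed Transaction".as_ref(), genesis.as_ref(), msg].concat(),
    )
    .into(),
  )
}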


@@ -6,7 +6,7 @@ license = "MIT"
 repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tendermint"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 edition = "2021"
-rust-version = "1.77"
+rust-version = "1.81"

 [package.metadata.docs.rs]
 all-features = true
@@ -21,7 +21,7 @@ thiserror = { version = "2", default-features = false, features = ["std"] }
 hex = { version = "0.4", default-features = false, features = ["std"] }
 log = { version = "0.4", default-features = false, features = ["std"] }

-borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
+parity-scale-codec = { version = "3", default-features = false, features = ["std", "derive"] }
 futures-util = { version = "0.3", default-features = false, features = ["std", "async-await-macro", "sink", "channel"] }
 futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }
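This dependency swap is the heart of the file's diff. For orientation, a sketch of how the two serialization APIs differ, using an illustrative struct (mirroring the crate's `RoundNumber`) with both derives side by side:

use borsh::{BorshSerialize, BorshDeserialize};
use parity_scale_codec::{Encode, Decode};

#[derive(Debug, PartialEq, BorshSerialize, BorshDeserialize, Encode, Decode)]
struct RoundNumber(u32);

fn main() {
  let round = RoundNumber(5);

  // borsh exposes free functions returning io::Result.
  let bytes = borsh::to_vec(&round).unwrap();
  assert_eq!(borsh::from_slice::<RoundNumber>(&bytes).unwrap(), round);

  // SCALE exposes methods via the Encode/Decode traits.
  let bytes = round.encode();
  assert_eq!(RoundNumber::decode(&mut bytes.as_slice()).unwrap(), round);
}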


@@ -1,6 +1,6 @@
 MIT License

-Copyright (c) 2022-2025 Luke Parker
+Copyright (c) 2022-2023 Luke Parker

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal


@@ -3,41 +3,33 @@ use std::{sync::Arc, collections::HashSet};
 use thiserror::Error;

-use borsh::{BorshSerialize, BorshDeserialize};
+use parity_scale_codec::{Encode, Decode};

 use crate::{SignedMessageFor, SlashEvent, commit_msg};

 /// An alias for a series of traits required for a type to be usable as a validator ID,
 /// automatically implemented for all types satisfying those traits.
 pub trait ValidatorId:
-  Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + BorshSerialize + BorshDeserialize
+  Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + Encode + Decode
 {
 }
-#[rustfmt::skip]
-impl<
-  V: Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + BorshSerialize + BorshDeserialize,
-> ValidatorId for V
+impl<V: Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + Encode + Decode> ValidatorId
+  for V
 {
 }

 /// An alias for a series of traits required for a type to be usable as a signature,
 /// automatically implemented for all types satisfying those traits.
-pub trait Signature:
-  Send + Sync + Clone + PartialEq + Eq + Debug + BorshSerialize + BorshDeserialize
-{
-}
-impl<S: Send + Sync + Clone + PartialEq + Eq + Debug + BorshSerialize + BorshDeserialize> Signature
-  for S
-{
-}
+pub trait Signature: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode {}
+impl<S: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode> Signature for S {}

 // Type aliases which are distinct according to the type system

 /// A struct containing a Block Number, wrapped to have a distinct type.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)]
 pub struct BlockNumber(pub u64);

 /// A struct containing a round number, wrapped to have a distinct type.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)]
 pub struct RoundNumber(pub u32);

 /// A signer for a validator.
@@ -122,6 +114,7 @@ impl<S: SignatureScheme> SignatureScheme for Arc<S> {
     self.as_ref().aggregate(validators, msg, sigs)
   }

+  #[must_use]
   fn verify_aggregate(
     &self,
     signers: &[Self::ValidatorId],
@@ -135,7 +128,7 @@ impl<S: SignatureScheme> SignatureScheme for Arc<S> {
 /// A commit for a specific block.
 ///
 /// The list of validators have weight exceeding the threshold for a valid commit.
-#[derive(PartialEq, Debug, BorshSerialize, BorshDeserialize)]
+#[derive(PartialEq, Debug, Encode, Decode)]
 pub struct Commit<S: SignatureScheme> {
   /// End time of the round which created this commit, used as the start time of the next block.
   pub end_time: u64,
@@ -193,7 +186,7 @@ impl<W: Weights> Weights for Arc<W> {
 }

 /// Simplified error enum representing a block's validity.
-#[derive(Clone, Copy, PartialEq, Eq, Debug, Error, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Copy, PartialEq, Eq, Debug, Error, Encode, Decode)]
 pub enum BlockError {
   /// Malformed block which is wholly invalid.
   #[error("invalid block")]
@@ -205,20 +198,9 @@ pub enum BlockError {
 }

 /// Trait representing a Block.
-pub trait Block:
-  Send + Sync + Clone + PartialEq + Eq + Debug + BorshSerialize + BorshDeserialize
-{
+pub trait Block: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode {
   // Type used to identify blocks. Presumably a cryptographic hash of the block.
-  type Id: Send
-    + Sync
-    + Copy
-    + Clone
-    + PartialEq
-    + Eq
-    + AsRef<[u8]>
-    + Debug
-    + BorshSerialize
-    + BorshDeserialize;
+  type Id: Send + Sync + Copy + Clone + PartialEq + Eq + AsRef<[u8]> + Debug + Encode + Decode;

   /// Return the deterministic, unique ID for this block.
   fn id(&self) -> Self::Id;
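`ValidatorId` and `Signature` use the marker-trait-plus-blanket-impl idiom on both sides of this diff; only the serialization bounds change. The idiom itself, reduced to a minimal sketch with an illustrative `Id` trait:

use core::fmt::Debug;

// The marker trait names the full bound set once; the blanket impl makes
// every satisfying type an `Id` automatically.
pub trait Id: Send + Sync + Copy + Eq + Debug {}
impl<T: Send + Sync + Copy + Eq + Debug> Id for T {}

// Callers bound on the alias instead of repeating the whole list.
fn show<I: Id>(id: I) {
  println!("{id:?}");
}

fn main() {
  show(42u64);
}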

Some files were not shown because too many files have changed in this diff.