16 Commits

Author SHA1 Message Date
Luke Parker
ca93c82156 Remove borsh from dkg
It pulls in a lot of bespoke dependencies for little directly-used utility.

Moves the necessary code into the processor.
2025-11-16 18:25:23 -05:00
Luke Parker
5b1875dae6 Update lockfile after recent cherry picks 2025-11-16 18:21:02 -05:00
Luke Parker
bcd68441be Use Alpine to build the runtime
Smaller, works without issue.
2025-11-16 18:21:02 -05:00
Luke Parker
4ebf9ad9c7 Rust 1.91.1 due to the regression re: wasm builds 2025-11-16 18:21:02 -05:00
Luke Parker
807572199c Update misc versions 2025-11-16 18:21:02 -05:00
Luke Parker
3cdc1536c5 Make ethereum-schnorr-contract no-std and no-alloc eligible 2025-11-16 18:21:01 -05:00
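A minimal sketch of what no-std and no-alloc eligibility usually means at a crate root (the `std` feature name is an assumption; ethereum-schnorr-contract's actual gating may differ):

// Build without the standard library when the `std` feature is off; with no
// `extern crate alloc` either, the crate also works on targets without an
// allocator.
#![cfg_attr(not(feature = "std"), no_std)]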
Luke Parker
9e13e5ebff Patch from parity-bip39 back to bip39
Per https://github.com/michalkucharczyk/rust-bip39/tree/mku-2.0.1-release,
`parity-bip39` was a fork to publish a release of `bip39` with two specific PRs
merged. Not only have those PRs been merged, but `bip39` now also accepts
`bitcoin_hashes 0.14` (https://github.com/rust-bitcoin/rust-bip39/pull/76),
making this a great time to reconcile (even though it does technically add a
git dependency until the new release is cut...).
2025-11-16 18:21:01 -05:00
Luke Parker
9b2c254eee Patch librocksdb-sys to never enable jemalloc, which conflicts with mimalloc
Allows us to update mimalloc and enable the newly added guard pages.

Conflict identified by @PlasmaPower.
2025-11-16 18:20:55 -05:00
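For context, a minimal sketch of the conflict being avoided (it assumes, as the commit implies, that binaries in this tree install mimalloc as the global allocator; the exact binary code isn't part of this diff):

use mimalloc::MiMalloc;

// With mimalloc registered as the global allocator, also linking rocksdb's
// bundled jemalloc would put two competing allocators in the same binary.
#[global_allocator]
static GLOBAL: MiMalloc = MiMalloc;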
Luke Parker
0883479068 Remove rust-src as a component for WASM
It's unnecessary since the switch to `wasm32v1-none`.
2025-11-16 17:57:07 -05:00
Luke Parker
c5480c63be Build and run the message queue over Alpine
We previously stopped doing so for stability reasons, but this _should_ be tried
again.
2025-11-16 17:56:51 -05:00
Luke Parker
4280ee6987 revm 33 2025-11-16 17:54:43 -05:00
Luke Parker
91673d7ae3 Remove std feature from revm
It's unnecessary and bloats the dependency tree a fair amount.
2025-11-16 17:52:58 -05:00
Luke Parker
927f07b62b Move bitcoin-serai to core-json and feature-gate the RPC functionality 2025-11-16 17:52:33 -05:00
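A short sketch of what feature-gating the RPC functionality typically looks like in a crate root (the `rpc` feature name is an assumption, not necessarily bitcoin-serai's actual gate):

// The RPC client only compiles when the feature is enabled, keeping the
// default build lighter.
#[cfg(feature = "rpc")]
pub mod rpc;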
Luke Parker
7e774d6d2d Correct when spin::Lazy is exposed as std_shims::sync::LazyLock
It's intended to always be used, even on `std`, when `std::sync::LazyLock` is
not available.
2025-11-16 17:52:26 -05:00
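A hedged sketch of the intended gating; the `has_std_lazy_lock` cfg is hypothetical, standing in for however std-shims detects that `std::sync::LazyLock` (stabilized in Rust 1.80) is actually available:

// Use the standard library's LazyLock only when it exists; otherwise fall
// back to spin's Lazy, even when the `std` feature itself is enabled.
#[cfg(all(feature = "std", has_std_lazy_lock))]
pub use std::sync::LazyLock;
#[cfg(not(all(feature = "std", has_std_lazy_lock)))]
pub use spin::Lazy as LazyLock;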
Luke Parker
fccd06b376 Bump revm 2025-11-16 17:52:23 -05:00
Luke Parker
e3edc0a7fc Add patches to remove the unused optional dependencies tracked in tree
Also performs the usual `cargo update`.
2025-11-16 17:51:38 -05:00
402 changed files with 18583 additions and 14556 deletions

View File

@@ -12,7 +12,7 @@ runs:
steps:
- name: Bitcoin Daemon Cache
id: cache-bitcoind
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # 4.2.4
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: bitcoin.tar.gz
key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

View File

@@ -52,7 +52,7 @@ runs:
- name: Install solc
shell: bash
run: |
cargo +1.91.1 install svm-rs --version =0.5.21
cargo +1.91 install svm-rs --version =0.5.19
svm install 0.8.29
svm use 0.8.29
@@ -75,8 +75,11 @@ runs:
if: runner.os == 'Linux'
- name: Install rootless Docker
uses: docker/setup-docker-action@e61617a16c407a86262fb923c35a616ddbe070b3 # 4.6.0
uses: docker/setup-docker-action@b60f85385d03ac8acfca6d9996982511d8620a19
with:
rootless: true
set-host: true
if: runner.os == 'Linux'
# - name: Cache Rust
# uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43

View File

@@ -5,14 +5,14 @@ inputs:
version:
description: "Version to download and run"
required: false
default: v0.18.4.4
default: v0.18.4.3
runs:
using: "composite"
steps:
- name: Monero Wallet RPC Cache
id: cache-monero-wallet-rpc
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # 4.2.4
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: monero-wallet-rpc
key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

View File

@@ -5,14 +5,14 @@ inputs:
version:
description: "Version to download and run"
required: false
default: v0.18.4.4
default: v0.18.4.3
runs:
using: "composite"
steps:
- name: Monero Daemon Cache
id: cache-monerod
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # 4.2.4
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: /usr/bin/monerod
key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

View File

@@ -5,7 +5,7 @@ inputs:
monero-version:
description: "Monero version to download and run as a regtest node"
required: false
default: v0.18.4.4
default: v0.18.4.3
bitcoin-version:
description: "Bitcoin version to download and run as a regtest node"
@@ -19,9 +19,9 @@ runs:
uses: ./.github/actions/build-dependencies
- name: Install Foundry
uses: foundry-rs/foundry-toolchain@50d5a8956f2e319df19e6b57539d7e2acb9f8c1e # 1.5.0
uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
with:
version: v1.5.0
version: nightly-f625d0fa7c51e65b4bf1e8f7931cd1c6e2e285e9
cache: false
- name: Run a Monero Regtest Node

View File

@@ -1 +1 @@
nightly-2025-12-01
nightly-2025-11-11

View File

@@ -17,7 +17,7 @@ jobs:
test-common:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Build Dependencies
uses: ./.github/actions/build-dependencies

View File

@@ -31,7 +31,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies

View File

@@ -19,7 +19,7 @@ jobs:
test-crypto:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Build Dependencies
uses: ./.github/actions/build-dependencies

View File

@@ -9,10 +9,16 @@ jobs:
name: Run cargo deny
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Advisory Cache
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: ~/.cargo/advisory-db
key: rust-advisory-db
- name: Install cargo deny
run: cargo +1.91.1 install cargo-deny --version =0.18.7
run: cargo +1.91 install cargo-deny --version =0.18.5
- name: Run cargo deny
run: cargo deny -L error --all-features check --hide-inclusion-graph

View File

@@ -13,7 +13,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies

View File

@@ -15,7 +15,7 @@ jobs:
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Get nightly version to use
id: nightly
@@ -43,10 +43,16 @@ jobs:
deny:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Advisory Cache
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
with:
path: ~/.cargo/advisory-db
key: rust-advisory-db
- name: Install cargo deny
run: cargo +1.91.1 install cargo-deny --version =0.18.7
run: cargo +1.91 install cargo-deny --version =0.18.5
- name: Run cargo deny
run: cargo deny -L error --all-features check --hide-inclusion-graph
@@ -54,7 +60,7 @@ jobs:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Get nightly version to use
id: nightly
@@ -67,10 +73,10 @@ jobs:
- name: Run rustfmt
run: cargo +${{ steps.nightly.outputs.version }} fmt -- --check
- name: Install Foundry
uses: foundry-rs/foundry-toolchain@50d5a8956f2e319df19e6b57539d7e2acb9f8c1e # 1.5.0
- name: Install foundry
uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
with:
version: v1.5.0
version: nightly-41d4e5437107f6f42c7711123890147bc736a609
cache: false
- name: Run forge fmt
@@ -79,20 +85,20 @@ jobs:
machete:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Verify all dependencies are in use
run: |
cargo +1.91.1 install cargo-machete --version =0.9.1
cargo +1.91.1 machete
cargo +1.91 install cargo-machete --version =0.9.1
cargo +1.91 machete
msrv:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Verify claimed `rust-version`
shell: bash
run: |
cargo +1.91.1 install cargo-msrv --version =0.18.4
cargo +1.91 install cargo-msrv --version =0.18.4
function check_msrv {
# We `cd` into the directory passed as the first argument, but will return to the
@@ -183,16 +189,16 @@ jobs:
slither:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Slither
run: |
python3 -m pip install slither-analyzer==0.11.3
python3 -m pip install slither-analyzer
slither ./networks/ethereum/schnorr/contracts/Schnorr.sol
slither --include-paths ./networks/ethereum/schnorr/contracts/Schnorr.sol
slither --include-paths ./networks/ethereum/schnorr/contracts ./networks/ethereum/schnorr/contracts/tests/Schnorr.sol
slither processor/ethereum/deployer/contracts/Deployer.sol
slither processor/ethereum/erc20/contracts/IERC20.sol

View File

@@ -27,7 +27,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies

View File

@@ -17,7 +17,7 @@ jobs:
test-common:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Build Dependencies
uses: ./.github/actions/build-dependencies

View File

@@ -9,7 +9,7 @@ jobs:
name: Update nightly
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
with:
submodules: "recursive"

View File

@@ -21,7 +21,7 @@ jobs:
test-networks:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Test Dependencies
uses: ./.github/actions/test-dependencies

View File

@@ -23,7 +23,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies

View File

@@ -46,16 +46,16 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Setup Ruby
uses: ruby/setup-ruby@8aeb6ff8030dd539317f8e1769a044873b56ea71 # 1.268.0
uses: ruby/setup-ruby@44511735964dcb71245e7e55f72539531f7bc0eb
with:
bundler-cache: true
cache-version: 0
working-directory: "${{ github.workspace }}/docs"
- name: Setup Pages
id: pages
uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b # 5.0.0
uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b
- name: Build with Jekyll
run: cd ${{ github.workspace }}/docs && bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
env:
@@ -74,7 +74,7 @@ jobs:
mv target/doc docs/_site/rust
- name: Upload artifact
uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b # 4.0.0
uses: actions/upload-pages-artifact@7b1f4a764d45c48632c6b24a0339c27f5614fb0b
with:
path: "docs/_site/"
@@ -88,4 +88,4 @@ jobs:
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # 4.0.5
uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e

View File

@@ -31,7 +31,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies

View File

@@ -27,10 +27,10 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Run Reproducible Runtime tests
run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-reproducible-runtime-tests -- --nocapture
run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-reproducible-runtime-tests

View File

@@ -29,7 +29,7 @@ jobs:
test-infra:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Build Dependencies
uses: ./.github/actions/build-dependencies
@@ -61,7 +61,6 @@ jobs:
-p serai-monero-processor \
-p tendermint-machine \
-p tributary-sdk \
-p serai-cosign-types \
-p serai-cosign \
-p serai-coordinator-substrate \
-p serai-coordinator-tributary \
@@ -74,7 +73,7 @@ jobs:
test-substrate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Build Dependencies
uses: ./.github/actions/build-dependencies
@@ -83,33 +82,31 @@ jobs:
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
-p serai-primitives \
-p serai-abi \
-p substrate-median \
-p serai-core-pallet \
-p serai-coins-primitives \
-p serai-coins-pallet \
-p serai-validator-sets-pallet \
-p serai-signals-pallet \
-p serai-dex-pallet \
-p serai-validator-sets-primitives \
-p serai-validator-sets-pallet \
-p serai-genesis-liquidity-primitives \
-p serai-genesis-liquidity-pallet \
-p serai-economic-security-pallet \
-p serai-emissions-primitives \
-p serai-emissions-pallet \
-p serai-economic-security-pallet \
-p serai-in-instructions-primitives \
-p serai-in-instructions-pallet \
-p serai-signals-primitives \
-p serai-signals-pallet \
-p serai-abi \
-p serai-runtime \
-p serai-node
-p serai-substrate-tests
test-serai-client:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Run Tests
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-serai
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-bitcoin
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-ethereum
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-monero
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client
run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client

.gitignore vendored: 3 lines changed
View File

@@ -2,10 +2,11 @@ target
# Don't commit any `Cargo.lock` which aren't the workspace's
Cargo.lock
!/Cargo.lock
!./Cargo.lock
# Don't commit any `Dockerfile`, as they're auto-generated, except the only one which isn't
Dockerfile
Dockerfile.fast-epoch
!orchestration/runtime/Dockerfile
.test-logs

Cargo.lock generated: 1979 lines changed

File diff suppressed because it is too large

View File

@@ -62,14 +62,13 @@ members = [
"processor/ethereum/primitives",
"processor/ethereum/test-primitives",
"processor/ethereum/deployer",
"processor/ethereum/erc20",
"processor/ethereum/router",
"processor/ethereum/erc20",
"processor/ethereum",
"processor/monero",
"coordinator/tributary-sdk/tendermint",
"coordinator/tributary-sdk",
"coordinator/cosign/types",
"coordinator/cosign",
"coordinator/substrate",
"coordinator/tributary",
@@ -78,27 +77,34 @@ members = [
"coordinator",
"substrate/primitives",
"substrate/coins/primitives",
"substrate/coins/pallet",
"substrate/dex/pallet",
"substrate/validator-sets/primitives",
"substrate/validator-sets/pallet",
"substrate/genesis-liquidity/primitives",
"substrate/genesis-liquidity/pallet",
"substrate/emissions/primitives",
"substrate/emissions/pallet",
"substrate/economic-security/pallet",
"substrate/in-instructions/primitives",
"substrate/in-instructions/pallet",
"substrate/signals/primitives",
"substrate/signals/pallet",
"substrate/abi",
"substrate/median",
"substrate/core",
"substrate/coins",
"substrate/validator-sets",
"substrate/signals",
"substrate/dex",
"substrate/genesis-liquidity",
"substrate/economic-security",
"substrate/emissions",
"substrate/in-instructions",
"substrate/runtime",
"substrate/node",
"substrate/client/serai",
"substrate/client/bitcoin",
"substrate/client/ethereum",
"substrate/client/monero",
"substrate/client",
"orchestration",
@@ -111,7 +117,6 @@ members = [
"tests/message-queue",
# TODO "tests/processor",
# TODO "tests/coordinator",
"tests/substrate",
# TODO "tests/full-stack",
"tests/reproducible-runtime",
]
@@ -133,14 +138,11 @@ dalek-ff-group = { opt-level = 3 }
multiexp = { opt-level = 3 }
monero-io = { opt-level = 3 }
monero-primitives = { opt-level = 3 }
monero-ed25519 = { opt-level = 3 }
monero-generators = { opt-level = 3 }
monero-borromean = { opt-level = 3 }
monero-bulletproofs = { opt-level = 3 }
monero-mlsag = { opt-level = 3 }
monero-clsag = { opt-level = 3 }
monero-borromean = { opt-level = 3 }
monero-bulletproofs-generators = { opt-level = 3 }
monero-bulletproofs = {opt-level = 3 }
monero-oxide = { opt-level = 3 }
# Always compile the eVRF DKG tree with optimizations as well
@@ -170,14 +172,13 @@ panic = "unwind"
overflow-checks = true
[patch.crates-io]
# Point to empty crates for crates unused within in our tree
alloy-eip2124 = { path = "patches/ethereum/alloy-eip2124" }
# Point to empty crates for unused crates in our tree
ark-ff-3 = { package = "ark-ff", path = "patches/ethereum/ark-ff-0.3" }
ark-ff-4 = { package = "ark-ff", path = "patches/ethereum/ark-ff-0.4" }
c-kzg = { path = "patches/ethereum/c-kzg" }
secp256k1-30 = { package = "secp256k1", path = "patches/ethereum/secp256k1-30" }
# Dependencies from monero-oxide which originate from within our own tree, potentially shimmed to account for deviations since publishing
# Dependencies from monero-oxide which originate from within our own tree
std-shims = { path = "patches/std-shims" }
simple-request = { path = "patches/simple-request" }
multiexp = { path = "crypto/multiexp" }
@@ -187,8 +188,6 @@ dalek-ff-group = { path = "patches/dalek-ff-group" }
minimal-ed448 = { path = "crypto/ed448" }
modular-frost = { path = "crypto/frost" }
# Patch due to `std` now including the required functionality
is_terminal_polyfill = { path = "./patches/is_terminal_polyfill" }
# This has a non-deprecated `std` alternative since Rust's 2024 edition
home = { path = "patches/home" }
@@ -214,12 +213,15 @@ parity-bip39 = { path = "patches/parity-bip39" }
k256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" }
p256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" }
# `jemalloc` conflicts with `mimalloc`, so patch to a `rocksdb` which never uses `jemalloc`
librocksdb-sys = { path = "patches/librocksdb-sys" }
[workspace.lints.clippy]
incompatible_msrv = "allow" # Manually verified with a GitHub workflow
manual_is_multiple_of = "allow"
unwrap_or_default = "allow"
map_unwrap_or = "allow"
needless_continue = "allow"
manual_is_multiple_of = "allow"
incompatible_msrv = "allow" # Manually verified with a GitHub workflow
borrow_as_ptr = "deny"
cast_lossless = "deny"
cast_possible_truncation = "deny"

View File

@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/db"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
rust-version = "1.77"
rust-version = "1.65"
[package.metadata.docs.rs]
all-features = true
@@ -17,7 +17,7 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true
[dependencies]
parity-db = { version = "0.5", default-features = false, features = ["arc"], optional = true }
parity-db = { version = "0.5", default-features = false, optional = true }
rocksdb = { version = "0.24", default-features = false, features = ["zstd"], optional = true }
[features]

View File

@@ -15,7 +15,7 @@ pub fn serai_db_key(
///
/// Creates a unit struct and a default implementation for the `key`, `get`, and `set`. The macro
/// uses a syntax similar to defining a function. Parameters are concatenated to produce a key,
/// they must be `borsh` serializable. The return type is used to auto (de)serialize the database
/// they must be `scale` encodable. The return type is used to auto encode and decode the database
/// value bytes using `borsh`.
///
/// # Arguments
@@ -54,10 +54,11 @@ macro_rules! create_db {
)?;
impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? {
pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> {
use scale::Encode;
$crate::serai_db_key(
stringify!($db_name).as_bytes(),
stringify!($field_name).as_bytes(),
&borsh::to_vec(&($($arg),*)).unwrap(),
($($arg),*).encode()
)
}
pub(crate) fn set(
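To illustrate the documented syntax, a hypothetical invocation (the database and table names here are made up): the parameters are SCALE-encoded to form the key, while the `u64` value is still (de)serialized with borsh.

create_db!(
  ExampleDb {
    // Key: SCALE encoding of `network`; value: borsh-encoded `u64`.
    LatestBlock: (network: ExternalNetworkId) -> u64,
  }
);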

View File

@@ -31,6 +31,7 @@ frost = { package = "modular-frost", path = "../crypto/frost" }
frost-schnorrkel = { path = "../crypto/schnorrkel" }
hex = { version = "0.4", default-features = false, features = ["std"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
zalloc = { path = "../common/zalloc" }
@@ -42,7 +43,7 @@ messages = { package = "serai-processor-messages", path = "../processor/messages
message-queue = { package = "serai-message-queue", path = "../message-queue" }
tributary-sdk = { path = "./tributary-sdk" }
serai-client-serai = { path = "../substrate/client/serai", default-features = false }
serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }
log = { version = "0.4", default-features = false, features = ["std"] }
env_logger = { version = "0.10", default-features = false, features = ["humantime"] }

View File

@@ -19,9 +19,11 @@ workspace = true
[dependencies]
blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
serai-client-serai = { path = "../../substrate/client/serai", default-features = false }
serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
log = { version = "0.4", default-features = false, features = ["std"] }
@@ -29,5 +31,3 @@ tokio = { version = "1", default-features = false }
serai-db = { path = "../../common/db", version = "0.1.1" }
serai-task = { path = "../../common/task", version = "0.1" }
serai-cosign-types = { path = "./types" }

View File

@@ -1,21 +1,10 @@
use core::future::Future;
use std::{sync::Arc, collections::HashMap};
use blake2::{Digest, Blake2b256};
use serai_client_serai::{
abi::{
primitives::{
network_id::{ExternalNetworkId, NetworkId},
balance::Amount,
crypto::Public,
validator_sets::{Session, ExternalValidatorSet},
address::SeraiAddress,
merkle::IncrementalUnbalancedMerkleTree,
},
validator_sets::Event,
},
Serai, Events,
use serai_client::{
primitives::{SeraiAddress, Amount},
validator_sets::primitives::ExternalValidatorSet,
Serai,
};
use serai_db::*;
@@ -23,20 +12,9 @@ use serai_task::ContinuallyRan;
use crate::*;
#[derive(BorshSerialize, BorshDeserialize)]
struct Set {
session: Session,
key: Public,
stake: Amount,
}
create_db!(
CosignIntend {
ScanCosignFrom: () -> u64,
BuildsUpon: () -> IncrementalUnbalancedMerkleTree,
Stakes: (network: ExternalNetworkId, validator: SeraiAddress) -> Amount,
Validators: (set: ExternalValidatorSet) -> Vec<SeraiAddress>,
LatestSet: (network: ExternalNetworkId) -> Set,
}
);
@@ -57,38 +35,23 @@ db_channel! {
async fn block_has_events_justifying_a_cosign(
serai: &Serai,
block_number: u64,
) -> Result<(Block, Events, HasEvents), String> {
) -> Result<(Block, HasEvents), String> {
let block = serai
.block_by_number(block_number)
.finalized_block_by_number(block_number)
.await
.map_err(|e| format!("{e:?}"))?
.ok_or_else(|| "couldn't get block which should've been finalized".to_string())?;
let events = serai.events(block.header.hash()).await.map_err(|e| format!("{e:?}"))?;
let serai = serai.as_of(block.hash());
if events.validator_sets().set_keys_events().next().is_some() {
return Ok((block, events, HasEvents::Notable));
if !serai.validator_sets().key_gen_events().await.map_err(|e| format!("{e:?}"))?.is_empty() {
return Ok((block, HasEvents::Notable));
}
if events.coins().burn_with_instruction_events().next().is_some() {
return Ok((block, events, HasEvents::NonNotable));
if !serai.coins().burn_with_instruction_events().await.map_err(|e| format!("{e:?}"))?.is_empty() {
return Ok((block, HasEvents::NonNotable));
}
Ok((block, events, HasEvents::No))
}
// Fetch the `ExternalValidatorSet`s, and their associated keys, used for cosigning as of this
// block.
fn cosigning_sets(getter: &impl Get) -> Vec<(ExternalValidatorSet, Public, Amount)> {
let mut sets = vec![];
for network in ExternalNetworkId::all() {
let Some(Set { session, key, stake }) = LatestSet::get(getter, network) else {
// If this network doesn't have usable keys, move on
continue;
};
sets.push((ExternalValidatorSet { network, session }, key, stake));
}
sets
Ok((block, HasEvents::No))
}
/// A task to determine which blocks we should intend to cosign.
@@ -104,108 +67,56 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
async move {
let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1);
let latest_block_number =
self.serai.latest_finalized_block_number().await.map_err(|e| format!("{e:?}"))?;
self.serai.latest_finalized_block().await.map_err(|e| format!("{e:?}"))?.number();
for block_number in start_block_number ..= latest_block_number {
let mut txn = self.db.txn();
let (block, events, mut has_events) =
let (block, mut has_events) =
block_has_events_justifying_a_cosign(&self.serai, block_number)
.await
.map_err(|e| format!("{e:?}"))?;
let mut builds_upon =
BuildsUpon::get(&txn).unwrap_or(IncrementalUnbalancedMerkleTree::new());
// Check we are indexing a linear chain
if block.header.builds_upon() !=
builds_upon.clone().calculate(serai_client_serai::abi::BLOCK_HEADER_BRANCH_TAG)
if (block_number > 1) &&
(<[u8; 32]>::from(block.header.parent_hash) !=
SubstrateBlockHash::get(&txn, block_number - 1)
.expect("indexing a block but haven't indexed its parent"))
{
Err(format!(
"node's block #{block_number} doesn't build upon the block #{} prior indexed",
block_number - 1
))?;
}
let block_hash = block.header.hash();
let block_hash = block.hash();
SubstrateBlockHash::set(&mut txn, block_number, &block_hash);
builds_upon.append(
serai_client_serai::abi::BLOCK_HEADER_BRANCH_TAG,
Blake2b256::new_with_prefix([serai_client_serai::abi::BLOCK_HEADER_LEAF_TAG])
.chain_update(block_hash.0)
.finalize()
.into(),
);
BuildsUpon::set(&mut txn, &builds_upon);
// Update the stakes
for event in events.validator_sets().allocation_events() {
let Event::Allocation { validator, network, amount } = event else {
panic!("event from `allocation_events` wasn't `Event::Allocation`")
};
let Ok(network) = ExternalNetworkId::try_from(*network) else { continue };
let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0));
Stakes::set(&mut txn, network, *validator, &Amount(existing.0 + amount.0));
}
for event in events.validator_sets().deallocation_events() {
let Event::Deallocation { validator, network, amount, timeline: _ } = event else {
panic!("event from `deallocation_events` wasn't `Event::Deallocation`")
};
let Ok(network) = ExternalNetworkId::try_from(*network) else { continue };
let existing = Stakes::get(&txn, network, *validator).unwrap_or(Amount(0));
Stakes::set(&mut txn, network, *validator, &Amount(existing.0 - amount.0));
}
// Handle decided sets
for event in events.validator_sets().set_decided_events() {
let Event::SetDecided { set, validators } = event else {
panic!("event from `set_decided_events` wasn't `Event::SetDecided`")
};
let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
Validators::set(
&mut txn,
set,
&validators.iter().map(|(validator, _key_shares)| *validator).collect(),
);
}
// Handle declarations of the latest set
for event in events.validator_sets().set_keys_events() {
let Event::SetKeys { set, key_pair } = event else {
panic!("event from `set_keys_events` wasn't `Event::SetKeys`")
};
let mut stake = 0;
for validator in
Validators::take(&mut txn, *set).expect("set which wasn't decided set keys")
{
stake += Stakes::get(&txn, set.network, validator).unwrap_or(Amount(0)).0;
}
LatestSet::set(
&mut txn,
set.network,
&Set { session: set.session, key: key_pair.0, stake: Amount(stake) },
);
}
let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn);
// If this is notable, it creates a new global session, which we index into the database
// now
if has_events == HasEvents::Notable {
let sets_and_keys_and_stakes = cosigning_sets(&txn);
let global_session = GlobalSession::id(
sets_and_keys_and_stakes.iter().map(|(set, _key, _stake)| *set).collect(),
);
let serai = self.serai.as_of(block_hash);
let sets_and_keys = cosigning_sets(&serai).await?;
let global_session =
GlobalSession::id(sets_and_keys.iter().map(|(set, _key)| *set).collect());
let mut sets = Vec::with_capacity(sets_and_keys_and_stakes.len());
let mut keys = HashMap::with_capacity(sets_and_keys_and_stakes.len());
let mut stakes = HashMap::with_capacity(sets_and_keys_and_stakes.len());
let mut sets = Vec::with_capacity(sets_and_keys.len());
let mut keys = HashMap::with_capacity(sets_and_keys.len());
let mut stakes = HashMap::with_capacity(sets_and_keys.len());
let mut total_stake = 0;
for (set, key, stake) in sets_and_keys_and_stakes {
sets.push(set);
keys.insert(set.network, key);
stakes.insert(set.network, stake.0);
total_stake += stake.0;
for (set, key) in &sets_and_keys {
sets.push(*set);
keys.insert(set.network, SeraiAddress::from(*key));
let stake = serai
.validator_sets()
.total_allocated_stake(set.network.into())
.await
.map_err(|e| format!("{e:?}"))?
.unwrap_or(Amount(0))
.0;
stakes.insert(set.network, stake);
total_stake += stake;
}
if total_stake == 0 {
Err(format!("cosigning sets for block #{block_number} had 0 stake in total"))?;

View File

@@ -7,27 +7,18 @@ use std::{sync::Arc, collections::HashMap, time::Instant};
use blake2::{Digest, Blake2s256};
use scale::{Encode, Decode};
use borsh::{BorshSerialize, BorshDeserialize};
use serai_client_serai::{
abi::{
primitives::{
BlockHash,
crypto::{Public, KeyPair},
network_id::ExternalNetworkId,
validator_sets::{Session, ExternalValidatorSet},
address::SeraiAddress,
},
Block,
},
Serai, State,
use serai_client::{
primitives::{ExternalNetworkId, SeraiAddress},
validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair},
Public, Block, Serai, TemporalSerai,
};
use serai_db::*;
use serai_task::*;
pub use serai_cosign_types::*;
/// The cosigns which are intended to be performed.
mod intend;
/// The evaluator of the cosigns.
@@ -37,6 +28,9 @@ mod delay;
pub use delay::BROADCAST_FREQUENCY;
use delay::LatestCosignedBlockNumber;
/// The schnorrkel context to use when signing a cosign.
pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";
/// A 'global session', defined as all validator sets used for cosigning at a given moment.
///
/// We evaluate cosign faults within a global session. This ensures even if cosigners cosign
@@ -59,7 +53,7 @@ use delay::LatestCosignedBlockNumber;
pub(crate) struct GlobalSession {
pub(crate) start_block_number: u64,
pub(crate) sets: Vec<ExternalValidatorSet>,
pub(crate) keys: HashMap<ExternalNetworkId, Public>,
pub(crate) keys: HashMap<ExternalNetworkId, SeraiAddress>,
pub(crate) stakes: HashMap<ExternalNetworkId, u64>,
pub(crate) total_stake: u64,
}
@@ -84,12 +78,74 @@ enum HasEvents {
No,
}
/// An intended cosign.
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub struct CosignIntent {
/// The global session this cosign is being performed under.
pub global_session: [u8; 32],
/// The number of the block to cosign.
pub block_number: u64,
/// The hash of the block to cosign.
pub block_hash: [u8; 32],
/// If this cosign must be handled before further cosigns are.
pub notable: bool,
}
/// A cosign.
#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)]
pub struct Cosign {
/// The global session this cosign is being performed under.
pub global_session: [u8; 32],
/// The number of the block to cosign.
pub block_number: u64,
/// The hash of the block to cosign.
pub block_hash: [u8; 32],
/// The actual cosigner.
pub cosigner: ExternalNetworkId,
}
impl CosignIntent {
/// Convert this into a `Cosign`.
pub fn into_cosign(self, cosigner: ExternalNetworkId) -> Cosign {
let CosignIntent { global_session, block_number, block_hash, notable: _ } = self;
Cosign { global_session, block_number, block_hash, cosigner }
}
}
impl Cosign {
/// The message to sign to sign this cosign.
///
/// This must be signed with schnorrkel, the context set to `COSIGN_CONTEXT`.
pub fn signature_message(&self) -> Vec<u8> {
// We use a schnorrkel context to domain-separate this
self.encode()
}
}
/// A signed cosign.
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
pub struct SignedCosign {
/// The cosign.
pub cosign: Cosign,
/// The signature for the cosign.
pub signature: [u8; 64],
}
impl SignedCosign {
fn verify_signature(&self, signer: serai_client::Public) -> bool {
let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };
signer.verify_simple(COSIGN_CONTEXT, &self.cosign.signature_message(), &signature).is_ok()
}
}
create_db! {
Cosign {
// The following are populated by the intend task and used throughout the library
// An index of Substrate blocks
SubstrateBlockHash: (block_number: u64) -> BlockHash,
SubstrateBlockHash: (block_number: u64) -> [u8; 32],
// A mapping from a global session's ID to its relevant information.
GlobalSessions: (global_session: [u8; 32]) -> GlobalSession,
// The last block to be cosigned by a global session.
@@ -121,6 +177,60 @@ create_db! {
}
}
/// Fetch the keys used for cosigning by a specific network.
async fn keys_for_network(
serai: &TemporalSerai<'_>,
network: ExternalNetworkId,
) -> Result<Option<(Session, KeyPair)>, String> {
let Some(latest_session) =
serai.validator_sets().session(network.into()).await.map_err(|e| format!("{e:?}"))?
else {
// If this network hasn't had a session declared, move on
return Ok(None);
};
// Get the keys for the latest session
if let Some(keys) = serai
.validator_sets()
.keys(ExternalValidatorSet { network, session: latest_session })
.await
.map_err(|e| format!("{e:?}"))?
{
return Ok(Some((latest_session, keys)));
}
// If the latest session has yet to set keys, use the prior session
if let Some(prior_session) = latest_session.0.checked_sub(1).map(Session) {
if let Some(keys) = serai
.validator_sets()
.keys(ExternalValidatorSet { network, session: prior_session })
.await
.map_err(|e| format!("{e:?}"))?
{
return Ok(Some((prior_session, keys)));
}
}
Ok(None)
}
/// Fetch the `ExternalValidatorSet`s, and their associated keys, used for cosigning as of this
/// block.
async fn cosigning_sets(
serai: &TemporalSerai<'_>,
) -> Result<Vec<(ExternalValidatorSet, Public)>, String> {
let mut sets = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
for network in serai_client::primitives::EXTERNAL_NETWORKS {
let Some((session, keys)) = keys_for_network(serai, network).await? else {
// If this network doesn't have usable keys, move on
continue;
};
sets.push((ExternalValidatorSet { network, session }, keys.0));
}
Ok(sets)
}
/// An object usable to request notable cosigns for a block.
pub trait RequestNotableCosigns: 'static + Send {
/// The error type which may be encountered when requesting notable cosigns.
@@ -221,10 +331,7 @@ impl<D: Db> Cosigning<D> {
}
/// Fetch a cosigned Substrate block's hash by its block number.
pub fn cosigned_block(
getter: &impl Get,
block_number: u64,
) -> Result<Option<BlockHash>, Faulted> {
pub fn cosigned_block(getter: &impl Get, block_number: u64) -> Result<Option<[u8; 32]>, Faulted> {
if block_number > Self::latest_cosigned_block_number(getter)? {
return Ok(None);
}
@@ -239,8 +346,8 @@ impl<D: Db> Cosigning<D> {
/// If this global session hasn't produced any notable cosigns, this will return the latest
/// cosigns for this session.
pub fn notable_cosigns(getter: &impl Get, global_session: [u8; 32]) -> Vec<SignedCosign> {
let mut cosigns = vec![];
for network in ExternalNetworkId::all() {
let mut cosigns = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
for network in serai_client::primitives::EXTERNAL_NETWORKS {
if let Some(cosign) = NetworksLatestCosignedBlock::get(getter, global_session, network) {
cosigns.push(cosign);
}
@@ -257,7 +364,7 @@ impl<D: Db> Cosigning<D> {
let mut cosigns = Faults::get(&self.db, faulted).expect("faulted with no faults");
// Also include all of our recognized-as-honest cosigns in an attempt to induce fault
// identification in those who see the faulty cosigns as honest
for network in ExternalNetworkId::all() {
for network in serai_client::primitives::EXTERNAL_NETWORKS {
if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, faulted, network) {
if cosign.cosign.global_session == faulted {
cosigns.push(cosign);
@@ -269,8 +376,8 @@ impl<D: Db> Cosigning<D> {
let Some(global_session) = evaluator::currently_evaluated_global_session(&self.db) else {
return vec![];
};
let mut cosigns = vec![];
for network in ExternalNetworkId::all() {
let mut cosigns = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
for network in serai_client::primitives::EXTERNAL_NETWORKS {
if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, global_session, network) {
cosigns.push(cosign);
}
@@ -325,8 +432,13 @@ impl<D: Db> Cosigning<D> {
// Check the cosign's signature
{
let key =
*global_session.keys.get(&network).ok_or(IntakeCosignError::NonParticipatingNetwork)?;
let key = Public::from({
let Some(key) = global_session.keys.get(&network) else {
Err(IntakeCosignError::NonParticipatingNetwork)?
};
*key
});
if !signed_cosign.verify_signature(key) {
Err(IntakeCosignError::InvalidSignature)?;
}
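As a usage sketch, the signing counterpart to `verify_signature` might look as follows (illustrative only; the coordinator's actual signing path isn't shown in this diff):

// Sign a cosign under `COSIGN_CONTEXT`, matching the `verify_simple` call in
// `verify_signature` above.
fn sign_cosign(keypair: &schnorrkel::Keypair, cosign: Cosign) -> SignedCosign {
  let signature = keypair.sign_simple(COSIGN_CONTEXT, &cosign.signature_message()).to_bytes();
  SignedCosign { cosign, signature }
}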

View File

@@ -1,25 +0,0 @@
[package]
name = "serai-cosign-types"
version = "0.1.0"
description = "Evaluator of cosigns for the Serai network"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/cosign"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
rust-version = "1.85"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
serai-primitives = { path = "../../../substrate/primitives", default-features = false, features = ["std"] }

View File

@@ -1,72 +0,0 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(missing_docs)]
//! Types used when cosigning Serai. For more info, please see `serai-cosign`.
use borsh::{BorshSerialize, BorshDeserialize};
use serai_primitives::{BlockHash, crypto::Public, network_id::ExternalNetworkId};
/// The schnorrkel context to use when signing a cosign.
pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";
/// An intended cosign.
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub struct CosignIntent {
/// The global session this cosign is being performed under.
pub global_session: [u8; 32],
/// The number of the block to cosign.
pub block_number: u64,
/// The hash of the block to cosign.
pub block_hash: BlockHash,
/// If this cosign must be handled before further cosigns are.
pub notable: bool,
}
/// A cosign.
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub struct Cosign {
/// The global session this cosign is being performed under.
pub global_session: [u8; 32],
/// The number of the block to cosign.
pub block_number: u64,
/// The hash of the block to cosign.
pub block_hash: BlockHash,
/// The actual cosigner.
pub cosigner: ExternalNetworkId,
}
impl CosignIntent {
/// Convert this into a `Cosign`.
pub fn into_cosign(self, cosigner: ExternalNetworkId) -> Cosign {
let CosignIntent { global_session, block_number, block_hash, notable: _ } = self;
Cosign { global_session, block_number, block_hash, cosigner }
}
}
impl Cosign {
/// The message to sign to sign this cosign.
///
/// This must be signed with schnorrkel, the context set to `COSIGN_CONTEXT`.
pub fn signature_message(&self) -> Vec<u8> {
// We use a schnorrkel context to domain-separate this
borsh::to_vec(self).unwrap()
}
}
/// A signed cosign.
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
pub struct SignedCosign {
/// The cosign.
pub cosign: Cosign,
/// The signature for the cosign.
pub signature: [u8; 64],
}
impl SignedCosign {
/// Verify a cosign's signature.
pub fn verify_signature(&self, signer: Public) -> bool {
let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };
signer.verify_simple(COSIGN_CONTEXT, &self.cosign.signature_message(), &signature).is_ok()
}
}

View File

@@ -22,7 +22,7 @@ borsh = { version = "1", default-features = false, features = ["std", "derive",
serai-db = { path = "../../common/db", version = "0.1" }
serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] }
serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
serai-cosign = { path = "../cosign" }
tributary-sdk = { path = "../tributary-sdk" }

View File

@@ -29,7 +29,7 @@ schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
hex = { version = "0.4", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
serai-client-serai = { path = "../../../substrate/client/serai", default-features = false }
serai-client = { path = "../../../substrate/client", default-features = false, features = ["serai", "borsh"] }
serai-cosign = { path = "../../cosign" }
tributary-sdk = { path = "../../tributary-sdk" }

View File

@@ -7,7 +7,7 @@ use rand_core::{RngCore, OsRng};
use blake2::{Digest, Blake2s256};
use schnorrkel::{Keypair, PublicKey, Signature};
use serai_client_serai::abi::primitives::crypto::Public;
use serai_client::primitives::PublicKey as Public;
use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use libp2p::{
@@ -104,7 +104,7 @@ impl OnlyValidators {
.verify_simple(PROTOCOL.as_bytes(), &msg, &sig)
.map_err(|_| io::Error::other("invalid signature"))?;
Ok(peer_id_from_public(Public(public_key.to_bytes())))
Ok(peer_id_from_public(Public::from_raw(public_key.to_bytes())))
}
}

View File

@@ -1,11 +1,11 @@
use core::{future::Future, str::FromStr};
use core::future::Future;
use std::{sync::Arc, collections::HashSet};
use rand_core::{RngCore, OsRng};
use tokio::sync::mpsc;
use serai_client_serai::{RpcError, Serai};
use serai_client::{SeraiError, Serai};
use libp2p::{
core::multiaddr::{Protocol, Multiaddr},
@@ -50,7 +50,7 @@ impl ContinuallyRan for DialTask {
const DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 10 * 60;
type Error = RpcError;
type Error = SeraiError;
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {
@@ -94,13 +94,6 @@ impl ContinuallyRan for DialTask {
usize::try_from(OsRng.next_u64() % u64::try_from(potential_peers.len()).unwrap())
.unwrap();
let randomly_selected_peer = potential_peers.swap_remove(index_to_dial);
let Ok(randomly_selected_peer) = libp2p::Multiaddr::from_str(&randomly_selected_peer)
else {
log::error!(
"peer from substrate wasn't a valid `Multiaddr`: {randomly_selected_peer}"
);
continue;
};
log::info!("found peer from substrate: {randomly_selected_peer}");

View File

@@ -13,10 +13,9 @@ use rand_core::{RngCore, OsRng};
use zeroize::Zeroizing;
use schnorrkel::Keypair;
use serai_client_serai::{
abi::primitives::{
crypto::Public, network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet,
},
use serai_client::{
primitives::{ExternalNetworkId, PublicKey},
validator_sets::primitives::ExternalValidatorSet,
Serai,
};
@@ -67,7 +66,7 @@ use dial::DialTask;
const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o')
fn peer_id_from_public(public: Public) -> PeerId {
fn peer_id_from_public(public: PublicKey) -> PeerId {
// 0 represents the identity Multihash, that no hash was performed
// It's an internal constant so we can't refer to the constant inside libp2p
PeerId::from_multihash(Multihash::wrap(0, &public.0).unwrap()).unwrap()

View File

@@ -6,7 +6,7 @@ use std::{
use borsh::BorshDeserialize;
use serai_client_serai::abi::primitives::validator_sets::ExternalValidatorSet;
use serai_client::validator_sets::primitives::ExternalValidatorSet;
use tokio::sync::{mpsc, oneshot, RwLock};

View File

@@ -4,8 +4,9 @@ use std::{
collections::{HashSet, HashMap},
};
use serai_client_serai::abi::primitives::{network_id::ExternalNetworkId, validator_sets::Session};
use serai_client_serai::{RpcError, Serai};
use serai_client::{
primitives::ExternalNetworkId, validator_sets::primitives::Session, SeraiError, Serai,
};
use serai_task::{Task, ContinuallyRan};
@@ -51,7 +52,7 @@ impl Validators {
async fn session_changes(
serai: impl Borrow<Serai>,
sessions: impl Borrow<HashMap<ExternalNetworkId, Session>>,
) -> Result<Vec<(ExternalNetworkId, Session, HashSet<PeerId>)>, RpcError> {
) -> Result<Vec<(ExternalNetworkId, Session, HashSet<PeerId>)>, SeraiError> {
/*
This uses the latest finalized block, not the latest cosigned block, which should be fine as
in the worst case, we'd connect to unexpected validators. They still shouldn't be able to
@@ -60,18 +61,18 @@ impl Validators {
Besides, we can't connect to historical validators, only the current validators.
*/
let serai = serai.borrow().state().await?;
let temporal_serai = serai.borrow().as_of_latest_finalized_block().await?;
let temporal_serai = temporal_serai.validator_sets();
let mut session_changes = vec![];
{
// FuturesUnordered can be bad practice as it'll cause timeouts if infrequently polled, but
// we poll it till it yields all futures with the most minimal processing possible
let mut futures = FuturesUnordered::new();
for network in ExternalNetworkId::all() {
for network in serai_client::primitives::EXTERNAL_NETWORKS {
let sessions = sessions.borrow();
let serai = serai.borrow();
futures.push(async move {
let session = match serai.current_session(network.into()).await {
let session = match temporal_serai.session(network.into()).await {
Ok(Some(session)) => session,
Ok(None) => return Ok(None),
Err(e) => return Err(e),
@@ -80,16 +81,12 @@ impl Validators {
if sessions.get(&network) == Some(&session) {
Ok(None)
} else {
match serai.current_validators(network.into()).await {
Ok(Some(validators)) => Ok(Some((
match temporal_serai.active_network_validators(network.into()).await {
Ok(validators) => Ok(Some((
network,
session,
validators
.into_iter()
.map(|validator| peer_id_from_public(validator.into()))
.collect(),
validators.into_iter().map(peer_id_from_public).collect(),
))),
Ok(None) => panic!("network has session yet no validators"),
Err(e) => Err(e),
}
}
@@ -156,7 +153,7 @@ impl Validators {
}
/// Update the view of the validators.
pub(crate) async fn update(&mut self) -> Result<(), RpcError> {
pub(crate) async fn update(&mut self) -> Result<(), SeraiError> {
let session_changes = Self::session_changes(&*self.serai, &self.sessions).await?;
self.incorporate_session_changes(session_changes);
Ok(())
@@ -209,7 +206,7 @@ impl ContinuallyRan for UpdateValidatorsTask {
const DELAY_BETWEEN_ITERATIONS: u64 = 60;
const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
type Error = RpcError;
type Error = SeraiError;
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {

View File

@@ -1,7 +1,7 @@
use core::future::Future;
use std::time::{Duration, SystemTime};
use serai_primitives::validator_sets::{ExternalValidatorSet, KeyShares};
use serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet};
use futures_lite::FutureExt;
@@ -30,7 +30,7 @@ pub const MIN_BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;
/// commit is `8 + (validators * 32) + (32 + (validators * 32))` (for the time, list of validators,
/// and aggregate signature). Accordingly, this should be a safe over-estimate.
pub const BATCH_SIZE_LIMIT: usize = MIN_BLOCKS_PER_BATCH *
(tributary_sdk::BLOCK_SIZE_LIMIT + 32 + ((KeyShares::MAX_PER_SET as usize) * 128));
(tributary_sdk::BLOCK_SIZE_LIMIT + 32 + ((MAX_KEY_SHARES_PER_SET as usize) * 128));
/// Sends a heartbeat to other validators on regular intervals informing them of our Tributary's
/// tip.

View File

@@ -7,7 +7,7 @@ use std::collections::HashMap;
use borsh::{BorshSerialize, BorshDeserialize};
use serai_primitives::{network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet};
use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet};
use serai_db::Db;
use tributary_sdk::{ReadWrite, TransactionTrait, Tributary, TributaryReader};

View File

@@ -5,10 +5,9 @@ use serai_db::{create_db, db_channel};
use dkg::Participant;
use serai_client_serai::abi::primitives::{
crypto::KeyPair,
network_id::ExternalNetworkId,
validator_sets::{Session, ExternalValidatorSet},
use serai_client::{
primitives::ExternalNetworkId,
validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair},
};
use serai_cosign::SignedCosign;

View File

@@ -12,8 +12,10 @@ use frost_schnorrkel::{
use serai_db::{DbTxn, Db as DbTrait};
#[rustfmt::skip]
use serai_client_serai::abi::primitives::{validator_sets::ExternalValidatorSet, address::SeraiAddress};
use serai_client::{
primitives::SeraiAddress,
validator_sets::primitives::{ExternalValidatorSet, musig_context, set_keys_message},
};
use serai_task::{DoesNotError, ContinuallyRan};
@@ -158,7 +160,7 @@ impl<CD: DbTrait, TD: DbTrait> ConfirmDkgTask<CD, TD> {
let (machine, preprocess) = AlgorithmMachine::new(
schnorrkel(),
// We use a 1-of-1 Musig here as we don't know who will actually be in this Musig yet
musig(ExternalValidatorSet::musig_context(&set), key, &[public_key]).unwrap(),
musig(musig_context(set.into()), key, &[public_key]).unwrap(),
)
.preprocess(&mut OsRng);
// We take the preprocess so we can use it in a distinct machine with the actual Musig
@@ -258,12 +260,9 @@ impl<CD: DbTrait, TD: DbTrait> ContinuallyRan for ConfirmDkgTask<CD, TD> {
})
.collect::<Vec<_>>();
let keys = musig(
ExternalValidatorSet::musig_context(&self.set.set),
self.key.clone(),
&musig_public_keys,
)
.unwrap();
let keys =
musig(musig_context(self.set.set.into()), self.key.clone(), &musig_public_keys)
.unwrap();
// Rebuild the machine
let (machine, preprocess_from_cache) =
@@ -297,10 +296,9 @@ impl<CD: DbTrait, TD: DbTrait> ContinuallyRan for ConfirmDkgTask<CD, TD> {
};
// Calculate our share
let (machine, share) = match handle_frost_error(machine.sign(
preprocesses,
&ExternalValidatorSet::set_keys_message(&self.set.set, &key_pair),
)) {
let (machine, share) = match handle_frost_error(
machine.sign(preprocesses, &set_keys_message(&self.set.set, &key_pair)),
) {
Ok((machine, share)) => (machine, share),
// This yields the *musig participant index*
Err(participant) => {

View File

@@ -14,14 +14,9 @@ use borsh::BorshDeserialize;
use tokio::sync::mpsc;
use serai_client_serai::{
abi::primitives::{
BlockHash,
crypto::{Public, Signature, ExternalKey, KeyPair},
network_id::ExternalNetworkId,
validator_sets::ExternalValidatorSet,
address::SeraiAddress,
},
use serai_client::{
primitives::{ExternalNetworkId, PublicKey, SeraiAddress, Signature},
validator_sets::primitives::{ExternalValidatorSet, KeyPair},
Serai,
};
use message_queue::{Service, client::MessageQueue};
@@ -66,7 +61,9 @@ async fn serai() -> Arc<Serai> {
let Ok(serai) = Serai::new(format!(
"http://{}:9944",
serai_env::var("SERAI_HOSTNAME").expect("Serai hostname wasn't provided")
)) else {
))
.await
else {
log::error!("couldn't connect to the Serai node");
tokio::time::sleep(delay).await;
delay = (delay + SERAI_CONNECTION_DELAY).min(MAX_SERAI_CONNECTION_DELAY);
@@ -216,12 +213,10 @@ async fn handle_network(
&mut txn,
ExternalValidatorSet { network, session },
&KeyPair(
Public(substrate_key),
ExternalKey(
network_key
.try_into()
.expect("generated a network key which exceeds the maximum key length"),
),
PublicKey::from_raw(substrate_key),
network_key
.try_into()
.expect("generated a network key which exceeds the maximum key length"),
),
);
}
@@ -289,13 +284,12 @@ async fn handle_network(
&mut txn,
ExternalValidatorSet { network, session },
slash_report,
Signature(signature),
Signature::from(signature),
);
}
},
messages::ProcessorMessage::Substrate(msg) => match msg {
messages::substrate::ProcessorMessage::SubstrateBlockAck { block, plans } => {
let block = BlockHash(block);
let mut by_session = HashMap::new();
for plan in plans {
by_session
@@ -487,7 +481,7 @@ async fn main() {
);
// Handle each of the networks
for network in ExternalNetworkId::all() {
for network in serai_client::primitives::EXTERNAL_NETWORKS {
tokio::spawn(handle_network(db.clone(), message_queue.clone(), serai.clone(), network));
}

View File

@@ -10,10 +10,7 @@ use tokio::sync::mpsc;
use serai_db::{DbTxn, Db as DbTrait};
use serai_client_serai::abi::primitives::{
network_id::ExternalNetworkId,
validator_sets::{Session, ExternalValidatorSet},
};
use serai_client::validator_sets::primitives::{Session, ExternalValidatorSet};
use message_queue::{Service, Metadata, client::MessageQueue};
use tributary_sdk::Tributary;
@@ -42,7 +39,7 @@ impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
let mut made_progress = false;
// Handle the Canonical events
for network in ExternalNetworkId::all() {
for network in serai_client::primitives::EXTERNAL_NETWORKS {
loop {
let mut txn = self.db.txn();
let Some(msg) = serai_coordinator_substrate::Canonical::try_recv(&mut txn, network)

View File

@@ -11,7 +11,8 @@ use tokio::sync::mpsc;
use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel};
use serai_client_serai::abi::primitives::validator_sets::ExternalValidatorSet;
use scale::Encode;
use serai_client::validator_sets::primitives::ExternalValidatorSet;
use tributary_sdk::{TransactionKind, TransactionError, ProvidedError, TransactionTrait, Tributary};
@@ -478,8 +479,7 @@ pub(crate) async fn spawn_tributary<P: P2p>(
return;
}
let genesis =
<[u8; 32]>::from(Blake2s::<U32>::digest(borsh::to_vec(&(set.serai_block, set.set)).unwrap()));
let genesis = <[u8; 32]>::from(Blake2s::<U32>::digest((set.serai_block, set.set).encode()));
// Since the Serai block will be finalized, then cosigned, before we handle this, this time will
// be a couple of minutes stale. While the Tributary will still function with a start time in the
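The new genesis derivation can be read as a standalone function along these lines (types simplified; `scale` is this workspace's alias for `parity-scale-codec`):

use blake2::{Blake2s, Digest, digest::consts::U32};
use scale::Encode;

// The Tributary genesis is now the Blake2s-256 hash of the SCALE-encoded
// (Serai block, validator set) tuple, where borsh serialization was used
// before.
fn tributary_genesis(serai_block: [u8; 32], set: impl Encode) -> [u8; 32] {
  <[u8; 32]>::from(Blake2s::<U32>::digest((serai_block, set).encode()))
}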

View File

@@ -20,15 +20,17 @@ workspace = true
[dependencies]
bitvec = { version = "1", default-features = false, features = ["std"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] }
serai-client-serai = { path = "../../substrate/client/serai", default-features = false }
serai-client = { path = "../../substrate/client", version = "0.1", default-features = false, features = ["serai", "borsh"] }
log = { version = "0.4", default-features = false, features = ["std"] }
futures = { version = "0.3", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false }
serai-db = { path = "../../common/db", version = "0.1.1" }
serai-task = { path = "../../common/task", version = "0.1" }

View File

@@ -3,13 +3,7 @@ use std::sync::Arc;
use futures::stream::{StreamExt, FuturesOrdered};
use serai_client_serai::{
abi::{
self,
primitives::{network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet},
},
Serai,
};
use serai_client::{validator_sets::primitives::ExternalValidatorSet, Serai};
use messages::substrate::{InInstructionResult, ExecutedBatch, CoordinatorMessage};
@@ -21,7 +15,6 @@ use serai_cosign::Cosigning;
create_db!(
CoordinatorSubstrateCanonical {
NextBlock: () -> u64,
LastIndexedBatchId: (network: ExternalNetworkId) -> u32,
}
);
@@ -52,10 +45,10 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
// These are all the events which generate canonical messages
struct CanonicalEvents {
time: u64,
set_keys_events: Vec<abi::validator_sets::Event>,
slash_report_events: Vec<abi::validator_sets::Event>,
batch_events: Vec<abi::in_instructions::Event>,
burn_events: Vec<abi::coins::Event>,
key_gen_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
set_retired_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
batch_events: Vec<serai_client::in_instructions::InInstructionsEvent>,
burn_events: Vec<serai_client::coins::CoinsEvent>,
}
// For a cosigned block, fetch all relevant events
@@ -73,24 +66,40 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
}
Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()),
};
let events = serai.events(block_hash).await.map_err(|e| format!("{e}"))?;
let set_keys_events = events.validator_sets().set_keys_events().cloned().collect();
let slash_report_events =
events.validator_sets().slash_report_events().cloned().collect();
let batch_events = events.in_instructions().batch_events().cloned().collect();
let burn_events = events.coins().burn_with_instruction_events().cloned().collect();
let Some(block) = serai.block(block_hash).await.map_err(|e| format!("{e:?}"))? else {
let temporal_serai = serai.as_of(block_hash);
let temporal_serai_validators = temporal_serai.validator_sets();
let temporal_serai_instructions = temporal_serai.in_instructions();
let temporal_serai_coins = temporal_serai.coins();
let (block, key_gen_events, set_retired_events, batch_events, burn_events) =
tokio::try_join!(
serai.block(block_hash),
temporal_serai_validators.key_gen_events(),
temporal_serai_validators.set_retired_events(),
temporal_serai_instructions.batch_events(),
temporal_serai_coins.burn_with_instruction_events(),
)
.map_err(|e| format!("{e:?}"))?;
let Some(block) = block else {
Err(format!("Serai node didn't have cosigned block #{block_number}"))?
};
// We use time in seconds, not milliseconds, here
let time = block.header.unix_time_in_millis() / 1000;
let time = if block_number == 0 {
block.time().unwrap_or(0)
} else {
// Serai's block time is in milliseconds
block
.time()
.ok_or_else(|| "non-genesis Serai block didn't have a time".to_string())? /
1000
};
Ok((
block_number,
CanonicalEvents {
time,
set_keys_events,
slash_report_events,
key_gen_events,
set_retired_events,
batch_events,
burn_events,
},
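
The hunk above replaces a single `events` fetch with several concurrent RPC reads joined by `tokio::try_join!`, which awaits all futures at once and short-circuits on the first error. A minimal sketch with stand-in fetchers (assumes tokio's rt, macros, and time features):

use core::time::Duration;
use tokio::time::sleep;

// Stand-in fetchers; the real calls are Serai RPC reads
async fn fetch_block() -> Result<u64, String> {
  sleep(Duration::from_millis(10)).await;
  Ok(42)
}

async fn fetch_events() -> Result<Vec<&'static str>, String> {
  Ok(vec!["KeyGen", "SetRetired"])
}

#[tokio::main]
async fn main() -> Result<(), String> {
  // Both futures run concurrently; the first error aborts the join
  let (block, events) = tokio::try_join!(fetch_block(), fetch_events())?;
  println!("block {block}, events {events:?}");
  Ok(())
}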
@@ -122,9 +131,10 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
let mut txn = self.db.txn();
for set_keys in block.set_keys_events {
let abi::validator_sets::Event::SetKeys { set, key_pair } = &set_keys else {
panic!("`SetKeys` event wasn't a `SetKeys` event: {set_keys:?}");
for key_gen in block.key_gen_events {
let serai_client::validator_sets::ValidatorSetsEvent::KeyGen { set, key_pair } = &key_gen
else {
panic!("KeyGen event wasn't a KeyGen event: {key_gen:?}");
};
crate::Canonical::send(
&mut txn,
@@ -137,10 +147,12 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
);
}
for slash_report in block.slash_report_events {
let abi::validator_sets::Event::SlashReport { set } = &slash_report else {
panic!("`SlashReport` event wasn't a `SlashReport` event: {slash_report:?}");
for set_retired in block.set_retired_events {
let serai_client::validator_sets::ValidatorSetsEvent::SetRetired { set } = &set_retired
else {
panic!("SetRetired event wasn't a SetRetired event: {set_retired:?}");
};
let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
crate::Canonical::send(
&mut txn,
set.network,
@@ -148,12 +160,10 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
);
}
for network in ExternalNetworkId::all() {
for network in serai_client::primitives::EXTERNAL_NETWORKS {
let mut batch = None;
for this_batch in &block.batch_events {
// Only irrefutable as this is the only member of the enum at this time
#[expect(irrefutable_let_patterns)]
let abi::in_instructions::Event::Batch {
let serai_client::in_instructions::InInstructionsEvent::Batch {
network: batch_network,
publishing_session,
id,
@@ -184,19 +194,14 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
})
.collect(),
});
if LastIndexedBatchId::get(&txn, network) != id.checked_sub(1) {
panic!(
"next batch from Serai's ID was not an increment of the last indexed batch's ID"
);
}
LastIndexedBatchId::set(&mut txn, network, id);
}
}
let mut burns = vec![];
for burn in &block.burn_events {
let abi::coins::Event::BurnWithInstruction { from: _, instruction } = &burn else {
let serai_client::coins::CoinsEvent::BurnWithInstruction { from: _, instruction } =
&burn
else {
panic!("BurnWithInstruction event wasn't a BurnWithInstruction event: {burn:?}");
};
if instruction.balance.coin.network() == network {
@@ -218,7 +223,3 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
}
}
}
pub(crate) fn last_indexed_batch_id(txn: &impl DbTxn, network: ExternalNetworkId) -> Option<u32> {
LastIndexedBatchId::get(txn, network)
}

View File

@@ -3,14 +3,9 @@ use std::sync::Arc;
use futures::stream::{StreamExt, FuturesOrdered};
use serai_client_serai::{
abi::primitives::{
BlockHash,
crypto::EmbeddedEllipticCurveKeys as EmbeddedEllipticCurveKeysStruct,
network_id::ExternalNetworkId,
validator_sets::{KeyShares, ExternalValidatorSet},
address::SeraiAddress,
},
use serai_client::{
primitives::{SeraiAddress, EmbeddedEllipticCurve},
validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet},
Serai,
};
@@ -24,10 +19,6 @@ use crate::NewSetInformation;
create_db!(
CoordinatorSubstrateEphemeral {
NextBlock: () -> u64,
EmbeddedEllipticCurveKeys: (
network: ExternalNetworkId,
validator: SeraiAddress
) -> EmbeddedEllipticCurveKeysStruct,
}
);
@@ -58,11 +49,10 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
// These are all the events which generate canonical messages
struct EphemeralEvents {
block_hash: BlockHash,
block_hash: [u8; 32],
time: u64,
embedded_elliptic_curve_keys_events: Vec<serai_client_serai::abi::validator_sets::Event>,
set_decided_events: Vec<serai_client_serai::abi::validator_sets::Event>,
accepted_handover_events: Vec<serai_client_serai::abi::validator_sets::Event>,
new_set_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
accepted_handover_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
}
// For a cosigned block, fetch all relevant events
@@ -81,31 +71,31 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()),
};
let events = serai.events(block_hash).await.map_err(|e| format!("{e}"))?;
let embedded_elliptic_curve_keys_events = events
.validator_sets()
.set_embedded_elliptic_curve_keys_events()
.cloned()
.collect::<Vec<_>>();
let set_decided_events =
events.validator_sets().set_decided_events().cloned().collect::<Vec<_>>();
let accepted_handover_events =
events.validator_sets().accepted_handover_events().cloned().collect::<Vec<_>>();
let Some(block) = serai.block(block_hash).await.map_err(|e| format!("{e:?}"))? else {
let temporal_serai = serai.as_of(block_hash);
let temporal_serai_validators = temporal_serai.validator_sets();
let (block, new_set_events, accepted_handover_events) = tokio::try_join!(
serai.block(block_hash),
temporal_serai_validators.new_set_events(),
temporal_serai_validators.accepted_handover_events(),
)
.map_err(|e| format!("{e:?}"))?;
let Some(block) = block else {
Err(format!("Serai node didn't have cosigned block #{block_number}"))?
};
// We use time in seconds, not milliseconds, here
let time = block.header.unix_time_in_millis() / 1000;
let time = if block_number == 0 {
block.time().unwrap_or(0)
} else {
// Serai's block time is in milliseconds
block
.time()
.ok_or_else(|| "non-genesis Serai block didn't have a time".to_string())? /
1000
};
Ok((
block_number,
EphemeralEvents {
block_hash,
time,
embedded_elliptic_curve_keys_events,
set_decided_events,
accepted_handover_events,
},
EphemeralEvents { block_hash, time, new_set_events, accepted_handover_events },
))
}
}
@@ -136,82 +126,105 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
let mut txn = self.db.txn();
for event in block.embedded_elliptic_curve_keys_events {
let serai_client_serai::abi::validator_sets::Event::SetEmbeddedEllipticCurveKeys {
validator,
keys,
} = &event
else {
panic!(
"{}: {event:?}",
"`SetEmbeddedEllipticCurveKeys` event wasn't a `SetEmbeddedEllipticCurveKeys` event"
);
for new_set in block.new_set_events {
let serai_client::validator_sets::ValidatorSetsEvent::NewSet { set } = &new_set else {
panic!("NewSet event wasn't a NewSet event: {new_set:?}");
};
EmbeddedEllipticCurveKeys::set(&mut txn, keys.network(), *validator, keys);
}
for set_decided in block.set_decided_events {
let serai_client_serai::abi::validator_sets::Event::SetDecided { set, validators } =
&set_decided
else {
panic!("`SetDecided` event wasn't a `SetDecided` event: {set_decided:?}");
};
// We only coordinate over external networks
let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
let validators =
validators.iter().map(|(validator, weight)| (*validator, weight.0)).collect::<Vec<_>>();
let serai = self.serai.as_of(block.block_hash);
let serai = serai.validator_sets();
let Some(validators) =
serai.participants(set.network.into()).await.map_err(|e| format!("{e:?}"))?
else {
Err(format!(
"block #{block_number} declared a new set but didn't have the participants"
))?
};
let validators = validators
.into_iter()
.map(|(validator, weight)| (SeraiAddress::from(validator), weight))
.collect::<Vec<_>>();
let in_set = validators.iter().any(|(validator, _)| *validator == self.validator);
if in_set {
if u16::try_from(validators.len()).is_err() {
Err("more than u16::MAX validators sent")?;
}
let Ok(validators) = validators
.into_iter()
.map(|(validator, weight)| u16::try_from(weight).map(|weight| (validator, weight)))
.collect::<Result<Vec<_>, _>>()
else {
Err("validator's weight exceeded u16::MAX".to_string())?
};
// Do the summation in u32 so we don't risk a u16 overflow
let total_weight = validators.iter().map(|(_, weight)| u32::from(*weight)).sum::<u32>();
if total_weight > u32::from(KeyShares::MAX_PER_SET) {
if total_weight > u32::from(MAX_KEY_SHARES_PER_SET) {
Err(format!(
"{set:?} has {total_weight} key shares when the max is {}",
KeyShares::MAX_PER_SET
"{set:?} has {total_weight} key shares when the max is {MAX_KEY_SHARES_PER_SET}"
))?;
}
let total_weight = u16::try_from(total_weight)
.expect("value smaller than `u16` constant but doesn't fit in `u16`");
let total_weight = u16::try_from(total_weight).unwrap();
// Fetch all of the validators' embedded elliptic curve keys
let mut embedded_elliptic_curve_keys = FuturesOrdered::new();
for (validator, _) in &validators {
let validator = *validator;
// try_join doesn't return a future so we need to wrap it in this additional async
// block
embedded_elliptic_curve_keys.push_back(async move {
tokio::try_join!(
// One future to fetch the substrate embedded key
serai.embedded_elliptic_curve_key(
validator.into(),
EmbeddedEllipticCurve::Embedwards25519
),
// One future to fetch the external embedded key, if there is a distinct curve
async {
// `embedded_elliptic_curves` is documented to have the second entry be the
// network-specific curve (if it exists and is distinct from Embedwards25519)
if let Some(curve) = set.network.embedded_elliptic_curves().get(1) {
serai.embedded_elliptic_curve_key(validator.into(), *curve).await.map(Some)
} else {
Ok(None)
}
}
)
.map(|(substrate_embedded_key, external_embedded_key)| {
(validator, substrate_embedded_key, external_embedded_key)
})
});
}
let mut evrf_public_keys = Vec::with_capacity(usize::from(total_weight));
for (validator, weight) in &validators {
let keys = match EmbeddedEllipticCurveKeys::get(&txn, set.network, *validator)
.expect("selected validator lacked embedded elliptic curve keys")
{
EmbeddedEllipticCurveKeysStruct::Bitcoin(substrate, external) => {
assert_eq!(set.network, ExternalNetworkId::Bitcoin);
(substrate, external.to_vec())
let (future_validator, substrate_embedded_key, external_embedded_key) =
embedded_elliptic_curve_keys.next().await.unwrap().map_err(|e| format!("{e:?}"))?;
assert_eq!(*validator, future_validator);
let external_embedded_key =
external_embedded_key.unwrap_or(substrate_embedded_key.clone());
match (substrate_embedded_key, external_embedded_key) {
(Some(substrate_embedded_key), Some(external_embedded_key)) => {
let substrate_embedded_key = <[u8; 32]>::try_from(substrate_embedded_key)
.map_err(|_| "Embedwards25519 key wasn't 32 bytes".to_string())?;
for _ in 0 .. *weight {
evrf_public_keys.push((substrate_embedded_key, external_embedded_key.clone()));
}
}
EmbeddedEllipticCurveKeysStruct::Ethereum(substrate, external) => {
assert_eq!(set.network, ExternalNetworkId::Ethereum);
(substrate, external.to_vec())
}
EmbeddedEllipticCurveKeysStruct::Monero(substrate) => {
assert_eq!(set.network, ExternalNetworkId::Monero);
(substrate, substrate.to_vec())
}
};
for _ in 0 .. *weight {
evrf_public_keys.push(keys.clone());
_ => Err("NewSet with validator missing an embedded key".to_string())?,
}
}
let mut new_set = NewSetInformation {
set,
serai_block: block.block_hash.0,
serai_block: block.block_hash,
declaration_time: block.time,
// TODO: This should be inlined into the Processor's key gen code
// It's legacy from when we removed participants from the key gen
threshold: ((total_weight * 2) / 3) + 1,
// TODO: Why are `validators` and `evrf_public_keys` two separate fields?
validators,
evrf_public_keys,
participant_indexes: Default::default(),
@@ -225,7 +238,7 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
}
for accepted_handover in block.accepted_handover_events {
let serai_client_serai::abi::validator_sets::Event::AcceptedHandover { set } =
let serai_client::validator_sets::ValidatorSetsEvent::AcceptedHandover { set } =
&accepted_handover
else {
panic!("AcceptedHandover event wasn't a AcceptedHandover event: {accepted_handover:?}");

View File

@@ -4,18 +4,15 @@
use std::collections::HashMap;
use scale::{Encode, Decode};
use borsh::{BorshSerialize, BorshDeserialize};
use dkg::Participant;
use serai_client_serai::abi::{
primitives::{
network_id::ExternalNetworkId,
validator_sets::{Session, ExternalValidatorSet, SlashReport},
crypto::{Signature, KeyPair},
address::SeraiAddress,
instructions::SignedBatch,
},
use serai_client::{
primitives::{ExternalNetworkId, SeraiAddress, Signature},
validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair, SlashReport},
in_instructions::primitives::SignedBatch,
Transaction,
};
@@ -23,7 +20,6 @@ use serai_db::*;
mod canonical;
pub use canonical::CanonicalEventStream;
use canonical::last_indexed_batch_id;
mod ephemeral;
pub use ephemeral::EphemeralEventStream;
@@ -42,7 +38,7 @@ pub struct NewSetInformation {
pub set: ExternalValidatorSet,
/// The Serai block which declared it.
pub serai_block: [u8; 32],
/// The time of the block which declared it, in seconds since the epoch.
/// The time of the block which declared it, in seconds.
pub declaration_time: u64,
/// The threshold to use.
pub threshold: u16,
@@ -101,9 +97,9 @@ mod _public_db {
create_db!(
CoordinatorSubstrate {
// Keys to set on the Serai network
Keys: (network: ExternalNetworkId) -> (Session, Transaction),
Keys: (network: ExternalNetworkId) -> (Session, Vec<u8>),
// Slash reports to publish onto the Serai network
SlashReports: (network: ExternalNetworkId) -> (Session, Transaction),
SlashReports: (network: ExternalNetworkId) -> (Session, Vec<u8>),
}
);
}
@@ -176,19 +172,20 @@ impl Keys {
}
}
let tx = serai_client_serai::ValidatorSets::set_keys(
let tx = serai_client::validator_sets::SeraiValidatorSets::set_keys(
set.network,
key_pair,
signature_participants,
signature,
);
_public_db::Keys::set(txn, set.network, &(set.session, tx));
_public_db::Keys::set(txn, set.network, &(set.session, tx.encode()));
}
pub(crate) fn take(
txn: &mut impl DbTxn,
network: ExternalNetworkId,
) -> Option<(Session, Transaction)> {
_public_db::Keys::take(txn, network)
let (session, tx) = _public_db::Keys::take(txn, network)?;
Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
}
}
@@ -197,7 +194,7 @@ pub struct SignedBatches;
impl SignedBatches {
/// Send a `SignedBatch` to publish onto Serai.
pub fn send(txn: &mut impl DbTxn, batch: &SignedBatch) {
_public_db::SignedBatches::send(txn, batch.batch.network(), batch);
_public_db::SignedBatches::send(txn, batch.batch.network, batch);
}
pub(crate) fn try_recv(txn: &mut impl DbTxn, network: ExternalNetworkId) -> Option<SignedBatch> {
_public_db::SignedBatches::try_recv(txn, network)
@@ -224,14 +221,18 @@ impl SlashReports {
}
}
let tx =
serai_client_serai::ValidatorSets::report_slashes(set.network, slash_report, signature);
_public_db::SlashReports::set(txn, set.network, &(set.session, tx));
let tx = serai_client::validator_sets::SeraiValidatorSets::report_slashes(
set.network,
slash_report,
signature,
);
_public_db::SlashReports::set(txn, set.network, &(set.session, tx.encode()));
}
pub(crate) fn take(
txn: &mut impl DbTxn,
network: ExternalNetworkId,
) -> Option<(Session, Transaction)> {
_public_db::SlashReports::take(txn, network)
let (session, tx) = _public_db::SlashReports::take(txn, network)?;
Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
}
}
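
Both `Keys` and `SlashReports` above now persist the transaction's SCALE encoding and decode it on `take`, trusting bytes we wrote ourselves to decode (hence the `unwrap`). A minimal sketch of the round-trip, with a stand-in store and `Transaction`:

use parity_scale_codec::{Decode, Encode};

#[derive(Debug, PartialEq, Encode, Decode)]
struct Transaction(Vec<u8>);

// Stand-in store; the real code uses the create_db! mapping
fn set(db: &mut Vec<(u32, Vec<u8>)>, session: u32, tx: &Transaction) {
  db.push((session, tx.encode()));
}

fn take(db: &mut Vec<(u32, Vec<u8>)>) -> Option<(u32, Transaction)> {
  let (session, tx) = db.pop()?;
  // We only ever read bytes we wrote, so decoding is trusted to succeed
  Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
}

fn main() {
  let mut db = Vec::new();
  set(&mut db, 3, &Transaction(vec![9]));
  assert_eq!(take(&mut db), Some((3, Transaction(vec![9]))));
}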

View File

@@ -1,10 +1,8 @@
use core::future::Future;
use std::sync::Arc;
use serai_client_serai::{
abi::primitives::{network_id::ExternalNetworkId, instructions::SignedBatch},
RpcError, Serai,
};
#[rustfmt::skip]
use serai_client::{primitives::ExternalNetworkId, in_instructions::primitives::SignedBatch, SeraiError, Serai};
use serai_db::{Get, DbTxn, Db, create_db};
use serai_task::ContinuallyRan;
@@ -33,7 +31,7 @@ impl<D: Db> PublishBatchTask<D> {
}
impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
type Error = RpcError;
type Error = SeraiError;
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {
@@ -45,8 +43,8 @@ impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
};
// If this is a Batch not yet published, save it into our unordered mapping
if LastPublishedBatch::get(&txn, self.network) < Some(batch.batch.id()) {
BatchesToPublish::set(&mut txn, self.network, batch.batch.id(), &batch);
if LastPublishedBatch::get(&txn, self.network) < Some(batch.batch.id) {
BatchesToPublish::set(&mut txn, self.network, batch.batch.id, &batch);
}
txn.commit();
@@ -54,8 +52,12 @@ impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
// Synchronize our last published batch with the Serai network's
let next_to_publish = {
// This uses the latest finalized block, not the latest cosigned block, which should be
// fine as in the worst case, the only impact is no longer attempting TX publication
let serai = self.serai.as_of_latest_finalized_block().await?;
let last_batch = serai.in_instructions().last_batch_for_network(self.network).await?;
let mut txn = self.db.txn();
let last_batch = crate::last_indexed_batch_id(&txn, self.network);
let mut our_last_batch = LastPublishedBatch::get(&txn, self.network);
while our_last_batch < last_batch {
let next_batch = our_last_batch.map(|batch| batch + 1).unwrap_or(0);
@@ -66,7 +68,6 @@ impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
if let Some(last_batch) = our_last_batch {
LastPublishedBatch::set(&mut txn, self.network, &last_batch);
}
txn.commit();
last_batch.map(|batch| batch + 1).unwrap_or(0)
};
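
The task above parks out-of-order batches in an unordered mapping and publishes strictly by incrementing ID, synchronized against the last batch Serai reports. A minimal sketch of that ordering logic, with a stand-in store in place of the DB channel and RPC:

use std::collections::HashMap;

// Stand-ins for the DB channel and the Serai publish call
fn publish_next(
  to_publish: &mut HashMap<u32, Vec<u8>>,
  last_published: &mut Option<u32>,
) -> bool {
  let next = last_published.map(|id| id + 1).unwrap_or(0);
  if let Some(batch) = to_publish.remove(&next) {
    println!("publishing batch {next} ({} bytes)", batch.len());
    *last_published = Some(next);
    true
  } else {
    false
  }
}

fn main() {
  // Batches arrive out of order but are published strictly by ID
  let mut store = HashMap::from([(1, vec![0; 8]), (0, vec![0; 4])]);
  let mut last = None;
  while publish_next(&mut store, &mut last) {}
  assert_eq!(last, Some(1));
}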
@@ -74,7 +75,7 @@ impl<D: Db> ContinuallyRan for PublishBatchTask<D> {
if let Some(batch) = BatchesToPublish::get(&self.db, self.network, next_to_publish) {
self
.serai
.publish_transaction(&serai_client_serai::InInstructions::execute_batch(batch))
.publish(&serai_client::in_instructions::SeraiInInstructions::execute_batch(batch))
.await?;
true
} else {

View File

@@ -3,10 +3,7 @@ use std::sync::Arc;
use serai_db::{DbTxn, Db};
use serai_client_serai::{
abi::primitives::{network_id::ExternalNetworkId, validator_sets::Session},
Serai,
};
use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::Session, Serai};
use serai_task::ContinuallyRan;
@@ -36,10 +33,10 @@ impl<D: Db> PublishSlashReportTask<D> {
// This uses the latest finalized block, not the latest cosigned block, which should be
// fine as in the worst case, the only impact is no longer attempting TX publication
let serai = self.serai.state().await.map_err(|e| format!("{e:?}"))?;
let serai = self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
let serai = serai.validator_sets();
let session_after_slash_report = Session(session.0 + 1);
let current_session =
serai.current_session(network.into()).await.map_err(|e| format!("{e:?}"))?;
let current_session = serai.session(network.into()).await.map_err(|e| format!("{e:?}"))?;
let current_session = current_session.map(|session| session.0);
// Only attempt to publish the slash report for session #n while session #n+1 is still
// active
@@ -58,13 +55,14 @@ impl<D: Db> PublishSlashReportTask<D> {
}
// If this session which should publish a slash report already has, move on
if !serai.pending_slash_report(network).await.map_err(|e| format!("{e:?}"))? {
let key_pending_slash_report =
serai.key_pending_slash_report(network).await.map_err(|e| format!("{e:?}"))?;
if key_pending_slash_report.is_none() {
txn.commit();
return Ok(false);
};
// Since this slash report is still pending, publish it
match self.serai.publish_transaction(&slash_report).await {
match self.serai.publish(&slash_report).await {
Ok(()) => {
txn.commit();
Ok(true)
@@ -86,7 +84,7 @@ impl<D: Db> ContinuallyRan for PublishSlashReportTask<D> {
async move {
let mut made_progress = false;
let mut error = None;
for network in ExternalNetworkId::all() {
for network in serai_client::primitives::EXTERNAL_NETWORKS {
let network_res = self.publish(network).await;
// We made progress if any network successfully published their slash report
made_progress |= network_res == Ok(true);
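
Per the comments in the hunks above, the slash report for session N is only published while session N+1 is the active one; once a later session is current, the report is stale. A minimal sketch of that gate, assuming exactly that reading:

fn should_publish(report_session: u32, current_session: Option<u32>) -> bool {
  let session_after_slash_report = report_session + 1;
  match current_session {
    // No session is active yet, so there's nothing to publish against
    None => false,
    // Publish only while the successor session is the current one
    Some(current) => current == session_after_slash_report,
  }
}

fn main() {
  assert!(should_publish(4, Some(5)));
  assert!(!should_publish(4, Some(6))); // session retired; the report is stale
  assert!(!should_publish(4, Some(4))); // handover not yet accepted
}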

View File

@@ -3,10 +3,7 @@ use std::sync::Arc;
use serai_db::{DbTxn, Db};
use serai_client_serai::{
abi::primitives::{network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet},
Serai,
};
use serai_client::{validator_sets::primitives::ExternalValidatorSet, Serai};
use serai_task::ContinuallyRan;
@@ -31,7 +28,7 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
async move {
let mut made_progress = false;
for network in ExternalNetworkId::all() {
for network in serai_client::primitives::EXTERNAL_NETWORKS {
let mut txn = self.db.txn();
let Some((session, keys)) = Keys::take(&mut txn, network) else {
// No keys to set
@@ -40,9 +37,10 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {
// This uses the latest finalized block, not the latest cosigned block, which should be
// fine as in the worst case, the only impact is no longer attempting TX publication
let serai = self.serai.state().await.map_err(|e| format!("{e:?}"))?;
let current_session =
serai.current_session(network.into()).await.map_err(|e| format!("{e:?}"))?;
let serai =
self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
let serai = serai.validator_sets();
let current_session = serai.session(network.into()).await.map_err(|e| format!("{e:?}"))?;
let current_session = current_session.map(|session| session.0);
// Only attempt to set these keys if this isn't a retired session
if Some(session.0) < current_session {
@@ -69,7 +67,7 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {
continue;
};
match self.serai.publish_transaction(&keys).await {
match self.serai.publish(&keys).await {
Ok(()) => {
txn.commit();
made_progress = true;

View File

@@ -36,7 +36,7 @@ log = { version = "0.4", default-features = false, features = ["std"] }
serai-db = { path = "../../common/db", version = "0.1" }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
futures-util = { version = "0.3", default-features = false, features = ["std", "sink", "channel"] }
futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }
tendermint = { package = "tendermint-machine", path = "./tendermint", version = "0.2" }

View File

@@ -5,7 +5,7 @@ use ciphersuite::{group::GroupEncoding, *};
use serai_db::{Get, DbTxn, Db};
use borsh::BorshDeserialize;
use scale::Decode;
use tendermint::ext::{Network, Commit};
@@ -62,7 +62,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
D::key(
b"tributary_blockchain",
b"next_nonce",
[genesis.as_slice(), signer.to_bytes().as_slice(), order].concat(),
[genesis.as_ref(), signer.to_bytes().as_ref(), order].concat(),
)
}
@@ -109,7 +109,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
pub(crate) fn block_from_db(db: &D, genesis: [u8; 32], block: &[u8; 32]) -> Option<Block<T>> {
db.get(Self::block_key(&genesis, block))
.map(|bytes| Block::<T>::read::<&[u8]>(&mut bytes.as_slice()).unwrap())
.map(|bytes| Block::<T>::read::<&[u8]>(&mut bytes.as_ref()).unwrap())
}
pub(crate) fn commit_from_db(db: &D, genesis: [u8; 32], block: &[u8; 32]) -> Option<Vec<u8>> {
@@ -169,7 +169,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
// we must have a commit per valid hash
let commit = Self::commit_from_db(db, genesis, &hash).unwrap();
// commit has to be valid if it is coming from our db
Some(Commit::<N::SignatureScheme>::deserialize_reader(&mut commit.as_slice()).unwrap())
Some(Commit::<N::SignatureScheme>::decode(&mut commit.as_ref()).unwrap())
};
let unsigned_in_chain =
|hash: [u8; 32]| db.get(Self::unsigned_included_key(&self.genesis, &hash)).is_some();
@@ -244,7 +244,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
let commit = |block: u64| -> Option<Commit<N::SignatureScheme>> {
let commit = self.commit_by_block_number(block)?;
// commit has to be valid if it is coming from our db
Some(Commit::<N::SignatureScheme>::deserialize_reader(&mut commit.as_slice()).unwrap())
Some(Commit::<N::SignatureScheme>::decode(&mut commit.as_ref()).unwrap())
};
let mut txn_db = db.clone();
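
The commit reads above switch from borsh's `deserialize_reader` to SCALE's `decode`, unwrapping because anything read back from our own DB must be valid. A minimal sketch of the round-trip, with a simplified `Commit` (the real type is generic over the signature scheme):

use parity_scale_codec::{Decode, Encode};

// Simplified commit; the real type is generic over the signature scheme
#[derive(Debug, PartialEq, Encode, Decode)]
struct Commit {
  end_time: u64,
  validators: Vec<u16>,
  signature: Vec<u8>,
}

fn main() {
  let commit = Commit { end_time: 1_700_000_000, validators: vec![0, 2], signature: vec![1; 64] };
  let bytes = commit.encode();
  // A commit read back from our own DB must decode
  let decoded = Commit::decode(&mut bytes.as_slice()).unwrap();
  assert_eq!(commit, decoded);
}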

View File

@@ -3,11 +3,10 @@ use std::{sync::Arc, io};
use zeroize::Zeroizing;
use borsh::BorshDeserialize;
use ciphersuite::*;
use dalek_ff_group::Ristretto;
use scale::Decode;
use futures_channel::mpsc::UnboundedReceiver;
use futures_util::{StreamExt, SinkExt};
use ::tendermint::{
@@ -178,7 +177,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
let block_number = BlockNumber(blockchain.block_number());
let start_time = if let Some(commit) = blockchain.commit(&blockchain.tip()) {
Commit::<Validators>::deserialize_reader(&mut commit.as_slice()).unwrap().end_time
Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time
} else {
start_time
};
@@ -277,8 +276,8 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
}
let block = TendermintBlock(block.serialize());
let mut commit_ref = commit.as_slice();
let Ok(commit) = Commit::<Arc<Validators>>::deserialize_reader(&mut commit_ref) else {
let mut commit_ref = commit.as_ref();
let Ok(commit) = Commit::<Arc<Validators>>::decode(&mut commit_ref) else {
log::error!("sent an invalidly serialized commit");
return false;
};
@@ -328,7 +327,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
Some(&TENDERMINT_MESSAGE) => {
let Ok(msg) =
SignedMessageFor::<TendermintNetwork<D, T, P>>::deserialize_reader(&mut &msg[1 ..])
SignedMessageFor::<TendermintNetwork<D, T, P>>::decode::<&[u8]>(&mut &msg[1 ..])
else {
log::error!("received invalid tendermint message");
return false;
@@ -368,17 +367,15 @@ impl<D: Db, T: TransactionTrait> TributaryReader<D, T> {
Blockchain::<D, T>::commit_from_db(&self.0, self.1, hash)
}
pub fn parsed_commit(&self, hash: &[u8; 32]) -> Option<Commit<Validators>> {
self
.commit(hash)
.map(|commit| Commit::<Validators>::deserialize_reader(&mut commit.as_slice()).unwrap())
self.commit(hash).map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap())
}
pub fn block_after(&self, hash: &[u8; 32]) -> Option<[u8; 32]> {
Blockchain::<D, T>::block_after(&self.0, self.1, hash)
}
pub fn time_of_block(&self, hash: &[u8; 32]) -> Option<u64> {
self.commit(hash).map(|commit| {
Commit::<Validators>::deserialize_reader(&mut commit.as_slice()).unwrap().end_time
})
self
.commit(hash)
.map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time)
}
pub fn locally_provided_txs_in_block(&self, hash: &[u8; 32], order: &str) -> bool {

View File

@@ -21,7 +21,7 @@ use schnorr::{
use serai_db::Db;
use borsh::{BorshSerialize, BorshDeserialize};
use scale::{Encode, Decode};
use tendermint::{
SignedMessageFor,
ext::{
@@ -248,7 +248,7 @@ impl Weights for Validators {
}
}
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
pub struct TendermintBlock(pub Vec<u8>);
impl BlockTrait for TendermintBlock {
type Id = [u8; 32];
@@ -300,7 +300,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
fn broadcast(&mut self, msg: SignedMessageFor<Self>) -> impl Send + Future<Output = ()> {
async move {
let mut to_broadcast = vec![TENDERMINT_MESSAGE];
msg.serialize(&mut to_broadcast).unwrap();
to_broadcast.extend(msg.encode());
self.p2p.broadcast(self.genesis, to_broadcast).await
}
}
@@ -390,7 +390,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
return invalid_block();
};
let encoded_commit = borsh::to_vec(&commit).unwrap();
let encoded_commit = commit.encode();
loop {
let block_res = self.blockchain.write().await.add_block::<Self>(
&block,

View File

@@ -1,6 +1,6 @@
use std::io;
use borsh::BorshDeserialize;
use scale::{Encode, Decode, IoReader};
use blake2::{Digest, Blake2s256};
@@ -27,14 +27,14 @@ pub enum TendermintTx {
impl ReadWrite for TendermintTx {
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
Evidence::deserialize_reader(reader)
Evidence::decode(&mut IoReader(reader))
.map(TendermintTx::SlashEvidence)
.map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "invalid evidence format"))
}
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
match self {
TendermintTx::SlashEvidence(ev) => writer.write_all(&borsh::to_vec(&ev).unwrap()),
TendermintTx::SlashEvidence(ev) => writer.write_all(&ev.encode()),
}
}
}
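
The `read` above bridges an `io::Read` source into SCALE's `Input` trait via `IoReader`. A minimal sketch of the same bridge, with a stand-in `Evidence` type:

use std::io;

use parity_scale_codec::{Decode, Encode, IoReader};

// Stand-in for the actual Evidence enum
#[derive(Debug, PartialEq, Encode, Decode)]
struct Evidence(Vec<u8>);

fn read_evidence<R: io::Read>(reader: &mut R) -> io::Result<Evidence> {
  // IoReader adapts io::Read into SCALE's Input trait
  Evidence::decode(&mut IoReader(reader))
    .map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "invalid evidence format"))
}

fn main() {
  let bytes = Evidence(vec![1, 2, 3]).encode();
  let mut cursor = io::Cursor::new(bytes);
  assert_eq!(read_evidence(&mut cursor).unwrap(), Evidence(vec![1, 2, 3]));
}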

View File

@@ -10,6 +10,8 @@ use dalek_ff_group::Ristretto;
use ciphersuite::*;
use schnorr::SchnorrSignature;
use scale::Encode;
use ::tendermint::{
ext::{Network, Signer as SignerTrait, SignatureScheme, BlockNumber, RoundNumber},
SignedMessageFor, DataFor, Message, SignedMessage, Data, Evidence,
@@ -198,7 +200,7 @@ pub async fn signed_from_data<N: Network>(
round: RoundNumber(round_number),
data,
};
let sig = signer.sign(&borsh::to_vec(&msg).unwrap()).await;
let sig = signer.sign(&msg.encode()).await;
SignedMessage { msg, sig }
}
@@ -211,5 +213,5 @@ pub async fn random_evidence_tx<N: Network>(
let data = Data::Proposal(Some(RoundNumber(0)), b);
let signer_id = signer.validator_id().await.unwrap();
let signed = signed_from_data::<N>(signer, signer_id, 0, 0, data).await;
TendermintTx::SlashEvidence(Evidence::InvalidValidRound(borsh::to_vec(&signed).unwrap()))
TendermintTx::SlashEvidence(Evidence::InvalidValidRound(signed.encode()))
}

View File

@@ -6,6 +6,8 @@ use rand::{RngCore, rngs::OsRng};
use dalek_ff_group::Ristretto;
use ciphersuite::*;
use scale::Encode;
use tendermint::{
time::CanonicalInstant,
round::RoundData,
@@ -50,10 +52,7 @@ async fn invalid_valid_round() {
async move {
let data = Data::Proposal(valid_round, TendermintBlock(vec![]));
let signed = signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, data).await;
(
signed.clone(),
TendermintTx::SlashEvidence(Evidence::InvalidValidRound(borsh::to_vec(&signed).unwrap())),
)
(signed.clone(), TendermintTx::SlashEvidence(Evidence::InvalidValidRound(signed.encode())))
}
};
@@ -71,8 +70,7 @@ async fn invalid_valid_round() {
let mut random_sig = [0u8; 64];
OsRng.fill_bytes(&mut random_sig);
signed.sig = random_sig;
let tx =
TendermintTx::SlashEvidence(Evidence::InvalidValidRound(borsh::to_vec(&signed).unwrap()));
let tx = TendermintTx::SlashEvidence(Evidence::InvalidValidRound(signed.encode()));
// should fail
assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());
@@ -92,10 +90,7 @@ async fn invalid_precommit_signature() {
let signed =
signed_from_data::<N>(signer.clone().into(), signer_id, 1, 0, Data::Precommit(precommit))
.await;
(
signed.clone(),
TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(borsh::to_vec(&signed).unwrap())),
)
(signed.clone(), TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(signed.encode())))
}
};
@@ -125,8 +120,7 @@ async fn invalid_precommit_signature() {
let mut random_sig = [0u8; 64];
OsRng.fill_bytes(&mut random_sig);
signed.sig = random_sig;
let tx =
TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(borsh::to_vec(&signed).unwrap()));
let tx = TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(signed.encode()));
assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());
}
}
@@ -144,32 +138,24 @@ async fn evidence_with_prevote() {
// it should fail for all reasons.
let mut txs = vec![];
txs.push(TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(
borsh::to_vec(
&&signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
.await,
)
.unwrap(),
signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
.await
.encode(),
)));
txs.push(TendermintTx::SlashEvidence(Evidence::InvalidValidRound(
borsh::to_vec(
&signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
.await,
)
.unwrap(),
signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
.await
.encode(),
)));
// Since these require a second message, provide this one again
// ConflictingMessages can be fired for actually conflicting Prevotes however
txs.push(TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
borsh::to_vec(
&signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
.await,
)
.unwrap(),
borsh::to_vec(
&signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
.await,
)
.unwrap(),
signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
.await
.encode(),
signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
.await
.encode(),
)));
txs
}
@@ -203,16 +189,16 @@ async fn conflicting_msgs_evidence_tx() {
// non-conflicting data should fail
let signed_1 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![0x11]))).await;
let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
borsh::to_vec(&signed_1).unwrap(),
borsh::to_vec(&signed_1).unwrap(),
signed_1.encode(),
signed_1.encode(),
));
assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());
// conflicting data should pass
let signed_2 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![0x22]))).await;
let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
borsh::to_vec(&signed_1).unwrap(),
borsh::to_vec(&signed_2).unwrap(),
signed_1.encode(),
signed_2.encode(),
));
verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap();
@@ -220,16 +206,16 @@ async fn conflicting_msgs_evidence_tx() {
// (except for Precommit)
let signed_2 = signed_for_b_r(0, 1, Data::Proposal(None, TendermintBlock(vec![0x22]))).await;
let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
borsh::to_vec(&signed_1).unwrap(),
borsh::to_vec(&signed_2).unwrap(),
signed_1.encode(),
signed_2.encode(),
));
verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap_err();
// Proposals for different block numbers should also fail as evidence
let signed_2 = signed_for_b_r(1, 0, Data::Proposal(None, TendermintBlock(vec![0x22]))).await;
let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
borsh::to_vec(&signed_1).unwrap(),
borsh::to_vec(&signed_2).unwrap(),
signed_1.encode(),
signed_2.encode(),
));
verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap_err();
}
@@ -239,16 +225,16 @@ async fn conflicting_msgs_evidence_tx() {
// non-conflicting data should fail
let signed_1 = signed_for_b_r(0, 0, Data::Prevote(Some([0x11; 32]))).await;
let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
borsh::to_vec(&signed_1).unwrap(),
borsh::to_vec(&signed_1).unwrap(),
signed_1.encode(),
signed_1.encode(),
));
assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());
// conflicting data should pass
let signed_2 = signed_for_b_r(0, 0, Data::Prevote(Some([0x22; 32]))).await;
let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
borsh::to_vec(&signed_1).unwrap(),
borsh::to_vec(&signed_2).unwrap(),
signed_1.encode(),
signed_2.encode(),
));
verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap();
@@ -256,16 +242,16 @@ async fn conflicting_msgs_evidence_tx() {
// (except for Precommit)
let signed_2 = signed_for_b_r(0, 1, Data::Prevote(Some([0x22; 32]))).await;
let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
borsh::to_vec(&signed_1).unwrap(),
borsh::to_vec(&signed_2).unwrap(),
signed_1.encode(),
signed_2.encode(),
));
verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap_err();
// Proposals for different block numbers should also fail as evidence
let signed_2 = signed_for_b_r(1, 0, Data::Prevote(Some([0x22; 32]))).await;
let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
borsh::to_vec(&signed_1).unwrap(),
borsh::to_vec(&signed_2).unwrap(),
signed_1.encode(),
signed_2.encode(),
));
verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap_err();
}
@@ -287,8 +273,8 @@ async fn conflicting_msgs_evidence_tx() {
.await;
let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
borsh::to_vec(&signed_1).unwrap(),
borsh::to_vec(&signed_2).unwrap(),
signed_1.encode(),
signed_2.encode(),
));
// update schema so that we don't fail due to invalid signature
@@ -306,8 +292,8 @@ async fn conflicting_msgs_evidence_tx() {
let signed_1 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![]))).await;
let signed_2 = signed_for_b_r(0, 0, Data::Prevote(None)).await;
let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
borsh::to_vec(&signed_1).unwrap(),
borsh::to_vec(&signed_2).unwrap(),
signed_1.encode(),
signed_2.encode(),
));
assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());
}

View File

@@ -6,7 +6,7 @@ license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tendermint"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.77"
rust-version = "1.75"
[package.metadata.docs.rs]
all-features = true
@@ -21,7 +21,7 @@ thiserror = { version = "2", default-features = false, features = ["std"] }
hex = { version = "0.4", default-features = false, features = ["std"] }
log = { version = "0.4", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
parity-scale-codec = { version = "3", default-features = false, features = ["std", "derive"] }
futures-util = { version = "0.3", default-features = false, features = ["std", "async-await-macro", "sink", "channel"] }
futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }

View File

@@ -3,41 +3,33 @@ use std::{sync::Arc, collections::HashSet};
use thiserror::Error;
use borsh::{BorshSerialize, BorshDeserialize};
use parity_scale_codec::{Encode, Decode};
use crate::{SignedMessageFor, SlashEvent, commit_msg};
/// An alias for a series of traits required for a type to be usable as a validator ID,
/// automatically implemented for all types satisfying those traits.
pub trait ValidatorId:
Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + BorshSerialize + BorshDeserialize
Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + Encode + Decode
{
}
#[rustfmt::skip]
impl<
V: Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + BorshSerialize + BorshDeserialize,
> ValidatorId for V
impl<V: Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + Encode + Decode> ValidatorId
for V
{
}
/// An alias for a series of traits required for a type to be usable as a signature,
/// automatically implemented for all types satisfying those traits.
pub trait Signature:
Send + Sync + Clone + PartialEq + Eq + Debug + BorshSerialize + BorshDeserialize
{
}
impl<S: Send + Sync + Clone + PartialEq + Eq + Debug + BorshSerialize + BorshDeserialize> Signature
for S
{
}
pub trait Signature: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode {}
impl<S: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode> Signature for S {}
// Type aliases which are distinct according to the type system
/// A struct containing a Block Number, wrapped to have a distinct type.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)]
pub struct BlockNumber(pub u64);
/// A struct containing a round number, wrapped to have a distinct type.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)]
pub struct RoundNumber(pub u32);
/// A signer for a validator.
@@ -135,7 +127,7 @@ impl<S: SignatureScheme> SignatureScheme for Arc<S> {
/// A commit for a specific block.
///
/// The list of validators have weight exceeding the threshold for a valid commit.
#[derive(PartialEq, Debug, BorshSerialize, BorshDeserialize)]
#[derive(PartialEq, Debug, Encode, Decode)]
pub struct Commit<S: SignatureScheme> {
/// End time of the round which created this commit, used as the start time of the next block.
pub end_time: u64,
@@ -193,7 +185,7 @@ impl<W: Weights> Weights for Arc<W> {
}
/// Simplified error enum representing a block's validity.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Error, BorshSerialize, BorshDeserialize)]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Error, Encode, Decode)]
pub enum BlockError {
/// Malformed block which is wholly invalid.
#[error("invalid block")]
@@ -205,20 +197,9 @@ pub enum BlockError {
}
/// Trait representing a Block.
pub trait Block:
Send + Sync + Clone + PartialEq + Eq + Debug + BorshSerialize + BorshDeserialize
{
pub trait Block: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode {
// Type used to identify blocks. Presumably a cryptographic hash of the block.
type Id: Send
+ Sync
+ Copy
+ Clone
+ PartialEq
+ Eq
+ AsRef<[u8]>
+ Debug
+ BorshSerialize
+ BorshDeserialize;
type Id: Send + Sync + Copy + Clone + PartialEq + Eq + AsRef<[u8]> + Debug + Encode + Decode;
/// Return the deterministic, unique ID for this block.
fn id(&self) -> Self::Id;
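
The bounds above migrate the alias traits from borsh to `Encode + Decode`, keeping the blanket impls so any type satisfying the full bound set qualifies automatically. A minimal sketch of the alias-trait pattern:

use core::{fmt::Debug, hash::Hash};

use parity_scale_codec::{Decode, Encode};

// A marker trait with a blanket impl: any type satisfying the bounds is a ValidatorId
pub trait ValidatorId:
  Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + Encode + Decode
{
}
impl<V: Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + Encode + Decode> ValidatorId
  for V
{
}

fn takes_validator<V: ValidatorId>(validator: V) -> Vec<u8> {
  validator.encode()
}

fn main() {
  assert_eq!(takes_validator(7u16), 7u16.encode());
}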

View File

@@ -1,3 +1,5 @@
#![expect(clippy::cast_possible_truncation)]
use core::fmt::Debug;
use std::{
@@ -6,7 +8,7 @@ use std::{
collections::{VecDeque, HashMap},
};
use borsh::{BorshSerialize, BorshDeserialize};
use parity_scale_codec::{Encode, Decode, IoReader};
use futures_channel::mpsc;
use futures_util::{
@@ -41,14 +43,14 @@ pub fn commit_msg(end_time: u64, id: &[u8]) -> Vec<u8> {
[&end_time.to_le_bytes(), id].concat()
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)]
pub enum Step {
Propose,
Prevote,
Precommit,
}
#[derive(Clone, Eq, Debug, BorshSerialize, BorshDeserialize)]
#[derive(Clone, Eq, Debug, Encode, Decode)]
pub enum Data<B: Block, S: Signature> {
Proposal(Option<RoundNumber>, B),
Prevote(Option<B::Id>),
@@ -90,7 +92,7 @@ impl<B: Block, S: Signature> Data<B, S> {
}
}
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
pub struct Message<V: ValidatorId, B: Block, S: Signature> {
pub sender: V,
pub block: BlockNumber,
@@ -100,7 +102,7 @@ pub struct Message<V: ValidatorId, B: Block, S: Signature> {
}
/// A signed Tendermint consensus message to be broadcast to the other validators.
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
pub struct SignedMessage<V: ValidatorId, B: Block, S: Signature> {
pub msg: Message<V, B, S>,
pub sig: S,
@@ -117,18 +119,18 @@ impl<V: ValidatorId, B: Block, S: Signature> SignedMessage<V, B, S> {
&self,
signer: &Scheme,
) -> bool {
signer.verify(self.msg.sender, &borsh::to_vec(&self.msg).unwrap(), &self.sig)
signer.verify(self.msg.sender, &self.msg.encode(), &self.sig)
}
}
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)]
pub enum SlashReason {
FailToPropose,
InvalidBlock,
InvalidProposer,
}
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
pub enum Evidence {
ConflictingMessages(Vec<u8>, Vec<u8>),
InvalidPrecommit(Vec<u8>),
@@ -159,7 +161,7 @@ pub type SignedMessageFor<N> = SignedMessage<
>;
pub fn decode_signed_message<N: Network>(mut data: &[u8]) -> Option<SignedMessageFor<N>> {
SignedMessageFor::<N>::deserialize_reader(&mut data).ok()
SignedMessageFor::<N>::decode(&mut data).ok()
}
fn decode_and_verify_signed_message<N: Network>(
@@ -339,7 +341,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
target: "tendermint",
"proposer for block {}, round {round:?} was {} (me: {res})",
self.block.number.0,
hex::encode(borsh::to_vec(&proposer).unwrap()),
hex::encode(proposer.encode()),
);
res
}
@@ -420,11 +422,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
// TODO: If the new slash event has evidence, emit to prevent a low-importance slash from
// cancelling emission of high-importance slashes
if !self.block.slashes.contains(&validator) {
log::info!(
target: "tendermint",
"Slashing validator {}",
hex::encode(borsh::to_vec(&validator).unwrap()),
);
log::info!(target: "tendermint", "Slashing validator {}", hex::encode(validator.encode()));
self.block.slashes.insert(validator);
self.network.slash(validator, slash_event).await;
}
@@ -674,7 +672,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
self
.slash(
msg.sender,
SlashEvent::WithEvidence(Evidence::InvalidPrecommit(borsh::to_vec(&signed).unwrap())),
SlashEvent::WithEvidence(Evidence::InvalidPrecommit(signed.encode())),
)
.await;
Err(TendermintError::Malicious)?;
@@ -745,10 +743,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
self.broadcast(Data::Prevote(None));
}
self
.slash(
msg.sender,
SlashEvent::WithEvidence(Evidence::InvalidValidRound(borsh::to_vec(&msg).unwrap())),
)
.slash(msg.sender, SlashEvent::WithEvidence(Evidence::InvalidValidRound(msg.encode())))
.await;
Err(TendermintError::Malicious)?;
}
@@ -1039,7 +1034,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
while !messages.is_empty() {
self.network.broadcast(
SignedMessageFor::<N>::deserialize_reader(&mut messages)
SignedMessageFor::<N>::decode(&mut IoReader(&mut messages))
.expect("saved invalid message to DB")
).await;
}
@@ -1064,7 +1059,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
} {
if our_message {
assert!(sig.is_none());
sig = Some(self.signer.sign(&borsh::to_vec(&msg).unwrap()).await);
sig = Some(self.signer.sign(&msg.encode()).await);
}
let sig = sig.unwrap();
@@ -1084,7 +1079,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
let message_tape_key = message_tape_key(self.genesis);
let mut txn = self.db.txn();
let mut message_tape = txn.get(&message_tape_key).unwrap_or(vec![]);
signed_msg.serialize(&mut message_tape).unwrap();
message_tape.extend(signed_msg.encode());
txn.put(&message_tape_key, message_tape);
txn.commit();
}
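
The message tape above appends each signed message's SCALE encoding to a single buffer and replays it by decoding until the buffer is exhausted. A minimal sketch of the tape, with a stand-in message type:

use parity_scale_codec::{Decode, Encode};

// Stand-in for the machine's SignedMessage type
#[derive(Debug, PartialEq, Encode, Decode)]
struct Msg {
  round: u32,
  payload: Vec<u8>,
}

fn main() {
  // Append each message's encoding to a single tape
  let mut tape = Vec::new();
  tape.extend(Msg { round: 0, payload: vec![1] }.encode());
  tape.extend(Msg { round: 1, payload: vec![2, 3] }.encode());

  // Replay by decoding until the tape is exhausted
  let mut slice = tape.as_slice();
  let mut replayed = Vec::new();
  while !slice.is_empty() {
    replayed.push(Msg::decode(&mut slice).expect("saved invalid message to tape"));
  }
  assert_eq!(replayed.len(), 2);
}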

View File

@@ -1,5 +1,7 @@
use std::{sync::Arc, collections::HashMap};
use parity_scale_codec::Encode;
use crate::{ext::*, RoundNumber, Step, DataFor, SignedMessageFor, Evidence};
type RoundLog<N> = HashMap<<N as Network>::ValidatorId, HashMap<Step, SignedMessageFor<N>>>;
@@ -37,10 +39,7 @@ impl<N: Network> MessageLog<N> {
target: "tendermint",
"Validator sent multiple messages for the same block + round + step"
);
Err(Evidence::ConflictingMessages(
borsh::to_vec(&existing).unwrap(),
borsh::to_vec(&signed).unwrap(),
))?;
Err(Evidence::ConflictingMessages(existing.encode(), signed.encode()))?;
}
return Ok(false);
}

View File

@@ -4,7 +4,7 @@ use std::{
time::{UNIX_EPOCH, SystemTime, Duration},
};
use borsh::{BorshSerialize, BorshDeserialize};
use parity_scale_codec::{Encode, Decode};
use futures_util::sink::SinkExt;
use tokio::{sync::RwLock, time::sleep};
@@ -89,7 +89,7 @@ impl Weights for TestWeights {
}
}
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
struct TestBlock {
id: TestBlockId,
valid: Result<(), BlockError>,

View File

@@ -21,6 +21,7 @@ workspace = true
zeroize = { version = "^1.5", default-features = false, features = ["std"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }
@@ -29,14 +30,14 @@ dalek-ff-group = { path = "../../crypto/dalek-ff-group", default-features = fals
dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", default-features = false, features = ["std"] }
serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] }
serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
serai-db = { path = "../../common/db" }
serai-task = { path = "../../common/task", version = "0.1" }
tributary-sdk = { path = "../tributary-sdk" }
serai-cosign-types = { path = "../cosign/types" }
serai-cosign = { path = "../cosign" }
serai-coordinator-substrate = { path = "../substrate" }
messages = { package = "serai-processor-messages", path = "../../processor/messages" }

View File

@@ -1,19 +1,22 @@
#![expect(clippy::cast_possible_truncation)]
use std::collections::HashMap;
use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};
use serai_primitives::{BlockHash, validator_sets::ExternalValidatorSet, address::SeraiAddress};
use serai_client::{primitives::SeraiAddress, validator_sets::primitives::ExternalValidatorSet};
use messages::sign::{VariantSignId, SignId};
use serai_db::*;
use serai_cosign_types::CosignIntent;
use serai_cosign::CosignIntent;
use crate::transaction::SigningProtocolRound;
/// A topic within the database which the group participates in
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
pub enum Topic {
/// Vote to remove a participant
RemoveParticipant {
@@ -122,7 +125,7 @@ impl Topic {
Topic::DkgConfirmation { attempt, round: _ } => Some({
let id = {
let mut id = [0; 32];
let encoded_set = borsh::to_vec(&set).unwrap();
let encoded_set = set.encode();
id[.. encoded_set.len()].copy_from_slice(&encoded_set);
VariantSignId::Batch(id)
};
@@ -232,18 +235,18 @@ create_db!(
SlashPoints: (set: ExternalValidatorSet, validator: SeraiAddress) -> u32,
// The cosign intent for a Substrate block
CosignIntents: (set: ExternalValidatorSet, substrate_block_hash: BlockHash) -> CosignIntent,
CosignIntents: (set: ExternalValidatorSet, substrate_block_hash: [u8; 32]) -> CosignIntent,
// The latest Substrate block to cosign.
LatestSubstrateBlockToCosign: (set: ExternalValidatorSet) -> BlockHash,
LatestSubstrateBlockToCosign: (set: ExternalValidatorSet) -> [u8; 32],
// The hash of the block we're actively cosigning.
ActivelyCosigning: (set: ExternalValidatorSet) -> BlockHash,
ActivelyCosigning: (set: ExternalValidatorSet) -> [u8; 32],
// If this block has already been cosigned.
Cosigned: (set: ExternalValidatorSet, substrate_block_hash: BlockHash) -> (),
Cosigned: (set: ExternalValidatorSet, substrate_block_hash: [u8; 32]) -> (),
// The plans to recognize upon a `Transaction::SubstrateBlock` being included on-chain.
SubstrateBlockPlans: (
set: ExternalValidatorSet,
substrate_block_hash: BlockHash
substrate_block_hash: [u8; 32]
) -> Vec<[u8; 32]>,
// The weight accumulated for a topic.
@@ -291,26 +294,26 @@ impl TributaryDb {
pub(crate) fn latest_substrate_block_to_cosign(
getter: &impl Get,
set: ExternalValidatorSet,
) -> Option<BlockHash> {
) -> Option<[u8; 32]> {
LatestSubstrateBlockToCosign::get(getter, set)
}
pub(crate) fn set_latest_substrate_block_to_cosign(
txn: &mut impl DbTxn,
set: ExternalValidatorSet,
substrate_block_hash: BlockHash,
substrate_block_hash: [u8; 32],
) {
LatestSubstrateBlockToCosign::set(txn, set, &substrate_block_hash);
}
pub(crate) fn actively_cosigning(
txn: &mut impl DbTxn,
set: ExternalValidatorSet,
) -> Option<BlockHash> {
) -> Option<[u8; 32]> {
ActivelyCosigning::get(txn, set)
}
pub(crate) fn start_cosigning(
txn: &mut impl DbTxn,
set: ExternalValidatorSet,
substrate_block_hash: BlockHash,
substrate_block_hash: [u8; 32],
substrate_block_number: u64,
) {
assert!(
@@ -335,14 +338,14 @@ impl TributaryDb {
pub(crate) fn mark_cosigned(
txn: &mut impl DbTxn,
set: ExternalValidatorSet,
substrate_block_hash: BlockHash,
substrate_block_hash: [u8; 32],
) {
Cosigned::set(txn, set, substrate_block_hash, &());
}
pub(crate) fn cosigned(
txn: &mut impl DbTxn,
set: ExternalValidatorSet,
substrate_block_hash: BlockHash,
substrate_block_hash: [u8; 32],
) -> bool {
Cosigned::get(txn, set, substrate_block_hash).is_some()
}
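
The `VariantSignId::Batch` derivation in the hunks above copies the SCALE-encoded set into the front of a zeroed 32-byte ID, assuming the encoding never exceeds 32 bytes. A minimal sketch, with a stand-in set type in place of `ExternalValidatorSet`:

use parity_scale_codec::Encode;

// Stand-in for the set identifier; the real type is ExternalValidatorSet
fn sign_id(set: (u8, u32)) -> [u8; 32] {
  let mut id = [0; 32];
  let encoded_set = set.encode();
  // The encoding is assumed to fit in 32 bytes; the remainder stays zeroed
  id[.. encoded_set.len()].copy_from_slice(&encoded_set);
  id
}

fn main() {
  let id = sign_id((0, 5));
  assert_eq!(&id[.. 5], &[0, 5, 0, 0, 0]);
}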

View File

@@ -8,10 +8,9 @@ use std::collections::HashMap;
use ciphersuite::group::GroupEncoding;
use dkg::Participant;
use serai_primitives::{
BlockHash,
validator_sets::{ExternalValidatorSet, Slash},
address::SeraiAddress,
use serai_client::{
primitives::SeraiAddress,
validator_sets::primitives::{ExternalValidatorSet, Slash},
};
use serai_db::*;
@@ -26,7 +25,7 @@ use tributary_sdk::{
Transaction as TributaryTransaction, Block, TributaryReader, P2p,
};
use serai_cosign_types::CosignIntent;
use serai_cosign::CosignIntent;
use serai_coordinator_substrate::NewSetInformation;
use messages::sign::{VariantSignId, SignId};
@@ -80,7 +79,7 @@ impl CosignIntents {
fn take(
txn: &mut impl DbTxn,
set: ExternalValidatorSet,
substrate_block_hash: BlockHash,
substrate_block_hash: [u8; 32],
) -> Option<CosignIntent> {
db::CosignIntents::take(txn, set, substrate_block_hash)
}
@@ -114,7 +113,7 @@ impl SubstrateBlockPlans {
pub fn set(
txn: &mut impl DbTxn,
set: ExternalValidatorSet,
substrate_block_hash: BlockHash,
substrate_block_hash: [u8; 32],
plans: &Vec<[u8; 32]>,
) {
db::SubstrateBlockPlans::set(txn, set, substrate_block_hash, plans);
@@ -122,7 +121,7 @@ impl SubstrateBlockPlans {
fn take(
txn: &mut impl DbTxn,
set: ExternalValidatorSet,
substrate_block_hash: BlockHash,
substrate_block_hash: [u8; 32],
) -> Option<Vec<[u8; 32]>> {
db::SubstrateBlockPlans::take(txn, set, substrate_block_hash)
}
@@ -575,9 +574,14 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
};
let msgs = (
decode_signed_message::<TendermintNetwork<TD, Transaction, P>>(&data.0).unwrap(),
data.1.as_ref().map(|data| {
decode_signed_message::<TendermintNetwork<TD, Transaction, P>>(data).unwrap()
}),
if data.1.is_some() {
Some(
decode_signed_message::<TendermintNetwork<TD, Transaction, P>>(&data.1.unwrap())
.unwrap(),
)
} else {
None
},
);
// Since anything with evidence is fundamentally faulty behavior, not just temporal
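
The two forms above are behaviorally equivalent: `as_ref()` turns the `Option<T>` into an `Option<&T>` so `map` can borrow the inner value, while the explicit branch checks `is_some()` and then unwraps by value. A standalone sketch of the equivalence (`decode` is a stand-in for `decode_signed_message`):

    fn decode(data: &[u8]) -> usize { data.len() } // stand-in for decode_signed_message

    fn main() {
      let data: (Vec<u8>, Option<Vec<u8>>) = (vec![1], Some(vec![2, 3]));
      // Combinator form: borrows the inner value, no unwrap needed.
      let a = data.1.as_ref().map(|data| decode(data));
      // Explicit form: checks, then moves and unwraps.
      let b = if data.1.is_some() { Some(decode(&data.1.unwrap())) } else { None };
      assert_eq!(a, b);
    }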

View File

@@ -12,9 +12,10 @@ use ciphersuite::{
use dalek_ff_group::Ristretto;
use schnorr::SchnorrSignature;
use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};
use serai_primitives::{BlockHash, validator_sets::KeyShares, address::SeraiAddress};
use serai_client::{primitives::SeraiAddress, validator_sets::primitives::MAX_KEY_SHARES_PER_SET};
use messages::sign::VariantSignId;
@@ -28,7 +29,7 @@ use tributary_sdk::{
use crate::db::Topic;
/// The round this data is for, within a signing protocol.
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
pub enum SigningProtocolRound {
/// A preprocess.
Preprocess,
@@ -137,7 +138,7 @@ pub enum Transaction {
/// be the one selected to be cosigned.
Cosign {
/// The hash of the Substrate block to cosign
substrate_block_hash: BlockHash,
substrate_block_hash: [u8; 32],
},
/// Note an intended-to-be-cosigned Substrate block as cosigned
@@ -175,7 +176,7 @@ pub enum Transaction {
/// cosigning the block in question, it'd be safe to provide this and move on to the next cosign.
Cosigned {
/// The hash of the Substrate block which was cosigned
substrate_block_hash: BlockHash,
substrate_block_hash: [u8; 32],
},
/// Acknowledge a Substrate block
@@ -186,7 +187,7 @@ pub enum Transaction {
/// resulting from its handling.
SubstrateBlock {
/// The hash of the Substrate block
hash: BlockHash,
hash: [u8; 32],
},
/// Acknowledge a Batch
@@ -241,20 +242,19 @@ impl TransactionTrait for Transaction {
fn kind(&self) -> TransactionKind {
match self {
Transaction::RemoveParticipant { participant, signed } => TransactionKind::Signed(
borsh::to_vec(&(b"RemoveParticipant".as_slice(), participant)).unwrap(),
(b"RemoveParticipant", participant).encode(),
signed.to_tributary_signed(0),
),
Transaction::DkgParticipation { signed, .. } => TransactionKind::Signed(
borsh::to_vec(b"DkgParticipation".as_slice()).unwrap(),
signed.to_tributary_signed(0),
),
Transaction::DkgParticipation { signed, .. } => {
TransactionKind::Signed(b"DkgParticipation".encode(), signed.to_tributary_signed(0))
}
Transaction::DkgConfirmationPreprocess { attempt, signed, .. } => TransactionKind::Signed(
borsh::to_vec(&(b"DkgConfirmation".as_slice(), attempt)).unwrap(),
(b"DkgConfirmation", attempt).encode(),
signed.to_tributary_signed(0),
),
Transaction::DkgConfirmationShare { attempt, signed, .. } => TransactionKind::Signed(
borsh::to_vec(&(b"DkgConfirmation".as_slice(), attempt)).unwrap(),
(b"DkgConfirmation", attempt).encode(),
signed.to_tributary_signed(1),
),
@@ -264,14 +264,13 @@ impl TransactionTrait for Transaction {
Transaction::Batch { .. } => TransactionKind::Provided("Batch"),
Transaction::Sign { id, attempt, round, signed, .. } => TransactionKind::Signed(
borsh::to_vec(&(b"Sign".as_slice(), id, attempt)).unwrap(),
(b"Sign", id, attempt).encode(),
signed.to_tributary_signed(round.nonce()),
),
Transaction::SlashReport { signed, .. } => TransactionKind::Signed(
borsh::to_vec(b"SlashReport".as_slice()).unwrap(),
signed.to_tributary_signed(0),
),
Transaction::SlashReport { signed, .. } => {
TransactionKind::Signed(b"SlashReport".encode(), signed.to_tributary_signed(0))
}
}
}
@@ -303,14 +302,14 @@ impl TransactionTrait for Transaction {
Transaction::Batch { .. } => {}
Transaction::Sign { data, .. } => {
if data.len() > usize::from(KeyShares::MAX_PER_SET) {
if data.len() > usize::from(MAX_KEY_SHARES_PER_SET) {
Err(TransactionError::InvalidContent)?
}
// TODO: MAX_SIGN_LEN
}
Transaction::SlashReport { slash_points, .. } => {
if slash_points.len() > usize::from(KeyShares::MAX_PER_SET) {
if slash_points.len() > usize::from(MAX_KEY_SHARES_PER_SET) {
Err(TransactionError::InvalidContent)?
}
}
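
The first argument to `TransactionKind::Signed` above is an ordering key: transactions sharing it share one nonce sequence, so each domain label (plus any id or attempt) gets its own ordering. The hunk swaps between two encodings of that key, `borsh::to_vec(&(b"...".as_slice(), ...))` and SCALE's `(b"...", ...).encode()`; note SCALE writes `b"Sign"` (a `&[u8; 4]`) as four raw bytes, while borsh length-prefixes the `as_slice()` form. A sketch of the borsh variant, assuming borsh 1.x:

    // Distinct domain labels yield distinct nonce-ordering keys.
    fn order_key(label: &[u8], attempt: u32) -> Vec<u8> {
      borsh::to_vec(&(label, attempt)).unwrap()
    }

    fn main() {
      assert_ne!(
        order_key(b"DkgConfirmation".as_slice(), 0),
        order_key(b"SlashReport".as_slice(), 0),
      );
    }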

View File

@@ -10,6 +10,7 @@ ignore = [
"RUSTSEC-2022-0061", # https://github.com/serai-dex/serai/227
"RUSTSEC-2024-0370", # proc-macro-error is unmaintained
"RUSTSEC-2024-0436", # paste is unmaintained
"RUSTSEC-2025-0057", # fxhash is unmaintained, fixed with bytecodealliance/wasmtime/pull/11634
]
[licenses]
@@ -28,6 +29,7 @@ allow = [
"ISC",
"Zlib",
"Unicode-3.0",
# "OpenSSL", # Commented as it's not currently in-use within the Serai tree
"CDLA-Permissive-2.0",
# Non-invasive copyleft
@@ -71,7 +73,6 @@ exceptions = [
{ allow = ["AGPL-3.0-only"], name = "serai-monero-processor" },
{ allow = ["AGPL-3.0-only"], name = "tributary-sdk" },
{ allow = ["AGPL-3.0-only"], name = "serai-cosign-types" },
{ allow = ["AGPL-3.0-only"], name = "serai-cosign" },
{ allow = ["AGPL-3.0-only"], name = "serai-coordinator-substrate" },
{ allow = ["AGPL-3.0-only"], name = "serai-coordinator-tributary" },
@@ -80,9 +81,7 @@ exceptions = [
{ allow = ["AGPL-3.0-only"], name = "serai-coordinator" },
{ allow = ["AGPL-3.0-only"], name = "pallet-session" },
{ allow = ["AGPL-3.0-only"], name = "substrate-median" },
{ allow = ["AGPL-3.0-only"], name = "serai-core-pallet" },
{ allow = ["AGPL-3.0-only"], name = "serai-coins-pallet" },
{ allow = ["AGPL-3.0-only"], name = "serai-dex-pallet" },
@@ -108,7 +107,6 @@ exceptions = [
{ allow = ["AGPL-3.0-only"], name = "serai-message-queue-tests" },
{ allow = ["AGPL-3.0-only"], name = "serai-processor-tests" },
{ allow = ["AGPL-3.0-only"], name = "serai-coordinator-tests" },
{ allow = ["AGPL-3.0-only"], name = "serai-substrate-tests" },
{ allow = ["AGPL-3.0-only"], name = "serai-full-stack-tests" },
{ allow = ["AGPL-3.0-only"], name = "serai-reproducible-runtime-tests" },
]
@@ -126,22 +124,12 @@ multiple-versions = "warn"
wildcards = "warn"
highlight = "all"
deny = [
# Contains a non-reproducible binary blob
# https://github.com/serde-rs/serde/pull/2514
# https://github.com/serde-rs/serde/issues/2575
{ name = "serde_derive", version = ">=1.0.172, <1.0.185" },
# Introduced an insecure implementation of `borsh` removed with `0.15.1`
# https://github.com/rust-lang/hashbrown/issues/576
{ name = "hashbrown", version = "=0.15.0" },
# Legacy which _no one_ should use anymore
{ name = "is-terminal", version = "*" },
# Stop introduction into the tree without realizing it
{ name = "once_cell_polyfill", version = "*" },
# Conflicts with our usage of mimalloc
# https://github.com/serai-dex/serai/issues/690
{ name = "tikv-jemalloc-sys", version = "*" },
]
[sources]
@@ -152,6 +140,8 @@ allow-git = [
"https://github.com/rust-lang-nursery/lazy-static.rs",
"https://github.com/kayabaNerve/elliptic-curves",
"https://github.com/monero-oxide/monero-oxide",
"https://github.com/kayabaNerve/monero-oxide",
"https://github.com/rust-bitcoin/rust-bip39",
"https://github.com/rust-rocksdb/rust-rocksdb",
"https://github.com/serai-dex/patch-polkadot-sdk",
]

View File

@@ -1 +1 @@
3.3.10
3.3.4

View File

@@ -1,4 +1,4 @@
source 'https://rubygems.org'
gem "jekyll", "~> 4.4"
gem "just-the-docs", "0.10.1"
gem "jekyll", "~> 4.3.3"
gem "just-the-docs", "0.8.2"

View File

@@ -1,39 +1,34 @@
GEM
remote: https://rubygems.org/
specs:
addressable (2.8.8)
public_suffix (>= 2.0.2, < 8.0)
base64 (0.3.0)
bigdecimal (3.3.1)
addressable (2.8.7)
public_suffix (>= 2.0.2, < 7.0)
bigdecimal (3.1.8)
colorator (1.1.0)
concurrent-ruby (1.3.5)
csv (3.3.5)
concurrent-ruby (1.3.4)
em-websocket (0.5.3)
eventmachine (>= 0.12.9)
http_parser.rb (~> 0)
eventmachine (1.2.7)
ffi (1.17.2-x86_64-linux-gnu)
ffi (1.17.0-x86_64-linux-gnu)
forwardable-extended (2.6.0)
google-protobuf (4.33.1-x86_64-linux-gnu)
google-protobuf (4.28.2-x86_64-linux)
bigdecimal
rake (>= 13)
http_parser.rb (0.8.0)
i18n (1.14.7)
i18n (1.14.6)
concurrent-ruby (~> 1.0)
jekyll (4.4.1)
jekyll (4.3.4)
addressable (~> 2.4)
base64 (~> 0.2)
colorator (~> 1.0)
csv (~> 3.0)
em-websocket (~> 0.5)
i18n (~> 1.0)
jekyll-sass-converter (>= 2.0, < 4.0)
jekyll-watch (~> 2.0)
json (~> 2.6)
kramdown (~> 2.3, >= 2.3.1)
kramdown-parser-gfm (~> 1.0)
liquid (~> 4.0)
mercenary (~> 0.3, >= 0.3.6)
mercenary (>= 0.3.6, < 0.5)
pathutil (~> 0.9)
rouge (>= 3.0, < 5.0)
safe_yaml (~> 1.0)
@@ -41,20 +36,19 @@ GEM
webrick (~> 1.7)
jekyll-include-cache (0.2.1)
jekyll (>= 3.7, < 5.0)
jekyll-sass-converter (3.1.0)
sass-embedded (~> 1.75)
jekyll-sass-converter (3.0.0)
sass-embedded (~> 1.54)
jekyll-seo-tag (2.8.0)
jekyll (>= 3.8, < 5.0)
jekyll-watch (2.2.1)
listen (~> 3.0)
json (2.16.0)
just-the-docs (0.10.1)
just-the-docs (0.8.2)
jekyll (>= 3.8.5)
jekyll-include-cache
jekyll-seo-tag (>= 2.0)
rake (>= 12.3.1)
kramdown (2.5.1)
rexml (>= 3.3.9)
kramdown (2.4.0)
rexml
kramdown-parser-gfm (1.1.0)
kramdown (~> 2.0)
liquid (4.0.4)
@@ -64,27 +58,27 @@ GEM
mercenary (0.4.0)
pathutil (0.16.2)
forwardable-extended (~> 2.6)
public_suffix (7.0.0)
rake (13.3.1)
public_suffix (6.0.1)
rake (13.2.1)
rb-fsevent (0.11.2)
rb-inotify (0.11.1)
ffi (~> 1.0)
rexml (3.4.4)
rouge (4.6.1)
rexml (3.3.7)
rouge (4.4.0)
safe_yaml (1.0.5)
sass-embedded (1.94.2-x86_64-linux-gnu)
google-protobuf (~> 4.31)
sass-embedded (1.79.3-x86_64-linux-gnu)
google-protobuf (~> 4.27)
terminal-table (3.0.2)
unicode-display_width (>= 1.1.1, < 3)
unicode-display_width (2.6.0)
webrick (1.9.2)
webrick (1.8.2)
PLATFORMS
x86_64-linux
DEPENDENCIES
jekyll (~> 4.4)
just-the-docs (= 0.10.1)
jekyll (~> 4.3.3)
just-the-docs (= 0.8.2)
BUNDLED WITH
2.5.22
2.5.11

View File

@@ -46,7 +46,7 @@ serai-db = { path = "../common/db", optional = true }
serai-env = { path = "../common/env" }
serai-primitives = { path = "../substrate/primitives", default-features = false, features = ["std"] }
serai-primitives = { path = "../substrate/primitives", features = ["borsh"] }
[features]
parity-db = ["serai-db/parity-db"]

View File

@@ -7,7 +7,7 @@ use dalek_ff_group::Ristretto;
pub(crate) use ciphersuite::{group::GroupEncoding, WrappedGroup, GroupCanonicalEncoding};
pub(crate) use schnorr_signatures::SchnorrSignature;
pub(crate) use serai_primitives::network_id::ExternalNetworkId;
pub(crate) use serai_primitives::ExternalNetworkId;
pub(crate) use tokio::{
io::{AsyncReadExt, AsyncWriteExt},
@@ -198,7 +198,7 @@ async fn main() {
KEYS.write().unwrap().insert(service, key);
let mut queues = QUEUES.write().unwrap();
if service == Service::Coordinator {
for network in ExternalNetworkId::all() {
for network in serai_primitives::EXTERNAL_NETWORKS {
queues.insert(
(service, Service::Processor(network)),
RwLock::new(Queue(db.clone(), service, Service::Processor(network))),
@@ -213,7 +213,7 @@ async fn main() {
};
// Make queues for each ExternalNetworkId
for network in ExternalNetworkId::all() {
for network in serai_primitives::EXTERNAL_NETWORKS {
// Use a match so we error if the list of NetworkIds changes
let Some(key) = read_key(match network {
ExternalNetworkId::Bitcoin => "BITCOIN_KEY",
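
Both iteration forms walk every external network: one over an `EXTERNAL_NETWORKS` constant, the other via an associated constructor. A minimal sketch of the constructor approach (the `all` helper shown is an assumption for illustration, not necessarily the real API):

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    enum ExternalNetworkId { Bitcoin, Ethereum, Monero }

    impl ExternalNetworkId {
      // Hypothetical helper: one place to update when variants change.
      fn all() -> [ExternalNetworkId; 3] {
        [Self::Bitcoin, Self::Ethereum, Self::Monero]
      }
    }

    fn main() {
      for network in ExternalNetworkId::all() {
        println!("{network:?}");
      }
    }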

View File

@@ -4,7 +4,7 @@ use ciphersuite::{group::GroupEncoding, FromUniformBytes, WrappedGroup, WithPref
use borsh::{BorshSerialize, BorshDeserialize};
use serai_primitives::network_id::ExternalNetworkId;
use serai_primitives::ExternalNetworkId;
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]
pub enum Service {

View File

@@ -6,7 +6,7 @@ license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/networks/bitcoin"
authors = ["Luke Parker <lukeparker5132@gmail.com>", "Vrx <vrx00@proton.me>"]
edition = "2021"
rust-version = "1.89"
rust-version = "1.85"
[package.metadata.docs.rs]
all-features = true

View File

@@ -155,7 +155,6 @@ impl Rpc {
Err(RpcError::RequestError(Error { code, message }))
}
// `invalidateblock` yields this edge case
// TODO: https://github.com/core-json/core-json/issues/18
RpcResponse { result: None, error: None } => {
if core::any::TypeId::of::<Response>() == core::any::TypeId::of::<()>() {
Ok(Default::default())
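
When the daemon replies with both `result` and `error` null (the `invalidateblock` edge case), the generic caller still expects a `Response`. Comparing `TypeId`s lets the code return `Default::default()` only when the caller asked for `()`. A standalone sketch of the trick (requires `Response: 'static`, as `TypeId::of` does):

    use core::any::TypeId;

    fn handle_null_response<Response: Default + 'static>() -> Result<Response, &'static str> {
      if TypeId::of::<Response>() == TypeId::of::<()>() {
        // The caller wanted no payload, so a null result is acceptable.
        Ok(Response::default())
      } else {
        Err("expected a result, got null")
      }
    }

    fn main() {
      assert!(handle_null_response::<()>().is_ok());
      assert!(handle_null_response::<u64>().is_err());
    }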

View File

@@ -0,0 +1,50 @@
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA256
# This GPG-signed message exists to confirm the SHA256 sums of Monero binaries.
#
# Please verify the signature against the key for binaryFate in the
# source code repository (/utils/gpg_keys).
#
#
## CLI
4e1481835824b9233f204553d4a19645274824f3f6185d8a4b50198470752f54 monero-android-armv7-v0.18.4.3.tar.bz2
1aebd24aaaec3d1e87a64163f2e30ab2cd45f3902a7a859413f6870944775c21 monero-android-armv8-v0.18.4.3.tar.bz2
ff7b9c5cf2cb3d602c3dff1902ac0bc3394768cefc260b6003a9ad4bcfb7c6a4 monero-freebsd-x64-v0.18.4.3.tar.bz2
3ac83049bc565fb5238501f0fa629cdd473bbe94d5fb815088af8e6ff1d761cd monero-linux-armv7-v0.18.4.3.tar.bz2
b1cc5f135de3ba8512d56deb4b536b38c41addde922b2a53bf443aeaf2a5a800 monero-linux-armv8-v0.18.4.3.tar.bz2
95baaa6e8957b92caeaed7fb19b5c2659373df8dd5f4de2601ed3dae7b17ce2f monero-linux-riscv64-v0.18.4.3.tar.bz2
3a7b36ae4da831a4e9913e0a891728f4c43cd320f9b136cdb6686b1d0a33fafa monero-linux-x64-v0.18.4.3.tar.bz2
e0b51ca71934c33cb83cfa8535ffffebf431a2fc9efe3acf2baad96fb6ce21ec monero-linux-x86-v0.18.4.3.tar.bz2
bab9a6d3c2ca519386cff5ff0b5601642a495ed1a209736acaf354468cba1145 monero-mac-armv8-v0.18.4.3.tar.bz2
a8d8273b14f31569f5b7aa3063fbd322e3caec3d63f9f51e287dfc539c7f7d61 monero-mac-x64-v0.18.4.3.tar.bz2
bd9f615657c35d2d7dd9a5168ad54f1547dbf9a335dee7f12fab115f6f394e36 monero-win-x64-v0.18.4.3.zip
e642ed7bbfa34c30b185387fa553aa9c3ea608db1f3fc0e9332afa9b522c9c1a monero-win-x86-v0.18.4.3.zip
6ba5e082c8fa25216aba7aea8198f3e23d4b138df15c512457081e1eb3d03ff6 monero-source-v0.18.4.3.tar.bz2
#
## GUI
7b9255c696a462a00a810d9c8f94e60400a9e7d6438e8d6a8b693e9c13dca9ab monero-gui-install-win-x64-v0.18.4.3.exe
0bd84de0a7c18b2a3ea8e8eff2194ae000cf1060045badfd4ab48674bc1b9325 monero-gui-linux-x64-v0.18.4.3.tar.bz2
68ea30db32efb4a0671ec723297b6629d932fa188edf76edb38a37adaa3528e6 monero-gui-mac-armv8-v0.18.4.3.dmg
27243b01f030fdae68c59cae1daf21f530bbadeaf10579d2908db9a834191cee monero-gui-mac-x64-v0.18.4.3.dmg
dc9531cb4319b37b2c2dea4126e44a0fe6e7b6f34d278ccf5dd9ba693e3031e0 monero-gui-win-x64-v0.18.4.3.zip
0d44687644db9b1824f324416e98f4a46b3bb0a5ed09af54b2835b6facaa0cdd monero-gui-source-v0.18.4.3.tar.bz2
#
#
# ~binaryFate
-----BEGIN PGP SIGNATURE-----
iQIzBAEBCAAdFiEEgaxZH+nEtlxYBq/D8K9NRioL35IFAmjntxwACgkQ8K9NRioL
35J57g//dUOY1KAoLNaV7XLJyGbNk1lT6c2+A8h1wkK6iNQhXsmnc6rcigsHXrG0
LQyVUuZJ6ELhNb6BnH5V0zbcB72t8XjkSEqlYhStUfMnaUvj1VdXtL/OnSs3fEvt
Zwz6QBTxIKDDYEYyXrvCK96cYaYlIOgK3IVn/zoHdrHRUqTXqRJkFoTHum5l783y
vr9BwMFFWYePUrilphjIiLyJDl+eB5al8PaJkqK2whxBUHoA2jF1edJOSq2mZajI
+L2fBYClePS8oqwoKGquqCh2RVcmdtXtQTVzRIoNx14qFzP8ymqa+6z1Ygkri7bV
qMCJk7KQ8ND7uU9NShpaCIqrZpr5GZ4Al6SRkcpK/7mipQcy2QpKJR3iOpcfiTX1
YmYGVmLB3zmHu2kiS0kogZv6Ob7+tVFzOQ8NZX4FVnpB0N0phqMfNFOfHzdQZrsZ
qg29HNc9sHlUmsOVmE5w+7Oq+s79yvQB3034XXi/9wQu+f8fKRhqZboe0fe77FLf
QXoAYrZZ7LnGz0Z75Q9O4RB7uxM0Ug5imvyEFus4iuBVyBWjgcfyLnbkKJtbXmfn
BZBbTProhPJfVa/VffBxW9HZB27W7O14oGWVpUkGWnVMZfVY/78XTUHwxaScQsPO
SGawjobQsB3pTMNr/kra1XTjkti70si8Fcs5ueYWGB3yfc6r3hU=
=5HRY
-----END PGP SIGNATURE-----

View File

@@ -1,50 +0,0 @@
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA256
# This GPG-signed message exists to confirm the SHA256 sums of Monero binaries.
#
# Please verify the signature against the key for binaryFate in the
# source code repository (/utils/gpg_keys).
#
#
## CLI
7c2ad18ca3a1ad5bc603630ca935a753537a38a803e98d645edd6a3b94a5f036 monero-android-armv7-v0.18.4.4.tar.bz2
eb81b71f029884ab5fec76597be583982c95fd7dc3fc5f5083a422669cee311e monero-android-armv8-v0.18.4.4.tar.bz2
bc539178df23d1ae8b69569d9c328b5438ae585c0aacbebe12d8e7d387a745b0 monero-freebsd-x64-v0.18.4.4.tar.bz2
2040dc22748ef39ed8a755324d2515261b65315c67b91f449fa1617c5978910b monero-linux-armv7-v0.18.4.4.tar.bz2
b9daede195a24bdd05bba68cb5cb21e42c2e18b82d4d134850408078a44231c5 monero-linux-armv8-v0.18.4.4.tar.bz2
c939ea6e8002798f24a56ac03cbfc4ff586f70d7d9c3321b7794b3bcd1fa4c45 monero-linux-riscv64-v0.18.4.4.tar.bz2
7fe45ee9aade429ccdcfcad93b905ba45da5d3b46d2dc8c6d5afc48bd9e7f108 monero-linux-x64-v0.18.4.4.tar.bz2
8c174b756e104534f3d3a69fe68af66d6dc4d66afa97dfe31735f8d069d20570 monero-linux-x86-v0.18.4.4.tar.bz2
645e9bbae0275f555b2d72a9aa30d5f382df787ca9528d531521750ce2da9768 monero-mac-armv8-v0.18.4.4.tar.bz2
af3d98f09da94632db3e2f53c62cc612e70bf94aa5942d2a5200b4393cd9c842 monero-mac-x64-v0.18.4.4.tar.bz2
7eb3b87a105b3711361dd2b3e492ad14219d21ed8fd3dd726573a6cbd96e83a6 monero-win-x64-v0.18.4.4.zip
a148a2bd2b14183fb36e2cf917fce6f33fb687564db2ed53193b8432097ab398 monero-win-x86-v0.18.4.4.zip
84570eee26238d8f686605b5e31d59569488a3406f32e7045852de91f35508a2 monero-source-v0.18.4.4.tar.bz2
#
## GUI
4c81c8e97bd542daa453776d888557db1ceb2a718d43f6135ad68b12c8119948 monero-gui-install-win-x64-v0.18.4.4.exe
e45cb3fa9d972d67628cfed6463fb7604ae1414a11ba449f5e2f901c769ac788 monero-gui-linux-x64-v0.18.4.4.tar.bz2
a6f071719c401df339dba2d43ec6fffe103fda3e1df46f354b2496f34bb61cc4 monero-gui-mac-armv8-v0.18.4.4.dmg
811df70811a25f31289f24ebc0edc8f7648670384698d4c768bac5c2acbf2026 monero-gui-mac-x64-v0.18.4.4.dmg
b96faa56aa77cabed1f31f3fc9496e756a8da8c1124da2b9cb0b3730a8b6fbd9 monero-gui-win-x64-v0.18.4.4.zip
a7f6b91bc9efaa83173a397614626bf7612123e0017a48f66137ac397f7d19f8 monero-gui-source-v0.18.4.4.tar.bz2
#
#
# ~binaryFate
-----BEGIN PGP SIGNATURE-----
iQIzBAEBCAAdFiEEgaxZH+nEtlxYBq/D8K9NRioL35IFAmkbGLgACgkQ8K9NRioL
35LWYRAAnPeUu7TADV9Nly2gBlwu7bMK6l7pcUzs3hHhCMpg/Zb7wF8lx4D/r/hT
3wf3gNVK6tYl5GMPpF7GSKvK35SSzNN+8khRd7vhRByG75LGLnrNlcBsQU2wOzUv
Rmm2R8L8GP0B/+zXO92uJDMZ7Q7x72O+3fVX05217HBwz2kvzE1NpXe+EJPnUukA
Tr5CRnxKhxPbilvIhoEHdwkScMZqHMfsbdrefrB3KpO3xEaUz+gO9wESp7nzr4vp
Du6gJYBPK25Z2heZHCRsGN4WQP4QQv4MC0IFczc9fkVDBjywsJeNRRUbGtxR/BNt
vNJGI/kS+7KV140j6GkqAh/leZcaVJ5LRyCaHAwEQNA2T5okhrM0WZpoOAsZMi5K
bW4lNOXfWSw6/tokEPeuoi49yw0f9z0C8a4VLNOZGWKqmHcsA8WE6oVfmvVk6xWu
BqTU1Z9LJqL17GWRAReSX1ZuNA0Q0Pb/klUwP4X2afJcCVZ2YeBNr4jr21u3dYXY
QiLj0Gv7gg7a/GiMpVglNn5GzCu6mT0D94sbMNK+U5Tbve7aOtijJZ8JR62eO/mR
h+oNEys/xEcP9PQ5p74cNL71hNSfWSOcNi+GLSgXC75vsOGr7i96uaamilsHnsYB
p8PZMHzOf1pi6i/L5oOEuRgaujd9IjyCbxoYh3bbxxjBOhNEMqU=
=CVLA
-----END PGP SIGNATURE-----

View File

@@ -1,32 +1,13 @@
#check=skip=FromPlatformFlagConstDisallowed
# We want to explicitly set the platform to ensure a constant host environment
# rust:1.91.1-alpine as of November 11th, 2025 (GMT)
FROM --platform=linux/amd64 rust@sha256:700c0959b23445f69c82676b72caa97ca4359decd075dca55b13339df27dc4d3
FROM --platform=linux/amd64 rust@sha256:700c0959b23445f69c82676b72caa97ca4359decd075dca55b13339df27dc4d3 AS deterministic
# In order to compile the runtime, including the `proc-macro`s and build scripts, we need the
# required development libraries. These are traditionally provided by `musl-dev` which is not
# inherently included with this image (https://github.com/rust-lang/docker-rust/issues/68). While we
# could install it here, we'd be unable to pin the installed package by its hash as desired.
#
# Rust does have self-contained libraries, intended to be used when the desired development files
# are not otherwise available. These can be enabled with `link-self-contained=yes`. Unfortunately,
# this doesn't work here (https://github.com/rust-lang/rust/issues/149371).
#
# While we can't set `link-self-contained=yes`, we can install Rust's self-contained libraries onto
# our system so they're generally available.
RUN echo '#!/bin/sh' > libs.sh
RUN echo 'set -e' >> libs.sh
RUN echo 'SYSROOT=$(rustc --print sysroot)' >> libs.sh
RUN echo 'LIBS=$SYSROOT/lib/rustlib/x86_64-unknown-linux-musl/lib/self-contained' >> libs.sh
RUN echo 'ln -s $LIBS/Scrt1.o $LIBS/crti.o $LIBS/crtn.o /usr/lib' >> libs.sh
# We also need `libc.so` which is already present on the system, just not under that name
RUN echo 'ln -s /lib/libc.musl-x86_64.so.1 /usr/lib/libc.so' >> libs.sh
RUN /bin/sh ./libs.sh
RUN apk add musl-dev=1.2.5-r10
# Add the WASM toolchain
# Add the wasm toolchain
RUN rustup target add wasm32v1-none
FROM deterministic
# Add files for build
ADD patches /serai/patches
ADD common /serai/common
@@ -46,12 +27,8 @@ ADD AGPL-3.0 /serai
WORKDIR /serai
# Build the runtime
RUN cargo build --release -p serai-runtime
# Copy the artifact
RUN cp /serai/target/release/wbuild/serai-runtime/serai_runtime.wasm /serai/serai.wasm
# Clean up the build directory
RUN cargo clean
# Copy the runtime to the provided volume
CMD ["cp", "/serai/serai.wasm", "/volume/serai.wasm"]
# Build the runtime, copying it to the volume if it exists
ENV RUSTFLAGS="-Ctarget-feature=-crt-static"
CMD cargo build --release -p serai-runtime && \
mkdir -p /volume && \
cp /serai/target/release/wbuild/serai-runtime/serai_runtime.wasm /volume/serai.wasm

View File

@@ -16,7 +16,7 @@ pub fn coordinator(
) {
let db = network.db();
let longer_reattempts = if network == Network::Dev { "longer-reattempts" } else { "" };
let setup = mimalloc(Os::Debian) +
let setup = mimalloc(Os::Debian).to_string() +
&build_serai_service(
"",
Os::Debian,

View File

@@ -3,7 +3,7 @@ use std::path::Path;
use crate::{Network, Os, mimalloc, os, build_serai_service, write_dockerfile};
pub fn ethereum_relayer(orchestration_path: &Path, network: Network) {
let setup = mimalloc(Os::Debian) +
let setup = mimalloc(Os::Debian).to_string() +
&build_serai_service(
"",
Os::Debian,

View File

@@ -13,7 +13,7 @@ pub fn message_queue(
ethereum_key: <Ristretto as WrappedGroup>::G,
monero_key: <Ristretto as WrappedGroup>::G,
) {
let setup = mimalloc(Os::Alpine) +
let setup = mimalloc(Os::Alpine).to_string() +
&build_serai_service("", Os::Alpine, network.release(), network.db(), "serai-message-queue");
let env_vars = [

View File

@@ -1,85 +1,36 @@
use crate::Os;
// 2.2.4
const MIMALLOC_VERSION: &str = "fbd8b99c2b828428947d70fdc046bb55609be93e";
const FLAGS: &str =
"-DMI_SECURE=ON -DMI_GUARDED=ON -DMI_BUILD_STATIC=OFF -DMI_BUILD_OBJECT=OFF -DMI_BUILD_TESTS=OFF";
pub fn mimalloc(os: Os) -> String {
let build_script = |env, flags| {
format!(
r#"
#!/bin/sh
set -e
git clone https://github.com/microsoft/mimalloc
cd mimalloc
git checkout {MIMALLOC_VERSION}
# For some reason, `mimalloc` contains binary blobs in the repository, so we remove those now
rm -rf .git ./bin
mkdir -p out/secure
cd out/secure
# `CMakeLists.txt` requires a C++ compiler but `mimalloc` does not use one by default. We claim
# there is a working C++ compiler so CMake doesn't complain, allowing us to not unnecessarily
# install one. If it was ever invoked, our choice of `false` would immediately let us know.
# https://github.com/microsoft/mimalloc/issues/1179
{env} CXX=false cmake -DCMAKE_CXX_COMPILER_WORKS=1 {FLAGS} ../..
make
cd ../..
# Copy the built library to the original directory
cd ..
cp mimalloc/out/secure/libmimalloc-secure.so ./libmimalloc.so
# Clean up the source directory
rm -rf ./mimalloc
"#
)
};
let build_commands = |env, flags| {
let mut result = String::new();
for line in build_script(env, flags)
.lines()
.map(|line| {
assert!(!line.contains('"'));
format!(r#"RUN echo "{line}" >> ./mimalloc.sh"#)
})
.chain(["RUN /bin/sh ./mimalloc.sh", "RUN rm ./mimalloc.sh"].into_iter().map(str::to_string))
{
result.push_str(&line);
result.push('\n');
}
result
};
let alpine_build = build_commands("CC=$(uname -m)-alpine-linux-musl-gcc", "-DMI_LIBC_MUSL=ON");
let debian_build = build_commands("", "");
let alpine_mimalloc = format!(
r#"
pub fn mimalloc(os: Os) -> &'static str {
const ALPINE_MIMALLOC: &str = r#"
FROM alpine:latest AS mimalloc-alpine
RUN apk update && apk upgrade && apk --no-cache add musl-dev gcc make cmake git
RUN apk update && apk upgrade && apk --no-cache add gcc g++ libc-dev make cmake git
RUN git clone https://github.com/microsoft/mimalloc && \
cd mimalloc && \
git checkout fbd8b99c2b828428947d70fdc046bb55609be93e && \
mkdir -p out/secure && \
cd out/secure && \
cmake -DMI_SECURE=ON -DMI_GUARDED=on ../.. && \
make && \
cp ./libmimalloc-secure.so ../../../libmimalloc.so
"#;
{alpine_build}
"#
);
let debian_mimalloc = format!(
r#"
const DEBIAN_MIMALLOC: &str = r#"
FROM debian:trixie-slim AS mimalloc-debian
RUN apt update && apt upgrade -y && apt install -y gcc make cmake git
{debian_build}
"#
);
RUN apt update && apt upgrade -y && apt install -y gcc g++ make cmake git
RUN git clone https://github.com/microsoft/mimalloc && \
cd mimalloc && \
git checkout fbd8b99c2b828428947d70fdc046bb55609be93e && \
mkdir -p out/secure && \
cd out/secure && \
cmake -DMI_SECURE=ON -DMI_GUARDED=on ../.. && \
make && \
cp ./libmimalloc-secure.so ../../../libmimalloc.so
"#;
match os {
Os::Alpine => alpine_mimalloc,
Os::Debian => debian_mimalloc,
Os::Alpine => ALPINE_MIMALLOC,
Os::Debian => DEBIAN_MIMALLOC,
}
}
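
The longer variant writes the build script into the image one `RUN echo "..." >> ./mimalloc.sh` at a time before executing it, and asserts no line contains a double quote since each line is interpolated into a double-quoted shell string. A trimmed sketch of that transformation:

    fn build_commands(script: &str) -> String {
      let mut result = String::new();
      for line in script.lines() {
        // Each line is embedded in a double-quoted echo; a quote would break it.
        assert!(!line.contains('"'));
        result.push_str(&format!(r#"RUN echo "{line}" >> ./script.sh"#));
        result.push('\n');
      }
      result.push_str("RUN /bin/sh ./script.sh\nRUN rm ./script.sh\n");
      result
    }

    fn main() {
      print!("{}", build_commands("set -e\nmake"));
    }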

View File

@@ -29,7 +29,7 @@ RUN tar xzvf bitcoin-${BITCOIN_VERSION}-$(uname -m)-linux-gnu.tar.gz
RUN mv bitcoin-${BITCOIN_VERSION}/bin/bitcoind .
"#;
let setup = mimalloc(Os::Debian) + DOWNLOAD_BITCOIN;
let setup = mimalloc(Os::Debian).to_string() + DOWNLOAD_BITCOIN;
let run_bitcoin = format!(
r#"

View File

@@ -17,7 +17,7 @@ pub fn ethereum(orchestration_path: &Path, network: Network) {
(reth(network), nimbus(network))
};
let download = mimalloc(Os::Alpine) + &el_download + &cl_download;
let download = mimalloc(Os::Alpine).to_string() + &el_download + &cl_download;
let run = format!(
r#"
@@ -26,7 +26,7 @@ CMD ["/run.sh"]
"#,
network.label()
);
let run = mimalloc(Os::Debian) +
let run = mimalloc(Os::Debian).to_string() +
&os(Os::Debian, &(el_run_as_root + "\r\n" + &cl_run_as_root), "ethereum") +
&el_run +
&cl_run +

View File

@@ -10,7 +10,7 @@ fn monero_internal(
monero_binary: &str,
ports: &str,
) {
const MONERO_VERSION: &str = "0.18.4.4";
const MONERO_VERSION: &str = "0.18.4.3";
let arch = match std::env::consts::ARCH {
// We probably would run this without issues yet it's not worth needing to provide support for
@@ -41,7 +41,7 @@ RUN tar -xvjf monero-linux-{arch}-v{MONERO_VERSION}.tar.bz2 --strip-components=1
network.label(),
);
let setup = mimalloc(os) + &download_monero;
let setup = mimalloc(os).to_string() + &download_monero;
let run_monero = format!(
r#"

View File

@@ -17,7 +17,7 @@ pub fn processor(
substrate_evrf_key: Zeroizing<Vec<u8>>,
network_evrf_key: Zeroizing<Vec<u8>>,
) {
let setup = mimalloc(Os::Debian) +
let setup = mimalloc(Os::Debian).to_string() +
&build_serai_service(
if coin == "ethereum" {
r#"

View File

@@ -12,7 +12,10 @@ pub fn serai(
serai_key: &Zeroizing<<Ristretto as WrappedGroup>::F>,
) {
// Always builds in release for performance reasons
let setup = mimalloc(Os::Debian) + &build_serai_service("", Os::Debian, true, "", "serai-node");
let setup =
mimalloc(Os::Debian).to_string() + &build_serai_service("", Os::Debian, true, "", "serai-node");
let setup_fast_epoch = mimalloc(Os::Debian).to_string() +
&build_serai_service("", Os::Debian, true, "fast-epoch", "serai-node");
let env_vars = [("KEY", hex::encode(serai_key.to_repr()))];
let mut env_vars_str = String::new();
@@ -37,9 +40,16 @@ CMD {env_vars_str} "/run.sh"
let run = os(Os::Debian, "", "serai") + &run_serai;
let res = setup + &run;
let res_fast_epoch = setup_fast_epoch + &run;
let mut serai_path = orchestration_path.to_path_buf();
serai_path.push("serai");
let mut serai_fast_epoch_path = serai_path.clone();
serai_path.push("Dockerfile");
serai_fast_epoch_path.push("Dockerfile.fast-epoch");
write_dockerfile(serai_path, &res);
write_dockerfile(serai_fast_epoch_path, &res_fast_epoch);
}
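
The two Dockerfile paths share their directory, so the `PathBuf` is cloned after the shared `serai` component is pushed and before the file names diverge. A minimal sketch of that clone-then-diverge ordering:

    use std::path::PathBuf;

    fn main() {
      let mut serai_path = PathBuf::from("orchestration");
      serai_path.push("serai");
      // Clone while only the shared prefix has been pushed.
      let mut fast_epoch_path = serai_path.clone();
      serai_path.push("Dockerfile");
      fast_epoch_path.push("Dockerfile.fast-epoch");
      assert!(serai_path.ends_with("serai/Dockerfile"));
      assert!(fast_epoch_path.ends_with("serai/Dockerfile.fast-epoch"));
    }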

View File

@@ -12,7 +12,8 @@ edition = "2021"
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[workspace]
[lints]
workspace = true
[dependencies]
std-shims = { path = "../../common/std-shims", version = "0.1.4", default-features = false, optional = true }

View File

@@ -26,7 +26,7 @@ impl<C: ciphersuite::GroupIo> Ciphersuite for C {
#[cfg(feature = "alloc")]
fn read_G<R: io::Read>(reader: &mut R) -> io::Result<Self::G> {
<C as ciphersuite::GroupIo>::read_G(reader)
}
}
}
#[cfg(feature = "ed25519")]

View File

@@ -7,12 +7,14 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dalek-ff-gr
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["curve25519", "ed25519", "ristretto", "dalek", "group"]
edition = "2021"
rust-version = "1.85"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[workspace]
[lints]
workspace = true
[dependencies]
dalek-ff-group = { path = "../../crypto/dalek-ff-group", default-features = false }

View File

@@ -33,4 +33,12 @@ impl FieldElement {
u256 = u256.wrapping_sub(&MODULUS);
}
}
/// Create a `FieldElement` from the reduction of a 512-bit number.
///
/// The bytes are interpreted in little-endian format.
#[deprecated]
pub fn wide_reduce(value: [u8; 64]) -> Self {
<FieldElement as prime_field::ff::FromUniformBytes<_>>::from_uniform_bytes(&value)
}
}
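
Reducing a 512-bit value modulo this roughly 255-bit prime is how near-uniform field elements are derived from wide hash output: the resulting bias is on the order of 2^-257, whereas reducing only 256 bits would be measurably skewed. A hedged usage sketch against the upstream `ff` trait the deprecated method forwards to (the `prime_field::ff` path in the diff is assumed to re-export it):

    use ff::FromUniformBytes;

    // Derive a field element from 64 uniform bytes, e.g. a wide hash output.
    fn hash_to_field<F: FromUniformBytes<64>>(wide: [u8; 64]) -> F {
      F::from_uniform_bytes(&wide)
    }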

View File

@@ -12,7 +12,5 @@ edition = "2021"
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[workspace]
[dependencies]
darling = { version = "0.21" }

View File

@@ -1,6 +1,6 @@
[package]
name = "directories-next"
version = "2.0.99"
version = "2.0.0"
description = "Patch from directories-next back to directories"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/patches/directories-next"
@@ -12,7 +12,5 @@ edition = "2021"
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[workspace]
[dependencies]
directories = "6"
directories = "5"

View File

@@ -1,19 +0,0 @@
[package]
name = "alloy-eip2124"
version = "0.2.99"
description = "Patch to an empty crate"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/patches/ethereum/alloy-eip2124"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[workspace]
[features]
std = []
serde = []

View File

@@ -12,7 +12,5 @@ edition = "2021"
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[workspace]
[features]
std = []

View File

@@ -12,7 +12,5 @@ edition = "2021"
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[workspace]
[features]
std = []

Some files were not shown because too many files have changed in this diff.