diff --git a/.github/actions/bitcoin/action.yml b/.github/actions/bitcoin/action.yml
index 6f628172..2765571f 100644
--- a/.github/actions/bitcoin/action.yml
+++ b/.github/actions/bitcoin/action.yml
@@ -37,4 +37,4 @@ runs:
     - name: Bitcoin Regtest Daemon
       shell: bash
-      run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -daemon
+      run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -txindex -daemon
diff --git a/.github/actions/build-dependencies/action.yml b/.github/actions/build-dependencies/action.yml
index 5994b723..47d77522 100644
--- a/.github/actions/build-dependencies/action.yml
+++ b/.github/actions/build-dependencies/action.yml
@@ -42,8 +42,8 @@ runs:
       shell: bash
       run: |
         cargo install svm-rs
-        svm install 0.8.25
-        svm use 0.8.25
+        svm install 0.8.26
+        svm use 0.8.26

    # - name: Cache Rust
    #   uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43
diff --git a/.github/nightly-version b/.github/nightly-version
index 9f98e758..09a243d7 100644
--- a/.github/nightly-version
+++ b/.github/nightly-version
@@ -1 +1 @@
-nightly-2024-07-01
+nightly-2025-01-01
diff --git a/.github/workflows/common-tests.yml b/.github/workflows/common-tests.yml
index 117b5858..b93db510 100644
--- a/.github/workflows/common-tests.yml
+++ b/.github/workflows/common-tests.yml
@@ -30,4 +30,5 @@ jobs:
            -p patchable-async-sleep \
            -p serai-db \
            -p serai-env \
+            -p serai-task \
            -p simple-request
diff --git a/.github/workflows/crypto-tests.yml b/.github/workflows/crypto-tests.yml
index d9d1df08..bf20ede3 100644
--- a/.github/workflows/crypto-tests.yml
+++ b/.github/workflows/crypto-tests.yml
@@ -35,6 +35,10 @@ jobs:
            -p multiexp \
            -p schnorr-signatures \
            -p dleq \
+            -p generalized-bulletproofs \
+            -p generalized-bulletproofs-circuit-abstraction \
+            -p ec-divisors \
+            -p generalized-bulletproofs-ec-gadgets \
            -p dkg \
            -p modular-frost \
            -p frost-schnorrkel
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index da0bdcfa..cdaae18d 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -73,6 +73,15 @@ jobs:
      - name: Run rustfmt
        run: cargo +${{ steps.nightly.outputs.version }} fmt -- --check

+      - name: Install foundry
+        uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
+        with:
+          version: nightly-41d4e5437107f6f42c7711123890147bc736a609
+          cache: false
+
+      - name: Run forge fmt
+        run: FOUNDRY_FMT_SORT_INPUTS=false FOUNDRY_FMT_LINE_LENGTH=100 FOUNDRY_FMT_TAB_WIDTH=2 FOUNDRY_FMT_BRACKET_SPACING=true FOUNDRY_FMT_INT_TYPES=preserve forge fmt --check $(find .
-iname "*.sol") + machete: runs-on: ubuntu-latest steps: @@ -81,3 +90,25 @@ jobs: run: | cargo install cargo-machete cargo machete + + slither: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac + - name: Slither + run: | + python3 -m pip install solc-select + solc-select install 0.8.26 + solc-select use 0.8.26 + + python3 -m pip install slither-analyzer + + slither --include-paths ./networks/ethereum/schnorr/contracts/Schnorr.sol + slither --include-paths ./networks/ethereum/schnorr/contracts ./networks/ethereum/schnorr/contracts/tests/Schnorr.sol + slither processor/ethereum/deployer/contracts/Deployer.sol + slither processor/ethereum/erc20/contracts/IERC20.sol + + cp networks/ethereum/schnorr/contracts/Schnorr.sol processor/ethereum/router/contracts/ + cp processor/ethereum/erc20/contracts/IERC20.sol processor/ethereum/router/contracts/ + cd processor/ethereum/router/contracts + slither Router.sol diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml new file mode 100644 index 00000000..acf0eb32 --- /dev/null +++ b/.github/workflows/msrv.yml @@ -0,0 +1,259 @@ +name: Weekly MSRV Check + +on: + schedule: + - cron: "0 0 * * 0" + workflow_dispatch: + +jobs: + msrv-common: + name: Run cargo msrv on common + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac + + - name: Install Build Dependencies + uses: ./.github/actions/build-dependencies + + - name: Install cargo msrv + run: cargo install --locked cargo-msrv + + - name: Run cargo msrv on common + run: | + cargo msrv verify --manifest-path common/zalloc/Cargo.toml + cargo msrv verify --manifest-path common/std-shims/Cargo.toml + cargo msrv verify --manifest-path common/env/Cargo.toml + cargo msrv verify --manifest-path common/db/Cargo.toml + cargo msrv verify --manifest-path common/task/Cargo.toml + cargo msrv verify --manifest-path common/request/Cargo.toml + cargo msrv verify --manifest-path common/patchable-async-sleep/Cargo.toml + + msrv-crypto: + name: Run cargo msrv on crypto + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac + + - name: Install Build Dependencies + uses: ./.github/actions/build-dependencies + + - name: Install cargo msrv + run: cargo install --locked cargo-msrv + + - name: Run cargo msrv on crypto + run: | + cargo msrv verify --manifest-path crypto/transcript/Cargo.toml + + cargo msrv verify --manifest-path crypto/ff-group-tests/Cargo.toml + cargo msrv verify --manifest-path crypto/dalek-ff-group/Cargo.toml + cargo msrv verify --manifest-path crypto/ed448/Cargo.toml + + cargo msrv verify --manifest-path crypto/multiexp/Cargo.toml + + cargo msrv verify --manifest-path crypto/dleq/Cargo.toml + cargo msrv verify --manifest-path crypto/ciphersuite/Cargo.toml + cargo msrv verify --manifest-path crypto/schnorr/Cargo.toml + + cargo msrv verify --manifest-path crypto/evrf/generalized-bulletproofs/Cargo.toml + cargo msrv verify --manifest-path crypto/evrf/circuit-abstraction/Cargo.toml + cargo msrv verify --manifest-path crypto/evrf/divisors/Cargo.toml + cargo msrv verify --manifest-path crypto/evrf/ec-gadgets/Cargo.toml + cargo msrv verify --manifest-path crypto/evrf/embedwards25519/Cargo.toml + cargo msrv verify --manifest-path crypto/evrf/secq256k1/Cargo.toml + + cargo msrv verify --manifest-path crypto/dkg/Cargo.toml + cargo msrv verify --manifest-path crypto/frost/Cargo.toml + cargo msrv verify --manifest-path crypto/schnorrkel/Cargo.toml + + msrv-networks: 
+ name: Run cargo msrv on networks + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac + + - name: Install Build Dependencies + uses: ./.github/actions/build-dependencies + + - name: Install cargo msrv + run: cargo install --locked cargo-msrv + + - name: Run cargo msrv on networks + run: | + cargo msrv verify --manifest-path networks/bitcoin/Cargo.toml + + cargo msrv verify --manifest-path networks/ethereum/build-contracts/Cargo.toml + cargo msrv verify --manifest-path networks/ethereum/schnorr/Cargo.toml + cargo msrv verify --manifest-path networks/ethereum/alloy-simple-request-transport/Cargo.toml + cargo msrv verify --manifest-path networks/ethereum/relayer/Cargo.toml --features parity-db + + cargo msrv verify --manifest-path networks/monero/io/Cargo.toml + cargo msrv verify --manifest-path networks/monero/generators/Cargo.toml + cargo msrv verify --manifest-path networks/monero/primitives/Cargo.toml + cargo msrv verify --manifest-path networks/monero/ringct/mlsag/Cargo.toml + cargo msrv verify --manifest-path networks/monero/ringct/clsag/Cargo.toml + cargo msrv verify --manifest-path networks/monero/ringct/borromean/Cargo.toml + cargo msrv verify --manifest-path networks/monero/ringct/bulletproofs/Cargo.toml + cargo msrv verify --manifest-path networks/monero/Cargo.toml + cargo msrv verify --manifest-path networks/monero/rpc/Cargo.toml + cargo msrv verify --manifest-path networks/monero/rpc/simple-request/Cargo.toml + cargo msrv verify --manifest-path networks/monero/wallet/address/Cargo.toml + cargo msrv verify --manifest-path networks/monero/wallet/Cargo.toml + cargo msrv verify --manifest-path networks/monero/verify-chain/Cargo.toml + + msrv-message-queue: + name: Run cargo msrv on message-queue + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac + + - name: Install Build Dependencies + uses: ./.github/actions/build-dependencies + + - name: Install cargo msrv + run: cargo install --locked cargo-msrv + + - name: Run cargo msrv on message-queue + run: | + cargo msrv verify --manifest-path message-queue/Cargo.toml --features parity-db + + msrv-processor: + name: Run cargo msrv on processor + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac + + - name: Install Build Dependencies + uses: ./.github/actions/build-dependencies + + - name: Install cargo msrv + run: cargo install --locked cargo-msrv + + - name: Run cargo msrv on processor + run: | + cargo msrv verify --manifest-path processor/view-keys/Cargo.toml + + cargo msrv verify --manifest-path processor/primitives/Cargo.toml + cargo msrv verify --manifest-path processor/messages/Cargo.toml + + cargo msrv verify --manifest-path processor/scanner/Cargo.toml + + cargo msrv verify --manifest-path processor/scheduler/primitives/Cargo.toml + cargo msrv verify --manifest-path processor/scheduler/smart-contract/Cargo.toml + cargo msrv verify --manifest-path processor/scheduler/utxo/primitives/Cargo.toml + cargo msrv verify --manifest-path processor/scheduler/utxo/standard/Cargo.toml + cargo msrv verify --manifest-path processor/scheduler/utxo/transaction-chaining/Cargo.toml + + cargo msrv verify --manifest-path processor/key-gen/Cargo.toml + cargo msrv verify --manifest-path processor/frost-attempt-manager/Cargo.toml + cargo msrv verify --manifest-path processor/signers/Cargo.toml + cargo msrv verify --manifest-path processor/bin/Cargo.toml --features parity-db + + cargo msrv verify 
--manifest-path processor/bitcoin/Cargo.toml + + cargo msrv verify --manifest-path processor/ethereum/primitives/Cargo.toml + cargo msrv verify --manifest-path processor/ethereum/test-primitives/Cargo.toml + cargo msrv verify --manifest-path processor/ethereum/erc20/Cargo.toml + cargo msrv verify --manifest-path processor/ethereum/deployer/Cargo.toml + cargo msrv verify --manifest-path processor/ethereum/router/Cargo.toml + cargo msrv verify --manifest-path processor/ethereum/Cargo.toml + + cargo msrv verify --manifest-path processor/monero/Cargo.toml + + msrv-coordinator: + name: Run cargo msrv on coordinator + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac + + - name: Install Build Dependencies + uses: ./.github/actions/build-dependencies + + - name: Install cargo msrv + run: cargo install --locked cargo-msrv + + - name: Run cargo msrv on coordinator + run: | + cargo msrv verify --manifest-path coordinator/tributary-sdk/tendermint/Cargo.toml + cargo msrv verify --manifest-path coordinator/tributary-sdk/Cargo.toml + cargo msrv verify --manifest-path coordinator/cosign/Cargo.toml + cargo msrv verify --manifest-path coordinator/substrate/Cargo.toml + cargo msrv verify --manifest-path coordinator/tributary/Cargo.toml + cargo msrv verify --manifest-path coordinator/p2p/Cargo.toml + cargo msrv verify --manifest-path coordinator/p2p/libp2p/Cargo.toml + cargo msrv verify --manifest-path coordinator/Cargo.toml + + msrv-substrate: + name: Run cargo msrv on substrate + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac + + - name: Install Build Dependencies + uses: ./.github/actions/build-dependencies + + - name: Install cargo msrv + run: cargo install --locked cargo-msrv + + - name: Run cargo msrv on substrate + run: | + cargo msrv verify --manifest-path substrate/primitives/Cargo.toml + + cargo msrv verify --manifest-path substrate/coins/primitives/Cargo.toml + cargo msrv verify --manifest-path substrate/coins/pallet/Cargo.toml + + cargo msrv verify --manifest-path substrate/dex/pallet/Cargo.toml + + cargo msrv verify --manifest-path substrate/economic-security/pallet/Cargo.toml + + cargo msrv verify --manifest-path substrate/genesis-liquidity/primitives/Cargo.toml + cargo msrv verify --manifest-path substrate/genesis-liquidity/pallet/Cargo.toml + + cargo msrv verify --manifest-path substrate/in-instructions/primitives/Cargo.toml + cargo msrv verify --manifest-path substrate/in-instructions/pallet/Cargo.toml + + cargo msrv verify --manifest-path substrate/validator-sets/pallet/Cargo.toml + cargo msrv verify --manifest-path substrate/validator-sets/primitives/Cargo.toml + + cargo msrv verify --manifest-path substrate/emissions/primitives/Cargo.toml + cargo msrv verify --manifest-path substrate/emissions/pallet/Cargo.toml + + cargo msrv verify --manifest-path substrate/signals/primitives/Cargo.toml + cargo msrv verify --manifest-path substrate/signals/pallet/Cargo.toml + + cargo msrv verify --manifest-path substrate/abi/Cargo.toml + cargo msrv verify --manifest-path substrate/client/Cargo.toml + + cargo msrv verify --manifest-path substrate/runtime/Cargo.toml + cargo msrv verify --manifest-path substrate/node/Cargo.toml + + msrv-orchestration: + name: Run cargo msrv on orchestration + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac + + - name: Install Build Dependencies + uses: ./.github/actions/build-dependencies + + - name: Install cargo msrv + 
run: cargo install --locked cargo-msrv + + - name: Run cargo msrv on message-queue + run: | + cargo msrv verify --manifest-path orchestration/Cargo.toml + + msrv-mini: + name: Run cargo msrv on mini + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac + + - name: Install Build Dependencies + uses: ./.github/actions/build-dependencies + + - name: Install cargo msrv + run: cargo install --locked cargo-msrv + + - name: Run cargo msrv on mini + run: | + cargo msrv verify --manifest-path mini/Cargo.toml diff --git a/.github/workflows/networks-tests.yml b/.github/workflows/networks-tests.yml index 5966a6a8..a88009fd 100644 --- a/.github/workflows/networks-tests.yml +++ b/.github/workflows/networks-tests.yml @@ -30,8 +30,9 @@ jobs: run: | GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \ -p bitcoin-serai \ + -p build-solidity-contracts \ + -p ethereum-schnorr-contract \ -p alloy-simple-request-transport \ - -p ethereum-serai \ -p serai-ethereum-relayer \ -p monero-io \ -p monero-generators \ diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 05c25972..af93154e 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -39,9 +39,33 @@ jobs: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \ -p serai-message-queue \ -p serai-processor-messages \ - -p serai-processor \ + -p serai-processor-key-gen \ + -p serai-processor-view-keys \ + -p serai-processor-frost-attempt-manager \ + -p serai-processor-primitives \ + -p serai-processor-scanner \ + -p serai-processor-scheduler-primitives \ + -p serai-processor-utxo-scheduler-primitives \ + -p serai-processor-utxo-scheduler \ + -p serai-processor-transaction-chaining-scheduler \ + -p serai-processor-smart-contract-scheduler \ + -p serai-processor-signers \ + -p serai-processor-bin \ + -p serai-bitcoin-processor \ + -p serai-processor-ethereum-primitives \ + -p serai-processor-ethereum-test-primitives \ + -p serai-processor-ethereum-deployer \ + -p serai-processor-ethereum-router \ + -p serai-processor-ethereum-erc20 \ + -p serai-ethereum-processor \ + -p serai-monero-processor \ -p tendermint-machine \ - -p tributary-chain \ + -p tributary-sdk \ + -p serai-cosign \ + -p serai-coordinator-substrate \ + -p serai-coordinator-tributary \ + -p serai-coordinator-p2p \ + -p serai-coordinator-libp2p-p2p \ -p serai-coordinator \ -p serai-orchestrator \ -p serai-docker-tests diff --git a/Cargo.lock b/Cargo.lock index bf320e6f..0cfbb442 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -95,41 +95,58 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.18" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.1.36" +version = "0.1.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94c225801d42099570d0674701dddd4142f0ef715282aeb5985042e2ec962df7" +checksum = "d4e0f0136c085132939da6b753452ebed4efaa73fe523bb855b10c199c2ebfaf" dependencies = [ + "alloy-primitives", "num_enum", "strum 0.26.3", ] [[package]] name = "alloy-consensus" -version = "0.4.2" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "705687d5bfd019fee57cf9e206b27b30a9a9617535d5590a02b171e813208f8e" +checksum = "f4138dc275554afa6f18c4217262ac9388790b2fc393c2dfe03c51d357abf013" 
dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-serde", + "alloy-trie", "auto_impl", "c-kzg", - "derive_more 1.0.0", + "derive_more", + "k256", + "serde", +] + +[[package]] +name = "alloy-consensus-any" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fa04e1882c31288ce1028fdf31b6ea94cfa9eafa2e497f903ded631c8c6a42c" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", "serde", ] [[package]] name = "alloy-core" -version = "0.8.5" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ce854562e7cafd5049189d0268d6e5cba05fe6c9cb7c6f8126a79b94800629c" +checksum = "5e3fdddfc89197319b1be19875a70ced62a72bebb67e2276dad688cd59f40e70" dependencies = [ "alloy-primitives", ] @@ -147,21 +164,22 @@ dependencies = [ [[package]] name = "alloy-eip7702" -version = "0.1.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea59dc42102bc9a1905dc57901edc6dd48b9f38115df86c7d252acba70d71d04" +checksum = "cabf647eb4650c91a9d38cb6f972bb320009e7e9d61765fb688a86f1563b33e8" dependencies = [ "alloy-primitives", "alloy-rlp", + "derive_more", "k256", "serde", ] [[package]] name = "alloy-eips" -version = "0.4.2" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ffb906284a1e1f63c4607da2068c8197458a352d0b3e9796e67353d72a9be85" +checksum = "52dd5869ed09e399003e0e0ec6903d981b2a92e74c5d37e6b40890bad2517526" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -169,7 +187,7 @@ dependencies = [ "alloy-rlp", "alloy-serde", "c-kzg", - "derive_more 1.0.0", + "derive_more", "once_cell", "serde", "sha2", @@ -177,51 +195,44 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "0.4.2" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8429cf4554eed9b40feec7f4451113e76596086447550275e3def933faf47ce3" +checksum = "e7d2a7fe5c1a9bd6793829ea21a636f30fc2b3f5d2e7418ba86d96e41dd1f460" dependencies = [ + "alloy-eips", "alloy-primitives", "alloy-serde", - "serde", -] - -[[package]] -name = "alloy-json-abi" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a438d4486b5d525df3b3004188f9d5cd1d65cd30ecc41e5a3ccef6f6342e8af9" -dependencies = [ - "alloy-primitives", - "alloy-sol-type-parser", + "alloy-trie", "serde", ] [[package]] name = "alloy-json-rpc" -version = "0.4.2" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8fa8a1a3c4cbd221f2b8e3693aeb328fca79a757fe556ed08e47bbbc2a70db7" +checksum = "2008bedb8159a255b46b7c8614516eda06679ea82f620913679afbd8031fea72" dependencies = [ "alloy-primitives", "alloy-sol-types", "serde", "serde_json", - "thiserror", + "thiserror 2.0.9", "tracing", ] [[package]] name = "alloy-network" -version = "0.4.2" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85fa23a6a9d612b52e402c995f2d582c25165ec03ac6edf64c861a76bc5b87cd" +checksum = "4556f01fe41d0677495df10a648ddcf7ce118b0e8aa9642a0e2b6dd1fb7259de" dependencies = [ "alloy-consensus", + "alloy-consensus-any", "alloy-eips", "alloy-json-rpc", "alloy-network-primitives", "alloy-primitives", + "alloy-rpc-types-any", "alloy-rpc-types-eth", "alloy-serde", "alloy-signer", @@ -229,14 +240,16 @@ dependencies = [ "async-trait", "auto_impl", "futures-utils-wasm", - "thiserror", + "serde", + "serde_json", + "thiserror 
2.0.9", ] [[package]] name = "alloy-network-primitives" -version = "0.4.2" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "801492711d4392b2ccf5fc0bc69e299fa1aab15167d74dcaa9aab96a54f684bd" +checksum = "f31c3c6b71340a1d076831823f09cb6e02de01de5c6630a9631bdb36f947ff80" dependencies = [ "alloy-consensus", "alloy-eips", @@ -247,9 +260,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "0.4.2" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1334a738aa1710cb8227441b3fcc319202ce78e967ef37406940242df4a454" +checksum = "4520cd4bc5cec20c32c98e4bc38914c7fb96bf4a712105e44da186a54e65e3ba" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -257,25 +270,26 @@ dependencies = [ "rand", "serde_json", "tempfile", - "thiserror", + "thiserror 2.0.9", "tracing", "url", ] [[package]] name = "alloy-primitives" -version = "0.8.5" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "260d3ff3bff0bb84599f032a2f2c6828180b0ea0cd41fdaf44f39cef3ba41861" +checksum = "0540fd0355d400b59633c27bd4b42173e59943f28e9d3376b77a24771d432d04" dependencies = [ "alloy-rlp", "bytes", "cfg-if", "const-hex", - "derive_more 1.0.0", - "hashbrown 0.14.5", + "derive_more", + "foldhash", + "hashbrown 0.15.2", "hex-literal", - "indexmap 2.5.0", + "indexmap 2.7.0", "itoa", "k256", "keccak-asm", @@ -283,7 +297,7 @@ dependencies = [ "proptest", "rand", "ruint", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "serde", "sha3", "tiny-keccak", @@ -291,9 +305,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "0.4.2" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcfaa4ffec0af04e3555686b8aacbcdf7d13638133a0672749209069750f78a6" +checksum = "5a22c4441b3ebe2d77fa9cf629ba68c3f713eb91779cff84275393db97eddd82" dependencies = [ "alloy-chains", "alloy-consensus", @@ -303,7 +317,9 @@ dependencies = [ "alloy-network-primitives", "alloy-primitives", "alloy-rpc-client", + "alloy-rpc-types-debug", "alloy-rpc-types-eth", + "alloy-rpc-types-trace", "alloy-transport", "async-stream", "async-trait", @@ -312,19 +328,22 @@ dependencies = [ "futures", "futures-utils-wasm", "lru", + "parking_lot 0.12.3", "pin-project", + "schnellru", "serde", "serde_json", - "thiserror", + "thiserror 2.0.9", "tokio", "tracing", + "wasmtimer", ] [[package]] name = "alloy-rlp" -version = "0.3.8" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26154390b1d205a4a7ac7352aa2eb4f81f391399d4e2f546fb81a2f8bb383f62" +checksum = "f542548a609dca89fcd72b3b9f355928cf844d4363c5eed9c5273a3dd225e097" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -333,20 +352,20 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.8" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d0f2d905ebd295e7effec65e5f6868d153936130ae718352771de3e7d03c75c" +checksum = "5a833d97bf8a5f0f878daf2c8451fff7de7f9de38baa5a45d936ec718d81255a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] name = "alloy-rpc-client" -version = "0.4.2" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "370143ed581aace6e663342d21d209c6b2e34ee6142f7d6675adb518deeaf0dc" +checksum = "d06a292b37e182e514903ede6e623b9de96420e8109ce300da288a96d88b7e4b" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -358,34 +377,71 @@ 
dependencies = [ "serde_json", "tokio", "tokio-stream", - "tower 0.5.1", + "tower 0.5.2", "tracing", + "wasmtimer", +] + +[[package]] +name = "alloy-rpc-types-any" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca445cef0eb6c2cf51cfb4e214fbf1ebd00893ae2e6f3b944c8101b07990f988" +dependencies = [ + "alloy-consensus-any", + "alloy-rpc-types-eth", + "alloy-serde", +] + +[[package]] +name = "alloy-rpc-types-debug" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "358d6a8d7340b9eb1a7589a6c1fb00df2c9b26e90737fa5ed0108724dd8dac2c" +dependencies = [ + "alloy-primitives", + "serde", ] [[package]] name = "alloy-rpc-types-eth" -version = "0.4.2" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413f4aa3ccf2c3e4234a047c5fa4727916d7daf25a89f9b765df0ba09784fd87" +checksum = "0938bc615c02421bd86c1733ca7205cc3d99a122d9f9bff05726bd604b76a5c2" dependencies = [ "alloy-consensus", + "alloy-consensus-any", "alloy-eips", "alloy-network-primitives", "alloy-primitives", "alloy-rlp", "alloy-serde", "alloy-sol-types", - "derive_more 1.0.0", "itertools 0.13.0", "serde", "serde_json", + "thiserror 2.0.9", +] + +[[package]] +name = "alloy-rpc-types-trace" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd38207e056cc7d1372367fbb4560ddf9107cbd20731743f641246bf0dede149" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "serde", + "serde_json", + "thiserror 2.0.9", ] [[package]] name = "alloy-serde" -version = "0.4.2" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dff0ab1cdd43ca001e324dc27ee0e8606bd2161d6623c63e0e0b8c4dfc13600" +checksum = "ae0465c71d4dced7525f408d84873aeebb71faf807d22d74c4a426430ccd9b55" dependencies = [ "alloy-primitives", "serde", @@ -394,96 +450,82 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.4.2" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd4e0ad79c81a27ca659be5d176ca12399141659fef2bcbfdc848da478f4504" +checksum = "9bfa395ad5cc952c82358d31e4c68b27bf4a89a5456d9b27e226e77dac50e4ff" dependencies = [ "alloy-primitives", "async-trait", "auto_impl", "elliptic-curve", "k256", - "thiserror", + "thiserror 2.0.9", ] [[package]] name = "alloy-simple-request-transport" -version = "0.1.0" +version = "0.1.1" dependencies = [ "alloy-json-rpc", "alloy-transport", "serde_json", "simple-request", - "tower 0.5.1", + "tower 0.5.2", ] [[package]] name = "alloy-sol-macro" -version = "0.8.5" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68e7f6e8fe5b443f82b3f1e15abfa191128f71569148428e49449d01f6f49e8b" +checksum = "c6d1a14b4a9f6078ad9132775a2ebb465b06b387d60f7413ddc86d7bf7453408" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] name = "alloy-sol-macro-expander" -version = "0.8.5" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b96ce28d2fde09abb6135f410c41fad670a3a770b6776869bd852f1df102e6f" +checksum = "4436b4b96d265eb17daea26eb31525c3076d024d10901e446790afbd2f7eeaf5" dependencies = [ - "alloy-json-abi", "alloy-sol-macro-input", "const-hex", "heck 0.5.0", - "indexmap 2.5.0", + "indexmap 2.7.0", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.79", + "syn 
2.0.94", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "0.8.5" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "906746396a8296537745711630d9185746c0b50c033d5e9d18b0a6eba3d53f90" +checksum = "e5f58698a18b96faa8513519de112b79a96010b4ff84264ce54a217c52a8e98b" dependencies = [ - "alloy-json-abi", "const-hex", "dunce", "heck 0.5.0", "proc-macro2", "quote", - "serde_json", - "syn 2.0.79", + "syn 2.0.94", "syn-solidity", ] -[[package]] -name = "alloy-sol-type-parser" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbd3548d5262867c2c4be6223fe4f2583e21ade0ca1c307fd23bc7f28fca479e" -dependencies = [ - "serde", - "winnow 0.6.20", -] - [[package]] name = "alloy-sol-types" -version = "0.8.5" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d86a533ce22525969661b25dfe296c112d35eb6861f188fd284f8bd4bb3842ae" +checksum = "c766e4979fc19d70057150befe8e3ea3f0c4cbc6839b8eaaa250803451692305" dependencies = [ - "alloy-json-abi", "alloy-primitives", "alloy-sol-macro", "const-hex", @@ -491,9 +533,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.4.2" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ac3e97dad3d31770db0fc89bd6a63b789fbae78963086733f960cf32c483904" +checksum = "d17722a198f33bbd25337660787aea8b8f57814febb7c746bc30407bdfc39448" dependencies = [ "alloy-json-rpc", "base64 0.22.1", @@ -501,23 +543,40 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror", + "thiserror 2.0.9", "tokio", - "tower 0.5.1", + "tower 0.5.2", "tracing", "url", + "wasmtimer", ] [[package]] name = "alloy-transport-http" -version = "0.4.2" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b367dcccada5b28987c2296717ee04b9a5637aacd78eacb1726ef211678b5212" +checksum = "6e1509599021330a31c4a6816b655e34bf67acb1cc03c564e09fd8754ff6c5de" dependencies = [ "alloy-transport", "url", ] +[[package]] +name = "alloy-trie" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6917c79e837aa7b77b7a6dae9f89cbe15313ac161c4d3cfaf8909ef21f3d22d8" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "arrayvec", + "derive_more", + "nybbles", + "serde", + "smallvec", + "tracing", +] + [[package]] name = "android-tzdata" version = "0.1.1" @@ -544,9 +603,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.15" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" dependencies = [ "anstyle", "anstyle-parse", @@ -559,43 +618,43 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anstyle-parse" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" 
-version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.4" +version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" +checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" dependencies = [ "anstyle", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.89" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" [[package]] name = "approx" @@ -608,9 +667,9 @@ dependencies = [ [[package]] name = "arbitrary" -version = "1.3.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" +checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" [[package]] name = "ark-ff" @@ -753,6 +812,9 @@ name = "arrayvec" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +dependencies = [ + "serde", +] [[package]] name = "asn1-rs" @@ -760,13 +822,29 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" dependencies = [ - "asn1-rs-derive", - "asn1-rs-impl", + "asn1-rs-derive 0.4.0", + "asn1-rs-impl 0.1.0", "displaydoc", "nom", "num-traits", "rusticata-macros", - "thiserror", + "thiserror 1.0.69", + "time", +] + +[[package]] +name = "asn1-rs" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5493c3bedbacf7fd7382c6346bbd66687d12bbaad3a89a2d2c303ee6cf20b048" +dependencies = [ + "asn1-rs-derive 0.5.1", + "asn1-rs-impl 0.2.0", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror 1.0.69", "time", ] @@ -779,7 +857,19 @@ dependencies = [ "proc-macro2", "quote", "syn 1.0.109", - "synstructure", + "synstructure 0.12.6", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.94", + "synstructure 0.13.1", ] [[package]] @@ -793,6 +883,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "asn1-rs-impl" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.94", +] + [[package]] name = "async-channel" version = "1.9.0" @@ -806,9 +907,9 @@ dependencies = [ [[package]] name = "async-io" -version = "2.3.4" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" +checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" dependencies = [ "async-lock", 
"cfg-if", @@ -853,18 +954,18 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] name = "async-trait" -version = "0.1.83" +version = "0.1.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +checksum = "1b1244b10dcd56c92219da4e14caa97e312079e185f04ba3eea25061561dc0a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -880,6 +981,19 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "asynchronous-codec" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a860072022177f903e59730004fb5dc13db9275b79bb2aef7ba8ce831956c233" +dependencies = [ + "bytes", + "futures-sink", + "futures-util", + "memchr", + "pin-project-lite", +] + [[package]] name = "attohttpc" version = "0.24.1" @@ -891,6 +1005,16 @@ dependencies = [ "url", ] +[[package]] +name = "aurora-engine-modexp" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0aef7712851e524f35fbbb74fa6599c5cd8692056a1c36f9ca0d2001b670e7e5" +dependencies = [ + "hex", + "num", +] + [[package]] name = "auto_impl" version = "1.2.0" @@ -899,7 +1023,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -918,7 +1042,7 @@ dependencies = [ "cfg-if", "libc", "miniz_oxide", - "object 0.36.5", + "object 0.36.7", "rustc-demangle", "windows-targets 0.52.6", ] @@ -995,14 +1119,14 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.69.4" +version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" +checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ "bitflags 2.6.0", "cexpr", "clang-sys", - "itertools 0.12.1", + "itertools 0.10.5", "lazy_static", "lazycell", "proc-macro2", @@ -1010,7 +1134,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -1030,9 +1154,9 @@ checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" [[package]] name = "bitcoin" -version = "0.32.3" +version = "0.32.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0032b0e8ead7074cda7fc4f034409607e3f03a6f71d66ade8a307f79b4d99e73" +checksum = "ce6bc65742dea50536e35ad42492b234c27904a27f0abdcbce605015cb4ea026" dependencies = [ "base58ck", "bech32", @@ -1057,9 +1181,9 @@ dependencies = [ [[package]] name = "bitcoin-io" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "340e09e8399c7bd8912f495af6aa58bea0c9214773417ffaa8f6460f93aaee56" +checksum = "0b47c4ab7a93edb0c7198c5535ed9b52b63095f4e9b45279c6736cec4b856baf" [[package]] name = "bitcoin-serai" @@ -1075,7 +1199,7 @@ dependencies = [ "serde_json", "simple-request", "std-shims", - "thiserror", + "thiserror 2.0.9", "tokio", "zeroize", ] @@ -1121,6 +1245,7 @@ checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty", "radium", + "serde", "tap", "wyz", ] @@ -1158,9 +1283,9 @@ dependencies = [ [[package]] name = "blake3" -version = "1.5.4" +version = "1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d82033247fd8e890df8f740e407ad4d038debb9eb1f40533fffb32e7d17dc6f7" +checksum = "b8ee0c1824c4dea5b5f81736aff91bae041d2c07ee1192bec91054e10e3e601e" dependencies = [ "arrayref", "arrayvec", @@ -1233,7 +1358,7 @@ dependencies = [ "futures-core", "futures-util", "hex", - "http 1.1.0", + "http 1.2.0", "http-body-util", "hyper 1.4.1", "hyper-named-pipe", @@ -1246,7 +1371,7 @@ dependencies = [ "serde_json", "serde_repr", "serde_urlencoded", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-util", "tower-service", @@ -1267,26 +1392,25 @@ dependencies = [ [[package]] name = "borsh" -version = "1.5.0" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbe5b10e214954177fb1dc9fbd20a1a2608fe99e6c832033bdc7cea287a20d77" +checksum = "2506947f73ad44e344215ccd6403ac2ae18cd8e046e581a441bf8d199f257f03" dependencies = [ "borsh-derive", - "cfg_aliases", + "cfg_aliases 0.2.1", ] [[package]] name = "borsh-derive" -version = "1.5.1" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" +checksum = "c2593a3b8b938bd68373196c9832f516be11fa487ef4ae745eb282e6a56a7244" dependencies = [ "once_cell", "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.79", - "syn_derive", + "syn 2.0.94", ] [[package]] @@ -1312,9 +1436,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.10.0" +version = "1.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c" +checksum = "531a9155a481e2ee699d4f98f43c0ca4ff8ee1bfd55c31e9e98fb29d2b176fe0" dependencies = [ "memchr", "serde", @@ -1329,6 +1453,10 @@ dependencies = [ "semver 0.6.0", ] +[[package]] +name = "build-solidity-contracts" +version = "0.1.1" + [[package]] name = "bumpalo" version = "3.16.0" @@ -1343,9 +1471,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytemuck" -version = "1.18.0" +version = "1.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94bbb0ad554ad961ddc5da507a12a29b14e4ae5bda06b19f575a3e6079d2e2ae" +checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" [[package]] name = "byteorder" @@ -1355,9 +1483,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.2" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" +checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" dependencies = [ "serde", ] @@ -1399,9 +1527,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" +checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" dependencies = [ "serde", ] @@ -1414,17 +1542,17 @@ checksum = "e7daec1a2a2129eeba1644b220b4647ec537b0b5d4bfd6876fcc5a540056b592" dependencies = [ "camino", "cargo-platform", - "semver 1.0.23", + "semver 1.0.24", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "cc" -version = "1.1.28" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2e80e3b6a3ab07840e1cae9b0666a63970dc28e8ed5ffbcdacbfc760c281bfc1" +checksum = "27f657647bcff5394bf56c7317665bbf790a137a50eaaa5c6bfbb9e27a518f2d" dependencies = [ "jobserver", "libc", @@ -1461,6 +1589,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chacha20" version = "0.9.1" @@ -1487,9 +1621,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.38" +version = "0.4.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1510,7 +1644,7 @@ dependencies = [ "multibase", "multihash 0.18.1", "serde", - "unsigned-varint", + "unsigned-varint 0.7.2", ] [[package]] @@ -1560,9 +1694,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.19" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7be5744db7978a28d9df86a214130d106a89ce49644cbc4e3f0c22c3fba30615" +checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" dependencies = [ "clap_builder", "clap_derive", @@ -1570,9 +1704,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.19" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5fbc17d3ef8278f55b282b2a2e75ae6f6c7d4bb70ed3d0382375104bfafdb4b" +checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" dependencies = [ "anstream", "anstyle", @@ -1589,14 +1723,14 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] name = "clap_lex" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "codespan-reporting" @@ -1610,9 +1744,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "concurrent-queue" @@ -1625,9 +1759,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.13.1" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0121754e84117e65f9d90648ee6aa4882a6e63110307ab73967a4c5e7e69e586" +checksum = "4b0485bab839b018a8f1723fc5391819fea5f8f0f32288ef8a735fd096b6160c" dependencies = [ "cfg-if", "cpufeatures", @@ -1678,6 +1812,16 @@ dependencies = [ "libc", ] +[[package]] +name = "core-foundation" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -1704,9 +1848,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.14" 
+version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" +checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" dependencies = [ "libc", ] @@ -1830,9 +1974,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ "crossbeam-epoch", "crossbeam-utils", @@ -1849,9 +1993,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.20" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" @@ -1917,18 +2061,19 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] name = "cxx" -version = "1.0.128" +version = "1.0.131" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54ccead7d199d584d139148b04b4a368d1ec7556a1d9ea2548febb1b9d49f9a4" +checksum = "2568d7d2cfc051e43414fe1ef80c712cbcd60c3624d1ad1cb4b2572324d0a5d9" dependencies = [ "cc", "cxxbridge-flags", "cxxbridge-macro", + "foldhash", "link-cplusplus", ] @@ -1944,24 +2089,25 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] name = "cxxbridge-flags" -version = "1.0.128" +version = "1.0.131" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65777e06cc48f0cb0152024c77d6cf9e4bdb4408e7b48bea993d42fa0f5b02b6" +checksum = "0c710c27f23b7fa00c23aaee9e6fd3e79a6dffc5f5c6217487ec5213f51296b7" [[package]] name = "cxxbridge-macro" -version = "1.0.128" +version = "1.0.131" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98532a60dedaebc4848cb2cba5023337cc9ea3af16a5b062633fabfd9f18fb60" +checksum = "0aa53ef9fc54b986272efe83e257bbb417d1c3ceab1b732411d8c634fda572be" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "rustversion", + "syn 2.0.94", ] [[package]] @@ -2045,7 +2191,21 @@ version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" dependencies = [ - "asn1-rs", + "asn1-rs 0.5.2", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + +[[package]] +name = "der-parser" +version = "9.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" +dependencies = [ + "asn1-rs 0.6.2", "displaydoc", "nom", "num-bigint", @@ -2085,17 +2245,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "derive_more" -version = "0.99.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - [[package]] name = "derive_more" version = "1.0.0" @@ -2113,7 +2262,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", 
"unicode-xid", ] @@ -2193,23 +2342,34 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] name = "dkg" version = "0.5.1" dependencies = [ + "blake2", "borsh", "chacha20", "ciphersuite", "dleq", + "ec-divisors", + "embedwards25519", "flexible-transcript", + "generalized-bulletproofs", + "generalized-bulletproofs-circuit-abstraction", + "generalized-bulletproofs-ec-gadgets", + "generic-array 1.1.1", "multiexp", + "pasta_curves", + "rand", + "rand_chacha", "rand_core", "schnorr-signatures", + "secq256k1", "std-shims", - "thiserror", + "thiserror 2.0.9", "zeroize", ] @@ -2228,7 +2388,7 @@ dependencies = [ "multiexp", "rand_core", "rustversion", - "thiserror", + "thiserror 2.0.9", "zeroize", ] @@ -2250,7 +2410,7 @@ dependencies = [ "serde", "serde_json", "strum 0.26.3", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -2300,6 +2460,21 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" +[[package]] +name = "ec-divisors" +version = "0.1.0" +dependencies = [ + "dalek-ff-group", + "ff", + "group", + "hex", + "pasta_curves", + "rand_core", + "std-shims", + "subtle", + "zeroize", +] + [[package]] name = "ecdsa" version = "0.16.9" @@ -2380,6 +2555,27 @@ dependencies = [ "zeroize", ] +[[package]] +name = "embedwards25519" +version = "0.1.0" +dependencies = [ + "blake2", + "ciphersuite", + "crypto-bigint", + "dalek-ff-group", + "ec-divisors", + "ff-group-tests", + "generalized-bulletproofs-ec-gadgets", + "generic-array 1.1.1", + "hex", + "hex-literal", + "rand_core", + "rustversion", + "std-shims", + "subtle", + "zeroize", +] + [[package]] name = "enum-as-inner" version = "0.5.1" @@ -2401,7 +2597,18 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", +] + +[[package]] +name = "enumn" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.94", ] [[package]] @@ -2431,33 +2638,31 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] -name = "ethereum-serai" +name = "ethereum-schnorr-contract" version = "0.1.0" dependencies = [ - "alloy-consensus", "alloy-core", - "alloy-network", "alloy-node-bindings", "alloy-provider", "alloy-rpc-client", "alloy-rpc-types-eth", "alloy-simple-request-transport", "alloy-sol-types", - "flexible-transcript", + "build-solidity-contracts", "group", "k256", - "modular-frost", "rand_core", - "thiserror", + "sha3", + "subtle", "tokio", ] @@ -2480,9 +2685,9 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" dependencies = [ "event-listener 5.3.1", "pin-project-lite", @@ -2507,7 
+2712,7 @@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -2518,9 +2723,9 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] name = "fastrand" -version = "2.1.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "fastrlp" @@ -2657,6 +2862,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" + [[package]] name = "fork-tree" version = "3.0.0" @@ -2782,7 +2993,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -2794,7 +3005,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -2804,7 +3015,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -2911,6 +3122,16 @@ dependencies = [ "futures-util", ] +[[package]] +name = "futures-bounded" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91f328e7fb845fc832912fb6a34f40cf6d1888c92f974d1893a54e97b5ff542e" +dependencies = [ + "futures-timer", + "futures-util", +] + [[package]] name = "futures-channel" version = "0.3.31" @@ -2947,11 +3168,14 @@ checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" -version = "2.3.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +checksum = "cef40d21ae2c515b51041df9ed313ed21e572df340ea58a922a0aefe7e8891a1" dependencies = [ + "fastrand", "futures-core", + "futures-io", + "parking", "pin-project-lite", ] @@ -2963,7 +3187,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -2976,6 +3200,17 @@ dependencies = [ "rustls 0.21.12", ] +[[package]] +name = "futures-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" +dependencies = [ + "futures-io", + "rustls 0.23.20", + "rustls-pki-types", +] + [[package]] name = "futures-sink" version = "0.3.31" @@ -3052,17 +3287,49 @@ dependencies = [ ] [[package]] -name = "generator" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "186014d53bc231d0090ef8d6f03e0920c54d85a5ed22f4f2f74315ec56cf83fb" +name = "generalized-bulletproofs" +version = "0.1.0" +dependencies = [ + "blake2", + "ciphersuite", + "flexible-transcript", + "multiexp", + "rand_core", + "std-shims", + "zeroize", +] + +[[package]] +name = "generalized-bulletproofs-circuit-abstraction" +version = "0.1.0" +dependencies = [ + "ciphersuite", + "generalized-bulletproofs", + "std-shims", + "zeroize", +] + +[[package]] +name = "generalized-bulletproofs-ec-gadgets" +version = 
"0.1.0" +dependencies = [ + "ciphersuite", + "generalized-bulletproofs-circuit-abstraction", + "generic-array 1.1.1", + "std-shims", +] + +[[package]] +name = "generator" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" dependencies = [ - "cc", "cfg-if", "libc", "log", "rustversion", - "windows 0.54.0", + "windows 0.58.0", ] [[package]] @@ -3078,9 +3345,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96512db27971c2c3eece70a1e106fbe6c87760234e31e8f7e5634912fe52794a" +checksum = "2cb8bc4c28d15ade99c7e90b219f30da4be5c88e586277e8cbe886beeb868ab2" dependencies = [ "typenum", ] @@ -3092,8 +3359,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi", + "wasm-bindgen", ] [[package]] @@ -3135,9 +3404,9 @@ checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "glob" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "globset" @@ -3148,7 +3417,7 @@ dependencies = [ "aho-corasick", "bstr", "log", - "regex-automata 0.4.8", + "regex-automata 0.4.9", "regex-syntax 0.8.5", ] @@ -3175,7 +3444,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.5.0", + "indexmap 2.7.0", "slab", "tokio", "tokio-util", @@ -3220,6 +3489,17 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", +] + +[[package]] +name = "hashbrown" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", "serde", ] @@ -3277,6 +3557,52 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3011d1213f159867b13cfd6ac92d2cd5f1345762c63be3554e84092d85a50bbd" +[[package]] +name = "hickory-proto" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "447afdcdb8afb9d0a852af6dc65d9b285ce720ed7a59e42a8bf2e931c67bc1b5" +dependencies = [ + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner 0.6.1", + "futures-channel", + "futures-io", + "futures-util", + "idna 1.0.3", + "ipnet", + "once_cell", + "rand", + "socket2 0.5.8", + "thiserror 1.0.69", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "hickory-resolver" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a2e2aba9c389ce5267d31cf1e4dace82390ae276b0b364ea55630b1fa1b44b4" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto", + "ipconfig", + "lru-cache", + "once_cell", + "parking_lot 0.12.3", + "rand", + "resolv-conf", + "smallvec", + "thiserror 1.0.69", + "tokio", + "tracing", +] + [[package]] name = "hkdf" version = "0.12.4" @@ -3297,11 +3623,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.9" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3328,9 +3654,9 @@ dependencies = [ [[package]] name = "http" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" dependencies = [ "bytes", "fnv", @@ -3355,7 +3681,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.1.0", + "http 1.2.0", ] [[package]] @@ -3366,7 +3692,7 @@ checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "pin-project-lite", ] @@ -3412,7 +3738,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.7", + "socket2 0.5.8", "tokio", "tower-service", "tracing", @@ -3428,7 +3754,7 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "httparse", "httpdate", @@ -3456,15 +3782,15 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.3" +version = "0.27.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" +checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" dependencies = [ "futures-util", - "http 1.1.0", + "http 1.2.0", "hyper 1.4.1", "hyper-util", - "rustls 0.23.14", + "rustls 0.23.20", "rustls-native-certs", "rustls-pki-types", "tokio", @@ -3474,18 +3800,18 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "hyper 1.4.1", "pin-project-lite", - "socket2 0.5.7", + "socket2 0.5.8", "tokio", "tower-service", "tracing", @@ -3552,14 +3878,35 @@ dependencies = [ [[package]] name = "idna" -version = "0.5.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "279259b0ac81c89d11c290495fdcfa96ea3643b7df311c138b6fe8ca5237f0f8" +dependencies = [ + "idna_mapping", "unicode-bidi", "unicode-normalization", ] +[[package]] +name = "idna_mapping" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5422cc5bc64289a77dbb45e970b86b5e9a04cb500abc7240505aedc1bf40f38" +dependencies = [ + "unicode-joining-type", +] + [[package]] name = "if-addrs" version = "0.10.2" @@ -3572,17 +3919,21 @@ dependencies = [ [[package]] name = "if-watch" -version = "3.2.0" +version = "3.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6b0422c86d7ce0e97169cc42e04ae643caf278874a7a3c87b8150a220dc7e1e" +checksum = "cdf9d64cfcf380606e64f9a0bcf493616b65331199f984151a6fa11a7b3cde38" dependencies = [ "async-io", - "core-foundation", + "core-foundation 0.9.4", "fnv", "futures", "if-addrs", "ipnet", "log", + "netlink-packet-core", + "netlink-packet-route", + "netlink-proto", + "netlink-sys", "rtnetlink", "system-configuration", "tokio", @@ -3628,13 +3979,13 @@ dependencies = [ [[package]] name = "impl-trait-for-tuples" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.94", ] [[package]] @@ -3650,12 +4001,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.5.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.2", "serde", ] @@ -3699,7 +4050,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.7", + "socket2 0.5.8", "widestring", "windows-sys 0.48.0", "winreg", @@ -3730,15 +4081,6 @@ dependencies = [ "either", ] -[[package]] -name = "itertools" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" -dependencies = [ - "either", -] - [[package]] name = "itertools" version = "0.13.0" @@ -3750,9 +4092,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.11" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" [[package]] name = "jobserver" @@ -3765,10 +4107,11 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.70" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7" dependencies = [ + "once_cell", "wasm-bindgen", ] @@ -3806,7 +4149,7 @@ dependencies = [ "serde", "serde_json", "soketto 0.7.1", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] @@ -3856,7 +4199,7 @@ dependencies = [ "beef", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tracing", ] @@ -3871,7 +4214,6 @@ dependencies = [ "elliptic-curve", "once_cell", "sha2", - "signature", ] [[package]] @@ -3948,15 +4290,15 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.159" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = "libloading" -version = "0.8.5" +version = "0.8.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", "windows-targets 0.52.6", @@ -3964,9 +4306,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libp2p" @@ -3980,30 +4322,62 @@ dependencies = [ "futures-timer", "getrandom", "instant", - "libp2p-allow-block-list", - "libp2p-connection-limits", - "libp2p-core", - "libp2p-dns", - "libp2p-gossipsub", + "libp2p-allow-block-list 0.2.0", + "libp2p-connection-limits 0.2.1", + "libp2p-core 0.40.1", + "libp2p-dns 0.40.1", "libp2p-identify", "libp2p-identity", "libp2p-kad", - "libp2p-mdns", - "libp2p-metrics", - "libp2p-noise", - "libp2p-ping", - "libp2p-quic", - "libp2p-request-response", - "libp2p-swarm", - "libp2p-tcp", - "libp2p-upnp", + "libp2p-mdns 0.44.0", + "libp2p-metrics 0.13.1", + "libp2p-noise 0.43.2", + "libp2p-ping 0.43.1", + "libp2p-quic 0.9.3", + "libp2p-request-response 0.25.3", + "libp2p-swarm 0.43.7", + "libp2p-tcp 0.40.1", + "libp2p-upnp 0.1.1", "libp2p-wasm-ext", "libp2p-websocket", - "libp2p-yamux", + "libp2p-yamux 0.44.1", "multiaddr", "pin-project", "rw-stream-sink", - "thiserror", + "thiserror 1.0.69", +] + +[[package]] +name = "libp2p" +version = "0.54.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbbe80f9c7e00526cd6b838075b9c171919404a4732cb2fa8ece0a093223bfc4" +dependencies = [ + "bytes", + "either", + "futures", + "futures-timer", + "getrandom", + "libp2p-allow-block-list 0.4.0", + "libp2p-connection-limits 0.4.0", + "libp2p-core 0.42.0", + "libp2p-dns 0.42.0", + "libp2p-gossipsub", + "libp2p-identity", + "libp2p-mdns 0.46.0", + "libp2p-metrics 0.15.0", + "libp2p-noise 0.45.0", + "libp2p-ping 0.45.0", + "libp2p-quic 0.11.1", + "libp2p-request-response 0.27.0", + "libp2p-swarm 0.45.1", + "libp2p-tcp 0.42.0", + "libp2p-upnp 0.3.0", + "libp2p-yamux 0.46.0", + "multiaddr", + "pin-project", + "rw-stream-sink", + "thiserror 1.0.69", ] [[package]] @@ -4012,9 +4386,21 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55b46558c5c0bf99d3e2a1a38fd54ff5476ca66dd1737b12466a1824dd219311" dependencies = [ - "libp2p-core", + "libp2p-core 0.40.1", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.43.7", + "void", +] + +[[package]] +name = "libp2p-allow-block-list" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041" +dependencies = [ + "libp2p-core 0.42.0", + "libp2p-identity", + "libp2p-swarm 0.45.1", "void", ] @@ -4024,9 +4410,21 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f5107ad45cb20b2f6c3628c7b6014b996fcb13a88053f4569c872c6e30abf58" dependencies = [ - "libp2p-core", + "libp2p-core 0.40.1", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.43.7", + "void", +] + +[[package]] +name = "libp2p-connection-limits" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8" +dependencies = [ + "libp2p-core 0.42.0", + 
"libp2p-identity", + "libp2p-swarm 0.45.1", "void", ] @@ -4053,11 +4451,39 @@ dependencies = [ "rand", "rw-stream-sink", "smallvec", - "thiserror", - "unsigned-varint", + "thiserror 1.0.69", + "unsigned-varint 0.7.2", "void", ] +[[package]] +name = "libp2p-core" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a61f26c83ed111104cd820fe9bc3aaabbac5f1652a1d213ed6e900b7918a1298" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "libp2p-identity", + "multiaddr", + "multihash 0.19.1", + "multistream-select", + "once_cell", + "parking_lot 0.12.3", + "pin-project", + "quick-protobuf", + "rand", + "rw-stream-sink", + "smallvec", + "thiserror 1.0.69", + "tracing", + "unsigned-varint 0.8.0", + "void", + "web-time", +] + [[package]] name = "libp2p-dns" version = "0.40.1" @@ -4066,7 +4492,7 @@ checksum = "e6a18db73084b4da2871438f6239fef35190b05023de7656e877c18a00541a3b" dependencies = [ "async-trait", "futures", - "libp2p-core", + "libp2p-core 0.40.1", "libp2p-identity", "log", "parking_lot 0.12.3", @@ -4075,13 +4501,29 @@ dependencies = [ ] [[package]] -name = "libp2p-gossipsub" -version = "0.45.2" +name = "libp2p-dns" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f9624e2a843b655f1c1b8262b8d5de6f309413fca4d66f01bb0662429f84dc" +checksum = "97f37f30d5c7275db282ecd86e54f29dd2176bd3ac656f06abf43bedb21eb8bd" dependencies = [ - "asynchronous-codec", - "base64 0.21.7", + "async-trait", + "futures", + "hickory-resolver", + "libp2p-core 0.42.0", + "libp2p-identity", + "parking_lot 0.12.3", + "smallvec", + "tracing", +] + +[[package]] +name = "libp2p-gossipsub" +version = "0.47.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4e830fdf24ac8c444c12415903174d506e1e077fbe3875c404a78c5935a8543" +dependencies = [ + "asynchronous-codec 0.7.0", + "base64 0.22.1", "byteorder", "bytes", "either", @@ -4090,20 +4532,19 @@ dependencies = [ "futures-ticker", "getrandom", "hex_fmt", - "instant", - "libp2p-core", + "libp2p-core 0.42.0", "libp2p-identity", - "libp2p-swarm", - "log", - "prometheus-client", + "libp2p-swarm 0.45.1", + "prometheus-client 0.22.3", "quick-protobuf", - "quick-protobuf-codec", + "quick-protobuf-codec 0.3.1", "rand", "regex", "sha2", "smallvec", - "unsigned-varint", + "tracing", "void", + "web-time", ] [[package]] @@ -4112,28 +4553,28 @@ version = "0.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45a96638a0a176bec0a4bcaebc1afa8cf909b114477209d7456ade52c61cd9cd" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.6.2", "either", "futures", - "futures-bounded", + "futures-bounded 0.1.0", "futures-timer", - "libp2p-core", + "libp2p-core 0.40.1", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.43.7", "log", "lru", "quick-protobuf", - "quick-protobuf-codec", + "quick-protobuf-codec 0.2.0", "smallvec", - "thiserror", + "thiserror 1.0.69", "void", ] [[package]] name = "libp2p-identity" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cca1eb2bc1fd29f099f3daaab7effd01e1a54b7c577d0ed082521034d912e8" +checksum = "257b5621d159b32282eac446bed6670c39c7dc68a200a992d8f056afa0066f6d" dependencies = [ "bs58", "ed25519-dalek", @@ -4142,7 +4583,7 @@ dependencies = [ "quick-protobuf", "rand", "sha2", - "thiserror", + "thiserror 1.0.69", "tracing", "zeroize", ] @@ -4154,25 +4595,25 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "16ea178dabba6dde6ffc260a8e0452ccdc8f79becf544946692fff9d412fc29d" dependencies = [ "arrayvec", - "asynchronous-codec", + "asynchronous-codec 0.6.2", "bytes", "either", "fnv", "futures", "futures-timer", "instant", - "libp2p-core", + "libp2p-core 0.40.1", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.43.7", "log", "quick-protobuf", - "quick-protobuf-codec", + "quick-protobuf-codec 0.2.0", "rand", "sha2", "smallvec", - "thiserror", + "thiserror 1.0.69", "uint", - "unsigned-varint", + "unsigned-varint 0.7.2", "void", ] @@ -4185,18 +4626,39 @@ dependencies = [ "data-encoding", "futures", "if-watch", - "libp2p-core", + "libp2p-core 0.40.1", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.43.7", "log", "rand", "smallvec", - "socket2 0.5.7", + "socket2 0.5.8", "tokio", "trust-dns-proto 0.22.0", "void", ] +[[package]] +name = "libp2p-mdns" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14b8546b6644032565eb29046b42744aee1e9f261ed99671b2c93fb140dba417" +dependencies = [ + "data-encoding", + "futures", + "hickory-proto", + "if-watch", + "libp2p-core 0.42.0", + "libp2p-identity", + "libp2p-swarm 0.45.1", + "rand", + "smallvec", + "socket2 0.5.8", + "tokio", + "tracing", + "void", +] + [[package]] name = "libp2p-metrics" version = "0.13.1" @@ -4204,15 +4666,31 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "239ba7d28f8d0b5d77760dc6619c05c7e88e74ec8fbbe97f856f20a56745e620" dependencies = [ "instant", - "libp2p-core", - "libp2p-gossipsub", + "libp2p-core 0.40.1", "libp2p-identify", "libp2p-identity", "libp2p-kad", - "libp2p-ping", - "libp2p-swarm", + "libp2p-ping 0.43.1", + "libp2p-swarm 0.43.7", "once_cell", - "prometheus-client", + "prometheus-client 0.21.2", +] + +[[package]] +name = "libp2p-metrics" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566" +dependencies = [ + "futures", + "libp2p-core 0.42.0", + "libp2p-gossipsub", + "libp2p-identity", + "libp2p-ping 0.45.0", + "libp2p-swarm 0.45.1", + "pin-project", + "prometheus-client 0.22.3", + "web-time", ] [[package]] @@ -4224,7 +4702,7 @@ dependencies = [ "bytes", "curve25519-dalek", "futures", - "libp2p-core", + "libp2p-core 0.40.1", "libp2p-identity", "log", "multiaddr", @@ -4235,7 +4713,33 @@ dependencies = [ "sha2", "snow", "static_assertions", - "thiserror", + "thiserror 1.0.69", + "x25519-dalek", + "zeroize", +] + +[[package]] +name = "libp2p-noise" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36b137cb1ae86ee39f8e5d6245a296518912014eaa87427d24e6ff58cfc1b28c" +dependencies = [ + "asynchronous-codec 0.7.0", + "bytes", + "curve25519-dalek", + "futures", + "libp2p-core 0.42.0", + "libp2p-identity", + "multiaddr", + "multihash 0.19.1", + "once_cell", + "quick-protobuf", + "rand", + "sha2", + "snow", + "static_assertions", + "thiserror 1.0.69", + "tracing", "x25519-dalek", "zeroize", ] @@ -4250,14 +4754,32 @@ dependencies = [ "futures", "futures-timer", "instant", - "libp2p-core", + "libp2p-core 0.40.1", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.43.7", "log", "rand", "void", ] +[[package]] +name = "libp2p-ping" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "005a34420359223b974ee344457095f027e51346e992d1e0dcd35173f4cdd422" +dependencies = [ + "either", + 
"futures", + "futures-timer", + "libp2p-core 0.42.0", + "libp2p-identity", + "libp2p-swarm 0.45.1", + "rand", + "tracing", + "void", + "web-time", +] + [[package]] name = "libp2p-quic" version = "0.9.3" @@ -4268,20 +4790,44 @@ dependencies = [ "futures", "futures-timer", "if-watch", - "libp2p-core", + "libp2p-core 0.40.1", "libp2p-identity", - "libp2p-tls", + "libp2p-tls 0.2.1", "log", "parking_lot 0.12.3", - "quinn", + "quinn 0.10.2", "rand", "ring 0.16.20", "rustls 0.21.12", - "socket2 0.5.7", - "thiserror", + "socket2 0.5.8", + "thiserror 1.0.69", "tokio", ] +[[package]] +name = "libp2p-quic" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46352ac5cd040c70e88e7ff8257a2ae2f891a4076abad2c439584a31c15fd24e" +dependencies = [ + "bytes", + "futures", + "futures-timer", + "if-watch", + "libp2p-core 0.42.0", + "libp2p-identity", + "libp2p-tls 0.5.0", + "parking_lot 0.12.3", + "quinn 0.11.6", + "rand", + "ring 0.17.8", + "rustls 0.23.20", + "socket2 0.5.8", + "thiserror 1.0.69", + "tokio", + "tracing", +] + [[package]] name = "libp2p-request-response" version = "0.25.3" @@ -4291,15 +4837,35 @@ dependencies = [ "async-trait", "futures", "instant", - "libp2p-core", + "libp2p-core 0.40.1", "libp2p-identity", - "libp2p-swarm", + "libp2p-swarm 0.43.7", "log", "rand", "smallvec", "void", ] +[[package]] +name = "libp2p-request-response" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1356c9e376a94a75ae830c42cdaea3d4fe1290ba409a22c809033d1b7dcab0a6" +dependencies = [ + "async-trait", + "futures", + "futures-bounded 0.2.4", + "futures-timer", + "libp2p-core 0.42.0", + "libp2p-identity", + "libp2p-swarm 0.45.1", + "rand", + "smallvec", + "tracing", + "void", + "web-time", +] + [[package]] name = "libp2p-swarm" version = "0.43.7" @@ -4311,9 +4877,9 @@ dependencies = [ "futures", "futures-timer", "instant", - "libp2p-core", + "libp2p-core 0.40.1", "libp2p-identity", - "libp2p-swarm-derive", + "libp2p-swarm-derive 0.33.0", "log", "multistream-select", "once_cell", @@ -4323,6 +4889,30 @@ dependencies = [ "void", ] +[[package]] +name = "libp2p-swarm" +version = "0.45.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7dd6741793d2c1fb2088f67f82cf07261f25272ebe3c0b0c311e0c6b50e851a" +dependencies = [ + "either", + "fnv", + "futures", + "futures-timer", + "libp2p-core 0.42.0", + "libp2p-identity", + "libp2p-swarm-derive 0.35.0", + "lru", + "multistream-select", + "once_cell", + "rand", + "smallvec", + "tokio", + "tracing", + "void", + "web-time", +] + [[package]] name = "libp2p-swarm-derive" version = "0.33.0" @@ -4333,7 +4923,19 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", +] + +[[package]] +name = "libp2p-swarm-derive" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "206e0aa0ebe004d778d79fb0966aa0de996c19894e2c0605ba2f8524dd4443d8" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.94", ] [[package]] @@ -4346,13 +4948,30 @@ dependencies = [ "futures-timer", "if-watch", "libc", - "libp2p-core", + "libp2p-core 0.40.1", "libp2p-identity", "log", - "socket2 0.5.7", + "socket2 0.5.8", "tokio", ] +[[package]] +name = "libp2p-tcp" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad964f312c59dcfcac840acd8c555de8403e295d39edf96f5240048b5fcaa314" +dependencies = [ + "futures", + "futures-timer", + "if-watch", + 
"libc", + "libp2p-core 0.42.0", + "libp2p-identity", + "socket2 0.5.8", + "tokio", + "tracing", +] + [[package]] name = "libp2p-tls" version = "0.2.1" @@ -4360,15 +4979,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8218d1d5482b122ccae396bbf38abdcb283ecc96fa54760e1dfd251f0546ac61" dependencies = [ "futures", - "futures-rustls", - "libp2p-core", + "futures-rustls 0.24.0", + "libp2p-core 0.40.1", "libp2p-identity", - "rcgen", + "rcgen 0.10.0", "ring 0.16.20", "rustls 0.21.12", "rustls-webpki 0.101.7", - "thiserror", - "x509-parser", + "thiserror 1.0.69", + "x509-parser 0.15.1", + "yasna", +] + +[[package]] +name = "libp2p-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847" +dependencies = [ + "futures", + "futures-rustls 0.26.0", + "libp2p-core 0.42.0", + "libp2p-identity", + "rcgen 0.11.3", + "ring 0.17.8", + "rustls 0.23.20", + "rustls-webpki 0.101.7", + "thiserror 1.0.69", + "x509-parser 0.16.0", "yasna", ] @@ -4381,13 +5019,29 @@ dependencies = [ "futures", "futures-timer", "igd-next", - "libp2p-core", - "libp2p-swarm", + "libp2p-core 0.40.1", + "libp2p-swarm 0.43.7", "log", "tokio", "void", ] +[[package]] +name = "libp2p-upnp" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01bf2d1b772bd3abca049214a3304615e6a36fa6ffc742bdd1ba774486200b8f" +dependencies = [ + "futures", + "futures-timer", + "igd-next", + "libp2p-core 0.42.0", + "libp2p-swarm 0.45.1", + "tokio", + "tracing", + "void", +] + [[package]] name = "libp2p-wasm-ext" version = "0.40.0" @@ -4396,7 +5050,7 @@ checksum = "1e5d8e3a9e07da0ef5b55a9f26c009c8fb3c725d492d8bb4b431715786eea79c" dependencies = [ "futures", "js-sys", - "libp2p-core", + "libp2p-core 0.40.1", "send_wrapper", "wasm-bindgen", "wasm-bindgen-futures", @@ -4410,15 +5064,15 @@ checksum = "004ee9c4a4631435169aee6aad2f62e3984dc031c43b6d29731e8e82a016c538" dependencies = [ "either", "futures", - "futures-rustls", - "libp2p-core", + "futures-rustls 0.24.0", + "libp2p-core 0.40.1", "libp2p-identity", "log", "parking_lot 0.12.3", "pin-project-lite", "rw-stream-sink", - "soketto 0.8.0", - "thiserror", + "soketto 0.8.1", + "thiserror 1.0.69", "url", "webpki-roots", ] @@ -4430,10 +5084,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8eedcb62824c4300efb9cfd4e2a6edaf3ca097b9e68b36dabe45a44469fd6a85" dependencies = [ "futures", - "libp2p-core", + "libp2p-core 0.40.1", "log", - "thiserror", - "yamux", + "thiserror 1.0.69", + "yamux 0.12.1", +] + +[[package]] +name = "libp2p-yamux" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "788b61c80789dba9760d8c669a5bedb642c8267555c803fabd8396e4ca5c5882" +dependencies = [ + "either", + "futures", + "libp2p-core 0.42.0", + "thiserror 1.0.69", + "tracing", + "yamux 0.12.1", + "yamux 0.13.4", ] [[package]] @@ -4449,14 +5118,13 @@ dependencies = [ [[package]] name = "librocksdb-sys" -version = "0.16.0+8.10.0" +version = "0.17.1+9.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce3d60bc059831dc1c83903fb45c103f75db65c5a7bf22272764d9cc683e348c" +checksum = "2b7869a512ae9982f4d46ba482c2a304f1efd80c6412a3d4bf57bb79a619679f" dependencies = [ "bindgen", "bzip2-sys", "cc", - "glob", "libc", "libz-sys", "lz4-sys", @@ -4491,18 +5159,18 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = 
"linked_hash_set" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47186c6da4d81ca383c7c47c1bfc80f4b95f4720514d860a5407aaf4233f9588" +checksum = "bae85b5be22d9843c80e5fc80e9b64c8a3b1f98f867c709956eca3efff4e92e2" dependencies = [ "linked-hash-map", ] [[package]] name = "linregress" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4de04dcecc58d366391f9920245b85ffa684558a5ef6e7736e754347c3aea9c2" +checksum = "a9eda9dcf4f2a99787827661f312ac3219292549c2ee992bf9a6248ffb066bf7" dependencies = [ "nalgebra", ] @@ -4544,11 +5212,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ee39891760e7d94734f6f63fedc29a2e4a152f836120753a72503f09fcf904" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.15.2", ] [[package]] @@ -4597,7 +5265,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -4611,7 +5279,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -4622,7 +5290,7 @@ checksum = "d710e1214dffbab3b5dacb21475dde7d6ed84c69ff722b3a47a782668d44fbac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -4633,7 +5301,7 @@ checksum = "b8fb85ec1620619edf2984a7693497d4ec88a9665d8b87e942856884c92dbf2a" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -4752,7 +5420,7 @@ dependencies = [ "crypto-bigint", "ff", "ff-group-tests", - "generic-array 1.1.0", + "generic-array 1.1.1", "group", "hex", "rand_core", @@ -4769,20 +5437,19 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +checksum = "4ffbe83022cedc1d264172192511ae958937694cd57ce297164951b8b3568394" dependencies = [ "adler2", ] [[package]] name = "mio" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ - "hermit-abi", "libc", "wasi", "windows-sys 0.52.0", @@ -4832,7 +5499,7 @@ dependencies = [ "schnorr-signatures", "serde_json", "subtle", - "thiserror", + "thiserror 2.0.9", "zeroize", ] @@ -4849,7 +5516,7 @@ dependencies = [ "serde", "serde_json", "std-shims", - "thiserror", + "thiserror 2.0.9", "zeroize", ] @@ -4876,7 +5543,7 @@ dependencies = [ "monero-primitives", "rand_core", "std-shims", - "thiserror", + "thiserror 2.0.9", "zeroize", ] @@ -4896,7 +5563,7 @@ dependencies = [ "rand_core", "std-shims", "subtle", - "thiserror", + "thiserror 2.0.9", "zeroize", ] @@ -4931,7 +5598,7 @@ dependencies = [ "monero-io", "monero-primitives", "std-shims", - "thiserror", + "thiserror 2.0.9", "zeroize", ] @@ -4959,7 +5626,7 @@ dependencies = [ "serde", "serde_json", "std-shims", - "thiserror", + "thiserror 2.0.9", "zeroize", ] @@ -5032,7 +5699,7 @@ dependencies = [ "serde", "serde_json", "std-shims", - "thiserror", + "thiserror 2.0.9", "tokio", "zeroize", ] @@ 
-5052,7 +5719,7 @@ dependencies = [ "percent-encoding", "serde", "static_assertions", - "unsigned-varint", + "unsigned-varint 0.7.2", "url", ] @@ -5095,7 +5762,7 @@ dependencies = [ "multihash-derive 0.8.0", "sha2", "sha3", - "unsigned-varint", + "unsigned-varint 0.7.2", ] [[package]] @@ -5105,7 +5772,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492" dependencies = [ "core2", - "unsigned-varint", + "unsigned-varint 0.7.2", ] [[package]] @@ -5138,7 +5805,7 @@ dependencies = [ "proc-macro2", "quote", "syn 1.0.109", - "synstructure", + "synstructure 0.12.6", ] [[package]] @@ -5163,7 +5830,7 @@ dependencies = [ "proc-macro2", "quote", "syn 1.0.109", - "synstructure", + "synstructure 0.12.6", ] [[package]] @@ -5183,18 +5850,17 @@ dependencies = [ "log", "pin-project", "smallvec", - "unsigned-varint", + "unsigned-varint 0.7.2", ] [[package]] name = "nalgebra" -version = "0.32.6" +version = "0.33.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5c17de023a86f59ed79891b2e5d5a94c705dbe904a5b5c9c952ea6221b03e4" +checksum = "26aecdf64b707efd1310e3544d709c5c0ac61c13756046aaaba41be5c4f66a3b" dependencies = [ "approx", "matrixmultiply", - "nalgebra-macros", "num-complex", "num-rational", "num-traits", @@ -5202,17 +5868,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "nalgebra-macros" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "254a5372af8fc138e36684761d3c0cdb758a4410e938babcff1c860ce14ddbfc" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - [[package]] name = "names" version = "0.14.0" @@ -5224,21 +5879,20 @@ dependencies = [ [[package]] name = "netlink-packet-core" -version = "0.4.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345b8ab5bd4e71a2986663e88c56856699d060e78e152e6e9d7966fcd5491297" +checksum = "72724faf704479d67b388da142b186f916188505e7e0b26719019c525882eda4" dependencies = [ "anyhow", "byteorder", - "libc", "netlink-packet-utils", ] [[package]] name = "netlink-packet-route" -version = "0.12.0" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9ea4302b9759a7a88242299225ea3688e63c85ea136371bb6cf94fd674efaab" +checksum = "053998cea5a306971f88580d0829e90f270f940befd7cf928da179d4187a5a66" dependencies = [ "anyhow", "bitflags 1.3.2", @@ -5257,29 +5911,29 @@ dependencies = [ "anyhow", "byteorder", "paste", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "netlink-proto" -version = "0.10.0" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65b4b14489ab424703c092062176d52ba55485a89c076b4f9db05092b7223aa6" +checksum = "86b33524dc0968bfad349684447bfce6db937a9ac3332a1fe60c0c5a5ce63f21" dependencies = [ "bytes", "futures", "log", "netlink-packet-core", "netlink-sys", - "thiserror", + "thiserror 1.0.69", "tokio", ] [[package]] name = "netlink-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "416060d346fbaf1f23f9512963e3e878f1a78e707cb699ba9215761754244307" +checksum = "16c903aa70590cb93691bf97a767c8d1d6122d2cc9070433deb3bbf36ce8bd23" dependencies = [ "bytes", "futures", @@ -5290,9 +5944,9 @@ dependencies = [ [[package]] name = "nix" -version = "0.24.3" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" dependencies = [ "bitflags 1.3.2", "cfg-if", @@ -5331,6 +5985,20 @@ dependencies = [ "winapi", ] +[[package]] +name = "num" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", +] + [[package]] name = "num-bigint" version = "0.4.6" @@ -5375,6 +6043,17 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-rational" version = "0.4.2" @@ -5423,7 +6102,18 @@ checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", +] + +[[package]] +name = "nybbles" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3409fc85ac27b27d971ea7cd1aabafd2eefa6de7e481c8d4f707225c117e81a" +dependencies = [ + "const-hex", + "serde", + "smallvec", ] [[package]] @@ -5440,9 +6130,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.5" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "memchr", ] @@ -5453,7 +6143,16 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" dependencies = [ - "asn1-rs", + "asn1-rs 0.5.2", +] + +[[package]] +name = "oid-registry" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d8034d9489cdaf79228eb9f6a3b8d7bb32ba00d6645ebd48eef4077ceb5bd9" +dependencies = [ + "asn1-rs 0.6.2", ] [[package]] @@ -5754,8 +6453,7 @@ checksum = "7924d1d0ad836f665c9065e26d016c673ece3993f30d340068b16f282afc1156" [[package]] name = "pasta_curves" version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3e57598f73cc7e1b2ac63c79c517b31a0877cd7c402cdcaa311b5208de7a095" +source = "git+https://github.com/kayabaNerve/pasta_curves?rev=a46b5be95cacbff54d06aad8d3bbcba42e05d616#a46b5be95cacbff54d06aad8d3bbcba42e05d616" dependencies = [ "blake2b_simd", "ff", @@ -5764,6 +6462,7 @@ dependencies = [ "rand", "static_assertions", "subtle", + "zeroize", ] [[package]] @@ -5779,6 +6478,15 @@ dependencies = [ "tokio", ] +[[package]] +name = "pbkdf2" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "pbkdf2" version = "0.12.2" @@ -5786,7 +6494,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" dependencies = [ "digest 0.10.7", - "hmac", ] [[package]] @@ -5798,6 +6505,16 @@ dependencies = [ "base64 0.13.1", ] +[[package]] +name = "pem" +version = "3.0.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" +dependencies = [ + "base64 0.22.1", + "serde", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -5806,12 +6523,12 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.13" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" +checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror", + "thiserror 2.0.9", "ucd-trie", ] @@ -5822,34 +6539,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.5.0", + "indexmap 2.7.0", ] [[package]] name = "pin-project" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" +checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" +checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" @@ -5942,15 +6659,15 @@ dependencies = [ [[package]] name = "predicates-core" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae8177bee8e75d6846599c6b9ff679ed51e882816914eec639944d7c9aa11931" +checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa" [[package]] name = "predicates-tree" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41b740d195ed3166cd147c8047ec98db0e22ec019eb8eeb76d343b795304fb13" +checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c" dependencies = [ "predicates-core", "termtree", @@ -6004,7 +6721,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.22.20", + "toml_edit 0.22.22", ] [[package]] @@ -6050,7 +6767,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -6061,14 +6778,14 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" 
dependencies = [ "unicode-ident", ] @@ -6084,7 +6801,7 @@ dependencies = [ "lazy_static", "memchr", "parking_lot 0.12.3", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -6099,6 +6816,18 @@ dependencies = [ "prometheus-client-derive-encode", ] +[[package]] +name = "prometheus-client" +version = "0.22.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "504ee9ff529add891127c4827eb481bd69dc0ebc72e9a682e187db4caa60c3ca" +dependencies = [ + "dtoa", + "itoa", + "parking_lot 0.12.3", + "prometheus-client-derive-encode", +] + [[package]] name = "prometheus-client-derive-encode" version = "0.4.2" @@ -6107,7 +6836,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -6186,9 +6915,9 @@ dependencies = [ [[package]] name = "psm" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa37f80ca58604976033fae9515a8a2989fc13797d953f7c04fb8fa36a11f205" +checksum = "200b9ff220857e53e184257720a14553b2f4aa02577d2ed9842d45d4b9654810" dependencies = [ "cc", ] @@ -6214,11 +6943,24 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ededb1cd78531627244d51dd0c7139fbe736c7d57af0092a76f0ffb2f56e98" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.6.2", "bytes", "quick-protobuf", - "thiserror", - "unsigned-varint", + "thiserror 1.0.69", + "unsigned-varint 0.7.2", +] + +[[package]] +name = "quick-protobuf-codec" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15a0580ab32b169745d7a39db2ba969226ca16738931be152a3209b409de2474" +dependencies = [ + "asynchronous-codec 0.7.0", + "bytes", + "quick-protobuf", + "thiserror 1.0.69", + "unsigned-varint 0.8.0", ] [[package]] @@ -6230,11 +6972,30 @@ dependencies = [ "bytes", "futures-io", "pin-project-lite", - "quinn-proto", - "quinn-udp", + "quinn-proto 0.10.6", + "quinn-udp 0.4.1", "rustc-hash 1.1.0", "rustls 0.21.12", - "thiserror", + "thiserror 1.0.69", + "tokio", + "tracing", +] + +[[package]] +name = "quinn" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" +dependencies = [ + "bytes", + "futures-io", + "pin-project-lite", + "quinn-proto 0.11.9", + "quinn-udp 0.5.9", + "rustc-hash 2.1.0", + "rustls 0.23.20", + "socket2 0.5.8", + "thiserror 2.0.9", "tokio", "tracing", ] @@ -6251,11 +7012,31 @@ dependencies = [ "rustc-hash 1.1.0", "rustls 0.21.12", "slab", - "thiserror", + "thiserror 1.0.69", "tinyvec", "tracing", ] +[[package]] +name = "quinn-proto" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" +dependencies = [ + "bytes", + "getrandom", + "rand", + "ring 0.17.8", + "rustc-hash 2.1.0", + "rustls 0.23.20", + "rustls-pki-types", + "slab", + "thiserror 2.0.9", + "tinyvec", + "tracing", + "web-time", +] + [[package]] name = "quinn-udp" version = "0.4.1" @@ -6264,16 +7045,30 @@ checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" dependencies = [ "bytes", "libc", - "socket2 0.5.7", + "socket2 0.5.8", "tracing", "windows-sys 0.48.0", ] [[package]] -name = "quote" -version = "1.0.37" +name = "quinn-udp" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +checksum = "1c40286217b4ba3a71d644d752e6a0b71f13f1b6a2c5311acfcbe0c2418ed904" +dependencies = [ + "cfg_aliases 0.2.1", + "libc", + "once_cell", + "socket2 0.5.8", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "quote" +version = "1.0.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" dependencies = [ "proc-macro2", ] @@ -6375,7 +7170,19 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ - "pem", + "pem 1.1.1", + "ring 0.16.20", + "time", + "yasna", +] + +[[package]] +name = "rcgen" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52c4f3084aa3bc7dfbba4eff4fab2a54db4324965d8872ab933565e6fbd83bc6" +dependencies = [ + "pem 3.0.4", "ring 0.16.20", "time", "yasna", @@ -6383,9 +7190,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ "bitflags 2.6.0", ] @@ -6398,7 +7205,7 @@ checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom", "libredox", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -6418,7 +7225,7 @@ checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -6436,13 +7243,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.8", + "regex-automata 0.4.9", "regex-syntax 0.8.5", ] @@ -6457,9 +7264,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", @@ -6488,6 +7295,68 @@ dependencies = [ "quick-error", ] +[[package]] +name = "revm" +version = "19.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a5a57589c308880c0f89ebf68d92aeef0d51e1ed88867474f895f6fd0f25c64" +dependencies = [ + "auto_impl", + "cfg-if", + "dyn-clone", + "revm-interpreter", + "revm-precompile", + "serde", + "serde_json", +] + +[[package]] +name = "revm-interpreter" +version = "15.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0f632e761f171fb2f6ace8d1552a5793e0350578d4acec3e79ade1489f4c2a6" +dependencies = [ + "revm-primitives", + "serde", +] + +[[package]] +name = "revm-precompile" +version = "16.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6542fb37650dfdbf4b9186769e49c4a8bc1901a3280b2ebf32f915b6c8850f36" +dependencies = [ + "aurora-engine-modexp", + "c-kzg", + "cfg-if", + "k256", + "once_cell", + 
"revm-primitives", + "ripemd", + "secp256k1", + "sha2", + "substrate-bn", +] + +[[package]] +name = "revm-primitives" +version = "15.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48faea1ecf2c9f80d9b043bbde0db9da616431faed84c4cfa3dd7393005598e6" +dependencies = [ + "alloy-eip2930", + "alloy-eip7702", + "alloy-primitives", + "auto_impl", + "bitflags 2.6.0", + "bitvec", + "cfg-if", + "dyn-clone", + "enumn", + "hex", + "serde", +] + [[package]] name = "rfc6979" version = "0.4.0" @@ -6551,14 +7420,14 @@ dependencies = [ name = "rocksdb" version = "0.21.0" dependencies = [ - "rocksdb 0.22.0", + "rocksdb 0.23.0", ] [[package]] name = "rocksdb" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bd13e55d6d7b8cd0ea569161127567cd587676c99f4472f779a0279aa60a7a7" +checksum = "26ec73b20525cb235bad420f911473b69f9fe27cc856c5461bccd7e4af037f43" dependencies = [ "libc", "librocksdb-sys", @@ -6577,16 +7446,19 @@ dependencies = [ [[package]] name = "rtnetlink" -version = "0.10.1" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322c53fd76a18698f1c27381d58091de3a043d356aa5bd0d510608b565f469a0" +checksum = "7a552eb82d19f38c3beed3f786bd23aa434ceb9ac43ab44419ca6d67a7e186c0" dependencies = [ "futures", "log", + "netlink-packet-core", "netlink-packet-route", + "netlink-packet-utils", "netlink-proto", + "netlink-sys", "nix", - "thiserror", + "thiserror 1.0.69", "tokio", ] @@ -6644,9 +7516,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" [[package]] name = "rustc-hex" @@ -6669,7 +7541,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver 1.0.23", + "semver 1.0.24", ] [[package]] @@ -6683,15 +7555,15 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.37" +version = "0.38.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" dependencies = [ "bitflags 2.6.0", "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -6708,9 +7580,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.14" +version = "0.23.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" +checksum = "5065c3f250cbd332cd894be57c40fa52387247659b14a2d6041d121547903b1b" dependencies = [ "once_cell", "ring 0.17.8", @@ -6722,31 +7594,24 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" +checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" dependencies = [ "openssl-probe", - "rustls-pemfile", "rustls-pki-types", "schannel", "security-framework", ] -[[package]] -name = "rustls-pemfile" -version = "2.2.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" -dependencies = [ - "rustls-pki-types", -] - [[package]] name = "rustls-pki-types" -version = "1.9.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" +checksum = "d2bf47e6ff922db3825eb750c4e2ff784c6ff8fb9e13046ef6a1d1c5401b0b37" +dependencies = [ + "web-time", +] [[package]] name = "rustls-webpki" @@ -6771,9 +7636,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.17" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" [[package]] name = "rusty-fork" @@ -6806,9 +7671,9 @@ checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "safe_arch" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3460605018fdc9612bce72735cba0d27efbcd9904780d44c7e3a9948f96148a" +checksum = "96b02de82ddbe1b636e6170c21be622223aea188ef2e139be0a5b219ec215323" dependencies = [ "bytemuck", ] @@ -6830,7 +7695,7 @@ dependencies = [ "log", "sp-core", "sp-wasm-interface", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -6842,7 +7707,7 @@ dependencies = [ "futures", "futures-timer", "ip_network", - "libp2p", + "libp2p 0.52.4", "log", "multihash-codetable", "parity-scale-codec", @@ -6858,7 +7723,7 @@ dependencies = [ "sp-keystore", "sp-runtime", "substrate-prometheus-endpoint", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -6926,7 +7791,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -6963,8 +7828,8 @@ dependencies = [ "sp-panic-handler", "sp-runtime", "sp-version", - "thiserror", - "tiny-bip39 1.0.2", + "thiserror 1.0.69", + "tiny-bip39", "tokio", ] @@ -7041,7 +7906,7 @@ dependencies = [ "sp-runtime", "sp-state-machine", "substrate-prometheus-endpoint", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -7077,7 +7942,7 @@ dependencies = [ "sp-keystore", "sp-runtime", "substrate-prometheus-endpoint", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -7131,7 +7996,7 @@ dependencies = [ "sp-keystore", "sp-runtime", "substrate-prometheus-endpoint", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -7187,7 +8052,7 @@ dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", "sp-wasm-interface", - "thiserror", + "thiserror 1.0.69", "wasm-instrument", ] @@ -7235,7 +8100,7 @@ dependencies = [ "sp-application-crypto", "sp-core", "sp-keystore", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -7246,14 +8111,14 @@ dependencies = [ "array-bytes", "async-channel", "async-trait", - "asynchronous-codec", + "asynchronous-codec 0.6.2", "bytes", "either", "fnv", "futures", "futures-timer", "ip_network", - "libp2p", + "libp2p 0.52.4", "linked_hash_set", "log", "mockall", @@ -7273,8 +8138,8 @@ dependencies = [ "sp-core", "sp-runtime", "substrate-prometheus-endpoint", - "thiserror", - "unsigned-varint", + "thiserror 1.0.69", + "unsigned-varint 0.7.2", "void", "wasm-timer", "zeroize", @@ -7296,8 +8161,8 @@ dependencies = [ "sc-network", "sp-blockchain", "sp-runtime", - "thiserror", - "unsigned-varint", + "thiserror 1.0.69", + "unsigned-varint 0.7.2", ] [[package]] @@ -7354,7 
+8219,7 @@ dependencies = [ "sp-blockchain", "sp-core", "sp-runtime", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -7368,7 +8233,7 @@ dependencies = [ "fork-tree", "futures", "futures-timer", - "libp2p", + "libp2p 0.52.4", "log", "mockall", "parity-scale-codec", @@ -7388,7 +8253,7 @@ dependencies = [ "sp-core", "sp-runtime", "substrate-prometheus-endpoint", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -7398,7 +8263,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "array-bytes", "futures", - "libp2p", + "libp2p 0.52.4", "log", "parity-scale-codec", "sc-network", @@ -7419,7 +8284,7 @@ dependencies = [ "futures", "futures-timer", "hyper 0.14.30", - "libp2p", + "libp2p 0.52.4", "log", "num_cpus", "once_cell", @@ -7495,7 +8360,7 @@ dependencies = [ "sp-rpc", "sp-runtime", "sp-version", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -7535,7 +8400,7 @@ dependencies = [ "sp-core", "sp-runtime", "sp-version", - "thiserror", + "thiserror 1.0.69", "tokio-stream", ] @@ -7596,7 +8461,7 @@ dependencies = [ "static_init", "substrate-prometheus-endpoint", "tempfile", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", "tracing-futures", @@ -7639,7 +8504,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "chrono", "futures", - "libp2p", + "libp2p 0.52.4", "log", "parking_lot 0.12.3", "pin-project", @@ -7647,7 +8512,7 @@ dependencies = [ "sc-utils", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "wasm-timer", ] @@ -7673,7 +8538,7 @@ dependencies = [ "sp-rpc", "sp-runtime", "sp-tracing", - "thiserror", + "thiserror 1.0.69", "tracing", "tracing-log", "tracing-subscriber 0.2.25", @@ -7687,7 +8552,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -7713,7 +8578,7 @@ dependencies = [ "sp-tracing", "sp-transaction-pool", "substrate-prometheus-endpoint", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -7729,7 +8594,7 @@ dependencies = [ "sp-blockchain", "sp-core", "sp-runtime", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -7749,13 +8614,13 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.11.3" +version = "2.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" +checksum = "346a3b32eba2640d17a9cb5927056b08f3de90f65b72fe09402c2ad07d684d0b" dependencies = [ "bitvec", "cfg-if", - "derive_more 0.99.18", + "derive_more", "parity-scale-codec", "scale-info-derive", "serde", @@ -7763,30 +8628,30 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.11.3" +version = "2.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" +checksum = "c6630024bf739e2179b91fb424b28898baf819414262c5d376677dbff1fe7ebf" dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.94", ] [[package]] name = "schannel" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ "windows-sys 0.59.0", ] [[package]] name = "schnellru" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c9a8ef13a93c54d20580de1e5c413e624e53121d42fc7e2c11d10ef7f8b02367" +checksum = "356285bbf17bea63d9e52e96bd18f039672ac92b55b8cb997d6162a2a37d1649" dependencies = [ "ahash", "cfg-if", @@ -7890,6 +8755,27 @@ dependencies = [ "cc", ] +[[package]] +name = "secq256k1" +version = "0.1.0" +dependencies = [ + "blake2", + "ciphersuite", + "crypto-bigint", + "ec-divisors", + "ff-group-tests", + "generalized-bulletproofs-ec-gadgets", + "generic-array 0.14.7", + "hex", + "hex-literal", + "k256", + "rand_core", + "rustversion", + "std-shims", + "subtle", + "zeroize", +] + [[package]] name = "secrecy" version = "0.8.0" @@ -7901,12 +8787,12 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.11.1" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +checksum = "81d3f8c9bfcc3cbb6b0179eb57042d75b1582bdc65c3cb95f3fa999509c03cbc" dependencies = [ "bitflags 2.6.0", - "core-foundation", + "core-foundation 0.10.0", "core-foundation-sys", "libc", "security-framework-sys", @@ -7914,9 +8800,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.0" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" +checksum = "1863fd3768cd83c56a7f60faa4dc0d403f1b6df0a38c3c25f44b7894e45370d5" dependencies = [ "core-foundation-sys", "libc", @@ -7937,14 +8823,14 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" dependencies = [ - "semver-parser 0.10.2", + "semver-parser 0.10.3", ] [[package]] name = "semver" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" dependencies = [ "serde", ] @@ -7957,9 +8843,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "semver-parser" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +checksum = "9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" dependencies = [ "pest", ] @@ -7974,6 +8860,7 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" name = "serai-abi" version = "0.1.0" dependencies = [ + "bitvec", "borsh", "frame-support", "parity-scale-codec", @@ -7992,20 +8879,50 @@ dependencies = [ "sp-runtime", ] +[[package]] +name = "serai-bitcoin-processor" +version = "0.1.0" +dependencies = [ + "bitcoin-serai", + "borsh", + "ciphersuite", + "dkg", + "hex", + "log", + "modular-frost", + "parity-scale-codec", + "rand_core", + "secp256k1", + "serai-client", + "serai-db", + "serai-processor-bin", + "serai-processor-key-gen", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", + "serai-processor-signers", + "serai-processor-transaction-chaining-scheduler", + "serai-processor-utxo-scheduler-primitives", + "tokio", + "zalloc", +] + [[package]] name = "serai-client" version = "0.1.0" dependencies = [ "async-lock", "bitcoin", + "bitvec", "blake2", + "borsh", "ciphersuite", "dockertest", "frame-system", "frost-schnorrkel", "hex", "modular-frost", 
- "monero-wallet", + "monero-address", "multiaddr", "parity-scale-codec", "rand_core", @@ -8016,7 +8933,7 @@ dependencies = [ "simple-request", "sp-core", "sp-runtime", - "thiserror", + "thiserror 2.0.9", "tokio", "zeroize", ] @@ -8033,6 +8950,7 @@ dependencies = [ "serai-coins-primitives", "serai-primitives", "sp-core", + "sp-io", "sp-runtime", "sp-std", ] @@ -8054,63 +8972,133 @@ dependencies = [ name = "serai-coordinator" version = "0.1.0" dependencies = [ - "async-trait", + "bitvec", "blake2", "borsh", "ciphersuite", + "dkg", "env_logger", - "flexible-transcript", "frost-schnorrkel", - "futures-util", "hex", - "libp2p", "log", - "modular-frost", "parity-scale-codec", "rand_core", - "schnorr-signatures", + "schnorrkel", "serai-client", + "serai-coordinator-libp2p-p2p", + "serai-coordinator-p2p", + "serai-coordinator-substrate", + "serai-coordinator-tributary", + "serai-cosign", "serai-db", "serai-env", "serai-message-queue", "serai-processor-messages", - "sp-application-crypto", - "sp-runtime", + "serai-task", "tokio", - "tributary-chain", + "tributary-sdk", "zalloc", "zeroize", ] [[package]] -name = "serai-coordinator-tests" +name = "serai-coordinator-libp2p-p2p" version = "0.1.0" dependencies = [ "async-trait", "blake2", "borsh", - "ciphersuite", - "dkg", - "dockertest", + "futures-util", "hex", - "parity-scale-codec", + "libp2p 0.54.1", + "log", "rand_core", "schnorrkel", "serai-client", - "serai-docker-tests", - "serai-message-queue", - "serai-message-queue-tests", - "serai-processor-messages", + "serai-coordinator-p2p", + "serai-cosign", + "serai-task", "tokio", + "tributary-sdk", "zeroize", ] [[package]] -name = "serai-db" +name = "serai-coordinator-p2p" version = "0.1.0" +dependencies = [ + "borsh", + "futures-lite", + "log", + "serai-client", + "serai-cosign", + "serai-db", + "serai-task", + "tokio", + "tributary-sdk", +] + +[[package]] +name = "serai-coordinator-substrate" +version = "0.1.0" +dependencies = [ + "bitvec", + "borsh", + "dkg", + "futures", + "log", + "parity-scale-codec", + "serai-client", + "serai-cosign", + "serai-db", + "serai-processor-messages", + "serai-task", + "tokio", +] + +[[package]] +name = "serai-coordinator-tributary" +version = "0.1.0" +dependencies = [ + "blake2", + "borsh", + "ciphersuite", + "dkg", + "log", + "parity-scale-codec", + "rand_core", + "schnorr-signatures", + "serai-client", + "serai-coordinator-substrate", + "serai-cosign", + "serai-db", + "serai-processor-messages", + "serai-task", + "tributary-sdk", + "zeroize", +] + +[[package]] +name = "serai-cosign" +version = "0.1.0" +dependencies = [ + "blake2", + "borsh", + "log", + "parity-scale-codec", + "schnorrkel", + "serai-client", + "serai-db", + "serai-task", + "tokio", +] + +[[package]] +name = "serai-db" +version = "0.1.1" dependencies = [ "parity-db", - "rocksdb 0.21.0", + "rocksdb 0.23.0", ] [[package]] @@ -8145,11 +9133,19 @@ version = "0.1.0" dependencies = [ "frame-support", "frame-system", + "pallet-babe", + "pallet-grandpa", + "pallet-timestamp", "parity-scale-codec", "scale-info", "serai-coins-pallet", "serai-dex-pallet", "serai-primitives", + "serai-validator-sets-pallet", + "sp-consensus-babe", + "sp-core", + "sp-io", + "sp-runtime", ] [[package]] @@ -8183,6 +9179,46 @@ dependencies = [ name = "serai-env" version = "0.1.0" +[[package]] +name = "serai-ethereum-processor" +version = "0.1.0" +dependencies = [ + "alloy-core", + "alloy-provider", + "alloy-rlp", + "alloy-rpc-client", + "alloy-rpc-types-eth", + "alloy-simple-request-transport", + "alloy-transport", + "borsh", + 
"ciphersuite", + "const-hex", + "dkg", + "ethereum-schnorr-contract", + "hex", + "k256", + "log", + "modular-frost", + "parity-scale-codec", + "rand_core", + "serai-client", + "serai-db", + "serai-env", + "serai-processor-bin", + "serai-processor-ethereum-erc20", + "serai-processor-ethereum-primitives", + "serai-processor-ethereum-router", + "serai-processor-key-gen", + "serai-processor-messages", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", + "serai-processor-signers", + "serai-processor-smart-contract-scheduler", + "tokio", + "zalloc", +] + [[package]] name = "serai-ethereum-relayer" version = "0.1.0" @@ -8195,28 +9231,16 @@ dependencies = [ ] [[package]] -name = "serai-full-stack-tests" +name = "serai-ethereum-test-primitives" version = "0.1.0" dependencies = [ - "async-trait", - "bitcoin-serai", - "curve25519-dalek", - "dockertest", - "hex", - "monero-simple-request-rpc", - "monero-wallet", - "parity-scale-codec", - "rand_core", - "serai-client", - "serai-coordinator-tests", - "serai-docker-tests", - "serai-message-queue-tests", - "serai-processor", - "serai-processor-tests", - "serde", - "serde_json", - "tokio", - "zeroize", + "alloy-consensus", + "alloy-core", + "alloy-provider", + "alloy-rpc-types-eth", + "alloy-simple-request-transport", + "k256", + "serai-processor-ethereum-primitives", ] [[package]] @@ -8257,12 +9281,17 @@ dependencies = [ name = "serai-in-instructions-pallet" version = "0.1.0" dependencies = [ + "bitvec", "frame-support", "frame-system", + "pallet-babe", + "pallet-grandpa", + "pallet-timestamp", "parity-scale-codec", "scale-info", "serai-coins-pallet", "serai-dex-pallet", + "serai-economic-security-pallet", "serai-emissions-pallet", "serai-genesis-liquidity-pallet", "serai-in-instructions-primitives", @@ -8327,6 +9356,36 @@ dependencies = [ "zeroize", ] +[[package]] +name = "serai-monero-processor" +version = "0.1.0" +dependencies = [ + "borsh", + "ciphersuite", + "dalek-ff-group", + "dkg", + "log", + "modular-frost", + "monero-simple-request-rpc", + "monero-wallet", + "parity-scale-codec", + "rand_chacha", + "rand_core", + "serai-client", + "serai-processor-bin", + "serai-processor-key-gen", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", + "serai-processor-signers", + "serai-processor-utxo-scheduler", + "serai-processor-utxo-scheduler-primitives", + "serai-processor-view-keys", + "tokio", + "zalloc", + "zeroize", +] + [[package]] name = "serai-no-std-tests" version = "0.1.0" @@ -8336,26 +9395,31 @@ dependencies = [ "dalek-ff-group", "dkg", "dleq", + "ec-divisors", + "embedwards25519", "flexible-transcript", + "generalized-bulletproofs", + "generalized-bulletproofs-circuit-abstraction", + "generalized-bulletproofs-ec-gadgets", "minimal-ed448", "monero-wallet", "multiexp", "schnorr-signatures", + "secq256k1", ] [[package]] name = "serai-node" version = "0.1.0" dependencies = [ - "bitcoin-serai", "ciphersuite", "clap", - "curve25519-dalek", + "embedwards25519", "frame-benchmarking", "futures-util", "hex", "jsonrpsee", - "libp2p", + "libp2p 0.52.4", "log", "monero-wallet", "pallet-transaction-payment-rpc", @@ -8378,6 +9442,7 @@ dependencies = [ "sc-transaction-pool", "sc-transaction-pool-api", "schnorrkel", + "secq256k1", "serai-env", "serai-runtime", "sp-api", @@ -8399,11 +9464,13 @@ name = "serai-orchestrator" version = "0.0.1" dependencies = [ "ciphersuite", + "embedwards25519", "flexible-transcript", "hex", "home", "rand_chacha", "rand_core", + 
"secq256k1", "zalloc", "zeroize", ] @@ -8413,6 +9480,7 @@ name = "serai-primitives" version = "0.1.0" dependencies = [ "borsh", + "ciphersuite", "frame-support", "parity-scale-codec", "rand_core", @@ -8427,41 +9495,143 @@ dependencies = [ ] [[package]] -name = "serai-processor" +name = "serai-processor-bin" version = "0.1.0" dependencies = [ - "async-trait", - "bitcoin-serai", "borsh", "ciphersuite", - "const-hex", - "dalek-ff-group", - "dockertest", + "dkg", "env_logger", - "ethereum-serai", - "flexible-transcript", - "frost-schnorrkel", "hex", + "log", + "parity-scale-codec", + "serai-client", + "serai-cosign", + "serai-db", + "serai-env", + "serai-message-queue", + "serai-processor-key-gen", + "serai-processor-messages", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", + "serai-processor-signers", + "tokio", + "zeroize", +] + +[[package]] +name = "serai-processor-ethereum-deployer" +version = "0.1.0" +dependencies = [ + "alloy-consensus", + "alloy-core", + "alloy-node-bindings", + "alloy-provider", + "alloy-rpc-client", + "alloy-rpc-types-eth", + "alloy-simple-request-transport", + "alloy-sol-macro", + "alloy-sol-types", + "alloy-transport", + "build-solidity-contracts", + "serai-ethereum-test-primitives", + "serai-processor-ethereum-primitives", + "tokio", +] + +[[package]] +name = "serai-processor-ethereum-erc20" +version = "0.1.0" +dependencies = [ + "alloy-core", + "alloy-provider", + "alloy-rpc-types-eth", + "alloy-simple-request-transport", + "alloy-sol-macro", + "alloy-sol-types", + "alloy-transport", + "futures-util", + "serai-processor-ethereum-primitives", +] + +[[package]] +name = "serai-processor-ethereum-primitives" +version = "0.1.0" +dependencies = [ + "alloy-consensus", + "alloy-primitives", + "borsh", + "group", "k256", +] + +[[package]] +name = "serai-processor-ethereum-router" +version = "0.1.0" +dependencies = [ + "alloy-consensus", + "alloy-core", + "alloy-node-bindings", + "alloy-provider", + "alloy-rpc-client", + "alloy-rpc-types-eth", + "alloy-simple-request-transport", + "alloy-sol-macro", + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "alloy-sol-types", + "alloy-transport", + "borsh", + "build-solidity-contracts", + "ethereum-schnorr-contract", + "futures-util", + "group", + "k256", + "parity-scale-codec", + "rand_core", + "revm", + "serai-client", + "serai-ethereum-test-primitives", + "serai-processor-ethereum-deployer", + "serai-processor-ethereum-erc20", + "serai-processor-ethereum-primitives", + "syn 2.0.94", + "syn-solidity", + "tokio", +] + +[[package]] +name = "serai-processor-frost-attempt-manager" +version = "0.1.0" +dependencies = [ + "borsh", "log", "modular-frost", - "monero-simple-request-rpc", - "monero-wallet", + "parity-scale-codec", + "rand_core", + "serai-db", + "serai-processor-messages", + "serai-validator-sets-primitives", +] + +[[package]] +name = "serai-processor-key-gen" +version = "0.1.0" +dependencies = [ + "blake2", + "borsh", + "ciphersuite", + "dkg", + "ec-divisors", + "flexible-transcript", + "log", "parity-scale-codec", "rand_chacha", "rand_core", - "secp256k1", - "serai-client", "serai-db", - "serai-docker-tests", - "serai-env", - "serai-message-queue", "serai-processor-messages", - "serde_json", - "sp-application-crypto", - "thiserror", - "tokio", - "zalloc", + "serai-validator-sets-primitives", "zeroize", ] @@ -8471,42 +9641,148 @@ version = "0.1.0" dependencies = [ "borsh", "dkg", + "hex", "parity-scale-codec", "serai-coins-primitives", + "serai-cosign", 
"serai-in-instructions-primitives", "serai-primitives", "serai-validator-sets-primitives", ] [[package]] -name = "serai-processor-tests" +name = "serai-processor-primitives" +version = "0.1.0" +dependencies = [ + "borsh", + "group", + "log", + "parity-scale-codec", + "serai-coins-primitives", + "serai-primitives", + "serai-task", + "tokio", +] + +[[package]] +name = "serai-processor-scanner" +version = "0.1.0" +dependencies = [ + "blake2", + "borsh", + "group", + "hex", + "log", + "parity-scale-codec", + "serai-coins-primitives", + "serai-db", + "serai-in-instructions-primitives", + "serai-primitives", + "serai-processor-messages", + "serai-processor-primitives", + "serai-processor-scheduler-primitives", + "serai-validator-sets-primitives", + "tokio", +] + +[[package]] +name = "serai-processor-scheduler-primitives" version = "0.1.0" dependencies = [ - "bitcoin-serai", "borsh", "ciphersuite", - "curve25519-dalek", - "dkg", - "dockertest", - "ethereum-serai", - "hex", - "k256", - "monero-simple-request-rpc", - "monero-wallet", + "modular-frost", + "parity-scale-codec", + "serai-db", +] + +[[package]] +name = "serai-processor-signers" +version = "0.1.0" +dependencies = [ + "blake2", + "borsh", + "ciphersuite", + "frost-schnorrkel", + "log", + "modular-frost", "parity-scale-codec", "rand_core", - "serai-client", + "serai-cosign", "serai-db", - "serai-docker-tests", - "serai-message-queue", - "serai-message-queue-tests", - "serai-processor", + "serai-in-instructions-primitives", + "serai-primitives", + "serai-processor-frost-attempt-manager", "serai-processor-messages", - "serde_json", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", + "serai-validator-sets-primitives", "tokio", "zeroize", ] +[[package]] +name = "serai-processor-smart-contract-scheduler" +version = "0.1.0" +dependencies = [ + "borsh", + "group", + "parity-scale-codec", + "serai-db", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", +] + +[[package]] +name = "serai-processor-transaction-chaining-scheduler" +version = "0.1.0" +dependencies = [ + "borsh", + "group", + "parity-scale-codec", + "serai-db", + "serai-primitives", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", + "serai-processor-utxo-scheduler-primitives", +] + +[[package]] +name = "serai-processor-utxo-scheduler" +version = "0.1.0" +dependencies = [ + "borsh", + "group", + "parity-scale-codec", + "serai-db", + "serai-primitives", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", + "serai-processor-utxo-scheduler-primitives", +] + +[[package]] +name = "serai-processor-utxo-scheduler-primitives" +version = "0.1.0" +dependencies = [ + "borsh", + "serai-primitives", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", +] + +[[package]] +name = "serai-processor-view-keys" +version = "0.1.0" +dependencies = [ + "ciphersuite", +] + [[package]] name = "serai-reproducible-runtime-tests" version = "0.1.0" @@ -8527,7 +9803,7 @@ dependencies = [ "frame-support", "frame-system", "frame-system-rpc-runtime-api", - "hashbrown 0.14.5", + "hashbrown 0.15.2", "pallet-authorship", "pallet-babe", "pallet-grandpa", @@ -8590,29 +9866,44 @@ dependencies = [ "zeroize", ] +[[package]] +name = "serai-task" +version = "0.1.0" +dependencies = [ + "log", + "tokio", +] + [[package]] name = "serai-validator-sets-pallet" version = "0.1.0" 
dependencies = [ + "bitvec", + "ciphersuite", "frame-support", "frame-system", + "frost-schnorrkel", "hashbrown 0.14.5", + "modular-frost", "pallet-babe", "pallet-grandpa", + "pallet-timestamp", "parity-scale-codec", + "rand_core", "scale-info", "serai-coins-pallet", "serai-dex-pallet", "serai-primitives", "serai-validator-sets-primitives", - "sp-api", "sp-application-crypto", + "sp-consensus-babe", "sp-core", "sp-io", "sp-runtime", "sp-session", "sp-staking", "sp-std", + "zeroize", ] [[package]] @@ -8633,9 +9924,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.210" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" dependencies = [ "serde_derive", ] @@ -8651,21 +9942,22 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.210" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] name = "serde_json" -version = "1.0.128" +version = "1.0.134" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +checksum = "d00f4175c42ee48b15416f6193a959ba3a0d67fc699a0db9ad12df9f83991c7d" dependencies = [ + "indexmap 2.7.0", "itoa", "memchr", "ryu", @@ -8680,7 +9972,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -8706,15 +9998,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.11.0" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817" +checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.5.0", + "indexmap 2.7.0", "serde", "serde_derive", "serde_json", @@ -8812,9 +10104,9 @@ dependencies = [ [[package]] name = "simba" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "061507c94fc6ab4ba1c9a0305018408e312e17c041eb63bef8aa726fa33aceae" +checksum = "b3a386a501cd104797982c15ae17aafe8b9261315b5d07e3ec803f2ea26be0fa" dependencies = [ "approx", "num-complex", @@ -8863,6 +10155,9 @@ name = "smallvec" version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +dependencies = [ + "serde", +] [[package]] name = "snap" @@ -8899,9 +10194,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" dependencies = [ "libc", "windows-sys 0.52.0", @@ -8925,9 +10220,9 @@ dependencies = [ [[package]] name = "soketto" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" +checksum = "2e859df029d160cb88608f5d7df7fb4753fd20fdfb4de5644f3d8b8440841721" dependencies = [ "base64 0.22.1", "bytes", @@ -8956,7 +10251,7 @@ dependencies = [ "sp-std", "sp-trie", "sp-version", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -8970,7 +10265,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -9038,7 +10333,7 @@ dependencies = [ "sp-database", "sp-runtime", "sp-state-machine", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -9052,7 +10347,7 @@ dependencies = [ "sp-inherents", "sp-runtime", "sp-state-machine", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -9141,8 +10436,8 @@ dependencies = [ "sp-storage", "ss58-registry", "substrate-bip39", - "thiserror", - "tiny-bip39 1.0.2", + "thiserror 1.0.69", + "tiny-bip39", "tracing", "zeroize", ] @@ -9166,7 +10461,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "quote", "sp-core-hashing", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -9185,7 +10480,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -9210,7 +10505,7 @@ dependencies = [ "scale-info", "sp-runtime", "sp-std", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -9255,7 +10550,7 @@ dependencies = [ "parking_lot 0.12.3", "sp-core", "sp-externalities", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -9263,7 +10558,7 @@ name = "sp-maybe-compressed-blob" version = "4.1.0-dev" source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a" dependencies = [ - "thiserror", + "thiserror 1.0.69", "zstd 0.12.4", ] @@ -9357,7 +10652,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -9405,7 +10700,7 @@ dependencies = [ "sp-panic-handler", "sp-std", "sp-trie", - "thiserror", + "thiserror 1.0.69", "tracing", "trie-db", ] @@ -9438,7 +10733,7 @@ dependencies = [ "sp-inherents", "sp-runtime", "sp-std", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -9479,7 +10774,7 @@ dependencies = [ "schnellru", "sp-core", "sp-std", - "thiserror", + "thiserror 1.0.69", "tracing", "trie-db", "trie-root", @@ -9499,7 +10794,7 @@ dependencies = [ "sp-runtime", "sp-std", "sp-version-proc-macro", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -9510,7 +10805,7 @@ dependencies = [ "parity-scale-codec", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -9571,9 +10866,9 @@ checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" [[package]] name = "ss58-registry" -version = "1.50.0" +version = "1.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43fce22ed1df64d04b262351c8f9d5c6da4f76f79f25ad15529792f893fad25d" +checksum = "19409f13998e55816d1c728395af0b52ec066206341d939e22e7766df9b494b8" dependencies = [ "Inflector", "num-format", @@ -9603,7 +10898,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a2a1c578e98c1c16fc3b8ec1328f7659a500737d7a0c6d625e73e830ff9c1f6" dependencies = [ "bitflags 1.3.2", - "cfg_aliases", + "cfg_aliases 0.1.1", "libc", "parking_lot 0.11.2", "parking_lot_core 0.8.6", @@ -9613,11 +10908,11 @@ dependencies = [ [[package]] name = "static_init_macro" -version = "1.0.2" +version = "1.0.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a2595fc3aa78f2d0e45dd425b22282dd863273761cc77780914b2cf3003acf" +checksum = "1389c88ddd739ec6d3f8f83343764a0e944cd23cfbf126a9796a714b0b6edd6f" dependencies = [ - "cfg_aliases", + "cfg_aliases 0.1.1", "memchr", "proc-macro2", "quote", @@ -9628,7 +10923,7 @@ dependencies = [ name = "std-shims" version = "0.1.1" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.15.2", "spin 0.9.8", ] @@ -9698,7 +10993,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -9711,7 +11006,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -9720,12 +11015,25 @@ version = "0.4.5" source = "git+https://github.com/serai-dex/substrate-bip39#efb58a5263b8585b2d0ef17ed3c989e1ca09294e" dependencies = [ "hmac", - "pbkdf2", + "pbkdf2 0.12.2", "schnorrkel", "sha2", "zeroize", ] +[[package]] +name = "substrate-bn" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b5bbfa79abbae15dd642ea8176a21a635ff3c00059961d1ea27ad04e5b441c" +dependencies = [ + "byteorder", + "crunchy", + "lazy_static", + "rand", + "rustc-hex", +] + [[package]] name = "substrate-build-script-utils" version = "3.0.0" @@ -9758,7 +11066,7 @@ dependencies = [ "hyper 0.14.30", "log", "prometheus", - "thiserror", + "thiserror 1.0.69", "tokio", ] @@ -9799,9 +11107,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.79" +version = "2.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" +checksum = "987bc0be1cdea8b10216bd06e2ca407d40b9543468fafd3ddfb02f36e77f71f3" dependencies = [ "proc-macro2", "quote", @@ -9810,33 +11118,21 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.6" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3a850d65181df41b83c6be01a7d91f5e9377c43d48faa5af7d95816f437f5a3" +checksum = "c74af950d86ec0f5b2ae2d7f1590bbfbcf4603a0a15742d8f98132ac4fe3efd4" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.79", -] - -[[package]] -name = "syn_derive" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" -dependencies = [ - "proc-macro-error", - "proc-macro2", - "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] name = "sync_wrapper" -version = "0.1.2" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" [[package]] name = "synstructure" @@ -9851,21 +11147,32 @@ dependencies = [ ] [[package]] -name = "system-configuration" -version = "0.5.1" +name = "synstructure" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ - "bitflags 1.3.2", - "core-foundation", + "proc-macro2", + "quote", + "syn 2.0.94", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 
2.6.0", + "core-foundation 0.9.4", "system-configuration-sys", ] [[package]] name = "system-configuration-sys" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" dependencies = [ "core-foundation-sys", "libc", @@ -9885,12 +11192,13 @@ checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" [[package]] name = "tempfile" -version = "3.13.0" +version = "3.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" +checksum = "9a8a559c81686f576e8cd0290cd2a24a2a9ad80c98b3478856500fcbd7acd704" dependencies = [ "cfg-if", "fastrand", + "getrandom", "once_cell", "rustix", "windows-sys 0.59.0", @@ -9900,7 +11208,6 @@ dependencies = [ name = "tendermint-machine" version = "0.2.0" dependencies = [ - "async-trait", "futures-channel", "futures-util", "hex", @@ -9908,7 +11215,7 @@ dependencies = [ "parity-scale-codec", "patchable-async-sleep", "serai-db", - "thiserror", + "thiserror 2.0.9", "tokio", ] @@ -9923,28 +11230,48 @@ dependencies = [ [[package]] name = "termtree" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" +checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "thiserror" -version = "1.0.64" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f072643fd0190df67a8bab670c20ef5d8737177d6ac6b2e9a236cb096206b2cc" +dependencies = [ + "thiserror-impl 2.0.9", ] [[package]] name = "thiserror-impl" -version = "1.0.64" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.94", ] [[package]] @@ -9968,9 +11295,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ "deranged", "itoa", @@ -9989,9 +11316,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = 
"2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" dependencies = [ "num-conv", "time-core", @@ -9999,23 +11326,18 @@ dependencies = [ [[package]] name = "tiny-bip39" -version = "1.0.2" -dependencies = [ - "tiny-bip39 2.0.0", -] - -[[package]] -name = "tiny-bip39" -version = "2.0.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a30fd743a02bf35236f6faf99adb03089bb77e91c998dac2c2ad76bb424f668c" +checksum = "62cc94d358b5a1e84a5cb9109f559aa3c4d634d2b1b4de3d0fa4adc7c78e2861" dependencies = [ + "anyhow", + "hmac", "once_cell", - "pbkdf2", + "pbkdf2 0.11.0", "rand", "rustc-hash 1.1.0", "sha2", - "thiserror", + "thiserror 1.0.69", "unicode-normalization", "wasm-bindgen", "zeroize", @@ -10032,9 +11354,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" dependencies = [ "tinyvec_macros", ] @@ -10047,9 +11369,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.40.0" +version = "1.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" +checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" dependencies = [ "backtrace", "bytes", @@ -10058,7 +11380,7 @@ dependencies = [ "parking_lot 0.12.3", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.7", + "socket2 0.5.8", "tokio-macros", "windows-sys 0.52.0", ] @@ -10071,25 +11393,24 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] name = "tokio-rustls" -version = "0.26.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" +checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" dependencies = [ - "rustls 0.23.14", - "rustls-pki-types", + "rustls 0.23.20", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", "pin-project-lite", @@ -10099,9 +11420,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.12" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" +checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ "bytes", "futures-core", @@ -10147,7 +11468,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.7.0", "serde", "serde_spanned", "toml_datetime", @@ -10156,13 +11477,13 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.20" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.7.0", "toml_datetime", - "winnow 0.6.20", + "winnow 0.6.21", ] [[package]] @@ -10178,9 +11499,9 @@ dependencies = [ [[package]] name = "tower" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", @@ -10240,7 +11561,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -10326,10 +11647,9 @@ dependencies = [ ] [[package]] -name = "tributary-chain" +name = "tributary-sdk" version = "0.1.0" dependencies = [ - "async-trait", "blake2", "ciphersuite", "flexible-transcript", @@ -10344,7 +11664,7 @@ dependencies = [ "serai-db", "subtle", "tendermint-machine", - "thiserror", + "thiserror 2.0.9", "tokio", "zeroize", ] @@ -10390,7 +11710,7 @@ dependencies = [ "rand", "smallvec", "socket2 0.4.10", - "thiserror", + "thiserror 1.0.69", "tinyvec", "tokio", "tracing", @@ -10415,7 +11735,7 @@ dependencies = [ "once_cell", "rand", "smallvec", - "thiserror", + "thiserror 1.0.69", "tinyvec", "tokio", "tracing", @@ -10437,7 +11757,7 @@ dependencies = [ "rand", "resolv-conf", "smallvec", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", "trust-dns-proto 0.23.2", @@ -10499,15 +11819,21 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicode-bidi" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" +checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" [[package]] name = "unicode-ident" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" +checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" + +[[package]] +name = "unicode-joining-type" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22f8cb47ccb8bc750808755af3071da4a10dcd147b68fc874b7ae4b12543f6f5" [[package]] name = "unicode-normalization" @@ -10546,12 +11872,18 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.6.2", "bytes", "futures-io", "futures-util", ] +[[package]] +name = "unsigned-varint" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" + [[package]] name = "untrusted" version = "0.7.1" @@ -10566,15 +11898,21 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.2" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ 
"form_urlencoded", - "idna 0.5.0", + "idna 1.0.3", "percent-encoding", ] +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = "0.2.2" @@ -10583,9 +11921,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" +checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" [[package]] name = "valuable" @@ -10647,9 +11985,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.93" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396" dependencies = [ "cfg-if", "once_cell", @@ -10658,36 +11996,36 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.93" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79" dependencies = [ "bumpalo", "log", - "once_cell", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.43" +version = "0.4.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" +checksum = "38176d9b44ea84e9184eff0bc34cc167ed044f816accfe5922e54d84cf48eca2" dependencies = [ "cfg-if", "js-sys", + "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.93" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -10695,22 +12033,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.93" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.93" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" [[package]] name = "wasm-encoder" @@ -10741,7 +12079,7 @@ dependencies = [ "strum 0.24.1", "strum_macros 0.24.3", "tempfile", - "thiserror", + "thiserror 1.0.69", "wasm-opt-cxx-sys", "wasm-opt-sys", ] @@ -10791,8 +12129,8 @@ version = "0.110.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1dfcdb72d96f01e6c85b6bf20102e7423bdbaad5c337301bab2bbf253d26413c" dependencies = [ - "indexmap 2.5.0", - "semver 1.0.23", + "indexmap 2.7.0", + "semver 1.0.24", ] [[package]] @@ -10806,7 +12144,7 @@ dependencies = [ "bumpalo", "cfg-if", "fxprof-processed-profile", - "indexmap 2.5.0", + "indexmap 2.7.0", "libc", "log", "object 0.31.1", @@ -10873,7 +12211,7 @@ dependencies = [ "log", "object 0.31.1", "target-lexicon", - "thiserror", + "thiserror 1.0.69", "wasmparser", "wasmtime-cranelift-shared", "wasmtime-environ", @@ -10905,12 +12243,12 @@ dependencies = [ "anyhow", "cranelift-entity", "gimli 0.27.3", - "indexmap 2.5.0", + "indexmap 2.7.0", "log", "object 0.31.1", "serde", "target-lexicon", - "thiserror", + "thiserror 1.0.69", "wasmparser", "wasmtime-types", ] @@ -10972,7 +12310,7 @@ dependencies = [ "anyhow", "cc", "cfg-if", - "indexmap 2.5.0", + "indexmap 2.7.0", "libc", "log", "mach", @@ -10998,7 +12336,7 @@ checksum = "77943729d4b46141538e8d0b6168915dc5f88575ecdfea26753fd3ba8bab244a" dependencies = [ "cranelift-entity", "serde", - "thiserror", + "thiserror 1.0.69", "wasmparser", ] @@ -11010,14 +12348,38 @@ checksum = "ca7af9bb3ee875c4907835e607a275d10b04d15623d3aebe01afe8fbd3f85050" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", +] + +[[package]] +name = "wasmtimer" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0048ad49a55b9deb3953841fa1fc5858f0efbcb7a18868c899a360269fac1b23" +dependencies = [ + "futures", + "js-sys", + "parking_lot 0.12.3", + "pin-utils", + "slab", + "wasm-bindgen", ] [[package]] name = "web-sys" -version = "0.3.70" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" +checksum = "04dd7223427d52553d3702c004d3b2fe07c148165faa56313cb00211e31c12bc" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" dependencies = [ "js-sys", "wasm-bindgen", @@ -11043,9 +12405,9 @@ dependencies = [ [[package]] name = "wide" -version = "0.7.28" +version = "0.7.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b828f995bf1e9622031f8009f8481a85406ce1f4d4588ff746d872043e855690" +checksum = "58e6db2670d2be78525979e9a5f9c69d296fd7d670549fe9ebf70f8708cb5019" dependencies = [ "bytemuck", "safe_arch", @@ -11100,11 +12462,11 @@ dependencies = [ [[package]] name = "windows" -version = "0.54.0" +version = "0.58.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9252e5725dbed82865af151df558e754e4a3c2c30818359eb17465f1346a1b49" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" dependencies = [ - "windows-core 0.54.0", + "windows-core 0.58.0", "windows-targets 0.52.6", ] @@ -11119,20 +12481,55 @@ dependencies = [ [[package]] name = "windows-core" -version = "0.54.0" +version = "0.58.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12661b9c89351d684a50a8a643ce5f608e20243b9fb84687800163429f161d65" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" dependencies = [ + "windows-implement", + "windows-interface", "windows-result", + "windows-strings", "windows-targets 0.52.6", ] [[package]] -name = "windows-result" -version = "0.1.2" +name = "windows-implement" +version = "0.58.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.94", +] + +[[package]] +name = "windows-interface" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.94", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", "windows-targets 0.52.6", ] @@ -11295,9 +12692,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.20" +version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +checksum = "e6f5bb5257f2407a5425c6e749bfd9692192a73e70a6060516ac04f889087d68" dependencies = [ "memchr", ] @@ -11339,22 +12736,39 @@ version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" dependencies = [ - "asn1-rs", + "asn1-rs 0.5.2", "data-encoding", - "der-parser", + "der-parser 8.2.0", "lazy_static", "nom", - "oid-registry", + "oid-registry 0.6.1", "rusticata-macros", - "thiserror", + "thiserror 1.0.69", + "time", +] + +[[package]] +name = "x509-parser" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" +dependencies = [ + "asn1-rs 0.6.2", + "data-encoding", + "der-parser 9.0.0", + "lazy_static", + "nom", + "oid-registry 0.7.1", + "rusticata-macros", + "thiserror 1.0.69", "time", ] [[package]] name = "xml-rs" -version = "0.8.22" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af4e2e2f7cba5a093896c1e150fbfe177d1883e7448200efb81d40b9d339ef26" +checksum = "ea8b391c9a790b496184c29f7f93b9ed5b16abb306c05415b68bcc16e4d06432" [[package]] name = "xmltree" @@ -11380,6 +12794,22 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "yamux" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17610762a1207ee816c6fadc29220904753648aba0a9ed61c7b8336e80a559c4" +dependencies = [ + "futures", + "log", + "nohash-hasher", + "parking_lot 0.12.3", + "pin-project", + "rand", + "static_assertions", + "web-time", +] + [[package]] name = "yasna" version = "0.5.2" @@ -11415,7 +12845,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] @@ -11435,7 +12865,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.94", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 1aa7602f..7ac71666 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,6 +20,7 @@ members = [ 
"common/patchable-async-sleep", "common/db", "common/env", + "common/task", "common/request", "crypto/transcript", @@ -30,17 +31,25 @@ members = [ "crypto/ciphersuite", "crypto/multiexp", - "crypto/schnorr", "crypto/dleq", + + "crypto/evrf/secq256k1", + "crypto/evrf/embedwards25519", + "crypto/evrf/generalized-bulletproofs", + "crypto/evrf/circuit-abstraction", + "crypto/evrf/divisors", + "crypto/evrf/ec-gadgets", + "crypto/dkg", "crypto/frost", "crypto/schnorrkel", "networks/bitcoin", + "networks/ethereum/build-contracts", + "networks/ethereum/schnorr", "networks/ethereum/alloy-simple-request-transport", - "networks/ethereum", "networks/ethereum/relayer", "networks/monero/io", @@ -60,10 +69,37 @@ members = [ "message-queue", "processor/messages", - "processor", - "coordinator/tributary/tendermint", + "processor/key-gen", + "processor/view-keys", + "processor/frost-attempt-manager", + + "processor/primitives", + "processor/scanner", + "processor/scheduler/primitives", + "processor/scheduler/utxo/primitives", + "processor/scheduler/utxo/standard", + "processor/scheduler/utxo/transaction-chaining", + "processor/scheduler/smart-contract", + "processor/signers", + + "processor/bin", + "processor/bitcoin", + "processor/ethereum/primitives", + "processor/ethereum/test-primitives", + "processor/ethereum/deployer", + "processor/ethereum/router", + "processor/ethereum/erc20", + "processor/ethereum", + "processor/monero", + + "coordinator/tributary-sdk/tendermint", + "coordinator/tributary-sdk", + "coordinator/cosign", + "coordinator/substrate", "coordinator/tributary", + "coordinator/p2p", + "coordinator/p2p/libp2p", "coordinator", "substrate/primitives", @@ -105,9 +141,9 @@ members = [ "tests/docker", "tests/message-queue", - "tests/processor", - "tests/coordinator", - "tests/full-stack", + # TODO "tests/processor", + # TODO "tests/coordinator", + # TODO "tests/full-stack", "tests/reproducible-runtime", ] @@ -115,18 +151,32 @@ members = [ # to the extensive operations required for Bulletproofs [profile.dev.package] subtle = { opt-level = 3 } -curve25519-dalek = { opt-level = 3 } ff = { opt-level = 3 } group = { opt-level = 3 } crypto-bigint = { opt-level = 3 } +secp256k1 = { opt-level = 3 } +curve25519-dalek = { opt-level = 3 } dalek-ff-group = { opt-level = 3 } minimal-ed448 = { opt-level = 3 } multiexp = { opt-level = 3 } -monero-serai = { opt-level = 3 } +secq256k1 = { opt-level = 3 } +embedwards25519 = { opt-level = 3 } +generalized-bulletproofs = { opt-level = 3 } +generalized-bulletproofs-circuit-abstraction = { opt-level = 3 } +ec-divisors = { opt-level = 3 } +generalized-bulletproofs-ec-gadgets = { opt-level = 3 } + +dkg = { opt-level = 3 } + +monero-generators = { opt-level = 3 } +monero-borromean = { opt-level = 3 } +monero-bulletproofs = { opt-level = 3 } +monero-mlsag = { opt-level = 3 } +monero-clsag = { opt-level = 3 } [profile.release] panic = "unwind" @@ -141,9 +191,6 @@ parking_lot = { path = "patches/parking_lot" } zstd = { path = "patches/zstd" } # Needed for WAL compression rocksdb = { path = "patches/rocksdb" } -# 1.0.1 was yanked due to a breaking change (an extra field) -# 2.0 has fewer dependencies and still works within our tree -tiny-bip39 = { path = "patches/tiny-bip39" } # is-terminal now has an std-based solution with an equivalent API is-terminal = { path = "patches/is-terminal" } @@ -160,6 +207,7 @@ directories-next = { path = "patches/directories-next" } [workspace.lints.clippy] unwrap_or_default = "allow" +map_unwrap_or = "allow" borrow_as_ptr = "deny" 
cast_lossless = "deny" cast_possible_truncation = "deny" @@ -187,7 +235,6 @@ manual_instant_elapsed = "deny" manual_let_else = "deny" manual_ok_or = "deny" manual_string_new = "deny" -map_unwrap_or = "deny" match_bool = "deny" match_same_arms = "deny" missing_fields_in_debug = "deny" @@ -199,6 +246,7 @@ range_plus_one = "deny" redundant_closure_for_method_calls = "deny" redundant_else = "deny" string_add_assign = "deny" +string_slice = "deny" unchecked_duration_subtraction = "deny" uninlined_format_args = "deny" unnecessary_box_returns = "deny" diff --git a/LICENSE b/LICENSE index 34f2feb2..03c8975a 100644 --- a/LICENSE +++ b/LICENSE @@ -5,4 +5,4 @@ a full copy of the AGPL-3.0 License is included in the root of this repository as a reference text. This copy should be provided with any distribution of a crate licensed under the AGPL-3.0, as per its terms. -The GitHub actions (`.github/actions`) are licensed under the MIT license. +The GitHub actions/workflows (`.github`) are licensed under the MIT license. diff --git a/common/db/Cargo.toml b/common/db/Cargo.toml index e422b346..53ff012a 100644 --- a/common/db/Cargo.toml +++ b/common/db/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "serai-db" -version = "0.1.0" +version = "0.1.1" description = "A simple database trait and backends for it" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/common/db" authors = ["Luke Parker "] keywords = [] edition = "2021" -rust-version = "1.65" +rust-version = "1.71" [package.metadata.docs.rs] all-features = true @@ -18,7 +18,7 @@ workspace = true [dependencies] parity-db = { version = "0.4", default-features = false, optional = true } -rocksdb = { version = "0.21", default-features = false, features = ["zstd"], optional = true } +rocksdb = { version = "0.23", default-features = false, features = ["zstd"], optional = true } [features] parity-db = ["dep:parity-db"] diff --git a/common/db/README.md b/common/db/README.md new file mode 100644 index 00000000..83d4735f --- /dev/null +++ b/common/db/README.md @@ -0,0 +1,8 @@ +# Serai DB + +An inefficient, minimal abstraction around databases. + +The abstraction offers `get`, `put`, and `del` with helper functions and macros +built on top. Database iteration is not offered, forcing the caller to manually +implement indexing schemes. This ensures wide compatibility across abstracted +databases. diff --git a/common/db/src/create_db.rs b/common/db/src/create_db.rs index abd86e46..f5bd6e91 100644 --- a/common/db/src/create_db.rs +++ b/common/db/src/create_db.rs @@ -38,12 +38,21 @@ pub fn serai_db_key( #[macro_export] macro_rules! create_db { ($db_name: ident { - $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)* + $( + $field_name: ident: + $(<$($generic_name: tt: $generic_type: tt),+>)?( + $($arg: ident: $arg_type: ty),* + ) -> $field_type: ty$(,)? + )* }) => { $( #[derive(Clone, Debug)] - pub(crate) struct $field_name; - impl $field_name { + pub(crate) struct $field_name$( + <$($generic_name: $generic_type),+> + )?$( + (core::marker::PhantomData<($($generic_name),+)>) + )?; + impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? { pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> { use scale::Encode; $crate::serai_db_key( @@ -52,18 +61,43 @@ macro_rules!
create_db { ($($arg),*).encode() ) } - pub(crate) fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type) { - let key = $field_name::key($($arg),*); + pub(crate) fn set( + txn: &mut impl DbTxn + $(, $arg: $arg_type)*, + data: &$field_type + ) { + let key = Self::key($($arg),*); txn.put(&key, borsh::to_vec(data).unwrap()); } - pub(crate) fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> { - getter.get($field_name::key($($arg),*)).map(|data| { + pub(crate) fn get( + getter: &impl Get, + $($arg: $arg_type),* + ) -> Option<$field_type> { + getter.get(Self::key($($arg),*)).map(|data| { borsh::from_slice(data.as_ref()).unwrap() }) } + // Returns a PhantomData of all generic types so if the generic was only used in the value, + // not the keys, this doesn't have unused generic types #[allow(dead_code)] - pub(crate) fn del(txn: &mut impl DbTxn $(, $arg: $arg_type)*) { - txn.del(&$field_name::key($($arg),*)) + pub(crate) fn del( + txn: &mut impl DbTxn + $(, $arg: $arg_type)* + ) -> core::marker::PhantomData<($($($generic_name),+)?)> { + txn.del(&Self::key($($arg),*)); + core::marker::PhantomData + } + + pub(crate) fn take( + txn: &mut impl DbTxn + $(, $arg: $arg_type)* + ) -> Option<$field_type> { + let key = Self::key($($arg),*); + let res = txn.get(&key).map(|data| borsh::from_slice(data.as_ref()).unwrap()); + if res.is_some() { + txn.del(key); + } + res } } )* @@ -73,19 +107,30 @@ macro_rules! create_db { #[macro_export] macro_rules! db_channel { ($db_name: ident { - $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)* + $($field_name: ident: + $(<$($generic_name: tt: $generic_type: tt),+>)?( + $($arg: ident: $arg_type: ty),* + ) -> $field_type: ty$(,)? + )* }) => { $( create_db! { $db_name { - $field_name: ($($arg: $arg_type,)* index: u32) -> $field_type, + $field_name: $(<$($generic_name: $generic_type),+>)?( + $($arg: $arg_type,)* + index: u32 + ) -> $field_type } } - impl $field_name { - pub(crate) fn send(txn: &mut impl DbTxn $(, $arg: $arg_type)*, value: &$field_type) { + impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? { + pub(crate) fn send( + txn: &mut impl DbTxn + $(, $arg: $arg_type)* + , value: &$field_type + ) { // Use index 0 to store the amount of messages - let messages_sent_key = $field_name::key($($arg),*, 0); + let messages_sent_key = Self::key($($arg,)* 0); let messages_sent = txn.get(&messages_sent_key).map(|counter| { u32::from_le_bytes(counter.try_into().unwrap()) }).unwrap_or(0); @@ -96,19 +141,35 @@ macro_rules! 
db_channel { // at the same time let index_to_use = messages_sent + 2; - $field_name::set(txn, $($arg),*, index_to_use, value); + Self::set(txn, $($arg,)* index_to_use, value); } - pub(crate) fn try_recv(txn: &mut impl DbTxn $(, $arg: $arg_type)*) -> Option<$field_type> { - let messages_recvd_key = $field_name::key($($arg),*, 1); + pub(crate) fn peek( + getter: &impl Get + $(, $arg: $arg_type)* + ) -> Option<$field_type> { + let messages_recvd_key = Self::key($($arg,)* 1); + let messages_recvd = getter.get(&messages_recvd_key).map(|counter| { + u32::from_le_bytes(counter.try_into().unwrap()) + }).unwrap_or(0); + + let index_to_read = messages_recvd + 2; + + Self::get(getter, $($arg,)* index_to_read) + } + pub(crate) fn try_recv( + txn: &mut impl DbTxn + $(, $arg: $arg_type)* + ) -> Option<$field_type> { + let messages_recvd_key = Self::key($($arg,)* 1); let messages_recvd = txn.get(&messages_recvd_key).map(|counter| { u32::from_le_bytes(counter.try_into().unwrap()) }).unwrap_or(0); let index_to_read = messages_recvd + 2; - let res = $field_name::get(txn, $($arg),*, index_to_read); + let res = Self::get(txn, $($arg,)* index_to_read); if res.is_some() { - $field_name::del(txn, $($arg),*, index_to_read); + Self::del(txn, $($arg,)* index_to_read); txn.put(&messages_recvd_key, (messages_recvd + 1).to_le_bytes()); } res diff --git a/common/db/src/lib.rs b/common/db/src/lib.rs index 1c08fe3d..72ff4367 100644 --- a/common/db/src/lib.rs +++ b/common/db/src/lib.rs @@ -14,26 +14,43 @@ mod parity_db; #[cfg(feature = "parity-db")] pub use parity_db::{ParityDb, new_parity_db}; -/// An object implementing get. +/// An object implementing `get`. pub trait Get { + /// Get a value from the database. fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>>; } -/// An atomic database operation. +/// An atomic database transaction. +/// +/// A transaction is only required to atomically commit. It is not required that two `Get` calls +/// made with the same transaction return the same result, if another transaction wrote to that +/// key. +/// +/// If two transactions are created, and both write (including deletions) to the same key, behavior +/// is undefined. The transaction may block, deadlock, panic, overwrite one of the two values +/// randomly, or any other action, at time of write or at time of commit. #[must_use] pub trait DbTxn: Send + Get { + /// Write a value to this key. fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>); + /// Delete the value from this key. fn del(&mut self, key: impl AsRef<[u8]>); + /// Commit this transaction. fn commit(self); } -/// A database supporting atomic operations. +/// A database supporting atomic transactions. pub trait Db: 'static + Send + Sync + Clone + Get { + /// The type representing a database transaction. type Transaction<'a>: DbTxn; + /// Calculate a key for a database entry. + /// + /// Keys are separated by the database, the item within the database, and the item's key itself. fn key(db_dst: &'static [u8], item_dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> { let db_len = u8::try_from(db_dst.len()).unwrap(); let dst_len = u8::try_from(item_dst.len()).unwrap(); [[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat() } + /// Open a new transaction. fn txn(&mut self) -> Self::Transaction<'_>; }
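To make the interface above concrete, here is a minimal usage sketch against the in-memory backend from `mem.rs` below. It is only a sketch: it assumes `MemDb::new()` as the constructor and that `MemDb` is re-exported from the crate root, and relies solely on the `Get`, `DbTxn`, and `Db` traits as declared in this patch; the keys and values are illustrative.

```rust
use serai_db::{Get, DbTxn, Db, MemDb};

fn main() {
  // Assumption: `MemDb::new()` constructs the in-memory backend defined in common/db/src/mem.rs
  let mut db = MemDb::new();

  // Writes happen within a transaction and only persist once it commits
  let mut txn = db.txn();
  txn.put(b"key", b"value");
  // A transaction sees its own uncommitted writes
  assert_eq!(txn.get(b"key"), Some(b"value".to_vec()));
  txn.commit();

  // `Get` is implemented by both the database and its transactions
  assert_eq!(db.get(b"key"), Some(b"value".to_vec()));

  // `Db::key` namespaces a key by database and item, each length-prefixed:
  // [4] ++ "MyDb" ++ [6] ++ "MyItem" ++ key
  let namespaced = <MemDb as Db>::key(b"MyDb", b"MyItem", b"key");
  assert_eq!(namespaced.len(), 1 + 4 + 1 + 6 + 3);

  // Deletion also goes through a transaction
  let mut txn = db.txn();
  txn.del(b"key");
  txn.commit();
  assert_eq!(db.get(b"key"), None);
}
```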
fn txn(&mut self) -> Self::Transaction<'_>; } diff --git a/common/db/src/mem.rs b/common/db/src/mem.rs index ecac300e..d24aa109 100644 --- a/common/db/src/mem.rs +++ b/common/db/src/mem.rs @@ -11,7 +11,7 @@ use crate::*; #[derive(PartialEq, Eq, Debug)] pub struct MemDbTxn<'a>(&'a MemDb, HashMap, Vec>, HashSet>); -impl<'a> Get for MemDbTxn<'a> { +impl Get for MemDbTxn<'_> { fn get(&self, key: impl AsRef<[u8]>) -> Option> { if self.2.contains(key.as_ref()) { return None; @@ -23,7 +23,7 @@ impl<'a> Get for MemDbTxn<'a> { .or_else(|| self.0 .0.read().unwrap().get(key.as_ref()).cloned()) } } -impl<'a> DbTxn for MemDbTxn<'a> { +impl DbTxn for MemDbTxn<'_> { fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) { self.2.remove(key.as_ref()); self.1.insert(key.as_ref().to_vec(), value.as_ref().to_vec()); diff --git a/common/env/Cargo.toml b/common/env/Cargo.toml index 8e296a66..be34cbac 100644 --- a/common/env/Cargo.toml +++ b/common/env/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/env" authors = ["Luke Parker "] keywords = [] edition = "2021" -rust-version = "1.60" +rust-version = "1.71" [package.metadata.docs.rs] all-features = true diff --git a/common/patchable-async-sleep/Cargo.toml b/common/patchable-async-sleep/Cargo.toml index e2d1e9cf..b4a19c5a 100644 --- a/common/patchable-async-sleep/Cargo.toml +++ b/common/patchable-async-sleep/Cargo.toml @@ -7,6 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/patchable-a authors = ["Luke Parker "] keywords = ["async", "sleep", "tokio", "smol", "async-std"] edition = "2021" +rust-version = "1.71" [package.metadata.docs.rs] all-features = true diff --git a/common/request/Cargo.toml b/common/request/Cargo.toml index e5018056..d960e91b 100644 --- a/common/request/Cargo.toml +++ b/common/request/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/simple-requ authors = ["Luke Parker "] keywords = ["http", "https", "async", "request", "ssl"] edition = "2021" -rust-version = "1.64" +rust-version = "1.71" [package.metadata.docs.rs] all-features = true diff --git a/common/request/src/response.rs b/common/request/src/response.rs index 78295d37..e4628f72 100644 --- a/common/request/src/response.rs +++ b/common/request/src/response.rs @@ -11,7 +11,7 @@ use crate::{Client, Error}; #[allow(dead_code)] #[derive(Debug)] pub struct Response<'a>(pub(crate) hyper::Response, pub(crate) &'a Client); -impl<'a> Response<'a> { +impl Response<'_> { pub fn status(&self) -> StatusCode { self.0.status() } diff --git a/common/std-shims/Cargo.toml b/common/std-shims/Cargo.toml index 534a4216..d2bda4b7 100644 --- a/common/std-shims/Cargo.toml +++ b/common/std-shims/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims" authors = ["Luke Parker "] keywords = ["nostd", "no_std", "alloc", "io"] edition = "2021" -rust-version = "1.70" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true @@ -18,7 +18,7 @@ workspace = true [dependencies] spin = { version = "0.9", default-features = false, features = ["use_ticket_mutex", "lazy"] } -hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] } +hashbrown = { version = "0.15", default-features = false, features = ["default-hasher", "inline-more"] } [features] std = [] diff --git a/common/task/Cargo.toml b/common/task/Cargo.toml new file mode 100644 index 00000000..f96e4557 --- /dev/null +++ 
b/common/task/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "serai-task" +version = "0.1.0" +description = "A task schema for Serai services" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/common/task" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false +rust-version = "1.75" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +log = { version = "0.4", default-features = false, features = ["std"] } +tokio = { version = "1", default-features = false, features = ["macros", "sync", "time"] } diff --git a/processor/LICENSE b/common/task/LICENSE similarity index 94% rename from processor/LICENSE rename to common/task/LICENSE index c425427c..41d5a261 100644 --- a/processor/LICENSE +++ b/common/task/LICENSE @@ -1,6 +1,6 @@ AGPL-3.0-only license -Copyright (c) 2022-2023 Luke Parker +Copyright (c) 2022-2024 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as diff --git a/common/task/README.md b/common/task/README.md new file mode 100644 index 00000000..db1d02ba --- /dev/null +++ b/common/task/README.md @@ -0,0 +1,3 @@ +# Task + +A schema to define tasks to be run ad infinitum. diff --git a/common/task/src/lib.rs b/common/task/src/lib.rs new file mode 100644 index 00000000..83eac9bf --- /dev/null +++ b/common/task/src/lib.rs @@ -0,0 +1,161 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::{ + fmt::{self, Debug}, + future::Future, + time::Duration, +}; + +use tokio::sync::mpsc; + +mod type_name; + +/// A handle for a task. +/// +/// The task will only stop running once all handles for it are dropped. +// +// `run_now` isn't infallible if the task may have been closed. `run_now` on a closed task would +// either need to panic (historic behavior), silently drop the fact the task can't be run, or +// return an error. Instead of having a potential panic, and instead of modeling the error +// behavior, this task can't be closed unless all handles are dropped, ensuring calls to `run_now` +// are infallible. +#[derive(Clone)] +pub struct TaskHandle { + run_now: mpsc::Sender<()>, + #[allow(dead_code)] // This is used to track if all handles have been dropped + close: mpsc::Sender<()>, +} + +/// A task's internal structures. +pub struct Task { + run_now: mpsc::Receiver<()>, + close: mpsc::Receiver<()>, +} + +impl Task { + /// Create a new task definition. + pub fn new() -> (Self, TaskHandle) { + // Uses a capacity of 1 as any call to run as soon as possible satisfies all calls to run as + // soon as possible + let (run_now_send, run_now_recv) = mpsc::channel(1); + // And any call to close satisfies all calls to close + let (close_send, close_recv) = mpsc::channel(1); + ( + Self { run_now: run_now_recv, close: close_recv }, + TaskHandle { run_now: run_now_send, close: close_send }, + ) + } +} + +impl TaskHandle { + /// Tell the task to run now (and not whenever its next iteration on a timer is). 
+ pub fn run_now(&self) { + #[allow(clippy::match_same_arms)] + match self.run_now.try_send(()) { + Ok(()) => {} + // NOP on full, as this task will already be ran as soon as possible + Err(mpsc::error::TrySendError::Full(())) => {} + Err(mpsc::error::TrySendError::Closed(())) => { + // The task should only be closed if all handles are dropped, and this one hasn't been + panic!("task was unexpectedly closed when calling run_now") + } + } + } +} + +/// An enum which can't be constructed, representing that the task does not error. +pub enum DoesNotError {} +impl Debug for DoesNotError { + fn fmt(&self, _: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + // This type can't be constructed so we'll never have a `&self` to call this fn with + unreachable!() + } +} + +/// A task to be continually ran. +pub trait ContinuallyRan: Sized + Send { + /// The amount of seconds before this task should be polled again. + const DELAY_BETWEEN_ITERATIONS: u64 = 5; + /// The maximum amount of seconds before this task should be run again. + /// + /// Upon error, the amount of time waited will be linearly increased until this limit. + const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 120; + + /// The error potentially yielded upon running an iteration of this task. + type Error: Debug; + + /// Run an iteration of the task. + /// + /// If this returns `true`, all dependents of the task will immediately have a new iteration ran + /// (without waiting for whatever timer they were already on). + fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>>; + + /// Continually run the task. + fn continually_run( + mut self, + mut task: Task, + dependents: Vec<TaskHandle>, + ) -> impl Send + Future<Output = ()> { + async move { + // The default number of seconds to sleep before running the task again + let default_sleep_before_next_task = Self::DELAY_BETWEEN_ITERATIONS; + // The current number of seconds to sleep before running the task again + // We increment this upon errors in order to not flood the logs with errors + let mut current_sleep_before_next_task = default_sleep_before_next_task; + let increase_sleep_before_next_task = |current_sleep_before_next_task: &mut u64| { + let new_sleep = *current_sleep_before_next_task + default_sleep_before_next_task; + // Set a limit of sleeping for two minutes + *current_sleep_before_next_task = new_sleep.min(Self::MAX_DELAY_BETWEEN_ITERATIONS); + }; + + loop { + // If we were told to close/all handles were dropped, drop it + { + let should_close = task.close.try_recv(); + match should_close { + Ok(()) | Err(mpsc::error::TryRecvError::Disconnected) => break, + Err(mpsc::error::TryRecvError::Empty) => {} + } + } + + match self.run_iteration().await { + Ok(run_dependents) => { + // Upon a successful (error-free) loop iteration, reset the amount of time we sleep + current_sleep_before_next_task = default_sleep_before_next_task; + + if run_dependents { + for dependent in &dependents { + dependent.run_now(); + } + } + } + Err(e) => { + // Get the type name + let type_name = type_name::strip_type_name(core::any::type_name::<Self>()); + // Print the error as a warning, prefixed by the task's type + log::warn!("{type_name}: {e:?}"); + increase_sleep_before_next_task(&mut current_sleep_before_next_task); + } + } + + // Don't run the task again for another few seconds UNLESS told to run now + /* + We could replace tokio::mpsc with async_channel, tokio::time::sleep with + patchable_async_sleep::sleep, and tokio::select with futures_lite::future::or + It isn't worth the effort when patchable_async_sleep::sleep will still resolve
to tokio + */ + tokio::select! { + () = tokio::time::sleep(Duration::from_secs(current_sleep_before_next_task)) => {}, + msg = task.run_now.recv() => { + // Check if this is firing because the handle was dropped + if msg.is_none() { + break; + } + }, + } + } + } + } +} diff --git a/common/task/src/type_name.rs b/common/task/src/type_name.rs new file mode 100644 index 00000000..c6ba1658 --- /dev/null +++ b/common/task/src/type_name.rs @@ -0,0 +1,31 @@ +/// Strip the modules from a type name. +// This may be of the form `a::b::C`, in which case we only want `C` +pub(crate) fn strip_type_name(full_type_name: &'static str) -> String { + // It also may be `a::b::C`, in which case, we only attempt to strip `a::b` + let mut by_generics = full_type_name.split('<'); + + // Strip to just `C` + let full_outer_object_name = by_generics.next().unwrap(); + let mut outer_object_name_parts = full_outer_object_name.split("::"); + let mut last_part_in_outer_object_name = outer_object_name_parts.next().unwrap(); + for part in outer_object_name_parts { + last_part_in_outer_object_name = part; + } + + // Push back on the generic terms + let mut type_name = last_part_in_outer_object_name.to_string(); + for generic in by_generics { + type_name.push('<'); + type_name.push_str(generic); + } + type_name +} + +#[test] +fn test_strip_type_name() { + assert_eq!(strip_type_name("core::option::Option"), "Option"); + assert_eq!( + strip_type_name("core::option::Option"), + "Option" + ); +} diff --git a/common/zalloc/Cargo.toml b/common/zalloc/Cargo.toml index af4e7c1c..88e59ec0 100644 --- a/common/zalloc/Cargo.toml +++ b/common/zalloc/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/zalloc" authors = ["Luke Parker "] keywords = [] edition = "2021" -rust-version = "1.77.0" +rust-version = "1.77" [package.metadata.docs.rs] all-features = true diff --git a/coordinator/Cargo.toml b/coordinator/Cargo.toml index ae4e2be7..4296423f 100644 --- a/coordinator/Cargo.toml +++ b/coordinator/Cargo.toml @@ -8,6 +8,7 @@ authors = ["Luke Parker "] keywords = [] edition = "2021" publish = false +rust-version = "1.81" [package.metadata.docs.rs] all-features = true @@ -17,48 +18,44 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [dependencies] -async-trait = { version = "0.1", default-features = false } - zeroize = { version = "^1.5", default-features = false, features = ["std"] } +bitvec = { version = "1", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false, features = ["std"] } blake2 = { version = "0.10", default-features = false, features = ["std"] } +schnorrkel = { version = "0.11", default-features = false, features = ["std"] } -transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] } -ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] } -schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std"] } -frost = { package = "modular-frost", path = "../crypto/frost" } +ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] } +dkg = { path = "../crypto/dkg", default-features = false, features = ["std"] } frost-schnorrkel = { path = "../crypto/schnorrkel" } -scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] } +hex = { version = "0.4", 
default-features = false, features = ["std"] } +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } zalloc = { path = "../common/zalloc" } serai-db = { path = "../common/db" } serai-env = { path = "../common/env" } +serai-task = { path = "../common/task", version = "0.1" } -processor-messages = { package = "serai-processor-messages", path = "../processor/messages" } +messages = { package = "serai-processor-messages", path = "../processor/messages" } message-queue = { package = "serai-message-queue", path = "../message-queue" } -tributary = { package = "tributary-chain", path = "./tributary" } +tributary-sdk = { path = "./tributary-sdk" } -sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] } serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] } -hex = { version = "0.4", default-features = false, features = ["std"] } -borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } - log = { version = "0.4", default-features = false, features = ["std"] } env_logger = { version = "0.10", default-features = false, features = ["humantime"] } -futures-util = { version = "0.3", default-features = false, features = ["std"] } -tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } -libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "request-response", "gossipsub", "macros"] } +tokio = { version = "1", default-features = false, features = ["time", "sync", "macros", "rt-multi-thread"] } -[dev-dependencies] -tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] } -sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] } -sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] } +serai-cosign = { path = "./cosign" } +serai-coordinator-substrate = { path = "./substrate" } +serai-coordinator-tributary = { path = "./tributary" } +serai-coordinator-p2p = { path = "./p2p" } +serai-coordinator-libp2p-p2p = { path = "./p2p/libp2p" } [features] -longer-reattempts = [] +longer-reattempts = ["serai-coordinator-tributary/longer-reattempts"] parity-db = ["serai-db/parity-db"] rocksdb = ["serai-db/rocksdb"] diff --git a/coordinator/LICENSE b/coordinator/LICENSE index f684d027..621233a9 100644 --- a/coordinator/LICENSE +++ b/coordinator/LICENSE @@ -1,6 +1,6 @@ AGPL-3.0-only license -Copyright (c) 2023 Luke Parker +Copyright (c) 2023-2025 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as diff --git a/coordinator/README.md b/coordinator/README.md index ed41ef71..88d2ef63 100644 --- a/coordinator/README.md +++ b/coordinator/README.md @@ -1,7 +1,29 @@ # Coordinator -The Serai coordinator communicates with other coordinators to prepare batches -for Serai and sign transactions. +- [`tendermint`](/tributary/tendermint) is an implementation of the Tendermint + BFT algorithm. -In order to achieve consensus over gossip, and order certain events, a -micro-blockchain is instantiated. +- [`tributary-sdk`](./tributary-sdk) is a micro-blockchain framework. 
Instead + of producing a blockchain daemon like the Polkadot SDK or Cosmos SDK intend + to, `tributary` is solely intended to be an embedded asynchronous task within + an application. + + The Serai coordinator spawns a tributary for each validator set it's + coordinating. This allows the participating validators to communicate in a + byzantine-fault-tolerant manner (relying on Tendermint for consensus). + +- [`cosign`](./cosign) contains a library to decide which Substrate blocks + should be cosigned and to evaluate cosigns. + +- [`substrate`](./substrate) contains a library to index the Substrate + blockchain and handle its events. + +- [`tributary`](./tributary) is our instantiation of the Tributary SDK for the + Serai processor. It includes the `Transaction` definition and deferred + execution logic. + +- [`p2p`](./p2p) is our abstract P2P API to service the Coordinator. + +- [`libp2p`](./p2p/libp2p) is our libp2p-backed implementation of the P2P API. + +- [`src`](./src) contains the source code for the Coordinator binary itself. diff --git a/coordinator/cosign/Cargo.toml b/coordinator/cosign/Cargo.toml new file mode 100644 index 00000000..bf111f85 --- /dev/null +++ b/coordinator/cosign/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "serai-cosign" +version = "0.1.0" +description = "Evaluator of cosigns for the Serai network" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/cosign" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false +rust-version = "1.81" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +blake2 = { version = "0.10", default-features = false, features = ["std"] } +schnorrkel = { version = "0.11", default-features = false, features = ["std"] } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } +serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] } + +log = { version = "0.4", default-features = false, features = ["std"] } + +tokio = { version = "1", default-features = false } + +serai-db = { path = "../../common/db", version = "0.1.1" } +serai-task = { path = "../../common/task", version = "0.1" } diff --git a/networks/ethereum/LICENSE b/coordinator/cosign/LICENSE similarity index 94% rename from networks/ethereum/LICENSE rename to coordinator/cosign/LICENSE index c425427c..26d57cbb 100644 --- a/networks/ethereum/LICENSE +++ b/coordinator/cosign/LICENSE @@ -1,6 +1,6 @@ AGPL-3.0-only license -Copyright (c) 2022-2023 Luke Parker +Copyright (c) 2023-2024 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as diff --git a/coordinator/cosign/README.md b/coordinator/cosign/README.md new file mode 100644 index 00000000..50ce52a6 --- /dev/null +++ b/coordinator/cosign/README.md @@ -0,0 +1,121 @@ +# Serai Cosign + +The Serai blockchain is controlled by a set of validators referred to as the +Serai validators. These validators could attempt to double-spend, even if every +node on the network is a full node, via equivocating.
+ +Posit: + - The Serai validators control X SRI + - The Serai validators produce block A swapping X SRI to Y XYZ + - The Serai validators produce block B swapping X SRI to Z ABC + - The Serai validators finalize block A and send to the validators for XYZ + - The Serai validators finalize block B and send to the validators for ABC + +This is solved via the cosigning protocol. The validators for XYZ and the +validators for ABC each sign their view of the Serai blockchain, communicating +amongst each other to ensure consistency. + +The security of the cosigning protocol is not formally proven, and there are no +claims it achieves Byzantine Fault Tolerance. This protocol is meant to be +practical and make such attacks infeasible, when they could already be argued +difficult to perform. + +### Definitions + +- Cosign: A signature from a non-Serai validator set for a Serai block +- Cosign Commit: A collection of cosigns which achieve the necessary weight + +### Methodology + +Finalized blocks from the Serai network are intended to be cosigned if they +contain burn events. Only once cosigned should non-Serai validators process +them. + +Cosigning occurs by a non-Serai validator set, using their threshold keys +declared on the Serai blockchain. Once 83% of non-Serai validator sets, by +weight, cosign a block, a cosign commit is formed. A cosign commit for a block +is considered to also cosign for all blocks preceding it. + +### Bounds Under Asynchrony + +Assuming an asynchronous environment fully controlled by the adversary, 34% of +a validator set may cause an equivocation. Control of 67% of non-Serai +validator sets, by weight, is sufficient to produce two distinct cosign commits +at the same position. This is due to the honest stake, 33%, being split across +the two candidates (67% + 16.5% = 83.5%, just over the threshold). This means +the cosigning protocol may produce multiple cosign commits if 34% of 67%, just +22.78%, of the non-Serai validator sets, is malicious. This would be in +conjunction with 34% of the Serai validator set (assumed 20% of total stake), +for a total stake requirement of 34% of 20% + 22.78% of 80% (25.024%). This is +an increase from the 6.8% required without the cosigning protocol. + +### Bounds Under Synchrony + +Assuming the honest stake within the non-Serai validator sets detect the +malicious stake within their set prior to assisting in producing a cosign for +their set, for which there is a multi-second window, 67% of 67% of non-Serai +validator sets is required to produce cosigns for those sets. This raises the +total stake requirement to 42.712% (past the usual 34% threshold). + +### Behavior Reliant on Synchrony + +If the Serai blockchain node detects an equivocation, it will stop responding +to all RPC requests and stop participating in finalizing further blocks. This +lets the node communicate the equivocating commits to other nodes (causing them +to exhibit the same behavior), yet prevents interaction with it. + +If cosigns representing 17% of the non-Serai validators sets by weight are +detected for distinct blocks at the same position, the protocol halts. An +explicit latency period of seventy seconds is enacted after receiving a cosign +commit for the detection of such an equivocation. This is largely redundant +given how the Serai blockchain node will presumably have halted itself by this +time. 
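To make the stake arithmetic in the bounds sections above concrete, the following is a minimal sketch (illustrative only, not part of this changeset) which re-derives the quoted figures, assuming, as stated above, that the Serai validator set is 20% of total stake and the non-Serai sets hold the remaining 80%:

```rust
// Re-derivation of the figures quoted above; all percentages are expressed as fractions.
fn main() {
  // Asynchronous case: 34% of the Serai set colludes with 34% of 67% of the non-Serai sets
  let non_serai_async = 0.34 * 0.67;
  assert!((non_serai_async - 0.2278).abs() < 1e-9); // 22.78%
  let total_async = (0.34 * 0.20) + (non_serai_async * 0.80);
  assert!((total_async - 0.25024).abs() < 1e-9); // 25.024%

  // Synchronous case: 67% of 67% of the non-Serai sets is required instead
  let non_serai_sync = 0.67 * 0.67;
  let total_sync = (0.34 * 0.20) + (non_serai_sync * 0.80);
  assert!((total_sync - 0.42712).abs() < 1e-9); // 42.712%
}
```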
+ +### Equivocation-Detection Avoidance + +Malicious Serai validators could avoid detection of their equivocating if they +produced two distinct blockchains, A and B, with different keys declared for +the same non-Serai validator set. While the validators following A may detect +the cosigns for distinct blocks by validators following B, the cosigns would be +assumed invalid due to their signatures being verified against distinct keys. + +This is prevented by requiring cosigns on the blocks which declare new keys, +ensuring all validators have a consistent view of the keys used within the +cosigning protocol (per the bounds of the cosigning protocol). These blocks are +exempt from the general policy of cosign commits cosigning all prior blocks, +preventing the newly declared keys (which aren't yet cosigned) from being used +to cosign themselves. These cosigns are flagged as "notable", are permanently +archived, and must be synced before a validator will move forward. + +Cosigning the block which declares new keys also ensures agreement on the +preceding block which declared the new set, with an exact specification of the +participants and their weight, before it impacts the cosigning protocol. + +### Denial of Service Concerns + +Any historical Serai validator set may trigger a chain halt by producing an +equivocation after their retirement. This requires 67% to be malicious. 34% of the +active Serai validator set may also trigger a chain halt. + +Since 17% of non-Serai validator sets equivocating causes a halt, 5.67% of +non-Serai validator sets' stake may cause a halt (in an asynchronous +environment fully controlled by the adversary). In a synchronous environment +where the honest stake cannot be split across two candidates, 11.33% of +non-Serai validator sets' stake is required. + +The more practical attack is for one to obtain 5.67% of non-Serai validator +sets' stake, under any network conditions, and simply go offline. This will +take 17% of validator sets offline with it, preventing any cosign commits +from being performed. A fallback protocol where validators individually produce +cosigns, removing the network's horizontal scalability but ensuring liveness, +prevents this, restoring the additional requirements for control of an +asynchronous network or 11.33% of non-Serai validator sets' stake. + +### TODO + +The Serai node no longer responding to RPC requests upon detecting any +equivocation, and the fallback protocol where validators individually produce +signatures, are not implemented at this time. The former means the detection of +equivocating cosigns is not redundant and the latter makes 5.67% of non-Serai +validator sets' stake the DoS threshold, even without control of an +asynchronous network. diff --git a/coordinator/cosign/src/delay.rs b/coordinator/cosign/src/delay.rs new file mode 100644 index 00000000..3439135b --- /dev/null +++ b/coordinator/cosign/src/delay.rs @@ -0,0 +1,57 @@ +use core::future::Future; +use std::time::{Duration, SystemTime}; + +use serai_db::*; +use serai_task::{DoesNotError, ContinuallyRan}; + +use crate::evaluator::CosignedBlocks; + +/// How often callers should broadcast the cosigns flagged for rebroadcasting. +pub const BROADCAST_FREQUENCY: Duration = Duration::from_secs(60); +const SYNCHRONY_EXPECTATION: Duration = Duration::from_secs(10); +const ACKNOWLEDGEMENT_DELAY: Duration = + Duration::from_secs(BROADCAST_FREQUENCY.as_secs() + SYNCHRONY_EXPECTATION.as_secs()); + +create_db!( + SubstrateCosignDelay { + // The latest cosigned block number.
+ LatestCosignedBlockNumber: () -> u64, + } +); + +/// A task to delay acknowledgement of cosigns. +pub(crate) struct CosignDelayTask<D: Db> { + pub(crate) db: D, +} + +impl<D: Db> ContinuallyRan for CosignDelayTask<D> { + type Error = DoesNotError; + + fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> { + async move { + let mut made_progress = false; + loop { + let mut txn = self.db.txn(); + + // Receive the next block to mark as cosigned + let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else { + break; + }; + // Calculate when we should mark it as valid + let time_valid = + SystemTime::UNIX_EPOCH + Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY; + // Sleep until then + tokio::time::sleep(time_valid.duration_since(SystemTime::now()).unwrap_or(Duration::ZERO)) + .await; + + // Set the cosigned block + LatestCosignedBlockNumber::set(&mut txn, &block_number); + txn.commit(); + + made_progress = true; + } + + Ok(made_progress) + } + } +} diff --git a/coordinator/cosign/src/evaluator.rs b/coordinator/cosign/src/evaluator.rs new file mode 100644 index 00000000..905e60fc --- /dev/null +++ b/coordinator/cosign/src/evaluator.rs @@ -0,0 +1,246 @@ +use core::future::Future; +use std::time::{Duration, Instant, SystemTime}; + +use serai_db::*; +use serai_task::ContinuallyRan; + +use crate::{ + HasEvents, GlobalSession, NetworksLatestCosignedBlock, RequestNotableCosigns, + intend::{GlobalSessionsChannel, BlockEventData, BlockEvents}, +}; + +create_db!( + SubstrateCosignEvaluator { + // The global session currently being evaluated. + CurrentlyEvaluatedGlobalSession: () -> ([u8; 32], GlobalSession), + } +); + +db_channel!( + SubstrateCosignEvaluatorChannels { + // (cosigned block, time cosign was evaluated) + CosignedBlocks: () -> (u64, u64), + } +); + +// This is a strict function which won't panic, even with a malicious Serai node, so long as: +// - It's called incrementally (with an increment of 1) +// - It's only called for block numbers we've completed indexing on within the intend task +// - It's only called for block numbers after a global session has started +// - The global sessions channel is populated as the block declaring the session is indexed +// Which all hold true within the context of this task and the intend task. +// +// This function will also ensure the currently evaluated global session is incremented once we +// finish evaluation of the prior session.
+fn currently_evaluated_global_session_strict( + txn: &mut impl DbTxn, + block_number: u64, +) -> ([u8; 32], GlobalSession) { + let mut res = { + let existing = match CurrentlyEvaluatedGlobalSession::get(txn) { + Some(existing) => existing, + None => { + let first = GlobalSessionsChannel::try_recv(txn) + .expect("fetching latest global session yet none declared"); + CurrentlyEvaluatedGlobalSession::set(txn, &first); + first + } + }; + assert!( + existing.1.start_block_number <= block_number, + "candidate's start block number exceeds our block number" + ); + existing + }; + + if let Some(next) = GlobalSessionsChannel::peek(txn) { + assert!( + block_number <= next.1.start_block_number, + "currently_evaluated_global_session_strict wasn't called incrementally" + ); + // If it's time for this session to activate, take it from the channel and set it + if block_number == next.1.start_block_number { + GlobalSessionsChannel::try_recv(txn).unwrap(); + CurrentlyEvaluatedGlobalSession::set(txn, &next); + res = next; + } + } + + res +} + +pub(crate) fn currently_evaluated_global_session(getter: &impl Get) -> Option<[u8; 32]> { + CurrentlyEvaluatedGlobalSession::get(getter).map(|(id, _info)| id) +} + +/// A task to determine if a block has been cosigned and we should handle it. +pub(crate) struct CosignEvaluatorTask { + pub(crate) db: D, + pub(crate) request: R, + pub(crate) last_request_for_cosigns: Instant, +} + +impl ContinuallyRan for CosignEvaluatorTask { + type Error = String; + + fn run_iteration(&mut self) -> impl Send + Future> { + let should_request_cosigns = |last_request_for_cosigns: &mut Instant| { + const REQUEST_COSIGNS_SPACING: Duration = Duration::from_secs(60); + if Instant::now() < (*last_request_for_cosigns + REQUEST_COSIGNS_SPACING) { + return false; + } + *last_request_for_cosigns = Instant::now(); + true + }; + + async move { + let mut known_cosign = None; + let mut made_progress = false; + loop { + let mut txn = self.db.txn(); + let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn) + else { + break; + }; + + // Fetch the global session information + let (global_session, global_session_info) = + currently_evaluated_global_session_strict(&mut txn, block_number); + + match has_events { + // Because this had notable events, we require an explicit cosign for this block by a + // supermajority of the prior block's validator sets + HasEvents::Notable => { + let mut weight_cosigned = 0; + for set in global_session_info.sets { + // Check if we have the cosign from this set + if NetworksLatestCosignedBlock::get(&txn, global_session, set.network) + .map(|signed_cosign| signed_cosign.cosign.block_number) == + Some(block_number) + { + // Since have this cosign, add the set's weight to the weight which has cosigned + weight_cosigned += + global_session_info.stakes.get(&set.network).ok_or_else(|| { + "ValidatorSet in global session yet didn't have its stake".to_string() + })?; + } + } + // Check if the sum weight doesn't cross the required threshold + if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) { + // Request the necessary cosigns over the network + if should_request_cosigns(&mut self.last_request_for_cosigns) { + self + .request + .request_notable_cosigns(global_session) + .await + .map_err(|e| format!("{e:?}"))?; + } + // We return an error so the delay before this task is run again increases + return Err(format!( + "notable block (#{block_number}) wasn't yet cosigned. 
this should resolve shortly", + )); + } + + log::info!("marking notable block #{block_number} as cosigned"); + } + // Since this block didn't have any notable events, we simply require a cosign for this + // block or a greater block by the current validator sets + HasEvents::NonNotable => { + // Check if this was satisfied by a cached result which wasn't calculated incrementally + let known_cosigned = if let Some(known_cosign) = known_cosign { + known_cosign >= block_number + } else { + // Clear `known_cosign` which is no longer helpful + known_cosign = None; + false + }; + + // If it isn't already known to be cosigned, evaluate the latest cosigns + if !known_cosigned { + /* + LatestCosign is populated with the latest cosigns for each network which don't + exceed the latest global session we've evaluated the start of. This current block + is during the latest global session we've evaluated the start of. + */ + + let mut weight_cosigned = 0; + let mut lowest_common_block: Option = None; + for set in global_session_info.sets { + // Check if this set cosigned this block or not + let Some(cosign) = + NetworksLatestCosignedBlock::get(&txn, global_session, set.network) + else { + continue; + }; + if cosign.cosign.block_number >= block_number { + weight_cosigned += + global_session_info.stakes.get(&set.network).ok_or_else(|| { + "ValidatorSet in global session yet didn't have its stake".to_string() + })?; + } + + // Update the lowest block common to all of these cosigns + lowest_common_block = lowest_common_block + .map(|existing| existing.min(cosign.cosign.block_number)) + .or(Some(cosign.cosign.block_number)); + } + + // Check if the sum weight doesn't cross the required threshold + if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) { + // Request the superseding notable cosigns over the network + // If this session hasn't yet produced notable cosigns, then we presume we'll see + // the desired non-notable cosigns as part of normal operations, without needing to + // explicitly request them + if should_request_cosigns(&mut self.last_request_for_cosigns) { + self + .request + .request_notable_cosigns(global_session) + .await + .map_err(|e| format!("{e:?}"))?; + } + // We return an error so the delay before this task is run again increases + return Err(format!( + "block (#{block_number}) wasn't yet cosigned. this should resolve shortly", + )); + } + + // Update the cached result for the block we know is cosigned + /* + There may be a higher block which was cosigned, but once we get to this block, + we'll re-evaluate and find it then. The alternative would be an optimistic + re-evaluation now. Both are fine, so the lower-complexity option is preferred. 
+ */ + known_cosign = lowest_common_block; + } + + log::debug!("marking non-notable block #{block_number} as cosigned"); + } + // If this block has no events necessitating cosigning, we can immediately consider the + // block cosigned (making this block a NOP) + HasEvents::No => {} + } + + // Since we checked we had the necessary cosigns, send it for delay before acknowledgement + CosignedBlocks::send( + &mut txn, + &( + block_number, + SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap_or(Duration::ZERO) + .as_secs(), + ), + ); + txn.commit(); + + if (block_number % 500) == 0 { + log::info!("marking block #{block_number} as cosigned"); + } + + made_progress = true; + } + + Ok(made_progress) + } + } +} diff --git a/coordinator/cosign/src/intend.rs b/coordinator/cosign/src/intend.rs new file mode 100644 index 00000000..08643aad --- /dev/null +++ b/coordinator/cosign/src/intend.rs @@ -0,0 +1,184 @@ +use core::future::Future; +use std::{sync::Arc, collections::HashMap}; + +use serai_client::{ + primitives::{SeraiAddress, Amount}, + validator_sets::primitives::ExternalValidatorSet, + Serai, +}; + +use serai_db::*; +use serai_task::ContinuallyRan; + +use crate::*; + +create_db!( + CosignIntend { + ScanCosignFrom: () -> u64, + } +); + +#[derive(Debug, BorshSerialize, BorshDeserialize)] +pub(crate) struct BlockEventData { + pub(crate) block_number: u64, + pub(crate) has_events: HasEvents, +} + +db_channel! { + CosignIntendChannels { + GlobalSessionsChannel: () -> ([u8; 32], GlobalSession), + BlockEvents: () -> BlockEventData, + IntendedCosigns: (set: ExternalValidatorSet) -> CosignIntent, + } +} + +async fn block_has_events_justifying_a_cosign( + serai: &Serai, + block_number: u64, +) -> Result<(Block, HasEvents), String> { + let block = serai + .finalized_block_by_number(block_number) + .await + .map_err(|e| format!("{e:?}"))? + .ok_or_else(|| "couldn't get block which should've been finalized".to_string())?; + let serai = serai.as_of(block.hash()); + + if !serai.validator_sets().key_gen_events().await.map_err(|e| format!("{e:?}"))?.is_empty() { + return Ok((block, HasEvents::Notable)); + } + + if !serai.coins().burn_with_instruction_events().await.map_err(|e| format!("{e:?}"))?.is_empty() { + return Ok((block, HasEvents::NonNotable)); + } + + Ok((block, HasEvents::No)) +} + +/// A task to determine which blocks we should intend to cosign. 
+pub(crate) struct CosignIntendTask { + pub(crate) db: D, + pub(crate) serai: Arc, +} + +impl ContinuallyRan for CosignIntendTask { + type Error = String; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1); + let latest_block_number = + self.serai.latest_finalized_block().await.map_err(|e| format!("{e:?}"))?.number(); + + for block_number in start_block_number ..= latest_block_number { + let mut txn = self.db.txn(); + + let (block, mut has_events) = + block_has_events_justifying_a_cosign(&self.serai, block_number) + .await + .map_err(|e| format!("{e:?}"))?; + + // Check we are indexing a linear chain + if (block_number > 1) && + (<[u8; 32]>::from(block.header.parent_hash) != + SubstrateBlockHash::get(&txn, block_number - 1) + .expect("indexing a block but haven't indexed its parent")) + { + Err(format!( + "node's block #{block_number} doesn't build upon the block #{} prior indexed", + block_number - 1 + ))?; + } + let block_hash = block.hash(); + SubstrateBlockHash::set(&mut txn, block_number, &block_hash); + + let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn); + + // If this is notable, it creates a new global session, which we index into the database + // now + if has_events == HasEvents::Notable { + let serai = self.serai.as_of(block_hash); + let sets_and_keys = cosigning_sets(&serai).await?; + let global_session = + GlobalSession::id(sets_and_keys.iter().map(|(set, _key)| *set).collect()); + + let mut sets = Vec::with_capacity(sets_and_keys.len()); + let mut keys = HashMap::with_capacity(sets_and_keys.len()); + let mut stakes = HashMap::with_capacity(sets_and_keys.len()); + let mut total_stake = 0; + for (set, key) in &sets_and_keys { + sets.push(*set); + keys.insert(set.network, SeraiAddress::from(*key)); + let stake = serai + .validator_sets() + .total_allocated_stake(set.network.into()) + .await + .map_err(|e| format!("{e:?}"))? 
+ .unwrap_or(Amount(0)) + .0; + stakes.insert(set.network, stake); + total_stake += stake; + } + if total_stake == 0 { + Err(format!("cosigning sets for block #{block_number} had 0 stake in total"))?; + } + + let global_session_info = GlobalSession { + // This session starts cosigning after this block, as this block must be cosigned by + // the existing validators + start_block_number: block_number + 1, + sets, + keys, + stakes, + total_stake, + }; + GlobalSessions::set(&mut txn, global_session, &global_session_info); + if let Some(ending_global_session) = global_session_for_this_block { + GlobalSessionsLastBlock::set(&mut txn, ending_global_session, &block_number); + } + LatestGlobalSessionIntended::set(&mut txn, &global_session); + GlobalSessionsChannel::send(&mut txn, &(global_session, global_session_info)); + } + + // If there isn't anyone available to cosign this block, meaning it'll never be cosigned, + // we flag it as not having any events requiring cosigning so we don't attempt to + // sign/require a cosign for it + if global_session_for_this_block.is_none() { + has_events = HasEvents::No; + } + + match has_events { + HasEvents::Notable | HasEvents::NonNotable => { + let global_session_for_this_block = global_session_for_this_block + .expect("global session for this block was None but still attempting to cosign it"); + let global_session_info = GlobalSessions::get(&txn, global_session_for_this_block) + .expect("last global session intended wasn't saved to the database"); + + // Tell each set of their expectation to cosign this block + for set in global_session_info.sets { + log::debug!("{:?} will be cosigning block #{block_number}", set); + IntendedCosigns::send( + &mut txn, + set, + &CosignIntent { + global_session: global_session_for_this_block, + block_number, + block_hash, + notable: has_events == HasEvents::Notable, + }, + ); + } + } + HasEvents::No => {} + } + + // Populate a singular feed with every block's status for the evluator to work off of + BlockEvents::send(&mut txn, &(BlockEventData { block_number, has_events })); + // Mark this block as handled, meaning we should scan from the next block moving on + ScanCosignFrom::set(&mut txn, &(block_number + 1)); + txn.commit(); + } + + Ok(start_block_number <= latest_block_number) + } + } +} diff --git a/coordinator/cosign/src/lib.rs b/coordinator/cosign/src/lib.rs new file mode 100644 index 00000000..e98127b4 --- /dev/null +++ b/coordinator/cosign/src/lib.rs @@ -0,0 +1,505 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::{fmt::Debug, future::Future}; +use std::{sync::Arc, collections::HashMap, time::Instant}; + +use blake2::{Digest, Blake2s256}; + +use scale::{Encode, Decode}; +use borsh::{BorshSerialize, BorshDeserialize}; + +use serai_client::{ + primitives::{ExternalNetworkId, SeraiAddress}, + validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair}, + Public, Block, Serai, TemporalSerai, +}; + +use serai_db::*; +use serai_task::*; + +/// The cosigns which are intended to be performed. +mod intend; +/// The evaluator of the cosigns. +mod evaluator; +/// The task to delay acknowledgement of the cosigns. +mod delay; +pub use delay::BROADCAST_FREQUENCY; +use delay::LatestCosignedBlockNumber; + +/// The schnorrkel context to used when signing a cosign. +pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign"; + +/// A 'global session', defined as all validator sets used for cosigning at a given moment. 
+/// +/// We evaluate cosign faults within a global session. This ensures even if cosigners cosign +/// distinct blocks at distinct positions within a global session, we still identify the faults. +/* + There is the attack where a validator set is given an alternate blockchain with a key generation + event at block #n, while most validator sets are given a blockchain with a key generation event + at block number #(n+1). This prevents whoever has the alternate blockchain from verifying the + cosigns on the primary blockchain, and detecting the faults, if they use the keys as of the block + prior to the block being cosigned. + + We solve this by binding cosigns to a global session ID, which has a specific start block, and + reading the keys from the start block. This means that so long as all validator sets agree on the + start of a global session, they can verify all cosigns produced by that session, regardless of + how it advances. Since agreeing on the start of a global session is mandated, there's no way to + have validator sets follow two distinct global sessions without breaking the bounds of the + cosigning protocol. +*/ +#[derive(Debug, BorshSerialize, BorshDeserialize)] +pub(crate) struct GlobalSession { + pub(crate) start_block_number: u64, + pub(crate) sets: Vec, + pub(crate) keys: HashMap, + pub(crate) stakes: HashMap, + pub(crate) total_stake: u64, +} +impl GlobalSession { + fn id(mut cosigners: Vec) -> [u8; 32] { + cosigners.sort_by_key(|a| borsh::to_vec(a).unwrap()); + Blake2s256::digest(borsh::to_vec(&cosigners).unwrap()).into() + } +} + +/// If the block has events. +#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] +enum HasEvents { + /// The block had a notable event. + /// + /// This is a special case as blocks with key gen events change the keys used for cosigning, and + /// accordingly must be cosigned before we advance past them. + Notable, + /// The block had an non-notable event justifying a cosign. + NonNotable, + /// The block didn't have an event justifying a cosign. + No, +} + +/// An intended cosign. +#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] +pub struct CosignIntent { + /// The global session this cosign is being performed under. + pub global_session: [u8; 32], + /// The number of the block to cosign. + pub block_number: u64, + /// The hash of the block to cosign. + pub block_hash: [u8; 32], + /// If this cosign must be handled before further cosigns are. + pub notable: bool, +} + +/// A cosign. +#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)] +pub struct Cosign { + /// The global session this cosign is being performed under. + pub global_session: [u8; 32], + /// The number of the block to cosign. + pub block_number: u64, + /// The hash of the block to cosign. + pub block_hash: [u8; 32], + /// The actual cosigner. + pub cosigner: ExternalNetworkId, +} + +impl CosignIntent { + /// Convert this into a `Cosign`. + pub fn into_cosign(self, cosigner: ExternalNetworkId) -> Cosign { + let CosignIntent { global_session, block_number, block_hash, notable: _ } = self; + Cosign { global_session, block_number, block_hash, cosigner } + } +} + +impl Cosign { + /// The message to sign to sign this cosign. + /// + /// This must be signed with schnorrkel, the context set to `COSIGN_CONTEXT`. + pub fn signature_message(&self) -> Vec { + // We use a schnorrkel context to domain-separate this + self.encode() + } +} + +/// A signed cosign. 
+#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] +pub struct SignedCosign { + /// The cosign. + pub cosign: Cosign, + /// The signature for the cosign. + pub signature: [u8; 64], +} + +impl SignedCosign { + fn verify_signature(&self, signer: serai_client::Public) -> bool { + let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false }; + let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false }; + + signer.verify_simple(COSIGN_CONTEXT, &self.cosign.signature_message(), &signature).is_ok() + } +} + +create_db! { + Cosign { + // The following are populated by the intend task and used throughout the library + + // An index of Substrate blocks + SubstrateBlockHash: (block_number: u64) -> [u8; 32], + // A mapping from a global session's ID to its relevant information. + GlobalSessions: (global_session: [u8; 32]) -> GlobalSession, + // The last block to be cosigned by a global session. + GlobalSessionsLastBlock: (global_session: [u8; 32]) -> u64, + // The latest global session intended. + // + // This is distinct from the latest global session for which we've evaluated the cosigns for. + LatestGlobalSessionIntended: () -> [u8; 32], + + // The following are managed by the `intake_cosign` function present in this file + + // The latest cosigned block for each network. + // + // This will only be populated with cosigns predating or during the most recent global session + // to have its start cosigned. + // + // The global session changes upon a notable block, causing each global session to have exactly + // one notable block. All validator sets will explicitly produce a cosign for their notable + // block, causing the latest cosigned block for a global session to either be the global + // session's notable cosigns or the network's latest cosigns. + NetworksLatestCosignedBlock: ( + global_session: [u8; 32], + network: ExternalNetworkId + ) -> SignedCosign, + // Cosigns received for blocks not locally recognized as finalized. + Faults: (global_session: [u8; 32]) -> Vec, + // The global session which faulted. + FaultedSession: () -> [u8; 32], + } +} + +/// Fetch the keys used for cosigning by a specific network. +async fn keys_for_network( + serai: &TemporalSerai<'_>, + network: ExternalNetworkId, +) -> Result, String> { + let Some(latest_session) = + serai.validator_sets().session(network.into()).await.map_err(|e| format!("{e:?}"))? + else { + // If this network hasn't had a session declared, move on + return Ok(None); + }; + + // Get the keys for the latest session + if let Some(keys) = serai + .validator_sets() + .keys(ExternalValidatorSet { network, session: latest_session }) + .await + .map_err(|e| format!("{e:?}"))? + { + return Ok(Some((latest_session, keys))); + } + + // If the latest session has yet to set keys, use the prior session + if let Some(prior_session) = latest_session.0.checked_sub(1).map(Session) { + if let Some(keys) = serai + .validator_sets() + .keys(ExternalValidatorSet { network, session: prior_session }) + .await + .map_err(|e| format!("{e:?}"))? + { + return Ok(Some((prior_session, keys))); + } + } + + Ok(None) +} + +/// Fetch the `ExternalValidatorSet`s, and their associated keys, used for cosigning as of this +/// block. 
+async fn cosigning_sets( + serai: &TemporalSerai<'_>, +) -> Result, String> { + let mut sets = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len()); + for network in serai_client::primitives::EXTERNAL_NETWORKS { + let Some((session, keys)) = keys_for_network(serai, network).await? else { + // If this network doesn't have usable keys, move on + continue; + }; + + sets.push((ExternalValidatorSet { network, session }, keys.0)); + } + Ok(sets) +} + +/// An object usable to request notable cosigns for a block. +pub trait RequestNotableCosigns: 'static + Send { + /// The error type which may be encountered when requesting notable cosigns. + type Error: Debug; + + /// Request the notable cosigns for this global session. + fn request_notable_cosigns( + &self, + global_session: [u8; 32], + ) -> impl Send + Future>; +} + +/// An error used to indicate the cosigning protocol has faulted. +#[derive(Debug)] +pub struct Faulted; + +/// An error incurred while intaking a cosign. +#[derive(Debug)] +pub enum IntakeCosignError { + /// Cosign is for a not-yet-indexed block + NotYetIndexedBlock, + /// A later cosign for this cosigner has already been handled + StaleCosign, + /// The cosign's global session isn't recognized + UnrecognizedGlobalSession, + /// The cosign is for a block before its global session starts + BeforeGlobalSessionStart, + /// The cosign is for a block after its global session ends + AfterGlobalSessionEnd, + /// The cosign's signing network wasn't a participant in this global session + NonParticipatingNetwork, + /// The cosign had an invalid signature + InvalidSignature, + /// The cosign is for a global session which has yet to have its declaration block cosigned + FutureGlobalSession, +} + +impl IntakeCosignError { + /// If this error is temporal to the local view + pub fn temporal(&self) -> bool { + match self { + IntakeCosignError::NotYetIndexedBlock | + IntakeCosignError::StaleCosign | + IntakeCosignError::UnrecognizedGlobalSession | + IntakeCosignError::FutureGlobalSession => true, + IntakeCosignError::BeforeGlobalSessionStart | + IntakeCosignError::AfterGlobalSessionEnd | + IntakeCosignError::NonParticipatingNetwork | + IntakeCosignError::InvalidSignature => false, + } + } +} + +/// The interface to manage cosigning with. +pub struct Cosigning { + db: D, +} +impl Cosigning { + /// Spawn the tasks to intend and evaluate cosigns. + /// + /// The database specified must only be used with a singular instance of the Serai network, and + /// only used once at any given time. + pub fn spawn( + db: D, + serai: Arc, + request: R, + tasks_to_run_upon_cosigning: Vec, + ) -> Self { + let (intend_task, _intend_task_handle) = Task::new(); + let (evaluator_task, evaluator_task_handle) = Task::new(); + let (delay_task, delay_task_handle) = Task::new(); + tokio::spawn( + (intend::CosignIntendTask { db: db.clone(), serai }) + .continually_run(intend_task, vec![evaluator_task_handle]), + ); + tokio::spawn( + (evaluator::CosignEvaluatorTask { + db: db.clone(), + request, + last_request_for_cosigns: Instant::now(), + }) + .continually_run(evaluator_task, vec![delay_task_handle]), + ); + tokio::spawn( + (delay::CosignDelayTask { db: db.clone() }) + .continually_run(delay_task, tasks_to_run_upon_cosigning), + ); + Self { db } + } + + /// The latest cosigned block number. 
+ pub fn latest_cosigned_block_number(getter: &impl Get) -> Result { + if FaultedSession::get(getter).is_some() { + Err(Faulted)?; + } + + Ok(LatestCosignedBlockNumber::get(getter).unwrap_or(0)) + } + + /// Fetch a cosigned Substrate block's hash by its block number. + pub fn cosigned_block(getter: &impl Get, block_number: u64) -> Result, Faulted> { + if block_number > Self::latest_cosigned_block_number(getter)? { + return Ok(None); + } + + Ok(Some( + SubstrateBlockHash::get(getter, block_number).expect("cosigned block but didn't index it"), + )) + } + + /// Fetch the notable cosigns for a global session in order to respond to requests. + /// + /// If this global session hasn't produced any notable cosigns, this will return the latest + /// cosigns for this session. + pub fn notable_cosigns(getter: &impl Get, global_session: [u8; 32]) -> Vec { + let mut cosigns = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len()); + for network in serai_client::primitives::EXTERNAL_NETWORKS { + if let Some(cosign) = NetworksLatestCosignedBlock::get(getter, global_session, network) { + cosigns.push(cosign); + } + } + cosigns + } + + /// The cosigns to rebroadcast every `BROADCAST_FREQUENCY` seconds. + /// + /// This will be the most recent cosigns, in case the initial broadcast failed, or the faulty + /// cosigns, in case of a fault, to induce identification of the fault by others. + pub fn cosigns_to_rebroadcast(&self) -> Vec { + if let Some(faulted) = FaultedSession::get(&self.db) { + let mut cosigns = Faults::get(&self.db, faulted).expect("faulted with no faults"); + // Also include all of our recognized-as-honest cosigns in an attempt to induce fault + // identification in those who see the faulty cosigns as honest + for network in serai_client::primitives::EXTERNAL_NETWORKS { + if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, faulted, network) { + if cosign.cosign.global_session == faulted { + cosigns.push(cosign); + } + } + } + cosigns + } else { + let Some(global_session) = evaluator::currently_evaluated_global_session(&self.db) else { + return vec![]; + }; + let mut cosigns = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len()); + for network in serai_client::primitives::EXTERNAL_NETWORKS { + if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, global_session, network) { + cosigns.push(cosign); + } + } + cosigns + } + } + + /// Intake a cosign. + // + // Takes `&mut self` as this should only be called once at any given moment. + pub fn intake_cosign(&mut self, signed_cosign: &SignedCosign) -> Result<(), IntakeCosignError> { + let cosign = &signed_cosign.cosign; + let network = cosign.cosigner; + + // Check our indexed blockchain includes a block with this block number + let Some(our_block_hash) = SubstrateBlockHash::get(&self.db, cosign.block_number) else { + Err(IntakeCosignError::NotYetIndexedBlock)? + }; + let faulty = cosign.block_hash != our_block_hash; + + // Check this isn't a dated cosign within its global session (as it would be if rebroadcasted) + if !faulty { + if let Some(existing) = + NetworksLatestCosignedBlock::get(&self.db, cosign.global_session, network) + { + if existing.cosign.block_number >= cosign.block_number { + Err(IntakeCosignError::StaleCosign)?; + } + } + } + + let Some(global_session) = GlobalSessions::get(&self.db, cosign.global_session) else { + Err(IntakeCosignError::UnrecognizedGlobalSession)? 
+ }; + + // Check the cosigned block number is in range to the global session + if cosign.block_number < global_session.start_block_number { + // Cosign is for a block predating the global session + Err(IntakeCosignError::BeforeGlobalSessionStart)?; + } + if !faulty { + // This prevents a malicious validator set, on the same chain, from producing a cosign after + // their final block, replacing their notable cosign + if let Some(last_block) = GlobalSessionsLastBlock::get(&self.db, cosign.global_session) { + if cosign.block_number > last_block { + // Cosign is for a block after the last block this global session should have signed + Err(IntakeCosignError::AfterGlobalSessionEnd)?; + } + } + } + + // Check the cosign's signature + { + let key = Public::from({ + let Some(key) = global_session.keys.get(&network) else { + Err(IntakeCosignError::NonParticipatingNetwork)? + }; + *key + }); + + if !signed_cosign.verify_signature(key) { + Err(IntakeCosignError::InvalidSignature)?; + } + } + + // Since we verified this cosign's signature, and have a chain sufficiently long, handle the + // cosign + + let mut txn = self.db.txn(); + + if !faulty { + // If this is for a future global session, we don't acknowledge this cosign at this time + let latest_cosigned_block_number = LatestCosignedBlockNumber::get(&txn).unwrap_or(0); + // This global session starts the block *after* its declaration, so we want to check if the + // block declaring it was cosigned + if (global_session.start_block_number - 1) > latest_cosigned_block_number { + drop(txn); + return Err(IntakeCosignError::FutureGlobalSession); + } + + // This is safe as it's in-range and newer, as prior checked since it isn't faulty + NetworksLatestCosignedBlock::set(&mut txn, cosign.global_session, network, signed_cosign); + } else { + let mut faults = Faults::get(&txn, cosign.global_session).unwrap_or(vec![]); + // Only handle this as a fault if this set wasn't prior faulty + if !faults.iter().any(|cosign| cosign.cosign.cosigner == network) { + faults.push(signed_cosign.clone()); + Faults::set(&mut txn, cosign.global_session, &faults); + + let mut weight_cosigned = 0; + for fault in &faults { + let stake = global_session + .stakes + .get(&fault.cosign.cosigner) + .expect("cosigner with recognized key didn't have a stake entry saved"); + weight_cosigned += stake; + } + + // Check if the sum weight means a fault has occurred + if weight_cosigned >= ((global_session.total_stake * 17) / 100) { + FaultedSession::set(&mut txn, &cosign.global_session); + } + } + } + + txn.commit(); + Ok(()) + } + + /// Receive intended cosigns to produce for this ExternalValidatorSet. + /// + /// All cosigns intended, up to and including the next notable cosign, are returned. + /// + /// This will drain the internal channel and not re-yield these intentions again. + pub fn intended_cosigns(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec { + let mut res: Vec = vec![]; + // While we have yet to find a notable cosign... 
+ while !res.last().map(|cosign| cosign.notable).unwrap_or(false) { + let Some(intent) = intend::IntendedCosigns::try_recv(txn, set) else { break }; + res.push(intent); + } + res + } +} diff --git a/coordinator/p2p/Cargo.toml b/coordinator/p2p/Cargo.toml new file mode 100644 index 00000000..0e55e8e6 --- /dev/null +++ b/coordinator/p2p/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "serai-coordinator-p2p" +version = "0.1.0" +description = "Serai coordinator's P2P abstraction" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/p2p" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false +rust-version = "1.81" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +serai-db = { path = "../../common/db", version = "0.1" } + +serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] } +serai-cosign = { path = "../cosign" } +tributary-sdk = { path = "../tributary-sdk" } + +futures-lite = { version = "2", default-features = false, features = ["std"] } +tokio = { version = "1", default-features = false, features = ["sync", "macros"] } + +log = { version = "0.4", default-features = false, features = ["std"] } +serai-task = { path = "../../common/task", version = "0.1" } diff --git a/coordinator/p2p/LICENSE b/coordinator/p2p/LICENSE new file mode 100644 index 00000000..621233a9 --- /dev/null +++ b/coordinator/p2p/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2023-2025 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/coordinator/p2p/README.md b/coordinator/p2p/README.md new file mode 100644 index 00000000..7a8de210 --- /dev/null +++ b/coordinator/p2p/README.md @@ -0,0 +1,3 @@ +# Serai Coordinator P2P + +The P2P abstraction used by Serai's coordinator, and tasks over it. 
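Before the diff moves on to the P2P crates, a sketch of how a consumer might drain the `intended_cosigns` channel just defined; the database handle, the `ExternalValidatorSet`, and the `queue_for_signing` hook are assumptions rather than code from this diff, with `D` standing in for the database type parameter.

// Drain every intended cosign for `set`, up to and including the next notable one,
// inside a single transaction, and hand each intent to the actual signing machinery.
let mut txn = db.txn();
for intent in Cosigning::<D>::intended_cosigns(&mut txn, set) {
  queue_for_signing(set, intent); // hypothetical hook into the set's signing code
}
txn.commit();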
diff --git a/coordinator/p2p/libp2p/Cargo.toml b/coordinator/p2p/libp2p/Cargo.toml new file mode 100644 index 00000000..7707beb7 --- /dev/null +++ b/coordinator/p2p/libp2p/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "serai-coordinator-libp2p-p2p" +version = "0.1.0" +description = "Serai coordinator's libp2p-based P2P backend" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/p2p/libp2p" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false +rust-version = "1.81" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +async-trait = { version = "0.1", default-features = false } + +rand_core = { version = "0.6", default-features = false, features = ["std"] } + +zeroize = { version = "^1.5", default-features = false, features = ["std"] } +blake2 = { version = "0.10", default-features = false, features = ["std"] } +schnorrkel = { version = "0.11", default-features = false, features = ["std"] } + +hex = { version = "0.4", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +serai-client = { path = "../../../substrate/client", default-features = false, features = ["serai", "borsh"] } +serai-cosign = { path = "../../cosign" } +tributary-sdk = { path = "../../tributary-sdk" } + +futures-util = { version = "0.3", default-features = false, features = ["std"] } +tokio = { version = "1", default-features = false, features = ["sync"] } +libp2p = { version = "0.54", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] } + +log = { version = "0.4", default-features = false, features = ["std"] } +serai-task = { path = "../../../common/task", version = "0.1" } +serai-coordinator-p2p = { path = "../" } diff --git a/coordinator/p2p/libp2p/LICENSE b/coordinator/p2p/libp2p/LICENSE new file mode 100644 index 00000000..621233a9 --- /dev/null +++ b/coordinator/p2p/libp2p/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2023-2025 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/coordinator/p2p/libp2p/README.md b/coordinator/p2p/libp2p/README.md new file mode 100644 index 00000000..82edec80 --- /dev/null +++ b/coordinator/p2p/libp2p/README.md @@ -0,0 +1,14 @@ +# Serai Coordinator libp2p P2P + +A libp2p-backed P2P instantiation for Serai's coordinator. + +The libp2p swarm is limited to validators from the Serai network. The swarm +does not maintain any of its own peer finding/routing infrastructure, instead +relying on the Serai network's connection information to dial peers. This does +limit the listening peers to only the peers immediately reachable via the same +IP address (despite the two distinct services), not hidden behind a NAT, yet is +also quite simple and gives full control of who to connect to to us. 
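Concretely, a peer's identity on this swarm can be derived straight from its Serai
public key, mirroring `peer_id_from_public` in `src/lib.rs`; a self-contained sketch
of that mapping:

use libp2p::{multihash::Multihash, identity::PeerId};
use serai_client::primitives::PublicKey;

// Wrap the 32-byte Serai key in an identity multihash (code 0, meaning no hashing was
// performed), so the PeerId used on this swarm is exactly the validator's key.
fn peer_id_from_public(public: PublicKey) -> PeerId {
  PeerId::from_multihash(Multihash::wrap(0, &public.0).unwrap()).unwrap()
}

The authentication upgrade in `src/authenticate.rs` then attributes each connection to
the Serai key which signed the handshake.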
+ +Peers are decided via the internal `DialTask` which aims to maintain a target +amount of peers for each external network. This ensures cosigns are able to +propagate across the external networks which sign them. diff --git a/coordinator/p2p/libp2p/src/authenticate.rs b/coordinator/p2p/libp2p/src/authenticate.rs new file mode 100644 index 00000000..641d4481 --- /dev/null +++ b/coordinator/p2p/libp2p/src/authenticate.rs @@ -0,0 +1,187 @@ +use core::{pin::Pin, future::Future}; +use std::io; + +use zeroize::Zeroizing; +use rand_core::{RngCore, OsRng}; + +use blake2::{Digest, Blake2s256}; +use schnorrkel::{Keypair, PublicKey, Signature}; + +use serai_client::primitives::PublicKey as Public; + +use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; +use libp2p::{ + core::upgrade::{UpgradeInfo, InboundConnectionUpgrade, OutboundConnectionUpgrade}, + identity::{self, PeerId}, + noise, +}; + +use crate::peer_id_from_public; + +const PROTOCOL: &str = "/serai/coordinator/validators"; + +#[derive(Clone)] +pub(crate) struct OnlyValidators { + pub(crate) serai_key: Zeroizing, + pub(crate) noise_keypair: identity::Keypair, +} + +impl OnlyValidators { + /// The ephemeral challenge protocol for authentication. + /// + /// We use ephemeral challenges to prevent replaying signatures from historic sessions. + /// + /// We don't immediately send the challenge. We only send a commitment to it. This prevents our + /// remote peer from choosing their challenge in response to our challenge, in case there was any + /// benefit to doing so. + async fn challenges( + socket: &mut noise::Output, + ) -> io::Result<([u8; 32], [u8; 32])> { + let mut our_challenge = [0; 32]; + OsRng.fill_bytes(&mut our_challenge); + + // Write the hash of our challenge + socket.write_all(&Blake2s256::digest(our_challenge)).await?; + + // Read the hash of their challenge + let mut their_challenge_commitment = [0; 32]; + socket.read_exact(&mut their_challenge_commitment).await?; + + // Reveal our challenge + socket.write_all(&our_challenge).await?; + + // Read their challenge + let mut their_challenge = [0; 32]; + socket.read_exact(&mut their_challenge).await?; + + // Verify their challenge + if <[u8; 32]>::from(Blake2s256::digest(their_challenge)) != their_challenge_commitment { + Err(io::Error::other("challenge didn't match challenge commitment"))?; + } + + Ok((our_challenge, their_challenge)) + } + + // We sign the two noise peer IDs and the ephemeral challenges. + // + // Signing the noise peer IDs ensures we're authenticating this noise connection. The only + // expectations placed on noise are for it to prevent a MITM from impersonating the other end or + // modifying any messages sent. + // + // Signing the ephemeral challenges prevents any replays. While that should be unnecessary, as + // noise MAY prevent replays across sessions (even when the same key is used), and noise IDs + // shouldn't be reused (so it should be fine to reuse an existing signature for these noise IDs), + // it doesn't hurt. 
+ async fn authenticate( + &self, + socket: &mut noise::Output, + dialer_peer_id: PeerId, + dialer_challenge: [u8; 32], + listener_peer_id: PeerId, + listener_challenge: [u8; 32], + ) -> io::Result { + // Write our public key + socket.write_all(&self.serai_key.public.to_bytes()).await?; + + let msg = borsh::to_vec(&( + dialer_peer_id.to_bytes(), + dialer_challenge, + listener_peer_id.to_bytes(), + listener_challenge, + )) + .unwrap(); + let signature = self.serai_key.sign_simple(PROTOCOL.as_bytes(), &msg); + socket.write_all(&signature.to_bytes()).await?; + + let mut public_key_and_sig = [0; 96]; + socket.read_exact(&mut public_key_and_sig).await?; + let public_key = PublicKey::from_bytes(&public_key_and_sig[.. 32]) + .map_err(|_| io::Error::other("invalid public key"))?; + let sig = Signature::from_bytes(&public_key_and_sig[32 ..]) + .map_err(|_| io::Error::other("invalid signature serialization"))?; + + public_key + .verify_simple(PROTOCOL.as_bytes(), &msg, &sig) + .map_err(|_| io::Error::other("invalid signature"))?; + + Ok(peer_id_from_public(Public::from_raw(public_key.to_bytes()))) + } +} + +impl UpgradeInfo for OnlyValidators { + type Info = ::Info; + type InfoIter = ::InfoIter; + fn protocol_info(&self) -> Self::InfoIter { + // A keypair only causes an error if its sign operation fails, which is only possible with RSA, + // which isn't used within this codebase + noise::Config::new(&self.noise_keypair).unwrap().protocol_info() + } +} + +impl InboundConnectionUpgrade + for OnlyValidators +{ + type Output = (PeerId, noise::Output); + type Error = io::Error; + type Future = Pin>>>; + + fn upgrade_inbound( + self, + socket: S, + info: ::Info, + ) -> >::Future { + Box::pin(async move { + let (dialer_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair) + .unwrap() + .upgrade_inbound(socket, info) + .await + .map_err(io::Error::other)?; + + let (our_challenge, dialer_challenge) = OnlyValidators::challenges(&mut socket).await?; + let dialer_serai_validator = self + .authenticate( + &mut socket, + dialer_noise_peer_id, + dialer_challenge, + PeerId::from_public_key(&self.noise_keypair.public()), + our_challenge, + ) + .await?; + Ok((dialer_serai_validator, socket)) + }) + } +} + +impl OutboundConnectionUpgrade + for OnlyValidators +{ + type Output = (PeerId, noise::Output); + type Error = io::Error; + type Future = Pin>>>; + + fn upgrade_outbound( + self, + socket: S, + info: ::Info, + ) -> >::Future { + Box::pin(async move { + let (listener_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair) + .unwrap() + .upgrade_outbound(socket, info) + .await + .map_err(io::Error::other)?; + + let (our_challenge, listener_challenge) = OnlyValidators::challenges(&mut socket).await?; + let listener_serai_validator = self + .authenticate( + &mut socket, + PeerId::from_public_key(&self.noise_keypair.public()), + our_challenge, + listener_noise_peer_id, + listener_challenge, + ) + .await?; + Ok((listener_serai_validator, socket)) + }) + } +} diff --git a/coordinator/p2p/libp2p/src/dial.rs b/coordinator/p2p/libp2p/src/dial.rs new file mode 100644 index 00000000..b001446b --- /dev/null +++ b/coordinator/p2p/libp2p/src/dial.rs @@ -0,0 +1,127 @@ +use core::future::Future; +use std::{sync::Arc, collections::HashSet}; + +use rand_core::{RngCore, OsRng}; + +use tokio::sync::mpsc; + +use serai_client::{SeraiError, Serai}; + +use libp2p::{ + core::multiaddr::{Protocol, Multiaddr}, + swarm::dial_opts::DialOpts, +}; + +use serai_task::ContinuallyRan; + +use crate::{PORT, Peers, 
validators::Validators}; + +const TARGET_PEERS_PER_NETWORK: usize = 5; +/* + If we only tracked the target amount of peers per network, we'd risk being eclipsed by an + adversary who immediately connects to us with their array of validators upon our boot. Their + array would satisfy our target amount of peers, so we'd never seek more, enabling the adversary + to be the only entity we peered with. + + We solve this by additionally requiring an explicit amount of peers we dialed. That means we + randomly chose to connect to these peers. +*/ +// TODO const TARGET_DIALED_PEERS_PER_NETWORK: usize = 3; + +pub(crate) struct DialTask { + serai: Arc, + validators: Validators, + peers: Peers, + to_dial: mpsc::UnboundedSender, +} + +impl DialTask { + pub(crate) fn new( + serai: Arc, + peers: Peers, + to_dial: mpsc::UnboundedSender, + ) -> Self { + DialTask { serai: serai.clone(), validators: Validators::new(serai).0, peers, to_dial } + } +} + +impl ContinuallyRan for DialTask { + // Only run every five minutes, not the default of every five seconds + const DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60; + const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 10 * 60; + + type Error = SeraiError; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + self.validators.update().await?; + + // If any of our peers is lacking, try to connect to more + let mut dialed = false; + let peer_counts = self + .peers + .peers + .read() + .await + .iter() + .map(|(network, peers)| (*network, peers.len())) + .collect::>(); + for (network, peer_count) in peer_counts { + /* + If we don't have the target amount of peers, and we don't have all the validators in the + set but one, attempt to connect to more validators within this set. + + The latter clause is so if there's a set with only 3 validators, we don't infinitely try + to connect to the target amount of peers for this network as we never will. Instead, we + only try to connect to most of the validators actually present. + */ + if (peer_count < TARGET_PEERS_PER_NETWORK) && + (peer_count < + self + .validators + .by_network() + .get(&network) + .map(HashSet::len) + .unwrap_or(0) + .saturating_sub(1)) + { + let mut potential_peers = self.serai.p2p_validators(network).await?; + for _ in 0 .. 
(TARGET_PEERS_PER_NETWORK - peer_count) { + if potential_peers.is_empty() { + break; + } + let index_to_dial = + usize::try_from(OsRng.next_u64() % u64::try_from(potential_peers.len()).unwrap()) + .unwrap(); + let randomly_selected_peer = potential_peers.swap_remove(index_to_dial); + + log::info!("found peer from substrate: {randomly_selected_peer}"); + + // Map the peer from a Substrate P2P network peer to a Coordinator P2P network peer + let mapped_peer = randomly_selected_peer + .into_iter() + .filter_map(|protocol| match protocol { + // Drop PeerIds from the Substrate P2p network + Protocol::P2p(_) => None, + // Use our own TCP port + Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)), + // Pass-through any other specifications (IPv4, IPv6, etc) + other => Some(other), + }) + .collect::(); + + log::debug!("mapped found peer: {mapped_peer}"); + + self + .to_dial + .send(DialOpts::unknown_peer_id().address(mapped_peer).build()) + .expect("dial receiver closed?"); + dialed = true; + } + } + } + + Ok(dialed) + } + } +} diff --git a/coordinator/p2p/libp2p/src/gossip.rs b/coordinator/p2p/libp2p/src/gossip.rs new file mode 100644 index 00000000..f4ec666b --- /dev/null +++ b/coordinator/p2p/libp2p/src/gossip.rs @@ -0,0 +1,75 @@ +use core::time::Duration; + +use blake2::{Digest, Blake2s256}; + +use borsh::{BorshSerialize, BorshDeserialize}; + +use libp2p::gossipsub::{ + IdentTopic, MessageId, MessageAuthenticity, ValidationMode, ConfigBuilder, IdentityTransform, + AllowAllSubscriptionFilter, Behaviour, +}; +pub use libp2p::gossipsub::Event; + +use serai_cosign::SignedCosign; + +// Block size limit + 16 KB of space for signatures/metadata +pub(crate) const MAX_LIBP2P_GOSSIP_MESSAGE_SIZE: usize = tributary_sdk::BLOCK_SIZE_LIMIT + 16384; + +const LIBP2P_PROTOCOL: &str = "/serai/coordinator/gossip/1.0.0"; +const BASE_TOPIC: &str = "/"; + +fn topic_for_tributary(tributary: [u8; 32]) -> IdentTopic { + IdentTopic::new(format!("/tributary/{}", hex::encode(tributary))) +} + +#[derive(Clone, BorshSerialize, BorshDeserialize)] +pub(crate) enum Message { + Tributary { tributary: [u8; 32], message: Vec }, + Cosign(SignedCosign), +} + +impl Message { + pub(crate) fn topic(&self) -> IdentTopic { + match self { + Message::Tributary { tributary, .. 
} => topic_for_tributary(*tributary), + Message::Cosign(_) => IdentTopic::new(BASE_TOPIC), + } + } +} + +pub(crate) type Behavior = Behaviour; + +pub(crate) fn new_behavior() -> Behavior { + // The latency used by the Tendermint protocol, used here as the gossip epoch duration + // libp2p-rs defaults to 1 second, whereas ours will be ~2 + let heartbeat_interval = tributary_sdk::tendermint::LATENCY_TIME; + // The amount of heartbeats which will occur within a single Tributary block + let heartbeats_per_block = + tributary_sdk::tendermint::TARGET_BLOCK_TIME.div_ceil(heartbeat_interval); + // libp2p-rs defaults to 5, whereas ours will be ~8 + let heartbeats_to_keep = 2 * heartbeats_per_block; + // libp2p-rs defaults to 3 whereas ours will be ~4 + let heartbeats_to_gossip = heartbeats_per_block; + + let config = ConfigBuilder::default() + .protocol_id_prefix(LIBP2P_PROTOCOL) + .history_length(usize::try_from(heartbeats_to_keep).unwrap()) + .history_gossip(usize::try_from(heartbeats_to_gossip).unwrap()) + .heartbeat_interval(Duration::from_millis(heartbeat_interval.into())) + .max_transmit_size(MAX_LIBP2P_GOSSIP_MESSAGE_SIZE) + .duplicate_cache_time(Duration::from_millis((heartbeats_to_keep * heartbeat_interval).into())) + .validation_mode(ValidationMode::Anonymous) + // Uses a content based message ID to avoid duplicates as much as possible + .message_id_fn(|msg| { + MessageId::new(&Blake2s256::digest([msg.topic.as_str().as_bytes(), &msg.data].concat())) + }) + .build(); + + let mut gossip = Behavior::new(MessageAuthenticity::Anonymous, config.unwrap()).unwrap(); + + // Subscribe to the base topic + let topic = IdentTopic::new(BASE_TOPIC); + let _ = gossip.subscribe(&topic); + + gossip +} diff --git a/coordinator/p2p/libp2p/src/lib.rs b/coordinator/p2p/libp2p/src/lib.rs new file mode 100644 index 00000000..8d60b32b --- /dev/null +++ b/coordinator/p2p/libp2p/src/lib.rs @@ -0,0 +1,416 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::{future::Future, time::Duration}; +use std::{ + sync::Arc, + collections::{HashSet, HashMap}, +}; + +use rand_core::{RngCore, OsRng}; + +use zeroize::Zeroizing; +use schnorrkel::Keypair; + +use serai_client::{ + primitives::{ExternalNetworkId, PublicKey}, + validator_sets::primitives::ExternalValidatorSet, + Serai, +}; + +use tokio::sync::{mpsc, oneshot, Mutex, RwLock}; + +use serai_task::{Task, ContinuallyRan}; + +use serai_cosign::SignedCosign; + +use libp2p::{ + multihash::Multihash, + identity::{self, PeerId}, + tcp::Config as TcpConfig, + yamux, allow_block_list, + connection_limits::{self, ConnectionLimits}, + swarm::NetworkBehaviour, + SwarmBuilder, +}; + +use serai_coordinator_p2p::{Heartbeat, TributaryBlockWithCommit}; + +/// A struct to sync the validators from the Serai node in order to keep track of them. +mod validators; +use validators::UpdateValidatorsTask; + +/// The authentication protocol upgrade to limit the P2P network to active validators. 
+mod authenticate; +use authenticate::OnlyValidators; + +/// The ping behavior, used to ensure connection latency is below the limit +mod ping; + +/// The request-response messages and behavior +mod reqres; +use reqres::{InboundRequestId, Request, Response}; + +/// The gossip messages and behavior +mod gossip; +use gossip::Message; + +/// The swarm task, running it and dispatching to/from it +mod swarm; +use swarm::SwarmTask; + +/// The dial task, to find new peers to connect to +mod dial; +use dial::DialTask; + +const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o') + +fn peer_id_from_public(public: PublicKey) -> PeerId { + // 0 represents the identity Multihash, that no hash was performed + // It's an internal constant so we can't refer to the constant inside libp2p + PeerId::from_multihash(Multihash::wrap(0, &public.0).unwrap()).unwrap() +} + +/// The representation of a peer. +pub struct Peer<'a> { + outbound_requests: &'a mpsc::UnboundedSender<(PeerId, Request, oneshot::Sender)>, + id: PeerId, +} +impl serai_coordinator_p2p::Peer<'_> for Peer<'_> { + fn send_heartbeat( + &self, + heartbeat: Heartbeat, + ) -> impl Send + Future>> { + async move { + const HEARTBEAT_TIMEOUT: Duration = Duration::from_secs(5); + + let request = Request::Heartbeat(heartbeat); + let (sender, receiver) = oneshot::channel(); + self + .outbound_requests + .send((self.id, request, sender)) + .expect("outbound requests recv channel was dropped?"); + if let Ok(Ok(Response::Blocks(blocks))) = + tokio::time::timeout(HEARTBEAT_TIMEOUT, receiver).await + { + Some(blocks) + } else { + None + } + } + } +} + +#[derive(Clone)] +struct Peers { + peers: Arc>>>, +} + +// Consider adding identify/kad/autonat/rendevous/(relay + dcutr). While we currently use the Serai +// network for peers, we could use it solely for bootstrapping/as a fallback. +#[derive(NetworkBehaviour)] +struct Behavior { + // Used to only allow Serai validators as peers + allow_list: allow_block_list::Behaviour, + // Used to limit each peer to a single connection + connection_limits: connection_limits::Behaviour, + // Used to ensure connection latency is within tolerances + ping: ping::Behavior, + // Used to request data from specific peers + reqres: reqres::Behavior, + // Used to broadcast messages to all other peers subscribed to a topic + gossip: gossip::Behavior, +} + +#[allow(clippy::type_complexity)] +struct Libp2pInner { + peers: Peers, + + gossip: mpsc::UnboundedSender, + outbound_requests: mpsc::UnboundedSender<(PeerId, Request, oneshot::Sender)>, + + tributary_gossip: Mutex)>>, + + signed_cosigns: Mutex>, + signed_cosigns_send: mpsc::UnboundedSender, + + heartbeat_requests: + Mutex>, + notable_cosign_requests: Mutex>, + inbound_request_responses: mpsc::UnboundedSender<(InboundRequestId, Response)>, +} + +/// The libp2p-backed P2P implementation. +/// +/// The P2p trait implementation does not support backpressure and is expected to be fully +/// utilized. Failure to poll the entire API will cause unbounded memory growth. +#[derive(Clone)] +pub struct Libp2p(Arc); + +impl Libp2p { + /// Create a new libp2p-backed P2P instance. + /// + /// This will spawn all of the internal tasks necessary for functioning. 
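A rough sketch of how this constructor and the cosigning interface above might be wired together; the `serai_key`, `serai` handle, and `db` are assumed to already exist, and this is illustrative rather than code from this diff.

// `Libp2p` is `Clone` (it wraps an `Arc`) and implements `RequestNotableCosigns`,
// so one instance can back both gossip and the cosign evaluator's requests.
let p2p = Libp2p::new(&serai_key, serai.clone());
let cosigning = Cosigning::spawn(db, serai, p2p.clone(), vec![]);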
+ pub fn new(serai_key: &Zeroizing, serai: Arc) -> Libp2p { + // Define the object we track peers with + let peers = Peers { peers: Arc::new(RwLock::new(HashMap::new())) }; + + // Define the dial task + let (dial_task_def, dial_task) = Task::new(); + let (to_dial_send, to_dial_recv) = mpsc::unbounded_channel(); + tokio::spawn( + DialTask::new(serai.clone(), peers.clone(), to_dial_send) + .continually_run(dial_task_def, vec![]), + ); + + let swarm = { + let new_only_validators = |noise_keypair: &identity::Keypair| -> Result<_, ()> { + Ok(OnlyValidators { serai_key: serai_key.clone(), noise_keypair: noise_keypair.clone() }) + }; + + let mut swarm = SwarmBuilder::with_existing_identity(identity::Keypair::generate_ed25519()) + .with_tokio() + .with_tcp(TcpConfig::default().nodelay(true), new_only_validators, yamux::Config::default) + .unwrap() + .with_behaviour(|_| Behavior { + allow_list: allow_block_list::Behaviour::default(), + // Limit each per to a single connection + connection_limits: connection_limits::Behaviour::new( + ConnectionLimits::default().with_max_established_per_peer(Some(1)), + ), + ping: ping::new_behavior(), + reqres: reqres::new_behavior(), + gossip: gossip::new_behavior(), + }) + .unwrap() + .with_swarm_config(|config| { + config + .with_idle_connection_timeout(ping::INTERVAL + ping::TIMEOUT + Duration::from_secs(5)) + }) + .build(); + swarm.listen_on(format!("/ip4/0.0.0.0/tcp/{PORT}").parse().unwrap()).unwrap(); + swarm.listen_on(format!("/ip6/::/tcp/{PORT}").parse().unwrap()).unwrap(); + swarm + }; + + let (swarm_validators, validator_changes) = UpdateValidatorsTask::spawn(serai); + + let (gossip_send, gossip_recv) = mpsc::unbounded_channel(); + let (signed_cosigns_send, signed_cosigns_recv) = mpsc::unbounded_channel(); + let (tributary_gossip_send, tributary_gossip_recv) = mpsc::unbounded_channel(); + + let (outbound_requests_send, outbound_requests_recv) = mpsc::unbounded_channel(); + + let (heartbeat_requests_send, heartbeat_requests_recv) = mpsc::unbounded_channel(); + let (notable_cosign_requests_send, notable_cosign_requests_recv) = mpsc::unbounded_channel(); + let (inbound_request_responses_send, inbound_request_responses_recv) = + mpsc::unbounded_channel(); + + // Create the swarm task + SwarmTask::spawn( + dial_task, + to_dial_recv, + swarm_validators, + validator_changes, + peers.clone(), + swarm, + gossip_recv, + signed_cosigns_send.clone(), + tributary_gossip_send, + outbound_requests_recv, + heartbeat_requests_send, + notable_cosign_requests_send, + inbound_request_responses_recv, + ); + + Libp2p(Arc::new(Libp2pInner { + peers, + + gossip: gossip_send, + outbound_requests: outbound_requests_send, + + tributary_gossip: Mutex::new(tributary_gossip_recv), + + signed_cosigns: Mutex::new(signed_cosigns_recv), + signed_cosigns_send, + + heartbeat_requests: Mutex::new(heartbeat_requests_recv), + notable_cosign_requests: Mutex::new(notable_cosign_requests_recv), + inbound_request_responses: inbound_request_responses_send, + })) + } +} + +impl tributary_sdk::P2p for Libp2p { + fn broadcast(&self, tributary: [u8; 32], message: Vec) -> impl Send + Future { + async move { + self + .0 + .gossip + .send(Message::Tributary { tributary, message }) + .expect("gossip recv channel was dropped?"); + } + } +} + +impl serai_cosign::RequestNotableCosigns for Libp2p { + type Error = (); + + fn request_notable_cosigns( + &self, + global_session: [u8; 32], + ) -> impl Send + Future> { + async move { + const AMOUNT_OF_PEERS_TO_REQUEST_FROM: usize = 3; + const 
NOTABLE_COSIGNS_TIMEOUT: Duration = Duration::from_secs(5); + + let request = Request::NotableCosigns { global_session }; + + let peers = self.0.peers.peers.read().await.clone(); + // HashSet of all peers + let peers = peers.into_values().flat_map(<_>::into_iter).collect::>(); + // Vec of all peers + let mut peers = peers.into_iter().collect::>(); + + let mut channels = Vec::with_capacity(AMOUNT_OF_PEERS_TO_REQUEST_FROM); + for _ in 0 .. AMOUNT_OF_PEERS_TO_REQUEST_FROM { + if peers.is_empty() { + break; + } + let i = usize::try_from(OsRng.next_u64() % u64::try_from(peers.len()).unwrap()).unwrap(); + let peer = peers.swap_remove(i); + + let (sender, receiver) = oneshot::channel(); + self + .0 + .outbound_requests + .send((peer, request, sender)) + .expect("outbound requests recv channel was dropped?"); + channels.push(receiver); + } + + // We could reduce our latency by using FuturesUnordered here but the latency isn't a concern + for channel in channels { + if let Ok(Ok(Response::NotableCosigns(cosigns))) = + tokio::time::timeout(NOTABLE_COSIGNS_TIMEOUT, channel).await + { + for cosign in cosigns { + self + .0 + .signed_cosigns_send + .send(cosign) + .expect("signed_cosigns recv in this object was dropped?"); + } + } + } + + Ok(()) + } + } +} + +impl serai_coordinator_p2p::P2p for Libp2p { + type Peer<'a> = Peer<'a>; + + fn peers(&self, network: ExternalNetworkId) -> impl Send + Future>> { + async move { + let Some(peer_ids) = self.0.peers.peers.read().await.get(&network).cloned() else { + return vec![]; + }; + let mut res = vec![]; + for id in peer_ids { + res.push(Peer { outbound_requests: &self.0.outbound_requests, id }); + } + res + } + } + + fn publish_cosign(&self, cosign: SignedCosign) -> impl Send + Future { + async move { + self.0.gossip.send(Message::Cosign(cosign)).expect("gossip recv channel was dropped?"); + } + } + + fn heartbeat( + &self, + ) -> impl Send + Future>)> { + async move { + let (request_id, set, latest_block_hash) = self + .0 + .heartbeat_requests + .lock() + .await + .recv() + .await + .expect("heartbeat_requests_send was dropped?"); + let (sender, receiver) = oneshot::channel(); + tokio::spawn({ + let respond = self.0.inbound_request_responses.clone(); + async move { + // The swarm task expects us to respond to every request. 
If the caller drops this + // channel, we'll receive `Err` and respond with `vec![]`, safely satisfying that bound + // without requiring the caller send a value down this channel + let response = if let Ok(blocks) = receiver.await { + Response::Blocks(blocks) + } else { + Response::Blocks(vec![]) + }; + respond + .send((request_id, response)) + .expect("inbound_request_responses_recv was dropped?"); + } + }); + (Heartbeat { set, latest_block_hash }, sender) + } + } + + fn notable_cosigns_request( + &self, + ) -> impl Send + Future>)> { + async move { + let (request_id, global_session) = self + .0 + .notable_cosign_requests + .lock() + .await + .recv() + .await + .expect("notable_cosign_requests_send was dropped?"); + let (sender, receiver) = oneshot::channel(); + tokio::spawn({ + let respond = self.0.inbound_request_responses.clone(); + async move { + let response = if let Ok(notable_cosigns) = receiver.await { + Response::NotableCosigns(notable_cosigns) + } else { + Response::NotableCosigns(vec![]) + }; + respond + .send((request_id, response)) + .expect("inbound_request_responses_recv was dropped?"); + } + }); + (global_session, sender) + } + } + + fn tributary_message(&self) -> impl Send + Future)> { + async move { + self.0.tributary_gossip.lock().await.recv().await.expect("tributary_gossip send was dropped?") + } + } + + fn cosign(&self) -> impl Send + Future { + async move { + self + .0 + .signed_cosigns + .lock() + .await + .recv() + .await + .expect("signed_cosigns couldn't recv despite send in same object?") + } + } +} diff --git a/coordinator/p2p/libp2p/src/ping.rs b/coordinator/p2p/libp2p/src/ping.rs new file mode 100644 index 00000000..2b9afa41 --- /dev/null +++ b/coordinator/p2p/libp2p/src/ping.rs @@ -0,0 +1,17 @@ +use core::time::Duration; + +use tributary_sdk::tendermint::LATENCY_TIME; + +use libp2p::ping::{self, Config, Behaviour}; +pub use ping::Event; + +pub(crate) const INTERVAL: Duration = Duration::from_secs(30); +// LATENCY_TIME represents the maximum latency for message delivery. Sending the ping, and +// receiving the pong, each have to occur within this time bound to validate the connection. We +// enforce that, as best we can, by requiring the round-trip be within twice the allowed latency. 
+pub(crate) const TIMEOUT: Duration = Duration::from_millis((2 * LATENCY_TIME) as u64); + +pub(crate) type Behavior = Behaviour; +pub(crate) fn new_behavior() -> Behavior { + Behavior::new(Config::default().with_interval(INTERVAL).with_timeout(TIMEOUT)) +} diff --git a/coordinator/p2p/libp2p/src/reqres.rs b/coordinator/p2p/libp2p/src/reqres.rs new file mode 100644 index 00000000..aef16940 --- /dev/null +++ b/coordinator/p2p/libp2p/src/reqres.rs @@ -0,0 +1,134 @@ +use core::{fmt, time::Duration}; +use std::io; + +use async_trait::async_trait; + +use borsh::{BorshSerialize, BorshDeserialize}; + +use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; + +use libp2p::request_response::{ + self, Codec as CodecTrait, Event as GenericEvent, Config, Behaviour, ProtocolSupport, +}; +pub use request_response::{InboundRequestId, Message}; + +use serai_cosign::SignedCosign; + +use serai_coordinator_p2p::{Heartbeat, TributaryBlockWithCommit}; + +/// The maximum message size for the request-response protocol +// This is derived from the heartbeat message size as it's our largest message +pub(crate) const MAX_LIBP2P_REQRES_MESSAGE_SIZE: usize = + 1024 + serai_coordinator_p2p::heartbeat::BATCH_SIZE_LIMIT; + +const PROTOCOL: &str = "/serai/coordinator/reqres/1.0.0"; + +/// Requests which can be made via the request-response protocol. +#[derive(Clone, Copy, Debug, BorshSerialize, BorshDeserialize)] +pub(crate) enum Request { + /// A heartbeat informing our peers of our latest block, for the specified blockchain, on regular + /// intervals. + /// + /// If our peers have more blocks than us, they're expected to respond with those blocks. + Heartbeat(Heartbeat), + /// A request for the notable cosigns for a global session. + NotableCosigns { global_session: [u8; 32] }, +} + +/// Responses which can be received via the request-response protocol. +#[derive(Clone, BorshSerialize, BorshDeserialize)] +pub(crate) enum Response { + None, + Blocks(Vec), + NotableCosigns(Vec), +} +impl fmt::Debug for Response { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Response::None => fmt.debug_struct("Response::None").finish(), + Response::Blocks(_) => fmt.debug_struct("Response::Block").finish_non_exhaustive(), + Response::NotableCosigns(_) => { + fmt.debug_struct("Response::NotableCosigns").finish_non_exhaustive() + } + } + } +} + +/// The codec used for the request-response protocol. +/// +/// We don't use CBOR or JSON, but use borsh to create `Vec`s we then length-prefix. While +/// ideally, we'd use borsh directly with the `io` traits defined here, they're async and there +/// isn't an amenable API within borsh for incremental deserialization. 
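In other words, each request and response crosses the wire as a little-endian `u32` length prefix followed by the borsh bytes, and reads reject any prefix above the size cap; a standalone sketch of the encoding half (the helper name is hypothetical):

fn frame(msg: &impl borsh::BorshSerialize) -> Vec<u8> {
  let body = borsh::to_vec(msg).unwrap();
  let mut framed = Vec::with_capacity(4 + body.len());
  // Little-endian u32 length prefix, as `Codec::write` below produces and
  // `Codec::read` expects (bounded by MAX_LIBP2P_REQRES_MESSAGE_SIZE on read)
  framed.extend_from_slice(&u32::try_from(body.len()).unwrap().to_le_bytes());
  framed.extend_from_slice(&body);
  framed
}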
+#[derive(Default, Clone, Copy, Debug)] +pub(crate) struct Codec; +impl Codec { + async fn read(io: &mut (impl Unpin + AsyncRead)) -> io::Result { + let mut len = [0; 4]; + io.read_exact(&mut len).await?; + let len = usize::try_from(u32::from_le_bytes(len)).expect("not at least a 32-bit platform?"); + if len > MAX_LIBP2P_REQRES_MESSAGE_SIZE { + Err(io::Error::other("request length exceeded MAX_LIBP2P_REQRES_MESSAGE_SIZE"))?; + } + // This may be a non-trivial allocation easily causable + // While we could chunk the read, meaning we only perform the allocation as bandwidth is used, + // the max message size should be sufficiently sane + let mut buf = vec![0; len]; + io.read_exact(&mut buf).await?; + let mut buf = buf.as_slice(); + let res = M::deserialize(&mut buf)?; + if !buf.is_empty() { + Err(io::Error::other("p2p message had extra data appended to it"))?; + } + Ok(res) + } + async fn write(io: &mut (impl Unpin + AsyncWrite), msg: &impl BorshSerialize) -> io::Result<()> { + let msg = borsh::to_vec(msg).unwrap(); + io.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await?; + io.write_all(&msg).await + } +} +#[async_trait] +impl CodecTrait for Codec { + type Protocol = &'static str; + type Request = Request; + type Response = Response; + + async fn read_request( + &mut self, + _: &Self::Protocol, + io: &mut R, + ) -> io::Result { + Self::read(io).await + } + async fn read_response( + &mut self, + _: &Self::Protocol, + io: &mut R, + ) -> io::Result { + Self::read(io).await + } + async fn write_request( + &mut self, + _: &Self::Protocol, + io: &mut W, + req: Request, + ) -> io::Result<()> { + Self::write(io, &req).await + } + async fn write_response( + &mut self, + _: &Self::Protocol, + io: &mut W, + res: Response, + ) -> io::Result<()> { + Self::write(io, &res).await + } +} + +pub(crate) type Event = GenericEvent; + +pub(crate) type Behavior = Behaviour; +pub(crate) fn new_behavior() -> Behavior { + let config = Config::default().with_request_timeout(Duration::from_secs(5)); + Behavior::new([(PROTOCOL, ProtocolSupport::Full)], config) +} diff --git a/coordinator/p2p/libp2p/src/swarm.rs b/coordinator/p2p/libp2p/src/swarm.rs new file mode 100644 index 00000000..94a7cb03 --- /dev/null +++ b/coordinator/p2p/libp2p/src/swarm.rs @@ -0,0 +1,359 @@ +use std::{ + sync::Arc, + collections::{HashSet, HashMap}, + time::{Duration, Instant}, +}; + +use borsh::BorshDeserialize; + +use serai_client::validator_sets::primitives::ExternalValidatorSet; + +use tokio::sync::{mpsc, oneshot, RwLock}; + +use serai_task::TaskHandle; + +use serai_cosign::SignedCosign; + +use futures_util::StreamExt; +use libp2p::{ + identity::PeerId, + request_response::{InboundRequestId, OutboundRequestId, ResponseChannel}, + swarm::{dial_opts::DialOpts, SwarmEvent, Swarm}, +}; + +use serai_coordinator_p2p::Heartbeat; + +use crate::{ + Peers, BehaviorEvent, Behavior, + validators::{self, Validators}, + ping, + reqres::{self, Request, Response}, + gossip, +}; + +const TIME_BETWEEN_REBUILD_PEERS: Duration = Duration::from_secs(10 * 60); + +/* + `SwarmTask` handles everything we need the `Swarm` object for. The goal is to minimize the + contention on this task. Unfortunately, the `Swarm` object itself is needed for a variety of + purposes making this a rather large task. 
+ + Responsibilities include: + - Actually dialing new peers (the selection process occurs in another task) + - Maintaining the peers structure (as we need the Swarm object to see who our peers are) + - Gossiping messages + - Dispatching gossiped messages + - Sending requests + - Dispatching responses to requests + - Dispatching received requests + - Sending responses +*/ +pub(crate) struct SwarmTask { + dial_task: TaskHandle, + to_dial: mpsc::UnboundedReceiver, + last_dial_task_run: Instant, + + validators: Arc>, + validator_changes: mpsc::UnboundedReceiver, + peers: Peers, + rebuild_peers_at: Instant, + + swarm: Swarm, + + gossip: mpsc::UnboundedReceiver, + signed_cosigns: mpsc::UnboundedSender, + tributary_gossip: mpsc::UnboundedSender<([u8; 32], Vec)>, + + outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender)>, + outbound_request_responses: HashMap>, + + inbound_request_response_channels: HashMap>, + heartbeat_requests: mpsc::UnboundedSender<(InboundRequestId, ExternalValidatorSet, [u8; 32])>, + notable_cosign_requests: mpsc::UnboundedSender<(InboundRequestId, [u8; 32])>, + inbound_request_responses: mpsc::UnboundedReceiver<(InboundRequestId, Response)>, +} + +impl SwarmTask { + fn handle_gossip(&mut self, event: gossip::Event) { + match event { + gossip::Event::Message { message, .. } => { + let Ok(message) = gossip::Message::deserialize(&mut message.data.as_slice()) else { + // TODO: Penalize the PeerId which created this message, which requires authenticating + // each message OR moving to explicit acknowledgement before re-gossiping + return; + }; + match message { + gossip::Message::Tributary { tributary, message } => { + let _: Result<_, _> = self.tributary_gossip.send((tributary, message)); + } + gossip::Message::Cosign(signed_cosign) => { + let _: Result<_, _> = self.signed_cosigns.send(signed_cosign); + } + } + } + gossip::Event::Subscribed { .. } | gossip::Event::Unsubscribed { .. } => {} + gossip::Event::GossipsubNotSupported { peer_id } => { + let _: Result<_, _> = self.swarm.disconnect_peer_id(peer_id); + } + } + } + + fn handle_reqres(&mut self, event: reqres::Event) { + match event { + reqres::Event::Message { message, .. } => match message { + reqres::Message::Request { request_id, request, channel } => match request { + reqres::Request::Heartbeat(Heartbeat { set, latest_block_hash }) => { + self.inbound_request_response_channels.insert(request_id, channel); + let _: Result<_, _> = + self.heartbeat_requests.send((request_id, set, latest_block_hash)); + } + reqres::Request::NotableCosigns { global_session } => { + self.inbound_request_response_channels.insert(request_id, channel); + let _: Result<_, _> = self.notable_cosign_requests.send((request_id, global_session)); + } + }, + reqres::Message::Response { request_id, response } => { + if let Some(channel) = self.outbound_request_responses.remove(&request_id) { + let _: Result<_, _> = channel.send(response); + } + } + }, + reqres::Event::OutboundFailure { request_id, .. } => { + // Send None as the response for the request + if let Some(channel) = self.outbound_request_responses.remove(&request_id) { + let _: Result<_, _> = channel.send(Response::None); + } + } + reqres::Event::InboundFailure { .. } | reqres::Event::ResponseSent { .. } => {} + } + } + + async fn run(mut self) { + loop { + let time_till_rebuild_peers = self.rebuild_peers_at.saturating_duration_since(Instant::now()); + + tokio::select! 
{ + // If the validators have changed, update the allow list + validator_changes = self.validator_changes.recv() => { + let validator_changes = validator_changes.expect("validators update task shut down?"); + let behavior = &mut self.swarm.behaviour_mut().allow_list; + for removed in validator_changes.removed { + behavior.disallow_peer(removed); + } + for added in validator_changes.added { + behavior.allow_peer(added); + } + } + + // Dial peers we're instructed to + dial_opts = self.to_dial.recv() => { + let dial_opts = dial_opts.expect("DialTask was closed?"); + let _: Result<_, _> = self.swarm.dial(dial_opts); + } + + /* + Rebuild the peers every 10 minutes. + + This protects against any race conditions/edge cases we have in our logic to track peers, + along with unrepresented behavior such as when a peer changes the networks they're active + in. This lets the peer tracking logic simply be 'good enough' to not become horribly + corrupt over the span of `TIME_BETWEEN_REBUILD_PEERS`. + + We also use this to disconnect all peers who are no longer active in any network. + */ + () = tokio::time::sleep(time_till_rebuild_peers) => { + let validators_by_network = self.validators.read().await.by_network().clone(); + let connected_peers = self.swarm.connected_peers().copied().collect::>(); + + // Build the new peers object + let mut peers = HashMap::new(); + for (network, validators) in validators_by_network { + peers.insert(network, validators.intersection(&connected_peers).copied().collect()); + } + + // Write the new peers object + *self.peers.peers.write().await = peers; + self.rebuild_peers_at = Instant::now() + TIME_BETWEEN_REBUILD_PEERS; + } + + // Handle swarm events + event = self.swarm.next() => { + // `Swarm::next` will never return `Poll::Ready(None)` + // https://docs.rs/ + // libp2p/0.54.1/libp2p/struct.Swarm.html#impl-Stream-for-Swarm%3CTBehaviour%3E + let event = event.unwrap(); + match event { + // New connection, so update peers + SwarmEvent::ConnectionEstablished { peer_id, .. } => { + let Some(networks) = + self.validators.read().await.networks(&peer_id).cloned() else { continue }; + let mut peers = self.peers.peers.write().await; + for network in networks { + peers.entry(network).or_insert_with(HashSet::new).insert(peer_id); + } + } + + // Connection closed, so update peers + SwarmEvent::ConnectionClosed { peer_id, .. } => { + let Some(networks) = + self.validators.read().await.networks(&peer_id).cloned() else { continue }; + let mut peers = self.peers.peers.write().await; + for network in networks { + peers.entry(network).or_insert_with(HashSet::new).remove(&peer_id); + } + + /* + We want to re-run the dial task, since we lost a peer, in case we should find new + peers. This opens a DoS where a validator repeatedly opens/closes connections to + force iterations of the dial task. We prevent this by setting a minimum distance + since the last explicit iteration. + + This is suboptimal. If we have several disconnects in immediate proximity, we'll + trigger the dial task upon the first (where we may still have enough peers we + shouldn't dial more) but not the last (where we may have so few peers left we + should dial more). This is accepted as the dial task will eventually run on its + natural timer. 
+ */ + const MINIMUM_TIME_SINCE_LAST_EXPLICIT_DIAL: Duration = Duration::from_secs(60); + let now = Instant::now(); + if (self.last_dial_task_run + MINIMUM_TIME_SINCE_LAST_EXPLICIT_DIAL) < now { + self.dial_task.run_now(); + self.last_dial_task_run = now; + } + } + + SwarmEvent::Behaviour(event) => { + match event { + BehaviorEvent::AllowList(event) | BehaviorEvent::ConnectionLimits(event) => { + // This *is* an exhaustive match as these events are empty enums + match event {} + } + BehaviorEvent::Ping(ping::Event { peer: _, connection, result, }) => { + if result.is_err() { + self.swarm.close_connection(connection); + } + } + BehaviorEvent::Reqres(event) => self.handle_reqres(event), + BehaviorEvent::Gossip(event) => self.handle_gossip(event), + } + } + + // We don't handle any of these + SwarmEvent::IncomingConnection { .. } | + SwarmEvent::IncomingConnectionError { .. } | + SwarmEvent::OutgoingConnectionError { .. } | + SwarmEvent::NewListenAddr { .. } | + SwarmEvent::ExpiredListenAddr { .. } | + SwarmEvent::ListenerClosed { .. } | + SwarmEvent::ListenerError { .. } | + SwarmEvent::Dialing { .. } | + SwarmEvent::NewExternalAddrCandidate { .. } | + SwarmEvent::ExternalAddrConfirmed { .. } | + SwarmEvent::ExternalAddrExpired { .. } | + SwarmEvent::NewExternalAddrOfPeer { .. } => {} + + // Requires as SwarmEvent is non-exhaustive + _ => log::warn!("unhandled SwarmEvent: {event:?}"), + } + } + + message = self.gossip.recv() => { + let message = message.expect("channel for messages to gossip was closed?"); + let topic = message.topic(); + let message = borsh::to_vec(&message).unwrap(); + + /* + If we're sending a message for this topic, it's because this topic is relevant to us. + Subscribe to it. + + We create topics roughly weekly, one per validator set/session. Once present in a + topic, we're interested in all messages for it until the validator set/session retires. + Then there should no longer be any messages for the topic as we should drop the + Tributary which creates the messages. + + We use this as an argument to not bother implement unsubscribing from topics. They're + incredibly infrequently created and old topics shouldn't still have messages published + to them. Having the coordinator reboot being our method of unsubscribing is fine. + + Alternatively, we could route an API to determine when a topic is retired, or retire + any topics we haven't sent messages on in the past hour. + */ + let behavior = self.swarm.behaviour_mut(); + let _: Result<_, _> = behavior.gossip.subscribe(&topic); + /* + This may be an error of `InsufficientPeers`. If so, we could ask DialTask to dial more + peers for this network. We don't as we assume DialTask will detect the lack of peers + for this network, and will already successfully handle this. 
+ */ + let _: Result<_, _> = behavior.gossip.publish(topic.hash(), message); + } + + request = self.outbound_requests.recv() => { + let (peer, request, response_channel) = + request.expect("channel for requests was closed?"); + let request_id = self.swarm.behaviour_mut().reqres.send_request(&peer, request); + self.outbound_request_responses.insert(request_id, response_channel); + } + + response = self.inbound_request_responses.recv() => { + let (request_id, response) = + response.expect("channel for inbound request responses was closed?"); + if let Some(channel) = self.inbound_request_response_channels.remove(&request_id) { + let _: Result<_, _> = + self.swarm.behaviour_mut().reqres.send_response(channel, response); + } + } + } + } + } + + #[allow(clippy::too_many_arguments)] + pub(crate) fn spawn( + dial_task: TaskHandle, + to_dial: mpsc::UnboundedReceiver, + + validators: Arc>, + validator_changes: mpsc::UnboundedReceiver, + peers: Peers, + + swarm: Swarm, + + gossip: mpsc::UnboundedReceiver, + signed_cosigns: mpsc::UnboundedSender, + tributary_gossip: mpsc::UnboundedSender<([u8; 32], Vec)>, + + outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender)>, + + heartbeat_requests: mpsc::UnboundedSender<(InboundRequestId, ExternalValidatorSet, [u8; 32])>, + notable_cosign_requests: mpsc::UnboundedSender<(InboundRequestId, [u8; 32])>, + inbound_request_responses: mpsc::UnboundedReceiver<(InboundRequestId, Response)>, + ) { + tokio::spawn( + SwarmTask { + dial_task, + to_dial, + last_dial_task_run: Instant::now(), + + validators, + validator_changes, + peers, + rebuild_peers_at: Instant::now() + TIME_BETWEEN_REBUILD_PEERS, + + swarm, + + gossip, + signed_cosigns, + tributary_gossip, + + outbound_requests, + outbound_request_responses: HashMap::new(), + + inbound_request_response_channels: HashMap::new(), + heartbeat_requests, + notable_cosign_requests, + inbound_request_responses, + } + .run(), + ); + } +} diff --git a/coordinator/p2p/libp2p/src/validators.rs b/coordinator/p2p/libp2p/src/validators.rs new file mode 100644 index 00000000..25fabacd --- /dev/null +++ b/coordinator/p2p/libp2p/src/validators.rs @@ -0,0 +1,221 @@ +use core::{borrow::Borrow, future::Future}; +use std::{ + sync::Arc, + collections::{HashSet, HashMap}, +}; + +use serai_client::{ + primitives::ExternalNetworkId, validator_sets::primitives::Session, SeraiError, Serai, +}; + +use serai_task::{Task, ContinuallyRan}; + +use libp2p::PeerId; + +use futures_util::stream::{StreamExt, FuturesUnordered}; +use tokio::sync::{mpsc, RwLock}; + +use crate::peer_id_from_public; + +pub(crate) struct Changes { + pub(crate) removed: HashSet, + pub(crate) added: HashSet, +} + +pub(crate) struct Validators { + serai: Arc, + + // A cache for which session we're populated with the validators of + sessions: HashMap, + // The validators by network + by_network: HashMap>, + // The validators and their networks + validators: HashMap>, + + // The channel to send the changes down + changes: mpsc::UnboundedSender, +} + +impl Validators { + pub(crate) fn new(serai: Arc) -> (Self, mpsc::UnboundedReceiver) { + let (send, recv) = mpsc::unbounded_channel(); + let validators = Validators { + serai, + sessions: HashMap::new(), + by_network: HashMap::new(), + validators: HashMap::new(), + changes: send, + }; + (validators, recv) + } + + async fn session_changes( + serai: impl Borrow, + sessions: impl Borrow>, + ) -> Result)>, SeraiError> { + /* + This uses the latest finalized block, not the latest cosigned block, which should be fine as 
+ in the worst case, we'd connect to unexpected validators. They still shouldn't be able to + bypass the cosign protocol unless a historical global session was malicious, in which case + the cosign protocol already breaks. + + Besides, we can't connect to historical validators, only the current validators. + */ + let temporal_serai = serai.borrow().as_of_latest_finalized_block().await?; + let temporal_serai = temporal_serai.validator_sets(); + + let mut session_changes = vec![]; + { + // FuturesUnordered can be bad practice as it'll cause timeouts if infrequently polled, but + // we poll it till it yields all futures with the most minimal processing possible + let mut futures = FuturesUnordered::new(); + for network in serai_client::primitives::EXTERNAL_NETWORKS { + let sessions = sessions.borrow(); + futures.push(async move { + let session = match temporal_serai.session(network.into()).await { + Ok(Some(session)) => session, + Ok(None) => return Ok(None), + Err(e) => return Err(e), + }; + + if sessions.get(&network) == Some(&session) { + Ok(None) + } else { + match temporal_serai.active_network_validators(network.into()).await { + Ok(validators) => Ok(Some(( + network, + session, + validators.into_iter().map(peer_id_from_public).collect(), + ))), + Err(e) => Err(e), + } + } + }); + } + while let Some(session_change) = futures.next().await { + if let Some(session_change) = session_change? { + session_changes.push(session_change); + } + } + } + + Ok(session_changes) + } + + fn incorporate_session_changes( + &mut self, + session_changes: Vec<(ExternalNetworkId, Session, HashSet)>, + ) { + let mut removed = HashSet::new(); + let mut added = HashSet::new(); + + for (network, session, validators) in session_changes { + // Remove the existing validators + for validator in self.by_network.remove(&network).unwrap_or_else(HashSet::new) { + // Get all networks this validator is in + let mut networks = self.validators.remove(&validator).unwrap(); + // Remove this one + networks.remove(&network); + if !networks.is_empty() { + // Insert the networks back if the validator was present in other networks + self.validators.insert(validator, networks); + } else { + // Because this validator is no longer present in any network, mark them as removed + /* + This isn't accurate. The validator isn't present in the latest session for this + network. The validator was present in the prior session which has yet to retire. Our + lack of explicit inclusion for both the prior session and the current session causes + only the validators mutually present in both sessions to be responsible for all actions + still ongoing as the prior validator set retires. + + TODO: Fix this + */ + removed.insert(validator); + } + } + + // Add the new validators + for validator in validators.iter().copied() { + self.validators.entry(validator).or_insert_with(HashSet::new).insert(network); + added.insert(validator); + } + self.by_network.insert(network, validators); + + // Update the session we have populated + self.sessions.insert(network, session); + } + + // Only flag validators for removal if they weren't simultaneously added by these changes + removed.retain(|validator| !added.contains(validator)); + // Send the changes, dropping the error + // This lets the caller opt-out of change notifications by dropping the receiver + let _: Result<_, _> = self.changes.send(Changes { removed, added }); + } + + /// Update the view of the validators. 
+  pub(crate) async fn update(&mut self) -> Result<(), SeraiError> {
+    let session_changes = Self::session_changes(&*self.serai, &self.sessions).await?;
+    self.incorporate_session_changes(session_changes);
+    Ok(())
+  }
+
+  pub(crate) fn by_network(&self) -> &HashMap<ExternalNetworkId, HashSet<PeerId>> {
+    &self.by_network
+  }
+
+  pub(crate) fn networks(&self, peer_id: &PeerId) -> Option<&HashSet<ExternalNetworkId>> {
+    self.validators.get(peer_id)
+  }
+}
+
+/// A task which updates a set of validators.
+///
+/// The validators managed by this task will have their exclusive lock held for a minimal amount
+/// of time while the update occurs to minimize the disruption to the services relying on it.
+pub(crate) struct UpdateValidatorsTask {
+  validators: Arc<RwLock<Validators>>,
+}
+
+impl UpdateValidatorsTask {
+  /// Spawn a new instance of the UpdateValidatorsTask.
+  ///
+  /// This returns a reference to the Validators it updates after spawning itself.
+  pub(crate) fn spawn(
+    serai: Arc<Serai>,
+  ) -> (Arc<RwLock<Validators>>, mpsc::UnboundedReceiver<Changes>) {
+    // The validators which will be updated
+    let (validators, changes) = Validators::new(serai);
+    let validators = Arc::new(RwLock::new(validators));
+
+    // Define the task
+    let (update_validators_task, update_validators_task_handle) = Task::new();
+    // Forget the handle, as dropping the handle would stop the task
+    core::mem::forget(update_validators_task_handle);
+    // Spawn the task
+    tokio::spawn(
+      (Self { validators: validators.clone() }).continually_run(update_validators_task, vec![]),
+    );
+
+    // Return the validators
+    (validators, changes)
+  }
+}
+
+impl ContinuallyRan for UpdateValidatorsTask {
+  // Only run every minute, not the default of every five seconds
+  const DELAY_BETWEEN_ITERATIONS: u64 = 60;
+  const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
+
+  type Error = SeraiError;
+
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
+    async move {
+      let session_changes = {
+        let validators = self.validators.read().await;
+        Validators::session_changes(validators.serai.clone(), validators.sessions.clone()).await?
+      };
+      self.validators.write().await.incorporate_session_changes(session_changes);
+      Ok(true)
+    }
+  }
+}
diff --git a/coordinator/p2p/src/heartbeat.rs b/coordinator/p2p/src/heartbeat.rs
new file mode 100644
index 00000000..7691abbd
--- /dev/null
+++ b/coordinator/p2p/src/heartbeat.rs
@@ -0,0 +1,151 @@
+use core::future::Future;
+use std::time::{Duration, SystemTime};
+
+use serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet};
+
+use futures_lite::FutureExt;
+
+use tributary_sdk::{ReadWrite, TransactionTrait, Block, Tributary, TributaryReader};
+
+use serai_db::*;
+use serai_task::ContinuallyRan;
+
+use crate::{Heartbeat, Peer, P2p};
+
+// Amount of blocks in a minute
+const BLOCKS_PER_MINUTE: usize =
+  (60 / (tributary_sdk::tendermint::TARGET_BLOCK_TIME / 1000)) as usize;
+
+/// The minimum amount of blocks to include within a batch (or to expect were included), assuming
+/// there are blocks to include.
+///
+/// This decides the size limit of the Batch (the Block size limit multiplied by the minimum
+/// amount of blocks we'll send). The actual amount of blocks sent will be the amount which fits
+/// within the size limit.
+pub const MIN_BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;
+
+/// The size limit for a batch of blocks sent in response to a Heartbeat.
+///
+/// This estimates the size of a commit as `32 + (MAX_VALIDATORS * 128)`.
At the time of writing, a +/// commit is `8 + (validators * 32) + (32 + (validators * 32))` (for the time, list of validators, +/// and aggregate signature). Accordingly, this should be a safe over-estimate. +pub const BATCH_SIZE_LIMIT: usize = MIN_BLOCKS_PER_BATCH * + (tributary_sdk::BLOCK_SIZE_LIMIT + 32 + ((MAX_KEY_SHARES_PER_SET as usize) * 128)); + +/// Sends a heartbeat to other validators on regular intervals informing them of our Tributary's +/// tip. +/// +/// If the other validator has more blocks then we do, they're expected to inform us. This forms +/// the sync protocol for our Tributaries. +pub(crate) struct HeartbeatTask { + pub(crate) set: ExternalValidatorSet, + pub(crate) tributary: Tributary, + pub(crate) reader: TributaryReader, + pub(crate) p2p: P, +} + +impl ContinuallyRan for HeartbeatTask { + type Error = String; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + // If our blockchain hasn't had a block in the past minute, trigger the heartbeat protocol + const TIME_TO_TRIGGER_SYNCING: Duration = Duration::from_secs(60); + + let mut tip = self.reader.tip(); + let time_since = { + let block_time = if let Some(time_of_block) = self.reader.time_of_block(&tip) { + SystemTime::UNIX_EPOCH + Duration::from_secs(time_of_block) + } else { + // If we couldn't fetch this block's time, assume it's old + // We don't want to declare its unix time as 0 and claim it's 50+ years old though + log::warn!( + "heartbeat task couldn't fetch the time of a block, flagging it as a minute old" + ); + SystemTime::now() - TIME_TO_TRIGGER_SYNCING + }; + SystemTime::now().duration_since(block_time).unwrap_or(Duration::ZERO) + }; + let mut tip_is_stale = false; + + let mut synced_block = false; + if TIME_TO_TRIGGER_SYNCING <= time_since { + log::warn!( + "last known tributary block for {:?} was {} seconds ago", + self.set, + time_since.as_secs() + ); + + // This requests all peers for this network, without differentiating by session + // This should be fine as most validators should overlap across sessions + 'peer: for peer in self.p2p.peers(self.set.network).await { + loop { + // Create the request for blocks + if tip_is_stale { + tip = self.reader.tip(); + tip_is_stale = false; + } + // Necessary due to https://github.com/rust-lang/rust/issues/100013 + let Some(blocks) = peer + .send_heartbeat(Heartbeat { set: self.set, latest_block_hash: tip }) + .boxed() + .await + else { + continue 'peer; + }; + + // This is the final batch if it has less than the maximum amount of blocks + // (signifying there weren't more blocks after this to fill the batch with) + let final_batch = blocks.len() < MIN_BLOCKS_PER_BATCH; + + // Sync each block + for block_with_commit in blocks { + let Ok(block) = Block::read(&mut block_with_commit.block.as_slice()) else { + // TODO: Disconnect/slash this peer + log::warn!("received invalid Block inside response to heartbeat"); + continue 'peer; + }; + + // Attempt to sync the block + if !self.tributary.sync_block(block, block_with_commit.commit).await { + // The block may be invalid or stale if we added a block elsewhere + if (!tip_is_stale) && (tip != self.reader.tip()) { + // Since the Tributary's tip advanced on its own, return + return Ok(false); + } + + // Since this block was invalid or stale in a way non-trivial to detect, try to + // sync with the next peer + continue 'peer; + } + + // Because we synced a block, flag the tip as stale + tip_is_stale = true; + // And that we did sync a block + synced_block = true; + } + + // If this was the final 
batch, move on from this peer
+            // We could assume they were honest and we are done syncing the chain, but this is a
+            // bit more robust
+            if final_batch {
+              continue 'peer;
+            }
+          }
+        }
+
+        // This will cause the task to be run less and less often, ensuring we aren't spamming the
+        // net if we legitimately aren't making progress
+        if !synced_block {
+          Err(format!(
+            "tried to sync blocks for {:?} since we haven't seen one in {} seconds but didn't",
+            self.set,
+            time_since.as_secs(),
+          ))?;
+        }
+      }
+
+      Ok(synced_block)
+    }
+  }
+}
diff --git a/coordinator/p2p/src/lib.rs b/coordinator/p2p/src/lib.rs
new file mode 100644
index 00000000..68536b9d
--- /dev/null
+++ b/coordinator/p2p/src/lib.rs
@@ -0,0 +1,204 @@
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![doc = include_str!("../README.md")]
+#![deny(missing_docs)]
+
+use core::future::Future;
+use std::collections::HashMap;
+
+use borsh::{BorshSerialize, BorshDeserialize};
+
+use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet};
+
+use serai_db::Db;
+use tributary_sdk::{ReadWrite, TransactionTrait, Tributary, TributaryReader};
+use serai_cosign::{SignedCosign, Cosigning};
+
+use tokio::sync::{mpsc, oneshot};
+
+use serai_task::{Task, ContinuallyRan};
+
+/// The heartbeat task, effecting sync of Tributaries
+pub mod heartbeat;
+use crate::heartbeat::HeartbeatTask;
+
+/// A heartbeat for a Tributary.
+#[derive(Clone, Copy, BorshSerialize, BorshDeserialize, Debug)]
+pub struct Heartbeat {
+  /// The Tributary this is the heartbeat of.
+  pub set: ExternalValidatorSet,
+  /// The hash of the latest block added to the Tributary.
+  pub latest_block_hash: [u8; 32],
+}
+
+/// A tributary block and its commit.
+#[derive(Clone, BorshSerialize, BorshDeserialize)]
+pub struct TributaryBlockWithCommit {
+  /// The serialized block.
+  pub block: Vec<u8>,
+  /// The serialized commit.
+  pub commit: Vec<u8>,
+}
+
+/// A representation of a peer.
+pub trait Peer<'a>: Send {
+  /// Send a heartbeat to this peer.
+  fn send_heartbeat(
+    &self,
+    heartbeat: Heartbeat,
+  ) -> impl Send + Future<Output = Option<Vec<TributaryBlockWithCommit>>>;
+}
+
+/// The representation of the P2P network.
+pub trait P2p:
+  Send + Sync + Clone + tributary_sdk::P2p + serai_cosign::RequestNotableCosigns
+{
+  /// The representation of a peer.
+  type Peer<'a>: Peer<'a>;
+
+  /// Fetch the peers for this network.
+  fn peers(&self, network: ExternalNetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>>;
+
+  /// Broadcast a cosign.
+  fn publish_cosign(&self, cosign: SignedCosign) -> impl Send + Future<Output = ()>;
+
+  /// A cancel-safe future for the next heartbeat received over the P2P network.
+  ///
+  /// Yields the validator set it's for, the latest block hash observed, and a channel to return
+  /// the descending blocks. This channel MUST NOT and will not have its receiver dropped before a
+  /// message is sent.
+  fn heartbeat(
+    &self,
+  ) -> impl Send + Future<Output = (Heartbeat, oneshot::Sender<Vec<TributaryBlockWithCommit>>)>;
+
+  /// A cancel-safe future for the next request for the notable cosigns of a global session.
+  ///
+  /// Yields the global session the request is for and a channel to return the notable cosigns.
+  /// This channel MUST NOT and will not have its receiver dropped before a message is sent.
+  fn notable_cosigns_request(
+    &self,
+  ) -> impl Send + Future<Output = ([u8; 32], oneshot::Sender<Vec<SignedCosign>>)>;
+
+  /// A cancel-safe future for the next message regarding a Tributary.
+  ///
+  /// Yields the message's Tributary's genesis block hash and the message.
+  fn tributary_message(&self) -> impl Send + Future<Output = ([u8; 32], Vec<u8>)>;
+
+  /// A cancel-safe future for the next cosign received.
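+  ///
+  /// Cancel-safety matters for these futures because consumers (such as this crate's `run`
+  /// function) poll them inside a `tokio::select!` loop, recreating the futures on every
+  /// iteration. A rough sketch of such a consumer (`p2p` and `send_cosigns` are assumed
+  /// bindings):
+  /// ```ignore
+  /// loop {
+  ///   tokio::select! {
+  ///     cosign = p2p.cosign() => {
+  ///       // Forward the cosign to whichever task merges network and locally-produced cosigns
+  ///       let _ = send_cosigns.send(cosign);
+  ///     }
+  ///     // ... branches for heartbeats, notable cosign requests, Tributary messages ...
+  ///   }
+  /// }
+  /// ```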
+ fn cosign(&self) -> impl Send + Future; +} + +fn handle_notable_cosigns_request( + db: &D, + global_session: [u8; 32], + channel: oneshot::Sender>, +) { + let cosigns = Cosigning::::notable_cosigns(db, global_session); + channel.send(cosigns).expect("channel listening for cosign oneshot response was dropped?"); +} + +fn handle_heartbeat( + reader: &TributaryReader, + mut latest_block_hash: [u8; 32], + channel: oneshot::Sender>, +) { + let mut res_size = 8; + let mut res = vec![]; + // This former case should be covered by this latter case + while (res.len() < heartbeat::MIN_BLOCKS_PER_BATCH) || (res_size < heartbeat::BATCH_SIZE_LIMIT) { + let Some(block_after) = reader.block_after(&latest_block_hash) else { break }; + + // These `break` conditions should only occur under edge cases, such as if we're actively + // deleting this Tributary due to being done with it + let Some(block) = reader.block(&block_after) else { break }; + let block = block.serialize(); + let Some(commit) = reader.commit(&block_after) else { break }; + res_size += 8 + block.len() + 8 + commit.len(); + res.push(TributaryBlockWithCommit { block, commit }); + + latest_block_hash = block_after; + } + channel + .send(res) + .map_err(|_| ()) + .expect("channel listening for heartbeat oneshot response was dropped?"); +} + +/// Run the P2P instance. +/// +/// `add_tributary`'s and `retire_tributary's senders, along with `send_cosigns`'s receiver, must +/// never be dropped. `retire_tributary` is not required to only be instructed with added +/// Tributaries. +pub async fn run( + db: impl Db, + p2p: P, + mut add_tributary: mpsc::UnboundedReceiver<(ExternalValidatorSet, Tributary)>, + mut retire_tributary: mpsc::UnboundedReceiver, + send_cosigns: mpsc::UnboundedSender, +) { + let mut readers = HashMap::>::new(); + let mut tributaries = HashMap::<[u8; 32], mpsc::UnboundedSender>>::new(); + let mut heartbeat_tasks = HashMap::::new(); + + loop { + tokio::select! 
{ + tributary = add_tributary.recv() => { + let (set, tributary) = tributary.expect("add_tributary send was dropped"); + let reader = tributary.reader(); + readers.insert(set, reader.clone()); + + let (heartbeat_task_def, heartbeat_task) = Task::new(); + tokio::spawn( + (HeartbeatTask { + set, + tributary: tributary.clone(), + reader: reader.clone(), + p2p: p2p.clone(), + }).continually_run(heartbeat_task_def, vec![]) + ); + heartbeat_tasks.insert(set, heartbeat_task); + + let (tributary_message_send, mut tributary_message_recv) = mpsc::unbounded_channel(); + tributaries.insert(tributary.genesis(), tributary_message_send); + // For as long as this sender exists, handle the messages from it on a dedicated task + tokio::spawn(async move { + while let Some(message) = tributary_message_recv.recv().await { + tributary.handle_message(&message).await; + } + }); + } + set = retire_tributary.recv() => { + let set = set.expect("retire_tributary send was dropped"); + let Some(reader) = readers.remove(&set) else { continue }; + tributaries.remove(&reader.genesis()).expect("tributary reader but no tributary"); + heartbeat_tasks.remove(&set).expect("tributary but no heartbeat task"); + } + + (heartbeat, channel) = p2p.heartbeat() => { + if let Some(reader) = readers.get(&heartbeat.set) { + let reader = reader.clone(); // This is a cheap clone + // We spawn this on a task due to the DB reads needed + tokio::spawn(async move { + handle_heartbeat(&reader, heartbeat.latest_block_hash, channel) + }); + } + } + (global_session, channel) = p2p.notable_cosigns_request() => { + tokio::spawn({ + let db = db.clone(); + async move { handle_notable_cosigns_request(&db, global_session, channel) } + }); + } + (tributary, message) = p2p.tributary_message() => { + if let Some(tributary) = tributaries.get(&tributary) { + tributary.send(message).expect("tributary message recv was dropped?"); + } + } + cosign = p2p.cosign() => { + // We don't call `Cosigning::intake_cosign` here as that can only be called from a single + // location. We also need to intake the cosigns we produce, which means we need to merge + // these streams (signing, network) somehow. That's done with this mpsc channel + send_cosigns.send(cosign).expect("channel receiving cosigns was dropped"); + } + } + } +} diff --git a/coordinator/src/cosign_evaluator.rs b/coordinator/src/cosign_evaluator.rs deleted file mode 100644 index 84436008..00000000 --- a/coordinator/src/cosign_evaluator.rs +++ /dev/null @@ -1,333 +0,0 @@ -use core::time::Duration; -use std::{ - sync::Arc, - collections::{HashSet, HashMap}, -}; - -use tokio::{ - sync::{mpsc, Mutex, RwLock}, - time::sleep, -}; - -use borsh::BorshSerialize; -use sp_application_crypto::RuntimePublic; -use serai_client::{ - primitives::{ExternalNetworkId, Signature, EXTERNAL_NETWORKS}, - validator_sets::primitives::{ExternalValidatorSet, Session}, - Serai, SeraiError, TemporalSerai, -}; - -use serai_db::{Get, DbTxn, Db, create_db}; - -use processor_messages::coordinator::cosign_block_msg; - -use crate::{ - p2p::{CosignedBlock, GossipMessageKind, P2p}, - substrate::LatestCosignedBlock, -}; - -create_db! 
{ - CosignDb { - ReceivedCosign: (set: ExternalValidatorSet, block: [u8; 32]) -> CosignedBlock, - LatestCosign: (network: ExternalNetworkId) -> CosignedBlock, - DistinctChain: (set: ExternalValidatorSet) -> (), - } -} - -pub struct CosignEvaluator { - db: Mutex, - serai: Arc, - stakes: RwLock>>, - latest_cosigns: RwLock>, -} - -impl CosignEvaluator { - async fn update_latest_cosign(&self) { - let stakes_lock = self.stakes.read().await; - // If we haven't gotten the stake data yet, return - let Some(stakes) = stakes_lock.as_ref() else { return }; - - let total_stake = stakes.values().copied().sum::(); - - let latest_cosigns = self.latest_cosigns.read().await; - let mut highest_block = 0; - for cosign in latest_cosigns.values() { - let mut networks = HashSet::new(); - for (network, sub_cosign) in &*latest_cosigns { - if sub_cosign.block_number >= cosign.block_number { - networks.insert(network); - } - } - let sum_stake = - networks.into_iter().map(|network| stakes.get(network).unwrap_or(&0)).sum::(); - let needed_stake = ((total_stake * 2) / 3) + 1; - if (total_stake == 0) || (sum_stake > needed_stake) { - highest_block = highest_block.max(cosign.block_number); - } - } - - let mut db_lock = self.db.lock().await; - let mut txn = db_lock.txn(); - if highest_block > LatestCosignedBlock::latest_cosigned_block(&txn) { - log::info!("setting latest cosigned block to {}", highest_block); - LatestCosignedBlock::set(&mut txn, &highest_block); - } - txn.commit(); - } - - async fn update_stakes(&self) -> Result<(), SeraiError> { - let serai = self.serai.as_of_latest_finalized_block().await?; - - let mut stakes = HashMap::new(); - for network in EXTERNAL_NETWORKS { - // Use if this network has published a Batch for a short-circuit of if they've ever set a key - let set_key = serai.in_instructions().last_batch_for_network(network).await?.is_some(); - if set_key { - stakes.insert( - network, - serai - .validator_sets() - .total_allocated_stake(network.into()) - .await? - .expect("network which published a batch didn't have a stake set") - .0, - ); - } - } - - // Since we've successfully built stakes, set it - *self.stakes.write().await = Some(stakes); - - self.update_latest_cosign().await; - - Ok(()) - } - - // Uses Err to signify a message should be retried - async fn handle_new_cosign(&self, cosign: CosignedBlock) -> Result<(), SeraiError> { - // If we already have this cosign or a newer cosign, return - if let Some(latest) = self.latest_cosigns.read().await.get(&cosign.network) { - if latest.block_number >= cosign.block_number { - return Ok(()); - } - } - - // If this an old cosign (older than a day), drop it - let latest_block = self.serai.latest_finalized_block().await?; - if (cosign.block_number + (24 * 60 * 60 / 6)) < latest_block.number() { - log::debug!("received old cosign supposedly signed by {:?}", cosign.network); - return Ok(()); - } - - let Some(block) = self.serai.finalized_block_by_number(cosign.block_number).await? else { - log::warn!("received cosign with a block number which doesn't map to a block"); - return Ok(()); - }; - - async fn set_with_keys_fn( - serai: &TemporalSerai<'_>, - network: ExternalNetworkId, - ) -> Result, SeraiError> { - let Some(latest_session) = serai.validator_sets().session(network.into()).await? 
else { - log::warn!("received cosign from {:?}, which doesn't yet have a session", network); - return Ok(None); - }; - let prior_session = Session(latest_session.0.saturating_sub(1)); - Ok(Some( - if serai - .validator_sets() - .keys(ExternalValidatorSet { network, session: prior_session }) - .await? - .is_some() - { - ExternalValidatorSet { network, session: prior_session } - } else { - ExternalValidatorSet { network, session: latest_session } - }, - )) - } - - // Get the key for this network as of the prior block - // If we have two chains, this value may be different across chains depending on if one chain - // included the set_keys and one didn't - // Because set_keys will force a cosign, it will force detection of distinct blocks - // re: set_keys using keys prior to set_keys (assumed amenable to all) - let serai = self.serai.as_of(block.header.parent_hash.into()); - - let Some(set_with_keys) = set_with_keys_fn(&serai, cosign.network).await? else { - return Ok(()); - }; - let Some(keys) = serai.validator_sets().keys(set_with_keys).await? else { - log::warn!("received cosign for a block we didn't have keys for"); - return Ok(()); - }; - - if !keys - .0 - .verify(&cosign_block_msg(cosign.block_number, cosign.block), &Signature(cosign.signature)) - { - log::warn!("received cosigned block with an invalid signature"); - return Ok(()); - } - - log::info!( - "received cosign for block {} ({}) by {:?}", - block.number(), - hex::encode(cosign.block), - cosign.network - ); - - // Save this cosign to the DB - { - let mut db = self.db.lock().await; - let mut txn = db.txn(); - ReceivedCosign::set(&mut txn, set_with_keys, cosign.block, &cosign); - LatestCosign::set(&mut txn, set_with_keys.network, &(cosign)); - txn.commit(); - } - - if cosign.block != block.hash() { - log::error!( - "received cosign for a distinct block at {}. we have {}. 
cosign had {}", - cosign.block_number, - hex::encode(block.hash()), - hex::encode(cosign.block) - ); - - let serai = self.serai.as_of(latest_block.hash()); - - let mut db = self.db.lock().await; - // Save this set as being on a different chain - let mut txn = db.txn(); - DistinctChain::set(&mut txn, set_with_keys, &()); - txn.commit(); - - let mut total_stake = 0; - let mut total_on_distinct_chain = 0; - for network in EXTERNAL_NETWORKS { - // Get the current set for this network - let set_with_keys = { - let mut res; - while { - res = set_with_keys_fn(&serai, network).await; - res.is_err() - } { - log::error!( - "couldn't get the set with keys when checking for a distinct chain: {:?}", - res - ); - tokio::time::sleep(core::time::Duration::from_secs(3)).await; - } - res.unwrap() - }; - - // Get its stake - // Doesn't use the stakes inside self to prevent deadlocks re: multi-lock acquisition - if let Some(set_with_keys) = set_with_keys { - let stake = { - let mut res; - while { - res = - serai.validator_sets().total_allocated_stake(set_with_keys.network.into()).await; - res.is_err() - } { - log::error!( - "couldn't get total allocated stake when checking for a distinct chain: {:?}", - res - ); - tokio::time::sleep(core::time::Duration::from_secs(3)).await; - } - res.unwrap() - }; - - if let Some(stake) = stake { - total_stake += stake.0; - - if DistinctChain::get(&*db, set_with_keys).is_some() { - total_on_distinct_chain += stake.0; - } - } - } - } - - // See https://github.com/serai-dex/serai/issues/339 for the reasoning on 17% - if (total_stake * 17 / 100) <= total_on_distinct_chain { - panic!("17% of validator sets (by stake) have co-signed a distinct chain"); - } - } else { - { - let mut latest_cosigns = self.latest_cosigns.write().await; - latest_cosigns.insert(cosign.network, cosign); - } - self.update_latest_cosign().await; - } - - Ok(()) - } - - #[allow(clippy::new_ret_no_self)] - pub fn new(db: D, p2p: P, serai: Arc) -> mpsc::UnboundedSender { - let mut latest_cosigns = HashMap::new(); - for network in EXTERNAL_NETWORKS { - if let Some(cosign) = LatestCosign::get(&db, network) { - latest_cosigns.insert(network, cosign); - } - } - - let evaluator = Arc::new(Self { - db: Mutex::new(db), - serai, - stakes: RwLock::new(None), - latest_cosigns: RwLock::new(latest_cosigns), - }); - - // Spawn a task to update stakes regularly - tokio::spawn({ - let evaluator = evaluator.clone(); - async move { - loop { - // Run this until it passes - while evaluator.update_stakes().await.is_err() { - log::warn!("couldn't update stakes in the cosign evaluator"); - // Try again in 10 seconds - sleep(Duration::from_secs(10)).await; - } - // Run it every 10 minutes as we don't need the exact stake data for this to be valid - sleep(Duration::from_secs(10 * 60)).await; - } - } - }); - - // Spawn a task to receive cosigns and handle them - let (send, mut recv) = mpsc::unbounded_channel(); - tokio::spawn({ - let evaluator = evaluator.clone(); - async move { - while let Some(msg) = recv.recv().await { - while evaluator.handle_new_cosign(msg).await.is_err() { - // Try again in 10 seconds - sleep(Duration::from_secs(10)).await; - } - } - } - }); - - // Spawn a task to rebroadcast the most recent cosigns - tokio::spawn({ - async move { - loop { - let cosigns = evaluator.latest_cosigns.read().await.values().copied().collect::>(); - for cosign in cosigns { - let mut buf = vec![]; - cosign.serialize(&mut buf).unwrap(); - P2p::broadcast(&p2p, GossipMessageKind::CosignedBlock, buf).await; - } - 
sleep(Duration::from_secs(60)).await; - } - } - }); - - // Return the channel to send cosigns - send - } -} diff --git a/coordinator/src/db.rs b/coordinator/src/db.rs index 934e5050..108e0f32 100644 --- a/coordinator/src/db.rs +++ b/coordinator/src/db.rs @@ -1,134 +1,148 @@ -use blake2::{ - digest::{consts::U32, Digest}, - Blake2b, -}; +use std::{path::Path, fs}; + +pub(crate) use serai_db::{Get, DbTxn, Db as DbTrait}; +use serai_db::{create_db, db_channel}; + +use dkg::Participant; -use scale::Encode; -use borsh::{BorshSerialize, BorshDeserialize}; use serai_client::{ - in_instructions::primitives::{Batch, SignedBatch}, primitives::ExternalNetworkId, - validator_sets::primitives::{ExternalValidatorSet, Session}, + validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair}, }; -pub use serai_db::*; +use serai_cosign::SignedCosign; +use serai_coordinator_substrate::NewSetInformation; +use serai_coordinator_tributary::Transaction; -use ::tributary::ReadWrite; -use crate::tributary::{TributarySpec, Transaction, scanner::RecognizedIdType}; +#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))] +pub(crate) type Db = std::sync::Arc; +#[cfg(feature = "rocksdb")] +pub(crate) type Db = serai_db::RocksDB; -create_db!( - MainDb { - HandledMessageDb: (network: ExternalNetworkId) -> u64, - ActiveTributaryDb: () -> Vec, - RetiredTributaryDb: (set: ExternalValidatorSet) -> (), - FirstPreprocessDb: ( - network: ExternalNetworkId, - id_type: RecognizedIdType, - id: &[u8] - ) -> Vec>, - LastReceivedBatchDb: (network: ExternalNetworkId) -> u32, - ExpectedBatchDb: (network: ExternalNetworkId, id: u32) -> [u8; 32], - BatchDb: (network: ExternalNetworkId, id: u32) -> SignedBatch, - LastVerifiedBatchDb: (network: ExternalNetworkId) -> u32, - HandoverBatchDb: (set: ExternalValidatorSet) -> u32, - LookupHandoverBatchDb: (network: ExternalNetworkId, batch: u32) -> Session, - QueuedBatchesDb: (set: ExternalValidatorSet) -> Vec - } -); - -impl ActiveTributaryDb { - pub fn active_tributaries(getter: &G) -> (Vec, Vec) { - let bytes = Self::get(getter).unwrap_or_default(); - let mut bytes_ref: &[u8] = bytes.as_ref(); - - let mut tributaries = vec![]; - while !bytes_ref.is_empty() { - tributaries.push(TributarySpec::deserialize_reader(&mut bytes_ref).unwrap()); - } - - (bytes, tributaries) +#[allow(unused_variables, unreachable_code)] +fn db(path: &str) -> Db { + { + let path: &Path = path.as_ref(); + // This may error if this path already exists, which we shouldn't propagate/panic on. If this + // is a problem (such as we don't have the necessary permissions to write to this path), we + // expect the following DB opening to error. 
+ let _: Result<_, _> = fs::create_dir_all(path.parent().unwrap()); } - pub fn add_participating_in_tributary(txn: &mut impl DbTxn, spec: &TributarySpec) { - let (mut existing_bytes, existing) = ActiveTributaryDb::active_tributaries(txn); - for tributary in &existing { - if tributary == spec { - return; - } - } + #[cfg(all(feature = "parity-db", feature = "rocksdb"))] + panic!("built with parity-db and rocksdb"); + #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))] + let db = serai_db::new_parity_db(path); + #[cfg(feature = "rocksdb")] + let db = serai_db::new_rocksdb(path); + db +} - spec.serialize(&mut existing_bytes).unwrap(); - ActiveTributaryDb::set(txn, &existing_bytes); - } +pub(crate) fn coordinator_db() -> Db { + let root_path = serai_env::var("DB_PATH").expect("path to DB wasn't specified"); + db(&format!("{root_path}/coordinator/db")) +} - pub fn retire_tributary(txn: &mut impl DbTxn, set: ExternalValidatorSet) { - let mut active = Self::active_tributaries(txn).1; - for i in 0 .. active.len() { - if active[i].set() == set { - active.remove(i); - break; - } - } +fn tributary_db_folder(set: ExternalValidatorSet) -> String { + let root_path = serai_env::var("DB_PATH").expect("path to DB wasn't specified"); + let network = match set.network { + ExternalNetworkId::Bitcoin => "Bitcoin", + ExternalNetworkId::Ethereum => "Ethereum", + ExternalNetworkId::Monero => "Monero", + }; + format!("{root_path}/tributary-{network}-{}", set.session.0) +} - let mut bytes = vec![]; - for active in active { - active.serialize(&mut bytes).unwrap(); - } - Self::set(txn, &bytes); - RetiredTributaryDb::set(txn, set, &()); +pub(crate) fn tributary_db(set: ExternalValidatorSet) -> Db { + db(&format!("{}/db", tributary_db_folder(set))) +} + +pub(crate) fn prune_tributary_db(set: ExternalValidatorSet) { + log::info!("pruning data directory for tributary {set:?}"); + let db = tributary_db_folder(set); + if fs::exists(&db).expect("couldn't check if tributary DB exists") { + fs::remove_dir_all(db).unwrap(); } } -impl FirstPreprocessDb { - pub fn save_first_preprocess( - txn: &mut impl DbTxn, - network: ExternalNetworkId, - id_type: RecognizedIdType, - id: &[u8], - preprocess: &Vec>, - ) { - if let Some(existing) = FirstPreprocessDb::get(txn, network, id_type, id) { - assert_eq!(&existing, preprocess, "saved a distinct first preprocess"); - return; +create_db! { + Coordinator { + // The currently active Tributaries + ActiveTributaries: () -> Vec, + // The latest Tributary to have been retired for a network + // Since Tributaries are retired sequentially, this is informative to if any Tributary has been + // retired + RetiredTributary: (network: ExternalNetworkId) -> Session, + // The last handled message from a Processor + LastProcessorMessage: (network: ExternalNetworkId) -> u64, + // Cosigns we produced and tried to intake yet incurred an error while doing so + ErroneousCosigns: () -> Vec, + // The keys to confirm and set on the Serai network + KeysToConfirm: (set: ExternalValidatorSet) -> KeyPair, + // The key was set on the Serai network + KeySet: (set: ExternalValidatorSet) -> (), + } +} + +db_channel! { + Coordinator { + // Cosigns we produced + SignedCosigns: () -> SignedCosign, + // Tributaries to clean up upon reboot + TributaryCleanup: () -> ExternalValidatorSet, + } +} + +mod _internal_db { + use super::*; + + db_channel! 
{ + Coordinator { + // Tributary transactions to publish from the Processor messages + TributaryTransactionsFromProcessorMessages: (set: ExternalValidatorSet) -> Transaction, + // Tributary transactions to publish from the DKG confirmation task + TributaryTransactionsFromDkgConfirmation: (set: ExternalValidatorSet) -> Transaction, + // Participants to remove + RemoveParticipant: (set: ExternalValidatorSet) -> Participant, } - FirstPreprocessDb::set(txn, network, id_type, id, preprocess); } } -impl ExpectedBatchDb { - pub fn save_expected_batch(txn: &mut impl DbTxn, batch: &Batch) { - LastReceivedBatchDb::set(txn, batch.network, &batch.id); - Self::set( - txn, - batch.network, - batch.id, - &Blake2b::::digest(batch.instructions.encode()).into(), - ); - } -} - -impl HandoverBatchDb { - pub fn set_handover_batch(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: u32) { - Self::set(txn, set, &batch); - LookupHandoverBatchDb::set(txn, set.network, batch, &set.session); - } -} -impl QueuedBatchesDb { - pub fn queue(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: &Transaction) { - let mut batches = Self::get(txn, set).unwrap_or_default(); - batch.write(&mut batches).unwrap(); - Self::set(txn, set, &batches); - } - - pub fn take(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec { - let batches_vec = Self::get(txn, set).unwrap_or_default(); - txn.del(Self::key(set)); - - let mut batches: &[u8] = &batches_vec; - let mut res = vec![]; - while !batches.is_empty() { - res.push(Transaction::read(&mut batches).unwrap()); +pub(crate) struct TributaryTransactionsFromProcessorMessages; +impl TributaryTransactionsFromProcessorMessages { + pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, tx: &Transaction) { + // If this set has yet to be retired, send this transaction + if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) { + _internal_db::TributaryTransactionsFromProcessorMessages::send(txn, set, tx); } - res + } + pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option { + _internal_db::TributaryTransactionsFromProcessorMessages::try_recv(txn, set) + } +} + +pub(crate) struct TributaryTransactionsFromDkgConfirmation; +impl TributaryTransactionsFromDkgConfirmation { + pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, tx: &Transaction) { + // If this set has yet to be retired, send this transaction + if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) { + _internal_db::TributaryTransactionsFromDkgConfirmation::send(txn, set, tx); + } + } + pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option { + _internal_db::TributaryTransactionsFromDkgConfirmation::try_recv(txn, set) + } +} + +pub(crate) struct RemoveParticipant; +impl RemoveParticipant { + pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, participant: Participant) { + // If this set has yet to be retired, send this transaction + if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) { + _internal_db::RemoveParticipant::send(txn, set, &participant); + } + } + pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option { + _internal_db::RemoveParticipant::try_recv(txn, set) } } diff --git a/coordinator/src/dkg_confirmation.rs b/coordinator/src/dkg_confirmation.rs new file mode 100644 index 00000000..a28fb40f --- /dev/null +++ b/coordinator/src/dkg_confirmation.rs @@ -0,0 +1,437 @@ +use 
core::{ops::Deref, future::Future}; +use std::{boxed::Box, collections::HashMap}; + +use zeroize::Zeroizing; +use rand_core::OsRng; +use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; +use frost_schnorrkel::{ + frost::{ + dkg::{Participant, musig::musig}, + FrostError, + sign::*, + }, + Schnorrkel, +}; + +use serai_db::{DbTxn, Db as DbTrait}; + +use serai_client::{ + primitives::SeraiAddress, + validator_sets::primitives::{ExternalValidatorSet, musig_context, set_keys_message}, +}; + +use serai_task::{DoesNotError, ContinuallyRan}; + +use serai_coordinator_substrate::{NewSetInformation, Keys}; +use serai_coordinator_tributary::{Transaction, DkgConfirmationMessages}; + +use crate::{KeysToConfirm, KeySet, TributaryTransactionsFromDkgConfirmation}; + +fn schnorrkel() -> Schnorrkel { + Schnorrkel::new(b"substrate") // TODO: Pull the constant for this +} + +fn our_i( + set: &NewSetInformation, + key: &Zeroizing<::F>, + data: &HashMap>, +) -> Participant { + let public = SeraiAddress((Ristretto::generator() * key.deref()).to_bytes()); + + let mut our_i = None; + for participant in data.keys() { + let validator_index = usize::from(u16::from(*participant) - 1); + let (validator, _weight) = set.validators[validator_index]; + if validator == public { + our_i = Some(*participant); + } + } + our_i.unwrap() +} + +// Take a HashMap of participations with non-contiguous Participants and convert them to a +// contiguous sequence. +// +// The input data is expected to not include our own data, which also won't be in the output data. +// +// Returns the mapping from the contiguous Participants to the original Participants. +fn make_contiguous( + our_i: Participant, + mut data: HashMap>, + transform: impl Fn(Vec) -> std::io::Result, +) -> Result, Participant> { + assert!(!data.contains_key(&our_i)); + + let mut ordered_participants = data.keys().copied().collect::>(); + ordered_participants.sort_by_key(|participant| u16::from(*participant)); + + let mut our_i = Some(our_i); + let mut contiguous = HashMap::new(); + let mut i = 1; + for participant in ordered_participants { + // If this is the first participant after our own index, increment to account for our index + if let Some(our_i_value) = our_i { + if u16::from(participant) > u16::from(our_i_value) { + i += 1; + our_i = None; + } + } + + let contiguous_index = Participant::new(i).unwrap(); + let data = match transform(data.remove(&participant).unwrap()) { + Ok(data) => data, + Err(_) => Err(participant)?, + }; + contiguous.insert(contiguous_index, data); + i += 1; + } + Ok(contiguous) +} + +fn handle_frost_error(result: Result) -> Result { + match &result { + Ok(_) => Ok(result.unwrap()), + Err(FrostError::InvalidPreprocess(participant) | FrostError::InvalidShare(participant)) => { + Err(*participant) + } + // All of these should be unreachable + Err( + FrostError::InternalError(_) | + FrostError::InvalidParticipant(_, _) | + FrostError::InvalidSigningSet(_) | + FrostError::InvalidParticipantQuantity(_, _) | + FrostError::DuplicatedParticipant(_) | + FrostError::MissingParticipant(_), + ) => { + result.unwrap(); + unreachable!("continued execution after unwrapping Result::Err"); + } + } +} + +#[rustfmt::skip] +enum Signer { + Preprocess { attempt: u32, seed: CachedPreprocess, preprocess: [u8; 64] }, + Share { + attempt: u32, + musig_validators: Vec, + share: [u8; 32], + machine: Box>, + }, +} + +/// Performs the DKG Confirmation protocol. 
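+///
+/// At a high level: once `KeysToConfirm` is populated, a MuSig preprocess is published to the
+/// Tributary; when the Tributary reports back the preprocesses it selected, the participating
+/// validators' Tributary indices are remapped to the contiguous indices MuSig expects (via
+/// `make_contiguous` above), a share is produced and published, and the completed signature is
+/// saved via `Keys` for publication to Serai.
+///
+/// A worked example of the remapping, with hypothetical indices: if the Tributary selected
+/// preprocesses from participants 2, 5, and 9, and we're participant 5, the resulting 3-of-3
+/// MuSig uses contiguous indices 1 (originally 2), 2 (ourselves), and 3 (originally 9):
+/// ```ignore
+/// // `preprocesses` here no longer contains our own entry; `our_i` is our original index (5)
+/// // make_contiguous yields { 1 => preprocesses-of-2, 3 => preprocesses-of-9 }
+/// let contiguous = make_contiguous(our_i, preprocesses, |preprocess| {
+///   machine.read_preprocess(&mut preprocess.as_slice())
+/// });
+/// ```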
+pub(crate) struct ConfirmDkgTask { + db: CD, + + set: NewSetInformation, + tributary_db: TD, + + key: Zeroizing<::F>, + signer: Option, +} + +impl ConfirmDkgTask { + pub(crate) fn new( + db: CD, + set: NewSetInformation, + tributary_db: TD, + key: Zeroizing<::F>, + ) -> Self { + Self { db, set, tributary_db, key, signer: None } + } + + fn slash(db: &mut CD, set: ExternalValidatorSet, validator: SeraiAddress) { + let mut txn = db.txn(); + TributaryTransactionsFromDkgConfirmation::send( + &mut txn, + set, + &Transaction::RemoveParticipant { participant: validator, signed: Default::default() }, + ); + txn.commit(); + } + + fn preprocess( + db: &mut CD, + set: ExternalValidatorSet, + attempt: u32, + key: &Zeroizing<::F>, + signer: &mut Option, + ) { + // Perform the preprocess + let (machine, preprocess) = AlgorithmMachine::new( + schnorrkel(), + // We use a 1-of-1 Musig here as we don't know who will actually be in this Musig yet + musig(&musig_context(set.into()), key, &[Ristretto::generator() * key.deref()]) + .unwrap() + .into(), + ) + .preprocess(&mut OsRng); + // We take the preprocess so we can use it in a distinct machine with the actual Musig + // parameters + let seed = machine.cache(); + + let mut preprocess_bytes = [0u8; 64]; + preprocess_bytes.copy_from_slice(&preprocess.serialize()); + let preprocess = preprocess_bytes; + + let mut txn = db.txn(); + // If this attempt has already been preprocessed for, the Tributary will de-duplicate it + // This may mean the Tributary preprocess is distinct from ours, but we check for that later + TributaryTransactionsFromDkgConfirmation::send( + &mut txn, + set, + &Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed: Default::default() }, + ); + txn.commit(); + + *signer = Some(Signer::Preprocess { attempt, seed, preprocess }); + } +} + +impl ContinuallyRan for ConfirmDkgTask { + type Error = DoesNotError; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut made_progress = false; + + // If we were sent a key to set, create the signer for it + if self.signer.is_none() && KeysToConfirm::get(&self.db, self.set.set).is_some() { + // Create and publish the initial preprocess + Self::preprocess(&mut self.db, self.set.set, 0, &self.key, &mut self.signer); + + made_progress = true; + } + + // If we have keys to confirm, handle all messages from the tributary + if let Some(key_pair) = KeysToConfirm::get(&self.db, self.set.set) { + // Handle all messages from the Tributary + loop { + let mut tributary_txn = self.tributary_db.txn(); + let Some(msg) = DkgConfirmationMessages::try_recv(&mut tributary_txn, self.set.set) + else { + break; + }; + + match msg { + messages::sign::CoordinatorMessage::Reattempt { + id: messages::sign::SignId { attempt, .. }, + } => { + // Create and publish the preprocess for the specified attempt + Self::preprocess(&mut self.db, self.set.set, attempt, &self.key, &mut self.signer); + } + messages::sign::CoordinatorMessage::Preprocesses { + id: messages::sign::SignId { attempt, .. 
}, + mut preprocesses, + } => { + // Confirm the preprocess we're expected to sign with is the one we locally have + // It may be different if we rebooted and made a second preprocess for this attempt + let Some(Signer::Preprocess { attempt: our_attempt, seed, preprocess }) = + self.signer.take() + else { + // If this message is not expected, commit the txn to drop it and move on + // At some point, we'll get a Reattempt and reset + tributary_txn.commit(); + break; + }; + + // Determine the MuSig key signed with + let musig_validators = { + let mut ordered_participants = preprocesses.keys().copied().collect::>(); + ordered_participants.sort_by_key(|participant| u16::from(*participant)); + + let mut res = vec![]; + for participant in ordered_participants { + let (validator, _weight) = + self.set.validators[usize::from(u16::from(participant) - 1)]; + res.push(validator); + } + res + }; + + let musig_public_keys = musig_validators + .iter() + .map(|key| { + Ristretto::read_G(&mut key.0.as_slice()) + .expect("Serai validator had invalid public key") + }) + .collect::>(); + + let keys = musig(&musig_context(self.set.set.into()), &self.key, &musig_public_keys) + .unwrap() + .into(); + + // Rebuild the machine + let (machine, preprocess_from_cache) = + AlgorithmSignMachine::from_cache(schnorrkel(), keys, seed); + assert_eq!(preprocess.as_slice(), preprocess_from_cache.serialize().as_slice()); + + // Ensure this is a consistent signing session + let our_i = our_i(&self.set, &self.key, &preprocesses); + let consistent = (attempt == our_attempt) && + (preprocesses.remove(&our_i).unwrap().as_slice() == preprocess.as_slice()); + if !consistent { + tributary_txn.commit(); + break; + } + + // Reformat the preprocesses into the expected format for Musig + let preprocesses = match make_contiguous(our_i, preprocesses, |preprocess| { + machine.read_preprocess(&mut preprocess.as_slice()) + }) { + Ok(preprocesses) => preprocesses, + // This yields the *original participant index* + Err(participant) => { + Self::slash( + &mut self.db, + self.set.set, + self.set.validators[usize::from(u16::from(participant) - 1)].0, + ); + tributary_txn.commit(); + break; + } + }; + + // Calculate our share + let (machine, share) = match handle_frost_error( + machine.sign(preprocesses, &set_keys_message(&self.set.set, &key_pair)), + ) { + Ok((machine, share)) => (machine, share), + // This yields the *musig participant index* + Err(participant) => { + Self::slash( + &mut self.db, + self.set.set, + musig_validators[usize::from(u16::from(participant) - 1)], + ); + tributary_txn.commit(); + break; + } + }; + + // Send our share + let share = <[u8; 32]>::try_from(share.serialize()).unwrap(); + let mut txn = self.db.txn(); + TributaryTransactionsFromDkgConfirmation::send( + &mut txn, + self.set.set, + &Transaction::DkgConfirmationShare { attempt, share, signed: Default::default() }, + ); + txn.commit(); + + self.signer = Some(Signer::Share { + attempt, + musig_validators, + share, + machine: Box::new(machine), + }); + } + messages::sign::CoordinatorMessage::Shares { + id: messages::sign::SignId { attempt, .. 
}, + mut shares, + } => { + let Some(Signer::Share { attempt: our_attempt, musig_validators, share, machine }) = + self.signer.take() + else { + tributary_txn.commit(); + break; + }; + + // Ensure this is a consistent signing session + let our_i = our_i(&self.set, &self.key, &shares); + let consistent = (attempt == our_attempt) && + (shares.remove(&our_i).unwrap().as_slice() == share.as_slice()); + if !consistent { + tributary_txn.commit(); + break; + } + + // Reformat the shares into the expected format for Musig + let shares = match make_contiguous(our_i, shares, |share| { + machine.read_share(&mut share.as_slice()) + }) { + Ok(shares) => shares, + // This yields the *original participant index* + Err(participant) => { + Self::slash( + &mut self.db, + self.set.set, + self.set.validators[usize::from(u16::from(participant) - 1)].0, + ); + tributary_txn.commit(); + break; + } + }; + + match handle_frost_error(machine.complete(shares)) { + Ok(signature) => { + // Create the bitvec of the participants + let mut signature_participants; + { + use bitvec::prelude::*; + signature_participants = bitvec![u8, Lsb0; 0; 0]; + let mut i = 0; + for (validator, _) in &self.set.validators { + if Some(validator) == musig_validators.get(i) { + signature_participants.push(true); + i += 1; + } else { + signature_participants.push(false); + } + } + } + + // This is safe to call multiple times as it'll just change which *valid* + // signature to publish + let mut txn = self.db.txn(); + Keys::set( + &mut txn, + self.set.set, + key_pair.clone(), + signature_participants, + signature.into(), + ); + txn.commit(); + } + // This yields the *musig participant index* + Err(participant) => { + Self::slash( + &mut self.db, + self.set.set, + musig_validators[usize::from(u16::from(participant) - 1)], + ); + tributary_txn.commit(); + break; + } + } + } + } + + // Because we successfully handled this message, note we made proress + made_progress = true; + tributary_txn.commit(); + } + } + + // Check if the key has been set on Serai + if KeysToConfirm::get(&self.db, self.set.set).is_some() && + KeySet::get(&self.db, self.set.set).is_some() + { + // Take the keys to confirm so we never instantiate the signer again + let mut txn = self.db.txn(); + KeysToConfirm::take(&mut txn, self.set.set); + KeySet::take(&mut txn, self.set.set); + txn.commit(); + + // Drop our own signer + // The task won't die until the Tributary does, but now it'll never do anything again + self.signer = None; + + made_progress = true; + } + + Ok(made_progress) + } + } +} diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index adcc49ef..d63b79a2 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -1,1311 +1,323 @@ -use core::ops::Deref; -use std::{ - sync::{OnceLock, Arc}, - time::Duration, - collections::{VecDeque, HashSet, HashMap}, -}; +use core::{ops::Deref, time::Duration}; +use std::{sync::Arc, collections::HashMap, time::Instant}; use zeroize::{Zeroize, Zeroizing}; -use rand_core::OsRng; +use rand_core::{RngCore, OsRng}; use ciphersuite::{ - group::{ - ff::{Field, PrimeField}, - GroupEncoding, - }, + group::{ff::PrimeField, GroupEncoding}, Ciphersuite, Ristretto, }; -use schnorr::SchnorrSignature; -use frost::Participant; -use serai_db::{DbTxn, Db}; +use borsh::BorshDeserialize; + +use tokio::sync::mpsc; -use scale::Encode; -use borsh::BorshSerialize; use serai_client::{ - primitives::ExternalNetworkId, - validator_sets::primitives::{ExternalValidatorSet, KeyPair, Session}, - Public, Serai, SeraiInInstructions, + 
primitives::{ExternalNetworkId, PublicKey, SeraiAddress, Signature}, + validator_sets::primitives::{ExternalValidatorSet, KeyPair}, + Serai, }; - use message_queue::{Service, client::MessageQueue}; -use tokio::{ - sync::{Mutex, RwLock, mpsc, broadcast}, - time::sleep, -}; +use serai_task::{Task, TaskHandle, ContinuallyRan}; -use ::tributary::{ProvidedError, TransactionKind, TransactionTrait, Block, Tributary}; - -mod tributary; -use crate::tributary::{ - TributarySpec, Label, SignData, Transaction, scanner::RecognizedIdType, PlanIds, +use serai_cosign::{Faulted, SignedCosign, Cosigning}; +use serai_coordinator_substrate::{ + CanonicalEventStream, EphemeralEventStream, SignSlashReport, SetKeysTask, SignedBatches, + PublishBatchTask, SlashReports, PublishSlashReportTask, }; +use serai_coordinator_tributary::{SigningProtocolRound, Signed, Transaction, SubstrateBlockPlans}; mod db; use db::*; -mod p2p; -pub use p2p::*; - -use processor_messages::{ - key_gen, sign, - coordinator::{self, SubstrateSignableId}, - ProcessorMessage, -}; - -pub mod processors; -use processors::Processors; +mod tributary; +mod dkg_confirmation; mod substrate; -use substrate::CosignTransactions; +use substrate::SubstrateTask; -mod cosign_evaluator; -use cosign_evaluator::CosignEvaluator; - -#[cfg(test)] -pub mod tests; +mod p2p { + pub use serai_coordinator_p2p::*; + pub use serai_coordinator_libp2p_p2p::Libp2p; +} +// Use a zeroizing allocator for this entire application +// While secrets should already be zeroized, the presence of secret keys in a networked application +// (at increased risk of OOB reads) justifies the performance hit in case any secrets weren't +// already #[global_allocator] static ALLOCATOR: zalloc::ZeroizingAlloc = zalloc::ZeroizingAlloc(std::alloc::System); -#[derive(Clone)] -pub struct ActiveTributary { - pub spec: TributarySpec, - pub tributary: Arc>, -} +async fn serai() -> Arc { + const SERAI_CONNECTION_DELAY: Duration = Duration::from_secs(10); + const MAX_SERAI_CONNECTION_DELAY: Duration = Duration::from_secs(300); -#[derive(Clone)] -pub enum TributaryEvent { - NewTributary(ActiveTributary), - TributaryRetired(ExternalValidatorSet), -} - -// Creates a new tributary and sends it to all listeners. 
-async fn add_tributary( - db: D, - key: Zeroizing<::F>, - processors: &Pro, - p2p: P, - tributaries: &broadcast::Sender>, - spec: TributarySpec, -) { - if RetiredTributaryDb::get(&db, spec.set()).is_some() { - log::info!("not adding tributary {:?} since it's been retired", spec.set()); - } - - log::info!("adding tributary {:?}", spec.set()); - - let tributary = Tributary::<_, Transaction, _>::new( - // TODO2: Use a db on a distinct volume to protect against DoS attacks - // TODO2: Delete said db once the Tributary is dropped - db, - spec.genesis(), - spec.start_time(), - key.clone(), - spec.validators(), - p2p, - ) - .await - .unwrap(); - - // Trigger a DKG for the newly added Tributary - // If we're rebooting, we'll re-fire this message - // This is safe due to the message-queue deduplicating based off the intent system - let set = spec.set(); - let our_i = spec - .i(&[], Ristretto::generator() * key.deref()) - .expect("adding a tributary for a set we aren't in set for"); - processors - .send( - set.network, - processor_messages::key_gen::CoordinatorMessage::GenerateKey { - id: processor_messages::key_gen::KeyGenId { session: set.session, attempt: 0 }, - params: frost::ThresholdParams::new(spec.t(), spec.n(&[]), our_i.start).unwrap(), - shares: u16::from(our_i.end) - u16::from(our_i.start), - }, - ) - .await; - - tributaries - .send(TributaryEvent::NewTributary(ActiveTributary { spec, tributary: Arc::new(tributary) })) - .map_err(|_| "all ActiveTributary recipients closed") - .unwrap(); -} - -// TODO: Find a better pattern for this -static HANDOVER_VERIFY_QUEUE_LOCK: OnceLock> = OnceLock::new(); - -#[allow(clippy::too_many_arguments)] -async fn handle_processor_message( - db: &mut D, - key: &Zeroizing<::F>, - serai: &Serai, - p2p: &P, - cosign_channel: &mpsc::UnboundedSender, - tributaries: &HashMap>, - network: ExternalNetworkId, - msg: &processors::Message, -) -> bool { - #[allow(clippy::nonminimal_bool)] - if let Some(already_handled) = HandledMessageDb::get(db, msg.network) { - assert!(!(already_handled > msg.id)); - assert!((already_handled == msg.id) || (already_handled == msg.id - 1)); - if already_handled == msg.id { - return true; - } - } else { - assert_eq!(msg.id, 0); - } - - let _hvq_lock = HANDOVER_VERIFY_QUEUE_LOCK.get_or_init(|| Mutex::new(())).lock().await; - let mut txn = db.txn(); - - let mut relevant_tributary = match &msg.msg { - // We'll only receive these if we fired GenerateKey, which we'll only do if if we're - // in-set, making the Tributary relevant - ProcessorMessage::KeyGen(inner_msg) => match inner_msg { - key_gen::ProcessorMessage::Commitments { id, .. } | - key_gen::ProcessorMessage::InvalidCommitments { id, .. } | - key_gen::ProcessorMessage::Shares { id, .. } | - key_gen::ProcessorMessage::InvalidShare { id, .. } | - key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } | - key_gen::ProcessorMessage::Blame { id, .. } => Some(id.session), - }, - ProcessorMessage::Sign(inner_msg) => match inner_msg { - // We'll only receive InvalidParticipant/Preprocess/Share if we're actively signing - sign::ProcessorMessage::InvalidParticipant { id, .. } | - sign::ProcessorMessage::Preprocess { id, .. } | - sign::ProcessorMessage::Share { id, .. } => Some(id.session), - // While the Processor's Scanner will always emit Completed, that's routed through the - // Signer and only becomes a ProcessorMessage::Completed if the Signer is present and - // confirms it - sign::ProcessorMessage::Completed { session, .. 
} => Some(*session), - }, - ProcessorMessage::Coordinator(inner_msg) => match inner_msg { - // This is a special case as it's relevant to *all* Tributaries for this network we're - // signing in - // It doesn't return a Tributary to become `relevant_tributary` though - coordinator::ProcessorMessage::SubstrateBlockAck { block, plans } => { - // Get the sessions for these keys - let sessions = plans - .iter() - .map(|plan| plan.session) - .filter(|session| { - RetiredTributaryDb::get(&txn, ExternalValidatorSet { network, session: *session }) - .is_none() - }) - .collect::>(); - - // Ensure we have the Tributaries - for session in &sessions { - if !tributaries.contains_key(session) { - return false; - } - } - - for session in sessions { - let tributary = &tributaries[&session]; - let plans = plans - .iter() - .filter_map(|plan| Some(plan.id).filter(|_| plan.session == session)) - .collect::>(); - PlanIds::set(&mut txn, &tributary.spec.genesis(), *block, &plans); - - let tx = Transaction::SubstrateBlock(*block); - log::trace!( - "processor message effected transaction {} {:?}", - hex::encode(tx.hash()), - &tx - ); - log::trace!("providing transaction {}", hex::encode(tx.hash())); - let res = tributary.tributary.provide_transaction(tx).await; - if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) { - if res == Err(ProvidedError::LocalMismatchesOnChain) { - // Spin, since this is a crit for this Tributary - loop { - log::error!( - "{}. tributary: {}, provided: SubstrateBlock({})", - "tributary added distinct provided to delayed locally provided TX", - hex::encode(tributary.spec.genesis()), - block, - ); - sleep(Duration::from_secs(60)).await; - } - } - panic!("provided an invalid transaction: {res:?}"); - } - } - - None - } - // We'll only fire these if we are the Substrate signer, making the Tributary relevant - coordinator::ProcessorMessage::InvalidParticipant { id, .. } | - coordinator::ProcessorMessage::CosignPreprocess { id, .. } | - coordinator::ProcessorMessage::BatchPreprocess { id, .. } | - coordinator::ProcessorMessage::SlashReportPreprocess { id, .. } | - coordinator::ProcessorMessage::SubstrateShare { id, .. 
} => Some(id.session), - // This causes an action on our P2P net yet not on any Tributary - coordinator::ProcessorMessage::CosignedBlock { block_number, block, signature } => { - let cosigned_block = CosignedBlock { - network, - block_number: *block_number, - block: *block, - signature: { - let mut arr = [0; 64]; - arr.copy_from_slice(signature); - arr - }, - }; - cosign_channel.send(cosigned_block).unwrap(); - let mut buf = vec![]; - cosigned_block.serialize(&mut buf).unwrap(); - P2p::broadcast(p2p, GossipMessageKind::CosignedBlock, buf).await; - None - } - // This causes an action on Substrate yet not on any Tributary - coordinator::ProcessorMessage::SignedSlashReport { session, signature } => { - let set = ExternalValidatorSet { network, session: *session }; - let signature: &[u8] = signature.as_ref(); - let signature = serai_client::Signature(signature.try_into().unwrap()); - - let slashes = crate::tributary::SlashReport::get(&txn, set) - .expect("signed slash report despite not having slash report locally"); - let slashes_pubs = - slashes.iter().map(|(address, points)| (Public(*address), *points)).collect::>(); - - let tx = serai_client::SeraiValidatorSets::report_slashes( - network, - slashes - .into_iter() - .map(|(address, points)| (serai_client::SeraiAddress(address), points)) - .collect::>() - .try_into() - .unwrap(), - signature.clone(), - ); - - loop { - if serai.publish(&tx).await.is_ok() { - break None; - } - - // Check if the slashes shouldn't still be reported. If not, break. - let Ok(serai) = serai.as_of_latest_finalized_block().await else { - tokio::time::sleep(core::time::Duration::from_secs(5)).await; - continue; - }; - let Ok(key) = serai.validator_sets().key_pending_slash_report(network).await else { - tokio::time::sleep(core::time::Duration::from_secs(5)).await; - continue; - }; - let Some(key) = key else { - break None; - }; - // If this is the key for this slash report, then this will verify - use sp_application_crypto::RuntimePublic; - if !key.verify( - &serai_client::validator_sets::primitives::report_slashes_message(&set, &slashes_pubs), - &signature, - ) { - break None; - } - } - } - }, - // These don't return a relevant Tributary as there's no Tributary with action expected - ProcessorMessage::Substrate(inner_msg) => match inner_msg { - processor_messages::substrate::ProcessorMessage::Batch { batch } => { - assert_eq!( - batch.network, msg.network, - "processor sent us a batch for a different network than it was for", - ); - ExpectedBatchDb::save_expected_batch(&mut txn, batch); - None - } - // If this is a new Batch, immediately publish it (if we can) - processor_messages::substrate::ProcessorMessage::SignedBatch { batch } => { - assert_eq!( - batch.batch.network, msg.network, - "processor sent us a signed batch for a different network than it was for", - ); - - log::debug!("received batch {:?} {}", batch.batch.network, batch.batch.id); - - // Save this batch to the disk - BatchDb::set(&mut txn, batch.batch.network, batch.batch.id, &batch.clone()); - - // Get the next-to-execute batch ID - let Ok(mut next) = substrate::expected_next_batch(serai, network).await else { - return false; - }; - - // Since we have a new batch, publish all batches yet to be published to Serai - // This handles the edge-case where batch n+1 is signed before batch n is - let mut batches = VecDeque::new(); - while let Some(batch) = BatchDb::get(&txn, network, next) { - batches.push_back(batch); - next += 1; - } - - while let Some(batch) = batches.pop_front() { - // If this Batch should 
no longer be published, continue - let Ok(expected_next_batch) = substrate::expected_next_batch(serai, network).await else { - return false; - }; - if expected_next_batch > batch.batch.id { - continue; - } - - let tx = SeraiInInstructions::execute_batch(batch.clone()); - log::debug!("attempting to publish batch {:?} {}", batch.batch.network, batch.batch.id,); - // This publish may fail if this transactions already exists in the mempool, which is - // possible, or if this batch was already executed on-chain - // Either case will have eventual resolution and be handled by the above check on if - // this batch should execute - let res = serai.publish(&tx).await; - if res.is_ok() { - log::info!( - "published batch {network:?} {} (block {})", - batch.batch.id, - hex::encode(batch.batch.block), - ); - } else { - log::debug!( - "couldn't publish batch {:?} {}: {:?}", - batch.batch.network, - batch.batch.id, - res, - ); - // If we failed to publish it, restore it - batches.push_front(batch); - // Sleep for a few seconds before retrying to prevent hammering the node - sleep(Duration::from_secs(5)).await; - } - } - - None - } - }, - }; - - // If we have a relevant Tributary, check it's actually still relevant and has yet to be retired - if let Some(relevant_tributary_value) = relevant_tributary { - if RetiredTributaryDb::get( - &txn, - ExternalValidatorSet { network: msg.network, session: relevant_tributary_value }, - ) - .is_some() - { - relevant_tributary = None; - } - } - - // If there's a relevant Tributary... - if let Some(relevant_tributary) = relevant_tributary { - // Make sure we have it - // Per the reasoning above, we only return a Tributary as relevant if we're a participant - // Accordingly, we do *need* to have this Tributary now to handle it UNLESS the Tributary has - // already completed and this is simply an old message (which we prior checked) - let Some(ActiveTributary { spec, tributary }) = tributaries.get(&relevant_tributary) else { - // Since we don't, sleep for a fraction of a second and return false, signaling we didn't - // handle this message - // At the start of the loop which calls this function, we'll check for new tributaries, - // making this eventually resolve - sleep(Duration::from_millis(100)).await; - return false; - }; - - let genesis = spec.genesis(); - let pub_key = Ristretto::generator() * key.deref(); - - let txs = match msg.msg.clone() { - ProcessorMessage::KeyGen(inner_msg) => match inner_msg { - key_gen::ProcessorMessage::Commitments { id, commitments } => { - vec![Transaction::DkgCommitments { - attempt: id.attempt, - commitments, - signed: Transaction::empty_signed(), - }] - } - key_gen::ProcessorMessage::InvalidCommitments { id, faulty } => { - // This doesn't have guaranteed timing - // - // While the party *should* be fatally slashed and not included in future attempts, - // they'll actually be fatally slashed (assuming liveness before the Tributary retires) - // and not included in future attempts *which begin after the latency window completes* - let participant = spec - .reverse_lookup_i( - &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt) - .expect("participating in DKG attempt yet we didn't save who was removed"), - faulty, - ) - .unwrap(); - vec![Transaction::RemoveParticipantDueToDkg { - participant, - signed: Transaction::empty_signed(), - }] - } - key_gen::ProcessorMessage::Shares { id, mut shares } => { - // Create a MuSig-based machine to inform Substrate of this key generation - let nonces = 
crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, id.attempt); - - let removed = crate::tributary::removed_as_of_dkg_attempt(&txn, genesis, id.attempt) - .expect("participating in a DKG attempt yet we didn't track who was removed yet?"); - let our_i = spec - .i(&removed, pub_key) - .expect("processor message to DKG for an attempt we aren't a validator in"); - - // `tx_shares` needs to be done here as while it can be serialized from the HashMap - // without further context, it can't be deserialized without context - let mut tx_shares = Vec::with_capacity(shares.len()); - for shares in &mut shares { - tx_shares.push(vec![]); - for i in 1 ..= spec.n(&removed) { - let i = Participant::new(i).unwrap(); - if our_i.contains(&i) { - if shares.contains_key(&i) { - panic!("processor sent us our own shares"); - } - continue; - } - tx_shares.last_mut().unwrap().push( - shares.remove(&i).expect("processor didn't send share for another validator"), - ); - } - } - - vec![Transaction::DkgShares { - attempt: id.attempt, - shares: tx_shares, - confirmation_nonces: nonces, - signed: Transaction::empty_signed(), - }] - } - key_gen::ProcessorMessage::InvalidShare { id, accuser, faulty, blame } => { - vec![Transaction::InvalidDkgShare { - attempt: id.attempt, - accuser, - faulty, - blame, - signed: Transaction::empty_signed(), - }] - } - key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => { - // TODO2: Check the KeyGenId fields - - // Tell the Tributary the key pair, get back the share for the MuSig signature - let share = crate::tributary::generated_key_pair::( - &mut txn, - key, - spec, - &KeyPair(Public(substrate_key), network_key.try_into().unwrap()), - id.attempt, - ); - - // TODO: Move this into generated_key_pair? - match share { - Ok(share) => { - vec![Transaction::DkgConfirmed { - attempt: id.attempt, - confirmation_share: share, - signed: Transaction::empty_signed(), - }] - } - Err(p) => { - let participant = spec - .reverse_lookup_i( - &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt) - .expect("participating in DKG attempt yet we didn't save who was removed"), - p, - ) - .unwrap(); - vec![Transaction::RemoveParticipantDueToDkg { - participant, - signed: Transaction::empty_signed(), - }] - } - } - } - key_gen::ProcessorMessage::Blame { id, participant } => { - let participant = spec - .reverse_lookup_i( - &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt) - .expect("participating in DKG attempt yet we didn't save who was removed"), - participant, - ) - .unwrap(); - vec![Transaction::RemoveParticipantDueToDkg { - participant, - signed: Transaction::empty_signed(), - }] - } - }, - ProcessorMessage::Sign(msg) => match msg { - sign::ProcessorMessage::InvalidParticipant { .. 
} => { - // TODO: Locally increase slash points to maximum (distinct from an explicitly fatal - // slash) and censor transactions (yet don't explicitly ban) - vec![] - } - sign::ProcessorMessage::Preprocess { id, preprocesses } => { - if id.attempt == 0 { - FirstPreprocessDb::save_first_preprocess( - &mut txn, - network, - RecognizedIdType::Plan, - &id.id, - &preprocesses, - ); - - vec![] - } else { - vec![Transaction::Sign(SignData { - plan: id.id, - attempt: id.attempt, - label: Label::Preprocess, - data: preprocesses, - signed: Transaction::empty_signed(), - })] - } - } - sign::ProcessorMessage::Share { id, shares } => { - vec![Transaction::Sign(SignData { - plan: id.id, - attempt: id.attempt, - label: Label::Share, - data: shares, - signed: Transaction::empty_signed(), - })] - } - sign::ProcessorMessage::Completed { session: _, id, tx } => { - let r = Zeroizing::new(::F::random(&mut OsRng)); - #[allow(non_snake_case)] - let R = ::generator() * r.deref(); - let mut tx = Transaction::SignCompleted { - plan: id, - tx_hash: tx, - first_signer: pub_key, - signature: SchnorrSignature { R, s: ::F::ZERO }, - }; - let signed = SchnorrSignature::sign(key, r, tx.sign_completed_challenge()); - match &mut tx { - Transaction::SignCompleted { signature, .. } => { - *signature = signed; - } - _ => unreachable!(), - } - vec![tx] - } - }, - ProcessorMessage::Coordinator(inner_msg) => match inner_msg { - coordinator::ProcessorMessage::SubstrateBlockAck { .. } => unreachable!(), - coordinator::ProcessorMessage::InvalidParticipant { .. } => { - // TODO: Locally increase slash points to maximum (distinct from an explicitly fatal - // slash) and censor transactions (yet don't explicitly ban) - vec![] - } - coordinator::ProcessorMessage::CosignPreprocess { id, preprocesses } | - coordinator::ProcessorMessage::SlashReportPreprocess { id, preprocesses } => { - vec![Transaction::SubstrateSign(SignData { - plan: id.id, - attempt: id.attempt, - label: Label::Preprocess, - data: preprocesses.into_iter().map(Into::into).collect(), - signed: Transaction::empty_signed(), - })] - } - coordinator::ProcessorMessage::BatchPreprocess { id, block, preprocesses } => { - log::info!( - "informed of batch (sign ID {}, attempt {}) for block {}", - hex::encode(id.id.encode()), - id.attempt, - hex::encode(block), - ); - - // If this is the first attempt instance, wait until we synchronize around the batch - // first - if id.attempt == 0 { - FirstPreprocessDb::save_first_preprocess( - &mut txn, - spec.set().network, - RecognizedIdType::Batch, - &{ - let SubstrateSignableId::Batch(id) = id.id else { - panic!("BatchPreprocess SubstrateSignableId wasn't Batch") - }; - id.to_le_bytes() - }, - &preprocesses.into_iter().map(Into::into).collect::>(), - ); - - let intended = Transaction::Batch { - block: block.0, - batch: match id.id { - SubstrateSignableId::Batch(id) => id, - _ => panic!("BatchPreprocess did not contain Batch ID"), - }, - }; - - // If this is the new key's first Batch, only create this TX once we verify all - // all prior published `Batch`s - // TODO: This assumes BatchPreprocess is immediately after Batch - // Ensure that assumption - let last_received = LastReceivedBatchDb::get(&txn, msg.network).unwrap(); - let handover_batch = HandoverBatchDb::get(&txn, spec.set()); - let mut queue = false; - if let Some(handover_batch) = handover_batch { - // There is a race condition here. 
We may verify all `Batch`s from the prior set, - // start signing the handover `Batch` `n`, start signing `n+1`, have `n+1` signed - // before `n` (or at the same time), yet then the prior set forges a malicious - // `Batch` `n`. - // - // The malicious `Batch` `n` would be publishable to Serai, as Serai can't - // distinguish what's intended to be a handover `Batch`, yet then anyone could - // publish the new set's `n+1`, causing their acceptance of the handover. - // - // To fix this, if this is after the handover `Batch` and we have yet to verify - // publication of the handover `Batch`, don't yet yield the provided. - if last_received > handover_batch { - if let Some(last_verified) = LastVerifiedBatchDb::get(&txn, msg.network) { - if last_verified < handover_batch { - queue = true; - } - } else { - queue = true; - } - } - } else { - HandoverBatchDb::set_handover_batch(&mut txn, spec.set(), last_received); - // If this isn't the first batch, meaning we do have to verify all prior batches, and - // the prior Batch hasn't been verified yet... - if (last_received != 0) && - LastVerifiedBatchDb::get(&txn, msg.network) - .map_or(true, |last_verified| last_verified < (last_received - 1)) - { - // Withhold this TX until we verify all prior `Batch`s - queue = true; - } - } - - if queue { - QueuedBatchesDb::queue(&mut txn, spec.set(), &intended); - vec![] - } else { - // Because this is post-verification of the handover batch, take all queued `Batch`s - // now to ensure we don't provide this before an already queued Batch - // This *may* be an unreachable case due to how last_verified_batch is set, yet it - // doesn't hurt to have as a defensive pattern - let mut res = QueuedBatchesDb::take(&mut txn, spec.set()); - res.push(intended); - res - } - } else { - vec![Transaction::SubstrateSign(SignData { - plan: id.id, - attempt: id.attempt, - label: Label::Preprocess, - data: preprocesses.into_iter().map(Into::into).collect(), - signed: Transaction::empty_signed(), - })] - } - } - coordinator::ProcessorMessage::SubstrateShare { id, shares } => { - vec![Transaction::SubstrateSign(SignData { - plan: id.id, - attempt: id.attempt, - label: Label::Share, - data: shares.into_iter().map(|share| share.to_vec()).collect(), - signed: Transaction::empty_signed(), - })] - } - #[allow(clippy::match_same_arms)] // Allowed to preserve layout - coordinator::ProcessorMessage::CosignedBlock { .. } => unreachable!(), - #[allow(clippy::match_same_arms)] - coordinator::ProcessorMessage::SignedSlashReport { .. } => unreachable!(), - }, - ProcessorMessage::Substrate(inner_msg) => match inner_msg { - processor_messages::substrate::ProcessorMessage::Batch { .. } | - processor_messages::substrate::ProcessorMessage::SignedBatch { .. } => unreachable!(), - }, - }; - - // If this created transactions, publish them - for mut tx in txs { - log::trace!("processor message effected transaction {} {:?}", hex::encode(tx.hash()), &tx); - - match tx.kind() { - TransactionKind::Provided(_) => { - log::trace!("providing transaction {}", hex::encode(tx.hash())); - let res = tributary.provide_transaction(tx.clone()).await; - if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) { - if res == Err(ProvidedError::LocalMismatchesOnChain) { - // Spin, since this is a crit for this Tributary - loop { - log::error!( - "{}. 
tributary: {}, provided: {:?}", - "tributary added distinct provided to delayed locally provided TX", - hex::encode(spec.genesis()), - &tx, - ); - sleep(Duration::from_secs(60)).await; - } - } - panic!("provided an invalid transaction: {res:?}"); - } - } - TransactionKind::Unsigned => { - log::trace!("publishing unsigned transaction {}", hex::encode(tx.hash())); - match tributary.add_transaction(tx.clone()).await { - Ok(_) => {} - Err(e) => panic!("created an invalid unsigned transaction: {e:?}"), - } - } - TransactionKind::Signed(_, _) => { - tx.sign(&mut OsRng, genesis, key); - tributary::publish_signed_transaction(&mut txn, tributary, tx).await; - } - } - } - } - - HandledMessageDb::set(&mut txn, msg.network, &msg.id); - txn.commit(); - - true -} - -#[allow(clippy::too_many_arguments)] -async fn handle_processor_messages( - mut db: D, - key: Zeroizing<::F>, - serai: Arc, - processors: Pro, - p2p: P, - cosign_channel: mpsc::UnboundedSender, - network: ExternalNetworkId, - mut tributary_event: mpsc::UnboundedReceiver>, -) { - let mut tributaries = HashMap::new(); + let mut delay = SERAI_CONNECTION_DELAY; loop { - match tributary_event.try_recv() { - Ok(event) => match event { - TributaryEvent::NewTributary(tributary) => { - let set = tributary.spec.set(); - assert_eq!(set.network, network); - tributaries.insert(set.session, tributary); - } - TributaryEvent::TributaryRetired(set) => { - tributaries.remove(&set.session); - } - }, - Err(mpsc::error::TryRecvError::Empty) => {} - Err(mpsc::error::TryRecvError::Disconnected) => { - panic!("handle_processor_messages tributary_event sender closed") - } - } - - // TODO: Check this ID is sane (last handled ID or expected next ID) - let Ok(msg) = tokio::time::timeout(Duration::from_secs(1), processors.recv(network)).await + let Ok(serai) = Serai::new(format!( + "http://{}:9944", + serai_env::var("SERAI_HOSTNAME").expect("Serai hostname wasn't provided") + )) + .await else { + log::error!("couldn't connect to the Serai node"); + tokio::time::sleep(delay).await; + delay = (delay + SERAI_CONNECTION_DELAY).min(MAX_SERAI_CONNECTION_DELAY); continue; }; - log::trace!("entering handle_processor_message for {:?}", network); - if handle_processor_message( - &mut db, - &key, - &serai, - &p2p, - &cosign_channel, - &tributaries, - network, - &msg, - ) - .await - { - processors.ack(msg).await; - } - log::trace!("exited handle_processor_message for {:?}", network); + log::info!("made initial connection to Serai node"); + return Arc::new(serai); } } -#[allow(clippy::too_many_arguments)] -async fn handle_cosigns_and_batch_publication( +fn spawn_cosigning( mut db: D, - network: ExternalNetworkId, - mut tributary_event: mpsc::UnboundedReceiver>, + serai: Arc, + p2p: impl p2p::P2p, + tasks_to_run_upon_cosigning: Vec, + mut p2p_cosigns: mpsc::UnboundedReceiver, ) { - let mut tributaries = HashMap::new(); - 'outer: loop { - // TODO: Create a better async flow for this - tokio::time::sleep(core::time::Duration::from_millis(100)).await; + let mut cosigning = Cosigning::spawn(db.clone(), serai, p2p.clone(), tasks_to_run_upon_cosigning); + tokio::spawn(async move { + const COSIGN_LOOP_INTERVAL: Duration = Duration::from_secs(5); - match tributary_event.try_recv() { - Ok(event) => match event { - TributaryEvent::NewTributary(tributary) => { - let set = tributary.spec.set(); - assert_eq!(set.network, network); - tributaries.insert(set.session, tributary); + let last_cosign_rebroadcast = Instant::now(); + loop { + // Intake our own cosigns + match 
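Reviewer note: the new `serai()` connection helper above retries with a linearly growing, capped delay. A minimal self-contained sketch of that backoff shape, assuming placeholder values for `SERAI_CONNECTION_DELAY`/`MAX_SERAI_CONNECTION_DELAY` and a generic `attempt` closure standing in for `Serai::new`:

```rust
use std::time::Duration;

// Assumed values; the real constants are defined elsewhere in the coordinator.
const SERAI_CONNECTION_DELAY: Duration = Duration::from_secs(5);
const MAX_SERAI_CONNECTION_DELAY: Duration = Duration::from_secs(60);

// Retry an async connection attempt, growing the sleep linearly up to a cap.
async fn connect_with_backoff<T, E, Fut>(mut attempt: impl FnMut() -> Fut) -> T
where
  Fut: core::future::Future<Output = Result<T, E>>,
{
  let mut delay = SERAI_CONNECTION_DELAY;
  loop {
    match attempt().await {
      Ok(connection) => return connection,
      Err(_) => {
        // Wait, then lengthen the next wait by one step, saturating at the maximum
        tokio::time::sleep(delay).await;
        delay = (delay + SERAI_CONNECTION_DELAY).min(MAX_SERAI_CONNECTION_DELAY);
      }
    }
  }
}
```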
Cosigning::::latest_cosigned_block_number(&db) { + Ok(latest_cosigned_block_number) => { + let mut txn = db.txn(); + // The cosigns we prior tried to intake yet failed to + let mut cosigns = ErroneousCosigns::get(&txn).unwrap_or(vec![]); + // The cosigns we have yet to intake + while let Some(cosign) = SignedCosigns::try_recv(&mut txn) { + cosigns.push(cosign); + } + + let mut erroneous = vec![]; + for cosign in cosigns { + // If this cosign is stale, move on + if cosign.cosign.block_number <= latest_cosigned_block_number { + continue; + } + + match cosigning.intake_cosign(&cosign) { + // Publish this cosign + Ok(()) => p2p.publish_cosign(cosign).await, + Err(e) => { + assert!(e.temporal(), "signed an invalid cosign: {e:?}"); + // Since this had a temporal error, queue it to try again later + erroneous.push(cosign); + } + }; + } + + // Save the cosigns with temporal errors to the database + ErroneousCosigns::set(&mut txn, &erroneous); + + txn.commit(); } - TributaryEvent::TributaryRetired(set) => { - tributaries.remove(&set.session); + Err(Faulted) => { + // We don't panic here as the following code rebroadcasts our cosigns which is + // necessary to inform other coordinators of the faulty cosigns + log::error!("cosigning faulted"); + } + } + + let time_till_cosign_rebroadcast = (last_cosign_rebroadcast + + serai_cosign::BROADCAST_FREQUENCY) + .saturating_duration_since(Instant::now()); + tokio::select! { + () = tokio::time::sleep(time_till_cosign_rebroadcast) => { + for cosign in cosigning.cosigns_to_rebroadcast() { + p2p.publish_cosign(cosign).await; + } + } + cosign = p2p_cosigns.recv() => { + let cosign = cosign.expect("p2p cosigns channel was dropped?"); + if cosigning.intake_cosign(&cosign).is_ok() { + p2p.publish_cosign(cosign).await; + } + } + // Make sure this loop runs at least this often + () = tokio::time::sleep(COSIGN_LOOP_INTERVAL) => {} + } + } + }); +} + +async fn handle_network( + mut db: impl serai_db::Db, + message_queue: Arc, + serai: Arc, + network: ExternalNetworkId, +) { + // Spawn the task to publish batches for this network + { + let (publish_batch_task_def, publish_batch_task) = Task::new(); + tokio::spawn( + PublishBatchTask::new(db.clone(), serai.clone(), network) + .continually_run(publish_batch_task_def, vec![]), + ); + // Forget its handle so it always runs in the background + core::mem::forget(publish_batch_task); + } + + // Handle Processor messages + loop { + let (msg_id, msg) = { + let msg = message_queue.next(Service::Processor(network)).await; + // Check this message's sender is as expected + assert_eq!(msg.from, Service::Processor(network)); + + // Check this message's ID is as expected + let last = LastProcessorMessage::get(&db, network); + let next = last.map(|id| id + 1).unwrap_or(0); + // This should either be the last message's ID, if we committed but didn't send our ACK, or + // the expected next message's ID + assert!((Some(msg.id) == last) || (msg.id == next)); + + // TODO: Check msg.sig + + // If this is the message we already handled, and just failed to ACK, ACK it now and move on + if Some(msg.id) == last { + message_queue.ack(Service::Processor(network), msg.id).await; + continue; + } + + (msg.id, messages::ProcessorMessage::deserialize(&mut msg.msg.as_slice()).unwrap()) + }; + + let mut txn = db.txn(); + + match msg { + messages::ProcessorMessage::KeyGen(msg) => match msg { + messages::key_gen::ProcessorMessage::Participation { session, participation } => { + let set = ExternalValidatorSet { network, session }; + 
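Reviewer note: the `spawn_cosigning` loop above multiplexes a rebroadcast timer, the inbound P2P cosign channel, and a floor interval via `tokio::select!`. A simplified sketch of that shape, using generic byte messages instead of cosigns and placeholder durations (the real frequency comes from `serai_cosign::BROADCAST_FREQUENCY`):

```rust
use core::time::Duration;
use tokio::{sync::mpsc, time::{Instant, sleep}};

async fn cosign_loop(mut inbound: mpsc::UnboundedReceiver<Vec<u8>>) {
  const REBROADCAST_FREQUENCY: Duration = Duration::from_secs(60); // placeholder value
  const LOOP_INTERVAL: Duration = Duration::from_secs(5); // placeholder value

  let mut last_rebroadcast = Instant::now();
  loop {
    // ... intake locally-produced cosigns here, as the loop above does ...

    let until_rebroadcast =
      (last_rebroadcast + REBROADCAST_FREQUENCY).saturating_duration_since(Instant::now());
    tokio::select! {
      // Periodically re-publish what we already hold
      () = sleep(until_rebroadcast) => {
        last_rebroadcast = Instant::now();
        // rebroadcast our cosigns here
      }
      // Intake a cosign received over the P2P network
      msg = inbound.recv() => {
        let _msg = msg.expect("sender dropped");
        // validate, and republish if valid
      }
      // Ensure this loop body runs at least this often
      () = sleep(LOOP_INTERVAL) => {}
    }
  }
}
```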
TributaryTransactionsFromProcessorMessages::send( + &mut txn, + set, + &Transaction::DkgParticipation { participation, signed: Signed::default() }, + ); + } + messages::key_gen::ProcessorMessage::GeneratedKeyPair { + session, + substrate_key, + network_key, + } => { + KeysToConfirm::set( + &mut txn, + ExternalValidatorSet { network, session }, + &KeyPair( + PublicKey::from_raw(substrate_key), + network_key + .try_into() + .expect("generated a network key which exceeds the maximum key length"), + ), + ); + } + messages::key_gen::ProcessorMessage::Blame { session, participant } => { + RemoveParticipant::send(&mut txn, ExternalValidatorSet { network, session }, participant); } }, - Err(mpsc::error::TryRecvError::Empty) => {} - Err(mpsc::error::TryRecvError::Disconnected) => { - panic!("handle_processor_messages tributary_event sender closed") - } - } - - // Handle pending cosigns - { - let mut txn = db.txn(); - while let Some((session, block, hash)) = CosignTransactions::try_recv(&mut txn, network) { - let Some(ActiveTributary { spec, tributary }) = tributaries.get(&session) else { - log::warn!("didn't yet have tributary we're supposed to cosign with"); - break; - }; - log::info!( - "{network:?} {session:?} cosigning block #{block} (hash {}...)", - hex::encode(&hash[.. 8]) - ); - let tx = Transaction::CosignSubstrateBlock(hash); - let res = tributary.provide_transaction(tx.clone()).await; - if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) { - if res == Err(ProvidedError::LocalMismatchesOnChain) { - // Spin, since this is a crit for this Tributary - loop { - log::error!( - "{}. tributary: {}, provided: {:?}", - "tributary added distinct CosignSubstrateBlock", - hex::encode(spec.genesis()), - &tx, + messages::ProcessorMessage::Sign(msg) => match msg { + messages::sign::ProcessorMessage::InvalidParticipant { session, participant } => { + RemoveParticipant::send(&mut txn, ExternalValidatorSet { network, session }, participant); + } + messages::sign::ProcessorMessage::Preprocesses { id, preprocesses } => { + let set = ExternalValidatorSet { network, session: id.session }; + if id.attempt == 0 { + // Batches are declared by their intent to be signed + if let messages::sign::VariantSignId::Batch(hash) = id.id { + TributaryTransactionsFromProcessorMessages::send( + &mut txn, + set, + &Transaction::Batch { hash }, ); - sleep(Duration::from_secs(60)).await; } } - panic!("provided an invalid CosignSubstrateBlock: {res:?}"); - } - } - txn.commit(); - } - // Verify any publifshed `Batch`s - { - let _hvq_lock = HANDOVER_VERIFY_QUEUE_LOCK.get_or_init(|| Mutex::new(())).lock().await; - let mut txn = db.txn(); - let mut to_publish = vec![]; - let start_id = - LastVerifiedBatchDb::get(&txn, network).map_or(0, |already_verified| already_verified + 1); - if let Some(last_id) = - substrate::verify_published_batches::(&mut txn, network, u32::MAX).await - { - // Check if any of these `Batch`s were a handover `Batch` or the `Batch` before a handover - // `Batch` - // If so, we need to publish queued provided `Batch` transactions - for batch in start_id ..= last_id { - let is_pre_handover = LookupHandoverBatchDb::get(&txn, network, batch + 1); - if let Some(session) = is_pre_handover { + TributaryTransactionsFromProcessorMessages::send( + &mut txn, + set, + &Transaction::Sign { + id: id.id, + attempt: id.attempt, + round: SigningProtocolRound::Preprocess, + data: preprocesses, + signed: Signed::default(), + }, + ); + } + messages::sign::ProcessorMessage::Shares { id, shares } => { + let set = 
ExternalValidatorSet { network, session: id.session }; + TributaryTransactionsFromProcessorMessages::send( + &mut txn, + set, + &Transaction::Sign { + id: id.id, + attempt: id.attempt, + round: SigningProtocolRound::Share, + data: shares, + signed: Signed::default(), + }, + ); + } + }, + messages::ProcessorMessage::Coordinator(msg) => match msg { + messages::coordinator::ProcessorMessage::CosignedBlock { cosign } => { + SignedCosigns::send(&mut txn, &cosign); + } + messages::coordinator::ProcessorMessage::SignedBatch { batch } => { + SignedBatches::send(&mut txn, &batch); + } + messages::coordinator::ProcessorMessage::SignedSlashReport { + session, + slash_report, + signature, + } => { + SlashReports::set( + &mut txn, + ExternalValidatorSet { network, session }, + slash_report, + Signature(signature), + ); + } + }, + messages::ProcessorMessage::Substrate(msg) => match msg { + messages::substrate::ProcessorMessage::SubstrateBlockAck { block, plans } => { + let mut by_session = HashMap::new(); + for plan in plans { + by_session + .entry(plan.session) + .or_insert_with(|| Vec::with_capacity(1)) + .push(plan.transaction_plan_id); + } + for (session, plans) in by_session { let set = ExternalValidatorSet { network, session }; - let mut queued = QueuedBatchesDb::take(&mut txn, set); - // is_handover_batch is only set for handover `Batch`s we're participating in, making - // this safe - if queued.is_empty() { - panic!("knew the next Batch was a handover yet didn't queue it"); - } - - // Only publish the handover Batch - to_publish.push((set.session, queued.remove(0))); - // Re-queue the remaining batches - for remaining in queued { - QueuedBatchesDb::queue(&mut txn, set, &remaining); - } - } - - let is_handover = LookupHandoverBatchDb::get(&txn, network, batch); - if let Some(session) = is_handover { - for queued in QueuedBatchesDb::take(&mut txn, ExternalValidatorSet { network, session }) - { - to_publish.push((session, queued)); - } + SubstrateBlockPlans::set(&mut txn, set, block, &plans); + TributaryTransactionsFromProcessorMessages::send( + &mut txn, + set, + &Transaction::SubstrateBlock { hash: block }, + ); } } - } - - for (session, tx) in to_publish { - let Some(ActiveTributary { spec, tributary }) = tributaries.get(&session) else { - log::warn!("didn't yet have tributary we're supposed to provide a queued Batch for"); - // Safe since this will drop the txn updating the most recently queued batch - continue 'outer; - }; - log::debug!("providing Batch transaction {:?}", &tx); - let res = tributary.provide_transaction(tx.clone()).await; - if !(res.is_ok() || (res == Err(ProvidedError::AlreadyProvided))) { - if res == Err(ProvidedError::LocalMismatchesOnChain) { - // Spin, since this is a crit for this Tributary - loop { - log::error!( - "{}. 
tributary: {}, provided: {:?}", - "tributary added distinct Batch", - hex::encode(spec.genesis()), - &tx, - ); - sleep(Duration::from_secs(60)).await; - } - } - panic!("provided an invalid Batch: {res:?}"); - } - } - - txn.commit(); + }, } + + // Mark this as the last handled message + LastProcessorMessage::set(&mut txn, network, &msg_id); + // Commit the txn + txn.commit(); + // Now that we won't handle this message again, acknowledge it so we won't see it again + message_queue.ack(Service::Processor(network), msg_id).await; } } -pub async fn handle_processors( - db: D, - key: Zeroizing<::F>, - serai: Arc, - processors: Pro, - p2p: P, - cosign_channel: mpsc::UnboundedSender, - mut tributary_event: broadcast::Receiver>, -) { - let mut channels = HashMap::new(); - for network in serai_client::primitives::EXTERNAL_NETWORKS { - let (processor_send, processor_recv) = mpsc::unbounded_channel(); - tokio::spawn(handle_processor_messages( - db.clone(), - key.clone(), - serai.clone(), - processors.clone(), - p2p.clone(), - cosign_channel.clone(), - network, - processor_recv, - )); - let (cosign_send, cosign_recv) = mpsc::unbounded_channel(); - tokio::spawn(handle_cosigns_and_batch_publication(db.clone(), network, cosign_recv)); - channels.insert(network, (processor_send, cosign_send)); - } - - // Listen to new tributary events - loop { - match tributary_event.recv().await.unwrap() { - TributaryEvent::NewTributary(tributary) => { - let (c1, c2) = &channels[&tributary.spec.set().network]; - c1.send(TributaryEvent::NewTributary(tributary.clone())).unwrap(); - c2.send(TributaryEvent::NewTributary(tributary)).unwrap(); - } - TributaryEvent::TributaryRetired(set) => { - let (c1, c2) = &channels[&set.network]; - c1.send(TributaryEvent::TributaryRetired(set)).unwrap(); - c2.send(TributaryEvent::TributaryRetired(set)).unwrap(); - } - }; - } -} - -pub async fn run( - raw_db: D, - key: Zeroizing<::F>, - p2p: P, - processors: Pro, - serai: Arc, -) { - let (new_tributary_spec_send, mut new_tributary_spec_recv) = mpsc::unbounded_channel(); - // Reload active tributaries from the database - for spec in ActiveTributaryDb::active_tributaries(&raw_db).1 { - new_tributary_spec_send.send(spec).unwrap(); - } - - let (perform_slash_report_send, mut perform_slash_report_recv) = mpsc::unbounded_channel(); - - let (tributary_retired_send, mut tributary_retired_recv) = mpsc::unbounded_channel(); - - // Handle new Substrate blocks - tokio::spawn(crate::substrate::scan_task( - raw_db.clone(), - key.clone(), - processors.clone(), - serai.clone(), - new_tributary_spec_send, - perform_slash_report_send, - tributary_retired_send, - )); - - // Handle the Tributaries - - // This should be large enough for an entire rotation of all tributaries - // If it's too small, the coordinator fail to boot, which is a decent sanity check - let (tributary_event, mut tributary_event_listener_1) = broadcast::channel(32); - let tributary_event_listener_2 = tributary_event.subscribe(); - let tributary_event_listener_3 = tributary_event.subscribe(); - let tributary_event_listener_4 = tributary_event.subscribe(); - let tributary_event_listener_5 = tributary_event.subscribe(); - - // Emit TributaryEvent::TributaryRetired - tokio::spawn({ - let tributary_event = tributary_event.clone(); - async move { - loop { - let retired = tributary_retired_recv.recv().await.unwrap(); - tributary_event.send(TributaryEvent::TributaryRetired(retired)).map_err(|_| ()).unwrap(); - } - } - }); - - // Spawn a task to further add Tributaries as needed - tokio::spawn({ - 
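Reviewer note: `handle_network` above commits its database transaction (including `LastProcessorMessage`) before ACKing the message queue, so a crash between commit and ACK only causes a redelivery, which is detected by the stored ID and re-ACKed without being reprocessed. A compact sketch of that ordering, with stand-in queue and DB types in place of the coordinator's real `MessageQueue` client and `serai-db` transaction:

```rust
// Stand-in types; the real code uses the coordinator's MessageQueue client and a DB txn.
struct Message {
  id: u64,
  body: Vec<u8>,
}

struct Queue;
impl Queue {
  async fn next(&self) -> Message {
    unimplemented!("stand-in for message_queue.next(..)")
  }
  async fn ack(&self, _id: u64) {}
}

struct Db {
  last_handled: Option<u64>,
}

async fn handle_messages(queue: Queue, mut db: Db) {
  loop {
    let msg = queue.next().await;

    // A redelivery of a message we already committed: just ACK it and move on
    if Some(msg.id) == db.last_handled {
      queue.ack(msg.id).await;
      continue;
    }
    // Otherwise, it must be exactly the next expected message
    assert_eq!(msg.id, db.last_handled.map_or(0, |last| last + 1));

    // 1) Apply the message's effects and record its ID (atomically, via a txn in the real code)
    let _ = msg.body; // handle the message here
    db.last_handled = Some(msg.id);

    // 2) Only after committing, ACK so the queue stops redelivering it
    queue.ack(msg.id).await;
  }
}
```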
let raw_db = raw_db.clone(); - let key = key.clone(); - let processors = processors.clone(); - let p2p = p2p.clone(); - async move { - loop { - let spec = new_tributary_spec_recv.recv().await.unwrap(); - // Uses an inner task as Tributary::new may take several seconds - tokio::spawn({ - let raw_db = raw_db.clone(); - let key = key.clone(); - let processors = processors.clone(); - let p2p = p2p.clone(); - let tributary_event = tributary_event.clone(); - async move { - add_tributary(raw_db, key, &processors, p2p, &tributary_event, spec).await; - } - }); - } - } - }); - - // When we reach synchrony on an event requiring signing, send our preprocess for it - // TODO: Properly place this into the Tributary scanner, as it's a mess out here - let recognized_id = { - let raw_db = raw_db.clone(); - let key = key.clone(); - - let specs = Arc::new(RwLock::new(HashMap::new())); - let tributaries = Arc::new(RwLock::new(HashMap::new())); - // Spawn a task to maintain a local view of the tributaries for whenever recognized_id is - // called - tokio::spawn({ - let specs = specs.clone(); - let tributaries = tributaries.clone(); - let mut set_to_genesis = HashMap::new(); - async move { - loop { - match tributary_event_listener_1.recv().await { - Ok(TributaryEvent::NewTributary(tributary)) => { - set_to_genesis.insert(tributary.spec.set(), tributary.spec.genesis()); - tributaries.write().await.insert(tributary.spec.genesis(), tributary.tributary); - specs.write().await.insert(tributary.spec.set(), tributary.spec); - } - Ok(TributaryEvent::TributaryRetired(set)) => { - if let Some(genesis) = set_to_genesis.remove(&set) { - specs.write().await.remove(&set); - tributaries.write().await.remove(&genesis); - } - } - Err(broadcast::error::RecvError::Lagged(_)) => { - panic!("recognized_id lagged to handle tributary_event") - } - Err(broadcast::error::RecvError::Closed) => panic!("tributary_event sender closed"), - } - } - } - }); - - // Also spawn a task to handle slash reports, as this needs such a view of tributaries - tokio::spawn({ - let mut raw_db = raw_db.clone(); - let key = key.clone(); - let tributaries = tributaries.clone(); - async move { - 'task_loop: loop { - match perform_slash_report_recv.recv().await { - Some(set) => { - let (genesis, validators) = loop { - let specs = specs.read().await; - let Some(spec) = specs.get(&set) else { - // If we don't have this Tributary because it's retired, break and move on - if RetiredTributaryDb::get(&raw_db, set).is_some() { - continue 'task_loop; - } - - // This may happen if the task above is simply slow - log::warn!("tributary we don't have yet is supposed to perform a slash report"); - continue; - }; - break (spec.genesis(), spec.validators()); - }; - - let mut slashes = vec![]; - for (validator, _) in validators { - if validator == (::generator() * key.deref()) { - continue; - } - let validator = validator.to_bytes(); - - let fatally = tributary::FatallySlashed::get(&raw_db, genesis, validator).is_some(); - // TODO: Properly type this - let points = if fatally { - u32::MAX - } else { - tributary::SlashPoints::get(&raw_db, genesis, validator).unwrap_or(0) - }; - slashes.push(points); - } - - let mut tx = Transaction::SlashReport(slashes, Transaction::empty_signed()); - tx.sign(&mut OsRng, genesis, &key); - - let mut first = true; - loop { - if !first { - sleep(Duration::from_millis(100)).await; - } - first = false; - - let tributaries = tributaries.read().await; - let Some(tributary) = tributaries.get(&genesis) else { - // If we don't have this Tributary 
because it's retired, break and move on - if RetiredTributaryDb::get(&raw_db, set).is_some() { - break; - } - - // This may happen if the task above is simply slow - log::warn!("tributary we don't have yet is supposed to perform a slash report"); - continue; - }; - // This is safe to perform multiple times and solely needs atomicity with regards - // to itself - // TODO: Should this not take a txn accordingly? It's best practice to take a txn, - // yet taking a txn fails to declare its achieved independence - let mut txn = raw_db.txn(); - tributary::publish_signed_transaction(&mut txn, tributary, tx).await; - txn.commit(); - break; - } - } - None => panic!("perform slash report sender closed"), - } - } - } - }); - - move |set: ExternalValidatorSet, genesis, id_type, id: Vec| { - log::debug!("recognized ID {:?} {}", id_type, hex::encode(&id)); - let mut raw_db = raw_db.clone(); - let key = key.clone(); - let tributaries = tributaries.clone(); - async move { - // The transactions for these are fired before the preprocesses are actually - // received/saved, creating a race between Tributary ack and the availability of all - // Preprocesses - // This waits until the necessary preprocess is available 0, - let get_preprocess = |raw_db, id_type, id| async move { - loop { - let Some(preprocess) = FirstPreprocessDb::get(raw_db, set.network, id_type, id) else { - log::warn!("waiting for preprocess for recognized ID"); - sleep(Duration::from_millis(100)).await; - continue; - }; - return preprocess; - } - }; - - let mut tx = match id_type { - RecognizedIdType::Batch => Transaction::SubstrateSign(SignData { - data: get_preprocess(&raw_db, id_type, &id).await, - plan: SubstrateSignableId::Batch(u32::from_le_bytes(id.try_into().unwrap())), - label: Label::Preprocess, - attempt: 0, - signed: Transaction::empty_signed(), - }), - - RecognizedIdType::Plan => Transaction::Sign(SignData { - data: get_preprocess(&raw_db, id_type, &id).await, - plan: id.try_into().unwrap(), - label: Label::Preprocess, - attempt: 0, - signed: Transaction::empty_signed(), - }), - }; - - tx.sign(&mut OsRng, genesis, &key); - - let mut first = true; - loop { - if !first { - sleep(Duration::from_millis(100)).await; - } - first = false; - - let tributaries = tributaries.read().await; - let Some(tributary) = tributaries.get(&genesis) else { - // If we don't have this Tributary because it's retired, break and move on - if RetiredTributaryDb::get(&raw_db, set).is_some() { - break; - } - - // This may happen if the task above is simply slow - log::warn!("tributary we don't have yet came to consensus on an Batch"); - continue; - }; - // This is safe to perform multiple times and solely needs atomicity with regards to - // itself - // TODO: Should this not take a txn accordingly? 
It's best practice to take a txn, yet - // taking a txn fails to declare its achieved independence - let mut txn = raw_db.txn(); - tributary::publish_signed_transaction(&mut txn, tributary, tx).await; - txn.commit(); - break; - } - } - } - }; - - // Handle new blocks for each Tributary - { - let raw_db = raw_db.clone(); - tokio::spawn(tributary::scanner::scan_tributaries_task( - raw_db, - key.clone(), - recognized_id, - processors.clone(), - serai.clone(), - tributary_event_listener_2, - )); - } - - // Spawn the heartbeat task, which will trigger syncing if there hasn't been a Tributary block - // in a while (presumably because we're behind) - tokio::spawn(p2p::heartbeat_tributaries_task(p2p.clone(), tributary_event_listener_3)); - - // Create the Cosign evaluator - let cosign_channel = CosignEvaluator::new(raw_db.clone(), p2p.clone(), serai.clone()); - - // Handle P2P messages - tokio::spawn(p2p::handle_p2p_task( - p2p.clone(), - cosign_channel.clone(), - tributary_event_listener_4, - )); - - // Handle all messages from processors - handle_processors( - raw_db, - key, - serai, - processors, - p2p, - cosign_channel, - tributary_event_listener_5, - ) - .await; -} - #[tokio::main] async fn main() { // Override the panic handler with one which will panic if any tokio task panics @@ -1320,27 +332,15 @@ async fn main() { })); } + // Initialize the logger if std::env::var("RUST_LOG").is_err() { std::env::set_var("RUST_LOG", serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_string())); } env_logger::init(); - log::info!("starting coordinator service..."); - #[allow(unused_variables, unreachable_code)] - let db = { - #[cfg(all(feature = "parity-db", feature = "rocksdb"))] - panic!("built with parity-db and rocksdb"); - #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))] - let db = - serai_db::new_parity_db(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); - #[cfg(feature = "rocksdb")] - let db = - serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); - db - }; - - let key = { + // Read the Serai key from the env + let serai_key = { let mut key_hex = serai_env::var("SERAI_KEY").expect("Serai key wasn't provided"); let mut key_vec = hex::decode(&key_hex).map_err(|_| ()).expect("Serai key wasn't hex-encoded"); key_hex.zeroize(); @@ -1356,25 +356,154 @@ async fn main() { key }; - let processors = Arc::new(MessageQueue::from_env(Service::Coordinator)); + // Open the database + let mut db = coordinator_db(); - let serai = (async { - loop { - let Ok(serai) = Serai::new(format!( - "http://{}:9944", - serai_env::var("SERAI_HOSTNAME").expect("Serai hostname wasn't provided") - )) - .await - else { - log::error!("couldn't connect to the Serai node"); - sleep(Duration::from_secs(5)).await; - continue; - }; - log::info!("made initial connection to Serai node"); - return Arc::new(serai); + let existing_tributaries_at_boot = { + let mut txn = db.txn(); + + // Cleanup all historic Tributaries + while let Some(to_cleanup) = TributaryCleanup::try_recv(&mut txn) { + prune_tributary_db(to_cleanup); + // Remove the keys to confirm for this network + KeysToConfirm::take(&mut txn, to_cleanup); + KeySet::take(&mut txn, to_cleanup); + // Drain the cosign intents created for this set + while !Cosigning::::intended_cosigns(&mut txn, to_cleanup).is_empty() {} + // Drain the transactions to publish for this set + while TributaryTransactionsFromProcessorMessages::try_recv(&mut txn, to_cleanup).is_some() {} + while 
TributaryTransactionsFromDkgConfirmation::try_recv(&mut txn, to_cleanup).is_some() {} + // Drain the participants to remove for this set + while RemoveParticipant::try_recv(&mut txn, to_cleanup).is_some() {} + // Remove the SignSlashReport notification + SignSlashReport::try_recv(&mut txn, to_cleanup); } - }) - .await; - let p2p = LibP2p::new(serai.clone()); - run(db, key, p2p, processors, serai).await + + // Remove retired Tributaries from ActiveTributaries + let mut active_tributaries = ActiveTributaries::get(&txn).unwrap_or(vec![]); + active_tributaries.retain(|tributary| { + RetiredTributary::get(&txn, tributary.set.network).map(|session| session.0) < + Some(tributary.set.session.0) + }); + ActiveTributaries::set(&mut txn, &active_tributaries); + + txn.commit(); + + active_tributaries + }; + + // Connect to the message-queue + let message_queue = Arc::new(MessageQueue::from_env(Service::Coordinator)); + + // Connect to the Serai node + let serai = serai().await; + + let (p2p_add_tributary_send, p2p_add_tributary_recv) = mpsc::unbounded_channel(); + let (p2p_retire_tributary_send, p2p_retire_tributary_recv) = mpsc::unbounded_channel(); + let (p2p_cosigns_send, p2p_cosigns_recv) = mpsc::unbounded_channel(); + + // Spawn the P2P network + let p2p = { + let serai_keypair = { + let mut key_bytes = serai_key.to_bytes(); + // Schnorrkel SecretKey is the key followed by 32 bytes of entropy for nonces + let mut expanded_key = Zeroizing::new([0; 64]); + expanded_key.as_mut_slice()[.. 32].copy_from_slice(&key_bytes); + OsRng.fill_bytes(&mut expanded_key.as_mut_slice()[32 ..]); + key_bytes.zeroize(); + Zeroizing::new( + schnorrkel::SecretKey::from_bytes(expanded_key.as_slice()).unwrap().to_keypair(), + ) + }; + let p2p = p2p::Libp2p::new(&serai_keypair, serai.clone()); + tokio::spawn(p2p::run::( + db.clone(), + p2p.clone(), + p2p_add_tributary_recv, + p2p_retire_tributary_recv, + p2p_cosigns_send, + )); + p2p + }; + + // Spawn the Substrate scanners + let (substrate_task_def, substrate_task) = Task::new(); + let (substrate_canonical_task_def, substrate_canonical_task) = Task::new(); + tokio::spawn( + CanonicalEventStream::new(db.clone(), serai.clone()) + .continually_run(substrate_canonical_task_def, vec![substrate_task.clone()]), + ); + let (substrate_ephemeral_task_def, substrate_ephemeral_task) = Task::new(); + tokio::spawn( + EphemeralEventStream::new( + db.clone(), + serai.clone(), + SeraiAddress((::generator() * serai_key.deref()).to_bytes()), + ) + .continually_run(substrate_ephemeral_task_def, vec![substrate_task]), + ); + + // Spawn the cosign handler + spawn_cosigning( + db.clone(), + serai.clone(), + p2p.clone(), + // Run the Substrate scanners once we cosign new blocks + vec![substrate_canonical_task, substrate_ephemeral_task], + p2p_cosigns_recv, + ); + + // Spawn all Tributaries on-disk + for tributary in existing_tributaries_at_boot { + crate::tributary::spawn_tributary( + db.clone(), + message_queue.clone(), + p2p.clone(), + &p2p_add_tributary_send, + tributary, + serai_key.clone(), + ) + .await; + } + + // Handle the events from the Substrate scanner + tokio::spawn( + (SubstrateTask { + serai_key: serai_key.clone(), + db: db.clone(), + message_queue: message_queue.clone(), + p2p: p2p.clone(), + p2p_add_tributary: p2p_add_tributary_send.clone(), + p2p_retire_tributary: p2p_retire_tributary_send.clone(), + }) + .continually_run(substrate_task_def, vec![]), + ); + + // Handle each of the networks + for network in serai_client::primitives::EXTERNAL_NETWORKS { + 
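Reviewer note: the `ActiveTributaries` retention check above leans on `Option`'s ordering, where `None` sorts before every `Some`, so a network with no retired session keeps all of its tributaries while a retired session drops itself and everything older. A small check of that comparison (session numbers shown as plain `u32`s):

```rust
fn main() {
  // Option<u32> orders None before every Some, so a network with no retired session
  // (None) retains all of its tributaries
  let no_retired_session: Option<u32> = None;
  assert!(no_retired_session < Some(0));

  // With session 3 retired, only strictly newer sessions are retained
  let retired: Option<u32> = Some(3);
  assert!(!(retired < Some(3))); // session 3 itself is dropped
  assert!(retired < Some(4)); // session 4 is kept
}
```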
tokio::spawn(handle_network(db.clone(), message_queue.clone(), serai.clone(), network)); + } + + // Spawn the task to set keys + { + let (set_keys_task_def, set_keys_task) = Task::new(); + tokio::spawn( + SetKeysTask::new(db.clone(), serai.clone()).continually_run(set_keys_task_def, vec![]), + ); + // Forget its handle so it always runs in the background + core::mem::forget(set_keys_task); + } + + // Spawn the task to publish slash reports + { + let (publish_slash_report_task_def, publish_slash_report_task) = Task::new(); + tokio::spawn( + PublishSlashReportTask::new(db, serai).continually_run(publish_slash_report_task_def, vec![]), + ); + // Always have this run in the background + core::mem::forget(publish_slash_report_task); + } + + // Run the spawned tasks ad-infinitum + core::future::pending().await } diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs deleted file mode 100644 index a9fd288e..00000000 --- a/coordinator/src/p2p.rs +++ /dev/null @@ -1,1045 +0,0 @@ -use core::{time::Duration, fmt}; -use std::{ - sync::Arc, - io::{self, Read}, - collections::{HashSet, HashMap}, - time::{SystemTime, Instant}, -}; - -use async_trait::async_trait; -use rand_core::{RngCore, OsRng}; - -use scale::{Decode, Encode}; -use borsh::{BorshSerialize, BorshDeserialize}; -use serai_client::{ - primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet, Serai, -}; - -use serai_db::Db; - -use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, StreamExt}; -use tokio::{ - sync::{Mutex, RwLock, mpsc, broadcast}, - time::sleep, -}; - -use libp2p::{ - core::multiaddr::{Protocol, Multiaddr}, - identity::Keypair, - PeerId, - tcp::Config as TcpConfig, - noise, yamux, - request_response::{ - Codec as RrCodecTrait, Message as RrMessage, Event as RrEvent, Config as RrConfig, - Behaviour as RrBehavior, ProtocolSupport, - }, - gossipsub::{ - IdentTopic, FastMessageId, MessageId, MessageAuthenticity, ValidationMode, ConfigBuilder, - IdentityTransform, AllowAllSubscriptionFilter, Event as GsEvent, PublishError, - Behaviour as GsBehavior, - }, - swarm::{NetworkBehaviour, SwarmEvent}, - SwarmBuilder, -}; - -pub(crate) use tributary::{ReadWrite, P2p as TributaryP2p}; - -use crate::{Transaction, Block, Tributary, ActiveTributary, TributaryEvent}; - -// Block size limit + 1 KB of space for signatures/metadata -const MAX_LIBP2P_GOSSIP_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMIT + 1024; - -const MAX_LIBP2P_REQRES_MESSAGE_SIZE: usize = - (tributary::BLOCK_SIZE_LIMIT * BLOCKS_PER_BATCH) + 1024; - -const MAX_LIBP2P_MESSAGE_SIZE: usize = { - // Manual `max` since `max` isn't a const fn - if MAX_LIBP2P_GOSSIP_MESSAGE_SIZE > MAX_LIBP2P_REQRES_MESSAGE_SIZE { - MAX_LIBP2P_GOSSIP_MESSAGE_SIZE - } else { - MAX_LIBP2P_REQRES_MESSAGE_SIZE - } -}; - -const LIBP2P_TOPIC: &str = "serai-coordinator"; - -// Amount of blocks in a minute -const BLOCKS_PER_MINUTE: usize = (60 / (tributary::tendermint::TARGET_BLOCK_TIME / 1000)) as usize; - -// Maximum amount of blocks to send in a batch -const BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1; - -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)] -pub struct CosignedBlock { - pub network: ExternalNetworkId, - pub block_number: u64, - pub block: [u8; 32], - pub signature: [u8; 64], -} - -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] -pub enum ReqResMessageKind { - KeepAlive, - Heartbeat([u8; 32]), - Block([u8; 32]), -} - -impl ReqResMessageKind { - pub fn read(reader: &mut R) -> Option { - let mut kind = [0; 
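Reviewer note: several hunks above spawn a task with `Task::new()`/`continually_run` and then `core::mem::forget` its handle so it "always runs in the background". Presumably (an assumption about `serai-task`'s semantics, not confirmed by this diff) the handle signals shutdown once every copy is dropped, so forgetting it keeps the task alive for the life of the process. A generic sketch of that close-on-drop pattern using an unbounded channel as the handle:

```rust
use core::time::Duration;
use tokio::sync::mpsc;

// A handle whose drop (of every clone) closes the associated task's channel.
// This is an illustrative stand-in, not serai-task's actual API.
#[derive(Clone)]
struct TaskHandle(mpsc::UnboundedSender<()>);

fn new_task() -> (mpsc::UnboundedReceiver<()>, TaskHandle) {
  let (send, recv) = mpsc::unbounded_channel();
  (recv, TaskHandle(send))
}

async fn continually_run(mut run_signal: mpsc::UnboundedReceiver<()>) {
  loop {
    tokio::select! {
      signal = run_signal.recv() => {
        if signal.is_none() {
          // Every handle was dropped, so shut the task down
          break;
        }
        // A handle explicitly requested an immediate iteration
      }
      () = tokio::time::sleep(Duration::from_secs(5)) => {
        // Periodic iteration
      }
    }
    // ... perform one iteration of work here ...
  }
}

fn spawn_forever() {
  let (signal, handle) = new_task();
  tokio::spawn(continually_run(signal));
  // Forgetting the handle means it's never dropped, the channel never closes, and the
  // spawned task keeps iterating for the lifetime of the process
  core::mem::forget(handle);
}
```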
1]; - reader.read_exact(&mut kind).ok()?; - match kind[0] { - 0 => Some(ReqResMessageKind::KeepAlive), - 1 => Some({ - let mut genesis = [0; 32]; - reader.read_exact(&mut genesis).ok()?; - ReqResMessageKind::Heartbeat(genesis) - }), - 2 => Some({ - let mut genesis = [0; 32]; - reader.read_exact(&mut genesis).ok()?; - ReqResMessageKind::Block(genesis) - }), - _ => None, - } - } - - pub fn serialize(&self) -> Vec { - match self { - ReqResMessageKind::KeepAlive => vec![0], - ReqResMessageKind::Heartbeat(genesis) => { - let mut res = vec![1]; - res.extend(genesis); - res - } - ReqResMessageKind::Block(genesis) => { - let mut res = vec![2]; - res.extend(genesis); - res - } - } - } -} - -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] -pub enum GossipMessageKind { - Tributary([u8; 32]), - CosignedBlock, -} - -impl GossipMessageKind { - pub fn read(reader: &mut R) -> Option { - let mut kind = [0; 1]; - reader.read_exact(&mut kind).ok()?; - match kind[0] { - 0 => Some({ - let mut genesis = [0; 32]; - reader.read_exact(&mut genesis).ok()?; - GossipMessageKind::Tributary(genesis) - }), - 1 => Some(GossipMessageKind::CosignedBlock), - _ => None, - } - } - - pub fn serialize(&self) -> Vec { - match self { - GossipMessageKind::Tributary(genesis) => { - let mut res = vec![0]; - res.extend(genesis); - res - } - GossipMessageKind::CosignedBlock => { - vec![1] - } - } - } -} - -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] -pub enum P2pMessageKind { - ReqRes(ReqResMessageKind), - Gossip(GossipMessageKind), -} - -impl P2pMessageKind { - fn genesis(&self) -> Option<[u8; 32]> { - match self { - P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) | - P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => None, - P2pMessageKind::ReqRes( - ReqResMessageKind::Heartbeat(genesis) | ReqResMessageKind::Block(genesis), - ) | - P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => Some(*genesis), - } - } -} - -impl From for P2pMessageKind { - fn from(kind: ReqResMessageKind) -> P2pMessageKind { - P2pMessageKind::ReqRes(kind) - } -} - -impl From for P2pMessageKind { - fn from(kind: GossipMessageKind) -> P2pMessageKind { - P2pMessageKind::Gossip(kind) - } -} - -#[derive(Clone, Debug)] -pub struct Message { - pub sender: P::Id, - pub kind: P2pMessageKind, - pub msg: Vec, -} - -#[derive(Clone, Debug, Encode, Decode)] -pub struct BlockCommit { - pub block: Vec, - pub commit: Vec, -} - -#[derive(Clone, Debug, Encode, Decode)] -pub struct HeartbeatBatch { - pub blocks: Vec, - pub timestamp: u64, -} - -#[async_trait] -pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p { - type Id: Send + Sync + Clone + Copy + fmt::Debug; - - async fn subscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]); - async fn unsubscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]); - - async fn send_raw(&self, to: Self::Id, msg: Vec); - async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec); - async fn receive(&self) -> Message; - - async fn send(&self, to: Self::Id, kind: ReqResMessageKind, msg: Vec) { - let mut actual_msg = kind.serialize(); - actual_msg.extend(msg); - self.send_raw(to, actual_msg).await; - } - async fn broadcast(&self, kind: impl Send + Into, msg: Vec) { - let kind = kind.into(); - let mut actual_msg = match kind { - P2pMessageKind::ReqRes(kind) => kind.serialize(), - P2pMessageKind::Gossip(kind) => kind.serialize(), - }; - actual_msg.extend(msg); - /* - log::trace!( - "broadcasting p2p message (kind {})", - match kind { - P2pMessageKind::KeepAlive => 
"KeepAlive".to_string(), - P2pMessageKind::Tributary(genesis) => format!("Tributary({})", hex::encode(genesis)), - P2pMessageKind::Heartbeat(genesis) => format!("Heartbeat({})", hex::encode(genesis)), - P2pMessageKind::Block(genesis) => format!("Block({})", hex::encode(genesis)), - P2pMessageKind::CosignedBlock => "CosignedBlock".to_string(), - } - ); - */ - self.broadcast_raw(kind, actual_msg).await; - } -} - -#[derive(Default, Clone, Copy, PartialEq, Eq, Debug)] -struct RrCodec; -#[async_trait] -impl RrCodecTrait for RrCodec { - type Protocol = &'static str; - type Request = Vec; - type Response = Vec; - - async fn read_request( - &mut self, - _: &Self::Protocol, - io: &mut R, - ) -> io::Result> { - let mut len = [0; 4]; - io.read_exact(&mut len).await?; - let len = usize::try_from(u32::from_le_bytes(len)).expect("not at least a 32-bit platform?"); - if len > MAX_LIBP2P_REQRES_MESSAGE_SIZE { - Err(io::Error::other("request length exceeded MAX_LIBP2P_REQRES_MESSAGE_SIZE"))?; - } - // This may be a non-trivial allocation easily causable - // While we could chunk the read, meaning we only perform the allocation as bandwidth is used, - // the max message size should be sufficiently sane - let mut buf = vec![0; len]; - io.read_exact(&mut buf).await?; - Ok(buf) - } - async fn read_response( - &mut self, - proto: &Self::Protocol, - io: &mut R, - ) -> io::Result> { - self.read_request(proto, io).await - } - async fn write_request( - &mut self, - _: &Self::Protocol, - io: &mut W, - req: Vec, - ) -> io::Result<()> { - io.write_all( - &u32::try_from(req.len()) - .map_err(|_| io::Error::other("request length exceeded 2**32"))? - .to_le_bytes(), - ) - .await?; - io.write_all(&req).await - } - async fn write_response( - &mut self, - proto: &Self::Protocol, - io: &mut W, - res: Vec, - ) -> io::Result<()> { - self.write_request(proto, io, res).await - } -} - -#[derive(NetworkBehaviour)] -struct Behavior { - reqres: RrBehavior, - gossipsub: GsBehavior, -} - -#[allow(clippy::type_complexity)] -#[derive(Clone)] -pub struct LibP2p { - subscribe: Arc>>, - send: Arc)>>>, - broadcast: Arc)>>>, - receive: Arc>>>, -} -impl fmt::Debug for LibP2p { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_struct("LibP2p").finish_non_exhaustive() - } -} - -impl LibP2p { - #[allow(clippy::new_without_default)] - pub fn new(serai: Arc) -> Self { - log::info!("creating a libp2p instance"); - - let throwaway_key_pair = Keypair::generate_ed25519(); - - let behavior = Behavior { - reqres: { RrBehavior::new([("/coordinator", ProtocolSupport::Full)], RrConfig::default()) }, - gossipsub: { - let heartbeat_interval = tributary::tendermint::LATENCY_TIME / 2; - let heartbeats_per_block = - usize::try_from(tributary::tendermint::TARGET_BLOCK_TIME / heartbeat_interval).unwrap(); - - use blake2::{Digest, Blake2s256}; - let config = ConfigBuilder::default() - .heartbeat_interval(Duration::from_millis(heartbeat_interval.into())) - .history_length(heartbeats_per_block * 2) - .history_gossip(heartbeats_per_block) - .max_transmit_size(MAX_LIBP2P_GOSSIP_MESSAGE_SIZE) - // We send KeepAlive after 80s - .idle_timeout(Duration::from_secs(85)) - .validation_mode(ValidationMode::Strict) - // Uses a content based message ID to avoid duplicates as much as possible - .message_id_fn(|msg| { - MessageId::new(&Blake2s256::digest([msg.topic.as_str().as_bytes(), &msg.data].concat())) - }) - // Re-defines for fast ID to prevent needing to convert into a Message to run - // message_id_fn - // This function is valid for both - 
.fast_message_id_fn(|msg| { - FastMessageId::new(&Blake2s256::digest( - [msg.topic.as_str().as_bytes(), &msg.data].concat(), - )) - }) - .build(); - let mut gossipsub = GsBehavior::::new( - MessageAuthenticity::Signed(throwaway_key_pair.clone()), - config.unwrap(), - ) - .unwrap(); - - // Subscribe to the base topic - let topic = IdentTopic::new(LIBP2P_TOPIC); - gossipsub.subscribe(&topic).unwrap(); - - gossipsub - }, - }; - - // Uses noise for authentication, yamux for multiplexing - // TODO: Do we want to add a custom authentication protocol to only accept connections from - // fellow validators? Doing so would reduce the potential for spam - // TODO: Relay client? - let mut swarm = SwarmBuilder::with_existing_identity(throwaway_key_pair) - .with_tokio() - .with_tcp(TcpConfig::default().nodelay(true), noise::Config::new, || { - let mut config = yamux::Config::default(); - // 1 MiB default + max message size - config.set_max_buffer_size((1024 * 1024) + MAX_LIBP2P_MESSAGE_SIZE); - // 256 KiB default + max message size - config - .set_receive_window_size(((256 * 1024) + MAX_LIBP2P_MESSAGE_SIZE).try_into().unwrap()); - config - }) - .unwrap() - .with_behaviour(|_| behavior) - .unwrap() - .build(); - const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o') - swarm.listen_on(format!("/ip4/0.0.0.0/tcp/{PORT}").parse().unwrap()).unwrap(); - - let (send_send, mut send_recv) = mpsc::unbounded_channel(); - let (broadcast_send, mut broadcast_recv) = mpsc::unbounded_channel(); - let (receive_send, receive_recv) = mpsc::unbounded_channel(); - let (subscribe_send, mut subscribe_recv) = mpsc::unbounded_channel(); - - fn topic_for_set(set: ExternalValidatorSet) -> IdentTopic { - IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode()))) - } - - // TODO: If a network has less than TARGET_PEERS, this will cause retries ad infinitum - const TARGET_PEERS: usize = 5; - - // The addrs we're currently dialing, and the networks associated with them - let dialing_peers = Arc::new(RwLock::new(HashMap::new())); - // The peers we're currently connected to, and the networks associated with them - let connected_peers = - Arc::new(RwLock::new(HashMap::>::new())); - - // Find and connect to peers - let (connect_to_network_send, mut connect_to_network_recv) = - tokio::sync::mpsc::unbounded_channel(); - let (to_dial_send, mut to_dial_recv) = tokio::sync::mpsc::unbounded_channel(); - tokio::spawn({ - let dialing_peers = dialing_peers.clone(); - let connected_peers = connected_peers.clone(); - - let connect_to_network_send = connect_to_network_send.clone(); - async move { - loop { - let connect = |network: ExternalNetworkId, addr: Multiaddr| { - let dialing_peers = dialing_peers.clone(); - let connected_peers = connected_peers.clone(); - let to_dial_send = to_dial_send.clone(); - let connect_to_network_send = connect_to_network_send.clone(); - async move { - log::info!("found peer from substrate: {addr}"); - - let protocols = addr.iter().filter_map(|piece| match piece { - // Drop PeerIds from the Substrate P2p network - Protocol::P2p(_) => None, - // Use our own TCP port - Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)), - other => Some(other), - }); - - let mut new_addr = Multiaddr::empty(); - for protocol in protocols { - new_addr.push(protocol); - } - let addr = new_addr; - log::debug!("transformed found peer: {addr}"); - - let (is_fresh_dial, nets) = { - let mut dialing_peers = dialing_peers.write().await; - let is_fresh_dial = !dialing_peers.contains_key(&addr); - if is_fresh_dial { - 
dialing_peers.insert(addr.clone(), HashSet::new()); - } - // Associate this network with this peer - dialing_peers.get_mut(&addr).unwrap().insert(network); - - let nets = dialing_peers.get(&addr).unwrap().clone(); - (is_fresh_dial, nets) - }; - - // Spawn a task to remove this peer from 'dialing' in sixty seconds, in case dialing - // fails - // This performs cleanup and bounds the size of the map to whatever growth occurs - // within a temporal window - tokio::spawn({ - let dialing_peers = dialing_peers.clone(); - let connected_peers = connected_peers.clone(); - let connect_to_network_send = connect_to_network_send.clone(); - let addr = addr.clone(); - async move { - tokio::time::sleep(core::time::Duration::from_secs(60)).await; - let mut dialing_peers = dialing_peers.write().await; - if let Some(expected_nets) = dialing_peers.remove(&addr) { - log::debug!("removed addr from dialing upon timeout: {addr}"); - - // TODO: De-duplicate this below instance - // If we failed to dial and haven't gotten enough actual connections, retry - let connected_peers = connected_peers.read().await; - for net in expected_nets { - let mut remaining_peers = 0; - for nets in connected_peers.values() { - if nets.contains(&net) { - remaining_peers += 1; - } - } - // If we do not, start connecting to this network again - if remaining_peers < TARGET_PEERS { - connect_to_network_send.send(net).expect( - "couldn't send net to connect to due to disconnects (receiver dropped?)", - ); - } - } - } - } - }); - - if is_fresh_dial { - to_dial_send.send((addr, nets)).unwrap(); - } - } - }; - - // TODO: We should also connect to random peers from random nets as needed for - // cosigning - - // Drain the chainnel, de-duplicating any networks in it - let mut connect_to_network_networks = HashSet::new(); - while let Ok(network) = connect_to_network_recv.try_recv() { - connect_to_network_networks.insert(network); - } - for network in connect_to_network_networks { - if let Ok(mut nodes) = serai.p2p_validators(network.into()).await { - // If there's an insufficient amount of nodes known, connect to all yet add it - // back and break - if nodes.len() < TARGET_PEERS { - log::warn!( - "insufficient amount of P2P nodes known for {:?}: {}", - network, - nodes.len() - ); - // Retry this later - connect_to_network_send.send(network).unwrap(); - for node in nodes { - connect(network, node).await; - } - continue; - } - - // Randomly select up to 150% of the TARGET_PEERS - for _ in 0 .. ((3 * TARGET_PEERS) / 2) { - if !nodes.is_empty() { - let to_connect = nodes.swap_remove( - usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap()) - .unwrap(), - ); - connect(network, to_connect).await; - } - } - } - } - // Sleep 60 seconds before moving to the next iteration - tokio::time::sleep(core::time::Duration::from_secs(60)).await; - } - } - }); - - // Manage the actual swarm - tokio::spawn({ - let mut time_of_last_p2p_message = Instant::now(); - - async move { - let connected_peers = connected_peers.clone(); - - let mut set_for_genesis = HashMap::new(); - loop { - let time_since_last = Instant::now().duration_since(time_of_last_p2p_message); - tokio::select! { - biased; - - // Subscribe to any new topics - set = subscribe_recv.recv() => { - let (subscribe, set, genesis): (_, ExternalValidatorSet, [u8; 32]) = - set.expect("subscribe_recv closed. 
are we shutting down?"); - let topic = topic_for_set(set); - if subscribe { - log::info!("subscribing to p2p messages for {set:?}"); - connect_to_network_send.send(set.network).unwrap(); - set_for_genesis.insert(genesis, set); - swarm.behaviour_mut().gossipsub.subscribe(&topic).unwrap(); - } else { - log::info!("unsubscribing to p2p messages for {set:?}"); - set_for_genesis.remove(&genesis); - swarm.behaviour_mut().gossipsub.unsubscribe(&topic).unwrap(); - } - } - - msg = send_recv.recv() => { - let (peer, msg): (PeerId, Vec) = - msg.expect("send_recv closed. are we shutting down?"); - swarm.behaviour_mut().reqres.send_request(&peer, msg); - }, - - // Handle any queued outbound messages - msg = broadcast_recv.recv() => { - // Update the time of last message - time_of_last_p2p_message = Instant::now(); - - let (kind, msg): (P2pMessageKind, Vec) = - msg.expect("broadcast_recv closed. are we shutting down?"); - - if matches!(kind, P2pMessageKind::ReqRes(_)) { - // Use request/response, yet send to all connected peers - for peer_id in swarm.connected_peers().copied().collect::>() { - swarm.behaviour_mut().reqres.send_request(&peer_id, msg.clone()); - } - } else { - // Use gossipsub - - let set = - kind.genesis().and_then(|genesis| set_for_genesis.get(&genesis).copied()); - let topic = if let Some(set) = set { - topic_for_set(set) - } else { - IdentTopic::new(LIBP2P_TOPIC) - }; - - match swarm.behaviour_mut().gossipsub.publish(topic, msg.clone()) { - Err(PublishError::SigningError(e)) => { - panic!("signing error when broadcasting: {e}") - }, - Err(PublishError::InsufficientPeers) => { - log::warn!("failed to send p2p message due to insufficient peers") - } - Err(PublishError::MessageTooLarge) => { - panic!("tried to send a too large message: {}", hex::encode(msg)) - } - Err(PublishError::TransformFailed(e)) => panic!("IdentityTransform failed: {e}"), - Err(PublishError::Duplicate) | Ok(_) => {} - } - } - } - - // Handle new incoming messages - event = swarm.next() => { - match event { - Some(SwarmEvent::Dialing { connection_id, .. }) => { - log::debug!("dialing to peer in connection ID {}", &connection_id); - } - Some(SwarmEvent::ConnectionEstablished { - peer_id, - connection_id, - endpoint, - .. - }) => { - if &peer_id == swarm.local_peer_id() { - log::warn!("established a libp2p connection to ourselves"); - swarm.close_connection(connection_id); - continue; - } - - let addr = endpoint.get_remote_address(); - let nets = { - let mut dialing_peers = dialing_peers.write().await; - if let Some(nets) = dialing_peers.remove(addr) { - nets - } else { - log::debug!("connected to a peer who we didn't have within dialing"); - HashSet::new() - } - }; - { - let mut connected_peers = connected_peers.write().await; - connected_peers.insert(addr.clone(), nets); - - log::debug!( - "connection established to peer {} in connection ID {}, connected peers: {}", - &peer_id, - &connection_id, - connected_peers.len(), - ); - } - } - Some(SwarmEvent::ConnectionClosed { peer_id, endpoint, .. 
}) => { - let mut connected_peers = connected_peers.write().await; - let Some(nets) = connected_peers.remove(endpoint.get_remote_address()) else { - log::debug!("closed connection to peer which wasn't in connected_peers"); - continue; - }; - // Downgrade to a read lock - let connected_peers = connected_peers.downgrade(); - - // For each net we lost a peer for, check if we still have sufficient peers - // overall - for net in nets { - let mut remaining_peers = 0; - for nets in connected_peers.values() { - if nets.contains(&net) { - remaining_peers += 1; - } - } - // If we do not, start connecting to this network again - if remaining_peers < TARGET_PEERS { - connect_to_network_send - .send(net) - .expect( - "couldn't send net to connect to due to disconnects (receiver dropped?)" - ); - } - } - - log::debug!( - "connection with peer {peer_id} closed, connected peers: {}", - connected_peers.len(), - ); - } - Some(SwarmEvent::Behaviour(BehaviorEvent::Reqres( - RrEvent::Message { peer, message }, - ))) => { - let message = match message { - RrMessage::Request { request, .. } => request, - RrMessage::Response { response, .. } => response, - }; - - let mut msg_ref = message.as_slice(); - let Some(kind) = ReqResMessageKind::read(&mut msg_ref) else { continue }; - let message = Message { - sender: peer, - kind: P2pMessageKind::ReqRes(kind), - msg: msg_ref.to_vec(), - }; - receive_send.send(message).expect("receive_send closed. are we shutting down?"); - } - Some(SwarmEvent::Behaviour(BehaviorEvent::Gossipsub( - GsEvent::Message { propagation_source, message, .. }, - ))) => { - let mut msg_ref = message.data.as_slice(); - let Some(kind) = GossipMessageKind::read(&mut msg_ref) else { continue }; - let message = Message { - sender: propagation_source, - kind: P2pMessageKind::Gossip(kind), - msg: msg_ref.to_vec(), - }; - receive_send.send(message).expect("receive_send closed. 
are we shutting down?"); - } - _ => {} - } - } - - // Handle peers to dial - addr_and_nets = to_dial_recv.recv() => { - let (addr, nets) = - addr_and_nets.expect("received address was None (sender dropped?)"); - // If we've already dialed and connected to this address, don't further dial them - // Just associate these networks with them - if let Some(existing_nets) = connected_peers.write().await.get_mut(&addr) { - for net in nets { - existing_nets.insert(net); - } - continue; - } - - if let Err(e) = swarm.dial(addr) { - log::warn!("dialing peer failed: {e:?}"); - } - } - - // If it's been >80s since we've published a message, publish a KeepAlive since we're - // still an active service - // This is useful when we have no active tributaries and accordingly aren't sending - // heartbeats - // If we are sending heartbeats, we should've sent one after 60s of no finalized blocks - // (where a finalized block only occurs due to network activity), meaning this won't be - // run - () = tokio::time::sleep(Duration::from_secs(80).saturating_sub(time_since_last)) => { - time_of_last_p2p_message = Instant::now(); - for peer_id in swarm.connected_peers().copied().collect::>() { - swarm - .behaviour_mut() - .reqres - .send_request(&peer_id, ReqResMessageKind::KeepAlive.serialize()); - } - } - } - } - } - }); - - LibP2p { - subscribe: Arc::new(Mutex::new(subscribe_send)), - send: Arc::new(Mutex::new(send_send)), - broadcast: Arc::new(Mutex::new(broadcast_send)), - receive: Arc::new(Mutex::new(receive_recv)), - } - } -} - -#[async_trait] -impl P2p for LibP2p { - type Id = PeerId; - - async fn subscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]) { - self - .subscribe - .lock() - .await - .send((true, set, genesis)) - .expect("subscribe_send closed. are we shutting down?"); - } - - async fn unsubscribe(&self, set: ExternalValidatorSet, genesis: [u8; 32]) { - self - .subscribe - .lock() - .await - .send((false, set, genesis)) - .expect("subscribe_send closed. are we shutting down?"); - } - - async fn send_raw(&self, peer: Self::Id, msg: Vec) { - self.send.lock().await.send((peer, msg)).expect("send_send closed. are we shutting down?"); - } - - async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec) { - self - .broadcast - .lock() - .await - .send((kind, msg)) - .expect("broadcast_send closed. are we shutting down?"); - } - - // TODO: We only have a single handle call this. Differentiate Send/Recv to remove this constant - // lock acquisition? - async fn receive(&self) -> Message { - self.receive.lock().await.recv().await.expect("receive_recv closed. 
are we shutting down?") - } -} - -#[async_trait] -impl TributaryP2p for LibP2p { - async fn broadcast(&self, genesis: [u8; 32], msg: Vec) { - ::broadcast(self, GossipMessageKind::Tributary(genesis), msg).await - } -} - -pub async fn heartbeat_tributaries_task( - p2p: P, - mut tributary_event: broadcast::Receiver>, -) { - let ten_blocks_of_time = - Duration::from_secs((10 * Tributary::::block_time()).into()); - - let mut readers = HashMap::new(); - loop { - loop { - match tributary_event.try_recv() { - Ok(TributaryEvent::NewTributary(ActiveTributary { spec, tributary })) => { - readers.insert(spec.set(), tributary.reader()); - } - Ok(TributaryEvent::TributaryRetired(set)) => { - readers.remove(&set); - } - Err(broadcast::error::TryRecvError::Empty) => break, - Err(broadcast::error::TryRecvError::Lagged(_)) => { - panic!("heartbeat_tributaries lagged to handle tributary_event") - } - Err(broadcast::error::TryRecvError::Closed) => panic!("tributary_event sender closed"), - } - } - - for tributary in readers.values() { - let tip = tributary.tip(); - let block_time = - SystemTime::UNIX_EPOCH + Duration::from_secs(tributary.time_of_block(&tip).unwrap_or(0)); - - // Only trigger syncing if the block is more than a minute behind - if SystemTime::now() > (block_time + Duration::from_secs(60)) { - log::warn!("last known tributary block was over a minute ago"); - let mut msg = tip.to_vec(); - let time: u64 = SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .expect("system clock is wrong") - .as_secs(); - msg.extend(time.to_le_bytes()); - P2p::broadcast(&p2p, ReqResMessageKind::Heartbeat(tributary.genesis()), msg).await; - } - } - - // Only check once every 10 blocks of time - sleep(ten_blocks_of_time).await; - } -} - -pub async fn handle_p2p_task( - p2p: P, - cosign_channel: mpsc::UnboundedSender, - mut tributary_event: broadcast::Receiver>, -) { - let channels = Arc::new(RwLock::new(HashMap::<_, mpsc::UnboundedSender>>::new())); - tokio::spawn({ - let p2p = p2p.clone(); - let channels = channels.clone(); - let mut set_to_genesis = HashMap::new(); - async move { - loop { - match tributary_event.recv().await.unwrap() { - TributaryEvent::NewTributary(tributary) => { - let genesis = tributary.spec.genesis(); - set_to_genesis.insert(tributary.spec.set(), genesis); - - let (send, mut recv) = mpsc::unbounded_channel(); - channels.write().await.insert(genesis, send); - - // Subscribe to the topic for this tributary - p2p.subscribe(tributary.spec.set(), genesis).await; - - let spec_set = tributary.spec.set(); - - // Per-Tributary P2P message handler - tokio::spawn({ - let p2p = p2p.clone(); - async move { - loop { - let Some(msg) = recv.recv().await else { - // Channel closure happens when the tributary retires - break; - }; - match msg.kind { - P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) => {} - - // TODO: Slash on Heartbeat which justifies a response, since the node - // obviously was offline and we must now use our bandwidth to compensate for - // them? - P2pMessageKind::ReqRes(ReqResMessageKind::Heartbeat(msg_genesis)) => { - assert_eq!(msg_genesis, genesis); - if msg.msg.len() != 40 { - log::error!("validator sent invalid heartbeat"); - continue; - } - // Only respond to recent heartbeats - let msg_time = u64::from_le_bytes(msg.msg[32 .. 
40].try_into().expect( - "length-checked heartbeat message didn't have 8 bytes for the u64", - )); - if SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .expect("system clock is wrong") - .as_secs() - .saturating_sub(msg_time) > - 10 - { - continue; - } - - log::debug!("received heartbeat with a recent timestamp"); - - let reader = tributary.tributary.reader(); - - let p2p = p2p.clone(); - // Spawn a dedicated task as this may require loading large amounts of data - // from disk and take a notable amount of time - tokio::spawn(async move { - let mut latest = msg.msg[.. 32].try_into().unwrap(); - let mut to_send = vec![]; - while let Some(next) = reader.block_after(&latest) { - to_send.push(next); - latest = next; - } - if to_send.len() > 3 { - // prepare the batch to sends - let mut blocks = vec![]; - for (i, next) in to_send.iter().enumerate() { - if i >= BLOCKS_PER_BATCH { - break; - } - - blocks.push(BlockCommit { - block: reader.block(next).unwrap().serialize(), - commit: reader.commit(next).unwrap(), - }); - } - let batch = HeartbeatBatch { blocks, timestamp: msg_time }; - - p2p - .send(msg.sender, ReqResMessageKind::Block(genesis), batch.encode()) - .await; - } - }); - } - - P2pMessageKind::ReqRes(ReqResMessageKind::Block(msg_genesis)) => { - assert_eq!(msg_genesis, genesis); - // decode the batch - let Ok(batch) = HeartbeatBatch::decode(&mut msg.msg.as_ref()) else { - log::error!( - "received HeartBeatBatch message with an invalidly serialized batch" - ); - continue; - }; - - // sync blocks - for bc in batch.blocks { - // TODO: why do we use ReadWrite instead of Encode/Decode for blocks? - // Should we use the same for batches so we can read both at the same time? - let Ok(block) = Block::::read(&mut bc.block.as_slice()) else { - log::error!("received block message with an invalidly serialized block"); - continue; - }; - - let res = tributary.tributary.sync_block(block, bc.commit).await; - log::debug!( - "received block from {:?}, sync_block returned {}", - msg.sender, - res - ); - } - } - - P2pMessageKind::Gossip(GossipMessageKind::Tributary(msg_genesis)) => { - assert_eq!(msg_genesis, genesis); - log::trace!("handling message for tributary {:?}", spec_set); - if tributary.tributary.handle_message(&msg.msg).await { - P2p::broadcast(&p2p, msg.kind, msg.msg).await; - } - } - - P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => unreachable!(), - } - } - } - }); - } - TributaryEvent::TributaryRetired(set) => { - if let Some(genesis) = set_to_genesis.remove(&set) { - p2p.unsubscribe(set, genesis).await; - channels.write().await.remove(&genesis); - } - } - } - } - } - }); - - loop { - let msg = p2p.receive().await; - match msg.kind { - P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) => {} - P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) | - P2pMessageKind::ReqRes( - ReqResMessageKind::Heartbeat(genesis) | ReqResMessageKind::Block(genesis), - ) => { - if let Some(channel) = channels.read().await.get(&genesis) { - channel.send(msg).unwrap(); - } - } - P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => { - let Ok(msg) = CosignedBlock::deserialize_reader(&mut msg.msg.as_slice()) else { - log::error!("received CosignedBlock message with invalidly serialized contents"); - continue; - }; - cosign_channel.send(msg).unwrap(); - } - } - } -} diff --git a/coordinator/src/processors.rs b/coordinator/src/processors.rs deleted file mode 100644 index cfdbfa25..00000000 --- a/coordinator/src/processors.rs +++ /dev/null @@ -1,46 +0,0 @@ -use 
std::sync::Arc; - -use serai_client::primitives::ExternalNetworkId; -use processor_messages::{ProcessorMessage, CoordinatorMessage}; - -use message_queue::{Service, Metadata, client::MessageQueue}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Message { - pub id: u64, - pub network: ExternalNetworkId, - pub msg: ProcessorMessage, -} - -#[async_trait::async_trait] -pub trait Processors: 'static + Send + Sync + Clone { - async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into); - async fn recv(&self, network: ExternalNetworkId) -> Message; - async fn ack(&self, msg: Message); -} - -#[async_trait::async_trait] -impl Processors for Arc { - async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into) { - let msg: CoordinatorMessage = msg.into(); - let metadata = - Metadata { from: self.service, to: Service::Processor(network), intent: msg.intent() }; - let msg = borsh::to_vec(&msg).unwrap(); - self.queue(metadata, msg).await; - } - async fn recv(&self, network: ExternalNetworkId) -> Message { - let msg = self.next(Service::Processor(network)).await; - assert_eq!(msg.from, Service::Processor(network)); - - let id = msg.id; - - // Deserialize it into a ProcessorMessage - let msg: ProcessorMessage = - borsh::from_slice(&msg.msg).expect("message wasn't a borsh-encoded ProcessorMessage"); - - return Message { id, network, msg }; - } - async fn ack(&self, msg: Message) { - MessageQueue::ack(self, Service::Processor(msg.network), msg.id).await - } -} diff --git a/coordinator/src/substrate.rs b/coordinator/src/substrate.rs new file mode 100644 index 00000000..4a70ee6b --- /dev/null +++ b/coordinator/src/substrate.rs @@ -0,0 +1,163 @@ +use core::future::Future; +use std::sync::Arc; + +use zeroize::Zeroizing; + +use ciphersuite::{Ciphersuite, Ristretto}; + +use tokio::sync::mpsc; + +use serai_db::{DbTxn, Db as DbTrait}; + +use serai_client::validator_sets::primitives::{Session, ExternalValidatorSet}; +use message_queue::{Service, Metadata, client::MessageQueue}; + +use tributary_sdk::Tributary; + +use serai_task::ContinuallyRan; + +use serai_coordinator_tributary::Transaction; +use serai_coordinator_p2p::P2p; + +use crate::{Db, KeySet}; + +pub(crate) struct SubstrateTask { + pub(crate) serai_key: Zeroizing<::F>, + pub(crate) db: Db, + pub(crate) message_queue: Arc, + pub(crate) p2p: P, + pub(crate) p2p_add_tributary: + mpsc::UnboundedSender<(ExternalValidatorSet, Tributary)>, + pub(crate) p2p_retire_tributary: mpsc::UnboundedSender, +} + +impl ContinuallyRan for SubstrateTask
{ + type Error = String; // TODO + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut made_progress = false; + + // Handle the Canonical events + for network in serai_client::primitives::EXTERNAL_NETWORKS { + loop { + let mut txn = self.db.txn(); + let Some(msg) = serai_coordinator_substrate::Canonical::try_recv(&mut txn, network) + else { + break; + }; + + match msg { + messages::substrate::CoordinatorMessage::SetKeys { session, .. } => { + KeySet::set(&mut txn, ExternalValidatorSet { network, session }, &()); + } + messages::substrate::CoordinatorMessage::SlashesReported { session } => { + let prior_retired = crate::db::RetiredTributary::get(&txn, network); + let next_to_be_retired = + prior_retired.map(|session| Session(session.0 + 1)).unwrap_or(Session(0)); + assert_eq!(session, next_to_be_retired); + crate::db::RetiredTributary::set(&mut txn, network, &session); + self + .p2p_retire_tributary + .send(ExternalValidatorSet { network, session }) + .expect("p2p retire_tributary channel dropped?"); + } + messages::substrate::CoordinatorMessage::Block { .. } => {} + } + + let msg = messages::CoordinatorMessage::from(msg); + let metadata = Metadata { + from: Service::Coordinator, + to: Service::Processor(network), + intent: msg.intent(), + }; + let msg = borsh::to_vec(&msg).unwrap(); + self.message_queue.queue(metadata, msg).await?; + txn.commit(); + made_progress = true; + } + } + + // Handle the NewSet events + loop { + let mut txn = self.db.txn(); + let Some(new_set) = serai_coordinator_substrate::NewSet::try_recv(&mut txn) else { break }; + + if let Some(historic_session) = new_set.set.session.0.checked_sub(2) { + // We should have retired this session if we're here + if crate::db::RetiredTributary::get(&txn, new_set.set.network).map(|session| session.0) < + Some(historic_session) + { + /* + If we haven't, it's because we're processing the NewSet event before the retiry + event from the Canonical event stream. This happens if the Canonical event, and + then the NewSet event, is fired while we're already iterating over NewSet events. + + We break, dropping the txn, restoring this NewSet to the database, so we'll only + handle it once a future iteration of this loop handles the retiry event. + */ + break; + } + + /* + Queue this historical Tributary for deletion. + + We explicitly don't queue this upon Tributary retire, instead here, to give time to + investigate retired Tributaries if questions are raised post-retiry. This gives a + week (the duration of the following session) after the Tributary has been retired to + make a backup of the data directory for any investigations. 
+ */ + crate::db::TributaryCleanup::send( + &mut txn, + &ExternalValidatorSet { + network: new_set.set.network, + session: Session(historic_session), + }, + ); + } + + // Save this Tributary as active to the database + { + let mut active_tributaries = + crate::db::ActiveTributaries::get(&txn).unwrap_or(Vec::with_capacity(1)); + active_tributaries.push(new_set.clone()); + crate::db::ActiveTributaries::set(&mut txn, &active_tributaries); + } + + // Send GenerateKey to the processor + let msg = messages::key_gen::CoordinatorMessage::GenerateKey { + session: new_set.set.session, + threshold: new_set.threshold, + evrf_public_keys: new_set.evrf_public_keys.clone(), + }; + let msg = messages::CoordinatorMessage::from(msg); + let metadata = Metadata { + from: Service::Coordinator, + to: Service::Processor(new_set.set.network), + intent: msg.intent(), + }; + let msg = borsh::to_vec(&msg).unwrap(); + self.message_queue.queue(metadata, msg).await?; + + // Commit the transaction for all of this + txn.commit(); + + // Now spawn the Tributary + // If we reboot after committing the txn, but before this is called, this will be called + // on boot + crate::tributary::spawn_tributary( + self.db.clone(), + self.message_queue.clone(), + self.p2p.clone(), + &self.p2p_add_tributary, + new_set, + self.serai_key.clone(), + ) + .await; + + made_progress = true; + } + + Ok(made_progress) + } + } +} diff --git a/coordinator/src/substrate/cosign.rs b/coordinator/src/substrate/cosign.rs deleted file mode 100644 index 644ddf13..00000000 --- a/coordinator/src/substrate/cosign.rs +++ /dev/null @@ -1,337 +0,0 @@ -/* - If: - A) This block has events and it's been at least X blocks since the last cosign or - B) This block doesn't have events but it's been X blocks since a skipped block which did - have events or - C) This block key gens (which changes who the cosigners are) - cosign this block. - - This creates both a minimum and maximum delay of X blocks before a block's cosigning begins, - barring key gens which are exceptional. The minimum delay is there to ensure we don't constantly - spawn new protocols every 6 seconds, overwriting the old ones. The maximum delay is there to - ensure any block needing cosigned is consigned within a reasonable amount of time. -*/ - -use zeroize::Zeroizing; - -use ciphersuite::{Ciphersuite, Ristretto}; - -use borsh::{BorshSerialize, BorshDeserialize}; - -use serai_client::{ - primitives::ExternalNetworkId, - validator_sets::primitives::{ExternalValidatorSet, Session}, - Serai, SeraiError, -}; - -use serai_db::*; - -use crate::{Db, substrate::in_set, tributary::SeraiBlockNumber}; - -// 5 minutes, expressed in blocks -// TODO: Pull a constant for block time -const COSIGN_DISTANCE: u64 = 5 * 60 / 6; - -#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] -enum HasEvents { - KeyGen, - Yes, - No, -} - -create_db!( - SubstrateCosignDb { - ScanCosignFrom: () -> u64, - IntendedCosign: () -> (u64, Option), - BlockHasEventsCache: (block: u64) -> HasEvents, - LatestCosignedBlock: () -> u64, - } -); - -impl IntendedCosign { - // Sets the intended to cosign block, clearing the prior value entirely. - pub fn set_intended_cosign(txn: &mut impl DbTxn, intended: u64) { - Self::set(txn, &(intended, None::)); - } - - // Sets the cosign skipped since the last intended to cosign block. 
- pub fn set_skipped_cosign(txn: &mut impl DbTxn, skipped: u64) { - let (intended, prior_skipped) = Self::get(txn).unwrap(); - assert!(prior_skipped.is_none()); - Self::set(txn, &(intended, Some(skipped))); - } -} - -impl LatestCosignedBlock { - pub fn latest_cosigned_block(getter: &impl Get) -> u64 { - Self::get(getter).unwrap_or_default().max(1) - } -} - -db_channel! { - SubstrateDbChannels { - CosignTransactions: (network: ExternalNetworkId) -> (Session, u64, [u8; 32]), - } -} - -impl CosignTransactions { - // Append a cosign transaction. - pub fn append_cosign( - txn: &mut impl DbTxn, - set: ExternalValidatorSet, - number: u64, - hash: [u8; 32], - ) { - CosignTransactions::send(txn, set.network, &(set.session, number, hash)) - } -} - -async fn block_has_events( - txn: &mut impl DbTxn, - serai: &Serai, - block: u64, -) -> Result { - let cached = BlockHasEventsCache::get(txn, block); - match cached { - None => { - let serai = serai.as_of( - serai - .finalized_block_by_number(block) - .await? - .expect("couldn't get block which should've been finalized") - .hash(), - ); - - if !serai.validator_sets().key_gen_events().await?.is_empty() { - return Ok(HasEvents::KeyGen); - } - - let has_no_events = serai.coins().burn_with_instruction_events().await?.is_empty() && - serai.in_instructions().batch_events().await?.is_empty() && - serai.validator_sets().new_set_events().await?.is_empty() && - serai.validator_sets().set_retired_events().await?.is_empty(); - - let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes }; - - BlockHasEventsCache::set(txn, block, &has_events); - Ok(has_events) - } - Some(code) => Ok(code), - } -} - -async fn potentially_cosign_block( - txn: &mut impl DbTxn, - serai: &Serai, - block: u64, - skipped_block: Option, - window_end_exclusive: u64, -) -> Result { - // The following code regarding marking cosigned if prior block is cosigned expects this block to - // not be zero - // While we could perform this check there, there's no reason not to optimize the entire function - // as such - if block == 0 { - return Ok(false); - } - - let block_has_events = block_has_events(txn, serai, block).await?; - - // If this block had no events and immediately follows a cosigned block, mark it as cosigned - if (block_has_events == HasEvents::No) && - (LatestCosignedBlock::latest_cosigned_block(txn) == (block - 1)) - { - log::debug!("automatically co-signing next block ({block}) since it has no events"); - LatestCosignedBlock::set(txn, &block); - } - - // If we skipped a block, we're supposed to sign it plus the COSIGN_DISTANCE if no other blocks - // trigger a cosigning protocol covering it - // This means there will be the maximum delay allowed from a block needing cosigning occurring - // and a cosign for it triggering - let maximally_latent_cosign_block = - skipped_block.map(|skipped_block| skipped_block + COSIGN_DISTANCE); - - // If this block is within the window, - if block < window_end_exclusive { - // and set a key, cosign it - if block_has_events == HasEvents::KeyGen { - IntendedCosign::set_intended_cosign(txn, block); - // Carry skipped if it isn't included by cosigning this block - if let Some(skipped) = skipped_block { - if skipped > block { - IntendedCosign::set_skipped_cosign(txn, block); - } - } - return Ok(true); - } - } else if (Some(block) == maximally_latent_cosign_block) || (block_has_events != HasEvents::No) { - // Since this block was outside the window and had events/was maximally latent, cosign it - IntendedCosign::set_intended_cosign(txn, block); 
- return Ok(true); - } - Ok(false) -} - -/* - Advances the cosign protocol as should be done per the latest block. - - A block is considered cosigned if: - A) It was cosigned - B) It's the parent of a cosigned block - C) It immediately follows a cosigned block and has no events requiring cosigning - - This only actually performs advancement within a limited bound (generally until it finds a block - which should be cosigned). Accordingly, it is necessary to call multiple times even if - `latest_number` doesn't change. -*/ -async fn advance_cosign_protocol_inner( - db: &mut impl Db, - key: &Zeroizing<::F>, - serai: &Serai, - latest_number: u64, -) -> Result<(), SeraiError> { - let mut txn = db.txn(); - - const INITIAL_INTENDED_COSIGN: u64 = 1; - let (last_intended_to_cosign_block, mut skipped_block) = { - let intended_cosign = IntendedCosign::get(&txn); - // If we haven't prior intended to cosign a block, set the intended cosign to 1 - if let Some(intended_cosign) = intended_cosign { - intended_cosign - } else { - IntendedCosign::set_intended_cosign(&mut txn, INITIAL_INTENDED_COSIGN); - IntendedCosign::get(&txn).unwrap() - } - }; - - // "windows" refers to the window of blocks where even if there's a block which should be - // cosigned, it won't be due to proximity due to the prior cosign - let mut window_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE; - // If we've never triggered a cosign, don't skip any cosigns based on proximity - if last_intended_to_cosign_block == INITIAL_INTENDED_COSIGN { - window_end_exclusive = 1; - } - - // The consensus rules for this are `last_intended_to_cosign_block + 1` - let scan_start_block = last_intended_to_cosign_block + 1; - // As a practical optimization, we don't re-scan old blocks since old blocks are independent to - // new state - let scan_start_block = scan_start_block.max(ScanCosignFrom::get(&txn).unwrap_or(1)); - - // Check all blocks within the window to see if they should be cosigned - // If so, we're skipping them and need to flag them as skipped so that once the window closes, we - // do cosign them - // We only perform this check if we haven't already marked a block as skipped since the cosign - // the skipped block will cause will cosign all other blocks within this window - if skipped_block.is_none() { - let window_end_inclusive = window_end_exclusive - 1; - for b in scan_start_block ..= window_end_inclusive.min(latest_number) { - if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes { - skipped_block = Some(b); - log::debug!("skipping cosigning {b} due to proximity to prior cosign"); - IntendedCosign::set_skipped_cosign(&mut txn, b); - break; - } - } - } - - // A block which should be cosigned - let mut to_cosign = None; - // A list of sets which are cosigning, along with a boolean of if we're in the set - let mut cosigning = vec![]; - - for block in scan_start_block ..= latest_number { - let actual_block = serai - .finalized_block_by_number(block) - .await? - .expect("couldn't get block which should've been finalized"); - - // Save the block number for this block, as needed by the cosigner to perform cosigning - SeraiBlockNumber::set(&mut txn, actual_block.hash(), &block); - - if potentially_cosign_block(&mut txn, serai, block, skipped_block, window_end_exclusive).await? 
- { - to_cosign = Some((block, actual_block.hash())); - - // Get the keys as of the prior block - // If this key sets new keys, the coordinator won't acknowledge so until we process this - // block - // We won't process this block until its co-signed - // Using the keys of the prior block ensures this deadlock isn't reached - let serai = serai.as_of(actual_block.header.parent_hash.into()); - - for network in serai_client::primitives::EXTERNAL_NETWORKS { - // Get the latest session to have set keys - let set_with_keys = { - let Some(latest_session) = serai.validator_sets().session(network.into()).await? else { - continue; - }; - let prior_session = Session(latest_session.0.saturating_sub(1)); - if serai - .validator_sets() - .keys(ExternalValidatorSet { network, session: prior_session }) - .await? - .is_some() - { - ExternalValidatorSet { network, session: prior_session } - } else { - let set = ExternalValidatorSet { network, session: latest_session }; - if serai.validator_sets().keys(set).await?.is_none() { - continue; - } - set - } - }; - - log::debug!("{:?} will be cosigning {block}", set_with_keys.network); - cosigning.push((set_with_keys, in_set(key, &serai, set_with_keys.into()).await?.unwrap())); - } - - break; - } - - // If this TX is committed, always start future scanning from the next block - ScanCosignFrom::set(&mut txn, &(block + 1)); - // Since we're scanning *from* the next block, tidy the cache - BlockHasEventsCache::del(&mut txn, block); - } - - if let Some((number, hash)) = to_cosign { - // If this block doesn't have cosigners, yet does have events, automatically mark it as - // cosigned - if cosigning.is_empty() { - log::debug!("{} had no cosigners available, marking as cosigned", number); - LatestCosignedBlock::set(&mut txn, &number); - } else { - for (set, in_set) in cosigning { - if in_set { - log::debug!("cosigning {number} with {:?} {:?}", set.network, set.session); - CosignTransactions::append_cosign(&mut txn, set, number, hash); - } - } - } - } - txn.commit(); - - Ok(()) -} - -pub async fn advance_cosign_protocol( - db: &mut impl Db, - key: &Zeroizing<::F>, - serai: &Serai, - latest_number: u64, -) -> Result<(), SeraiError> { - loop { - let scan_from = ScanCosignFrom::get(db).unwrap_or(1); - // Only scan 1000 blocks at a time to limit a massive txn from forming - let scan_to = latest_number.min(scan_from + 1000); - advance_cosign_protocol_inner(db, key, serai, scan_to).await?; - // If we didn't limit the scan_to, break - if scan_to == latest_number { - break; - } - } - Ok(()) -} diff --git a/coordinator/src/substrate/db.rs b/coordinator/src/substrate/db.rs deleted file mode 100644 index 52493105..00000000 --- a/coordinator/src/substrate/db.rs +++ /dev/null @@ -1,32 +0,0 @@ -use serai_client::primitives::ExternalNetworkId; - -pub use serai_db::*; - -mod inner_db { - use super::*; - - create_db!( - SubstrateDb { - NextBlock: () -> u64, - HandledEvent: (block: [u8; 32]) -> u32, - BatchInstructionsHashDb: (network: ExternalNetworkId, id: u32) -> [u8; 32] - } - ); -} -pub(crate) use inner_db::{NextBlock, BatchInstructionsHashDb}; - -pub struct HandledEvent; -impl HandledEvent { - fn next_to_handle_event(getter: &impl Get, block: [u8; 32]) -> u32 { - inner_db::HandledEvent::get(getter, block).map_or(0, |last| last + 1) - } - pub fn is_unhandled(getter: &impl Get, block: [u8; 32], event_id: u32) -> bool { - let next = Self::next_to_handle_event(getter, block); - assert!(next >= event_id); - next == event_id - } - pub fn handle_event(txn: &mut impl DbTxn, block: [u8; 32], 
index: u32) { - assert!(Self::next_to_handle_event(txn, block) == index); - inner_db::HandledEvent::set(txn, block, &index); - } -} diff --git a/coordinator/src/substrate/mod.rs b/coordinator/src/substrate/mod.rs deleted file mode 100644 index a10806a3..00000000 --- a/coordinator/src/substrate/mod.rs +++ /dev/null @@ -1,546 +0,0 @@ -use core::{ops::Deref, time::Duration}; -use std::{ - sync::Arc, - collections::{HashSet, HashMap}, -}; - -use zeroize::Zeroizing; - -use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; - -use serai_client::{ - coins::CoinsEvent, - in_instructions::InInstructionsEvent, - primitives::{BlockHash, ExternalNetworkId}, - validator_sets::{ - primitives::{ExternalValidatorSet, ValidatorSet}, - ValidatorSetsEvent, - }, - Block, Serai, SeraiError, TemporalSerai, -}; - -use serai_db::DbTxn; - -use processor_messages::SubstrateContext; - -use tokio::{sync::mpsc, time::sleep}; - -use crate::{ - Db, - processors::Processors, - tributary::{TributarySpec, SeraiDkgCompleted}, -}; - -mod db; -pub use db::*; - -mod cosign; -pub use cosign::*; - -async fn in_set( - key: &Zeroizing<::F>, - serai: &TemporalSerai<'_>, - set: ValidatorSet, -) -> Result, SeraiError> { - let Some(participants) = serai.validator_sets().participants(set.network).await? else { - return Ok(None); - }; - let key = (Ristretto::generator() * key.deref()).to_bytes(); - Ok(Some(participants.iter().any(|(participant, _)| participant.0 == key))) -} - -async fn handle_new_set( - txn: &mut D::Transaction<'_>, - key: &Zeroizing<::F>, - new_tributary_spec: &mpsc::UnboundedSender, - serai: &Serai, - block: &Block, - set: ExternalValidatorSet, -) -> Result<(), SeraiError> { - if in_set(key, &serai.as_of(block.hash()), set.into()) - .await? - .expect("NewSet for set which doesn't exist") - { - log::info!("present in set {:?}", set); - - let set_data = { - let serai = serai.as_of(block.hash()); - let serai = serai.validator_sets(); - let set_participants = - serai.participants(set.network.into()).await?.expect("NewSet for set which doesn't exist"); - - set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::>() - }; - - let time = if let Ok(time) = block.time() { - time - } else { - assert_eq!(block.number(), 0); - // Use the next block's time - loop { - let Ok(Some(res)) = serai.finalized_block_by_number(1).await else { - sleep(Duration::from_secs(5)).await; - continue; - }; - break res.time().unwrap(); - } - }; - // The block time is in milliseconds yet the Tributary is in seconds - let time = time / 1000; - // Since this block is in the past, and Tendermint doesn't play nice with starting chains after - // their start time (though it does eventually work), delay the start time by 120 seconds - // This is meant to handle ~20 blocks of lack of finalization for this first block - const SUBSTRATE_TO_TRIBUTARY_TIME_DELAY: u64 = 120; - let time = time + SUBSTRATE_TO_TRIBUTARY_TIME_DELAY; - - let spec = TributarySpec::new(block.hash(), time, set, set_data); - - log::info!("creating new tributary for {:?}", spec.set()); - - // Save it to the database now, not on the channel receiver's side, so this is safe against - // reboots - // If this txn finishes, and we reboot, then this'll be reloaded from active Tributaries - // If this txn doesn't finish, this will be re-fired - // If we waited to save to the DB, this txn may be finished, preventing re-firing, yet the - // prior fired event may have not been received yet - crate::ActiveTributaryDb::add_participating_in_tributary(txn, &spec); - 
- new_tributary_spec.send(spec).unwrap(); - } else { - log::info!("not present in new set {:?}", set); - } - - Ok(()) -} - -async fn handle_batch_and_burns( - txn: &mut impl DbTxn, - processors: &Pro, - serai: &Serai, - block: &Block, -) -> Result<(), SeraiError> { - // Track which networks had events with a Vec in ordr to preserve the insertion order - // While that shouldn't be needed, ensuring order never hurts, and may enable design choices - // with regards to Processor <-> Coordinator message passing - let mut networks_with_event = vec![]; - let mut network_had_event = |burns: &mut HashMap<_, _>, batches: &mut HashMap<_, _>, network| { - // Don't insert this network multiple times - // A Vec is still used in order to maintain the insertion order - if !networks_with_event.contains(&network) { - networks_with_event.push(network); - burns.insert(network, vec![]); - batches.insert(network, vec![]); - } - }; - - let mut batch_block = HashMap::new(); - let mut batches = HashMap::>::new(); - let mut burns = HashMap::new(); - - let serai = serai.as_of(block.hash()); - for batch in serai.in_instructions().batch_events().await? { - if let InInstructionsEvent::Batch { network, id, block: network_block, instructions_hash } = - batch - { - network_had_event(&mut burns, &mut batches, network); - - BatchInstructionsHashDb::set(txn, network, id, &instructions_hash); - - // Make sure this is the only Batch event for this network in this Block - assert!(batch_block.insert(network, network_block).is_none()); - - // Add the batch included by this block - batches.get_mut(&network).unwrap().push(id); - } else { - panic!("Batch event wasn't Batch: {batch:?}"); - } - } - - for burn in serai.coins().burn_with_instruction_events().await? { - if let CoinsEvent::BurnWithInstruction { from: _, instruction } = burn { - let network = instruction.balance.coin.network(); - network_had_event(&mut burns, &mut batches, network); - - // network_had_event should register an entry in burns - burns.get_mut(&network).unwrap().push(instruction); - } else { - panic!("Burn event wasn't Burn: {burn:?}"); - } - } - - assert_eq!(HashSet::<&_>::from_iter(networks_with_event.iter()).len(), networks_with_event.len()); - - for network in networks_with_event { - let network_latest_finalized_block = if let Some(block) = batch_block.remove(&network) { - block - } else { - // If it's had a batch or a burn, it must have had a block acknowledged - serai - .in_instructions() - .latest_block_for_network(network) - .await? - .expect("network had a batch/burn yet never set a latest block") - }; - - processors - .send( - network, - processor_messages::substrate::CoordinatorMessage::SubstrateBlock { - context: SubstrateContext { - serai_time: block.time().unwrap() / 1000, - network_latest_finalized_block, - }, - block: block.number(), - burns: burns.remove(&network).unwrap(), - batches: batches.remove(&network).unwrap(), - }, - ) - .await; - } - - Ok(()) -} - -// Handle a specific Substrate block, returning an error when it fails to get data -// (not blocking / holding) -#[allow(clippy::too_many_arguments)] -async fn handle_block( - db: &mut D, - key: &Zeroizing<::F>, - new_tributary_spec: &mpsc::UnboundedSender, - perform_slash_report: &mpsc::UnboundedSender, - tributary_retired: &mpsc::UnboundedSender, - processors: &Pro, - serai: &Serai, - block: Block, -) -> Result<(), SeraiError> { - let hash = block.hash(); - - // Define an indexed event ID. 
- let mut event_id = 0; - - // If a new validator set was activated, create tributary/inform processor to do a DKG - for new_set in serai.as_of(hash).validator_sets().new_set_events().await? { - // Individually mark each event as handled so on reboot, we minimize duplicates - // Additionally, if the Serai connection also fails 1/100 times, this means a block with 1000 - // events will successfully be incrementally handled - // (though the Serai connection should be stable, making this unnecessary) - let ValidatorSetsEvent::NewSet { set } = new_set else { - panic!("NewSet event wasn't NewSet: {new_set:?}"); - }; - - // We only coordinate/process external networks - let Ok(set) = ExternalValidatorSet::try_from(set) else { continue }; - if HandledEvent::is_unhandled(db, hash, event_id) { - log::info!("found fresh new set event {:?}", new_set); - let mut txn = db.txn(); - handle_new_set::(&mut txn, key, new_tributary_spec, serai, &block, set).await?; - HandledEvent::handle_event(&mut txn, hash, event_id); - txn.commit(); - } - event_id += 1; - } - - // If a key pair was confirmed, inform the processor - for key_gen in serai.as_of(hash).validator_sets().key_gen_events().await? { - if HandledEvent::is_unhandled(db, hash, event_id) { - log::info!("found fresh key gen event {:?}", key_gen); - let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen else { - panic!("KeyGen event wasn't KeyGen: {key_gen:?}"); - }; - let substrate_key = key_pair.0 .0; - processors - .send( - set.network, - processor_messages::substrate::CoordinatorMessage::ConfirmKeyPair { - context: SubstrateContext { - serai_time: block.time().unwrap() / 1000, - network_latest_finalized_block: serai - .as_of(block.hash()) - .in_instructions() - .latest_block_for_network(set.network) - .await? - // The processor treats this as a magic value which will cause it to find a network - // block which has a time greater than or equal to the Serai time - .unwrap_or(BlockHash([0; 32])), - }, - session: set.session, - key_pair, - }, - ) - .await; - - // TODO: If we were in the set, yet were removed, drop the tributary - - let mut txn = db.txn(); - SeraiDkgCompleted::set(&mut txn, set, &substrate_key); - HandledEvent::handle_event(&mut txn, hash, event_id); - txn.commit(); - } - event_id += 1; - } - - for accepted_handover in serai.as_of(hash).validator_sets().accepted_handover_events().await? { - let ValidatorSetsEvent::AcceptedHandover { set } = accepted_handover else { - panic!("AcceptedHandover event wasn't AcceptedHandover: {accepted_handover:?}"); - }; - - let Ok(set) = ExternalValidatorSet::try_from(set) else { continue }; - if HandledEvent::is_unhandled(db, hash, event_id) { - log::info!("found fresh accepted handover event {:?}", accepted_handover); - // TODO: This isn't atomic with the event handling - // Send a oneshot receiver so we can await the response? - perform_slash_report.send(set).unwrap(); - let mut txn = db.txn(); - HandledEvent::handle_event(&mut txn, hash, event_id); - txn.commit(); - } - event_id += 1; - } - - for retired_set in serai.as_of(hash).validator_sets().set_retired_events().await? 
{ - let ValidatorSetsEvent::SetRetired { set } = retired_set else { - panic!("SetRetired event wasn't SetRetired: {retired_set:?}"); - }; - - let Ok(set) = ExternalValidatorSet::try_from(set) else { continue }; - if HandledEvent::is_unhandled(db, hash, event_id) { - log::info!("found fresh set retired event {:?}", retired_set); - let mut txn = db.txn(); - crate::ActiveTributaryDb::retire_tributary(&mut txn, set); - tributary_retired.send(set).unwrap(); - HandledEvent::handle_event(&mut txn, hash, event_id); - txn.commit(); - } - event_id += 1; - } - - // Finally, tell the processor of acknowledged blocks/burns - // This uses a single event as unlike prior events which individually executed code, all - // following events share data collection - if HandledEvent::is_unhandled(db, hash, event_id) { - let mut txn = db.txn(); - handle_batch_and_burns(&mut txn, processors, serai, &block).await?; - HandledEvent::handle_event(&mut txn, hash, event_id); - txn.commit(); - } - - Ok(()) -} - -#[allow(clippy::too_many_arguments)] -async fn handle_new_blocks( - db: &mut D, - key: &Zeroizing<::F>, - new_tributary_spec: &mpsc::UnboundedSender, - perform_slash_report: &mpsc::UnboundedSender, - tributary_retired: &mpsc::UnboundedSender, - processors: &Pro, - serai: &Serai, - next_block: &mut u64, -) -> Result<(), SeraiError> { - // Check if there's been a new Substrate block - let latest_number = serai.latest_finalized_block().await?.number(); - - // Advance the cosigning protocol - advance_cosign_protocol(db, key, serai, latest_number).await?; - - // Reduce to the latest cosigned block - let latest_number = latest_number.min(LatestCosignedBlock::latest_cosigned_block(db)); - - if latest_number < *next_block { - return Ok(()); - } - - for b in *next_block ..= latest_number { - let block = serai - .finalized_block_by_number(b) - .await? 
- .expect("couldn't get block before the latest finalized block"); - - log::info!("handling substrate block {b}"); - handle_block( - db, - key, - new_tributary_spec, - perform_slash_report, - tributary_retired, - processors, - serai, - block, - ) - .await?; - *next_block += 1; - - let mut txn = db.txn(); - NextBlock::set(&mut txn, next_block); - txn.commit(); - - log::info!("handled substrate block {b}"); - } - - Ok(()) -} - -pub async fn scan_task( - mut db: D, - key: Zeroizing<::F>, - processors: Pro, - serai: Arc, - new_tributary_spec: mpsc::UnboundedSender, - perform_slash_report: mpsc::UnboundedSender, - tributary_retired: mpsc::UnboundedSender, -) { - log::info!("scanning substrate"); - let mut next_substrate_block = NextBlock::get(&db).unwrap_or_default(); - - /* - let new_substrate_block_notifier = { - let serai = &serai; - move || async move { - loop { - match serai.newly_finalized_block().await { - Ok(sub) => return sub, - Err(e) => { - log::error!("couldn't communicate with serai node: {e}"); - sleep(Duration::from_secs(5)).await; - } - } - } - } - }; - */ - // TODO: Restore the above subscription-based system - // That would require moving serai-client from HTTP to websockets - let new_substrate_block_notifier = { - let serai = &serai; - move |next_substrate_block| async move { - loop { - match serai.latest_finalized_block().await { - Ok(latest) => { - if latest.header.number >= next_substrate_block { - return latest; - } - sleep(Duration::from_secs(3)).await; - } - Err(e) => { - log::error!("couldn't communicate with serai node: {e}"); - sleep(Duration::from_secs(5)).await; - } - } - } - } - }; - - loop { - // await the next block, yet if our notifier had an error, re-create it - { - let Ok(_) = tokio::time::timeout( - Duration::from_secs(60), - new_substrate_block_notifier(next_substrate_block), - ) - .await - else { - // Timed out, which may be because Serai isn't finalizing or may be some issue with the - // notifier - if serai.latest_finalized_block().await.map(|block| block.number()).ok() == - Some(next_substrate_block.saturating_sub(1)) - { - log::info!("serai hasn't finalized a block in the last 60s..."); - } - continue; - }; - - /* - // next_block is a Option - if next_block.and_then(Result::ok).is_none() { - substrate_block_notifier = new_substrate_block_notifier(next_substrate_block); - continue; - } - */ - } - - match handle_new_blocks( - &mut db, - &key, - &new_tributary_spec, - &perform_slash_report, - &tributary_retired, - &processors, - &serai, - &mut next_substrate_block, - ) - .await - { - Ok(()) => {} - Err(e) => { - log::error!("couldn't communicate with serai node: {e}"); - sleep(Duration::from_secs(5)).await; - } - } - } -} - -/// Gets the expected ID for the next Batch. -/// -/// Will log an error and apply a slight sleep on error, letting the caller simply immediately -/// retry. 
-pub(crate) async fn expected_next_batch( - serai: &Serai, - network: ExternalNetworkId, -) -> Result { - async fn expected_next_batch_inner( - serai: &Serai, - network: ExternalNetworkId, - ) -> Result { - let serai = serai.as_of_latest_finalized_block().await?; - let last = serai.in_instructions().last_batch_for_network(network).await?; - Ok(if let Some(last) = last { last + 1 } else { 0 }) - } - match expected_next_batch_inner(serai, network).await { - Ok(next) => Ok(next), - Err(e) => { - log::error!("couldn't get the expected next batch from substrate: {e:?}"); - sleep(Duration::from_millis(100)).await; - Err(e) - } - } -} - -/// Verifies `Batch`s which have already been indexed from Substrate. -/// -/// Spins if a distinct `Batch` is detected on-chain. -/// -/// This has a slight malleability in that doesn't verify *who* published a `Batch` is as expected. -/// This is deemed fine. -pub(crate) async fn verify_published_batches( - txn: &mut D::Transaction<'_>, - network: ExternalNetworkId, - optimistic_up_to: u32, -) -> Option { - // TODO: Localize from MainDb to SubstrateDb - let last = crate::LastVerifiedBatchDb::get(txn, network); - for id in last.map_or(0, |last| last + 1) ..= optimistic_up_to { - let Some(on_chain) = BatchInstructionsHashDb::get(txn, network, id) else { - break; - }; - let off_chain = crate::ExpectedBatchDb::get(txn, network, id).unwrap(); - if on_chain != off_chain { - // Halt operations on this network and spin, as this is a critical fault - loop { - log::error!( - "{}! network: {:?} id: {} off-chain: {} on-chain: {}", - "on-chain batch doesn't match off-chain", - network, - id, - hex::encode(off_chain), - hex::encode(on_chain), - ); - sleep(Duration::from_secs(60)).await; - } - } - crate::LastVerifiedBatchDb::set(txn, network, &id); - } - - crate::LastVerifiedBatchDb::get(txn, network) -} diff --git a/coordinator/src/tests/mod.rs b/coordinator/src/tests/mod.rs deleted file mode 100644 index 1d9d6d34..00000000 --- a/coordinator/src/tests/mod.rs +++ /dev/null @@ -1,125 +0,0 @@ -use core::fmt::Debug; -use std::{ - sync::Arc, - collections::{VecDeque, HashSet, HashMap}, -}; - -use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet}; - -use processor_messages::CoordinatorMessage; - -use async_trait::async_trait; - -use tokio::sync::RwLock; - -use crate::{ - processors::{Message, Processors}, - TributaryP2p, ReqResMessageKind, GossipMessageKind, P2pMessageKind, Message as P2pMessage, P2p, -}; - -pub mod tributary; - -#[derive(Clone)] -pub struct MemProcessors(pub Arc>>>); -impl MemProcessors { - #[allow(clippy::new_without_default)] - pub fn new() -> MemProcessors { - MemProcessors(Arc::new(RwLock::new(HashMap::new()))) - } -} - -#[async_trait::async_trait] -impl Processors for MemProcessors { - async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into) { - let mut processors = self.0.write().await; - let processor = processors.entry(network).or_insert_with(VecDeque::new); - processor.push_back(msg.into()); - } - async fn recv(&self, _: ExternalNetworkId) -> Message { - todo!() - } - async fn ack(&self, _: Message) { - todo!() - } -} - -#[allow(clippy::type_complexity)] -#[derive(Clone, Debug)] -pub struct LocalP2p( - usize, - pub Arc>, Vec)>>)>>, -); - -impl LocalP2p { - pub fn new(validators: usize) -> Vec { - let shared = Arc::new(RwLock::new((HashSet::new(), vec![VecDeque::new(); validators]))); - let mut res = vec![]; - for i in 0 .. 
validators { - res.push(LocalP2p(i, shared.clone())); - } - res - } -} - -#[async_trait] -impl P2p for LocalP2p { - type Id = usize; - - async fn subscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {} - async fn unsubscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {} - - async fn send_raw(&self, to: Self::Id, msg: Vec) { - let mut msg_ref = msg.as_slice(); - let kind = ReqResMessageKind::read(&mut msg_ref).unwrap(); - self.1.write().await.1[to].push_back((self.0, P2pMessageKind::ReqRes(kind), msg_ref.to_vec())); - } - - async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec) { - // Content-based deduplication - let mut lock = self.1.write().await; - { - let already_sent = &mut lock.0; - if already_sent.contains(&msg) { - return; - } - already_sent.insert(msg.clone()); - } - let queues = &mut lock.1; - - let kind_len = (match kind { - P2pMessageKind::ReqRes(kind) => kind.serialize(), - P2pMessageKind::Gossip(kind) => kind.serialize(), - }) - .len(); - let msg = msg[kind_len ..].to_vec(); - - for (i, msg_queue) in queues.iter_mut().enumerate() { - if i == self.0 { - continue; - } - msg_queue.push_back((self.0, kind, msg.clone())); - } - } - - async fn receive(&self) -> P2pMessage { - // This is a cursed way to implement an async read from a Vec - loop { - if let Some((sender, kind, msg)) = self.1.write().await.1[self.0].pop_front() { - return P2pMessage { sender, kind, msg }; - } - tokio::time::sleep(std::time::Duration::from_millis(100)).await; - } - } -} - -#[async_trait] -impl TributaryP2p for LocalP2p { - async fn broadcast(&self, genesis: [u8; 32], msg: Vec) { - ::broadcast( - self, - P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)), - msg, - ) - .await - } -} diff --git a/coordinator/src/tests/tributary/chain.rs b/coordinator/src/tests/tributary/chain.rs deleted file mode 100644 index 62feb78b..00000000 --- a/coordinator/src/tests/tributary/chain.rs +++ /dev/null @@ -1,237 +0,0 @@ -use std::{ - time::{Duration, SystemTime}, - collections::HashSet, -}; - -use zeroize::Zeroizing; -use rand_core::{RngCore, CryptoRng, OsRng}; -use futures_util::{task::Poll, poll}; - -use ciphersuite::{ - group::{ff::Field, GroupEncoding}, - Ciphersuite, Ristretto, -}; - -use sp_application_crypto::sr25519; -use borsh::BorshDeserialize; -use serai_client::{ - primitives::ExternalNetworkId, - validator_sets::primitives::{ExternalValidatorSet, Session}, -}; - -use tokio::time::sleep; - -use serai_db::MemDb; - -use tributary::Tributary; - -use crate::{ - GossipMessageKind, P2pMessageKind, P2p, - tributary::{Transaction, TributarySpec}, - tests::LocalP2p, -}; - -pub fn new_keys( - rng: &mut R, -) -> Vec::F>> { - let mut keys = vec![]; - for _ in 0 .. 
5 { - keys.push(Zeroizing::new(::F::random(&mut *rng))); - } - keys -} - -pub fn new_spec( - rng: &mut R, - keys: &[Zeroizing<::F>], -) -> TributarySpec { - let mut serai_block = [0; 32]; - rng.fill_bytes(&mut serai_block); - - let start_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(); - - let set = ExternalValidatorSet { session: Session(0), network: ExternalNetworkId::Bitcoin }; - - let set_participants = keys - .iter() - .map(|key| (sr25519::Public((::generator() * **key).to_bytes()), 1)) - .collect::>(); - - let res = TributarySpec::new(serai_block, start_time, set, set_participants); - assert_eq!( - TributarySpec::deserialize_reader(&mut borsh::to_vec(&res).unwrap().as_slice()).unwrap(), - res, - ); - res -} - -pub async fn new_tributaries( - keys: &[Zeroizing<::F>], - spec: &TributarySpec, -) -> Vec<(MemDb, LocalP2p, Tributary)> { - let p2p = LocalP2p::new(keys.len()); - let mut res = vec![]; - for (i, key) in keys.iter().enumerate() { - let db = MemDb::new(); - res.push(( - db.clone(), - p2p[i].clone(), - Tributary::<_, Transaction, _>::new( - db, - spec.genesis(), - spec.start_time(), - key.clone(), - spec.validators(), - p2p[i].clone(), - ) - .await - .unwrap(), - )); - } - res -} - -pub async fn run_tributaries( - mut tributaries: Vec<(LocalP2p, Tributary)>, -) { - loop { - for (p2p, tributary) in &mut tributaries { - while let Poll::Ready(msg) = poll!(p2p.receive()) { - match msg.kind { - P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => { - assert_eq!(genesis, tributary.genesis()); - if tributary.handle_message(&msg.msg).await { - p2p.broadcast(msg.kind, msg.msg).await; - } - } - _ => panic!("unexpected p2p message found"), - } - } - } - - sleep(Duration::from_millis(100)).await; - } -} - -pub async fn wait_for_tx_inclusion( - tributary: &Tributary, - mut last_checked: [u8; 32], - hash: [u8; 32], -) -> [u8; 32] { - let reader = tributary.reader(); - loop { - let tip = tributary.tip().await; - if tip == last_checked { - sleep(Duration::from_secs(1)).await; - continue; - } - - let mut queue = vec![reader.block(&tip).unwrap()]; - let mut block = None; - while { - let parent = queue.last().unwrap().parent(); - if parent == tributary.genesis() { - false - } else { - block = Some(reader.block(&parent).unwrap()); - block.as_ref().unwrap().hash() != last_checked - } - } { - queue.push(block.take().unwrap()); - } - - while let Some(block) = queue.pop() { - for tx in &block.transactions { - if tx.hash() == hash { - return block.hash(); - } - } - } - - last_checked = tip; - } -} - -#[tokio::test] -async fn tributary_test() { - let keys = new_keys(&mut OsRng); - let spec = new_spec(&mut OsRng, &keys); - - let mut tributaries = new_tributaries(&keys, &spec) - .await - .into_iter() - .map(|(_, p2p, tributary)| (p2p, tributary)) - .collect::>(); - - let mut blocks = 0; - let mut last_block = spec.genesis(); - - // Doesn't use run_tributaries as we want to wind these down at a certain point - // run_tributaries will run them ad infinitum - let timeout = SystemTime::now() + Duration::from_secs(65); - while (blocks < 10) && (SystemTime::now().duration_since(timeout).is_err()) { - for (p2p, tributary) in &mut tributaries { - while let Poll::Ready(msg) = poll!(p2p.receive()) { - match msg.kind { - P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => { - assert_eq!(genesis, tributary.genesis()); - tributary.handle_message(&msg.msg).await; - } - _ => panic!("unexpected p2p message found"), - } - } - } - - let tip = 
tributaries[0].1.tip().await; - if tip != last_block { - last_block = tip; - blocks += 1; - } - - sleep(Duration::from_millis(100)).await; - } - - if blocks != 10 { - panic!("tributary chain test hit timeout"); - } - - // Handle all existing messages - for (p2p, tributary) in &mut tributaries { - while let Poll::Ready(msg) = poll!(p2p.receive()) { - match msg.kind { - P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => { - assert_eq!(genesis, tributary.genesis()); - tributary.handle_message(&msg.msg).await; - } - _ => panic!("unexpected p2p message found"), - } - } - } - - // handle_message informed the Tendermint machine, yet it still has to process it - // Sleep for a second accordingly - // TODO: Is there a better way to handle this? - sleep(Duration::from_secs(1)).await; - - // All tributaries should agree on the tip, within a block - let mut tips = HashSet::new(); - for (_, tributary) in &tributaries { - tips.insert(tributary.tip().await); - } - assert!(tips.len() <= 2); - if tips.len() == 2 { - for tip in &tips { - // Find a Tributary where this isn't the tip - for (_, tributary) in &tributaries { - let Some(after) = tributary.reader().block_after(tip) else { continue }; - // Make sure the block after is the other tip - assert!(tips.contains(&after)); - return; - } - } - } else { - assert_eq!(tips.len(), 1); - return; - } - panic!("tributary had different tip with a variance exceeding one block"); -} diff --git a/coordinator/src/tests/tributary/dkg.rs b/coordinator/src/tests/tributary/dkg.rs deleted file mode 100644 index adaa6643..00000000 --- a/coordinator/src/tests/tributary/dkg.rs +++ /dev/null @@ -1,392 +0,0 @@ -use core::time::Duration; -use std::collections::HashMap; - -use zeroize::Zeroizing; -use rand_core::{RngCore, OsRng}; - -use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; -use frost::Participant; - -use sp_runtime::traits::Verify; -use serai_client::{ - primitives::{SeraiAddress, Signature}, - validator_sets::primitives::{ExternalValidatorSet, KeyPair}, -}; - -use tokio::time::sleep; - -use serai_db::{Get, DbTxn, Db, MemDb}; - -use processor_messages::{ - key_gen::{self, KeyGenId}, - CoordinatorMessage, -}; - -use tributary::{TransactionTrait, Tributary}; - -use crate::{ - tributary::{ - Transaction, TributarySpec, - scanner::{PublishSeraiTransaction, handle_new_blocks}, - }, - tests::{ - MemProcessors, LocalP2p, - tributary::{new_keys, new_spec, new_tributaries, run_tributaries, wait_for_tx_inclusion}, - }, -}; - -#[tokio::test] -async fn dkg_test() { - env_logger::init(); - - let keys = new_keys(&mut OsRng); - let spec = new_spec(&mut OsRng, &keys); - - let full_tributaries = new_tributaries(&keys, &spec).await; - let mut dbs = vec![]; - let mut tributaries = vec![]; - for (db, p2p, tributary) in full_tributaries { - dbs.push(db); - tributaries.push((p2p, tributary)); - } - - // Run the tributaries in the background - tokio::spawn(run_tributaries(tributaries.clone())); - - let mut txs = vec![]; - // Create DKG commitments for each key - for key in &keys { - let attempt = 0; - let mut commitments = vec![0; 256]; - OsRng.fill_bytes(&mut commitments); - - let mut tx = Transaction::DkgCommitments { - attempt, - commitments: vec![commitments], - signed: Transaction::empty_signed(), - }; - tx.sign(&mut OsRng, spec.genesis(), key); - txs.push(tx); - } - - let block_before_tx = tributaries[0].1.tip().await; - - // Publish all commitments but one - for (i, tx) in txs.iter().enumerate().skip(1) { - 
assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true)); - } - - // Wait until these are included - for tx in txs.iter().skip(1) { - wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await; - } - - let expected_commitments: HashMap<_, _> = txs - .iter() - .enumerate() - .map(|(i, tx)| { - if let Transaction::DkgCommitments { commitments, .. } = tx { - (Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments[0].clone()) - } else { - panic!("txs had non-commitments"); - } - }) - .collect(); - - async fn new_processors( - db: &mut MemDb, - key: &Zeroizing<::F>, - spec: &TributarySpec, - tributary: &Tributary, - ) -> MemProcessors { - let processors = MemProcessors::new(); - handle_new_blocks::<_, _, _, _, _, LocalP2p>( - db, - key, - &|_, _, _, _| async { - panic!("provided TX caused recognized_id to be called in new_processors") - }, - &processors, - &(), - &|_| async { - panic!( - "test tried to publish a new Tributary TX from handle_application_tx in new_processors" - ) - }, - spec, - &tributary.reader(), - ) - .await; - processors - } - - // Instantiate a scanner and verify it has nothing to report - let processors = new_processors(&mut dbs[0], &keys[0], &spec, &tributaries[0].1).await; - assert!(processors.0.read().await.is_empty()); - - // Publish the last commitment - let block_before_tx = tributaries[0].1.tip().await; - assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true)); - wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await; - sleep(Duration::from_secs(Tributary::::block_time().into())).await; - - // Verify the scanner emits a KeyGen::Commitments message - handle_new_blocks::<_, _, _, _, _, LocalP2p>( - &mut dbs[0], - &keys[0], - &|_, _, _, _| async { - panic!("provided TX caused recognized_id to be called after Commitments") - }, - &processors, - &(), - &|_| async { - panic!( - "test tried to publish a new Tributary TX from handle_application_tx after Commitments" - ) - }, - &spec, - &tributaries[0].1.reader(), - ) - .await; - { - let mut msgs = processors.0.write().await; - assert_eq!(msgs.len(), 1); - let msgs = msgs.get_mut(&spec.set().network).unwrap(); - let mut expected_commitments = expected_commitments.clone(); - expected_commitments.remove(&Participant::new((1).try_into().unwrap()).unwrap()); - assert_eq!( - msgs.pop_front().unwrap(), - CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments { - id: KeyGenId { session: spec.set().session, attempt: 0 }, - commitments: expected_commitments - }) - ); - assert!(msgs.is_empty()); - } - - // Verify all keys exhibit this scanner behavior - for (i, key) in keys.iter().enumerate().skip(1) { - let processors = new_processors(&mut dbs[i], key, &spec, &tributaries[i].1).await; - let mut msgs = processors.0.write().await; - assert_eq!(msgs.len(), 1); - let msgs = msgs.get_mut(&spec.set().network).unwrap(); - let mut expected_commitments = expected_commitments.clone(); - expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap()); - assert_eq!( - msgs.pop_front().unwrap(), - CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments { - id: KeyGenId { session: spec.set().session, attempt: 0 }, - commitments: expected_commitments - }) - ); - assert!(msgs.is_empty()); - } - - // Now do shares - let mut txs = vec![]; - for (k, key) in keys.iter().enumerate() { - let attempt = 0; - - let mut shares = vec![vec![]]; - for i in 0 .. 
keys.len() { - if i != k { - let mut share = vec![0; 256]; - OsRng.fill_bytes(&mut share); - shares.last_mut().unwrap().push(share); - } - } - - let mut txn = dbs[k].txn(); - let mut tx = Transaction::DkgShares { - attempt, - shares, - confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0), - signed: Transaction::empty_signed(), - }; - txn.commit(); - tx.sign(&mut OsRng, spec.genesis(), key); - txs.push(tx); - } - - let block_before_tx = tributaries[0].1.tip().await; - for (i, tx) in txs.iter().enumerate().skip(1) { - assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true)); - } - for tx in txs.iter().skip(1) { - wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await; - } - - // With just 4 sets of shares, nothing should happen yet - handle_new_blocks::<_, _, _, _, _, LocalP2p>( - &mut dbs[0], - &keys[0], - &|_, _, _, _| async { - panic!("provided TX caused recognized_id to be called after some shares") - }, - &processors, - &(), - &|_| async { - panic!( - "test tried to publish a new Tributary TX from handle_application_tx after some shares" - ) - }, - &spec, - &tributaries[0].1.reader(), - ) - .await; - assert_eq!(processors.0.read().await.len(), 1); - assert!(processors.0.read().await[&spec.set().network].is_empty()); - - // Publish the final set of shares - let block_before_tx = tributaries[0].1.tip().await; - assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true)); - wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await; - sleep(Duration::from_secs(Tributary::::block_time().into())).await; - - // Each scanner should emit a distinct shares message - let shares_for = |i: usize| { - CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares { - id: KeyGenId { session: spec.set().session, attempt: 0 }, - shares: vec![txs - .iter() - .enumerate() - .filter_map(|(l, tx)| { - if let Transaction::DkgShares { shares, .. 
} = tx { - if i == l { - None - } else { - let relative_i = i - (if i > l { 1 } else { 0 }); - Some(( - Participant::new((l + 1).try_into().unwrap()).unwrap(), - shares[0][relative_i].clone(), - )) - } - } else { - panic!("txs had non-shares"); - } - }) - .collect::>()], - }) - }; - - // Any scanner which has handled the prior blocks should only emit the new event - for (i, key) in keys.iter().enumerate() { - handle_new_blocks::<_, _, _, _, _, LocalP2p>( - &mut dbs[i], - key, - &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") }, - &processors, - &(), - &|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") }, - &spec, - &tributaries[i].1.reader(), - ) - .await; - { - let mut msgs = processors.0.write().await; - assert_eq!(msgs.len(), 1); - let msgs = msgs.get_mut(&spec.set().network).unwrap(); - assert_eq!(msgs.pop_front().unwrap(), shares_for(i)); - assert!(msgs.is_empty()); - } - } - - // Yet new scanners should emit all events - for (i, key) in keys.iter().enumerate() { - let processors = new_processors(&mut MemDb::new(), key, &spec, &tributaries[i].1).await; - let mut msgs = processors.0.write().await; - assert_eq!(msgs.len(), 1); - let msgs = msgs.get_mut(&spec.set().network).unwrap(); - let mut expected_commitments = expected_commitments.clone(); - expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap()); - assert_eq!( - msgs.pop_front().unwrap(), - CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments { - id: KeyGenId { session: spec.set().session, attempt: 0 }, - commitments: expected_commitments - }) - ); - assert_eq!(msgs.pop_front().unwrap(), shares_for(i)); - assert!(msgs.is_empty()); - } - - // Send DkgConfirmed - let mut substrate_key = [0; 32]; - OsRng.fill_bytes(&mut substrate_key); - let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()]; - OsRng.fill_bytes(&mut network_key); - let key_pair = KeyPair(serai_client::Public(substrate_key), network_key.try_into().unwrap()); - - let mut txs = vec![]; - for (i, key) in keys.iter().enumerate() { - let attempt = 0; - let mut txn = dbs[i].txn(); - let share = - crate::tributary::generated_key_pair::(&mut txn, key, &spec, &key_pair, 0).unwrap(); - txn.commit(); - - let mut tx = Transaction::DkgConfirmed { - attempt, - confirmation_share: share, - signed: Transaction::empty_signed(), - }; - tx.sign(&mut OsRng, spec.genesis(), key); - txs.push(tx); - } - let block_before_tx = tributaries[0].1.tip().await; - for (i, tx) in txs.iter().enumerate() { - assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true)); - } - for tx in &txs { - wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await; - } - - struct CheckPublishSetKeys { - spec: TributarySpec, - key_pair: KeyPair, - } - #[async_trait::async_trait] - impl PublishSeraiTransaction for CheckPublishSetKeys { - async fn publish_set_keys( - &self, - _db: &(impl Sync + Get), - set: ExternalValidatorSet, - removed: Vec, - key_pair: KeyPair, - signature: Signature, - ) { - assert_eq!(set, self.spec.set()); - assert!(removed.is_empty()); - assert_eq!(self.key_pair, key_pair); - assert!(signature.verify( - &*serai_client::validator_sets::primitives::set_keys_message(&set, &[], &key_pair), - &serai_client::Public( - frost::dkg::musig::musig_key::( - &serai_client::validator_sets::primitives::musig_context(set.into()), - &self.spec.validators().into_iter().map(|(validator, _)| validator).collect::>() - ) - 
.unwrap() - .to_bytes() - ), - )); - } - } - - // The scanner should successfully try to publish a transaction with a validly signed signature - handle_new_blocks::<_, _, _, _, _, LocalP2p>( - &mut dbs[0], - &keys[0], - &|_, _, _, _| async { - panic!("provided TX caused recognized_id to be called after DKG confirmation") - }, - &processors, - &CheckPublishSetKeys { spec: spec.clone(), key_pair: key_pair.clone() }, - &|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") }, - &spec, - &tributaries[0].1.reader(), - ) - .await; - { - assert!(processors.0.read().await.get(&spec.set().network).unwrap().is_empty()); - } -} diff --git a/coordinator/src/tests/tributary/handle_p2p.rs b/coordinator/src/tests/tributary/handle_p2p.rs deleted file mode 100644 index 756f4561..00000000 --- a/coordinator/src/tests/tributary/handle_p2p.rs +++ /dev/null @@ -1,74 +0,0 @@ -use core::time::Duration; -use std::sync::Arc; - -use rand_core::OsRng; - -use tokio::{ - sync::{mpsc, broadcast}, - time::sleep, -}; - -use serai_db::MemDb; - -use tributary::Tributary; - -use crate::{ - tributary::Transaction, - ActiveTributary, TributaryEvent, - p2p::handle_p2p_task, - tests::{ - LocalP2p, - tributary::{new_keys, new_spec, new_tributaries}, - }, -}; - -#[tokio::test] -async fn handle_p2p_test() { - let keys = new_keys(&mut OsRng); - let spec = new_spec(&mut OsRng, &keys); - - let mut tributaries = new_tributaries(&keys, &spec) - .await - .into_iter() - .map(|(_, p2p, tributary)| (p2p, tributary)) - .collect::>(); - - let mut tributary_senders = vec![]; - let mut tributary_arcs = vec![]; - for (p2p, tributary) in tributaries.drain(..) { - let tributary = Arc::new(tributary); - tributary_arcs.push(tributary.clone()); - let (new_tributary_send, new_tributary_recv) = broadcast::channel(5); - let (cosign_send, _) = mpsc::unbounded_channel(); - tokio::spawn(handle_p2p_task(p2p, cosign_send, new_tributary_recv)); - new_tributary_send - .send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary })) - .map_err(|_| "failed to send ActiveTributary") - .unwrap(); - tributary_senders.push(new_tributary_send); - } - let tributaries = tributary_arcs; - - // After two blocks of time, we should have a new block - // We don't wait one block of time as we may have missed the chance for this block - sleep(Duration::from_secs((2 * Tributary::::block_time()).into())) - .await; - let tip = tributaries[0].tip().await; - assert!(tip != spec.genesis()); - - // Sleep one second to make sure this block propagates - sleep(Duration::from_secs(1)).await; - // Make sure every tributary has it - for tributary in &tributaries { - assert!(tributary.reader().block(&tip).is_some()); - } - - // Then after another block of time, we should have yet another new block - sleep(Duration::from_secs(Tributary::::block_time().into())).await; - let new_tip = tributaries[0].tip().await; - assert!(new_tip != tip); - sleep(Duration::from_secs(1)).await; - for tributary in tributaries { - assert!(tributary.reader().block(&new_tip).is_some()); - } -} diff --git a/coordinator/src/tests/tributary/mod.rs b/coordinator/src/tests/tributary/mod.rs deleted file mode 100644 index 1016248d..00000000 --- a/coordinator/src/tests/tributary/mod.rs +++ /dev/null @@ -1,293 +0,0 @@ -use core::fmt::Debug; - -use rand_core::{RngCore, OsRng}; - -use ciphersuite::{group::Group, Ciphersuite, Ristretto}; - -use scale::{Encode, Decode}; -use serai_client::{ - primitives::{SeraiAddress, Signature}, - 
validator_sets::primitives::{ExternalValidatorSet, KeyPair, MAX_KEY_SHARES_PER_SET}, -}; -use processor_messages::coordinator::SubstrateSignableId; - -use tributary::{ReadWrite, tests::random_signed_with_nonce}; - -use crate::tributary::{Label, SignData, Transaction, scanner::PublishSeraiTransaction}; - -mod chain; -pub use chain::*; - -mod tx; - -mod dkg; -// TODO: Test the other transactions - -mod handle_p2p; -mod sync; - -#[async_trait::async_trait] -impl PublishSeraiTransaction for () { - async fn publish_set_keys( - &self, - _db: &(impl Sync + serai_db::Get), - _set: ExternalValidatorSet, - _removed: Vec, - _key_pair: KeyPair, - _signature: Signature, - ) { - panic!("publish_set_keys was called in test") - } -} - -fn random_u32(rng: &mut R) -> u32 { - u32::try_from(rng.next_u64() >> 32).unwrap() -} - -fn random_vec(rng: &mut R, limit: usize) -> Vec { - let len = usize::try_from(rng.next_u64() % u64::try_from(limit).unwrap()).unwrap(); - let mut res = vec![0; len]; - rng.fill_bytes(&mut res); - res -} - -fn random_sign_data( - rng: &mut R, - plan: Id, - label: Label, -) -> SignData { - SignData { - plan, - attempt: random_u32(&mut OsRng), - label, - - data: { - let mut res = vec![]; - for _ in 0 ..= (rng.next_u64() % 255) { - res.push(random_vec(&mut OsRng, 512)); - } - res - }, - - signed: random_signed_with_nonce(&mut OsRng, label.nonce()), - } -} - -fn test_read_write(value: &RW) { - assert_eq!(value, &RW::read::<&[u8]>(&mut value.serialize().as_ref()).unwrap()); -} - -#[test] -fn tx_size_limit() { - use serai_client::validator_sets::primitives::MAX_KEY_LEN; - - use tributary::TRANSACTION_SIZE_LIMIT; - - let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1; - let max_key_shares_per_individual = MAX_KEY_SHARES_PER_SET - max_dkg_coefficients; - // Handwave the DKG Commitments size as the size of the commitments to the coefficients and - // 1024 bytes for all overhead - let handwaved_dkg_commitments_size = (max_dkg_coefficients * MAX_KEY_LEN) + 1024; - assert!( - u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >= - (handwaved_dkg_commitments_size * max_key_shares_per_individual) - ); - - // Encryption key, PoP (2 elements), message - let elements_per_share = 4; - let handwaved_dkg_shares_size = - (elements_per_share * MAX_KEY_LEN * MAX_KEY_SHARES_PER_SET) + 1024; - assert!( - u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >= - (handwaved_dkg_shares_size * max_key_shares_per_individual) - ); -} - -#[test] -fn serialize_sign_data() { - fn test_read_write(value: &SignData) { - let mut buf = vec![]; - value.write(&mut buf).unwrap(); - assert_eq!(value, &SignData::read(&mut buf.as_slice()).unwrap()) - } - - let mut plan = [0; 3]; - OsRng.fill_bytes(&mut plan); - test_read_write(&random_sign_data::<_, _>( - &mut OsRng, - plan, - if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share }, - )); - let mut plan = [0; 5]; - OsRng.fill_bytes(&mut plan); - test_read_write(&random_sign_data::<_, _>( - &mut OsRng, - plan, - if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share }, - )); - let mut plan = [0; 8]; - OsRng.fill_bytes(&mut plan); - test_read_write(&random_sign_data::<_, _>( - &mut OsRng, - plan, - if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share }, - )); - let mut plan = [0; 24]; - OsRng.fill_bytes(&mut plan); - test_read_write(&random_sign_data::<_, _>( - &mut OsRng, - plan, - if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share }, - )); -} - -#[test] -fn serialize_transaction() { - 
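// Every case below exercises the same property: a Transaction must survive a
// serialize/deserialize round-trip unchanged. The following is a self-contained sketch of that
// property using a toy type and a simplified ReadWrite-style trait; the real
// `tributary::ReadWrite` trait and `Transaction` type aren't shown in this diff, so the names and
// signatures here are illustrative assumptions, not the actual API.

use std::io::{self, Read, Write};

trait ToyReadWrite: Sized {
  fn read<R: Read>(reader: &mut R) -> io::Result<Self>;
  fn write<W: Write>(&self, writer: &mut W) -> io::Result<()>;
  fn serialize(&self) -> Vec<u8> {
    let mut buf = vec![];
    self.write(&mut buf).unwrap();
    buf
  }
}

#[derive(Debug, PartialEq)]
struct ToyTx {
  attempt: u32,
  data: Vec<u8>,
}

impl ToyReadWrite for ToyTx {
  fn read<R: Read>(reader: &mut R) -> io::Result<Self> {
    let mut attempt = [0; 4];
    reader.read_exact(&mut attempt)?;
    let mut len = [0; 4];
    reader.read_exact(&mut len)?;
    let mut data = vec![0; usize::try_from(u32::from_le_bytes(len)).unwrap()];
    reader.read_exact(&mut data)?;
    Ok(ToyTx { attempt: u32::from_le_bytes(attempt), data })
  }
  fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(&self.attempt.to_le_bytes())?;
    writer.write_all(&u32::try_from(self.data.len()).unwrap().to_le_bytes())?;
    writer.write_all(&self.data)
  }
}

// The assertion each case below makes, applied to the toy type: reading back a serialization
// yields a value equal to the original.
fn toy_test_read_write(value: &ToyTx) {
  assert_eq!(value, &ToyTx::read(&mut value.serialize().as_slice()).unwrap());
}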
test_read_write(&Transaction::RemoveParticipantDueToDkg { - participant: ::G::random(&mut OsRng), - signed: random_signed_with_nonce(&mut OsRng, 0), - }); - - { - let mut commitments = vec![random_vec(&mut OsRng, 512)]; - for _ in 0 .. (OsRng.next_u64() % 100) { - let mut temp = commitments[0].clone(); - OsRng.fill_bytes(&mut temp); - commitments.push(temp); - } - test_read_write(&Transaction::DkgCommitments { - attempt: random_u32(&mut OsRng), - commitments, - signed: random_signed_with_nonce(&mut OsRng, 0), - }); - } - - { - // This supports a variable share length, and variable amount of sent shares, yet share length - // and sent shares is expected to be constant among recipients - let share_len = usize::try_from((OsRng.next_u64() % 512) + 1).unwrap(); - let amount_of_shares = usize::try_from((OsRng.next_u64() % 3) + 1).unwrap(); - // Create a valid vec of shares - let mut shares = vec![]; - // Create up to 150 participants - for _ in 0 ..= (OsRng.next_u64() % 150) { - // Give each sender multiple shares - let mut sender_shares = vec![]; - for _ in 0 .. amount_of_shares { - let mut share = vec![0; share_len]; - OsRng.fill_bytes(&mut share); - sender_shares.push(share); - } - shares.push(sender_shares); - } - - test_read_write(&Transaction::DkgShares { - attempt: random_u32(&mut OsRng), - shares, - confirmation_nonces: { - let mut nonces = [0; 64]; - OsRng.fill_bytes(&mut nonces); - nonces - }, - signed: random_signed_with_nonce(&mut OsRng, 1), - }); - } - - for i in 0 .. 2 { - test_read_write(&Transaction::InvalidDkgShare { - attempt: random_u32(&mut OsRng), - accuser: frost::Participant::new( - u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1), - ) - .unwrap(), - faulty: frost::Participant::new( - u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1), - ) - .unwrap(), - blame: if i == 0 { - None - } else { - Some(random_vec(&mut OsRng, 500)).filter(|blame| !blame.is_empty()) - }, - signed: random_signed_with_nonce(&mut OsRng, 2), - }); - } - - test_read_write(&Transaction::DkgConfirmed { - attempt: random_u32(&mut OsRng), - confirmation_share: { - let mut share = [0; 32]; - OsRng.fill_bytes(&mut share); - share - }, - signed: random_signed_with_nonce(&mut OsRng, 2), - }); - - { - let mut block = [0; 32]; - OsRng.fill_bytes(&mut block); - test_read_write(&Transaction::CosignSubstrateBlock(block)); - } - - { - let mut block = [0; 32]; - OsRng.fill_bytes(&mut block); - let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap(); - test_read_write(&Transaction::Batch { block, batch }); - } - test_read_write(&Transaction::SubstrateBlock(OsRng.next_u64())); - - { - let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap(); - test_read_write(&Transaction::SubstrateSign(random_sign_data( - &mut OsRng, - SubstrateSignableId::Batch(batch), - Label::Preprocess, - ))); - } - { - let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap(); - test_read_write(&Transaction::SubstrateSign(random_sign_data( - &mut OsRng, - SubstrateSignableId::Batch(batch), - Label::Share, - ))); - } - - { - let mut plan = [0; 32]; - OsRng.fill_bytes(&mut plan); - test_read_write(&Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Preprocess))); - } - { - let mut plan = [0; 32]; - OsRng.fill_bytes(&mut plan); - test_read_write(&Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Share))); - } - - { - let mut plan = [0; 32]; - OsRng.fill_bytes(&mut plan); - let mut tx_hash = vec![0; (OsRng.next_u64() % 64).try_into().unwrap()]; - OsRng.fill_bytes(&mut tx_hash); - 
test_read_write(&Transaction::SignCompleted { - plan, - tx_hash, - first_signer: random_signed_with_nonce(&mut OsRng, 2).signer, - signature: random_signed_with_nonce(&mut OsRng, 2).signature, - }); - } - - test_read_write(&Transaction::SlashReport( - { - let amount = - usize::try_from(OsRng.next_u64() % u64::from(MAX_KEY_SHARES_PER_SET - 1)).unwrap(); - let mut points = vec![]; - for _ in 0 .. amount { - points.push((OsRng.next_u64() >> 32).try_into().unwrap()); - } - points - }, - random_signed_with_nonce(&mut OsRng, 0), - )); -} diff --git a/coordinator/src/tests/tributary/sync.rs b/coordinator/src/tests/tributary/sync.rs deleted file mode 100644 index 18f60864..00000000 --- a/coordinator/src/tests/tributary/sync.rs +++ /dev/null @@ -1,165 +0,0 @@ -use core::time::Duration; -use std::{sync::Arc, collections::HashSet}; - -use rand_core::OsRng; - -use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; - -use tokio::{ - sync::{mpsc, broadcast}, - time::sleep, -}; - -use serai_db::MemDb; - -use tributary::Tributary; - -use crate::{ - tributary::Transaction, - ActiveTributary, TributaryEvent, - p2p::{heartbeat_tributaries_task, handle_p2p_task}, - tests::{ - LocalP2p, - tributary::{new_keys, new_spec, new_tributaries}, - }, -}; - -#[tokio::test] -async fn sync_test() { - let mut keys = new_keys(&mut OsRng); - let spec = new_spec(&mut OsRng, &keys); - // Ensure this can have a node fail - assert!(spec.n(&[]) > spec.t()); - - let mut tributaries = new_tributaries(&keys, &spec) - .await - .into_iter() - .map(|(_, p2p, tributary)| (p2p, tributary)) - .collect::>(); - - // Keep a Tributary back, effectively having it offline - let syncer_key = keys.pop().unwrap(); - let (syncer_p2p, syncer_tributary) = tributaries.pop().unwrap(); - - // Have the rest form a P2P net - let mut tributary_senders = vec![]; - let mut tributary_arcs = vec![]; - let mut p2p_threads = vec![]; - for (p2p, tributary) in tributaries.drain(..) 
{ - let tributary = Arc::new(tributary); - tributary_arcs.push(tributary.clone()); - let (new_tributary_send, new_tributary_recv) = broadcast::channel(5); - let (cosign_send, _) = mpsc::unbounded_channel(); - let thread = tokio::spawn(handle_p2p_task(p2p, cosign_send, new_tributary_recv)); - new_tributary_send - .send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary })) - .map_err(|_| "failed to send ActiveTributary") - .unwrap(); - tributary_senders.push(new_tributary_send); - p2p_threads.push(thread); - } - let tributaries = tributary_arcs; - - // After four blocks of time, we should have a new block - // We don't wait one block of time as we may have missed the chance for the first block - // We don't wait two blocks because we may have missed the chance, and then had a failure to - // propose by our 'offline' validator, which would cause the Tendermint round time to increase, - // requiring a longer delay - let block_time = u64::from(Tributary::::block_time()); - sleep(Duration::from_secs(4 * block_time)).await; - let tip = tributaries[0].tip().await; - assert!(tip != spec.genesis()); - - // Sleep one second to make sure this block propagates - sleep(Duration::from_secs(1)).await; - // Make sure every tributary has it - for tributary in &tributaries { - assert!(tributary.reader().block(&tip).is_some()); - } - - // Now that we've confirmed the other tributaries formed a net without issue, drop the syncer's - // pending P2P messages - syncer_p2p.1.write().await.1.last_mut().unwrap().clear(); - - // Have it join the net - let syncer_key = Ristretto::generator() * *syncer_key; - let syncer_tributary = Arc::new(syncer_tributary); - let (syncer_tributary_send, syncer_tributary_recv) = broadcast::channel(5); - let (cosign_send, _) = mpsc::unbounded_channel(); - tokio::spawn(handle_p2p_task(syncer_p2p.clone(), cosign_send, syncer_tributary_recv)); - syncer_tributary_send - .send(TributaryEvent::NewTributary(ActiveTributary { - spec: spec.clone(), - tributary: syncer_tributary.clone(), - })) - .map_err(|_| "failed to send ActiveTributary to syncer") - .unwrap(); - - // It shouldn't automatically catch up. 
If it somehow was, our test would be broken - // Sanity check this - let tip = tributaries[0].tip().await; - // Wait until a new block occurs - sleep(Duration::from_secs(3 * block_time)).await; - // Make sure a new block actually occurred - assert!(tributaries[0].tip().await != tip); - // Make sure the new block alone didn't trigger catching up - assert_eq!(syncer_tributary.tip().await, spec.genesis()); - - // Start the heartbeat protocol - let (syncer_heartbeat_tributary_send, syncer_heartbeat_tributary_recv) = broadcast::channel(5); - tokio::spawn(heartbeat_tributaries_task(syncer_p2p, syncer_heartbeat_tributary_recv)); - syncer_heartbeat_tributary_send - .send(TributaryEvent::NewTributary(ActiveTributary { - spec: spec.clone(), - tributary: syncer_tributary.clone(), - })) - .map_err(|_| "failed to send ActiveTributary to heartbeat") - .unwrap(); - - // The heartbeat is once every 10 blocks, with some limitations - sleep(Duration::from_secs(20 * block_time)).await; - assert!(syncer_tributary.tip().await != spec.genesis()); - - // Verify it synced to the tip - let syncer_tip = { - let tributary = &tributaries[0]; - - let tip = tributary.tip().await; - let syncer_tip = syncer_tributary.tip().await; - // Allow a one block tolerance in case of race conditions - assert!( - HashSet::from([tip, tributary.reader().block(&tip).unwrap().parent()]).contains(&syncer_tip) - ); - syncer_tip - }; - - sleep(Duration::from_secs(block_time)).await; - - // Verify it's now keeping up - assert!(syncer_tributary.tip().await != syncer_tip); - - // Verify it's now participating in consensus - // Because only `t` validators are used in a commit, take n - t nodes offline - // leaving only `t` nodes. Which should force it to participate in the consensus - // of next blocks. 
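// Illustrative arithmetic for the step below. The exact definition of `spec.t()` isn't shown in
// this diff; this sketch assumes the common BFT-style threshold t = floor(2n / 3) + 1, which is
// an assumption for illustration rather than the crate's actual formula.

fn assumed_bft_threshold(n: u16) -> u16 {
  ((2 * n) / 3) + 1
}

fn assumed_spares(n: u16) -> u16 {
  n - assumed_bft_threshold(n)
}

fn main() {
  // Under that assumption, the five-validator set this test builds has t = 4, so one of the
  // other validators' P2P tasks can be aborted while blocks can still only commit if the newly
  // synced validator participates.
  assert_eq!(assumed_bft_threshold(5), 4);
  assert_eq!(assumed_spares(5), 1);
}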
- let spares = usize::from(spec.n(&[]) - spec.t()); - for thread in p2p_threads.iter().take(spares) { - thread.abort(); - } - - // wait for a block - sleep(Duration::from_secs(block_time)).await; - - if syncer_tributary - .reader() - .parsed_commit(&syncer_tributary.tip().await) - .unwrap() - .validators - .iter() - .any(|signer| signer == &syncer_key.to_bytes()) - { - return; - } - - panic!("synced tributary didn't start participating in consensus"); -} diff --git a/coordinator/src/tests/tributary/tx.rs b/coordinator/src/tests/tributary/tx.rs deleted file mode 100644 index da9433b6..00000000 --- a/coordinator/src/tests/tributary/tx.rs +++ /dev/null @@ -1,63 +0,0 @@ -use core::time::Duration; - -use rand_core::{RngCore, OsRng}; - -use tokio::time::sleep; - -use serai_db::MemDb; - -use tributary::{ - transaction::Transaction as TransactionTrait, Transaction as TributaryTransaction, Tributary, -}; - -use crate::{ - tributary::Transaction, - tests::{ - LocalP2p, - tributary::{new_keys, new_spec, new_tributaries, run_tributaries, wait_for_tx_inclusion}, - }, -}; - -#[tokio::test] -async fn tx_test() { - let keys = new_keys(&mut OsRng); - let spec = new_spec(&mut OsRng, &keys); - - let tributaries = new_tributaries(&keys, &spec) - .await - .into_iter() - .map(|(_, p2p, tributary)| (p2p, tributary)) - .collect::>(); - - // Run the tributaries in the background - tokio::spawn(run_tributaries(tributaries.clone())); - - // Send a TX from a random Tributary - let sender = - usize::try_from(OsRng.next_u64() % u64::try_from(tributaries.len()).unwrap()).unwrap(); - let key = keys[sender].clone(); - - let attempt = 0; - let mut commitments = vec![0; 256]; - OsRng.fill_bytes(&mut commitments); - - // Create the TX with a null signature so we can get its sig hash - let block_before_tx = tributaries[sender].1.tip().await; - let mut tx = Transaction::DkgCommitments { - attempt, - commitments: vec![commitments.clone()], - signed: Transaction::empty_signed(), - }; - tx.sign(&mut OsRng, spec.genesis(), &key); - - assert_eq!(tributaries[sender].1.add_transaction(tx.clone()).await, Ok(true)); - let included_in = wait_for_tx_inclusion(&tributaries[sender].1, block_before_tx, tx.hash()).await; - // Also sleep for the block time to ensure the block is synced around before we run checks on it - sleep(Duration::from_secs(Tributary::::block_time().into())).await; - - // All tributaries should have acknowledged this transaction in a block - for (_, tributary) in tributaries { - let block = tributary.reader().block(&included_in).unwrap(); - assert_eq!(block.transactions, vec![TributaryTransaction::Application(tx.clone())]); - } -} diff --git a/coordinator/src/tributary.rs b/coordinator/src/tributary.rs new file mode 100644 index 00000000..7f45797d --- /dev/null +++ b/coordinator/src/tributary.rs @@ -0,0 +1,596 @@ +use core::{future::Future, time::Duration}; +use std::sync::Arc; + +use zeroize::Zeroizing; +use rand_core::OsRng; +use blake2::{digest::typenum::U32, Digest, Blake2s}; +use ciphersuite::{Ciphersuite, Ristretto}; + +use tokio::sync::mpsc; + +use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel}; + +use scale::Encode; +use serai_client::validator_sets::primitives::ExternalValidatorSet; + +use tributary_sdk::{TransactionKind, TransactionError, ProvidedError, TransactionTrait, Tributary}; + +use serai_task::{Task, TaskHandle, DoesNotError, ContinuallyRan}; + +use message_queue::{Service, Metadata, client::MessageQueue}; + +use serai_cosign::{Faulted, CosignIntent, Cosigning}; +use 
serai_coordinator_substrate::{NewSetInformation, SignSlashReport}; +use serai_coordinator_tributary::{ + Topic, Transaction, ProcessorMessages, CosignIntents, RecognizedTopics, ScanTributaryTask, +}; +use serai_coordinator_p2p::P2p; + +use crate::{ + Db, TributaryTransactionsFromProcessorMessages, TributaryTransactionsFromDkgConfirmation, + RemoveParticipant, dkg_confirmation::ConfirmDkgTask, +}; + +create_db! { + Coordinator { + PublishOnRecognition: (set: ExternalValidatorSet, topic: Topic) -> Transaction, + } +} + +db_channel! { + Coordinator { + PendingCosigns: (set: ExternalValidatorSet) -> CosignIntent, + } +} + +/// Provide a Provided Transaction to the Tributary. +/// +/// This is not a well-designed function. This is specific to the context in which its called, +/// within this file. It should only be considered an internal helper for this domain alone. +async fn provide_transaction( + set: ExternalValidatorSet, + tributary: &Tributary, + tx: Transaction, +) { + match tributary.provide_transaction(tx.clone()).await { + // The Tributary uses its own DB, so we may provide this multiple times if we reboot before + // committing the txn which provoked this + Ok(()) | Err(ProvidedError::AlreadyProvided) => {} + Err(ProvidedError::NotProvided) => { + panic!("providing a Transaction which wasn't a Provided transaction: {tx:?}"); + } + Err(ProvidedError::InvalidProvided(e)) => { + panic!("providing an invalid Provided transaction, tx: {tx:?}, error: {e:?}") + } + // The Tributary's scan task won't advance if we don't have the Provided transactions + // present on-chain, and this enters an infinite loop to block the calling task from + // advancing + Err(ProvidedError::LocalMismatchesOnChain) => loop { + log::error!( + "Tributary {:?} was supposed to provide {:?} but peers disagree, halting Tributary", + set, + tx, + ); + // Print this every five minutes as this does need to be handled + tokio::time::sleep(Duration::from_secs(5 * 60)).await; + }, + } +} + +/// Provides Cosign/Cosigned Transactions onto the Tributary. +pub(crate) struct ProvideCosignCosignedTransactionsTask { + db: CD, + tributary_db: TD, + set: NewSetInformation, + tributary: Tributary, +} +impl ContinuallyRan + for ProvideCosignCosignedTransactionsTask +{ + type Error = String; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut made_progress = false; + + // Check if we produced any cosigns we were supposed to + let mut pending_notable_cosign = false; + loop { + let mut txn = self.db.txn(); + + // Fetch the next cosign this tributary should handle + let Some(cosign) = PendingCosigns::try_recv(&mut txn, self.set.set) else { break }; + pending_notable_cosign = cosign.notable; + + // If we (Serai) haven't cosigned this block, break as this is still pending + let latest = match Cosigning::::latest_cosigned_block_number(&txn) { + Ok(latest) => latest, + Err(Faulted) => { + log::error!("cosigning faulted"); + Err("cosigning faulted")? 
+ } + }; + if latest < cosign.block_number { + break; + } + + // Because we've cosigned it, provide the TX for that + { + let mut txn = self.tributary_db.txn(); + CosignIntents::provide(&mut txn, self.set.set, &cosign); + txn.commit(); + } + provide_transaction( + self.set.set, + &self.tributary, + Transaction::Cosigned { substrate_block_hash: cosign.block_hash }, + ) + .await; + // Clear pending_notable_cosign since this cosign isn't pending + pending_notable_cosign = false; + + // Commit the txn to clear this from PendingCosigns + txn.commit(); + made_progress = true; + } + + // If we don't have any notable cosigns pending, provide the next set of cosign intents + if !pending_notable_cosign { + let mut txn = self.db.txn(); + // intended_cosigns will only yield up to and including the next notable cosign + for cosign in Cosigning::::intended_cosigns(&mut txn, self.set.set) { + // Flag this cosign as pending + PendingCosigns::send(&mut txn, self.set.set, &cosign); + // Provide the transaction to queue it for work + provide_transaction( + self.set.set, + &self.tributary, + Transaction::Cosign { substrate_block_hash: cosign.block_hash }, + ) + .await; + } + txn.commit(); + made_progress = true; + } + + Ok(made_progress) + } + } +} + +#[must_use] +async fn add_signed_unsigned_transaction( + tributary: &Tributary, + key: &Zeroizing<::F>, + mut tx: Transaction, +) -> bool { + // If this is a signed transaction, sign it + if matches!(tx.kind(), TransactionKind::Signed(_, _)) { + tx.sign(&mut OsRng, tributary.genesis(), key); + } + + let res = tributary.add_transaction(tx.clone()).await; + match &res { + // Fresh publication, already published + Ok(true | false) => {} + Err( + TransactionError::TooLargeTransaction | + TransactionError::InvalidSigner | + TransactionError::InvalidSignature | + TransactionError::InvalidContent, + ) => { + panic!("created an invalid transaction, tx: {tx:?}, err: {res:?}"); + } + // InvalidNonce may be out-of-order TXs, not invalid ones, but we only create nonce #n+1 after + // on-chain inclusion of the TX with nonce #n, so it is invalid within our context unless the + // issue is this transaction was already included on-chain + Err(TransactionError::InvalidNonce) => { + let TransactionKind::Signed(order, signed) = tx.kind() else { + panic!("non-Signed transaction had InvalidNonce"); + }; + let next_nonce = tributary + .next_nonce(&signed.signer, &order) + .await + .expect("signer who is a present validator didn't have a nonce"); + assert!(next_nonce != signed.nonce); + // We're publishing an old transaction + if next_nonce > signed.nonce { + return true; + } + panic!("nonce in transaction wasn't contiguous with nonce on-chain"); + } + // We've published too many transactions recently + Err(TransactionError::TooManyInMempool) => { + return false; + } + // This isn't a Provided transaction so this should never be hit + Err(TransactionError::ProvidedAddedToMempool) => unreachable!(), + } + + true +} + +async fn add_with_recognition_check( + set: ExternalValidatorSet, + tributary_db: &mut TD, + tributary: &Tributary, + key: &Zeroizing<::F>, + tx: Transaction, +) -> bool { + let kind = tx.kind(); + match kind { + TransactionKind::Provided(_) => provide_transaction(set, tributary, tx).await, + TransactionKind::Unsigned | TransactionKind::Signed(_, _) => { + // If this is a transaction with signing data, check the topic is recognized before + // publishing + let topic = tx.topic(); + let still_requires_recognition = if let Some(topic) = topic { + 
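// The task loops in this file share one pattern: pop the next message inside a database
// transaction, attempt the side effect, and only commit once the side effect succeeds, so a
// failed attempt leaves the message queued for the next iteration. Below is a minimal in-memory
// sketch of that pattern; ToyTxn and drain are toy names and not the serai_db channel API.

use std::collections::VecDeque;

struct ToyTxn<'a> {
  queue: &'a mut VecDeque<String>,
  popped: bool,
}

impl ToyTxn<'_> {
  fn try_recv(&mut self) -> Option<String> {
    let msg = self.queue.front().cloned();
    self.popped = msg.is_some();
    msg
  }
  // Committing actually consumes the popped message; dropping without committing leaves it at
  // the front of the queue, to be redelivered on a future iteration
  fn commit(self) {
    if self.popped {
      self.queue.pop_front();
    }
  }
}

fn drain(queue: &mut VecDeque<String>, mut attempt: impl FnMut(&str) -> bool) -> bool {
  let mut made_progress = false;
  loop {
    let mut txn = ToyTxn { queue: &mut *queue, popped: false };
    let Some(msg) = txn.try_recv() else { break };
    if !attempt(msg.as_str()) {
      // Don't commit, so the message stays queued
      break;
    }
    made_progress = true;
    txn.commit();
  }
  made_progress
}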
(topic.requires_recognition() && (!RecognizedTopics::recognized(tributary_db, set, topic))) + .then_some(topic) + } else { + None + }; + if let Some(topic) = still_requires_recognition { + // Queue the transaction until the topic is recognized + // We use the Tributary DB for this so it's cleaned up when the Tributary DB is + let mut tributary_txn = tributary_db.txn(); + PublishOnRecognition::set(&mut tributary_txn, set, topic, &tx); + tributary_txn.commit(); + } else { + // Actually add the transaction + if !add_signed_unsigned_transaction(tributary, key, tx).await { + return false; + } + } + } + } + true +} + +/// Adds all of the transactions sent via `TributaryTransactionsFromProcessorMessages`. +pub(crate) struct AddTributaryTransactionsTask { + db: CD, + tributary_db: TD, + tributary: Tributary, + set: NewSetInformation, + key: Zeroizing<::F>, +} +impl ContinuallyRan for AddTributaryTransactionsTask { + type Error = DoesNotError; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut made_progress = false; + + // Provide/add all transactions sent our way + loop { + let mut txn = self.db.txn(); + let Some(tx) = TributaryTransactionsFromDkgConfirmation::try_recv(&mut txn, self.set.set) + else { + break; + }; + + if !add_with_recognition_check( + self.set.set, + &mut self.tributary_db, + &self.tributary, + &self.key, + tx, + ) + .await + { + break; + } + + made_progress = true; + txn.commit(); + } + + loop { + let mut txn = self.db.txn(); + let Some(tx) = TributaryTransactionsFromProcessorMessages::try_recv(&mut txn, self.set.set) + else { + break; + }; + + if !add_with_recognition_check( + self.set.set, + &mut self.tributary_db, + &self.tributary, + &self.key, + tx, + ) + .await + { + break; + } + + made_progress = true; + txn.commit(); + } + + // Provide/add all transactions due to newly recognized topics + loop { + let mut tributary_txn = self.tributary_db.txn(); + let Some(topic) = + RecognizedTopics::try_recv_topic_requiring_recognition(&mut tributary_txn, self.set.set) + else { + break; + }; + if let Some(tx) = PublishOnRecognition::take(&mut tributary_txn, self.set.set, topic) { + if !add_signed_unsigned_transaction(&self.tributary, &self.key, tx).await { + break; + } + } + + made_progress = true; + tributary_txn.commit(); + } + + // Publish any participant removals + loop { + let mut txn = self.db.txn(); + let Some(participant) = RemoveParticipant::try_recv(&mut txn, self.set.set) else { break }; + let tx = Transaction::RemoveParticipant { + participant: self.set.participant_indexes_reverse_lookup[&participant], + signed: Default::default(), + }; + if !add_signed_unsigned_transaction(&self.tributary, &self.key, tx).await { + break; + } + made_progress = true; + txn.commit(); + } + + Ok(made_progress) + } + } +} + +/// Takes the messages from ScanTributaryTask and publishes them to the message-queue. 
+pub(crate) struct TributaryProcessorMessagesTask { + tributary_db: TD, + set: ExternalValidatorSet, + message_queue: Arc, +} +impl ContinuallyRan for TributaryProcessorMessagesTask { + type Error = String; // TODO + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut made_progress = false; + loop { + let mut txn = self.tributary_db.txn(); + let Some(msg) = ProcessorMessages::try_recv(&mut txn, self.set) else { break }; + let metadata = Metadata { + from: Service::Coordinator, + to: Service::Processor(self.set.network), + intent: msg.intent(), + }; + let msg = borsh::to_vec(&msg).unwrap(); + self.message_queue.queue(metadata, msg).await?; + txn.commit(); + made_progress = true; + } + Ok(made_progress) + } + } +} + +/// Checks for the notification to sign a slash report and does so if present. +pub(crate) struct SignSlashReportTask { + db: CD, + tributary_db: TD, + tributary: Tributary, + set: NewSetInformation, + key: Zeroizing<::F>, +} +impl ContinuallyRan for SignSlashReportTask { + type Error = DoesNotError; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut txn = self.db.txn(); + let Some(()) = SignSlashReport::try_recv(&mut txn, self.set.set) else { return Ok(false) }; + + // Fetch the slash report for this Tributary + let mut tx = + serai_coordinator_tributary::slash_report_transaction(&self.tributary_db, &self.set); + tx.sign(&mut OsRng, self.tributary.genesis(), &self.key); + + let res = self.tributary.add_transaction(tx.clone()).await; + match &res { + // Fresh publication, already published + Ok(true | false) => {} + Err( + TransactionError::TooLargeTransaction | + TransactionError::InvalidSigner | + TransactionError::InvalidNonce | + TransactionError::InvalidSignature | + TransactionError::InvalidContent, + ) => { + panic!("created an invalid SlashReport transaction, tx: {tx:?}, err: {res:?}"); + } + // We've published too many transactions recently + // Drop this txn to try to publish it again later on a future iteration + Err(TransactionError::TooManyInMempool) => { + drop(txn); + return Ok(false); + } + // This isn't a Provided transaction so this should never be hit + Err(TransactionError::ProvidedAddedToMempool) => unreachable!(), + } + + txn.commit(); + Ok(true) + } + } +} + +/// Run the scan task whenever the Tributary adds a new block. +async fn scan_on_new_block( + db: CD, + set: ExternalValidatorSet, + tributary: Tributary, + scan_tributary_task: TaskHandle, + tasks_to_keep_alive: Vec, +) { + loop { + // Break once this Tributary is retired + if crate::RetiredTributary::get(&db, set.network).map(|session| session.0) >= + Some(set.session.0) + { + drop(tasks_to_keep_alive); + break; + } + + // Have the tributary scanner run as soon as there's a new block + match tributary.next_block_notification().await.await { + Ok(()) => scan_tributary_task.run_now(), + // unreachable since this owns the tributary object and doesn't drop it + Err(_) => panic!("tributary was dropped causing notification to error"), + } + } +} + +/// Spawn a Tributary. 
+/// +/// This will: +/// - Spawn the Tributary +/// - Inform the P2P network of the Tributary +/// - Spawn the ScanTributaryTask +/// - Spawn the ProvideCosignCosignedTransactionsTask +/// - Spawn the TributaryProcessorMessagesTask +/// - Spawn the AddTributaryTransactionsTask +/// - Spawn the ConfirmDkgTask +/// - Spawn the SignSlashReportTask +/// - Iterate the scan task whenever a new block occurs (not just on the standard interval) +pub(crate) async fn spawn_tributary( + db: Db, + message_queue: Arc, + p2p: P, + p2p_add_tributary: &mpsc::UnboundedSender<(ExternalValidatorSet, Tributary)>, + set: NewSetInformation, + serai_key: Zeroizing<::F>, +) { + // Don't spawn retired Tributaries + if crate::db::RetiredTributary::get(&db, set.set.network).map(|session| session.0) >= + Some(set.set.session.0) + { + return; + } + + let genesis = <[u8; 32]>::from(Blake2s::::digest((set.serai_block, set.set).encode())); + + // Since the Serai block will be finalized, then cosigned, before we handle this, this time will + // be a couple of minutes stale. While the Tributary will still function with a start time in the + // past, the Tributary will immediately incur round timeouts. We reduce these by adding a + // constant delay of a couple of minutes. + const TRIBUTARY_START_TIME_DELAY: u64 = 120; + let start_time = set.declaration_time + TRIBUTARY_START_TIME_DELAY; + + let mut tributary_validators = Vec::with_capacity(set.validators.len()); + for (validator, weight) in set.validators.iter().copied() { + let validator_key = ::read_G(&mut validator.0.as_slice()) + .expect("Serai validator had an invalid public key"); + let weight = u64::from(weight); + tributary_validators.push((validator_key, weight)); + } + + // Spawn the Tributary + let tributary_db = crate::db::tributary_db(set.set); + let tributary = Tributary::new( + tributary_db.clone(), + genesis, + start_time, + serai_key.clone(), + tributary_validators, + p2p, + ) + .await + .unwrap(); + let reader = tributary.reader(); + + // Inform the P2P network + p2p_add_tributary + .send((set.set, tributary.clone())) + .expect("p2p's add_tributary channel was closed?"); + + // Spawn the task to provide Cosign/Cosigned transactions onto the Tributary + let (provide_cosign_cosigned_transactions_task_def, provide_cosign_cosigned_transactions_task) = + Task::new(); + tokio::spawn( + (ProvideCosignCosignedTransactionsTask { + db: db.clone(), + tributary_db: tributary_db.clone(), + set: set.clone(), + tributary: tributary.clone(), + }) + .continually_run(provide_cosign_cosigned_transactions_task_def, vec![]), + ); + + // Spawn the task to send all messages from the Tributary scanner to the message-queue + let (scan_tributary_messages_task_def, scan_tributary_messages_task) = Task::new(); + tokio::spawn( + (TributaryProcessorMessagesTask { + tributary_db: tributary_db.clone(), + set: set.set, + message_queue, + }) + .continually_run(scan_tributary_messages_task_def, vec![]), + ); + + // Spawn the scan task + let (scan_tributary_task_def, scan_tributary_task) = Task::new(); + tokio::spawn( + ScanTributaryTask::<_, P>::new(tributary_db.clone(), set.clone(), reader) + // This is the only handle for this TributaryProcessorMessagesTask, so when this task is + // dropped, it will be too + .continually_run(scan_tributary_task_def, vec![scan_tributary_messages_task]), + ); + + // Spawn the add transactions task + let (add_tributary_transactions_task_def, add_tributary_transactions_task) = Task::new(); + tokio::spawn( + (AddTributaryTransactionsTask { + db: db.clone(), 
+ tributary_db: tributary_db.clone(), + tributary: tributary.clone(), + set: set.clone(), + key: serai_key.clone(), + }) + .continually_run(add_tributary_transactions_task_def, vec![]), + ); + + // Spawn the task to confirm the DKG result + let (confirm_dkg_task_def, confirm_dkg_task) = Task::new(); + tokio::spawn( + ConfirmDkgTask::new(db.clone(), set.clone(), tributary_db.clone(), serai_key.clone()) + .continually_run(confirm_dkg_task_def, vec![add_tributary_transactions_task]), + ); + + // Spawn the sign slash report task + let (sign_slash_report_task_def, sign_slash_report_task) = Task::new(); + tokio::spawn( + (SignSlashReportTask { + db: db.clone(), + tributary_db, + tributary: tributary.clone(), + set: set.clone(), + key: serai_key, + }) + .continually_run(sign_slash_report_task_def, vec![]), + ); + + // Whenever a new block occurs, immediately run the scan task + // This function also preserves the ProvideCosignCosignedTransactionsTask handle until the + // Tributary is retired, ensuring it isn't dropped prematurely and that the task don't run ad + // infinitum + tokio::spawn(scan_on_new_block( + db, + set.set, + tributary, + scan_tributary_task, + vec![provide_cosign_cosigned_transactions_task, confirm_dkg_task, sign_slash_report_task], + )); +} diff --git a/coordinator/src/tributary/db.rs b/coordinator/src/tributary/db.rs deleted file mode 100644 index fe39b7de..00000000 --- a/coordinator/src/tributary/db.rs +++ /dev/null @@ -1,197 +0,0 @@ -use std::collections::HashMap; - -use scale::Encode; -use borsh::{BorshSerialize, BorshDeserialize}; - -use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; -use frost::Participant; - -use serai_client::validator_sets::primitives::{KeyPair, ExternalValidatorSet}; - -use processor_messages::coordinator::SubstrateSignableId; - -pub use serai_db::*; - -use tributary::ReadWrite; - -use crate::tributary::{Label, Transaction}; - -#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)] -pub enum Topic { - Dkg, - DkgConfirmation, - SubstrateSign(SubstrateSignableId), - Sign([u8; 32]), -} - -// A struct to refer to a piece of data all validators will presumably provide a value for. 
-#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode)] -pub struct DataSpecification { - pub topic: Topic, - pub label: Label, - pub attempt: u32, -} - -pub enum DataSet { - Participating(HashMap>), - NotParticipating, -} - -pub enum Accumulation { - Ready(DataSet), - NotReady, -} - -// TODO: Move from genesis to set for indexing -create_db!( - Tributary { - SeraiBlockNumber: (hash: [u8; 32]) -> u64, - SeraiDkgCompleted: (spec: ExternalValidatorSet) -> [u8; 32], - - TributaryBlockNumber: (block: [u8; 32]) -> u32, - LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32], - - // TODO: Revisit the point of this - FatalSlashes: (genesis: [u8; 32]) -> Vec<[u8; 32]>, - RemovedAsOfDkgAttempt: (genesis: [u8; 32], attempt: u32) -> Vec<[u8; 32]>, - OfflineDuringDkg: (genesis: [u8; 32]) -> Vec<[u8; 32]>, - // TODO: Combine these two - FatallySlashed: (genesis: [u8; 32], account: [u8; 32]) -> (), - SlashPoints: (genesis: [u8; 32], account: [u8; 32]) -> u32, - - VotedToRemove: (genesis: [u8; 32], voter: [u8; 32], to_remove: [u8; 32]) -> (), - VotesToRemove: (genesis: [u8; 32], to_remove: [u8; 32]) -> u16, - - AttemptDb: (genesis: [u8; 32], topic: &Topic) -> u32, - ReattemptDb: (genesis: [u8; 32], block: u32) -> Vec, - DataReceived: (genesis: [u8; 32], data_spec: &DataSpecification) -> u16, - DataDb: (genesis: [u8; 32], data_spec: &DataSpecification, signer_bytes: &[u8; 32]) -> Vec, - - DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec, - ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap>, - DkgKeyPair: (genesis: [u8; 32], attempt: u32) -> KeyPair, - KeyToDkgAttempt: (key: [u8; 32]) -> u32, - DkgLocallyCompleted: (genesis: [u8; 32]) -> (), - - PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>, - - SignedTransactionDb: (order: &[u8], nonce: u32) -> Vec, - - SlashReports: (genesis: [u8; 32], signer: [u8; 32]) -> Vec, - SlashReported: (genesis: [u8; 32]) -> u16, - SlashReportCutOff: (genesis: [u8; 32]) -> u64, - SlashReport: (set: ExternalValidatorSet) -> Vec<([u8; 32], u32)>, - } -); - -impl FatalSlashes { - pub fn get_as_keys(getter: &impl Get, genesis: [u8; 32]) -> Vec<::G> { - FatalSlashes::get(getter, genesis) - .unwrap_or(vec![]) - .iter() - .map(|key| ::G::from_bytes(key).unwrap()) - .collect::>() - } -} - -impl FatallySlashed { - pub fn set_fatally_slashed(txn: &mut impl DbTxn, genesis: [u8; 32], account: [u8; 32]) { - Self::set(txn, genesis, account, &()); - let mut existing = FatalSlashes::get(txn, genesis).unwrap_or_default(); - - // Don't append if we already have it, which can occur upon multiple faults - if existing.iter().any(|existing| existing == &account) { - return; - } - - existing.push(account); - FatalSlashes::set(txn, genesis, &existing); - } -} - -impl AttemptDb { - pub fn recognize_topic(txn: &mut impl DbTxn, genesis: [u8; 32], topic: Topic) { - Self::set(txn, genesis, &topic, &0u32); - } - - pub fn start_next_attempt(txn: &mut impl DbTxn, genesis: [u8; 32], topic: Topic) -> u32 { - let next = - Self::attempt(txn, genesis, topic).expect("starting next attempt for unknown topic") + 1; - Self::set(txn, genesis, &topic, &next); - next - } - - pub fn attempt(getter: &impl Get, genesis: [u8; 32], topic: Topic) -> Option { - let attempt = Self::get(getter, genesis, &topic); - // Don't require explicit recognition of the Dkg topic as it starts when the chain does - // Don't require explicit recognition of the SlashReport topic as it isn't a DoS risk and it - // should always happen (eventually) - if attempt.is_none() && - ((topic == Topic::Dkg) || - (topic == 
Topic::DkgConfirmation) || - (topic == Topic::SubstrateSign(SubstrateSignableId::SlashReport))) - { - return Some(0); - } - attempt - } -} - -impl ReattemptDb { - pub fn schedule_reattempt( - txn: &mut impl DbTxn, - genesis: [u8; 32], - current_block_number: u32, - topic: Topic, - ) { - // 5 minutes - #[cfg(not(feature = "longer-reattempts"))] - const BASE_REATTEMPT_DELAY: u32 = (5 * 60 * 1000) / tributary::tendermint::TARGET_BLOCK_TIME; - - // 10 minutes, intended for latent environments like the GitHub CI - #[cfg(feature = "longer-reattempts")] - const BASE_REATTEMPT_DELAY: u32 = (10 * 60 * 1000) / tributary::tendermint::TARGET_BLOCK_TIME; - - // 5 minutes for attempts 0 ..= 2, 10 minutes for attempts 3 ..= 5, 15 minutes for attempts > 5 - // Assumes no event will take longer than 15 minutes, yet grows the time in case there are - // network bandwidth issues - let mut reattempt_delay = BASE_REATTEMPT_DELAY * - ((AttemptDb::attempt(txn, genesis, topic) - .expect("scheduling re-attempt for unknown topic") / - 3) + - 1) - .min(3); - // Allow more time for DKGs since they have an extra round and much more data - if matches!(topic, Topic::Dkg) { - reattempt_delay *= 4; - } - let upon_block = current_block_number + reattempt_delay; - - let mut reattempts = Self::get(txn, genesis, upon_block).unwrap_or(vec![]); - reattempts.push(topic); - Self::set(txn, genesis, upon_block, &reattempts); - } - - pub fn take(txn: &mut impl DbTxn, genesis: [u8; 32], block_number: u32) -> Vec { - let res = Self::get(txn, genesis, block_number).unwrap_or(vec![]); - if !res.is_empty() { - Self::del(txn, genesis, block_number); - } - res - } -} - -impl SignedTransactionDb { - pub fn take_signed_transaction( - txn: &mut impl DbTxn, - order: &[u8], - nonce: u32, - ) -> Option { - let res = SignedTransactionDb::get(txn, order, nonce) - .map(|bytes| Transaction::read(&mut bytes.as_slice()).unwrap()); - if res.is_some() { - Self::del(txn, order, nonce); - } - res - } -} diff --git a/coordinator/src/tributary/handle.rs b/coordinator/src/tributary/handle.rs deleted file mode 100644 index fbce7dd9..00000000 --- a/coordinator/src/tributary/handle.rs +++ /dev/null @@ -1,776 +0,0 @@ -use core::ops::Deref; -use std::collections::HashMap; - -use zeroize::Zeroizing; -use rand_core::OsRng; - -use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; -use frost::dkg::Participant; - -use scale::{Encode, Decode}; -use serai_client::{Signature, validator_sets::primitives::KeyPair}; - -use tributary::{Signed, TransactionKind, TransactionTrait}; - -use processor_messages::{ - key_gen::{self, KeyGenId}, - coordinator::{self, SubstrateSignableId, SubstrateSignId}, - sign::{self, SignId}, -}; - -use serai_db::*; - -use crate::{ - processors::Processors, - tributary::{ - *, - signing_protocol::DkgConfirmer, - scanner::{ - RecognizedIdType, RIDTrait, PublishSeraiTransaction, PTTTrait, TributaryBlockHandler, - }, - }, - P2p, -}; - -pub fn dkg_confirmation_nonces( - key: &Zeroizing<::F>, - spec: &TributarySpec, - txn: &mut impl DbTxn, - attempt: u32, -) -> [u8; 64] { - DkgConfirmer::new(key, spec, txn, attempt) - .expect("getting DKG confirmation nonces for unknown attempt") - .preprocess() -} - -pub fn generated_key_pair( - txn: &mut D::Transaction<'_>, - key: &Zeroizing<::F>, - spec: &TributarySpec, - key_pair: &KeyPair, - attempt: u32, -) -> Result<[u8; 32], Participant> { - DkgKeyPair::set(txn, spec.genesis(), attempt, key_pair); - KeyToDkgAttempt::set(txn, key_pair.0 .0, &attempt); - let preprocesses = ConfirmationNonces::get(txn, 
spec.genesis(), attempt).unwrap(); - DkgConfirmer::new(key, spec, txn, attempt) - .expect("claiming to have generated a key pair for an unrecognized attempt") - .share(preprocesses, key_pair) -} - -fn unflatten( - spec: &TributarySpec, - removed: &[::G], - data: &mut HashMap>, -) { - for (validator, _) in spec.validators() { - let Some(range) = spec.i(removed, validator) else { continue }; - let Some(all_segments) = data.remove(&range.start) else { - continue; - }; - let mut data_vec = Vec::<_>::decode(&mut all_segments.as_slice()).unwrap(); - for i in u16::from(range.start) .. u16::from(range.end) { - let i = Participant::new(i).unwrap(); - data.insert(i, data_vec.remove(0)); - } - } -} - -impl< - D: Db, - T: DbTxn, - Pro: Processors, - PST: PublishSeraiTransaction, - PTT: PTTTrait, - RID: RIDTrait, - P: P2p, - > TributaryBlockHandler<'_, D, T, Pro, PST, PTT, RID, P> -{ - fn accumulate( - &mut self, - removed: &[::G], - data_spec: &DataSpecification, - signer: ::G, - data: &Vec, - ) -> Accumulation { - log::debug!("accumulating entry for {:?} attempt #{}", &data_spec.topic, &data_spec.attempt); - let genesis = self.spec.genesis(); - if DataDb::get(self.txn, genesis, data_spec, &signer.to_bytes()).is_some() { - panic!("accumulating data for a participant multiple times"); - } - let signer_shares = { - let Some(signer_i) = self.spec.i(removed, signer) else { - log::warn!("accumulating data from {} who was removed", hex::encode(signer.to_bytes())); - return Accumulation::NotReady; - }; - u16::from(signer_i.end) - u16::from(signer_i.start) - }; - - let prior_received = DataReceived::get(self.txn, genesis, data_spec).unwrap_or_default(); - let now_received = prior_received + signer_shares; - DataReceived::set(self.txn, genesis, data_spec, &now_received); - DataDb::set(self.txn, genesis, data_spec, &signer.to_bytes(), data); - - let received_range = (prior_received + 1) ..= now_received; - - // If 2/3rds of the network participated in this preprocess, queue it for an automatic - // re-attempt - // DkgConfirmation doesn't have a re-attempt as it's just an extension for Dkg - if (data_spec.label == Label::Preprocess) && - received_range.contains(&self.spec.t()) && - (data_spec.topic != Topic::DkgConfirmation) - { - // Double check the attempt on this entry, as we don't want to schedule a re-attempt if this - // is an old entry - // This is an assert, not part of the if check, as old data shouldn't be here in the first - // place - assert_eq!(AttemptDb::attempt(self.txn, genesis, data_spec.topic), Some(data_spec.attempt)); - ReattemptDb::schedule_reattempt(self.txn, genesis, self.block_number, data_spec.topic); - } - - // If we have all the needed commitments/preprocesses/shares, tell the processor - let needs_everyone = - (data_spec.topic == Topic::Dkg) || (data_spec.topic == Topic::DkgConfirmation); - let needed = if needs_everyone { self.spec.n(removed) } else { self.spec.t() }; - if received_range.contains(&needed) { - log::debug!( - "accumulation for entry {:?} attempt #{} is ready", - &data_spec.topic, - &data_spec.attempt - ); - - let mut data = HashMap::new(); - for validator in self.spec.validators().iter().map(|validator| validator.0) { - let Some(i) = self.spec.i(removed, validator) else { continue }; - data.insert( - i.start, - if let Some(data) = DataDb::get(self.txn, genesis, data_spec, &validator.to_bytes()) { - data - } else { - continue; - }, - ); - } - - assert_eq!(data.len(), usize::from(needed)); - - // Remove our own piece of data, if we were involved - if let Some(i) = 
self.spec.i(removed, Ristretto::generator() * self.our_key.deref()) { - if data.remove(&i.start).is_some() { - return Accumulation::Ready(DataSet::Participating(data)); - } - } - return Accumulation::Ready(DataSet::NotParticipating); - } - Accumulation::NotReady - } - - fn handle_data( - &mut self, - removed: &[::G], - data_spec: &DataSpecification, - bytes: &Vec, - signed: &Signed, - ) -> Accumulation { - let genesis = self.spec.genesis(); - - let Some(curr_attempt) = AttemptDb::attempt(self.txn, genesis, data_spec.topic) else { - // Premature publication of a valid ID/publication of an invalid ID - self.fatal_slash(signed.signer.to_bytes(), "published data for ID without an attempt"); - return Accumulation::NotReady; - }; - - // If they've already published a TX for this attempt, slash - // This shouldn't be reachable since nonces were made inserted by the coordinator, yet it's a - // cheap check to leave in for safety - if DataDb::get(self.txn, genesis, data_spec, &signed.signer.to_bytes()).is_some() { - self.fatal_slash(signed.signer.to_bytes(), "published data multiple times"); - return Accumulation::NotReady; - } - - // If the attempt is lesser than the blockchain's, return - if data_spec.attempt < curr_attempt { - log::debug!( - "dated attempt published onto tributary for topic {:?} (used attempt {}, current {})", - data_spec.topic, - data_spec.attempt, - curr_attempt - ); - return Accumulation::NotReady; - } - // If the attempt is greater, this is a premature publication, full slash - if data_spec.attempt > curr_attempt { - self.fatal_slash( - signed.signer.to_bytes(), - "published data with an attempt which hasn't started", - ); - return Accumulation::NotReady; - } - - // TODO: We can also full slash if shares before all commitments, or share before the - // necessary preprocesses - - // TODO: If this is shares, we need to check they are part of the selected signing set - - // Accumulate this data - self.accumulate(removed, data_spec, signed.signer, bytes) - } - - fn check_sign_data_len( - &mut self, - removed: &[::G], - signer: ::G, - len: usize, - ) -> Result<(), ()> { - let Some(signer_i) = self.spec.i(removed, signer) else { - // TODO: Ensure processor doesn't so participate/check how it handles removals for being - // offline - self.fatal_slash(signer.to_bytes(), "signer participated despite being removed"); - Err(())? 
- }; - if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) { - self.fatal_slash( - signer.to_bytes(), - "signer published a distinct amount of sign data than they had shares", - ); - Err(())?; - } - Ok(()) - } - - // TODO: Don't call fatal_slash in here, return the party to fatal_slash to ensure no further - // execution occurs - pub(crate) async fn handle_application_tx(&mut self, tx: Transaction) { - let genesis = self.spec.genesis(); - - // Don't handle transactions from fatally slashed participants - // This prevents removed participants from sabotaging the removal signing sessions and so on - // TODO: Because fatally slashed participants can still publish onto the blockchain, they have - // a notable DoS ability - if let TransactionKind::Signed(_, signed) = tx.kind() { - if FatallySlashed::get(self.txn, genesis, signed.signer.to_bytes()).is_some() { - return; - } - } - - match tx { - Transaction::RemoveParticipantDueToDkg { participant, signed } => { - if self.spec.i(&[], participant).is_none() { - self.fatal_slash( - participant.to_bytes(), - "RemoveParticipantDueToDkg vote for non-validator", - ); - return; - } - - let participant = participant.to_bytes(); - let signer = signed.signer.to_bytes(); - - assert!( - VotedToRemove::get(self.txn, genesis, signer, participant).is_none(), - "VotedToRemove multiple times despite a single nonce being allocated", - ); - VotedToRemove::set(self.txn, genesis, signer, participant, &()); - - let prior_votes = VotesToRemove::get(self.txn, genesis, participant).unwrap_or(0); - let signer_votes = - self.spec.i(&[], signed.signer).expect("signer wasn't a validator for this network?"); - let new_votes = prior_votes + u16::from(signer_votes.end) - u16::from(signer_votes.start); - VotesToRemove::set(self.txn, genesis, participant, &new_votes); - if ((prior_votes + 1) ..= new_votes).contains(&self.spec.t()) { - self.fatal_slash(participant, "RemoveParticipantDueToDkg vote") - } - } - - Transaction::DkgCommitments { attempt, commitments, signed } => { - let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { - self.fatal_slash(signed.signer.to_bytes(), "DkgCommitments with an unrecognized attempt"); - return; - }; - let Ok(()) = self.check_sign_data_len(&removed, signed.signer, commitments.len()) else { - return; - }; - let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Preprocess, attempt }; - match self.handle_data(&removed, &data_spec, &commitments.encode(), &signed) { - Accumulation::Ready(DataSet::Participating(mut commitments)) => { - log::info!("got all DkgCommitments for {}", hex::encode(genesis)); - unflatten(self.spec, &removed, &mut commitments); - self - .processors - .send( - self.spec.set().network, - key_gen::CoordinatorMessage::Commitments { - id: KeyGenId { session: self.spec.set().session, attempt }, - commitments, - }, - ) - .await; - } - Accumulation::Ready(DataSet::NotParticipating) => { - assert!( - removed.contains(&(Ristretto::generator() * self.our_key.deref())), - "NotParticipating in a DkgCommitments we weren't removed for" - ); - } - Accumulation::NotReady => {} - } - } - - Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => { - let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { - self.fatal_slash(signed.signer.to_bytes(), "DkgShares with an unrecognized attempt"); - return; - }; - let not_participating = removed.contains(&(Ristretto::generator() * self.our_key.deref())); - - let Ok(()) = 
self.check_sign_data_len(&removed, signed.signer, shares.len()) else { - return; - }; - - let Some(sender_i) = self.spec.i(&removed, signed.signer) else { - self.fatal_slash( - signed.signer.to_bytes(), - "DkgShares for a DKG they aren't participating in", - ); - return; - }; - let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start); - for shares in &shares { - if shares.len() != (usize::from(self.spec.n(&removed) - sender_is_len)) { - self.fatal_slash(signed.signer.to_bytes(), "invalid amount of DKG shares"); - return; - } - } - - // Save each share as needed for blame - for (from_offset, shares) in shares.iter().enumerate() { - let from = - Participant::new(u16::from(sender_i.start) + u16::try_from(from_offset).unwrap()) - .unwrap(); - - for (to_offset, share) in shares.iter().enumerate() { - // 0-indexed (the enumeration) to 1-indexed (Participant) - let mut to = u16::try_from(to_offset).unwrap() + 1; - // Adjust for the omission of the sender's own shares - if to >= u16::from(sender_i.start) { - to += u16::from(sender_i.end) - u16::from(sender_i.start); - } - let to = Participant::new(to).unwrap(); - - DkgShare::set(self.txn, genesis, from.into(), to.into(), share); - } - } - - // Filter down to only our share's bytes for handle - let our_shares = if let Some(our_i) = - self.spec.i(&removed, Ristretto::generator() * self.our_key.deref()) - { - if sender_i == our_i { - vec![] - } else { - // 1-indexed to 0-indexed - let mut our_i_pos = u16::from(our_i.start) - 1; - // Handle the omission of the sender's own data - if u16::from(our_i.start) > u16::from(sender_i.start) { - our_i_pos -= sender_is_len; - } - let our_i_pos = usize::from(our_i_pos); - shares - .iter_mut() - .map(|shares| { - shares - .drain( - our_i_pos .. - (our_i_pos + usize::from(u16::from(our_i.end) - u16::from(our_i.start))), - ) - .collect::>() - }) - .collect() - } - } else { - assert!( - not_participating, - "we didn't have an i while handling DkgShares we weren't removed for" - ); - // Since we're not participating, simply save vec![] for our shares - vec![] - }; - // Drop shares as it's presumably been mutated into invalidity - drop(shares); - - let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt }; - let encoded_data = (confirmation_nonces.to_vec(), our_shares.encode()).encode(); - match self.handle_data(&removed, &data_spec, &encoded_data, &signed) { - Accumulation::Ready(DataSet::Participating(confirmation_nonces_and_shares)) => { - log::info!("got all DkgShares for {}", hex::encode(genesis)); - - let mut confirmation_nonces = HashMap::new(); - let mut shares = HashMap::new(); - for (participant, confirmation_nonces_and_shares) in confirmation_nonces_and_shares { - let (these_confirmation_nonces, these_shares) = - <(Vec, Vec)>::decode(&mut confirmation_nonces_and_shares.as_slice()) - .unwrap(); - confirmation_nonces.insert(participant, these_confirmation_nonces); - shares.insert(participant, these_shares); - } - ConfirmationNonces::set(self.txn, genesis, attempt, &confirmation_nonces); - - // shares is a HashMap>>>, with the values representing: - // - Each of the sender's shares - // - Each of the our shares - // - Each share - // We need a Vec>>, with the outer being each of ours - let mut expanded_shares = vec![]; - for (sender_start_i, shares) in shares { - let shares: Vec>> = Vec::<_>::decode(&mut shares.as_slice()).unwrap(); - for (sender_i_offset, our_shares) in shares.into_iter().enumerate() { - for (our_share_i, our_share) in 
our_shares.into_iter().enumerate() { - if expanded_shares.len() <= our_share_i { - expanded_shares.push(HashMap::new()); - } - expanded_shares[our_share_i].insert( - Participant::new( - u16::from(sender_start_i) + u16::try_from(sender_i_offset).unwrap(), - ) - .unwrap(), - our_share, - ); - } - } - } - - self - .processors - .send( - self.spec.set().network, - key_gen::CoordinatorMessage::Shares { - id: KeyGenId { session: self.spec.set().session, attempt }, - shares: expanded_shares, - }, - ) - .await; - } - Accumulation::Ready(DataSet::NotParticipating) => { - assert!(not_participating, "NotParticipating in a DkgShares we weren't removed for"); - } - Accumulation::NotReady => {} - } - } - - Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => { - let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { - self - .fatal_slash(signed.signer.to_bytes(), "InvalidDkgShare with an unrecognized attempt"); - return; - }; - let Some(range) = self.spec.i(&removed, signed.signer) else { - self.fatal_slash( - signed.signer.to_bytes(), - "InvalidDkgShare for a DKG they aren't participating in", - ); - return; - }; - if !range.contains(&accuser) { - self.fatal_slash( - signed.signer.to_bytes(), - "accused with a Participant index which wasn't theirs", - ); - return; - } - if range.contains(&faulty) { - self.fatal_slash(signed.signer.to_bytes(), "accused self of having an InvalidDkgShare"); - return; - } - - let Some(share) = DkgShare::get(self.txn, genesis, accuser.into(), faulty.into()) else { - self.fatal_slash( - signed.signer.to_bytes(), - "InvalidDkgShare had a non-existent faulty participant", - ); - return; - }; - self - .processors - .send( - self.spec.set().network, - key_gen::CoordinatorMessage::VerifyBlame { - id: KeyGenId { session: self.spec.set().session, attempt }, - accuser, - accused: faulty, - share, - blame, - }, - ) - .await; - } - - Transaction::DkgConfirmed { attempt, confirmation_share, signed } => { - let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { - self.fatal_slash(signed.signer.to_bytes(), "DkgConfirmed with an unrecognized attempt"); - return; - }; - - let data_spec = - DataSpecification { topic: Topic::DkgConfirmation, label: Label::Share, attempt }; - match self.handle_data(&removed, &data_spec, &confirmation_share.to_vec(), &signed) { - Accumulation::Ready(DataSet::Participating(shares)) => { - log::info!("got all DkgConfirmed for {}", hex::encode(genesis)); - - let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { - panic!( - "DkgConfirmed for everyone yet didn't have the removed parties for this attempt", - ); - }; - - let preprocesses = ConfirmationNonces::get(self.txn, genesis, attempt).unwrap(); - // TODO: This can technically happen under very very very specific timing as the txn - // put happens before DkgConfirmed, yet the txn commit isn't guaranteed to - let key_pair = DkgKeyPair::get(self.txn, genesis, attempt).expect( - "in DkgConfirmed handling, which happens after everyone \ - (including us) fires DkgConfirmed, yet no confirming key pair", - ); - let mut confirmer = DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt) - .expect("confirming DKG for unrecognized attempt"); - let sig = match confirmer.complete(preprocesses, &key_pair, shares) { - Ok(sig) => sig, - Err(p) => { - let mut tx = Transaction::RemoveParticipantDueToDkg { - participant: self.spec.reverse_lookup_i(&removed, p).unwrap(), - signed: Transaction::empty_signed(), - }; - 
tx.sign(&mut OsRng, genesis, self.our_key); - self.publish_tributary_tx.publish_tributary_tx(tx).await; - return; - } - }; - - DkgLocallyCompleted::set(self.txn, genesis, &()); - - self - .publish_serai_tx - .publish_set_keys( - self.db, - self.spec.set(), - removed.into_iter().map(|key| key.to_bytes().into()).collect(), - key_pair, - Signature(sig), - ) - .await; - } - Accumulation::Ready(DataSet::NotParticipating) => { - panic!("wasn't a participant in DKG confirmination shares") - } - Accumulation::NotReady => {} - } - } - - Transaction::CosignSubstrateBlock(hash) => { - AttemptDb::recognize_topic( - self.txn, - genesis, - Topic::SubstrateSign(SubstrateSignableId::CosigningSubstrateBlock(hash)), - ); - - let block_number = SeraiBlockNumber::get(self.txn, hash) - .expect("CosignSubstrateBlock yet didn't save Serai block number"); - let msg = coordinator::CoordinatorMessage::CosignSubstrateBlock { - id: SubstrateSignId { - session: self.spec.set().session, - id: SubstrateSignableId::CosigningSubstrateBlock(hash), - attempt: 0, - }, - block_number, - }; - self.processors.send(self.spec.set().network, msg).await; - } - - Transaction::Batch { block: _, batch } => { - // Because this Batch has achieved synchrony, its batch ID should be authorized - AttemptDb::recognize_topic( - self.txn, - genesis, - Topic::SubstrateSign(SubstrateSignableId::Batch(batch)), - ); - self - .recognized_id - .recognized_id( - self.spec.set(), - genesis, - RecognizedIdType::Batch, - batch.to_le_bytes().to_vec(), - ) - .await; - } - - Transaction::SubstrateBlock(block) => { - let plan_ids = PlanIds::get(self.txn, &genesis, block).expect( - "synced a tributary block finalizing a substrate block in a provided transaction \ - despite us not providing that transaction", - ); - - for id in plan_ids { - AttemptDb::recognize_topic(self.txn, genesis, Topic::Sign(id)); - self - .recognized_id - .recognized_id(self.spec.set(), genesis, RecognizedIdType::Plan, id.to_vec()) - .await; - } - } - - Transaction::SubstrateSign(data) => { - // Provided transactions ensure synchrony on any signing protocol, and we won't start - // signing with threshold keys before we've confirmed them on-chain - let Some(removed) = - crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis) - else { - self.fatal_slash( - data.signed.signer.to_bytes(), - "signing despite not having set keys on substrate", - ); - return; - }; - let signer = data.signed.signer; - let Ok(()) = self.check_sign_data_len(&removed, signer, data.data.len()) else { - return; - }; - let expected_len = match data.label { - Label::Preprocess => 64, - Label::Share => 32, - }; - for data in &data.data { - if data.len() != expected_len { - self.fatal_slash( - signer.to_bytes(), - "unexpected length data for substrate signing protocol", - ); - return; - } - } - - let data_spec = DataSpecification { - topic: Topic::SubstrateSign(data.plan), - label: data.label, - attempt: data.attempt, - }; - let Accumulation::Ready(DataSet::Participating(mut results)) = - self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed) - else { - return; - }; - unflatten(self.spec, &removed, &mut results); - - let id = SubstrateSignId { - session: self.spec.set().session, - id: data.plan, - attempt: data.attempt, - }; - let msg = match data.label { - Label::Preprocess => coordinator::CoordinatorMessage::SubstratePreprocesses { - id, - preprocesses: results.into_iter().map(|(v, p)| (v, p.try_into().unwrap())).collect(), - }, - Label::Share => 
coordinator::CoordinatorMessage::SubstrateShares { - id, - shares: results.into_iter().map(|(v, p)| (v, p.try_into().unwrap())).collect(), - }, - }; - self.processors.send(self.spec.set().network, msg).await; - } - - Transaction::Sign(data) => { - let Some(removed) = - crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis) - else { - self.fatal_slash( - data.signed.signer.to_bytes(), - "signing despite not having set keys on substrate", - ); - return; - }; - let Ok(()) = self.check_sign_data_len(&removed, data.signed.signer, data.data.len()) else { - return; - }; - - let data_spec = DataSpecification { - topic: Topic::Sign(data.plan), - label: data.label, - attempt: data.attempt, - }; - if let Accumulation::Ready(DataSet::Participating(mut results)) = - self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed) - { - unflatten(self.spec, &removed, &mut results); - let id = - SignId { session: self.spec.set().session, id: data.plan, attempt: data.attempt }; - self - .processors - .send( - self.spec.set().network, - match data.label { - Label::Preprocess => { - sign::CoordinatorMessage::Preprocesses { id, preprocesses: results } - } - Label::Share => sign::CoordinatorMessage::Shares { id, shares: results }, - }, - ) - .await; - } - } - - Transaction::SignCompleted { plan, tx_hash, first_signer, signature: _ } => { - log::info!( - "on-chain SignCompleted claims {} completes {}", - hex::encode(&tx_hash), - hex::encode(plan) - ); - - if AttemptDb::attempt(self.txn, genesis, Topic::Sign(plan)).is_none() { - self.fatal_slash(first_signer.to_bytes(), "claimed an unrecognized plan was completed"); - return; - }; - - // TODO: Confirm this signer hasn't prior published a completion - - let msg = sign::CoordinatorMessage::Completed { - session: self.spec.set().session, - id: plan, - tx: tx_hash, - }; - self.processors.send(self.spec.set().network, msg).await; - } - - Transaction::SlashReport(points, signed) => { - // Uses &[] as we only need the length which is independent to who else was removed - let signer_range = self.spec.i(&[], signed.signer).unwrap(); - let signer_len = u16::from(signer_range.end) - u16::from(signer_range.start); - if points.len() != (self.spec.validators().len() - 1) { - self.fatal_slash( - signed.signer.to_bytes(), - "submitted a distinct amount of slash points to participants", - ); - return; - } - - if SlashReports::get(self.txn, genesis, signed.signer.to_bytes()).is_some() { - self.fatal_slash(signed.signer.to_bytes(), "submitted multiple slash points"); - return; - } - SlashReports::set(self.txn, genesis, signed.signer.to_bytes(), &points); - - let prior_reported = SlashReported::get(self.txn, genesis).unwrap_or(0); - let now_reported = prior_reported + signer_len; - SlashReported::set(self.txn, genesis, &now_reported); - - if (prior_reported < self.spec.t()) && (now_reported >= self.spec.t()) { - SlashReportCutOff::set( - self.txn, - genesis, - // 30 minutes into the future - &(u64::from(self.block_number) + - ((30 * 60 * 1000) / u64::from(tributary::tendermint::TARGET_BLOCK_TIME))), - ); - } - } - } - } -} diff --git a/coordinator/src/tributary/mod.rs b/coordinator/src/tributary/mod.rs deleted file mode 100644 index 27bb6396..00000000 --- a/coordinator/src/tributary/mod.rs +++ /dev/null @@ -1,100 +0,0 @@ -use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; - -use serai_client::validator_sets::primitives::ExternalValidatorSet; - -use tributary::{ - ReadWrite, - transaction::{TransactionError, TransactionKind, Transaction 
as TransactionTrait}, - Tributary, -}; - -mod db; -pub use db::*; - -mod spec; -pub use spec::TributarySpec; - -mod transaction; -pub use transaction::{Label, SignData, Transaction}; - -mod signing_protocol; - -mod handle; -pub use handle::*; - -pub mod scanner; - -pub fn removed_as_of_dkg_attempt( - getter: &impl Get, - genesis: [u8; 32], - attempt: u32, -) -> Option::G>> { - if attempt == 0 { - Some(vec![]) - } else { - RemovedAsOfDkgAttempt::get(getter, genesis, attempt).map(|keys| { - keys.iter().map(|key| ::G::from_bytes(key).unwrap()).collect() - }) - } -} - -pub fn removed_as_of_set_keys( - getter: &impl Get, - set: ExternalValidatorSet, - genesis: [u8; 32], -) -> Option::G>> { - // SeraiDkgCompleted has the key placed on-chain. - // This key can be uniquely mapped to an attempt so long as one participant was honest, which we - // assume as a presumably honest participant. - // Resolve from generated key to attempt to fatally slashed as of attempt. - - // This expect will trigger if this is prematurely called and Substrate has tracked the keys yet - // we haven't locally synced and handled the Tributary - // All callers of this, at the time of writing, ensure the Tributary has sufficiently synced - // making the panic with context more desirable than the None - let attempt = KeyToDkgAttempt::get(getter, SeraiDkgCompleted::get(getter, set)?) - .expect("key completed on-chain didn't have an attempt related"); - removed_as_of_dkg_attempt(getter, genesis, attempt) -} - -pub async fn publish_signed_transaction( - txn: &mut D::Transaction<'_>, - tributary: &Tributary, - tx: Transaction, -) { - log::debug!("publishing transaction {}", hex::encode(tx.hash())); - - let (order, signer) = if let TransactionKind::Signed(order, signed) = tx.kind() { - let signer = signed.signer; - - // Safe as we should deterministically create transactions, meaning if this is already on-disk, - // it's what we're saving now - SignedTransactionDb::set(txn, &order, signed.nonce, &tx.serialize()); - - (order, signer) - } else { - panic!("non-signed transaction passed to publish_signed_transaction"); - }; - - // If we're trying to publish 5, when the last transaction published was 3, this will delay - // publication until the point in time we publish 4 - while let Some(tx) = SignedTransactionDb::take_signed_transaction( - txn, - &order, - tributary - .next_nonce(&signer, &order) - .await - .expect("we don't have a nonce, meaning we aren't a participant on this tributary"), - ) { - // We need to return a proper error here to enable that, due to a race condition around - // multiple publications - match tributary.add_transaction(tx.clone()).await { - Ok(_) => {} - // Some asynchonicity if InvalidNonce, assumed safe to deterministic nonces - Err(TransactionError::InvalidNonce) => { - log::warn!("publishing TX {tx:?} returned InvalidNonce. 
was it already added?") - } - Err(e) => panic!("created an invalid transaction: {e:?}"), - } - } -} diff --git a/coordinator/src/tributary/scanner.rs b/coordinator/src/tributary/scanner.rs deleted file mode 100644 index 8e1f4842..00000000 --- a/coordinator/src/tributary/scanner.rs +++ /dev/null @@ -1,804 +0,0 @@ -use core::{marker::PhantomData, ops::Deref, future::Future, time::Duration}; -use std::{sync::Arc, collections::HashSet}; - -use zeroize::Zeroizing; - -use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; - -use tokio::sync::broadcast; - -use scale::{Encode, Decode}; -use serai_client::{ - primitives::{SeraiAddress, Signature}, - validator_sets::primitives::{ExternalValidatorSet, KeyPair}, - Serai, -}; - -use serai_db::DbTxn; - -use processor_messages::coordinator::{SubstrateSignId, SubstrateSignableId}; - -use tributary::{ - TransactionKind, Transaction as TributaryTransaction, TransactionError, Block, TributaryReader, - tendermint::{ - tx::{TendermintTx, Evidence, decode_signed_message}, - TendermintNetwork, - }, -}; - -use crate::{Db, processors::Processors, substrate::BatchInstructionsHashDb, tributary::*, P2p}; - -#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)] -pub enum RecognizedIdType { - Batch, - Plan, -} - -#[async_trait::async_trait] -pub trait RIDTrait { - async fn recognized_id( - &self, - set: ExternalValidatorSet, - genesis: [u8; 32], - kind: RecognizedIdType, - id: Vec, - ); -} -#[async_trait::async_trait] -impl< - FRid: Send + Future, - F: Sync + Fn(ExternalValidatorSet, [u8; 32], RecognizedIdType, Vec) -> FRid, - > RIDTrait for F -{ - async fn recognized_id( - &self, - set: ExternalValidatorSet, - genesis: [u8; 32], - kind: RecognizedIdType, - id: Vec, - ) { - (self)(set, genesis, kind, id).await - } -} - -#[async_trait::async_trait] -pub trait PublishSeraiTransaction { - async fn publish_set_keys( - &self, - db: &(impl Sync + Get), - set: ExternalValidatorSet, - removed: Vec, - key_pair: KeyPair, - signature: Signature, - ); -} - -mod impl_pst_for_serai { - use super::*; - - use serai_client::SeraiValidatorSets; - - // Uses a macro because Rust can't resolve the lifetimes/generics around the check function - // check is expected to return true if the effect has already occurred - // The generated publish function will return true if *we* published the transaction - macro_rules! 
common_pst { - ($Meta: ty, $check: ident) => { - async fn publish( - serai: &Serai, - db: &impl Get, - set: ExternalValidatorSet, - tx: serai_client::Transaction, - meta: $Meta, - ) -> bool { - loop { - match serai.publish(&tx).await { - Ok(_) => return true, - // This is assumed to be some ephemeral error due to the assumed fault-free - // creation - // TODO2: Differentiate connection errors from invariants - Err(e) => { - // The following block is irrelevant, and can/likely will fail, if we're publishing - // a TX for an old session - // If we're on a newer session, move on - if crate::RetiredTributaryDb::get(db, set).is_some() { - log::warn!("trying to publish a TX relevant to set {set:?} which isn't the latest"); - return false; - } - - if let Ok(serai) = serai.as_of_latest_finalized_block().await { - let serai = serai.validator_sets(); - - // Check if someone else published the TX in question - if $check(serai, set, meta).await { - return false; - } - } - - log::error!("couldn't connect to Serai node to publish TX: {e:?}"); - tokio::time::sleep(core::time::Duration::from_secs(5)).await; - } - } - } - } - }; - } - - #[async_trait::async_trait] - impl PublishSeraiTransaction for Serai { - async fn publish_set_keys( - &self, - db: &(impl Sync + Get), - set: ExternalValidatorSet, - removed: Vec, - key_pair: KeyPair, - signature: Signature, - ) { - // TODO: BoundedVec as an arg to avoid this expect - let tx = SeraiValidatorSets::set_keys( - set.network, - removed.try_into().expect("removing more than allowed"), - key_pair, - signature, - ); - async fn check(serai: SeraiValidatorSets<'_>, set: ExternalValidatorSet, (): ()) -> bool { - if matches!(serai.keys(set).await, Ok(Some(_))) { - log::info!("another coordinator set key pair for {:?}", set); - return true; - } - false - } - common_pst!((), check); - if publish(self, db, set, tx, ()).await { - log::info!("published set keys for {set:?}"); - } - } - } -} - -#[async_trait::async_trait] -pub trait PTTTrait { - async fn publish_tributary_tx(&self, tx: Transaction); -} -#[async_trait::async_trait] -impl, F: Sync + Fn(Transaction) -> FPtt> PTTTrait for F { - async fn publish_tributary_tx(&self, tx: Transaction) { - (self)(tx).await - } -} - -pub struct TributaryBlockHandler< - 'a, - D: Db, - T: DbTxn, - Pro: Processors, - PST: PublishSeraiTransaction, - PTT: PTTTrait, - RID: RIDTrait, - P: P2p, -> { - pub db: &'a D, - pub txn: &'a mut T, - pub our_key: &'a Zeroizing<::F>, - pub recognized_id: &'a RID, - pub processors: &'a Pro, - pub publish_serai_tx: &'a PST, - pub publish_tributary_tx: &'a PTT, - pub spec: &'a TributarySpec, - block: Block, - pub block_number: u32, - _p2p: PhantomData
<P>
, -} - -impl< - D: Db, - T: DbTxn, - Pro: Processors, - PST: PublishSeraiTransaction, - PTT: PTTTrait, - RID: RIDTrait, - P: P2p, - > TributaryBlockHandler<'_, D, T, Pro, PST, PTT, RID, P> -{ - pub fn fatal_slash(&mut self, slashing: [u8; 32], reason: &str) { - let genesis = self.spec.genesis(); - - log::warn!("fatally slashing {}. reason: {}", hex::encode(slashing), reason); - FatallySlashed::set_fatally_slashed(self.txn, genesis, slashing); - - // TODO: disconnect the node from network/ban from further participation in all Tributaries - } - - // TODO: Once Substrate confirms a key, we need to rotate our validator set OR form a second - // Tributary post-DKG - // https://github.com/serai-dex/serai/issues/426 - - async fn handle(mut self) { - log::info!("found block for Tributary {:?}", self.spec.set()); - - let transactions = self.block.transactions.clone(); - for tx in transactions { - match tx { - TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(ev)) => { - // Since the evidence is on the chain, it should already have been validated - // We can just punish the signer - let data = match ev { - Evidence::ConflictingMessages(first, second) => (first, Some(second)), - Evidence::InvalidPrecommit(first) | Evidence::InvalidValidRound(first) => (first, None), - }; - let msgs = ( - decode_signed_message::>(&data.0).unwrap(), - if data.1.is_some() { - Some( - decode_signed_message::>(&data.1.unwrap()) - .unwrap(), - ) - } else { - None - }, - ); - - // Since anything with evidence is fundamentally faulty behavior, not just temporal - // errors, mark the node as fatally slashed - self.fatal_slash(msgs.0.msg.sender, &format!("invalid tendermint messages: {msgs:?}")); - } - TributaryTransaction::Application(tx) => { - self.handle_application_tx(tx).await; - } - } - } - - let genesis = self.spec.genesis(); - - let current_fatal_slashes = FatalSlashes::get_as_keys(self.txn, genesis); - - // Calculate the shares still present, spinning if not enough are - // still_present_shares is used by a below branch, yet it's a natural byproduct of checking if - // we should spin, hence storing it in a variable here - let still_present_shares = { - // Start with the original n value - let mut present_shares = self.spec.n(&[]); - // Remove everyone fatally slashed - for removed in ¤t_fatal_slashes { - let original_i_for_removed = - self.spec.i(&[], *removed).expect("removed party was never present"); - let removed_shares = - u16::from(original_i_for_removed.end) - u16::from(original_i_for_removed.start); - present_shares -= removed_shares; - } - - // Spin if the present shares don't satisfy the required threshold - if present_shares < self.spec.t() { - loop { - log::error!( - "fatally slashed so many participants for {:?} we no longer meet the threshold", - self.spec.set() - ); - tokio::time::sleep(core::time::Duration::from_secs(60)).await; - } - } - - present_shares - }; - - for topic in ReattemptDb::take(self.txn, genesis, self.block_number) { - let attempt = AttemptDb::start_next_attempt(self.txn, genesis, topic); - log::info!("re-attempting {topic:?} with attempt {attempt}"); - - // Slash people who failed to participate as expected in the prior attempt - { - let prior_attempt = attempt - 1; - let (removed, expected_participants) = match topic { - Topic::Dkg => { - // Every validator who wasn't removed is expected to have participated - let removed = - crate::tributary::removed_as_of_dkg_attempt(self.txn, genesis, prior_attempt) - .expect("prior attempt didn't have its removed saved to disk"); - 
let removed_set = removed.iter().copied().collect::>(); - ( - removed, - self - .spec - .validators() - .into_iter() - .filter_map(|(validator, _)| { - Some(validator).filter(|validator| !removed_set.contains(validator)) - }) - .collect(), - ) - } - Topic::DkgConfirmation => { - panic!("TODO: re-attempting DkgConfirmation when we should be re-attempting the Dkg") - } - Topic::SubstrateSign(_) | Topic::Sign(_) => { - let removed = - crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis) - .expect("SubstrateSign/Sign yet have yet to set keys"); - // TODO: If 67% sent preprocesses, this should be them. Else, this should be vec![] - let expected_participants = vec![]; - (removed, expected_participants) - } - }; - - let (expected_topic, expected_label) = match topic { - Topic::Dkg => { - let n = self.spec.n(&removed); - // If we got all the DKG shares, we should be on DKG confirmation - let share_spec = - DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt: prior_attempt }; - if DataReceived::get(self.txn, genesis, &share_spec).unwrap_or(0) == n { - // Label::Share since there is no Label::Preprocess for DkgConfirmation since the - // preprocess is part of Topic::Dkg Label::Share - (Topic::DkgConfirmation, Label::Share) - } else { - let preprocess_spec = DataSpecification { - topic: Topic::Dkg, - label: Label::Preprocess, - attempt: prior_attempt, - }; - // If we got all the DKG preprocesses, DKG shares - if DataReceived::get(self.txn, genesis, &preprocess_spec).unwrap_or(0) == n { - // Label::Share since there is no Label::Preprocess for DkgConfirmation since the - // preprocess is part of Topic::Dkg Label::Share - (Topic::Dkg, Label::Share) - } else { - (Topic::Dkg, Label::Preprocess) - } - } - } - Topic::DkgConfirmation => unreachable!(), - // If we got enough participants to move forward, then we expect shares from them all - Topic::SubstrateSign(_) | Topic::Sign(_) => (topic, Label::Share), - }; - - let mut did_not_participate = vec![]; - for expected_participant in expected_participants { - if DataDb::get( - self.txn, - genesis, - &DataSpecification { - topic: expected_topic, - label: expected_label, - attempt: prior_attempt, - }, - &expected_participant.to_bytes(), - ) - .is_none() - { - did_not_participate.push(expected_participant); - } - } - - // If a supermajority didn't participate as expected, the protocol was likely aborted due - // to detection of a completion or some larger networking error - // Accordingly, clear did_not_participate - // TODO - - // If during the DKG, explicitly mark these people as having been offline - // TODO: If they were offline sufficiently long ago, don't strike them off - if topic == Topic::Dkg { - let mut existing = OfflineDuringDkg::get(self.txn, genesis).unwrap_or(vec![]); - for did_not_participate in did_not_participate { - existing.push(did_not_participate.to_bytes()); - } - OfflineDuringDkg::set(self.txn, genesis, &existing); - } - - // Slash everyone who didn't participate as expected - // This may be overzealous as if a minority detects a completion, they'll abort yet the - // supermajority will cause the above allowance to not trigger, causing an honest minority - // to be slashed - // At the end of the protocol, the accumulated slashes are reduced by the amount obtained - // by the worst-performing member of the supermajority, and this is expected to - // sufficiently compensate for slashes which occur under normal operation - // TODO - } - - /* - All of these have the same common flow: - - 1) Check if 
this re-attempt is actually needed - 2) If so, dispatch whatever events as needed - - This is because we *always* re-attempt any protocol which had participation. That doesn't - mean we *should* re-attempt this protocol. - - The alternatives were: - 1) Note on-chain we completed a protocol, halting re-attempts upon 34%. - 2) Vote on-chain to re-attempt a protocol. - - This schema doesn't have any additional messages upon the success case (whereas - alternative #1 does) and doesn't have overhead (as alternative #2 does, sending votes and - then preprocesses. This only sends preprocesses). - */ - match topic { - Topic::Dkg => { - let mut removed = current_fatal_slashes.clone(); - - let t = self.spec.t(); - { - let mut present_shares = still_present_shares; - - // Load the parties marked as offline across the various attempts - let mut offline = OfflineDuringDkg::get(self.txn, genesis) - .unwrap_or(vec![]) - .iter() - .map(|key| ::G::from_bytes(key).unwrap()) - .collect::>(); - // Pop from the list to prioritize the removal of those recently offline - while let Some(offline) = offline.pop() { - // Make sure they weren't removed already (such as due to being fatally slashed) - // This also may trigger if they were offline across multiple attempts - if removed.contains(&offline) { - continue; - } - - // If we can remove them and still meet the threshold, do so - let original_i_for_offline = - self.spec.i(&[], offline).expect("offline was never present?"); - let offline_shares = - u16::from(original_i_for_offline.end) - u16::from(original_i_for_offline.start); - if (present_shares - offline_shares) >= t { - present_shares -= offline_shares; - removed.push(offline); - } - - // If we've removed as many people as we can, break - if present_shares == t { - break; - } - } - } - - RemovedAsOfDkgAttempt::set( - self.txn, - genesis, - attempt, - &removed.iter().map(::G::to_bytes).collect(), - ); - - if DkgLocallyCompleted::get(self.txn, genesis).is_none() { - let Some(our_i) = self.spec.i(&removed, Ristretto::generator() * self.our_key.deref()) - else { - continue; - }; - - // Since it wasn't completed, instruct the processor to start the next attempt - let id = - processor_messages::key_gen::KeyGenId { session: self.spec.set().session, attempt }; - - let params = - frost::ThresholdParams::new(t, self.spec.n(&removed), our_i.start).unwrap(); - let shares = u16::from(our_i.end) - u16::from(our_i.start); - - self - .processors - .send( - self.spec.set().network, - processor_messages::key_gen::CoordinatorMessage::GenerateKey { id, params, shares }, - ) - .await; - } - } - Topic::DkgConfirmation => unreachable!(), - Topic::SubstrateSign(inner_id) => { - let id = processor_messages::coordinator::SubstrateSignId { - session: self.spec.set().session, - id: inner_id, - attempt, - }; - match inner_id { - SubstrateSignableId::CosigningSubstrateBlock(block) => { - let block_number = SeraiBlockNumber::get(self.txn, block) - .expect("couldn't get the block number for prior attempted cosign"); - - // Check if the cosigner has a signature from our set for this block/a newer one - let latest_cosign = - crate::cosign_evaluator::LatestCosign::get(self.txn, self.spec.set().network) - .map_or(0, |cosign| cosign.block_number); - if latest_cosign < block_number { - // Instruct the processor to start the next attempt - self - .processors - .send( - self.spec.set().network, - processor_messages::coordinator::CoordinatorMessage::CosignSubstrateBlock { - id, - block_number, - }, - ) - .await; - } - } - 
SubstrateSignableId::Batch(batch) => { - // If the Batch hasn't appeared on-chain... - if BatchInstructionsHashDb::get(self.txn, self.spec.set().network, batch).is_none() { - // Instruct the processor to start the next attempt - // The processor won't continue if it's already signed a Batch - // Prior checking if the Batch is on-chain just may reduce the non-participating - // 33% from publishing their re-attempt messages - self - .processors - .send( - self.spec.set().network, - processor_messages::coordinator::CoordinatorMessage::BatchReattempt { id }, - ) - .await; - } - } - SubstrateSignableId::SlashReport => { - // If this Tributary hasn't been retired... - // (published SlashReport/took too long to do so) - if crate::RetiredTributaryDb::get(self.txn, self.spec.set()).is_none() { - let report = SlashReport::get(self.txn, self.spec.set()) - .expect("re-attempting signing a SlashReport we don't have?"); - self - .processors - .send( - self.spec.set().network, - processor_messages::coordinator::CoordinatorMessage::SignSlashReport { - id, - report, - }, - ) - .await; - } - } - } - } - Topic::Sign(id) => { - // Instruct the processor to start the next attempt - // If it has already noted a completion, it won't send a preprocess and will simply drop - // the re-attempt message - self - .processors - .send( - self.spec.set().network, - processor_messages::sign::CoordinatorMessage::Reattempt { - id: processor_messages::sign::SignId { - session: self.spec.set().session, - id, - attempt, - }, - }, - ) - .await; - } - } - } - - if Some(u64::from(self.block_number)) == SlashReportCutOff::get(self.txn, genesis) { - // Grab every slash report - let mut all_reports = vec![]; - for (i, (validator, _)) in self.spec.validators().into_iter().enumerate() { - let Some(mut report) = SlashReports::get(self.txn, genesis, validator.to_bytes()) else { - continue; - }; - // Assign them 0 points for themselves - report.insert(i, 0); - // Uses &[] as we only need the length which is independent to who else was removed - let signer_i = self.spec.i(&[], validator).unwrap(); - let signer_len = u16::from(signer_i.end) - u16::from(signer_i.start); - // Push `n` copies, one for each of their shares - for _ in 0 .. signer_len { - all_reports.push(report.clone()); - } - } - - // For each participant, grab their median - let mut medians = vec![]; - for p in 0 .. self.spec.validators().len() { - let mut median_calc = vec![]; - for report in &all_reports { - median_calc.push(report[p]); - } - median_calc.sort_unstable(); - medians.push(median_calc[median_calc.len() / 2]); - } - - // Grab the points of the last party within the best-performing threshold - // This is done by first expanding the point values by the amount of shares - let mut sorted_medians = vec![]; - for (i, (_, shares)) in self.spec.validators().into_iter().enumerate() { - for _ in 0 .. 
shares { - sorted_medians.push(medians[i]); - } - } - // Then performing the sort - sorted_medians.sort_unstable(); - let worst_points_by_party_within_threshold = sorted_medians[usize::from(self.spec.t()) - 1]; - - // Reduce everyone's points by this value - for median in &mut medians { - *median = median.saturating_sub(worst_points_by_party_within_threshold); - } - - // The threshold now has the proper incentive to report this as they no longer suffer - // negative effects - // - // Additionally, if all validators had degraded performance, they don't all get penalized for - // what's likely outside their control (as it occurred universally) - - // Mark everyone fatally slashed with u32::MAX - for (i, (validator, _)) in self.spec.validators().into_iter().enumerate() { - if FatallySlashed::get(self.txn, genesis, validator.to_bytes()).is_some() { - medians[i] = u32::MAX; - } - } - - let mut report = vec![]; - for (i, (validator, _)) in self.spec.validators().into_iter().enumerate() { - if medians[i] != 0 { - report.push((validator.to_bytes(), medians[i])); - } - } - - // This does lock in the report, meaning further slash point accumulations won't be reported - // They still have value to be locally tracked due to local decisions made based off - // accumulated slash reports - SlashReport::set(self.txn, self.spec.set(), &report); - - // Start a signing protocol for this - self - .processors - .send( - self.spec.set().network, - processor_messages::coordinator::CoordinatorMessage::SignSlashReport { - id: SubstrateSignId { - session: self.spec.set().session, - id: SubstrateSignableId::SlashReport, - attempt: 0, - }, - report, - }, - ) - .await; - } - } -} - -#[allow(clippy::too_many_arguments)] -pub(crate) async fn handle_new_blocks< - D: Db, - Pro: Processors, - PST: PublishSeraiTransaction, - PTT: PTTTrait, - RID: RIDTrait, - P: P2p, ->( - db: &mut D, - key: &Zeroizing<::F>, - recognized_id: &RID, - processors: &Pro, - publish_serai_tx: &PST, - publish_tributary_tx: &PTT, - spec: &TributarySpec, - tributary: &TributaryReader, -) { - let genesis = tributary.genesis(); - let mut last_block = LastHandledBlock::get(db, genesis).unwrap_or(genesis); - let mut block_number = TributaryBlockNumber::get(db, last_block).unwrap_or(0); - while let Some(next) = tributary.block_after(&last_block) { - let block = tributary.block(&next).unwrap(); - block_number += 1; - - // Make sure we have all of the provided transactions for this block - for tx in &block.transactions { - // Provided TXs will appear first in the Block, so we can break after we hit a non-Provided - let TransactionKind::Provided(order) = tx.kind() else { - break; - }; - - // make sure we have all the provided txs in this block locally - if !tributary.locally_provided_txs_in_block(&block.hash(), order) { - return; - } - } - - let mut db_clone = db.clone(); - let mut txn = db_clone.txn(); - TributaryBlockNumber::set(&mut txn, next, &block_number); - (TributaryBlockHandler { - db, - txn: &mut txn, - spec, - our_key: key, - recognized_id, - processors, - publish_serai_tx, - publish_tributary_tx, - block, - block_number, - _p2p: PhantomData::
<P>
, - }) - .handle() - .await; - last_block = next; - LastHandledBlock::set(&mut txn, genesis, &next); - txn.commit(); - } -} - -pub(crate) async fn scan_tributaries_task< - D: Db, - Pro: Processors, - P: P2p, - RID: 'static + Send + Sync + Clone + RIDTrait, ->( - raw_db: D, - key: Zeroizing<::F>, - recognized_id: RID, - processors: Pro, - serai: Arc, - mut tributary_event: broadcast::Receiver>, -) { - log::info!("scanning tributaries"); - - loop { - match tributary_event.recv().await { - Ok(crate::TributaryEvent::NewTributary(crate::ActiveTributary { spec, tributary })) => { - // For each Tributary, spawn a dedicated scanner task - tokio::spawn({ - let raw_db = raw_db.clone(); - let key = key.clone(); - let recognized_id = recognized_id.clone(); - let processors = processors.clone(); - let serai = serai.clone(); - async move { - let spec = &spec; - let reader = tributary.reader(); - let mut tributary_db = raw_db.clone(); - loop { - // Check if the set was retired, and if so, don't further operate - if crate::db::RetiredTributaryDb::get(&raw_db, spec.set()).is_some() { - break; - } - - // Obtain the next block notification now to prevent obtaining it immediately after - // the next block occurs - let next_block_notification = tributary.next_block_notification().await; - - handle_new_blocks::<_, _, _, _, _, P>( - &mut tributary_db, - &key, - &recognized_id, - &processors, - &*serai, - &|tx: Transaction| { - let tributary = tributary.clone(); - async move { - match tributary.add_transaction(tx.clone()).await { - Ok(_) => {} - // Can happen as this occurs on a distinct DB TXN - Err(TransactionError::InvalidNonce) => { - log::warn!( - "publishing TX {tx:?} returned InvalidNonce. was it already added?" - ) - } - Err(e) => panic!("created an invalid transaction: {e:?}"), - } - } - }, - spec, - &reader, - ) - .await; - - // Run either when the notification fires, or every interval of block_time - let _ = tokio::time::timeout( - Duration::from_secs(tributary::Tributary::::block_time().into()), - next_block_notification, - ) - .await; - } - } - }); - } - // The above loop simply checks the DB every few seconds, voiding the need for this event - Ok(crate::TributaryEvent::TributaryRetired(_)) => {} - Err(broadcast::error::RecvError::Lagged(_)) => { - panic!("scan_tributaries lagged to handle tributary_event") - } - Err(broadcast::error::RecvError::Closed) => panic!("tributary_event sender closed"), - } - } -} diff --git a/coordinator/src/tributary/signing_protocol.rs b/coordinator/src/tributary/signing_protocol.rs deleted file mode 100644 index 20dda48e..00000000 --- a/coordinator/src/tributary/signing_protocol.rs +++ /dev/null @@ -1,331 +0,0 @@ -/* - A MuSig-based signing protocol executed with the validators' keys. - - This is used for confirming the results of a DKG on-chain, an operation requiring all validators - which aren't specified as removed while still satisfying a supermajority. - - Since we're using the validator's keys, as needed for their being the root of trust, the - coordinator must perform the signing. This is distinct from all other group-signing operations, - as they're all done by the processor. - - The MuSig-aggregation achieves on-chain efficiency and enables a more secure design pattern. - While we could individually tack votes, that'd require logic to prevent voting multiple times and - tracking the accumulated votes. MuSig-aggregation simply requires checking the list is sorted and - the list's weight exceeds the threshold. 
- - Instead of maintaining state in memory, a combination of the DB and re-execution is used. This - is deemed acceptable re: performance as: - - 1) This is only done prior to a DKG being confirmed on Substrate and is assumed infrequent. - 2) This is an O(n) algorithm. - 3) The size of the validator set is bounded by MAX_KEY_SHARES_PER_SET. - - Accordingly, this should be tolerable. - - As for safety, it is explicitly unsafe to reuse nonces across signing sessions. This raises - concerns regarding our re-execution which is dependent on fixed nonces. Safety is derived from - the nonces being context-bound under a BFT protocol. The flow is as follows: - - 1) Decide the nonce. - 2) Publish the nonces' commitments, receiving everyone else's *and potentially the message to be - signed*. - 3) Sign and publish the signature share. - - In order for nonce re-use to occur, the received nonce commitments (or the message to be signed) - would have to be distinct and sign would have to be called again. - - Before we act on any received messages, they're ordered and finalized by a BFT algorithm. The - only way to operate on distinct received messages would be if: - - 1) A logical flaw exists, letting new messages overwrite prior messages - 2) A reorganization occurred from chain A to chain B, and with it, different messages - - Reorganizations are not supported, as BFT is assumed by the presence of a BFT algorithm. While - a significant number of processes may be byzantine, leading to BFT being broken, that still will - not trigger a reorganization. The only way to move to a distinct chain, with distinct messages, - would be by rebuilding the local process (this time following chain B). Upon any complete - rebuild, we'd re-decide nonces, achieving safety. This does set a bound preventing partial - rebuilds, which is accepted. - - Additionally, to ensure a rebuilt service isn't flagged as malicious, we have to check the - commitments generated from the decided nonces are in fact its commitments on-chain (TODO). - - TODO: We also need to review how we're handling Processor preprocesses and likely implement the - same on-chain-preprocess-matches-presumed-preprocess check before publishing shares.
-*/ - -use core::ops::Deref; -use std::collections::HashMap; - -use zeroize::{Zeroize, Zeroizing}; - -use rand_core::OsRng; - -use blake2::{Digest, Blake2s256}; - -use ciphersuite::{ - group::{ff::PrimeField, GroupEncoding}, - Ciphersuite, Ristretto, -}; -use frost::{ - FrostError, - dkg::{Participant, musig::musig}, - ThresholdKeys, - sign::*, -}; -use frost_schnorrkel::Schnorrkel; - -use scale::Encode; - -use serai_client::{ - Public, - validator_sets::primitives::{KeyPair, musig_context, set_keys_message}, -}; - -use serai_db::*; - -use crate::tributary::TributarySpec; - -create_db!( - SigningProtocolDb { - CachedPreprocesses: (context: &impl Encode) -> [u8; 32] - } -); - -struct SigningProtocol<'a, T: DbTxn, C: Encode> { - pub(crate) key: &'a Zeroizing<::F>, - pub(crate) spec: &'a TributarySpec, - pub(crate) txn: &'a mut T, - pub(crate) context: C, -} - -impl SigningProtocol<'_, T, C> { - fn preprocess_internal( - &mut self, - participants: &[::G], - ) -> (AlgorithmSignMachine, [u8; 64]) { - // Encrypt the cached preprocess as recovery of it will enable recovering the private key - // While the DB isn't expected to be arbitrarily readable, it isn't a proper secret store and - // shouldn't be trusted as one - let mut encryption_key = { - let mut encryption_key_preimage = - Zeroizing::new(b"Cached Preprocess Encryption Key".to_vec()); - encryption_key_preimage.extend(self.context.encode()); - let repr = Zeroizing::new(self.key.to_repr()); - encryption_key_preimage.extend(repr.deref()); - Blake2s256::digest(&encryption_key_preimage) - }; - let encryption_key_slice: &mut [u8] = encryption_key.as_mut(); - - let algorithm = Schnorrkel::new(b"substrate"); - let keys: ThresholdKeys = - musig(&musig_context(self.spec.set().into()), self.key, participants) - .expect("signing for a set we aren't in/validator present multiple times") - .into(); - - if CachedPreprocesses::get(self.txn, &self.context).is_none() { - let (machine, _) = - AlgorithmMachine::new(algorithm.clone(), keys.clone()).preprocess(&mut OsRng); - - let mut cache = machine.cache(); - assert_eq!(cache.0.len(), 32); - #[allow(clippy::needless_range_loop)] - for b in 0 .. 32 { - cache.0[b] ^= encryption_key_slice[b]; - } - - CachedPreprocesses::set(self.txn, &self.context, &cache.0); - } - - let cached = CachedPreprocesses::get(self.txn, &self.context).unwrap(); - let mut cached: Zeroizing<[u8; 32]> = Zeroizing::new(cached); - #[allow(clippy::needless_range_loop)] - for b in 0 .. 
32 { - cached[b] ^= encryption_key_slice[b]; - } - encryption_key_slice.zeroize(); - let (machine, preprocess) = - AlgorithmSignMachine::from_cache(algorithm, keys, CachedPreprocess(cached)); - - (machine, preprocess.serialize().try_into().unwrap()) - } - - fn share_internal( - &mut self, - participants: &[::G], - mut serialized_preprocesses: HashMap>, - msg: &[u8], - ) -> Result<(AlgorithmSignatureMachine, [u8; 32]), Participant> { - let machine = self.preprocess_internal(participants).0; - - let mut participants = serialized_preprocesses.keys().copied().collect::>(); - participants.sort(); - let mut preprocesses = HashMap::new(); - for participant in participants { - preprocesses.insert( - participant, - machine - .read_preprocess(&mut serialized_preprocesses.remove(&participant).unwrap().as_slice()) - .map_err(|_| participant)?, - ); - } - - let (machine, share) = machine.sign(preprocesses, msg).map_err(|e| match e { - FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!("{e:?}"), - FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, - })?; - - Ok((machine, share.serialize().try_into().unwrap())) - } - - fn complete_internal( - machine: AlgorithmSignatureMachine, - shares: HashMap>, - ) -> Result<[u8; 64], Participant> { - let shares = shares - .into_iter() - .map(|(p, share)| { - machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p) - }) - .collect::, _>>()?; - let signature = machine.complete(shares).map_err(|e| match e { - FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!("{e:?}"), - FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, - })?; - Ok(signature.to_bytes()) - } -} - -// Get the keys of the participants, noted by their threshold is, and return a new map indexed by -// the MuSig is. 
-fn threshold_i_map_to_keys_and_musig_i_map( - spec: &TributarySpec, - removed: &[::G], - our_key: &Zeroizing<::F>, - mut map: HashMap>, -) -> (Vec<::G>, HashMap>) { - // Insert our own index so calculations aren't offset - let our_threshold_i = spec - .i(removed, ::generator() * our_key.deref()) - .expect("MuSig t-of-n signing a for a protocol we were removed from") - .start; - assert!(map.insert(our_threshold_i, vec![]).is_none()); - - let spec_validators = spec.validators(); - let key_from_threshold_i = |threshold_i| { - for (key, _) in &spec_validators { - if threshold_i == spec.i(removed, *key).expect("MuSig t-of-n participant was removed").start { - return *key; - } - } - panic!("requested info for threshold i which doesn't exist") - }; - - let mut sorted = vec![]; - let mut threshold_is = map.keys().copied().collect::>(); - threshold_is.sort(); - for threshold_i in threshold_is { - sorted.push((key_from_threshold_i(threshold_i), map.remove(&threshold_i).unwrap())); - } - - // Now that signers are sorted, with their shares, create a map with the is needed for MuSig - let mut participants = vec![]; - let mut map = HashMap::new(); - for (raw_i, (key, share)) in sorted.into_iter().enumerate() { - let musig_i = u16::try_from(raw_i).unwrap() + 1; - participants.push(key); - map.insert(Participant::new(musig_i).unwrap(), share); - } - - map.remove(&our_threshold_i).unwrap(); - - (participants, map) -} - -type DkgConfirmerSigningProtocol<'a, T> = SigningProtocol<'a, T, (&'static [u8; 12], u32)>; - -pub(crate) struct DkgConfirmer<'a, T: DbTxn> { - key: &'a Zeroizing<::F>, - spec: &'a TributarySpec, - removed: Vec<::G>, - txn: &'a mut T, - attempt: u32, -} - -impl DkgConfirmer<'_, T> { - pub(crate) fn new<'a>( - key: &'a Zeroizing<::F>, - spec: &'a TributarySpec, - txn: &'a mut T, - attempt: u32, - ) -> Option> { - // This relies on how confirmations are inlined into the DKG protocol and they accordingly - // share attempts - let removed = crate::tributary::removed_as_of_dkg_attempt(txn, spec.genesis(), attempt)?; - Some(DkgConfirmer { key, spec, removed, txn, attempt }) - } - fn signing_protocol(&mut self) -> DkgConfirmerSigningProtocol<'_, T> { - let context = (b"DkgConfirmer", self.attempt); - SigningProtocol { key: self.key, spec: self.spec, txn: self.txn, context } - } - - fn preprocess_internal(&mut self) -> (AlgorithmSignMachine, [u8; 64]) { - let participants = self.spec.validators().iter().map(|val| val.0).collect::>(); - self.signing_protocol().preprocess_internal(&participants) - } - // Get the preprocess for this confirmation. - pub(crate) fn preprocess(&mut self) -> [u8; 64] { - self.preprocess_internal().1 - } - - fn share_internal( - &mut self, - preprocesses: HashMap>, - key_pair: &KeyPair, - ) -> Result<(AlgorithmSignatureMachine, [u8; 32]), Participant> { - let participants = self.spec.validators().iter().map(|val| val.0).collect::>(); - let preprocesses = - threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, preprocesses).1; - let msg = set_keys_message( - &self.spec.set(), - &self.removed.iter().map(|key| Public(key.to_bytes())).collect::>(), - key_pair, - ); - self.signing_protocol().share_internal(&participants, preprocesses, &msg) - } - // Get the share for this confirmation, if the preprocesses are valid. 
- pub(crate) fn share( - &mut self, - preprocesses: HashMap>, - key_pair: &KeyPair, - ) -> Result<[u8; 32], Participant> { - self.share_internal(preprocesses, key_pair).map(|(_, share)| share) - } - - pub(crate) fn complete( - &mut self, - preprocesses: HashMap>, - key_pair: &KeyPair, - shares: HashMap>, - ) -> Result<[u8; 64], Participant> { - let shares = - threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, shares).1; - - let machine = self - .share_internal(preprocesses, key_pair) - .expect("trying to complete a machine which failed to preprocess") - .0; - - DkgConfirmerSigningProtocol::<'_, T>::complete_internal(machine, shares) - } -} diff --git a/coordinator/src/tributary/spec.rs b/coordinator/src/tributary/spec.rs deleted file mode 100644 index 345584b6..00000000 --- a/coordinator/src/tributary/spec.rs +++ /dev/null @@ -1,156 +0,0 @@ -use core::{ops::Range, fmt::Debug}; -use std::{io, collections::HashMap}; - -use transcript::{Transcript, RecommendedTranscript}; - -use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; -use frost::Participant; - -use scale::Encode; -use borsh::{BorshSerialize, BorshDeserialize}; - -use serai_client::{primitives::PublicKey, validator_sets::primitives::ExternalValidatorSet}; - -fn borsh_serialize_validators( - validators: &Vec<(::G, u16)>, - writer: &mut W, -) -> Result<(), io::Error> { - let len = u16::try_from(validators.len()).unwrap(); - BorshSerialize::serialize(&len, writer)?; - for validator in validators { - BorshSerialize::serialize(&validator.0.to_bytes(), writer)?; - BorshSerialize::serialize(&validator.1, writer)?; - } - Ok(()) -} - -fn borsh_deserialize_validators( - reader: &mut R, -) -> Result::G, u16)>, io::Error> { - let len: u16 = BorshDeserialize::deserialize_reader(reader)?; - let mut res = vec![]; - for _ in 0 .. len { - let compressed: [u8; 32] = BorshDeserialize::deserialize_reader(reader)?; - let point = Option::from(::G::from_bytes(&compressed)) - .ok_or_else(|| io::Error::other("invalid point for validator"))?; - let weight: u16 = BorshDeserialize::deserialize_reader(reader)?; - res.push((point, weight)); - } - Ok(res) -} - -#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] -pub struct TributarySpec { - serai_block: [u8; 32], - start_time: u64, - set: ExternalValidatorSet, - #[borsh( - serialize_with = "borsh_serialize_validators", - deserialize_with = "borsh_deserialize_validators" - )] - validators: Vec<(::G, u16)>, -} - -impl TributarySpec { - pub fn new( - serai_block: [u8; 32], - start_time: u64, - set: ExternalValidatorSet, - set_participants: Vec<(PublicKey, u16)>, - ) -> TributarySpec { - let mut validators = vec![]; - for (participant, shares) in set_participants { - let participant = ::read_G::<&[u8]>(&mut participant.0.as_ref()) - .expect("invalid key registered as participant"); - validators.push((participant, shares)); - } - - Self { serai_block, start_time, set, validators } - } - - pub fn set(&self) -> ExternalValidatorSet { - self.set - } - - pub fn genesis(&self) -> [u8; 32] { - // Calculate the genesis for this Tributary - let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis"); - // This locks it to a specific Serai chain - genesis.append_message(b"serai_block", self.serai_block); - genesis.append_message(b"session", self.set.session.0.to_le_bytes()); - genesis.append_message(b"network", self.set.network.encode()); - let genesis = genesis.challenge(b"genesis"); - let genesis_ref: &[u8] = genesis.as_ref(); - genesis_ref[.. 
32].try_into().unwrap() - } - - pub fn start_time(&self) -> u64 { - self.start_time - } - - pub fn n(&self, removed_validators: &[::G]) -> u16 { - self - .validators - .iter() - .map(|(validator, weight)| if removed_validators.contains(validator) { 0 } else { *weight }) - .sum() - } - - pub fn t(&self) -> u16 { - // t doesn't change with regards to the amount of removed validators - ((2 * self.n(&[])) / 3) + 1 - } - - pub fn i( - &self, - removed_validators: &[::G], - key: ::G, - ) -> Option> { - let mut all_is = HashMap::new(); - let mut i = 1; - for (validator, weight) in &self.validators { - all_is.insert( - *validator, - Range { start: Participant::new(i).unwrap(), end: Participant::new(i + weight).unwrap() }, - ); - i += weight; - } - - let original_i = all_is.get(&key)?.clone(); - let mut result_i = original_i.clone(); - for removed_validator in removed_validators { - let removed_i = all_is - .get(removed_validator) - .expect("removed validator wasn't present in set to begin with"); - // If the queried key was removed, return None - if &original_i == removed_i { - return None; - } - - // If the removed was before the queried, shift the queried down accordingly - if removed_i.start < original_i.start { - let removed_shares = u16::from(removed_i.end) - u16::from(removed_i.start); - result_i.start = Participant::new(u16::from(original_i.start) - removed_shares).unwrap(); - result_i.end = Participant::new(u16::from(original_i.end) - removed_shares).unwrap(); - } - } - Some(result_i) - } - - pub fn reverse_lookup_i( - &self, - removed_validators: &[::G], - i: Participant, - ) -> Option<::G> { - for (validator, _) in &self.validators { - if self.i(removed_validators, *validator).map_or(false, |range| range.contains(&i)) { - return Some(*validator); - } - } - None - } - - pub fn validators(&self) -> Vec<(::G, u64)> { - self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect() - } -} diff --git a/coordinator/src/tributary/transaction.rs b/coordinator/src/tributary/transaction.rs deleted file mode 100644 index 8d8bdd4c..00000000 --- a/coordinator/src/tributary/transaction.rs +++ /dev/null @@ -1,715 +0,0 @@ -use core::{ops::Deref, fmt::Debug}; -use std::io; - -use zeroize::Zeroizing; -use rand_core::{RngCore, CryptoRng}; - -use blake2::{Digest, Blake2s256}; -use transcript::{Transcript, RecommendedTranscript}; - -use ciphersuite::{ - group::{ff::Field, GroupEncoding}, - Ciphersuite, Ristretto, -}; -use schnorr::SchnorrSignature; -use frost::Participant; - -use scale::{Encode, Decode}; -use processor_messages::coordinator::SubstrateSignableId; - -use tributary::{ - TRANSACTION_SIZE_LIMIT, ReadWrite, - transaction::{Signed, TransactionError, TransactionKind, Transaction as TransactionTrait}, -}; - -#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode)] -pub enum Label { - Preprocess, - Share, -} - -impl Label { - // TODO: Should nonces be u8 thanks to our use of topics? 
- pub fn nonce(&self) -> u32 { - match self { - Label::Preprocess => 0, - Label::Share => 1, - } - } -} - -#[derive(Clone, PartialEq, Eq)] -pub struct SignData { - pub plan: Id, - pub attempt: u32, - pub label: Label, - - pub data: Vec>, - - pub signed: Signed, -} - -impl Debug for SignData { - fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - fmt - .debug_struct("SignData") - .field("id", &hex::encode(self.plan.encode())) - .field("attempt", &self.attempt) - .field("label", &self.label) - .field("signer", &hex::encode(self.signed.signer.to_bytes())) - .finish_non_exhaustive() - } -} - -impl SignData { - pub(crate) fn read(reader: &mut R) -> io::Result { - let plan = Id::decode(&mut scale::IoReader(&mut *reader)) - .map_err(|_| io::Error::other("invalid plan in SignData"))?; - - let mut attempt = [0; 4]; - reader.read_exact(&mut attempt)?; - let attempt = u32::from_le_bytes(attempt); - - let mut label = [0; 1]; - reader.read_exact(&mut label)?; - let label = match label[0] { - 0 => Label::Preprocess, - 1 => Label::Share, - _ => Err(io::Error::other("invalid label in SignData"))?, - }; - - let data = { - let mut data_pieces = [0]; - reader.read_exact(&mut data_pieces)?; - if data_pieces[0] == 0 { - Err(io::Error::other("zero pieces of data in SignData"))?; - } - let mut all_data = vec![]; - for _ in 0 .. data_pieces[0] { - let mut data_len = [0; 2]; - reader.read_exact(&mut data_len)?; - let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))]; - reader.read_exact(&mut data)?; - all_data.push(data); - } - all_data - }; - - let signed = Signed::read_without_nonce(reader, label.nonce())?; - - Ok(SignData { plan, attempt, label, data, signed }) - } - - pub(crate) fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(&self.plan.encode())?; - writer.write_all(&self.attempt.to_le_bytes())?; - writer.write_all(&[match self.label { - Label::Preprocess => 0, - Label::Share => 1, - }])?; - - writer.write_all(&[u8::try_from(self.data.len()).unwrap()])?; - for data in &self.data { - if data.len() > u16::MAX.into() { - // Currently, the largest individual preprocess is a Monero transaction - // It provides 4 commitments per input (128 bytes), a 64-byte proof for them, along with a - // key image and proof (96 bytes) - // Even with all of that, we could support 227 inputs in a single TX - // Monero is limited to ~120 inputs per TX - // - // Bitcoin has a much higher input count of 520, yet it only uses 64 bytes per preprocess - Err(io::Error::other("signing data exceeded 65535 bytes"))?; - } - writer.write_all(&u16::try_from(data.len()).unwrap().to_le_bytes())?; - writer.write_all(data)?; - } - - self.signed.write_without_nonce(writer) - } -} - -#[derive(Clone, PartialEq, Eq)] -pub enum Transaction { - RemoveParticipantDueToDkg { - participant: ::G, - signed: Signed, - }, - - DkgCommitments { - attempt: u32, - commitments: Vec>, - signed: Signed, - }, - DkgShares { - attempt: u32, - // Sending Participant, Receiving Participant, Share - shares: Vec>>, - confirmation_nonces: [u8; 64], - signed: Signed, - }, - InvalidDkgShare { - attempt: u32, - accuser: Participant, - faulty: Participant, - blame: Option>, - signed: Signed, - }, - DkgConfirmed { - attempt: u32, - confirmation_share: [u8; 32], - signed: Signed, - }, - - // Co-sign a Substrate block. 
- CosignSubstrateBlock([u8; 32]), - - // When we have synchrony on a batch, we can allow signing it - // TODO (never?): This is less efficient compared to an ExternalBlock provided transaction, - // which would be binding over the block hash and automatically achieve synchrony on all - // relevant batches. ExternalBlock was removed for this due to complexity around the pipeline - // with the current processor, yet it would still be an improvement. - Batch { - block: [u8; 32], - batch: u32, - }, - // When a Serai block is finalized, with the contained batches, we can allow the associated plan - // IDs - SubstrateBlock(u64), - - SubstrateSign(SignData), - Sign(SignData<[u8; 32]>), - // This is defined as an Unsigned transaction in order to de-duplicate SignCompleted amongst - // reporters (who should all report the same thing) - // We do still track the signer in order to prevent a single signer from publishing arbitrarily - // many TXs without penalty - // Here, they're denoted as the first_signer, as only the signer of the first TX to be included - // with this pairing will be remembered on-chain - SignCompleted { - plan: [u8; 32], - tx_hash: Vec, - first_signer: ::G, - signature: SchnorrSignature, - }, - - SlashReport(Vec, Signed), -} - -impl Debug for Transaction { - fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - match self { - Transaction::RemoveParticipantDueToDkg { participant, signed } => fmt - .debug_struct("Transaction::RemoveParticipantDueToDkg") - .field("participant", &hex::encode(participant.to_bytes())) - .field("signer", &hex::encode(signed.signer.to_bytes())) - .finish_non_exhaustive(), - Transaction::DkgCommitments { attempt, commitments: _, signed } => fmt - .debug_struct("Transaction::DkgCommitments") - .field("attempt", attempt) - .field("signer", &hex::encode(signed.signer.to_bytes())) - .finish_non_exhaustive(), - Transaction::DkgShares { attempt, signed, .. } => fmt - .debug_struct("Transaction::DkgShares") - .field("attempt", attempt) - .field("signer", &hex::encode(signed.signer.to_bytes())) - .finish_non_exhaustive(), - Transaction::InvalidDkgShare { attempt, accuser, faulty, .. } => fmt - .debug_struct("Transaction::InvalidDkgShare") - .field("attempt", attempt) - .field("accuser", accuser) - .field("faulty", faulty) - .finish_non_exhaustive(), - Transaction::DkgConfirmed { attempt, confirmation_share: _, signed } => fmt - .debug_struct("Transaction::DkgConfirmed") - .field("attempt", attempt) - .field("signer", &hex::encode(signed.signer.to_bytes())) - .finish_non_exhaustive(), - Transaction::CosignSubstrateBlock(block) => fmt - .debug_struct("Transaction::CosignSubstrateBlock") - .field("block", &hex::encode(block)) - .finish(), - Transaction::Batch { block, batch } => fmt - .debug_struct("Transaction::Batch") - .field("block", &hex::encode(block)) - .field("batch", &batch) - .finish(), - Transaction::SubstrateBlock(block) => { - fmt.debug_struct("Transaction::SubstrateBlock").field("block", block).finish() - } - Transaction::SubstrateSign(sign_data) => { - fmt.debug_struct("Transaction::SubstrateSign").field("sign_data", sign_data).finish() - } - Transaction::Sign(sign_data) => { - fmt.debug_struct("Transaction::Sign").field("sign_data", sign_data).finish() - } - Transaction::SignCompleted { plan, tx_hash, .. 
} => fmt - .debug_struct("Transaction::SignCompleted") - .field("plan", &hex::encode(plan)) - .field("tx_hash", &hex::encode(tx_hash)) - .finish_non_exhaustive(), - Transaction::SlashReport(points, signed) => fmt - .debug_struct("Transaction::SignCompleted") - .field("points", points) - .field("signed", signed) - .finish(), - } - } -} - -impl ReadWrite for Transaction { - fn read(reader: &mut R) -> io::Result { - let mut kind = [0]; - reader.read_exact(&mut kind)?; - - match kind[0] { - 0 => Ok(Transaction::RemoveParticipantDueToDkg { - participant: Ristretto::read_G(reader)?, - signed: Signed::read_without_nonce(reader, 0)?, - }), - - 1 => { - let mut attempt = [0; 4]; - reader.read_exact(&mut attempt)?; - let attempt = u32::from_le_bytes(attempt); - - let commitments = { - let mut commitments_len = [0; 1]; - reader.read_exact(&mut commitments_len)?; - let commitments_len = usize::from(commitments_len[0]); - if commitments_len == 0 { - Err(io::Error::other("zero commitments in DkgCommitments"))?; - } - - let mut each_commitments_len = [0; 2]; - reader.read_exact(&mut each_commitments_len)?; - let each_commitments_len = usize::from(u16::from_le_bytes(each_commitments_len)); - if (commitments_len * each_commitments_len) > TRANSACTION_SIZE_LIMIT { - Err(io::Error::other( - "commitments present in transaction exceeded transaction size limit", - ))?; - } - let mut commitments = vec![vec![]; commitments_len]; - for commitments in &mut commitments { - *commitments = vec![0; each_commitments_len]; - reader.read_exact(commitments)?; - } - commitments - }; - - let signed = Signed::read_without_nonce(reader, 0)?; - - Ok(Transaction::DkgCommitments { attempt, commitments, signed }) - } - - 2 => { - let mut attempt = [0; 4]; - reader.read_exact(&mut attempt)?; - let attempt = u32::from_le_bytes(attempt); - - let shares = { - let mut share_quantity = [0; 1]; - reader.read_exact(&mut share_quantity)?; - - let mut key_share_quantity = [0; 1]; - reader.read_exact(&mut key_share_quantity)?; - - let mut share_len = [0; 2]; - reader.read_exact(&mut share_len)?; - let share_len = usize::from(u16::from_le_bytes(share_len)); - - let mut all_shares = vec![]; - for _ in 0 .. share_quantity[0] { - let mut shares = vec![]; - for _ in 0 .. 
key_share_quantity[0] { - let mut share = vec![0; share_len]; - reader.read_exact(&mut share)?; - shares.push(share); - } - all_shares.push(shares); - } - all_shares - }; - - let mut confirmation_nonces = [0; 64]; - reader.read_exact(&mut confirmation_nonces)?; - - let signed = Signed::read_without_nonce(reader, 1)?; - - Ok(Transaction::DkgShares { attempt, shares, confirmation_nonces, signed }) - } - - 3 => { - let mut attempt = [0; 4]; - reader.read_exact(&mut attempt)?; - let attempt = u32::from_le_bytes(attempt); - - let mut accuser = [0; 2]; - reader.read_exact(&mut accuser)?; - let accuser = Participant::new(u16::from_le_bytes(accuser)) - .ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?; - - let mut faulty = [0; 2]; - reader.read_exact(&mut faulty)?; - let faulty = Participant::new(u16::from_le_bytes(faulty)) - .ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?; - - let mut blame_len = [0; 2]; - reader.read_exact(&mut blame_len)?; - let mut blame = vec![0; u16::from_le_bytes(blame_len).into()]; - reader.read_exact(&mut blame)?; - - // This shares a nonce with DkgConfirmed as only one is expected - let signed = Signed::read_without_nonce(reader, 2)?; - - Ok(Transaction::InvalidDkgShare { - attempt, - accuser, - faulty, - blame: Some(blame).filter(|blame| !blame.is_empty()), - signed, - }) - } - - 4 => { - let mut attempt = [0; 4]; - reader.read_exact(&mut attempt)?; - let attempt = u32::from_le_bytes(attempt); - - let mut confirmation_share = [0; 32]; - reader.read_exact(&mut confirmation_share)?; - - let signed = Signed::read_without_nonce(reader, 2)?; - - Ok(Transaction::DkgConfirmed { attempt, confirmation_share, signed }) - } - - 5 => { - let mut block = [0; 32]; - reader.read_exact(&mut block)?; - Ok(Transaction::CosignSubstrateBlock(block)) - } - - 6 => { - let mut block = [0; 32]; - reader.read_exact(&mut block)?; - let mut batch = [0; 4]; - reader.read_exact(&mut batch)?; - Ok(Transaction::Batch { block, batch: u32::from_le_bytes(batch) }) - } - - 7 => { - let mut block = [0; 8]; - reader.read_exact(&mut block)?; - Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block))) - } - - 8 => SignData::read(reader).map(Transaction::SubstrateSign), - 9 => SignData::read(reader).map(Transaction::Sign), - - 10 => { - let mut plan = [0; 32]; - reader.read_exact(&mut plan)?; - - let mut tx_hash_len = [0]; - reader.read_exact(&mut tx_hash_len)?; - let mut tx_hash = vec![0; usize::from(tx_hash_len[0])]; - reader.read_exact(&mut tx_hash)?; - - let first_signer = Ristretto::read_G(reader)?; - let signature = SchnorrSignature::::read(reader)?; - - Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature }) - } - - 11 => { - let mut len = [0]; - reader.read_exact(&mut len)?; - let len = len[0]; - // If the set has as many validators as MAX_KEY_SHARES_PER_SET, then the amount of distinct - // validators (the amount of validators reported on) will be at most - // `MAX_KEY_SHARES_PER_SET - 1` - if u32::from(len) > (serai_client::validator_sets::primitives::MAX_KEY_SHARES_PER_SET - 1) { - Err(io::Error::other("more points reported than allowed validator"))?; - } - let mut points = vec![0u32; len.into()]; - for points in &mut points { - let mut these_points = [0; 4]; - reader.read_exact(&mut these_points)?; - *points = u32::from_le_bytes(these_points); - } - Ok(Transaction::SlashReport(points, Signed::read_without_nonce(reader, 0)?)) - } - - _ => Err(io::Error::other("invalid transaction type")), - } - } - - fn write(&self, writer: 
&mut W) -> io::Result<()> { - match self { - Transaction::RemoveParticipantDueToDkg { participant, signed } => { - writer.write_all(&[0])?; - writer.write_all(&participant.to_bytes())?; - signed.write_without_nonce(writer) - } - - Transaction::DkgCommitments { attempt, commitments, signed } => { - writer.write_all(&[1])?; - writer.write_all(&attempt.to_le_bytes())?; - if commitments.is_empty() { - Err(io::Error::other("zero commitments in DkgCommitments"))? - } - writer.write_all(&[u8::try_from(commitments.len()).unwrap()])?; - for commitments_i in commitments { - if commitments_i.len() != commitments[0].len() { - Err(io::Error::other("commitments of differing sizes in DkgCommitments"))? - } - } - writer.write_all(&u16::try_from(commitments[0].len()).unwrap().to_le_bytes())?; - for commitments in commitments { - writer.write_all(commitments)?; - } - signed.write_without_nonce(writer) - } - - Transaction::DkgShares { attempt, shares, confirmation_nonces, signed } => { - writer.write_all(&[2])?; - writer.write_all(&attempt.to_le_bytes())?; - - // `shares` is a Vec which is supposed to map to a HashMap>. Since we - // bound participants to 150, this conversion is safe if a valid in-memory transaction. - writer.write_all(&[u8::try_from(shares.len()).unwrap()])?; - // This assumes at least one share is being sent to another party - writer.write_all(&[u8::try_from(shares[0].len()).unwrap()])?; - let share_len = shares[0][0].len(); - // For BLS12-381 G2, this would be: - // - A 32-byte share - // - A 96-byte ephemeral key - // - A 128-byte signature - // Hence why this has to be u16 - writer.write_all(&u16::try_from(share_len).unwrap().to_le_bytes())?; - - for these_shares in shares { - assert_eq!(these_shares.len(), shares[0].len(), "amount of sent shares was variable"); - for share in these_shares { - assert_eq!(share.len(), share_len, "sent shares were of variable length"); - writer.write_all(share)?; - } - } - - writer.write_all(confirmation_nonces)?; - signed.write_without_nonce(writer) - } - - Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => { - writer.write_all(&[3])?; - writer.write_all(&attempt.to_le_bytes())?; - writer.write_all(&u16::from(*accuser).to_le_bytes())?; - writer.write_all(&u16::from(*faulty).to_le_bytes())?; - - // Flattens Some(vec![]) to None on the expectation no actual blame will be 0-length - assert!(blame.as_ref().map_or(1, Vec::len) != 0); - let blame_len = - u16::try_from(blame.as_ref().unwrap_or(&vec![]).len()).expect("blame exceeded 64 KB"); - writer.write_all(&blame_len.to_le_bytes())?; - writer.write_all(blame.as_ref().unwrap_or(&vec![]))?; - - signed.write_without_nonce(writer) - } - - Transaction::DkgConfirmed { attempt, confirmation_share, signed } => { - writer.write_all(&[4])?; - writer.write_all(&attempt.to_le_bytes())?; - writer.write_all(confirmation_share)?; - signed.write_without_nonce(writer) - } - - Transaction::CosignSubstrateBlock(block) => { - writer.write_all(&[5])?; - writer.write_all(block) - } - - Transaction::Batch { block, batch } => { - writer.write_all(&[6])?; - writer.write_all(block)?; - writer.write_all(&batch.to_le_bytes()) - } - - Transaction::SubstrateBlock(block) => { - writer.write_all(&[7])?; - writer.write_all(&block.to_le_bytes()) - } - - Transaction::SubstrateSign(data) => { - writer.write_all(&[8])?; - data.write(writer) - } - Transaction::Sign(data) => { - writer.write_all(&[9])?; - data.write(writer) - } - Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => { - 
writer.write_all(&[10])?; - writer.write_all(plan)?; - writer - .write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceed 255 bytes")])?; - writer.write_all(tx_hash)?; - writer.write_all(&first_signer.to_bytes())?; - signature.write(writer) - } - Transaction::SlashReport(points, signed) => { - writer.write_all(&[11])?; - writer.write_all(&[u8::try_from(points.len()).unwrap()])?; - for points in points { - writer.write_all(&points.to_le_bytes())?; - } - signed.write_without_nonce(writer) - } - } - } -} - -impl TransactionTrait for Transaction { - fn kind(&self) -> TransactionKind<'_> { - match self { - Transaction::RemoveParticipantDueToDkg { participant, signed } => { - TransactionKind::Signed((b"remove", participant.to_bytes()).encode(), signed) - } - - Transaction::DkgCommitments { attempt, commitments: _, signed } | - Transaction::DkgShares { attempt, signed, .. } | - Transaction::InvalidDkgShare { attempt, signed, .. } | - Transaction::DkgConfirmed { attempt, signed, .. } => { - TransactionKind::Signed((b"dkg", attempt).encode(), signed) - } - - Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"), - - Transaction::Batch { .. } => TransactionKind::Provided("batch"), - Transaction::SubstrateBlock(_) => TransactionKind::Provided("serai"), - - Transaction::SubstrateSign(data) => { - TransactionKind::Signed((b"substrate", data.plan, data.attempt).encode(), &data.signed) - } - Transaction::Sign(data) => { - TransactionKind::Signed((b"sign", data.plan, data.attempt).encode(), &data.signed) - } - Transaction::SignCompleted { .. } => TransactionKind::Unsigned, - - Transaction::SlashReport(_, signed) => { - TransactionKind::Signed(b"slash_report".to_vec(), signed) - } - } - } - - fn hash(&self) -> [u8; 32] { - let mut tx = self.serialize(); - if let TransactionKind::Signed(_, signed) = self.kind() { - // Make sure the part we're cutting off is the signature - assert_eq!(tx.drain((tx.len() - 64) ..).collect::>(), signed.signature.serialize()); - } - Blake2s256::digest([b"Coordinator Tributary Transaction".as_slice(), &tx].concat()).into() - } - - fn verify(&self) -> Result<(), TransactionError> { - // TODO: Check SubstrateSign's lengths here - - if let Transaction::SignCompleted { first_signer, signature, .. } = self { - if !signature.verify(*first_signer, self.sign_completed_challenge()) { - Err(TransactionError::InvalidContent)?; - } - } - - Ok(()) - } -} - -impl Transaction { - // Used to initially construct transactions so we can then get sig hashes and perform signing - pub fn empty_signed() -> Signed { - Signed { - signer: Ristretto::generator(), - nonce: 0, - signature: SchnorrSignature:: { - R: Ristretto::generator(), - s: ::F::ZERO, - }, - } - } - - // Sign a transaction - pub fn sign( - &mut self, - rng: &mut R, - genesis: [u8; 32], - key: &Zeroizing<::F>, - ) { - fn signed(tx: &mut Transaction) -> (u32, &mut Signed) { - #[allow(clippy::match_same_arms)] // Doesn't make semantic sense here - let nonce = match tx { - Transaction::RemoveParticipantDueToDkg { .. } => 0, - - Transaction::DkgCommitments { .. } => 0, - Transaction::DkgShares { .. } => 1, - Transaction::InvalidDkgShare { .. } | Transaction::DkgConfirmed { .. } => 2, - - Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"), - - Transaction::Batch { .. 
} => panic!("signing Batch"), - Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"), - - Transaction::SubstrateSign(data) => data.label.nonce(), - Transaction::Sign(data) => data.label.nonce(), - - Transaction::SignCompleted { .. } => panic!("signing SignCompleted"), - - Transaction::SlashReport(_, _) => 0, - }; - - ( - nonce, - #[allow(clippy::match_same_arms)] - match tx { - Transaction::RemoveParticipantDueToDkg { ref mut signed, .. } | - Transaction::DkgCommitments { ref mut signed, .. } | - Transaction::DkgShares { ref mut signed, .. } | - Transaction::InvalidDkgShare { ref mut signed, .. } | - Transaction::DkgConfirmed { ref mut signed, .. } => signed, - - Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"), - - Transaction::Batch { .. } => panic!("signing Batch"), - Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"), - - Transaction::SubstrateSign(ref mut data) => &mut data.signed, - Transaction::Sign(ref mut data) => &mut data.signed, - - Transaction::SignCompleted { .. } => panic!("signing SignCompleted"), - - Transaction::SlashReport(_, ref mut signed) => signed, - }, - ) - } - - let (nonce, signed_ref) = signed(self); - signed_ref.signer = Ristretto::generator() * key.deref(); - signed_ref.nonce = nonce; - - let sig_nonce = Zeroizing::new(::F::random(rng)); - signed(self).1.signature.R = ::generator() * sig_nonce.deref(); - let sig_hash = self.sig_hash(genesis); - signed(self).1.signature = SchnorrSignature::::sign(key, sig_nonce, sig_hash); - } - - pub fn sign_completed_challenge(&self) -> ::F { - if let Transaction::SignCompleted { plan, tx_hash, first_signer, signature } = self { - let mut transcript = - RecommendedTranscript::new(b"Coordinator Tributary Transaction SignCompleted"); - transcript.append_message(b"plan", plan); - transcript.append_message(b"tx_hash", tx_hash); - transcript.append_message(b"signer", first_signer.to_bytes()); - transcript.append_message(b"nonce", signature.R.to_bytes()); - Ristretto::hash_to_F(b"SignCompleted signature", &transcript.challenge(b"challenge")) - } else { - panic!("sign_completed_challenge called on transaction which wasn't SignCompleted") - } - } -} diff --git a/coordinator/substrate/Cargo.toml b/coordinator/substrate/Cargo.toml new file mode 100644 index 00000000..c733cc31 --- /dev/null +++ b/coordinator/substrate/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "serai-coordinator-substrate" +version = "0.1.0" +description = "Serai Coordinator's Substrate Scanner" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/substrate" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false +rust-version = "1.81" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +bitvec = { version = "1", default-features = false, features = ["std"] } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] } + +serai-client = { path = "../../substrate/client", version = "0.1", default-features = false, features = ["serai", "borsh"] } + +log = { version = "0.4", default-features = false, features = ["std"] } + +futures = { version = "0.3", default-features = false, features = ["std"] } +tokio = { 
version = "1", default-features = false }
+
+serai-db = { path = "../../common/db", version = "0.1.1" }
+serai-task = { path = "../../common/task", version = "0.1" }
+
+serai-cosign = { path = "../cosign", version = "0.1" }
+
+messages = { package = "serai-processor-messages", version = "0.1", path = "../../processor/messages" }
diff --git a/coordinator/substrate/LICENSE b/coordinator/substrate/LICENSE
new file mode 100644
index 00000000..26d57cbb
--- /dev/null
+++ b/coordinator/substrate/LICENSE
@@ -0,0 +1,15 @@
+AGPL-3.0-only license
+
+Copyright (c) 2023-2024 Luke Parker
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License Version 3 as
+published by the Free Software Foundation.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see .
diff --git a/coordinator/substrate/README.md b/coordinator/substrate/README.md
new file mode 100644
index 00000000..1bce3218
--- /dev/null
+++ b/coordinator/substrate/README.md
@@ -0,0 +1,20 @@
+# Serai Coordinator Substrate
+
+This crate manages the Serai coordinator's interactions with Serai's Substrate blockchain.
+
+Two event streams are defined:
+
+- Canonical events, which must be handled by every validator, regardless of the sets they're present
+  in. These are represented by `serai_processor_messages::substrate::CoordinatorMessage`.
+- Ephemeral events, which only need to be handled by the validators present within the sets they
+  relate to. These are represented by two channels, `NewSet` and `SignSlashReport`.
+
+The canonical event stream is available without provision of a validator's public key. The ephemeral
+event stream requires provision of a validator's public key. Both are ordered within themselves, yet
+there are no ordering guarantees across the two.
+
+Additionally, a collection of tasks is defined to publish data onto Serai:
+
+- `SetKeysTask`, which sets the keys generated via DKGs onto Serai.
+- `PublishBatchTask`, which publishes `Batch`s onto Serai.
+- `PublishSlashReportTask`, which publishes `SlashReport`s onto Serai.
diff --git a/coordinator/substrate/src/canonical.rs b/coordinator/substrate/src/canonical.rs
new file mode 100644
index 00000000..81a8ccce
--- /dev/null
+++ b/coordinator/substrate/src/canonical.rs
@@ -0,0 +1,225 @@
+use core::future::Future;
+use std::sync::Arc;
+
+use futures::stream::{StreamExt, FuturesOrdered};
+
+use serai_client::{validator_sets::primitives::ExternalValidatorSet, Serai};
+
+use messages::substrate::{InInstructionResult, ExecutedBatch, CoordinatorMessage};
+
+use serai_db::*;
+use serai_task::ContinuallyRan;
+
+use serai_cosign::Cosigning;
+
+create_db!(
+  CoordinatorSubstrateCanonical {
+    NextBlock: () -> u64,
+  }
+);
+
+/// The event stream for canonical events.
+pub struct CanonicalEventStream {
+  db: D,
+  serai: Arc,
+}
+
+impl CanonicalEventStream {
+  /// Create a new canonical event stream.
+  ///
+  /// Only one of these may exist over the provided database.
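+  ///
+  /// A minimal usage sketch; how `db` and `serai` are obtained, and how the task is scheduled,
+  /// are assumptions here rather than anything defined by this crate:
+  ///
+  /// ```ignore
+  /// let mut stream = CanonicalEventStream::new(db.clone(), serai.clone());
+  /// loop {
+  ///   // Scans any newly cosigned blocks, writing their events to the `Canonical` channel
+  ///   // (drained elsewhere via `Canonical::try_recv`)
+  ///   let made_progress = stream.run_iteration().await?;
+  ///   if !made_progress {
+  ///     // Illustrative back-off while waiting for further cosigned blocks
+  ///     tokio::time::sleep(core::time::Duration::from_secs(6)).await;
+  ///   }
+  /// }
+  /// ```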
+  pub fn new(db: D, serai: Arc) -> Self {
+    Self { db, serai }
+  }
+}
+
+impl ContinuallyRan for CanonicalEventStream {
+  type Error = String;
+
+  fn run_iteration(&mut self) -> impl Send + Future> {
+    async move {
+      let next_block = NextBlock::get(&self.db).unwrap_or(0);
+      let latest_finalized_block =
+        Cosigning::::latest_cosigned_block_number(&self.db).map_err(|e| format!("{e:?}"))?;
+
+      // These are all the events which generate canonical messages
+      struct CanonicalEvents {
+        time: u64,
+        key_gen_events: Vec,
+        set_retired_events: Vec,
+        batch_events: Vec,
+        burn_events: Vec,
+      }
+
+      // For a cosigned block, fetch all relevant events
+      let scan = {
+        let db = self.db.clone();
+        let serai = &self.serai;
+        move |block_number| {
+          let block_hash = Cosigning::::cosigned_block(&db, block_number);
+
+          async move {
+            let block_hash = match block_hash {
+              Ok(Some(block_hash)) => block_hash,
+              Ok(None) => {
+                panic!("iterating to latest cosigned block but couldn't get cosigned block")
+              }
+              Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()),
+            };
+            let temporal_serai = serai.as_of(block_hash);
+            let temporal_serai_validators = temporal_serai.validator_sets();
+            let temporal_serai_instructions = temporal_serai.in_instructions();
+            let temporal_serai_coins = temporal_serai.coins();
+
+            let (block, key_gen_events, set_retired_events, batch_events, burn_events) =
+              tokio::try_join!(
+                serai.block(block_hash),
+                temporal_serai_validators.key_gen_events(),
+                temporal_serai_validators.set_retired_events(),
+                temporal_serai_instructions.batch_events(),
+                temporal_serai_coins.burn_with_instruction_events(),
+              )
+              .map_err(|e| format!("{e:?}"))?;
+            let Some(block) = block else {
+              Err(format!("Serai node didn't have cosigned block #{block_number}"))?
+            };
+
+            let time = if block_number == 0 {
+              block.time().unwrap_or(0)
+            } else {
+              // Serai's block time is in milliseconds
+              block
+                .time()
+                .ok_or_else(|| "non-genesis Serai block didn't have a time".to_string())? /
+                1000
+            };
+
+            Ok((
+              block_number,
+              CanonicalEvents {
+                time,
+                key_gen_events,
+                set_retired_events,
+                batch_events,
+                burn_events,
+              },
+            ))
+          }
+        }
+      };
+
+      // Sync the next set of upcoming blocks all at once to minimize latency
+      const BLOCKS_TO_SYNC_AT_ONCE: u64 = 10;
+      // FuturesOrdered can be bad practice due to potentially causing timeouts if it isn't
+      // sufficiently polled. Considering our processing loop is minimal and it does poll this,
+      // it's fine.
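+      // The window below keeps up to `BLOCKS_TO_SYNC_AT_ONCE + 1` scans in flight: it's seeded
+      // here, then refilled as each block is handled, so fetching events for upcoming blocks
+      // overlaps with processing while results are still consumed strictly in order.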
+ let mut set = FuturesOrdered::new(); + for block_number in + next_block ..= latest_finalized_block.min(next_block + BLOCKS_TO_SYNC_AT_ONCE) + { + set.push_back(scan(block_number)); + } + + for block_number in next_block ..= latest_finalized_block { + // Get the next block in our queue + let (popped_block_number, block) = set.next().await.unwrap()?; + assert_eq!(block_number, popped_block_number); + // Re-populate the queue + if (block_number + BLOCKS_TO_SYNC_AT_ONCE) <= latest_finalized_block { + set.push_back(scan(block_number + BLOCKS_TO_SYNC_AT_ONCE)); + } + + let mut txn = self.db.txn(); + + for key_gen in block.key_gen_events { + let serai_client::validator_sets::ValidatorSetsEvent::KeyGen { set, key_pair } = &key_gen + else { + panic!("KeyGen event wasn't a KeyGen event: {key_gen:?}"); + }; + crate::Canonical::send( + &mut txn, + set.network, + &CoordinatorMessage::SetKeys { + serai_time: block.time, + session: set.session, + key_pair: key_pair.clone(), + }, + ); + } + + for set_retired in block.set_retired_events { + let serai_client::validator_sets::ValidatorSetsEvent::SetRetired { set } = &set_retired + else { + panic!("SetRetired event wasn't a SetRetired event: {set_retired:?}"); + }; + let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue }; + crate::Canonical::send( + &mut txn, + set.network, + &CoordinatorMessage::SlashesReported { session: set.session }, + ); + } + + for network in serai_client::primitives::EXTERNAL_NETWORKS { + let mut batch = None; + for this_batch in &block.batch_events { + let serai_client::in_instructions::InInstructionsEvent::Batch { + network: batch_network, + publishing_session, + id, + external_network_block_hash, + in_instructions_hash, + in_instruction_results, + } = this_batch + else { + panic!("Batch event wasn't a Batch event: {this_batch:?}"); + }; + if network == *batch_network { + if batch.is_some() { + Err("Serai block had multiple batches for the same network".to_string())?; + } + batch = Some(ExecutedBatch { + id: *id, + publisher: *publishing_session, + external_network_block_hash: external_network_block_hash.0, + in_instructions_hash: *in_instructions_hash, + in_instruction_results: in_instruction_results + .iter() + .map(|bit| { + if *bit { + InInstructionResult::Succeeded + } else { + InInstructionResult::Failed + } + }) + .collect(), + }); + } + } + + let mut burns = vec![]; + for burn in &block.burn_events { + let serai_client::coins::CoinsEvent::BurnWithInstruction { from: _, instruction } = + &burn + else { + panic!("BurnWithInstruction event wasn't a BurnWithInstruction event: {burn:?}"); + }; + if instruction.balance.coin.network() == network { + burns.push(instruction.clone()); + } + } + + crate::Canonical::send( + &mut txn, + network, + &CoordinatorMessage::Block { serai_block_number: block_number, batch, burns }, + ); + } + + txn.commit(); + } + + Ok(next_block <= latest_finalized_block) + } + } +} diff --git a/coordinator/substrate/src/ephemeral.rs b/coordinator/substrate/src/ephemeral.rs new file mode 100644 index 00000000..cb6e14cd --- /dev/null +++ b/coordinator/substrate/src/ephemeral.rs @@ -0,0 +1,256 @@ +use core::future::Future; +use std::sync::Arc; + +use futures::stream::{StreamExt, FuturesOrdered}; + +use serai_client::{ + primitives::{SeraiAddress, EmbeddedEllipticCurve}, + validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet}, + Serai, +}; + +use serai_db::*; +use serai_task::ContinuallyRan; + +use serai_cosign::Cosigning; + +use crate::NewSetInformation; + +create_db!( + 
CoordinatorSubstrateEphemeral {
+    NextBlock: () -> u64,
+  }
+);
+
+/// The event stream for ephemeral events.
+pub struct EphemeralEventStream {
+  db: D,
+  serai: Arc,
+  validator: SeraiAddress,
+}
+
+impl EphemeralEventStream {
+  /// Create a new ephemeral event stream.
+  ///
+  /// Only one of these may exist over the provided database.
+  pub fn new(db: D, serai: Arc, validator: SeraiAddress) -> Self {
+    Self { db, serai, validator }
+  }
+}
+
+impl ContinuallyRan for EphemeralEventStream {
+  type Error = String;
+
+  fn run_iteration(&mut self) -> impl Send + Future> {
+    async move {
+      let next_block = NextBlock::get(&self.db).unwrap_or(0);
+      let latest_finalized_block =
+        Cosigning::::latest_cosigned_block_number(&self.db).map_err(|e| format!("{e:?}"))?;
+
+      // These are all the events which feed the ephemeral event stream
+      struct EphemeralEvents {
+        block_hash: [u8; 32],
+        time: u64,
+        new_set_events: Vec,
+        accepted_handover_events: Vec,
+      }
+
+      // For a cosigned block, fetch all relevant events
+      let scan = {
+        let db = self.db.clone();
+        let serai = &self.serai;
+        move |block_number| {
+          let block_hash = Cosigning::::cosigned_block(&db, block_number);
+
+          async move {
+            let block_hash = match block_hash {
+              Ok(Some(block_hash)) => block_hash,
+              Ok(None) => {
+                panic!("iterating to latest cosigned block but couldn't get cosigned block")
+              }
+              Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()),
+            };
+
+            let temporal_serai = serai.as_of(block_hash);
+            let temporal_serai_validators = temporal_serai.validator_sets();
+            let (block, new_set_events, accepted_handover_events) = tokio::try_join!(
+              serai.block(block_hash),
+              temporal_serai_validators.new_set_events(),
+              temporal_serai_validators.accepted_handover_events(),
+            )
+            .map_err(|e| format!("{e:?}"))?;
+            let Some(block) = block else {
+              Err(format!("Serai node didn't have cosigned block #{block_number}"))?
+            };
+
+            let time = if block_number == 0 {
+              block.time().unwrap_or(0)
+            } else {
+              // Serai's block time is in milliseconds
+              block
+                .time()
+                .ok_or_else(|| "non-genesis Serai block didn't have a time".to_string())? /
+                1000
+            };
+
+            Ok((
+              block_number,
+              EphemeralEvents { block_hash, time, new_set_events, accepted_handover_events },
+            ))
+          }
+        }
+      };
+
+      // Sync the next set of upcoming blocks all at once to minimize latency
+      const BLOCKS_TO_SYNC_AT_ONCE: u64 = 50;
+      // FuturesOrdered can be bad practice due to potentially causing timeouts if it isn't
+      // sufficiently polled. Our processing loop isn't minimal, itself making multiple requests,
+      // but the loop body should only be executed a few times a week. It's better to get through
+      // most blocks with this optimization, and have timeouts a few times a week, than not have
+      // this at all.
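+      // Seed a window of up to `BLOCKS_TO_SYNC_AT_ONCE + 1` in-flight scans, refilled as each
+      // block is handled, so fetching upcoming blocks' events overlaps with the per-validator
+      // key look-ups performed below.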
+ let mut set = FuturesOrdered::new(); + for block_number in + next_block ..= latest_finalized_block.min(next_block + BLOCKS_TO_SYNC_AT_ONCE) + { + set.push_back(scan(block_number)); + } + + for block_number in next_block ..= latest_finalized_block { + // Get the next block in our queue + let (popped_block_number, block) = set.next().await.unwrap()?; + assert_eq!(block_number, popped_block_number); + // Re-populate the queue + if (block_number + BLOCKS_TO_SYNC_AT_ONCE) <= latest_finalized_block { + set.push_back(scan(block_number + BLOCKS_TO_SYNC_AT_ONCE)); + } + + let mut txn = self.db.txn(); + + for new_set in block.new_set_events { + let serai_client::validator_sets::ValidatorSetsEvent::NewSet { set } = &new_set else { + panic!("NewSet event wasn't a NewSet event: {new_set:?}"); + }; + // We only coordinate over external networks + let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue }; + + let serai = self.serai.as_of(block.block_hash); + let serai = serai.validator_sets(); + let Some(validators) = + serai.participants(set.network.into()).await.map_err(|e| format!("{e:?}"))? + else { + Err(format!( + "block #{block_number} declared a new set but didn't have the participants" + ))? + }; + let validators = validators + .into_iter() + .map(|(validator, weight)| (SeraiAddress::from(validator), weight)) + .collect::>(); + let in_set = validators.iter().any(|(validator, _)| *validator == self.validator); + if in_set { + if u16::try_from(validators.len()).is_err() { + Err("more than u16::MAX validators sent")?; + } + + let Ok(validators) = validators + .into_iter() + .map(|(validator, weight)| u16::try_from(weight).map(|weight| (validator, weight))) + .collect::, _>>() + else { + Err("validator's weight exceeded u16::MAX".to_string())? + }; + + // Do the summation in u32 so we don't risk a u16 overflow + let total_weight = validators.iter().map(|(_, weight)| u32::from(*weight)).sum::(); + if total_weight > u32::from(MAX_KEY_SHARES_PER_SET) { + Err(format!( + "{set:?} has {total_weight} key shares when the max is {MAX_KEY_SHARES_PER_SET}" + ))?; + } + let total_weight = u16::try_from(total_weight).unwrap(); + + // Fetch all of the validators' embedded elliptic curve keys + let mut embedded_elliptic_curve_keys = FuturesOrdered::new(); + for (validator, _) in &validators { + let validator = *validator; + // try_join doesn't return a future so we need to wrap it in this additional async + // block + embedded_elliptic_curve_keys.push_back(async move { + tokio::try_join!( + // One future to fetch the substrate embedded key + serai.embedded_elliptic_curve_key( + validator.into(), + EmbeddedEllipticCurve::Embedwards25519 + ), + // One future to fetch the external embedded key, if there is a distinct curve + async { + // `embedded_elliptic_curves` is documented to have the second entry be the + // network-specific curve (if it exists and is distinct from Embedwards25519) + if let Some(curve) = set.network.embedded_elliptic_curves().get(1) { + serai.embedded_elliptic_curve_key(validator.into(), *curve).await.map(Some) + } else { + Ok(None) + } + } + ) + .map(|(substrate_embedded_key, external_embedded_key)| { + (validator, substrate_embedded_key, external_embedded_key) + }) + }); + } + + let mut evrf_public_keys = Vec::with_capacity(usize::from(total_weight)); + for (validator, weight) in &validators { + let (future_validator, substrate_embedded_key, external_embedded_key) = + embedded_elliptic_curve_keys.next().await.unwrap().map_err(|e| format!("{e:?}"))?; + assert_eq!(*validator, 
future_validator);
+              let external_embedded_key =
+                external_embedded_key.unwrap_or(substrate_embedded_key.clone());
+              match (substrate_embedded_key, external_embedded_key) {
+                (Some(substrate_embedded_key), Some(external_embedded_key)) => {
+                  let substrate_embedded_key = <[u8; 32]>::try_from(substrate_embedded_key)
+                    .map_err(|_| "Embedwards25519 key wasn't 32 bytes".to_string())?;
+                  for _ in 0 .. *weight {
+                    evrf_public_keys.push((substrate_embedded_key, external_embedded_key.clone()));
+                  }
+                }
+                _ => Err("NewSet with validator missing an embedded key".to_string())?,
+              }
+            }
+
+            let mut new_set = NewSetInformation {
+              set,
+              serai_block: block.block_hash,
+              declaration_time: block.time,
+              // TODO: This should be inlined into the Processor's key gen code
+              // It's legacy from when we removed participants from the key gen
+              threshold: ((total_weight * 2) / 3) + 1,
+              validators,
+              evrf_public_keys,
+              participant_indexes: Default::default(),
+              participant_indexes_reverse_lookup: Default::default(),
+            };
+            // These aren't serialized, and we immediately serialize and drop this, so this isn't
+            // necessary. It's just good practice not to have this be dirty
+            new_set.init_participant_indexes();
+            crate::NewSet::send(&mut txn, &new_set);
+          }
+        }
+
+        for accepted_handover in block.accepted_handover_events {
+          let serai_client::validator_sets::ValidatorSetsEvent::AcceptedHandover { set } =
+            &accepted_handover
+          else {
+            panic!("AcceptedHandover event wasn't an AcceptedHandover event: {accepted_handover:?}");
+          };
+          let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
+          crate::SignSlashReport::send(&mut txn, set);
+        }
+
+        txn.commit();
+      }
+
+      Ok(next_block <= latest_finalized_block)
+    }
+  }
+}
diff --git a/coordinator/substrate/src/lib.rs b/coordinator/substrate/src/lib.rs
new file mode 100644
index 00000000..902234dc
--- /dev/null
+++ b/coordinator/substrate/src/lib.rs
@@ -0,0 +1,238 @@
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![doc = include_str!("../README.md")]
+#![deny(missing_docs)]
+
+use std::collections::HashMap;
+
+use scale::{Encode, Decode};
+use borsh::{BorshSerialize, BorshDeserialize};
+
+use dkg::Participant;
+
+use serai_client::{
+  primitives::{ExternalNetworkId, SeraiAddress, Signature},
+  validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair, SlashReport},
+  in_instructions::primitives::SignedBatch,
+  Transaction,
+};
+
+use serai_db::*;
+
+mod canonical;
+pub use canonical::CanonicalEventStream;
+mod ephemeral;
+pub use ephemeral::EphemeralEventStream;
+
+mod set_keys;
+pub use set_keys::SetKeysTask;
+mod publish_batch;
+pub use publish_batch::PublishBatchTask;
+mod publish_slash_report;
+pub use publish_slash_report::PublishSlashReportTask;
+
+/// The information for a new set.
+#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
+#[borsh(init = init_participant_indexes)]
+pub struct NewSetInformation {
+  /// The set.
+  pub set: ExternalValidatorSet,
+  /// The Serai block which declared it.
+  pub serai_block: [u8; 32],
+  /// The time of the block which declared it, in seconds.
+  pub declaration_time: u64,
+  /// The threshold to use.
+  pub threshold: u16,
+  /// The validators, with the amount of key shares they have.
+  pub validators: Vec<(SeraiAddress, u16)>,
+  /// The eVRF public keys.
+  ///
+  /// This will have the necessary copies of the keys proper for each validator's weight,
+  /// accordingly syncing up with `participant_indexes`.
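+  ///
+  /// For example (illustrative values): a validator with weight 2 followed by a validator with
+  /// weight 1 produces three entries, the first validator's keys twice and then the second's
+  /// once, matching participant indexes 1, 2, and 3 respectively.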
+ pub evrf_public_keys: Vec<([u8; 32], Vec)>, + /// The participant indexes, indexed by their validator. + #[borsh(skip)] + pub participant_indexes: HashMap>, + /// The validators, indexed by their participant indexes. + #[borsh(skip)] + pub participant_indexes_reverse_lookup: HashMap, +} + +impl NewSetInformation { + fn init_participant_indexes(&mut self) { + let mut next_i = 1; + self.participant_indexes = HashMap::with_capacity(self.validators.len()); + self.participant_indexes_reverse_lookup = HashMap::with_capacity(self.validators.len()); + for (validator, weight) in &self.validators { + let mut these_is = Vec::with_capacity((*weight).into()); + for _ in 0 .. *weight { + let this_i = Participant::new(next_i).unwrap(); + next_i += 1; + + these_is.push(this_i); + self.participant_indexes_reverse_lookup.insert(this_i, *validator); + } + self.participant_indexes.insert(*validator, these_is); + } + } +} + +mod _public_db { + use super::*; + + db_channel!( + CoordinatorSubstrate { + // Canonical messages to send to the processor + Canonical: (network: ExternalNetworkId) -> messages::substrate::CoordinatorMessage, + + // Relevant new set, from an ephemeral event stream + NewSet: () -> NewSetInformation, + // Potentially relevant sign slash report, from an ephemeral event stream + SignSlashReport: (set: ExternalValidatorSet) -> (), + + // Signed batches to publish onto the Serai network + SignedBatches: (network: ExternalNetworkId) -> SignedBatch, + } + ); + + create_db!( + CoordinatorSubstrate { + // Keys to set on the Serai network + Keys: (network: ExternalNetworkId) -> (Session, Vec), + // Slash reports to publish onto the Serai network + SlashReports: (network: ExternalNetworkId) -> (Session, Vec), + } + ); +} + +/// The canonical event stream. +pub struct Canonical; +impl Canonical { + pub(crate) fn send( + txn: &mut impl DbTxn, + network: ExternalNetworkId, + msg: &messages::substrate::CoordinatorMessage, + ) { + _public_db::Canonical::send(txn, network, msg); + } + /// Try to receive a canonical event, returning `None` if there is none to receive. + pub fn try_recv( + txn: &mut impl DbTxn, + network: ExternalNetworkId, + ) -> Option { + _public_db::Canonical::try_recv(txn, network) + } +} + +/// The channel for new set events emitted by an ephemeral event stream. +pub struct NewSet; +impl NewSet { + pub(crate) fn send(txn: &mut impl DbTxn, msg: &NewSetInformation) { + _public_db::NewSet::send(txn, msg); + } + /// Try to receive a new set's information, returning `None` if there is none to receive. + pub fn try_recv(txn: &mut impl DbTxn) -> Option { + _public_db::NewSet::try_recv(txn) + } +} + +/// The channel for notifications to sign a slash report, as emitted by an ephemeral event stream. +/// +/// These notifications MAY be for irrelevant validator sets. The only guarantee is the +/// notifications for all relevant validator sets will be included. +pub struct SignSlashReport; +impl SignSlashReport { + pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet) { + _public_db::SignSlashReport::send(txn, set, &()); + } + /// Try to receive a notification to sign a slash report, returning `None` if there is none to + /// receive. + pub fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<()> { + _public_db::SignSlashReport::try_recv(txn, set) + } +} + +/// The keys to set on Serai. +pub struct Keys; +impl Keys { + /// Set the keys to report for a validator set. 
+ /// + /// This only saves the most recent keys as only a single session is eligible to have its keys + /// reported at once. + pub fn set( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + key_pair: KeyPair, + signature_participants: bitvec::vec::BitVec, + signature: Signature, + ) { + // If we have a more recent pair of keys, don't write this historic one + if let Some((existing_session, _)) = _public_db::Keys::get(txn, set.network) { + if existing_session.0 >= set.session.0 { + return; + } + } + + let tx = serai_client::validator_sets::SeraiValidatorSets::set_keys( + set.network, + key_pair, + signature_participants, + signature, + ); + _public_db::Keys::set(txn, set.network, &(set.session, tx.encode())); + } + pub(crate) fn take( + txn: &mut impl DbTxn, + network: ExternalNetworkId, + ) -> Option<(Session, Transaction)> { + let (session, tx) = _public_db::Keys::take(txn, network)?; + Some((session, <_>::decode(&mut tx.as_slice()).unwrap())) + } +} + +/// The signed batches to publish onto Serai. +pub struct SignedBatches; +impl SignedBatches { + /// Send a `SignedBatch` to publish onto Serai. + pub fn send(txn: &mut impl DbTxn, batch: &SignedBatch) { + _public_db::SignedBatches::send(txn, batch.batch.network, batch); + } + pub(crate) fn try_recv(txn: &mut impl DbTxn, network: ExternalNetworkId) -> Option { + _public_db::SignedBatches::try_recv(txn, network) + } +} + +/// The slash reports to publish onto Serai. +pub struct SlashReports; +impl SlashReports { + /// Set the slashes to report for a validator set. + /// + /// This only saves the most recent slashes as only a single session is eligible to have its + /// slashes reported at once. + pub fn set( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + slash_report: SlashReport, + signature: Signature, + ) { + // If we have a more recent slash report, don't write this historic one + if let Some((existing_session, _)) = _public_db::SlashReports::get(txn, set.network) { + if existing_session.0 >= set.session.0 { + return; + } + } + + let tx = serai_client::validator_sets::SeraiValidatorSets::report_slashes( + set.network, + slash_report, + signature, + ); + _public_db::SlashReports::set(txn, set.network, &(set.session, tx.encode())); + } + pub(crate) fn take( + txn: &mut impl DbTxn, + network: ExternalNetworkId, + ) -> Option<(Session, Transaction)> { + let (session, tx) = _public_db::SlashReports::take(txn, network)?; + Some((session, <_>::decode(&mut tx.as_slice()).unwrap())) + } +} diff --git a/coordinator/substrate/src/publish_batch.rs b/coordinator/substrate/src/publish_batch.rs new file mode 100644 index 00000000..ff4b46de --- /dev/null +++ b/coordinator/substrate/src/publish_batch.rs @@ -0,0 +1,87 @@ +use core::future::Future; +use std::sync::Arc; + +#[rustfmt::skip] +use serai_client::{primitives::ExternalNetworkId, in_instructions::primitives::SignedBatch, SeraiError, Serai}; + +use serai_db::{Get, DbTxn, Db, create_db}; +use serai_task::ContinuallyRan; + +use crate::SignedBatches; + +create_db!( + CoordinatorSubstrate { + LastPublishedBatch: (network: ExternalNetworkId) -> u32, + BatchesToPublish: (network: ExternalNetworkId, batch: u32) -> SignedBatch, + } +); + +/// Publish `SignedBatch`s from `SignedBatches` onto Serai. +pub struct PublishBatchTask { + db: D, + serai: Arc, + network: ExternalNetworkId, +} + +impl PublishBatchTask { + /// Create a task to publish `SignedBatch`s onto Serai. 
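  /// Each task handles a single external network and does nothing until driven via its
  /// `ContinuallyRan` implementation.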
+ pub fn new(db: D, serai: Arc, network: ExternalNetworkId) -> Self { + Self { db, serai, network } + } +} + +impl ContinuallyRan for PublishBatchTask { + type Error = SeraiError; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + // Read from SignedBatches, which is sequential, into our own mapping + loop { + let mut txn = self.db.txn(); + let Some(batch) = SignedBatches::try_recv(&mut txn, self.network) else { + break; + }; + + // If this is a Batch not yet published, save it into our unordered mapping + if LastPublishedBatch::get(&txn, self.network) < Some(batch.batch.id) { + BatchesToPublish::set(&mut txn, self.network, batch.batch.id, &batch); + } + + txn.commit(); + } + + // Synchronize our last published batch with the Serai network's + let next_to_publish = { + // This uses the latest finalized block, not the latest cosigned block, which should be + // fine as in the worst case, the only impact is no longer attempting TX publication + let serai = self.serai.as_of_latest_finalized_block().await?; + let last_batch = serai.in_instructions().last_batch_for_network(self.network).await?; + + let mut txn = self.db.txn(); + let mut our_last_batch = LastPublishedBatch::get(&txn, self.network); + while our_last_batch < last_batch { + let next_batch = our_last_batch.map(|batch| batch + 1).unwrap_or(0); + // Clean up the Batch to publish since it's already been published + BatchesToPublish::take(&mut txn, self.network, next_batch); + our_last_batch = Some(next_batch); + } + if let Some(last_batch) = our_last_batch { + LastPublishedBatch::set(&mut txn, self.network, &last_batch); + } + last_batch.map(|batch| batch + 1).unwrap_or(0) + }; + + let made_progress = + if let Some(batch) = BatchesToPublish::get(&self.db, self.network, next_to_publish) { + self + .serai + .publish(&serai_client::in_instructions::SeraiInInstructions::execute_batch(batch)) + .await?; + true + } else { + false + }; + Ok(made_progress) + } + } +} diff --git a/coordinator/substrate/src/publish_slash_report.rs b/coordinator/substrate/src/publish_slash_report.rs new file mode 100644 index 00000000..7b90d53d --- /dev/null +++ b/coordinator/substrate/src/publish_slash_report.rs @@ -0,0 +1,101 @@ +use core::future::Future; +use std::sync::Arc; + +use serai_db::{DbTxn, Db}; + +use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::Session, Serai}; + +use serai_task::ContinuallyRan; + +use crate::SlashReports; + +/// Publish slash reports from `SlashReports` onto Serai. +pub struct PublishSlashReportTask { + db: D, + serai: Arc, +} + +impl PublishSlashReportTask { + /// Create a task to publish slash reports onto Serai. 
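  /// A single task covers every external network; each iteration attempts to publish the pending
  /// slash report, if any, for each of them.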
+ pub fn new(db: D, serai: Arc) -> Self { + Self { db, serai } + } +} + +impl PublishSlashReportTask { + // Returns if a slash report was successfully published + async fn publish(&mut self, network: ExternalNetworkId) -> Result { + let mut txn = self.db.txn(); + let Some((session, slash_report)) = SlashReports::take(&mut txn, network) else { + // No slash report to publish + return Ok(false); + }; + + // This uses the latest finalized block, not the latest cosigned block, which should be + // fine as in the worst case, the only impact is no longer attempting TX publication + let serai = self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?; + let serai = serai.validator_sets(); + let session_after_slash_report = Session(session.0 + 1); + let current_session = serai.session(network.into()).await.map_err(|e| format!("{e:?}"))?; + let current_session = current_session.map(|session| session.0); + // Only attempt to publish the slash report for session #n while session #n+1 is still + // active + let session_after_slash_report_retired = current_session > Some(session_after_slash_report.0); + if session_after_slash_report_retired { + // Commit the txn to drain this slash report from the database and not try it again later + txn.commit(); + return Ok(false); + } + + if Some(session_after_slash_report.0) != current_session { + // We already checked the current session wasn't greater, and they're not equal + assert!(current_session < Some(session_after_slash_report.0)); + // This would mean the Serai node is resyncing and is behind where it prior was + Err("have a slash report for a session Serai has yet to retire".to_string())?; + } + + // If this session which should publish a slash report already has, move on + let key_pending_slash_report = + serai.key_pending_slash_report(network).await.map_err(|e| format!("{e:?}"))?; + if key_pending_slash_report.is_none() { + txn.commit(); + return Ok(false); + }; + + match self.serai.publish(&slash_report).await { + Ok(()) => { + txn.commit(); + Ok(true) + } + // This could be specific to this TX (such as an already in mempool error) and it may be + // worthwhile to continue iteration with the other pending slash reports. We assume this + // error ephemeral and that the latency incurred for this ephemeral error to resolve is + // miniscule compared to the window available to publish the slash report. That makes + // this a non-issue. + Err(e) => Err(format!("couldn't publish slash report transaction: {e:?}")), + } + } +} + +impl ContinuallyRan for PublishSlashReportTask { + type Error = String; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut made_progress = false; + let mut error = None; + for network in serai_client::primitives::EXTERNAL_NETWORKS { + let network_res = self.publish(network).await; + // We made progress if any network successfully published their slash report + made_progress |= network_res == Ok(true); + // We want to yield the first error *after* attempting for every network + error = error.or(network_res.err()); + } + // Yield the error + if let Some(error) = error { + Err(error)? 
+ } + Ok(made_progress) + } + } +} diff --git a/coordinator/substrate/src/set_keys.rs b/coordinator/substrate/src/set_keys.rs new file mode 100644 index 00000000..b8bf2ad1 --- /dev/null +++ b/coordinator/substrate/src/set_keys.rs @@ -0,0 +1,86 @@ +use core::future::Future; +use std::sync::Arc; + +use serai_db::{DbTxn, Db}; + +use serai_client::{validator_sets::primitives::ExternalValidatorSet, Serai}; + +use serai_task::ContinuallyRan; + +use crate::Keys; + +/// Set keys from `Keys` on Serai. +pub struct SetKeysTask { + db: D, + serai: Arc, +} + +impl SetKeysTask { + /// Create a task to publish slash reports onto Serai. + pub fn new(db: D, serai: Arc) -> Self { + Self { db, serai } + } +} + +impl ContinuallyRan for SetKeysTask { + type Error = String; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut made_progress = false; + for network in serai_client::primitives::EXTERNAL_NETWORKS { + let mut txn = self.db.txn(); + let Some((session, keys)) = Keys::take(&mut txn, network) else { + // No keys to set + continue; + }; + + // This uses the latest finalized block, not the latest cosigned block, which should be + // fine as in the worst case, the only impact is no longer attempting TX publication + let serai = + self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?; + let serai = serai.validator_sets(); + let current_session = serai.session(network.into()).await.map_err(|e| format!("{e:?}"))?; + let current_session = current_session.map(|session| session.0); + // Only attempt to set these keys if this isn't a retired session + if Some(session.0) < current_session { + // Commit the txn to take these keys from the database and not try it again later + txn.commit(); + continue; + } + + if Some(session.0) != current_session { + // We already checked the current session wasn't greater, and they're not equal + assert!(current_session < Some(session.0)); + // This would mean the Serai node is resyncing and is behind where it prior was + Err("have a keys for a session Serai has yet to start".to_string())?; + } + + // If this session already has had its keys set, move on + if serai + .keys(ExternalValidatorSet { network, session }) + .await + .map_err(|e| format!("{e:?}"))? + .is_some() + { + txn.commit(); + continue; + }; + + match self.serai.publish(&keys).await { + Ok(()) => { + txn.commit(); + made_progress = true; + } + // This could be specific to this TX (such as an already in mempool error) and it may be + // worthwhile to continue iteration with the other pending slash reports. We assume this + // error ephemeral and that the latency incurred for this ephemeral error to resolve is + // miniscule compared to the window reasonable to set the keys. That makes this a + // non-issue. 
+ Err(e) => Err(format!("couldn't publish set keys transaction: {e:?}"))?, + } + } + Ok(made_progress) + } + } +} diff --git a/coordinator/tributary-sdk/Cargo.toml b/coordinator/tributary-sdk/Cargo.toml new file mode 100644 index 00000000..2e92c03d --- /dev/null +++ b/coordinator/tributary-sdk/Cargo.toml @@ -0,0 +1,49 @@ +[package] +name = "tributary-sdk" +version = "0.1.0" +description = "A micro-blockchain to provide consensus and ordering to P2P communication" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tributary-sdk" +authors = ["Luke Parker "] +edition = "2021" +rust-version = "1.81" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +thiserror = { version = "2", default-features = false, features = ["std"] } + +subtle = { version = "^2", default-features = false, features = ["std"] } +zeroize = { version = "^1.5", default-features = false, features = ["std"] } + +rand = { version = "0.8", default-features = false, features = ["std"] } +rand_chacha = { version = "0.3", default-features = false, features = ["std"] } + +blake2 = { version = "0.10", default-features = false, features = ["std"] } +transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["std", "recommended"] } + +ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", version = "0.4", default-features = false, features = ["std", "ristretto"] } +schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", version = "0.5", default-features = false, features = ["std"] } + +hex = { version = "0.4", default-features = false, features = ["std"] } +log = { version = "0.4", default-features = false, features = ["std"] } + +serai-db = { path = "../../common/db", version = "0.1" } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] } +futures-util = { version = "0.3", default-features = false, features = ["std", "sink", "channel"] } +futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] } +tendermint = { package = "tendermint-machine", path = "./tendermint", version = "0.2" } + +tokio = { version = "1", default-features = false, features = ["sync", "time", "rt"] } + +[dev-dependencies] +tokio = { version = "1", features = ["macros"] } + +[features] +tests = [] diff --git a/coordinator/tributary-sdk/LICENSE b/coordinator/tributary-sdk/LICENSE new file mode 100644 index 00000000..f684d027 --- /dev/null +++ b/coordinator/tributary-sdk/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2023 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
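As a sketch of how a consumer of this crate plugs into the broadcast layer, the following is a
minimal no-op implementation of the `P2p` trait defined below in `tributary-sdk`'s `lib.rs`
(mirroring the `DummyP2p` test helper); the `NoopP2p` name is illustrative and not part of this
patch.

use core::future::Future;

use tributary_sdk::P2p;

/// A P2P layer which drops every broadcast.
///
/// Only suitable for tests or a single-node Tributary, as no other validator will ever receive
/// our messages.
#[derive(Clone, Debug)]
pub struct NoopP2p;

impl P2p for NoopP2p {
  // `genesis` identifies the Tributary this message belongs to, and `msg` is the
  // already-serialized message (a Tendermint consensus message or a transaction).
  fn broadcast(&self, _genesis: [u8; 32], _msg: Vec<u8>) -> impl Send + Future<Output = ()> {
    async move {}
  }
}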
diff --git a/coordinator/tributary-sdk/README.md b/coordinator/tributary-sdk/README.md new file mode 100644 index 00000000..6fce976e --- /dev/null +++ b/coordinator/tributary-sdk/README.md @@ -0,0 +1,3 @@ +# Tributary + +A verifiable, ordered broadcast layer implemented as a BFT micro-blockchain. diff --git a/coordinator/tributary/src/block.rs b/coordinator/tributary-sdk/src/block.rs similarity index 99% rename from coordinator/tributary/src/block.rs rename to coordinator/tributary-sdk/src/block.rs index 6f3374bd..d632ce57 100644 --- a/coordinator/tributary/src/block.rs +++ b/coordinator/tributary-sdk/src/block.rs @@ -135,7 +135,7 @@ impl Block { // Check TXs are sorted by nonce. let nonce = |tx: &Transaction| { if let TransactionKind::Signed(_, Signed { nonce, .. }) = tx.kind() { - *nonce + nonce } else { 0 } diff --git a/coordinator/tributary/src/blockchain.rs b/coordinator/tributary-sdk/src/blockchain.rs similarity index 99% rename from coordinator/tributary/src/blockchain.rs rename to coordinator/tributary-sdk/src/blockchain.rs index 1664860b..0eee391b 100644 --- a/coordinator/tributary/src/blockchain.rs +++ b/coordinator/tributary-sdk/src/blockchain.rs @@ -323,7 +323,7 @@ impl Blockchain { } TransactionKind::Signed(order, Signed { signer, nonce, .. }) => { let next_nonce = nonce + 1; - txn.put(Self::next_nonce_key(&self.genesis, signer, &order), next_nonce.to_le_bytes()); + txn.put(Self::next_nonce_key(&self.genesis, &signer, &order), next_nonce.to_le_bytes()); self.mempool.remove(&tx.hash()); } } diff --git a/coordinator/tributary-sdk/src/lib.rs b/coordinator/tributary-sdk/src/lib.rs new file mode 100644 index 00000000..2e4a6115 --- /dev/null +++ b/coordinator/tributary-sdk/src/lib.rs @@ -0,0 +1,388 @@ +use core::{marker::PhantomData, fmt::Debug, future::Future}; +use std::{sync::Arc, io}; + +use zeroize::Zeroizing; + +use ciphersuite::{Ciphersuite, Ristretto}; + +use scale::Decode; +use futures_channel::mpsc::UnboundedReceiver; +use futures_util::{StreamExt, SinkExt}; +use ::tendermint::{ + ext::{BlockNumber, Commit, Block as BlockTrait, Network}, + SignedMessageFor, SyncedBlock, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender, + TendermintMachine, TendermintHandle, +}; + +pub use ::tendermint::Evidence; + +use serai_db::Db; + +use tokio::sync::RwLock; + +mod merkle; +pub(crate) use merkle::*; + +pub mod transaction; +pub use transaction::{TransactionError, Signed, TransactionKind, Transaction as TransactionTrait}; + +use crate::tendermint::tx::TendermintTx; + +mod provided; +pub(crate) use provided::*; +pub use provided::ProvidedError; + +mod block; +pub use block::*; + +mod blockchain; +pub(crate) use blockchain::*; + +mod mempool; +pub(crate) use mempool::*; + +pub mod tendermint; +pub(crate) use crate::tendermint::*; + +#[cfg(any(test, feature = "tests"))] +pub mod tests; + +/// Size limit for an individual transaction. +// This needs to be big enough to participate in a 101-of-150 eVRF DKG with each element taking +// `MAX_KEY_LEN`. This also needs to be big enough to pariticpate in signing 520 Bitcoin inputs +// with 49 key shares, and signing 120 Monero inputs with 49 key shares. +// TODO: Add a test for these properties +pub const TRANSACTION_SIZE_LIMIT: usize = 2_000_000; +/// Amount of transactions a single account may have in the mempool. +pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50; +/// Block size limit. 
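// With the ~6 second target block time (`BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME)`), a ~2 MB
// block limit allows roughly 14,400 blocks, or ~29 GB, per day under full load.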
+// This targets a growth limit of roughly 30 GB a day, under load, in order to prevent a malicious +// participant from flooding disks and causing out of space errors in order processes. +pub const BLOCK_SIZE_LIMIT: usize = 2_001_000; + +pub(crate) const TENDERMINT_MESSAGE: u8 = 0; +pub(crate) const TRANSACTION_MESSAGE: u8 = 1; + +#[allow(clippy::large_enum_variant)] +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum Transaction { + Tendermint(TendermintTx), + Application(T), +} + +impl ReadWrite for Transaction { + fn read(reader: &mut R) -> io::Result { + let mut kind = [0]; + reader.read_exact(&mut kind)?; + match kind[0] { + 0 => { + let tx = TendermintTx::read(reader)?; + Ok(Transaction::Tendermint(tx)) + } + 1 => { + let tx = T::read(reader)?; + Ok(Transaction::Application(tx)) + } + _ => Err(io::Error::other("invalid transaction type")), + } + } + fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + Transaction::Tendermint(tx) => { + writer.write_all(&[0])?; + tx.write(writer) + } + Transaction::Application(tx) => { + writer.write_all(&[1])?; + tx.write(writer) + } + } + } +} + +impl Transaction { + pub fn hash(&self) -> [u8; 32] { + match self { + Transaction::Tendermint(tx) => tx.hash(), + Transaction::Application(tx) => tx.hash(), + } + } + + pub fn kind(&self) -> TransactionKind { + match self { + Transaction::Tendermint(tx) => tx.kind(), + Transaction::Application(tx) => tx.kind(), + } + } +} + +/// An item which can be read and written. +pub trait ReadWrite: Sized { + fn read(reader: &mut R) -> io::Result; + fn write(&self, writer: &mut W) -> io::Result<()>; + + fn serialize(&self) -> Vec { + // BlockHeader is 64 bytes and likely the smallest item in this system + let mut buf = Vec::with_capacity(64); + self.write(&mut buf).unwrap(); + buf + } +} + +pub trait P2p: 'static + Send + Sync + Clone { + /// Broadcast a message to all other members of the Tributary with the specified genesis. + /// + /// The Tributary will re-broadcast consensus messages on a fixed interval to ensure they aren't + /// prematurely dropped from the P2P layer. THe P2P layer SHOULD perform content-based + /// deduplication to ensure a sane amount of load. + fn broadcast(&self, genesis: [u8; 32], msg: Vec) -> impl Send + Future; +} + +impl P2p for Arc

{ + fn broadcast(&self, genesis: [u8; 32], msg: Vec) -> impl Send + Future { + P::broadcast(self, genesis, msg) + } +} + +#[derive(Clone)] +pub struct Tributary { + db: D, + + genesis: [u8; 32], + network: TendermintNetwork, + + synced_block: Arc>>>, + synced_block_result: Arc>, + messages: Arc>>>, +} + +impl Tributary { + pub async fn new( + db: D, + genesis: [u8; 32], + start_time: u64, + key: Zeroizing<::F>, + validators: Vec<(::G, u64)>, + p2p: P, + ) -> Option { + log::info!("new Tributary with genesis {}", hex::encode(genesis)); + + let validators_vec = validators.iter().map(|validator| validator.0).collect::>(); + + let signer = Arc::new(Signer::new(genesis, key)); + let validators = Arc::new(Validators::new(genesis, validators)?); + + let mut blockchain = Blockchain::new(db.clone(), genesis, &validators_vec); + let block_number = BlockNumber(blockchain.block_number()); + + let start_time = if let Some(commit) = blockchain.commit(&blockchain.tip()) { + Commit::::decode(&mut commit.as_ref()).unwrap().end_time + } else { + start_time + }; + let proposal = TendermintBlock( + blockchain.build_block::>(&validators).serialize(), + ); + let blockchain = Arc::new(RwLock::new(blockchain)); + + let network = TendermintNetwork { genesis, signer, validators, blockchain, p2p }; + + let TendermintHandle { synced_block, synced_block_result, messages, machine } = + TendermintMachine::new( + db.clone(), + network.clone(), + genesis, + block_number, + start_time, + proposal, + ) + .await; + tokio::spawn(machine.run()); + + Some(Self { + db, + genesis, + network, + synced_block: Arc::new(RwLock::new(synced_block)), + synced_block_result: Arc::new(RwLock::new(synced_block_result)), + messages: Arc::new(RwLock::new(messages)), + }) + } + + pub fn block_time() -> u32 { + TendermintNetwork::::block_time() + } + + pub fn genesis(&self) -> [u8; 32] { + self.genesis + } + + pub async fn block_number(&self) -> u64 { + self.network.blockchain.read().await.block_number() + } + pub async fn tip(&self) -> [u8; 32] { + self.network.blockchain.read().await.tip() + } + + pub fn reader(&self) -> TributaryReader { + TributaryReader(self.db.clone(), self.genesis, PhantomData) + } + + pub async fn provide_transaction(&self, tx: T) -> Result<(), ProvidedError> { + self.network.blockchain.write().await.provide_transaction(tx) + } + + pub async fn next_nonce( + &self, + signer: &::G, + order: &[u8], + ) -> Option { + self.network.blockchain.read().await.next_nonce(signer, order) + } + + // Returns Ok(true) if new, Ok(false) if an already present unsigned, or the error. 
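  // For example, `tributary.add_transaction(tx).await == Ok(true)` means the transaction was
  // newly accepted into the mempool and was broadcast to peers.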
+ // Safe to be &self since the only meaningful usage of self is self.network.blockchain which + // successfully acquires its own write lock + pub async fn add_transaction(&self, tx: T) -> Result { + let tx = Transaction::Application(tx); + let mut to_broadcast = vec![TRANSACTION_MESSAGE]; + tx.write(&mut to_broadcast).unwrap(); + let res = self.network.blockchain.write().await.add_transaction::>( + true, + tx, + &self.network.signature_scheme(), + ); + if res == Ok(true) { + self.network.p2p.broadcast(self.genesis, to_broadcast).await; + } + res + } + + async fn sync_block_internal( + &self, + block: Block, + commit: Vec, + result: &mut UnboundedReceiver, + ) -> bool { + let (tip, block_number) = { + let blockchain = self.network.blockchain.read().await; + (blockchain.tip(), blockchain.block_number()) + }; + + if block.header.parent != tip { + log::debug!("told to sync a block whose parent wasn't our tip"); + return false; + } + + let block = TendermintBlock(block.serialize()); + let mut commit_ref = commit.as_ref(); + let Ok(commit) = Commit::>::decode(&mut commit_ref) else { + log::error!("sent an invalidly serialized commit"); + return false; + }; + // Storage DoS vector. We *could* truncate to solely the relevant portion, trying to save this, + // yet then we'd have to test the truncation was performed correctly. + if !commit_ref.is_empty() { + log::error!("sent an commit with additional data after it"); + return false; + } + if !self.network.verify_commit(block.id(), &commit) { + log::error!("sent an invalid commit"); + return false; + } + + let number = BlockNumber(block_number + 1); + self.synced_block.write().await.send(SyncedBlock { number, block, commit }).await.unwrap(); + result.next().await.unwrap() + } + + // Sync a block. + // TODO: Since we have a static validator set, we should only need the tail commit? + pub async fn sync_block(&self, block: Block, commit: Vec) -> bool { + let mut result = self.synced_block_result.write().await; + self.sync_block_internal(block, commit, &mut result).await + } + + // Return true if the message should be rebroadcasted. + pub async fn handle_message(&self, msg: &[u8]) -> bool { + match msg.first() { + Some(&TRANSACTION_MESSAGE) => { + let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else { + log::error!("received invalid transaction message"); + return false; + }; + + // TODO: Sync mempools with fellow peers + // Can we just rebroadcast transactions not included for at least two blocks? + let res = + self.network.blockchain.write().await.add_transaction::>( + false, + tx, + &self.network.signature_scheme(), + ); + log::debug!("received transaction message. valid new transaction: {res:?}"); + res == Ok(true) + } + + Some(&TENDERMINT_MESSAGE) => { + let Ok(msg) = + SignedMessageFor::>::decode::<&[u8]>(&mut &msg[1 ..]) + else { + log::error!("received invalid tendermint message"); + return false; + }; + + self.messages.write().await.send(msg).await.unwrap(); + false + } + + _ => false, + } + } + + /// Get a Future which will resolve once the next block has been added. 
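  /// Illustrative usage: `let notification = tributary.next_block_notification().await;`
  /// registers the notification, and `notification.await` then resolves once the next block has
  /// been added.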
+ pub async fn next_block_notification( + &self, + ) -> impl Send + Sync + core::future::Future> { + let (tx, rx) = tokio::sync::oneshot::channel(); + self.network.blockchain.write().await.next_block_notifications.push_back(tx); + rx + } +} + +#[derive(Clone)] +pub struct TributaryReader(D, [u8; 32], PhantomData); +impl TributaryReader { + pub fn genesis(&self) -> [u8; 32] { + self.1 + } + + // Since these values are static once set, they can be safely read from the database without lock + // acquisition + pub fn block(&self, hash: &[u8; 32]) -> Option> { + Blockchain::::block_from_db(&self.0, self.1, hash) + } + pub fn commit(&self, hash: &[u8; 32]) -> Option> { + Blockchain::::commit_from_db(&self.0, self.1, hash) + } + pub fn parsed_commit(&self, hash: &[u8; 32]) -> Option> { + self.commit(hash).map(|commit| Commit::::decode(&mut commit.as_ref()).unwrap()) + } + pub fn block_after(&self, hash: &[u8; 32]) -> Option<[u8; 32]> { + Blockchain::::block_after(&self.0, self.1, hash) + } + pub fn time_of_block(&self, hash: &[u8; 32]) -> Option { + self + .commit(hash) + .map(|commit| Commit::::decode(&mut commit.as_ref()).unwrap().end_time) + } + + pub fn locally_provided_txs_in_block(&self, hash: &[u8; 32], order: &str) -> bool { + Blockchain::::locally_provided_txs_in_block(&self.0, &self.1, hash, order) + } + + // This isn't static, yet can be read with only minor discrepancy risks + pub fn tip(&self) -> [u8; 32] { + Blockchain::::tip_from_db(&self.0, self.1) + } +} diff --git a/coordinator/tributary/src/mempool.rs b/coordinator/tributary-sdk/src/mempool.rs similarity index 89% rename from coordinator/tributary/src/mempool.rs rename to coordinator/tributary-sdk/src/mempool.rs index 7558bae0..e83c3acb 100644 --- a/coordinator/tributary/src/mempool.rs +++ b/coordinator/tributary-sdk/src/mempool.rs @@ -81,11 +81,11 @@ impl Mempool { } Transaction::Application(tx) => match tx.kind() { TransactionKind::Signed(order, Signed { signer, nonce, .. }) => { - let amount = *res.txs_per_signer.get(signer).unwrap_or(&0) + 1; - res.txs_per_signer.insert(*signer, amount); + let amount = *res.txs_per_signer.get(&signer).unwrap_or(&0) + 1; + res.txs_per_signer.insert(signer, amount); if let Some(prior_nonce) = - res.last_nonce_in_mempool.insert((*signer, order.clone()), *nonce) + res.last_nonce_in_mempool.insert((signer, order.clone()), nonce) { assert_eq!(prior_nonce, nonce - 1); } @@ -133,14 +133,14 @@ impl Mempool { match app_tx.kind() { TransactionKind::Signed(order, Signed { signer, .. }) => { // Get the nonce from the blockchain - let Some(blockchain_next_nonce) = blockchain_next_nonce(*signer, order.clone()) else { + let Some(blockchain_next_nonce) = blockchain_next_nonce(signer, order.clone()) else { // Not a participant Err(TransactionError::InvalidSigner)? 
}; let mut next_nonce = blockchain_next_nonce; if let Some(mempool_last_nonce) = - self.last_nonce_in_mempool.get(&(*signer, order.clone())) + self.last_nonce_in_mempool.get(&(signer, order.clone())) { assert!(*mempool_last_nonce >= blockchain_next_nonce); next_nonce = *mempool_last_nonce + 1; @@ -148,14 +148,14 @@ impl Mempool { // If we have too many transactions from this sender, don't add this yet UNLESS we are // this sender - let amount_in_pool = *self.txs_per_signer.get(signer).unwrap_or(&0) + 1; + let amount_in_pool = *self.txs_per_signer.get(&signer).unwrap_or(&0) + 1; if !internal && (amount_in_pool > ACCOUNT_MEMPOOL_LIMIT) { Err(TransactionError::TooManyInMempool)?; } verify_transaction(app_tx, self.genesis, &mut |_, _| Some(next_nonce))?; - self.last_nonce_in_mempool.insert((*signer, order.clone()), next_nonce); - self.txs_per_signer.insert(*signer, amount_in_pool); + self.last_nonce_in_mempool.insert((signer, order.clone()), next_nonce); + self.txs_per_signer.insert(signer, amount_in_pool); } TransactionKind::Unsigned => { // check we have the tx in the pool/chain @@ -205,7 +205,7 @@ impl Mempool { // Sort signed by nonce let nonce = |tx: &Transaction| { if let TransactionKind::Signed(_, Signed { nonce, .. }) = tx.kind() { - *nonce + nonce } else { unreachable!() } @@ -242,11 +242,11 @@ impl Mempool { if let Some(tx) = self.txs.remove(tx) { if let TransactionKind::Signed(order, Signed { signer, nonce, .. }) = tx.kind() { - let amount = *self.txs_per_signer.get(signer).unwrap() - 1; - self.txs_per_signer.insert(*signer, amount); + let amount = *self.txs_per_signer.get(&signer).unwrap() - 1; + self.txs_per_signer.insert(signer, amount); - if self.last_nonce_in_mempool.get(&(*signer, order.clone())) == Some(nonce) { - self.last_nonce_in_mempool.remove(&(*signer, order)); + if self.last_nonce_in_mempool.get(&(signer, order.clone())) == Some(&nonce) { + self.last_nonce_in_mempool.remove(&(signer, order)); } } } diff --git a/coordinator/tributary/src/merkle.rs b/coordinator/tributary-sdk/src/merkle.rs similarity index 100% rename from coordinator/tributary/src/merkle.rs rename to coordinator/tributary-sdk/src/merkle.rs diff --git a/coordinator/tributary/src/provided.rs b/coordinator/tributary-sdk/src/provided.rs similarity index 100% rename from coordinator/tributary/src/provided.rs rename to coordinator/tributary-sdk/src/provided.rs diff --git a/coordinator/tributary/src/tendermint/mod.rs b/coordinator/tributary-sdk/src/tendermint/mod.rs similarity index 59% rename from coordinator/tributary/src/tendermint/mod.rs rename to coordinator/tributary-sdk/src/tendermint/mod.rs index 0ce6232c..0fd618ca 100644 --- a/coordinator/tributary/src/tendermint/mod.rs +++ b/coordinator/tributary-sdk/src/tendermint/mod.rs @@ -1,8 +1,6 @@ -use core::ops::Deref; +use core::{ops::Deref, future::Future}; use std::{sync::Arc, collections::HashMap}; -use async_trait::async_trait; - use subtle::ConstantTimeEq; use zeroize::{Zeroize, Zeroizing}; @@ -74,50 +72,52 @@ impl Signer { } } -#[async_trait] impl SignerTrait for Signer { type ValidatorId = [u8; 32]; type Signature = [u8; 64]; /// Returns the validator's current ID. Returns None if they aren't a current validator. - async fn validator_id(&self) -> Option { - Some((Ristretto::generator() * self.key.deref()).to_bytes()) + fn validator_id(&self) -> impl Send + Future> { + async move { Some((Ristretto::generator() * self.key.deref()).to_bytes()) } } /// Sign a signature with the current validator's private key. 
- async fn sign(&self, msg: &[u8]) -> Self::Signature { - let mut nonce = Zeroizing::new(RecommendedTranscript::new(b"Tributary Chain Tendermint Nonce")); - nonce.append_message(b"genesis", self.genesis); - nonce.append_message(b"key", Zeroizing::new(self.key.deref().to_repr()).as_ref()); - nonce.append_message(b"message", msg); - let mut nonce = nonce.challenge(b"nonce"); + fn sign(&self, msg: &[u8]) -> impl Send + Future { + async move { + let mut nonce = + Zeroizing::new(RecommendedTranscript::new(b"Tributary Chain Tendermint Nonce")); + nonce.append_message(b"genesis", self.genesis); + nonce.append_message(b"key", Zeroizing::new(self.key.deref().to_repr()).as_ref()); + nonce.append_message(b"message", msg); + let mut nonce = nonce.challenge(b"nonce"); - let mut nonce_arr = [0; 64]; - nonce_arr.copy_from_slice(nonce.as_ref()); + let mut nonce_arr = [0; 64]; + nonce_arr.copy_from_slice(nonce.as_ref()); - let nonce_ref: &mut [u8] = nonce.as_mut(); - nonce_ref.zeroize(); - let nonce_ref: &[u8] = nonce.as_ref(); - assert_eq!(nonce_ref, [0; 64].as_ref()); + let nonce_ref: &mut [u8] = nonce.as_mut(); + nonce_ref.zeroize(); + let nonce_ref: &[u8] = nonce.as_ref(); + assert_eq!(nonce_ref, [0; 64].as_ref()); - let nonce = - Zeroizing::new(::F::from_bytes_mod_order_wide(&nonce_arr)); - nonce_arr.zeroize(); + let nonce = + Zeroizing::new(::F::from_bytes_mod_order_wide(&nonce_arr)); + nonce_arr.zeroize(); - assert!(!bool::from(nonce.ct_eq(&::F::ZERO))); + assert!(!bool::from(nonce.ct_eq(&::F::ZERO))); - let challenge = challenge( - self.genesis, - (Ristretto::generator() * self.key.deref()).to_bytes(), - (Ristretto::generator() * nonce.deref()).to_bytes().as_ref(), - msg, - ); + let challenge = challenge( + self.genesis, + (Ristretto::generator() * self.key.deref()).to_bytes(), + (Ristretto::generator() * nonce.deref()).to_bytes().as_ref(), + msg, + ); - let sig = SchnorrSignature::::sign(&self.key, nonce, challenge).serialize(); + let sig = SchnorrSignature::::sign(&self.key, nonce, challenge).serialize(); - let mut res = [0; 64]; - res.copy_from_slice(&sig); - res + let mut res = [0; 64]; + res.copy_from_slice(&sig); + res + } } } @@ -274,7 +274,6 @@ pub const BLOCK_PROCESSING_TIME: u32 = 999; pub const LATENCY_TIME: u32 = 1667; pub const TARGET_BLOCK_TIME: u32 = BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME); -#[async_trait] impl Network for TendermintNetwork { type Db = D; @@ -300,111 +299,126 @@ impl Network for TendermintNetwork self.validators.clone() } - async fn broadcast(&mut self, msg: SignedMessageFor) { - let mut to_broadcast = vec![TENDERMINT_MESSAGE]; - to_broadcast.extend(msg.encode()); - self.p2p.broadcast(self.genesis, to_broadcast).await - } - - async fn slash(&mut self, validator: Self::ValidatorId, slash_event: SlashEvent) { - log::error!( - "validator {} triggered a slash event on tributary {} (with evidence: {})", - hex::encode(validator), - hex::encode(self.genesis), - matches!(slash_event, SlashEvent::WithEvidence(_)), - ); - - let signer = self.signer(); - let Some(tx) = (match slash_event { - SlashEvent::WithEvidence(evidence) => { - // create an unsigned evidence tx - Some(TendermintTx::SlashEvidence(evidence)) - } - SlashEvent::Id(_reason, _block, _round) => { - // TODO: Increase locally observed slash points - None - } - }) else { - return; - }; - - // add tx to blockchain and broadcast to peers - let mut to_broadcast = vec![TRANSACTION_MESSAGE]; - tx.write(&mut to_broadcast).unwrap(); - if self.blockchain.write().await.add_transaction::( - true, - 
Transaction::Tendermint(tx), - &self.signature_scheme(), - ) == Ok(true) - { - self.p2p.broadcast(signer.genesis, to_broadcast).await; + fn broadcast(&mut self, msg: SignedMessageFor) -> impl Send + Future { + async move { + let mut to_broadcast = vec![TENDERMINT_MESSAGE]; + to_broadcast.extend(msg.encode()); + self.p2p.broadcast(self.genesis, to_broadcast).await } } - async fn validate(&self, block: &Self::Block) -> Result<(), TendermintBlockError> { - let block = - Block::read::<&[u8]>(&mut block.0.as_ref()).map_err(|_| TendermintBlockError::Fatal)?; - self - .blockchain - .read() - .await - .verify_block::(&block, &self.signature_scheme(), false) - .map_err(|e| match e { - BlockError::NonLocalProvided(_) => TendermintBlockError::Temporal, - _ => { - log::warn!("Tributary Tendermint validate returning BlockError::Fatal due to {e:?}"); - TendermintBlockError::Fatal + fn slash( + &mut self, + validator: Self::ValidatorId, + slash_event: SlashEvent, + ) -> impl Send + Future { + async move { + log::error!( + "validator {} triggered a slash event on tributary {} (with evidence: {})", + hex::encode(validator), + hex::encode(self.genesis), + matches!(slash_event, SlashEvent::WithEvidence(_)), + ); + + let signer = self.signer(); + let Some(tx) = (match slash_event { + SlashEvent::WithEvidence(evidence) => { + // create an unsigned evidence tx + Some(TendermintTx::SlashEvidence(evidence)) } - }) + SlashEvent::Id(_reason, _block, _round) => { + // TODO: Increase locally observed slash points + None + } + }) else { + return; + }; + + // add tx to blockchain and broadcast to peers + let mut to_broadcast = vec![TRANSACTION_MESSAGE]; + tx.write(&mut to_broadcast).unwrap(); + if self.blockchain.write().await.add_transaction::( + true, + Transaction::Tendermint(tx), + &self.signature_scheme(), + ) == Ok(true) + { + self.p2p.broadcast(signer.genesis, to_broadcast).await; + } + } } - async fn add_block( + fn validate( + &self, + block: &Self::Block, + ) -> impl Send + Future> { + async move { + let block = + Block::read::<&[u8]>(&mut block.0.as_ref()).map_err(|_| TendermintBlockError::Fatal)?; + self + .blockchain + .read() + .await + .verify_block::(&block, &self.signature_scheme(), false) + .map_err(|e| match e { + BlockError::NonLocalProvided(_) => TendermintBlockError::Temporal, + _ => { + log::warn!("Tributary Tendermint validate returning BlockError::Fatal due to {e:?}"); + TendermintBlockError::Fatal + } + }) + } + } + + fn add_block( &mut self, serialized_block: Self::Block, commit: Commit, - ) -> Option { - let invalid_block = || { - // There's a fatal flaw in the code, it's behind a hard fork, or the validators turned - // malicious - // All justify a halt to then achieve social consensus from - // TODO: Under multiple validator sets, a small validator set turning malicious knocks - // off the entire network. That's an unacceptable DoS. - panic!("validators added invalid block to tributary {}", hex::encode(self.genesis)); - }; + ) -> impl Send + Future> { + async move { + let invalid_block = || { + // There's a fatal flaw in the code, it's behind a hard fork, or the validators turned + // malicious + // All justify a halt to then achieve social consensus from + // TODO: Under multiple validator sets, a small validator set turning malicious knocks + // off the entire network. That's an unacceptable DoS. 
+ panic!("validators added invalid block to tributary {}", hex::encode(self.genesis)); + }; - // Tendermint should only produce valid commits - assert!(self.verify_commit(serialized_block.id(), &commit)); + // Tendermint should only produce valid commits + assert!(self.verify_commit(serialized_block.id(), &commit)); - let Ok(block) = Block::read::<&[u8]>(&mut serialized_block.0.as_ref()) else { - return invalid_block(); - }; + let Ok(block) = Block::read::<&[u8]>(&mut serialized_block.0.as_ref()) else { + return invalid_block(); + }; - let encoded_commit = commit.encode(); - loop { - let block_res = self.blockchain.write().await.add_block::( - &block, - encoded_commit.clone(), - &self.signature_scheme(), - ); - match block_res { - Ok(()) => { - // If we successfully added this block, break - break; + let encoded_commit = commit.encode(); + loop { + let block_res = self.blockchain.write().await.add_block::( + &block, + encoded_commit.clone(), + &self.signature_scheme(), + ); + match block_res { + Ok(()) => { + // If we successfully added this block, break + break; + } + Err(BlockError::NonLocalProvided(hash)) => { + log::error!( + "missing provided transaction {} which other validators on tributary {} had", + hex::encode(hash), + hex::encode(self.genesis) + ); + tokio::time::sleep(core::time::Duration::from_secs(5)).await; + } + _ => return invalid_block(), } - Err(BlockError::NonLocalProvided(hash)) => { - log::error!( - "missing provided transaction {} which other validators on tributary {} had", - hex::encode(hash), - hex::encode(self.genesis) - ); - tokio::time::sleep(core::time::Duration::from_secs(5)).await; - } - _ => return invalid_block(), } - } - Some(TendermintBlock( - self.blockchain.write().await.build_block::(&self.signature_scheme()).serialize(), - )) + Some(TendermintBlock( + self.blockchain.write().await.build_block::(&self.signature_scheme()).serialize(), + )) + } } } diff --git a/coordinator/tributary/src/tendermint/tx.rs b/coordinator/tributary-sdk/src/tendermint/tx.rs similarity index 97% rename from coordinator/tributary/src/tendermint/tx.rs rename to coordinator/tributary-sdk/src/tendermint/tx.rs index 8af40708..ea2a7256 100644 --- a/coordinator/tributary/src/tendermint/tx.rs +++ b/coordinator/tributary-sdk/src/tendermint/tx.rs @@ -39,7 +39,7 @@ impl ReadWrite for TendermintTx { } impl Transaction for TendermintTx { - fn kind(&self) -> TransactionKind<'_> { + fn kind(&self) -> TransactionKind { // There's an assert elsewhere in the codebase expecting this behavior // If we do want to add Provided/Signed TendermintTxs, review the implications carefully TransactionKind::Unsigned diff --git a/coordinator/tributary/src/tests/block.rs b/coordinator/tributary-sdk/src/tests/block.rs similarity index 97% rename from coordinator/tributary/src/tests/block.rs rename to coordinator/tributary-sdk/src/tests/block.rs index c5bf19c6..03493e21 100644 --- a/coordinator/tributary/src/tests/block.rs +++ b/coordinator/tributary-sdk/src/tests/block.rs @@ -60,8 +60,8 @@ impl ReadWrite for NonceTransaction { } impl TransactionTrait for NonceTransaction { - fn kind(&self) -> TransactionKind<'_> { - TransactionKind::Signed(vec![], &self.2) + fn kind(&self) -> TransactionKind { + TransactionKind::Signed(vec![], self.2.clone()) } fn hash(&self) -> [u8; 32] { diff --git a/coordinator/tributary/src/tests/blockchain.rs b/coordinator/tributary-sdk/src/tests/blockchain.rs similarity index 99% rename from coordinator/tributary/src/tests/blockchain.rs rename to 
coordinator/tributary-sdk/src/tests/blockchain.rs index 6103a62f..3c4df327 100644 --- a/coordinator/tributary/src/tests/blockchain.rs +++ b/coordinator/tributary-sdk/src/tests/blockchain.rs @@ -425,7 +425,7 @@ async fn block_tx_ordering() { } impl TransactionTrait for SignedTx { - fn kind(&self) -> TransactionKind<'_> { + fn kind(&self) -> TransactionKind { match self { SignedTx::Signed(signed) => signed.kind(), SignedTx::Provided(pro) => pro.kind(), diff --git a/coordinator/tributary/src/tests/mempool.rs b/coordinator/tributary-sdk/src/tests/mempool.rs similarity index 100% rename from coordinator/tributary/src/tests/mempool.rs rename to coordinator/tributary-sdk/src/tests/mempool.rs diff --git a/coordinator/tributary/src/tests/merkle.rs b/coordinator/tributary-sdk/src/tests/merkle.rs similarity index 100% rename from coordinator/tributary/src/tests/merkle.rs rename to coordinator/tributary-sdk/src/tests/merkle.rs diff --git a/coordinator/tributary/src/tests/mod.rs b/coordinator/tributary-sdk/src/tests/mod.rs similarity index 100% rename from coordinator/tributary/src/tests/mod.rs rename to coordinator/tributary-sdk/src/tests/mod.rs diff --git a/coordinator/tributary-sdk/src/tests/p2p.rs b/coordinator/tributary-sdk/src/tests/p2p.rs new file mode 100644 index 00000000..32bca7d1 --- /dev/null +++ b/coordinator/tributary-sdk/src/tests/p2p.rs @@ -0,0 +1,12 @@ +use core::future::Future; + +pub use crate::P2p; + +#[derive(Clone, Debug)] +pub struct DummyP2p; + +impl P2p for DummyP2p { + fn broadcast(&self, _: [u8; 32], _: Vec) -> impl Send + Future { + async move { unimplemented!() } + } +} diff --git a/coordinator/tributary/src/tests/tendermint.rs b/coordinator/tributary-sdk/src/tests/tendermint.rs similarity index 80% rename from coordinator/tributary/src/tests/tendermint.rs rename to coordinator/tributary-sdk/src/tests/tendermint.rs index 77dfc9e5..fc8f190e 100644 --- a/coordinator/tributary/src/tests/tendermint.rs +++ b/coordinator/tributary-sdk/src/tests/tendermint.rs @@ -1,4 +1,7 @@ +use core::future::Future; + use tendermint::ext::Network; + use crate::{ P2p, TendermintTx, tendermint::{TARGET_BLOCK_TIME, TendermintNetwork}, @@ -11,10 +14,9 @@ fn assert_target_block_time() { #[derive(Clone, Debug)] pub struct DummyP2p; - #[async_trait::async_trait] impl P2p for DummyP2p { - async fn broadcast(&self, _: [u8; 32], _: Vec) { - unimplemented!() + fn broadcast(&self, _: [u8; 32], _: Vec) -> impl Send + Future { + async move { unimplemented!() } } } diff --git a/coordinator/tributary/src/tests/transaction/mod.rs b/coordinator/tributary-sdk/src/tests/transaction/mod.rs similarity index 97% rename from coordinator/tributary/src/tests/transaction/mod.rs rename to coordinator/tributary-sdk/src/tests/transaction/mod.rs index 1f85947a..eeaa0acb 100644 --- a/coordinator/tributary/src/tests/transaction/mod.rs +++ b/coordinator/tributary-sdk/src/tests/transaction/mod.rs @@ -67,7 +67,7 @@ impl ReadWrite for ProvidedTransaction { } impl Transaction for ProvidedTransaction { - fn kind(&self) -> TransactionKind<'_> { + fn kind(&self) -> TransactionKind { match self.0[0] { 1 => TransactionKind::Provided("order1"), 2 => TransactionKind::Provided("order2"), @@ -119,8 +119,8 @@ impl ReadWrite for SignedTransaction { } impl Transaction for SignedTransaction { - fn kind(&self) -> TransactionKind<'_> { - TransactionKind::Signed(vec![], &self.1) + fn kind(&self) -> TransactionKind { + TransactionKind::Signed(vec![], self.1.clone()) } fn hash(&self) -> [u8; 32] { diff --git 
a/coordinator/tributary/src/tests/transaction/signed.rs b/coordinator/tributary-sdk/src/tests/transaction/signed.rs similarity index 100% rename from coordinator/tributary/src/tests/transaction/signed.rs rename to coordinator/tributary-sdk/src/tests/transaction/signed.rs diff --git a/coordinator/tributary/src/tests/transaction/tendermint.rs b/coordinator/tributary-sdk/src/tests/transaction/tendermint.rs similarity index 100% rename from coordinator/tributary/src/tests/transaction/tendermint.rs rename to coordinator/tributary-sdk/src/tests/transaction/tendermint.rs diff --git a/coordinator/tributary-sdk/src/transaction.rs b/coordinator/tributary-sdk/src/transaction.rs new file mode 100644 index 00000000..d7ff4092 --- /dev/null +++ b/coordinator/tributary-sdk/src/transaction.rs @@ -0,0 +1,218 @@ +use core::fmt::Debug; +use std::io; + +use zeroize::Zeroize; +use thiserror::Error; + +use blake2::{Digest, Blake2b512}; + +use ciphersuite::{ + group::{Group, GroupEncoding}, + Ciphersuite, Ristretto, +}; +use schnorr::SchnorrSignature; + +use crate::{TRANSACTION_SIZE_LIMIT, ReadWrite}; + +#[derive(Clone, PartialEq, Eq, Debug, Error)] +pub enum TransactionError { + /// Transaction exceeded the size limit. + #[error("transaction is too large")] + TooLargeTransaction, + /// Transaction's signer isn't a participant. + #[error("invalid signer")] + InvalidSigner, + /// Transaction's nonce isn't the prior nonce plus one. + #[error("invalid nonce")] + InvalidNonce, + /// Transaction's signature is invalid. + #[error("invalid signature")] + InvalidSignature, + /// Transaction's content is invalid. + #[error("transaction content is invalid")] + InvalidContent, + /// Transaction's signer has too many transactions in the mempool. + #[error("signer has too many transactions in the mempool")] + TooManyInMempool, + /// Provided Transaction added to mempool. + #[error("provided transaction added to mempool")] + ProvidedAddedToMempool, +} + +/// Data for a signed transaction. 
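/// The nonce is sequential per `(signer, order)` pair, where `order` is the `Vec<u8>` carried by
/// `TransactionKind::Signed`; both the blockchain and the mempool track the next expected nonce
/// for each pair.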
+#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Signed { + pub signer: ::G, + pub nonce: u32, + pub signature: SchnorrSignature, +} + +impl ReadWrite for Signed { + fn read(reader: &mut R) -> io::Result { + let signer = Ristretto::read_G(reader)?; + + let mut nonce = [0; 4]; + reader.read_exact(&mut nonce)?; + let nonce = u32::from_le_bytes(nonce); + if nonce >= (u32::MAX - 1) { + Err(io::Error::other("nonce exceeded limit"))?; + } + + let mut signature = SchnorrSignature::::read(reader)?; + if signature.R.is_identity().into() { + // Anyone malicious could remove this and try to find zero signatures + // We should never produce zero signatures though meaning this should never come up + // If it does somehow come up, this is a decent courtesy + signature.zeroize(); + Err(io::Error::other("signature nonce was identity"))?; + } + + Ok(Signed { signer, nonce, signature }) + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + // This is either an invalid signature or a private key leak + if self.signature.R.is_identity().into() { + Err(io::Error::other("signature nonce was identity"))?; + } + writer.write_all(&self.signer.to_bytes())?; + writer.write_all(&self.nonce.to_le_bytes())?; + self.signature.write(writer) + } +} + +impl Signed { + pub fn read_without_nonce(reader: &mut R, nonce: u32) -> io::Result { + let signer = Ristretto::read_G(reader)?; + + let mut signature = SchnorrSignature::::read(reader)?; + if signature.R.is_identity().into() { + // Anyone malicious could remove this and try to find zero signatures + // We should never produce zero signatures though meaning this should never come up + // If it does somehow come up, this is a decent courtesy + signature.zeroize(); + Err(io::Error::other("signature nonce was identity"))?; + } + + Ok(Signed { signer, nonce, signature }) + } + + pub fn write_without_nonce(&self, writer: &mut W) -> io::Result<()> { + // This is either an invalid signature or a private key leak + if self.signature.R.is_identity().into() { + Err(io::Error::other("signature nonce was identity"))?; + } + writer.write_all(&self.signer.to_bytes())?; + self.signature.write(writer) + } +} + +#[allow(clippy::large_enum_variant)] +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum TransactionKind { + /// This transaction should be provided by every validator, in an exact order. + /// + /// The contained static string names the orderer to use. This allows two distinct provided + /// transaction kinds, without a synchronized order, to be ordered within their own kind without + /// requiring ordering with each other. + /// + /// The only malleability is in when this transaction appears on chain. The block producer will + /// include it when they have it. Block verification will fail for validators without it. + /// + /// If a supermajority of validators produce a commit for a block with a provided transaction + /// which isn't locally held, the block will be added to the local chain. When the transaction is + /// locally provided, it will be compared for correctness to the on-chain version + /// + /// In order to ensure TXs aren't accidentally provided multiple times, all provided transactions + /// must have a unique hash which is also unique to all Unsigned transactions. + Provided(&'static str), + + /// An unsigned transaction, only able to be included by the block producer. + /// + /// Once an Unsigned transaction is included on-chain, it may not be included again. 
In order to + /// have multiple Unsigned transactions with the same values included on-chain, some distinct + /// nonce must be included in order to cause a distinct hash. + /// + /// The hash must also be unique with all Provided transactions. + Unsigned, + + /// A signed transaction. + Signed(Vec, Signed), +} + +// TODO: Should this be renamed TransactionTrait now that a literal Transaction exists? +// Or should the literal Transaction be renamed to Event? +pub trait Transaction: 'static + Send + Sync + Clone + Eq + Debug + ReadWrite { + /// Return what type of transaction this is. + fn kind(&self) -> TransactionKind; + + /// Return the hash of this transaction. + /// + /// The hash must NOT commit to the signature. + fn hash(&self) -> [u8; 32]; + + /// Perform transaction-specific verification. + fn verify(&self) -> Result<(), TransactionError>; + + /// Obtain the challenge for this transaction's signature. + /// + /// Do not override this unless you know what you're doing. + /// + /// Panics if called on non-signed transactions. + fn sig_hash(&self, genesis: [u8; 32]) -> ::F { + match self.kind() { + TransactionKind::Signed(order, Signed { signature, .. }) => { + ::F::from_bytes_mod_order_wide( + &Blake2b512::digest( + [ + b"Tributary Signed Transaction", + genesis.as_ref(), + &self.hash(), + order.as_ref(), + signature.R.to_bytes().as_ref(), + ] + .concat(), + ) + .into(), + ) + } + _ => panic!("sig_hash called on non-signed transaction"), + } + } +} + +pub trait GAIN: FnMut(&::G, &[u8]) -> Option {} +impl::G, &[u8]) -> Option> GAIN for F {} + +pub(crate) fn verify_transaction( + tx: &T, + genesis: [u8; 32], + get_and_increment_nonce: &mut F, +) -> Result<(), TransactionError> { + if tx.serialize().len() > TRANSACTION_SIZE_LIMIT { + Err(TransactionError::TooLargeTransaction)?; + } + + tx.verify()?; + + match tx.kind() { + TransactionKind::Provided(_) | TransactionKind::Unsigned => {} + TransactionKind::Signed(order, Signed { signer, nonce, signature }) => { + if let Some(next_nonce) = get_and_increment_nonce(&signer, &order) { + if nonce != next_nonce { + Err(TransactionError::InvalidNonce)?; + } + } else { + // Not a participant + Err(TransactionError::InvalidSigner)?; + } + + // TODO: Use a batch verification here + if !signature.verify(signer, tx.sig_hash(genesis)) { + Err(TransactionError::InvalidSignature)?; + } + } + } + + Ok(()) +} diff --git a/coordinator/tributary/tendermint/Cargo.toml b/coordinator/tributary-sdk/tendermint/Cargo.toml similarity index 91% rename from coordinator/tributary/tendermint/Cargo.toml rename to coordinator/tributary-sdk/tendermint/Cargo.toml index ac6becfa..7f7e2186 100644 --- a/coordinator/tributary/tendermint/Cargo.toml +++ b/coordinator/tributary-sdk/tendermint/Cargo.toml @@ -6,6 +6,7 @@ license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tendermint" authors = ["Luke Parker "] edition = "2021" +rust-version = "1.81" [package.metadata.docs.rs] all-features = true @@ -15,8 +16,7 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [dependencies] -async-trait = { version = "0.1", default-features = false } -thiserror = { version = "1", default-features = false } +thiserror = { version = "2", default-features = false, features = ["std"] } hex = { version = "0.4", default-features = false, features = ["std"] } log = { version = "0.4", default-features = false, features = ["std"] } diff --git a/coordinator/tributary/tendermint/LICENSE b/coordinator/tributary-sdk/tendermint/LICENSE similarity index 100% rename from 
coordinator/tributary/tendermint/LICENSE rename to coordinator/tributary-sdk/tendermint/LICENSE diff --git a/coordinator/tributary/tendermint/README.md b/coordinator/tributary-sdk/tendermint/README.md similarity index 100% rename from coordinator/tributary/tendermint/README.md rename to coordinator/tributary-sdk/tendermint/README.md diff --git a/coordinator/tributary/tendermint/src/block.rs b/coordinator/tributary-sdk/tendermint/src/block.rs similarity index 100% rename from coordinator/tributary/tendermint/src/block.rs rename to coordinator/tributary-sdk/tendermint/src/block.rs diff --git a/coordinator/tributary/tendermint/src/ext.rs b/coordinator/tributary-sdk/tendermint/src/ext.rs similarity index 92% rename from coordinator/tributary/tendermint/src/ext.rs rename to coordinator/tributary-sdk/tendermint/src/ext.rs index 3869d9d9..67b8b07d 100644 --- a/coordinator/tributary/tendermint/src/ext.rs +++ b/coordinator/tributary-sdk/tendermint/src/ext.rs @@ -1,7 +1,6 @@ -use core::{hash::Hash, fmt::Debug}; +use core::{hash::Hash, fmt::Debug, future::Future}; use std::{sync::Arc, collections::HashSet}; -use async_trait::async_trait; use thiserror::Error; use parity_scale_codec::{Encode, Decode}; @@ -34,7 +33,6 @@ pub struct BlockNumber(pub u64); pub struct RoundNumber(pub u32); /// A signer for a validator. -#[async_trait] pub trait Signer: Send + Sync { // Type used to identify validators. type ValidatorId: ValidatorId; @@ -42,22 +40,21 @@ pub trait Signer: Send + Sync { type Signature: Signature; /// Returns the validator's current ID. Returns None if they aren't a current validator. - async fn validator_id(&self) -> Option; + fn validator_id(&self) -> impl Send + Future>; /// Sign a signature with the current validator's private key. - async fn sign(&self, msg: &[u8]) -> Self::Signature; + fn sign(&self, msg: &[u8]) -> impl Send + Future; } -#[async_trait] impl Signer for Arc { type ValidatorId = S::ValidatorId; type Signature = S::Signature; - async fn validator_id(&self) -> Option { - self.as_ref().validator_id().await + fn validator_id(&self) -> impl Send + Future> { + self.as_ref().validator_id() } - async fn sign(&self, msg: &[u8]) -> Self::Signature { - self.as_ref().sign(msg).await + fn sign(&self, msg: &[u8]) -> impl Send + Future { + self.as_ref().sign(msg) } } @@ -210,7 +207,6 @@ pub trait Block: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode } /// Trait representing the distributed system Tendermint is providing consensus over. -#[async_trait] pub trait Network: Sized + Send + Sync { /// The database used to back this. type Db: serai_db::Db; @@ -229,6 +225,7 @@ pub trait Network: Sized + Send + Sync { /// This should include both the time to download the block and the actual processing time. /// /// BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME) must be divisible by 1000. + // TODO: Redefine as Duration const BLOCK_PROCESSING_TIME: u32; /// Network latency time in milliseconds. /// @@ -280,15 +277,19 @@ pub trait Network: Sized + Send + Sync { /// Switching to unauthenticated channels in a system already providing authenticated channels is /// not recommended as this is a minor, temporal inefficiency, while downgrading channels may /// have wider implications. - async fn broadcast(&mut self, msg: SignedMessageFor); + fn broadcast(&mut self, msg: SignedMessageFor) -> impl Send + Future; /// Trigger a slash for the validator in question who was definitively malicious. /// /// The exact process of triggering a slash is undefined and left to the network as a whole. 
- async fn slash(&mut self, validator: Self::ValidatorId, slash_event: SlashEvent); + fn slash( + &mut self, + validator: Self::ValidatorId, + slash_event: SlashEvent, + ) -> impl Send + Future; /// Validate a block. - async fn validate(&self, block: &Self::Block) -> Result<(), BlockError>; + fn validate(&self, block: &Self::Block) -> impl Send + Future>; /// Add a block, returning the proposal for the next one. /// @@ -298,9 +299,9 @@ pub trait Network: Sized + Send + Sync { /// This deviates from the paper which will have a local node refuse to decide on a block it /// considers invalid. This library acknowledges the network did decide on it, leaving handling /// of it to the network, and outside of this scope. - async fn add_block( + fn add_block( &mut self, block: Self::Block, commit: Commit, - ) -> Option; + ) -> impl Send + Future>; } diff --git a/coordinator/tributary/tendermint/src/lib.rs b/coordinator/tributary-sdk/tendermint/src/lib.rs similarity index 100% rename from coordinator/tributary/tendermint/src/lib.rs rename to coordinator/tributary-sdk/tendermint/src/lib.rs diff --git a/coordinator/tributary/tendermint/src/message_log.rs b/coordinator/tributary-sdk/tendermint/src/message_log.rs similarity index 100% rename from coordinator/tributary/tendermint/src/message_log.rs rename to coordinator/tributary-sdk/tendermint/src/message_log.rs diff --git a/coordinator/tributary/tendermint/src/round.rs b/coordinator/tributary-sdk/tendermint/src/round.rs similarity index 100% rename from coordinator/tributary/tendermint/src/round.rs rename to coordinator/tributary-sdk/tendermint/src/round.rs diff --git a/coordinator/tributary/tendermint/src/time.rs b/coordinator/tributary-sdk/tendermint/src/time.rs similarity index 100% rename from coordinator/tributary/tendermint/src/time.rs rename to coordinator/tributary-sdk/tendermint/src/time.rs diff --git a/coordinator/tributary/tendermint/tests/ext.rs b/coordinator/tributary-sdk/tendermint/tests/ext.rs similarity index 91% rename from coordinator/tributary/tendermint/tests/ext.rs rename to coordinator/tributary-sdk/tendermint/tests/ext.rs index bec95ddc..58a5d468 100644 --- a/coordinator/tributary/tendermint/tests/ext.rs +++ b/coordinator/tributary-sdk/tendermint/tests/ext.rs @@ -1,10 +1,9 @@ +use core::future::Future; use std::{ sync::Arc, time::{UNIX_EPOCH, SystemTime, Duration}, }; -use async_trait::async_trait; - use parity_scale_codec::{Encode, Decode}; use futures_util::sink::SinkExt; @@ -21,20 +20,21 @@ type TestValidatorId = u16; type TestBlockId = [u8; 4]; struct TestSigner(u16); -#[async_trait] impl Signer for TestSigner { type ValidatorId = TestValidatorId; type Signature = [u8; 32]; - async fn validator_id(&self) -> Option { - Some(self.0) + fn validator_id(&self) -> impl Send + Future> { + async move { Some(self.0) } } - async fn sign(&self, msg: &[u8]) -> [u8; 32] { - let mut sig = [0; 32]; - sig[.. 2].copy_from_slice(&self.0.to_le_bytes()); - sig[2 .. (2 + 30.min(msg.len()))].copy_from_slice(&msg[.. 30.min(msg.len())]); - sig + fn sign(&self, msg: &[u8]) -> impl Send + Future { + async move { + let mut sig = [0; 32]; + sig[.. 2].copy_from_slice(&self.0.to_le_bytes()); + sig[2 .. (2 + 30.min(msg.len()))].copy_from_slice(&msg[.. 
30.min(msg.len())]); + sig + } } } @@ -111,7 +111,6 @@ struct TestNetwork( Arc, SyncedBlockSender, SyncedBlockResultReceiver)>>>, ); -#[async_trait] impl Network for TestNetwork { type Db = MemDb; diff --git a/coordinator/tributary/Cargo.toml b/coordinator/tributary/Cargo.toml index b6a5a251..431dae3c 100644 --- a/coordinator/tributary/Cargo.toml +++ b/coordinator/tributary/Cargo.toml @@ -1,11 +1,14 @@ [package] -name = "tributary-chain" +name = "serai-coordinator-tributary" version = "0.1.0" -description = "A micro-blockchain to provide consensus and ordering to P2P communication" +description = "The Tributary used by the Serai Coordinator" license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tributary" authors = ["Luke Parker "] +keywords = [] edition = "2021" +publish = false +rust-version = "1.81" [package.metadata.docs.rs] all-features = true @@ -15,35 +18,30 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [dependencies] -async-trait = { version = "0.1", default-features = false } -thiserror = { version = "1", default-features = false } - -subtle = { version = "^2", default-features = false, features = ["std"] } zeroize = { version = "^1.5", default-features = false, features = ["std"] } - -rand = { version = "0.8", default-features = false, features = ["std"] } -rand_chacha = { version = "0.3", default-features = false, features = ["std"] } - -blake2 = { version = "0.10", default-features = false, features = ["std"] } -transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std", "recommended"] } - -ciphersuite = { package = "ciphersuite", path = "../../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] } -schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", default-features = false, features = ["std"] } - -hex = { version = "0.4", default-features = false, features = ["std"] } -log = { version = "0.4", default-features = false, features = ["std"] } - -serai-db = { path = "../../common/db" } +rand_core = { version = "0.6", default-features = false, features = ["std"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] } -futures-util = { version = "0.3", default-features = false, features = ["std", "sink", "channel"] } -futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] } -tendermint = { package = "tendermint-machine", path = "./tendermint" } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } -tokio = { version = "1", default-features = false, features = ["sync", "time", "rt"] } +blake2 = { version = "0.10", default-features = false, features = ["std"] } +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std"] } +dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] } +schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", default-features = false, features = ["std"] } -[dev-dependencies] -tokio = { version = "1", features = ["macros"] } +serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] } + +serai-db = { path = "../../common/db" } +serai-task = { path = "../../common/task", version = "0.1" } + +tributary-sdk = { path = "../tributary-sdk" } + +serai-cosign = { path = "../cosign" } +serai-coordinator-substrate = { path = 
"../substrate" } + +messages = { package = "serai-processor-messages", path = "../../processor/messages" } + +log = { version = "0.4", default-features = false, features = ["std"] } [features] -tests = [] +longer-reattempts = [] diff --git a/coordinator/tributary/LICENSE b/coordinator/tributary/LICENSE index f684d027..621233a9 100644 --- a/coordinator/tributary/LICENSE +++ b/coordinator/tributary/LICENSE @@ -1,6 +1,6 @@ AGPL-3.0-only license -Copyright (c) 2023 Luke Parker +Copyright (c) 2023-2025 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as diff --git a/coordinator/tributary/README.md b/coordinator/tributary/README.md index 6fce976e..384b8f97 100644 --- a/coordinator/tributary/README.md +++ b/coordinator/tributary/README.md @@ -1,3 +1,4 @@ -# Tributary +# Serai Coordinator Tributary -A verifiable, ordered broadcast layer implemented as a BFT micro-blockchain. +The Tributary used by the Serai Coordinator. This includes the `Transaction` +definition and the code to handle blocks added on-chain. diff --git a/coordinator/tributary/src/db.rs b/coordinator/tributary/src/db.rs new file mode 100644 index 00000000..7d5857eb --- /dev/null +++ b/coordinator/tributary/src/db.rs @@ -0,0 +1,529 @@ +use std::collections::HashMap; + +use scale::Encode; +use borsh::{BorshSerialize, BorshDeserialize}; + +use serai_client::{primitives::SeraiAddress, validator_sets::primitives::ExternalValidatorSet}; + +use messages::sign::{VariantSignId, SignId}; + +use serai_db::*; + +use serai_cosign::CosignIntent; + +use crate::transaction::SigningProtocolRound; + +/// A topic within the database which the group participates in +#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)] +pub enum Topic { + /// Vote to remove a participant + RemoveParticipant { + /// The participant to remove + participant: SeraiAddress, + }, + + // DkgParticipation isn't represented here as participations are immediately sent to the + // processor, not accumulated within this databse + /// Participation in the signing protocol to confirm the DKG results on Substrate + DkgConfirmation { + /// The attempt number this is for + attempt: u32, + /// The round of the signing protocol + round: SigningProtocolRound, + }, + + /// The local view of the SlashReport, to be aggregated into the final SlashReport + SlashReport, + + /// Participation in a signing protocol + Sign { + /// The ID of the signing protocol + id: VariantSignId, + /// The attempt number this is for + attempt: u32, + /// The round of the signing protocol + round: SigningProtocolRound, + }, +} + +enum Participating { + Participated, + Everyone, +} + +impl Topic { + // The topic used by the next attempt of this protocol + fn next_attempt_topic(self) -> Option { + #[allow(clippy::match_same_arms)] + match self { + Topic::RemoveParticipant { .. } => None, + Topic::DkgConfirmation { attempt, round: _ } => Some(Topic::DkgConfirmation { + attempt: attempt + 1, + round: SigningProtocolRound::Preprocess, + }), + Topic::SlashReport { .. } => None, + Topic::Sign { id, attempt, round: _ } => { + Some(Topic::Sign { id, attempt: attempt + 1, round: SigningProtocolRound::Preprocess }) + } + } + } + + // The topic for the re-attempt to schedule + fn reattempt_topic(self) -> Option<(u32, Topic)> { + #[allow(clippy::match_same_arms)] + match self { + Topic::RemoveParticipant { .. 
} => None, + Topic::DkgConfirmation { attempt, round } => match round { + SigningProtocolRound::Preprocess => { + let attempt = attempt + 1; + Some(( + attempt, + Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Preprocess }, + )) + } + SigningProtocolRound::Share => None, + }, + Topic::SlashReport { .. } => None, + Topic::Sign { id, attempt, round } => match round { + SigningProtocolRound::Preprocess => { + let attempt = attempt + 1; + Some((attempt, Topic::Sign { id, attempt, round: SigningProtocolRound::Preprocess })) + } + SigningProtocolRound::Share => None, + }, + } + } + + /// The SignId for this topic + /// + /// Returns None if Topic isn't Topic::Sign + pub(crate) fn sign_id(self, set: ExternalValidatorSet) -> Option { + #[allow(clippy::match_same_arms)] + match self { + Topic::RemoveParticipant { .. } => None, + Topic::DkgConfirmation { .. } => None, + Topic::SlashReport { .. } => None, + Topic::Sign { id, attempt, round: _ } => Some(SignId { session: set.session, id, attempt }), + } + } + + /// The SignId for this DKG Confirmation. + /// + /// This is undefined except for being consistent to the DKG Confirmation signing protocol and + /// unique across sets. + /// + /// Returns None if Topic isn't Topic::DkgConfirmation. + pub(crate) fn dkg_confirmation_sign_id( + self, + set: ExternalValidatorSet, + ) -> Option { + #[allow(clippy::match_same_arms)] + match self { + Topic::RemoveParticipant { .. } => None, + Topic::DkgConfirmation { attempt, round: _ } => Some({ + let id = { + let mut id = [0; 32]; + let encoded_set = set.encode(); + id[.. encoded_set.len()].copy_from_slice(&encoded_set); + VariantSignId::Batch(id) + }; + SignId { session: set.session, id, attempt } + }), + Topic::SlashReport { .. } => None, + Topic::Sign { .. } => None, + } + } + + /// The topic which precedes this topic as a prerequisite + /// + /// The preceding topic must define this topic as succeeding + fn preceding_topic(self) -> Option { + #[allow(clippy::match_same_arms)] + match self { + Topic::RemoveParticipant { .. } => None, + Topic::DkgConfirmation { attempt, round } => match round { + SigningProtocolRound::Preprocess => None, + SigningProtocolRound::Share => { + Some(Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Preprocess }) + } + }, + Topic::SlashReport { .. } => None, + Topic::Sign { id, attempt, round } => match round { + SigningProtocolRound::Preprocess => None, + SigningProtocolRound::Share => { + Some(Topic::Sign { id, attempt, round: SigningProtocolRound::Preprocess }) + } + }, + } + } + + /// The topic which succeeds this topic, with this topic as a prerequisite + /// + /// The succeeding topic must define this topic as preceding + fn succeeding_topic(self) -> Option { + #[allow(clippy::match_same_arms)] + match self { + Topic::RemoveParticipant { .. } => None, + Topic::DkgConfirmation { attempt, round } => match round { + SigningProtocolRound::Preprocess => { + Some(Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Share }) + } + SigningProtocolRound::Share => None, + }, + Topic::SlashReport { .. } => None, + Topic::Sign { id, attempt, round } => match round { + SigningProtocolRound::Preprocess => { + Some(Topic::Sign { id, attempt, round: SigningProtocolRound::Share }) + } + SigningProtocolRound::Share => None, + }, + } + } + + /// If this topic requires recognition before entries are permitted for it. 
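// Illustrative sketch of the round/attempt progression the methods above encode for signing
// topics: `Preprocess` precedes `Share` within an attempt, and only a `Preprocess` schedules
// the `Preprocess` of the next attempt. Simplified stand-in types, not the crate's `Topic`.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Round { Preprocess, Share }

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct SignTopic { attempt: u32, round: Round }

impl SignTopic {
  // The topic which succeeds this one within the same attempt
  fn succeeding(self) -> Option<Self> {
    match self.round {
      Round::Preprocess => Some(SignTopic { attempt: self.attempt, round: Round::Share }),
      Round::Share => None,
    }
  }
  // The topic scheduled if this attempt has to be retried
  fn reattempt(self) -> Option<Self> {
    match self.round {
      Round::Preprocess => {
        Some(SignTopic { attempt: self.attempt + 1, round: Round::Preprocess })
      }
      Round::Share => None,
    }
  }
}

fn main() {
  let topic = SignTopic { attempt: 0, round: Round::Preprocess };
  assert_eq!(topic.succeeding(), Some(SignTopic { attempt: 0, round: Round::Share }));
  assert_eq!(topic.reattempt(), Some(SignTopic { attempt: 1, round: Round::Preprocess }));
}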
+ pub fn requires_recognition(&self) -> bool { + #[allow(clippy::match_same_arms)] + match self { + // We don't require recognition to remove a participant + Topic::RemoveParticipant { .. } => false, + // We don't require recognition for the first attempt, solely the re-attempts + Topic::DkgConfirmation { attempt, .. } => *attempt != 0, + // We don't require recognition for the slash report + Topic::SlashReport { .. } => false, + // We do require recognition for every sign protocol + Topic::Sign { .. } => true, + } + } + + fn required_participation(&self, n: u16) -> u16 { + let _ = self; + // All of our topics require 2/3rds participation + ((2 * n) / 3) + 1 + } + + fn participating(&self) -> Participating { + #[allow(clippy::match_same_arms)] + match self { + Topic::RemoveParticipant { .. } => Participating::Everyone, + Topic::DkgConfirmation { .. } => Participating::Participated, + Topic::SlashReport { .. } => Participating::Everyone, + Topic::Sign { .. } => Participating::Participated, + } + } +} + +pub(crate) trait Borshy: BorshSerialize + BorshDeserialize {} +impl Borshy for T {} + +/// The resulting data set from an accumulation +pub(crate) enum DataSet { + /// Accumulating this did not produce a data set to act on + /// (non-existent, not ready, prior handled, not participating, etc.) + None, + /// The data set was ready and we are participating in this event + Participating(HashMap), +} + +create_db!( + CoordinatorTributary { + // The last handled tributary block's (number, hash) + LastHandledTributaryBlock: (set: ExternalValidatorSet) -> (u64, [u8; 32]), + + // The slash points a validator has accrued, with u32::MAX representing a fatal slash. + SlashPoints: (set: ExternalValidatorSet, validator: SeraiAddress) -> u32, + + // The cosign intent for a Substrate block + CosignIntents: (set: ExternalValidatorSet, substrate_block_hash: [u8; 32]) -> CosignIntent, + // The latest Substrate block to cosign. + LatestSubstrateBlockToCosign: (set: ExternalValidatorSet) -> [u8; 32], + // The hash of the block we're actively cosigning. + ActivelyCosigning: (set: ExternalValidatorSet) -> [u8; 32], + // If this block has already been cosigned. + Cosigned: (set: ExternalValidatorSet, substrate_block_hash: [u8; 32]) -> (), + + // The plans to recognize upon a `Transaction::SubstrateBlock` being included on-chain. + SubstrateBlockPlans: ( + set: ExternalValidatorSet, + substrate_block_hash: [u8; 32] + ) -> Vec<[u8; 32]>, + + // The weight accumulated for a topic. + AccumulatedWeight: (set: ExternalValidatorSet, topic: Topic) -> u16, + // The entries accumulated for a topic, by validator. + Accumulated: ( + set: ExternalValidatorSet, + topic: Topic, + validator: SeraiAddress + ) -> D, + + // Topics to be recognized as of a certain block number due to the reattempt protocol. 
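// Illustrative sketch of the participation threshold used above: strictly more than
// two-thirds of the total key-share weight, computed with integer division.
fn required_participation(total_weight: u16) -> u16 {
  ((2 * total_weight) / 3) + 1
}

fn main() {
  assert_eq!(required_participation(3), 3);  // (6 / 3) + 1
  assert_eq!(required_participation(4), 3);  // (8 / 3 = 2) + 1
  assert_eq!(required_participation(10), 7); // (20 / 3 = 6) + 1
}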
+ Reattempt: (set: ExternalValidatorSet, block_number: u64) -> Vec, + } +); + +db_channel!( + CoordinatorTributary { + // Messages to send to the processor + ProcessorMessages: (set: ExternalValidatorSet) -> messages::CoordinatorMessage, + // Messages for the DKG confirmation + DkgConfirmationMessages: (set: ExternalValidatorSet) -> messages::sign::CoordinatorMessage, + // Topics which have been explicitly recognized + RecognizedTopics: (set: ExternalValidatorSet) -> Topic, + } +); + +pub(crate) struct TributaryDb; +impl TributaryDb { + pub(crate) fn last_handled_tributary_block( + getter: &impl Get, + set: ExternalValidatorSet, + ) -> Option<(u64, [u8; 32])> { + LastHandledTributaryBlock::get(getter, set) + } + pub(crate) fn set_last_handled_tributary_block( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + block_number: u64, + block_hash: [u8; 32], + ) { + LastHandledTributaryBlock::set(txn, set, &(block_number, block_hash)); + } + + pub(crate) fn latest_substrate_block_to_cosign( + getter: &impl Get, + set: ExternalValidatorSet, + ) -> Option<[u8; 32]> { + LatestSubstrateBlockToCosign::get(getter, set) + } + pub(crate) fn set_latest_substrate_block_to_cosign( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + substrate_block_hash: [u8; 32], + ) { + LatestSubstrateBlockToCosign::set(txn, set, &substrate_block_hash); + } + pub(crate) fn actively_cosigning( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + ) -> Option<[u8; 32]> { + ActivelyCosigning::get(txn, set) + } + pub(crate) fn start_cosigning( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + substrate_block_hash: [u8; 32], + substrate_block_number: u64, + ) { + assert!( + ActivelyCosigning::get(txn, set).is_none(), + "starting cosigning while already cosigning" + ); + ActivelyCosigning::set(txn, set, &substrate_block_hash); + + Self::recognize_topic( + txn, + set, + Topic::Sign { + id: VariantSignId::Cosign(substrate_block_number), + attempt: 0, + round: SigningProtocolRound::Preprocess, + }, + ); + } + pub(crate) fn finish_cosigning(txn: &mut impl DbTxn, set: ExternalValidatorSet) { + assert!(ActivelyCosigning::take(txn, set).is_some(), "finished cosigning but not cosigning"); + } + pub(crate) fn mark_cosigned( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + substrate_block_hash: [u8; 32], + ) { + Cosigned::set(txn, set, substrate_block_hash, &()); + } + pub(crate) fn cosigned( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + substrate_block_hash: [u8; 32], + ) -> bool { + Cosigned::get(txn, set, substrate_block_hash).is_some() + } + + pub(crate) fn recognize_topic(txn: &mut impl DbTxn, set: ExternalValidatorSet, topic: Topic) { + AccumulatedWeight::set(txn, set, topic, &0); + RecognizedTopics::send(txn, set, &topic); + } + pub(crate) fn recognized(getter: &impl Get, set: ExternalValidatorSet, topic: Topic) -> bool { + AccumulatedWeight::get(getter, set, topic).is_some() + } + + pub(crate) fn start_of_block(txn: &mut impl DbTxn, set: ExternalValidatorSet, block_number: u64) { + for topic in Reattempt::take(txn, set, block_number).unwrap_or(vec![]) { + /* + TODO: Slash all people who preprocessed but didn't share, and add a delay to their + participations in future protocols. When we call accumulate, if the participant has no + delay, their accumulation occurs immediately. Else, the accumulation occurs after the + specified delay. + + This means even if faulty validators are first to preprocess, they won't be selected for + the signing set unless there's a lack of less faulty validators available. 
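// Illustrative sketch of how the re-attempt delay appears to be scheduled just below: a base
// delay of five minutes is converted to a block count (milliseconds divided by the target
// block time, rounded up), then scaled linearly by the attempt number. The 6-second block
// time used here is purely an assumption for the worked example.
fn blocks_till_reattempt(attempt: u32, target_block_time_ms: u32) -> u64 {
  let base_reattempt_delay = (5u32 * 60 * 1000).div_ceil(target_block_time_ms);
  u64::from(attempt * base_reattempt_delay)
}

fn main() {
  assert_eq!(blocks_till_reattempt(1, 6_000), 50);
  assert_eq!(blocks_till_reattempt(3, 6_000), 150);
}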
+ + We need to decrease this delay upon successful partipations, and set it to the maximum upon + `f + 1` validators voting to fatally slash the validator in question. This won't issue the + fatal slash but should still be effective. + */ + Self::recognize_topic(txn, set, topic); + if let Some(id) = topic.sign_id(set) { + Self::send_message(txn, set, messages::sign::CoordinatorMessage::Reattempt { id }); + } else if let Some(id) = topic.dkg_confirmation_sign_id(set) { + DkgConfirmationMessages::send( + txn, + set, + &messages::sign::CoordinatorMessage::Reattempt { id }, + ); + } + } + } + + pub(crate) fn fatal_slash( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + validator: SeraiAddress, + reason: &str, + ) { + log::warn!("{validator} fatally slashed: {reason}"); + SlashPoints::set(txn, set, validator, &u32::MAX); + } + + pub(crate) fn is_fatally_slashed( + getter: &impl Get, + set: ExternalValidatorSet, + validator: SeraiAddress, + ) -> bool { + SlashPoints::get(getter, set, validator).unwrap_or(0) == u32::MAX + } + + #[allow(clippy::too_many_arguments)] + pub(crate) fn accumulate( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + validators: &[SeraiAddress], + total_weight: u16, + block_number: u64, + topic: Topic, + validator: SeraiAddress, + validator_weight: u16, + data: &D, + ) -> DataSet { + // This function will only be called once for a (validator, topic) tuple due to how we handle + // nonces on transactions (deterministically to the topic) + + let accumulated_weight = AccumulatedWeight::get(txn, set, topic); + if topic.requires_recognition() && accumulated_weight.is_none() { + Self::fatal_slash( + txn, + set, + validator, + "participated in unrecognized topic which requires recognition", + ); + return DataSet::None; + } + let mut accumulated_weight = accumulated_weight.unwrap_or(0); + + // Check if there's a preceding topic, this validator participated + let preceding_topic = topic.preceding_topic(); + if let Some(preceding_topic) = preceding_topic { + if Accumulated::::get(txn, set, preceding_topic, validator).is_none() { + Self::fatal_slash( + txn, + set, + validator, + "participated in topic without participating in prior", + ); + return DataSet::None; + } + } + + // The complete lack of validation on the data by these NOPs opens the potential for spam here + + // If we've already accumulated past the threshold, NOP + if accumulated_weight >= topic.required_participation(total_weight) { + return DataSet::None; + } + // If this is for an old attempt, NOP + if let Some(next_attempt_topic) = topic.next_attempt_topic() { + if AccumulatedWeight::get(txn, set, next_attempt_topic).is_some() { + return DataSet::None; + } + } + + // Accumulate the data + accumulated_weight += validator_weight; + AccumulatedWeight::set(txn, set, topic, &accumulated_weight); + Accumulated::set(txn, set, topic, validator, data); + + // Check if we now cross the weight threshold + if accumulated_weight >= topic.required_participation(total_weight) { + // Queue this for re-attempt after enough time passes + let reattempt_topic = topic.reattempt_topic(); + if let Some((attempt, reattempt_topic)) = reattempt_topic { + // 5 minutes + #[cfg(not(feature = "longer-reattempts"))] + const BASE_REATTEMPT_DELAY: u32 = + (5u32 * 60 * 1000).div_ceil(tributary_sdk::tendermint::TARGET_BLOCK_TIME); + + // 10 minutes, intended for latent environments like the GitHub CI + #[cfg(feature = "longer-reattempts")] + const BASE_REATTEMPT_DELAY: u32 = + (10u32 * 60 * 
1000).div_ceil(tributary_sdk::tendermint::TARGET_BLOCK_TIME); + + // Linearly scale the time for the protocol with the attempt number + let blocks_till_reattempt = u64::from(attempt * BASE_REATTEMPT_DELAY); + + let recognize_at = block_number + blocks_till_reattempt; + let mut queued = Reattempt::get(txn, set, recognize_at).unwrap_or(Vec::with_capacity(1)); + queued.push(reattempt_topic); + Reattempt::set(txn, set, recognize_at, &queued); + } + + // Register the succeeding topic + let succeeding_topic = topic.succeeding_topic(); + if let Some(succeeding_topic) = succeeding_topic { + Self::recognize_topic(txn, set, succeeding_topic); + } + + // Fetch and return all participations + let mut data_set = HashMap::with_capacity(validators.len()); + for validator in validators { + if let Some(data) = Accumulated::::get(txn, set, topic, *validator) { + // Clean this data up if there's not a re-attempt topic + // If there is a re-attempt topic, we clean it up upon re-attempt + if reattempt_topic.is_none() { + Accumulated::::del(txn, set, topic, *validator); + } + data_set.insert(*validator, data); + } + } + let participated = data_set.contains_key(&validator); + match topic.participating() { + Participating::Participated => { + if participated { + DataSet::Participating(data_set) + } else { + DataSet::None + } + } + Participating::Everyone => DataSet::Participating(data_set), + } + } else { + DataSet::None + } + } + + pub(crate) fn send_message( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + message: impl Into, + ) { + ProcessorMessages::send(txn, set, &message.into()); + } +} diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index 0ea74bfe..1c82d5b9 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -1,388 +1,709 @@ -use core::{marker::PhantomData, fmt::Debug}; -use std::{sync::Arc, io}; +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] -use async_trait::async_trait; +use core::{marker::PhantomData, future::Future}; +use std::collections::HashMap; -use zeroize::Zeroizing; +use ciphersuite::group::GroupEncoding; +use dkg::Participant; -use ciphersuite::{Ciphersuite, Ristretto}; - -use scale::Decode; -use futures_channel::mpsc::UnboundedReceiver; -use futures_util::{StreamExt, SinkExt}; -use ::tendermint::{ - ext::{BlockNumber, Commit, Block as BlockTrait, Network}, - SignedMessageFor, SyncedBlock, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender, - TendermintMachine, TendermintHandle, +use serai_client::{ + primitives::SeraiAddress, + validator_sets::primitives::{ExternalValidatorSet, Slash}, }; -pub use ::tendermint::Evidence; +use serai_db::*; +use serai_task::ContinuallyRan; -use serai_db::Db; +use tributary_sdk::{ + tendermint::{ + tx::{TendermintTx, Evidence, decode_signed_message}, + TendermintNetwork, + }, + Signed as TributarySigned, TransactionKind, TransactionTrait, + Transaction as TributaryTransaction, Block, TributaryReader, P2p, +}; -use tokio::sync::RwLock; +use serai_cosign::CosignIntent; +use serai_coordinator_substrate::NewSetInformation; -mod merkle; -pub(crate) use merkle::*; +use messages::sign::{VariantSignId, SignId}; -pub mod transaction; -pub use transaction::{TransactionError, Signed, TransactionKind, Transaction as TransactionTrait}; +mod transaction; +pub use transaction::{SigningProtocolRound, Signed, Transaction}; -use crate::tendermint::tx::TendermintTx; +mod db; +use db::*; +pub use db::Topic; -mod provided; -pub(crate) use 
provided::*; -pub use provided::ProvidedError; - -mod block; -pub use block::*; - -mod blockchain; -pub(crate) use blockchain::*; - -mod mempool; -pub(crate) use mempool::*; - -pub mod tendermint; -pub(crate) use crate::tendermint::*; - -#[cfg(any(test, feature = "tests"))] -pub mod tests; - -/// Size limit for an individual transaction. -pub const TRANSACTION_SIZE_LIMIT: usize = 3_000_000; -/// Amount of transactions a single account may have in the mempool. -pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50; -/// Block size limit. -// This targets a growth limit of roughly 45 GB a day, under load, in order to prevent a malicious -// participant from flooding disks and causing out of space errors in order processes. -pub const BLOCK_SIZE_LIMIT: usize = 3_001_000; - -pub(crate) const TENDERMINT_MESSAGE: u8 = 0; -pub(crate) const TRANSACTION_MESSAGE: u8 = 1; - -#[allow(clippy::large_enum_variant)] -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum Transaction { - Tendermint(TendermintTx), - Application(T), -} - -impl ReadWrite for Transaction { - fn read(reader: &mut R) -> io::Result { - let mut kind = [0]; - reader.read_exact(&mut kind)?; - match kind[0] { - 0 => { - let tx = TendermintTx::read(reader)?; - Ok(Transaction::Tendermint(tx)) - } - 1 => { - let tx = T::read(reader)?; - Ok(Transaction::Application(tx)) - } - _ => Err(io::Error::other("invalid transaction type")), - } - } - fn write(&self, writer: &mut W) -> io::Result<()> { - match self { - Transaction::Tendermint(tx) => { - writer.write_all(&[0])?; - tx.write(writer) - } - Transaction::Application(tx) => { - writer.write_all(&[1])?; - tx.write(writer) - } - } +/// Messages to send to the Processors. +pub struct ProcessorMessages; +impl ProcessorMessages { + /// Try to receive a message to send to a Processor. + pub fn try_recv( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + ) -> Option { + db::ProcessorMessages::try_recv(txn, set) } } -impl Transaction { - pub fn hash(&self) -> [u8; 32] { - match self { - Transaction::Tendermint(tx) => tx.hash(), - Transaction::Application(tx) => tx.hash(), - } - } - - pub fn kind(&self) -> TransactionKind<'_> { - match self { - Transaction::Tendermint(tx) => tx.kind(), - Transaction::Application(tx) => tx.kind(), - } - } -} - -/// An item which can be read and written. -pub trait ReadWrite: Sized { - fn read(reader: &mut R) -> io::Result; - fn write(&self, writer: &mut W) -> io::Result<()>; - - fn serialize(&self) -> Vec { - // BlockHeader is 64 bytes and likely the smallest item in this system - let mut buf = Vec::with_capacity(64); - self.write(&mut buf).unwrap(); - buf - } -} - -#[async_trait] -pub trait P2p: 'static + Send + Sync + Clone + Debug { - /// Broadcast a message to all other members of the Tributary with the specified genesis. +/// Messages for the DKG confirmation. +pub struct DkgConfirmationMessages; +impl DkgConfirmationMessages { + /// Receive a message for the DKG confirmation. /// - /// The Tributary will re-broadcast consensus messages on a fixed interval to ensure they aren't - /// prematurely dropped from the P2P layer. THe P2P layer SHOULD perform content-based - /// deduplication to ensure a sane amount of load. - async fn broadcast(&self, genesis: [u8; 32], msg: Vec); -} - -#[async_trait] -impl P2p for Arc
<P>
{ - async fn broadcast(&self, genesis: [u8; 32], msg: Vec) { - (*self).broadcast(genesis, msg).await + /// These messages use the ProcessorMessage API as that's what existing flows are designed + /// around, enabling their reuse. The ProcessorMessage includes a VariantSignId which isn't + /// applicable to the DKG confirmation (as there's no such variant of the VariantSignId). The + /// actual ID is undefined other than it will be consistent to the signing protocol and unique + /// across validator sets, with no guarantees of uniqueness across contexts. + pub fn try_recv( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + ) -> Option { + db::DkgConfirmationMessages::try_recv(txn, set) } } -#[derive(Clone)] -pub struct Tributary { - db: D, - - genesis: [u8; 32], - network: TendermintNetwork, - - synced_block: Arc>>>, - synced_block_result: Arc>, - messages: Arc>>>, +/// The cosign intents. +pub struct CosignIntents; +impl CosignIntents { + /// Provide a CosignIntent for this Tributary. + /// + /// This must be done before the associated `Transaction::Cosign` is provided. + pub fn provide(txn: &mut impl DbTxn, set: ExternalValidatorSet, intent: &CosignIntent) { + db::CosignIntents::set(txn, set, intent.block_hash, intent); + } + fn take( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + substrate_block_hash: [u8; 32], + ) -> Option { + db::CosignIntents::take(txn, set, substrate_block_hash) + } } -impl Tributary { - pub async fn new( - db: D, - genesis: [u8; 32], - start_time: u64, - key: Zeroizing<::F>, - validators: Vec<(::G, u64)>, - p2p: P, - ) -> Option { - log::info!("new Tributary with genesis {}", hex::encode(genesis)); +/// An interface to the topics recognized on this Tributary. +pub struct RecognizedTopics; +impl RecognizedTopics { + /// If this topic has been recognized by this Tributary. + /// + /// This will either be by explicit recognition or participation. + pub fn recognized(getter: &impl Get, set: ExternalValidatorSet, topic: Topic) -> bool { + TributaryDb::recognized(getter, set, topic) + } + /// The next topic requiring recognition which has been recognized by this Tributary. + pub fn try_recv_topic_requiring_recognition( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + ) -> Option { + db::RecognizedTopics::try_recv(txn, set) + } +} - let validators_vec = validators.iter().map(|validator| validator.0).collect::>(); +/// The plans to recognize upon a `Transaction::SubstrateBlock` being included on-chain. +pub struct SubstrateBlockPlans; +impl SubstrateBlockPlans { + /// Set the plans to recognize upon the associated `Transaction::SubstrateBlock` being included + /// on-chain. + /// + /// This must be done before the associated `Transaction::Cosign` is provided. + pub fn set( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + substrate_block_hash: [u8; 32], + plans: &Vec<[u8; 32]>, + ) { + db::SubstrateBlockPlans::set(txn, set, substrate_block_hash, plans); + } + fn take( + txn: &mut impl DbTxn, + set: ExternalValidatorSet, + substrate_block_hash: [u8; 32], + ) -> Option> { + db::SubstrateBlockPlans::take(txn, set, substrate_block_hash) + } +} - let signer = Arc::new(Signer::new(genesis, key)); - let validators = Arc::new(Validators::new(genesis, validators)?); +struct ScanBlock<'a, TD: Db, TDT: DbTxn, P: P2p> { + _td: PhantomData, + _p2p: PhantomData
<P>
, + tributary_txn: &'a mut TDT, + set: &'a NewSetInformation, + validators: &'a [SeraiAddress], + total_weight: u16, + validator_weights: &'a HashMap, +} +impl ScanBlock<'_, TD, TDT, P> { + fn potentially_start_cosign(&mut self) { + // Don't start a new cosigning instance if we're actively running one + if TributaryDb::actively_cosigning(self.tributary_txn, self.set.set).is_some() { + return; + } - let mut blockchain = Blockchain::new(db.clone(), genesis, &validators_vec); - let block_number = BlockNumber(blockchain.block_number()); - - let start_time = if let Some(commit) = blockchain.commit(&blockchain.tip()) { - Commit::::decode(&mut commit.as_ref()).unwrap().end_time - } else { - start_time + // Fetch the latest intended-to-be-cosigned block + let Some(latest_substrate_block_to_cosign) = + TributaryDb::latest_substrate_block_to_cosign(self.tributary_txn, self.set.set) + else { + return; }; - let proposal = TendermintBlock( - blockchain.build_block::>(&validators).serialize(), + + // If it was already cosigned, return + if TributaryDb::cosigned(self.tributary_txn, self.set.set, latest_substrate_block_to_cosign) { + return; + } + + let intent = + CosignIntents::take(self.tributary_txn, self.set.set, latest_substrate_block_to_cosign) + .expect("Transaction::Cosign locally provided but CosignIntents wasn't populated"); + assert_eq!( + intent.block_hash, latest_substrate_block_to_cosign, + "provided CosignIntent wasn't saved by its block hash" ); - let blockchain = Arc::new(RwLock::new(blockchain)); - let network = TendermintNetwork { genesis, signer, validators, blockchain, p2p }; - - let TendermintHandle { synced_block, synced_block_result, messages, machine } = - TendermintMachine::new( - db.clone(), - network.clone(), - genesis, - block_number, - start_time, - proposal, - ) - .await; - tokio::spawn(machine.run()); - - Some(Self { - db, - genesis, - network, - synced_block: Arc::new(RwLock::new(synced_block)), - synced_block_result: Arc::new(RwLock::new(synced_block_result)), - messages: Arc::new(RwLock::new(messages)), - }) - } - - pub fn block_time() -> u32 { - TendermintNetwork::::block_time() - } - - pub fn genesis(&self) -> [u8; 32] { - self.genesis - } - - pub async fn block_number(&self) -> u64 { - self.network.blockchain.read().await.block_number() - } - pub async fn tip(&self) -> [u8; 32] { - self.network.blockchain.read().await.tip() - } - - pub fn reader(&self) -> TributaryReader { - TributaryReader(self.db.clone(), self.genesis, PhantomData) - } - - pub async fn provide_transaction(&self, tx: T) -> Result<(), ProvidedError> { - self.network.blockchain.write().await.provide_transaction(tx) - } - - pub async fn next_nonce( - &self, - signer: &::G, - order: &[u8], - ) -> Option { - self.network.blockchain.read().await.next_nonce(signer, order) - } - - // Returns Ok(true) if new, Ok(false) if an already present unsigned, or the error. 
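// Illustrative sketch of the cosign hand-off in `potentially_start_cosign` above, reduced to
// in-memory state: only one cosign runs at a time, the most recently requested block is the
// one selected, and blocks already marked cosigned are skipped. Simplified stand-in, not the
// crate's storage-backed implementation.
use std::collections::HashSet;

#[derive(Default)]
struct CosignState {
  actively_cosigning: Option<[u8; 32]>,
  latest_to_cosign: Option<[u8; 32]>,
  cosigned: HashSet<[u8; 32]>,
}

impl CosignState {
  fn potentially_start(&mut self) -> Option<[u8; 32]> {
    // Don't start a new cosign while one is in flight
    if self.actively_cosigning.is_some() {
      return None;
    }
    // Fetch the latest intended-to-be-cosigned block, if any
    let latest = self.latest_to_cosign?;
    // If it was already cosigned, there's nothing to do
    if self.cosigned.contains(&latest) {
      return None;
    }
    self.actively_cosigning = Some(latest);
    Some(latest)
  }
}

fn main() {
  let mut state = CosignState::default();
  state.latest_to_cosign = Some([1; 32]);
  assert_eq!(state.potentially_start(), Some([1; 32]));
  // A second request while one is active doesn't start another cosign
  state.latest_to_cosign = Some([2; 32]);
  assert_eq!(state.potentially_start(), None);
}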
- // Safe to be &self since the only meaningful usage of self is self.network.blockchain which - // successfully acquires its own write lock - pub async fn add_transaction(&self, tx: T) -> Result { - let tx = Transaction::Application(tx); - let mut to_broadcast = vec![TRANSACTION_MESSAGE]; - tx.write(&mut to_broadcast).unwrap(); - let res = self.network.blockchain.write().await.add_transaction::>( - true, - tx, - &self.network.signature_scheme(), + // Mark us as actively cosigning + TributaryDb::start_cosigning( + self.tributary_txn, + self.set.set, + latest_substrate_block_to_cosign, + intent.block_number, + ); + // Send the message for the processor to start signing + TributaryDb::send_message( + self.tributary_txn, + self.set.set, + messages::coordinator::CoordinatorMessage::CosignSubstrateBlock { + session: self.set.set.session, + cosign: intent.into_cosign(self.set.set.network), + }, ); - if res == Ok(true) { - self.network.p2p.broadcast(self.genesis, to_broadcast).await; - } - res } - async fn sync_block_internal( - &self, - block: Block, - commit: Vec, - result: &mut UnboundedReceiver, - ) -> bool { - let (tip, block_number) = { - let blockchain = self.network.blockchain.read().await; - (blockchain.tip(), blockchain.block_number()) - }; + fn accumulate_dkg_confirmation + Borshy>( + &mut self, + block_number: u64, + topic: Topic, + data: &D, + signer: SeraiAddress, + ) -> Option<(SignId, HashMap>)> { + match TributaryDb::accumulate::( + self.tributary_txn, + self.set.set, + self.validators, + self.total_weight, + block_number, + topic, + signer, + self.validator_weights[&signer], + data, + ) { + DataSet::None => None, + DataSet::Participating(data_set) => { + let id = topic.dkg_confirmation_sign_id(self.set.set).unwrap(); - if block.header.parent != tip { - log::debug!("told to sync a block whose parent wasn't our tip"); - return false; - } + // This will be used in a MuSig protocol, so the Participant indexes are the validator's + // position in the list regardless of their weight + let flatten_data_set = |data_set: HashMap<_, D>| { + let mut entries = HashMap::with_capacity(usize::from(self.total_weight)); + for (validator, participation) in data_set { + let (index, (_validator, _weight)) = &self + .set + .validators + .iter() + .enumerate() + .find(|(_i, (validator_i, _weight))| validator == *validator_i) + .unwrap(); + // The index is zero-indexed yet participants are one-indexed + let index = index + 1; - let block = TendermintBlock(block.serialize()); - let mut commit_ref = commit.as_ref(); - let Ok(commit) = Commit::>::decode(&mut commit_ref) else { - log::error!("sent an invalidly serialized commit"); - return false; - }; - // Storage DoS vector. We *could* truncate to solely the relevant portion, trying to save this, - // yet then we'd have to test the truncation was performed correctly. - if !commit_ref.is_empty() { - log::error!("sent an commit with additional data after it"); - return false; - } - if !self.network.verify_commit(block.id(), &commit) { - log::error!("sent an invalid commit"); - return false; - } - - let number = BlockNumber(block_number + 1); - self.synced_block.write().await.send(SyncedBlock { number, block, commit }).await.unwrap(); - result.next().await.unwrap() - } - - // Sync a block. - // TODO: Since we have a static validator set, we should only need the tail commit? 
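// Illustrative sketch of the MuSig index flattening used for the DKG-confirmation data set
// above: the participant index is the validator's one-indexed position in the set's
// validator list, independent of the validator's key-share weight. Types are simplified.
fn musig_index(validators: &[[u8; 32]], validator: [u8; 32]) -> Option<u16> {
  validators
    .iter()
    .position(|v| *v == validator)
    // Positions are zero-indexed, yet participants are one-indexed
    .map(|index| u16::try_from(index + 1).unwrap())
}

fn main() {
  let validators = [[1u8; 32], [2u8; 32], [3u8; 32]];
  assert_eq!(musig_index(&validators, [2u8; 32]), Some(2));
  assert_eq!(musig_index(&validators, [9u8; 32]), None);
}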
- pub async fn sync_block(&self, block: Block, commit: Vec) -> bool { - let mut result = self.synced_block_result.write().await; - self.sync_block_internal(block, commit, &mut result).await - } - - // Return true if the message should be rebroadcasted. - pub async fn handle_message(&self, msg: &[u8]) -> bool { - match msg.first() { - Some(&TRANSACTION_MESSAGE) => { - let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else { - log::error!("received invalid transaction message"); - return false; + entries.insert( + Participant::new(u16::try_from(index).unwrap()).unwrap(), + participation.as_ref().to_vec(), + ); + } + entries }; + let data_set = flatten_data_set(data_set); + Some((id, data_set)) + } + } + } - // TODO: Sync mempools with fellow peers - // Can we just rebroadcast transactions not included for at least two blocks? - let res = - self.network.blockchain.write().await.add_transaction::>( - false, - tx, - &self.network.signature_scheme(), + fn handle_application_tx(&mut self, block_number: u64, tx: Transaction) { + let signer = |signed: Signed| SeraiAddress(signed.signer().to_bytes()); + + if let TransactionKind::Signed(_, TributarySigned { signer, .. }) = tx.kind() { + // Don't handle transactions from those fatally slashed + // TODO: The fact they can publish these TXs makes this a notable spam vector + if TributaryDb::is_fatally_slashed( + self.tributary_txn, + self.set.set, + SeraiAddress(signer.to_bytes()), + ) { + return; + } + } + + let topic = tx.topic(); + match tx { + // Accumulate this vote and fatally slash the participant if past the threshold + Transaction::RemoveParticipant { participant, signed } => { + let signer = signer(signed); + + // Check the participant voted to be removed actually exists + if !self.validators.iter().any(|validator| *validator == participant) { + TributaryDb::fatal_slash( + self.tributary_txn, + self.set.set, + signer, + "voted to remove non-existent participant", ); - log::debug!("received transaction message. 
valid new transaction: {res:?}"); - res == Ok(true) + return; + } + + match TributaryDb::accumulate( + self.tributary_txn, + self.set.set, + self.validators, + self.total_weight, + block_number, + topic.unwrap(), + signer, + self.validator_weights[&signer], + &(), + ) { + DataSet::None => {} + DataSet::Participating(_) => { + TributaryDb::fatal_slash( + self.tributary_txn, + self.set.set, + participant, + "voted to remove", + ); + } + }; } - Some(&TENDERMINT_MESSAGE) => { - let Ok(msg) = - SignedMessageFor::>::decode::<&[u8]>(&mut &msg[1 ..]) + // Send the participation to the processor + Transaction::DkgParticipation { participation, signed } => { + TributaryDb::send_message( + self.tributary_txn, + self.set.set, + messages::key_gen::CoordinatorMessage::Participation { + session: self.set.set.session, + participant: self.set.participant_indexes[&signer(signed)][0], + participation, + }, + ); + } + Transaction::DkgConfirmationPreprocess { attempt: _, preprocess, signed } => { + let topic = topic.unwrap(); + let signer = signer(signed); + + let Some((id, data_set)) = + self.accumulate_dkg_confirmation(block_number, topic, &preprocess, signer) else { - log::error!("received invalid tendermint message"); - return false; + return; }; - self.messages.write().await.send(msg).await.unwrap(); - false + db::DkgConfirmationMessages::send( + self.tributary_txn, + self.set.set, + &messages::sign::CoordinatorMessage::Preprocesses { id, preprocesses: data_set }, + ); + } + Transaction::DkgConfirmationShare { attempt: _, share, signed } => { + let topic = topic.unwrap(); + let signer = signer(signed); + + let Some((id, data_set)) = + self.accumulate_dkg_confirmation(block_number, topic, &share, signer) + else { + return; + }; + + db::DkgConfirmationMessages::send( + self.tributary_txn, + self.set.set, + &messages::sign::CoordinatorMessage::Shares { id, shares: data_set }, + ); } - _ => false, + Transaction::Cosign { substrate_block_hash } => { + // Update the latest intended-to-be-cosigned Substrate block + TributaryDb::set_latest_substrate_block_to_cosign( + self.tributary_txn, + self.set.set, + substrate_block_hash, + ); + // Start a new cosign if we aren't already working on one + self.potentially_start_cosign(); + } + Transaction::Cosigned { substrate_block_hash } => { + /* + We provide one Cosigned per Cosign transaction, but they have independent orders. This + means we may receive Cosigned before Cosign. In order to ensure we only start work on + not-yet-Cosigned cosigns, we flag all cosigned blocks as cosigned. Then, when we choose + the next block to work on, we won't if it's already been cosigned. 
+ */ + TributaryDb::mark_cosigned(self.tributary_txn, self.set.set, substrate_block_hash); + + // If we aren't actively cosigning this block, return + // This occurs when we have Cosign TXs A, B, C, we received Cosigned for A and start on C, + // and then receive Cosigned for B + if TributaryDb::actively_cosigning(self.tributary_txn, self.set.set) != + Some(substrate_block_hash) + { + return; + } + + // Since this is the block we were cosigning, mark us as having finished cosigning + TributaryDb::finish_cosigning(self.tributary_txn, self.set.set); + + // Start working on the next cosign + self.potentially_start_cosign(); + } + Transaction::SubstrateBlock { hash } => { + // Recognize all of the IDs this Substrate block causes to be signed + let plans = SubstrateBlockPlans::take(self.tributary_txn, self.set.set, hash).expect( + "Transaction::SubstrateBlock locally provided but SubstrateBlockPlans wasn't populated", + ); + for plan in plans { + TributaryDb::recognize_topic( + self.tributary_txn, + self.set.set, + Topic::Sign { + id: VariantSignId::Transaction(plan), + attempt: 0, + round: SigningProtocolRound::Preprocess, + }, + ); + } + } + Transaction::Batch { hash } => { + // Recognize the signing of this batch + TributaryDb::recognize_topic( + self.tributary_txn, + self.set.set, + Topic::Sign { + id: VariantSignId::Batch(hash), + attempt: 0, + round: SigningProtocolRound::Preprocess, + }, + ); + } + + Transaction::SlashReport { slash_points, signed } => { + let signer = signer(signed); + + if slash_points.len() != self.validators.len() { + TributaryDb::fatal_slash( + self.tributary_txn, + self.set.set, + signer, + "slash report was for a distinct amount of signers", + ); + return; + } + + // Accumulate, and if past the threshold, calculate *the* slash report and start signing it + match TributaryDb::accumulate( + self.tributary_txn, + self.set.set, + self.validators, + self.total_weight, + block_number, + topic.unwrap(), + signer, + self.validator_weights[&signer], + &slash_points, + ) { + DataSet::None => {} + DataSet::Participating(data_set) => { + // Find the median reported slashes for this validator + /* + TODO: This lets 34% perform a fatal slash. That shouldn't be allowed. We need + to accept slash reports for a period past the threshold, and only fatally slash if we + have a supermajority agree the slash should be fatal. If there isn't a supermajority, + but the median believe the slash should be fatal, we need to fallback to a large + constant. + */ + let mut median_slash_report = Vec::with_capacity(self.validators.len()); + for i in 0 .. 
self.validators.len() { + let mut this_validator = + data_set.values().map(|report| report[i]).collect::>(); + this_validator.sort_unstable(); + // Choose the median, where if there are two median values, the lower one is chosen + let median_index = if (this_validator.len() % 2) == 1 { + this_validator.len() / 2 + } else { + (this_validator.len() / 2) - 1 + }; + median_slash_report.push(this_validator[median_index]); + } + + // We only publish slashes for the `f` worst performers to: + // 1) Effect amnesty if there were network disruptions which affected everyone + // 2) Ensure the signing threshold doesn't have a disincentive to do their job + + // Find the worst performer within the signing threshold's slash points + let f = (self.validators.len() - 1) / 3; + let worst_validator_in_supermajority_slash_points = { + let mut sorted_slash_points = median_slash_report.clone(); + sorted_slash_points.sort_unstable(); + // This won't be a valid index if `f == 0`, which means we don't have any validators + // to slash + let index_of_first_validator_to_slash = self.validators.len() - f; + let index_of_worst_validator_in_supermajority = index_of_first_validator_to_slash - 1; + sorted_slash_points[index_of_worst_validator_in_supermajority] + }; + + // Perform the amortization + for slash_points in &mut median_slash_report { + *slash_points = + slash_points.saturating_sub(worst_validator_in_supermajority_slash_points) + } + let amortized_slash_report = median_slash_report; + + // Create the resulting slash report + let mut slash_report = vec![]; + for points in amortized_slash_report { + // TODO: Natively store this as a `Slash` + if points == u32::MAX { + slash_report.push(Slash::Fatal); + } else { + slash_report.push(Slash::Points(points)); + } + } + assert!(slash_report.len() <= f); + + // Recognize the topic for signing the slash report + TributaryDb::recognize_topic( + self.tributary_txn, + self.set.set, + Topic::Sign { + id: VariantSignId::SlashReport, + attempt: 0, + round: SigningProtocolRound::Preprocess, + }, + ); + // Send the message for the processor to start signing + TributaryDb::send_message( + self.tributary_txn, + self.set.set, + messages::coordinator::CoordinatorMessage::SignSlashReport { + session: self.set.set.session, + slash_report: slash_report.try_into().unwrap(), + }, + ); + } + }; + } + + Transaction::Sign { id: _, attempt: _, round, data, signed } => { + let topic = topic.unwrap(); + let signer = signer(signed); + + if data.len() != usize::from(self.validator_weights[&signer]) { + TributaryDb::fatal_slash( + self.tributary_txn, + self.set.set, + signer, + "signer signed with a distinct amount of key shares than they had key shares", + ); + return; + } + + match TributaryDb::accumulate( + self.tributary_txn, + self.set.set, + self.validators, + self.total_weight, + block_number, + topic, + signer, + self.validator_weights[&signer], + &data, + ) { + DataSet::None => {} + DataSet::Participating(data_set) => { + let id = topic.sign_id(self.set.set).expect("Topic::Sign didn't have SignId"); + let flatten_data_set = |data_set: HashMap<_, Vec<_>>| { + let mut entries = HashMap::with_capacity(usize::from(self.total_weight)); + for (validator, shares) in data_set { + let indexes = &self.set.participant_indexes[&validator]; + assert_eq!(indexes.len(), shares.len()); + for (index, share) in indexes.iter().zip(shares) { + entries.insert(*index, share); + } + } + entries + }; + let data_set = flatten_data_set(data_set); + TributaryDb::send_message( + self.tributary_txn, + self.set.set, 
+ match round { + SigningProtocolRound::Preprocess => { + messages::sign::CoordinatorMessage::Preprocesses { id, preprocesses: data_set } + } + SigningProtocolRound::Share => { + messages::sign::CoordinatorMessage::Shares { id, shares: data_set } + } + }, + ) + } + } + } } } - /// Get a Future which will resolve once the next block has been added. - pub async fn next_block_notification( - &self, - ) -> impl Send + Sync + core::future::Future> { - let (tx, rx) = tokio::sync::oneshot::channel(); - self.network.blockchain.write().await.next_block_notifications.push_back(tx); - rx + fn handle_block(mut self, block_number: u64, block: Block) { + TributaryDb::start_of_block(self.tributary_txn, self.set.set, block_number); + + for tx in block.transactions { + match tx { + TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(ev)) => { + // Since the evidence is on the chain, it will have already been validated + // We can just punish the signer + let data = match ev { + Evidence::ConflictingMessages(first, second) => (first, Some(second)), + Evidence::InvalidPrecommit(first) | Evidence::InvalidValidRound(first) => (first, None), + }; + let msgs = ( + decode_signed_message::>(&data.0).unwrap(), + if data.1.is_some() { + Some( + decode_signed_message::>(&data.1.unwrap()) + .unwrap(), + ) + } else { + None + }, + ); + + // Since anything with evidence is fundamentally faulty behavior, not just temporal + // errors, mark the node as fatally slashed + TributaryDb::fatal_slash( + self.tributary_txn, + self.set.set, + SeraiAddress(msgs.0.msg.sender), + &format!("invalid tendermint messages: {msgs:?}"), + ); + } + TributaryTransaction::Application(tx) => { + self.handle_application_tx(block_number, tx); + } + } + } } } -#[derive(Clone)] -pub struct TributaryReader(D, [u8; 32], PhantomData); -impl TributaryReader { - pub fn genesis(&self) -> [u8; 32] { - self.1 - } +/// The task to scan the Tributary, populating `ProcessorMessages`. +pub struct ScanTributaryTask { + tributary_db: TD, + set: NewSetInformation, + validators: Vec, + total_weight: u16, + validator_weights: HashMap, + tributary: TributaryReader, + _p2p: PhantomData
<P>
, +} - // Since these values are static once set, they can be safely read from the database without lock - // acquisition - pub fn block(&self, hash: &[u8; 32]) -> Option> { - Blockchain::::block_from_db(&self.0, self.1, hash) - } - pub fn commit(&self, hash: &[u8; 32]) -> Option> { - Blockchain::::commit_from_db(&self.0, self.1, hash) - } - pub fn parsed_commit(&self, hash: &[u8; 32]) -> Option> { - self.commit(hash).map(|commit| Commit::::decode(&mut commit.as_ref()).unwrap()) - } - pub fn block_after(&self, hash: &[u8; 32]) -> Option<[u8; 32]> { - Blockchain::::block_after(&self.0, self.1, hash) - } - pub fn time_of_block(&self, hash: &[u8; 32]) -> Option { - self - .commit(hash) - .map(|commit| Commit::::decode(&mut commit.as_ref()).unwrap().end_time) - } +impl ScanTributaryTask { + /// Create a new instance of this task. + pub fn new( + tributary_db: TD, + set: NewSetInformation, + tributary: TributaryReader, + ) -> Self { + let mut validators = Vec::with_capacity(set.validators.len()); + let mut total_weight = 0; + let mut validator_weights = HashMap::with_capacity(set.validators.len()); + for (validator, weight) in set.validators.iter().copied() { + validators.push(validator); + total_weight += weight; + validator_weights.insert(validator, weight); + } - pub fn locally_provided_txs_in_block(&self, hash: &[u8; 32], order: &str) -> bool { - Blockchain::::locally_provided_txs_in_block(&self.0, &self.1, hash, order) - } - - // This isn't static, yet can be read with only minor discrepancy risks - pub fn tip(&self) -> [u8; 32] { - Blockchain::::tip_from_db(&self.0, self.1) + ScanTributaryTask { + tributary_db, + set, + validators, + total_weight, + validator_weights, + tributary, + _p2p: PhantomData, + } } } + +impl ContinuallyRan for ScanTributaryTask { + type Error = String; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let (mut last_block_number, mut last_block_hash) = + TributaryDb::last_handled_tributary_block(&self.tributary_db, self.set.set) + .unwrap_or((0, self.tributary.genesis())); + + let mut made_progress = false; + while let Some(next) = self.tributary.block_after(&last_block_hash) { + let block = self.tributary.block(&next).unwrap(); + let block_number = last_block_number + 1; + let block_hash = block.hash(); + + // Make sure we have all of the provided transactions for this block + for tx in &block.transactions { + let TransactionKind::Provided(order) = tx.kind() else { + continue; + }; + + // make sure we have all the provided txs in this block locally + if !self.tributary.locally_provided_txs_in_block(&block_hash, order) { + return Err(format!( + "didn't have the provided Transactions on-chain for set (ephemeral error): {:?}", + self.set.set + )); + } + } + + let mut tributary_txn = self.tributary_db.txn(); + (ScanBlock { + _td: PhantomData::, + _p2p: PhantomData::
<P>
, + tributary_txn: &mut tributary_txn, + set: &self.set, + validators: &self.validators, + total_weight: self.total_weight, + validator_weights: &self.validator_weights, + }) + .handle_block(block_number, block); + TributaryDb::set_last_handled_tributary_block( + &mut tributary_txn, + self.set.set, + block_number, + block_hash, + ); + last_block_number = block_number; + last_block_hash = block_hash; + tributary_txn.commit(); + + made_progress = true; + } + + Ok(made_progress) + } + } +} + +/// Create the Transaction::SlashReport to publish per the local view. +pub fn slash_report_transaction(getter: &impl Get, set: &NewSetInformation) -> Transaction { + let mut slash_points = Vec::with_capacity(set.validators.len()); + for (validator, _weight) in set.validators.iter().copied() { + slash_points.push(SlashPoints::get(getter, set.set, validator).unwrap_or(0)); + } + Transaction::SlashReport { slash_points, signed: Signed::default() } +} diff --git a/coordinator/tributary/src/tests/p2p.rs b/coordinator/tributary/src/tests/p2p.rs deleted file mode 100644 index d3e3b74c..00000000 --- a/coordinator/tributary/src/tests/p2p.rs +++ /dev/null @@ -1,11 +0,0 @@ -pub use crate::P2p; - -#[derive(Clone, Debug)] -pub struct DummyP2p; - -#[async_trait::async_trait] -impl P2p for DummyP2p { - async fn broadcast(&self, _: [u8; 32], _: Vec) { - unimplemented!() - } -} diff --git a/coordinator/tributary/src/transaction.rs b/coordinator/tributary/src/transaction.rs index 8e9342d7..d05bf3c2 100644 --- a/coordinator/tributary/src/transaction.rs +++ b/coordinator/tributary/src/transaction.rs @@ -1,218 +1,397 @@ -use core::fmt::Debug; +use core::{ops::Deref, fmt::Debug}; use std::io; -use zeroize::Zeroize; -use thiserror::Error; - -use blake2::{Digest, Blake2b512}; +use zeroize::Zeroizing; +use rand_core::{RngCore, CryptoRng}; +use blake2::{digest::typenum::U32, Digest, Blake2b}; use ciphersuite::{ - group::{Group, GroupEncoding}, + group::{ff::Field, Group, GroupEncoding}, Ciphersuite, Ristretto, }; use schnorr::SchnorrSignature; -use crate::{TRANSACTION_SIZE_LIMIT, ReadWrite}; +use scale::Encode; +use borsh::{BorshSerialize, BorshDeserialize}; -#[derive(Clone, PartialEq, Eq, Debug, Error)] -pub enum TransactionError { - /// Transaction exceeded the size limit. - #[error("transaction is too large")] - TooLargeTransaction, - /// Transaction's signer isn't a participant. - #[error("invalid signer")] - InvalidSigner, - /// Transaction's nonce isn't the prior nonce plus one. - #[error("invalid nonce")] - InvalidNonce, - /// Transaction's signature is invalid. - #[error("invalid signature")] - InvalidSignature, - /// Transaction's content is invalid. - #[error("transaction content is invalid")] - InvalidContent, - /// Transaction's signer has too many transactions in the mempool. - #[error("signer has too many transactions in the mempool")] - TooManyInMempool, - /// Provided Transaction added to mempool. - #[error("provided transaction added to mempool")] - ProvidedAddedToMempool, +use serai_client::{primitives::SeraiAddress, validator_sets::primitives::MAX_KEY_SHARES_PER_SET}; + +use messages::sign::VariantSignId; + +use tributary_sdk::{ + ReadWrite, + transaction::{ + Signed as TributarySigned, TransactionError, TransactionKind, Transaction as TransactionTrait, + }, +}; + +use crate::db::Topic; + +/// The round this data is for, within a signing protocol. +#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)] +pub enum SigningProtocolRound { + /// A preprocess. 
+ Preprocess, + /// A signature share. + Share, } -/// Data for a signed transaction. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Signed { - pub signer: ::G, - pub nonce: u32, - pub signature: SchnorrSignature, -} - -impl ReadWrite for Signed { - fn read(reader: &mut R) -> io::Result { - let signer = Ristretto::read_G(reader)?; - - let mut nonce = [0; 4]; - reader.read_exact(&mut nonce)?; - let nonce = u32::from_le_bytes(nonce); - if nonce >= (u32::MAX - 1) { - Err(io::Error::other("nonce exceeded limit"))?; +impl SigningProtocolRound { + fn nonce(&self) -> u32 { + match self { + SigningProtocolRound::Preprocess => 0, + SigningProtocolRound::Share => 1, } - - let mut signature = SchnorrSignature::::read(reader)?; - if signature.R.is_identity().into() { - // Anyone malicious could remove this and try to find zero signatures - // We should never produce zero signatures though meaning this should never come up - // If it does somehow come up, this is a decent courtesy - signature.zeroize(); - Err(io::Error::other("signature nonce was identity"))?; - } - - Ok(Signed { signer, nonce, signature }) } +} - fn write(&self, writer: &mut W) -> io::Result<()> { - // This is either an invalid signature or a private key leak - if self.signature.R.is_identity().into() { - Err(io::Error::other("signature nonce was identity"))?; - } - writer.write_all(&self.signer.to_bytes())?; - writer.write_all(&self.nonce.to_le_bytes())?; +/// `tributary::Signed` but without the nonce. +/// +/// All of our nonces are deterministic to the type of transaction and fields within. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct Signed { + /// The signer. + signer: ::G, + /// The signature. + signature: SchnorrSignature, +} + +impl BorshSerialize for Signed { + fn serialize(&self, writer: &mut W) -> Result<(), io::Error> { + writer.write_all(self.signer.to_bytes().as_ref())?; self.signature.write(writer) } } +impl BorshDeserialize for Signed { + fn deserialize_reader(reader: &mut R) -> Result { + let signer = Ristretto::read_G(reader)?; + let signature = SchnorrSignature::read(reader)?; + Ok(Self { signer, signature }) + } +} impl Signed { - pub fn read_without_nonce(reader: &mut R, nonce: u32) -> io::Result { - let signer = Ristretto::read_G(reader)?; - - let mut signature = SchnorrSignature::::read(reader)?; - if signature.R.is_identity().into() { - // Anyone malicious could remove this and try to find zero signatures - // We should never produce zero signatures though meaning this should never come up - // If it does somehow come up, this is a decent courtesy - signature.zeroize(); - Err(io::Error::other("signature nonce was identity"))?; - } - - Ok(Signed { signer, nonce, signature }) + /// Fetch the signer. + pub(crate) fn signer(&self) -> ::G { + self.signer } - pub fn write_without_nonce(&self, writer: &mut W) -> io::Result<()> { - // This is either an invalid signature or a private key leak - if self.signature.R.is_identity().into() { - Err(io::Error::other("signature nonce was identity"))?; - } - writer.write_all(&self.signer.to_bytes())?; - self.signature.write(writer) + /// Provide a nonce to convert a `Signed` into a `tributary::Signed`. + fn to_tributary_signed(self, nonce: u32) -> TributarySigned { + TributarySigned { signer: self.signer, nonce, signature: self.signature } } } -#[allow(clippy::large_enum_variant)] -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum TransactionKind<'a> { - /// This transaction should be provided by every validator, in an exact order. 
- /// - /// The contained static string names the orderer to use. This allows two distinct provided - /// transaction kinds, without a synchronized order, to be ordered within their own kind without - /// requiring ordering with each other. - /// - /// The only malleability is in when this transaction appears on chain. The block producer will - /// include it when they have it. Block verification will fail for validators without it. - /// - /// If a supermajority of validators produce a commit for a block with a provided transaction - /// which isn't locally held, the block will be added to the local chain. When the transaction is - /// locally provided, it will be compared for correctness to the on-chain version - /// - /// In order to ensure TXs aren't accidentally provided multiple times, all provided transactions - /// must have a unique hash which is also unique to all Unsigned transactions. - Provided(&'static str), - - /// An unsigned transaction, only able to be included by the block producer. - /// - /// Once an Unsigned transaction is included on-chain, it may not be included again. In order to - /// have multiple Unsigned transactions with the same values included on-chain, some distinct - /// nonce must be included in order to cause a distinct hash. - /// - /// The hash must also be unique with all Provided transactions. - Unsigned, - - /// A signed transaction. - Signed(Vec, &'a Signed), +impl Default for Signed { + fn default() -> Self { + Self { + signer: ::G::identity(), + signature: SchnorrSignature { + R: ::G::identity(), + s: ::F::ZERO, + }, + } + } } -// TODO: Should this be renamed TransactionTrait now that a literal Transaction exists? -// Or should the literal Transaction be renamed to Event? -pub trait Transaction: 'static + Send + Sync + Clone + Eq + Debug + ReadWrite { - /// Return what type of transaction this is. - fn kind(&self) -> TransactionKind<'_>; +/// The Tributary transaction definition used by Serai +#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] +pub enum Transaction { + /// A vote to remove a participant for invalid behavior + RemoveParticipant { + /// The participant to remove + participant: SeraiAddress, + /// The transaction's signer and signature + signed: Signed, + }, - /// Return the hash of this transaction. - /// - /// The hash must NOT commit to the signature. - fn hash(&self) -> [u8; 32]; + /// A participation in the DKG + DkgParticipation { + /// The serialized participation + participation: Vec, + /// The transaction's signer and signature + signed: Signed, + }, + /// The preprocess to confirm the DKG results on-chain + DkgConfirmationPreprocess { + /// The attempt number of this signing protocol + attempt: u32, + /// The preprocess + preprocess: [u8; 64], + /// The transaction's signer and signature + signed: Signed, + }, + /// The signature share to confirm the DKG results on-chain + DkgConfirmationShare { + /// The attempt number of this signing protocol + attempt: u32, + /// The signature share + share: [u8; 32], + /// The transaction's signer and signature + signed: Signed, + }, - /// Perform transaction-specific verification. - fn verify(&self) -> Result<(), TransactionError>; + /// Intend to cosign a finalized Substrate block + /// + /// When the time comes to start a new cosigning protocol, the most recent Substrate block will + /// be the one selected to be cosigned. 
+ Cosign { + /// The hash of the Substrate block to cosign + substrate_block_hash: [u8; 32], + }, - /// Obtain the challenge for this transaction's signature. + /// Note an intended-to-be-cosigned Substrate block as cosigned /// - /// Do not override this unless you know what you're doing. + /// After producing this cosign, we need to start work on the latest intended-to-be cosigned + /// block. That requires agreement on when this cosign was produced, which we solve by noting + /// this cosign on-chain. /// - /// Panics if called on non-signed transactions. - fn sig_hash(&self, genesis: [u8; 32]) -> ::F { - match self.kind() { - TransactionKind::Signed(order, Signed { signature, .. }) => { - ::F::from_bytes_mod_order_wide( - &Blake2b512::digest( - [ - b"Tributary Signed Transaction", - genesis.as_ref(), - &self.hash(), - order.as_ref(), - signature.R.to_bytes().as_ref(), - ] - .concat(), - ) - .into(), - ) + /// We ideally don't have this transaction at all. The coordinator, without access to any of the + /// key shares, could observe the FROST signing session and determine a successful completion. + /// Unfortunately, that functionality is not present in modular-frost, so we do need to support + /// *some* asynchronous flow (where the processor or P2P network informs us of the successful + /// completion). + /// + /// If we use a `Provided` transaction, that requires everyone observe this cosign. + /// + /// If we use an `Unsigned` transaction, we can't verify the cosign signature inside + /// `Transaction::verify` unless we embedded the full `SignedCosign` on-chain. The issue is since + /// a Tributary is stateless with regards to the on-chain logic, including `Transaction::verify`, + /// we can't verify the signature against the group's public key unless we also include that (but + /// then we open a DoS where arbitrary group keys are specified to cause inclusion of arbitrary + /// blobs on chain). + /// + /// If we use a `Signed` transaction, we mitigate the DoS risk by having someone to fatally + /// slash. We have horrible performance though as for 100 validators, all 100 will publish this + /// transaction. + /// + /// We could use a signed `Unsigned` transaction, where it includes a signer and signature but + /// isn't technically a Signed transaction. This lets us de-duplicate the transaction premised on + /// its contents. + /// + /// The optimal choice is likely to use a `Provided` transaction. We don't actually need to + /// observe the produced cosign (which is ephemeral). As long as it's agreed the cosign in + /// question no longer needs to produced, which would mean the cosigning protocol at-large + /// cosigning the block in question, it'd be safe to provide this and move on to the next cosign. + Cosigned { + /// The hash of the Substrate block which was cosigned + substrate_block_hash: [u8; 32], + }, + + /// Acknowledge a Substrate block + /// + /// This is provided after the block has been cosigned. + /// + /// With the acknowledgement of a Substrate block, we can recognize all the `VariantSignId`s + /// resulting from its handling. + SubstrateBlock { + /// The hash of the Substrate block + hash: [u8; 32], + }, + + /// Acknowledge a Batch + /// + /// Once everyone has acknowledged the Batch, we can begin signing it. + Batch { + /// The hash of the Batch's serialization. + /// + /// Generally, we refer to a Batch by its ID/the hash of its instructions. 
Here, we want to + /// ensure consensus on the Batch, and achieving consensus on its hash is the most effective + /// way to do that. + hash: [u8; 32], + }, + + /// Data from a signing protocol. + Sign { + /// The ID of the object being signed + id: VariantSignId, + /// The attempt number of this signing protocol + attempt: u32, + /// The round this data is for, within the signing protocol + round: SigningProtocolRound, + /// The data itself + /// + /// There will be `n` blobs of data where `n` is the amount of key shares the validator sending + /// this transaction has. + data: Vec>, + /// The transaction's signer and signature + signed: Signed, + }, + + /// The local view of slashes observed by the transaction's sender + SlashReport { + /// The slash points accrued by each validator + slash_points: Vec, + /// The transaction's signer and signature + signed: Signed, + }, +} + +impl ReadWrite for Transaction { + fn read(reader: &mut R) -> io::Result { + borsh::from_reader(reader) + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + borsh::to_writer(writer, self) + } +} + +impl TransactionTrait for Transaction { + fn kind(&self) -> TransactionKind { + match self { + Transaction::RemoveParticipant { participant, signed } => TransactionKind::Signed( + (b"RemoveParticipant", participant).encode(), + signed.to_tributary_signed(0), + ), + + Transaction::DkgParticipation { signed, .. } => { + TransactionKind::Signed(b"DkgParticipation".encode(), signed.to_tributary_signed(0)) + } + Transaction::DkgConfirmationPreprocess { attempt, signed, .. } => TransactionKind::Signed( + (b"DkgConfirmation", attempt).encode(), + signed.to_tributary_signed(0), + ), + Transaction::DkgConfirmationShare { attempt, signed, .. } => TransactionKind::Signed( + (b"DkgConfirmation", attempt).encode(), + signed.to_tributary_signed(1), + ), + + Transaction::Cosign { .. } => TransactionKind::Provided("Cosign"), + Transaction::Cosigned { .. } => TransactionKind::Provided("Cosigned"), + Transaction::SubstrateBlock { .. } => TransactionKind::Provided("SubstrateBlock"), + Transaction::Batch { .. } => TransactionKind::Provided("Batch"), + + Transaction::Sign { id, attempt, round, signed, .. } => TransactionKind::Signed( + (b"Sign", id, attempt).encode(), + signed.to_tributary_signed(round.nonce()), + ), + + Transaction::SlashReport { signed, .. } => { + TransactionKind::Signed(b"SlashReport".encode(), signed.to_tributary_signed(0)) } - _ => panic!("sig_hash called on non-signed transaction"), } } -} -pub trait GAIN: FnMut(&::G, &[u8]) -> Option {} -impl::G, &[u8]) -> Option> GAIN for F {} - -pub(crate) fn verify_transaction( - tx: &T, - genesis: [u8; 32], - get_and_increment_nonce: &mut F, -) -> Result<(), TransactionError> { - if tx.serialize().len() > TRANSACTION_SIZE_LIMIT { - Err(TransactionError::TooLargeTransaction)?; + fn hash(&self) -> [u8; 32] { + let mut tx = ReadWrite::serialize(self); + if let TransactionKind::Signed(_, signed) = self.kind() { + // Make sure the part we're cutting off is the signature + assert_eq!(tx.drain((tx.len() - 64) ..).collect::>(), signed.signature.serialize()); + } + Blake2b::::digest(&tx).into() } - tx.verify()?; + // This is a stateless verification which we use to enforce some size limits. + fn verify(&self) -> Result<(), TransactionError> { + #[allow(clippy::match_same_arms)] + match self { + // Fixed-length TX + Transaction::RemoveParticipant { .. 
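// Illustrative sketch (not code from this patch): `hash` above drops the trailing 64-byte
// Schnorr signature before hashing, so a transaction's hash commits to its content but not
// to its signature. A standalone analogue, assuming `serialized` is at least 64 bytes:
use blake2::{digest::typenum::U32, Blake2b, Digest};

fn content_hash(serialized: &[u8]) -> [u8; 32] {
  // The final 64 bytes are assumed to be the signature, as the assert in `hash` enforces
  let without_signature = &serialized[.. serialized.len() - 64];
  Blake2b::<U32>::digest(without_signature).into()
}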
} => {} - match tx.kind() { - TransactionKind::Provided(_) | TransactionKind::Unsigned => {} - TransactionKind::Signed(order, Signed { signer, nonce, signature }) => { - if let Some(next_nonce) = get_and_increment_nonce(signer, &order) { - if *nonce != next_nonce { - Err(TransactionError::InvalidNonce)?; + // TODO: MAX_DKG_PARTICIPATION_LEN + Transaction::DkgParticipation { .. } => {} + // These are fixed-length TXs + Transaction::DkgConfirmationPreprocess { .. } | Transaction::DkgConfirmationShare { .. } => {} + + // Provided TXs + Transaction::Cosign { .. } | + Transaction::Cosigned { .. } | + Transaction::SubstrateBlock { .. } | + Transaction::Batch { .. } => {} + + Transaction::Sign { data, .. } => { + if data.len() > usize::from(MAX_KEY_SHARES_PER_SET) { + Err(TransactionError::InvalidContent)? } - } else { - // Not a participant - Err(TransactionError::InvalidSigner)?; + // TODO: MAX_SIGN_LEN } - // TODO: Use a batch verification here - if !signature.verify(*signer, tx.sig_hash(genesis)) { - Err(TransactionError::InvalidSignature)?; + Transaction::SlashReport { slash_points, .. } => { + if slash_points.len() > usize::from(MAX_KEY_SHARES_PER_SET) { + Err(TransactionError::InvalidContent)? + } } + }; + Ok(()) + } +} + +impl Transaction { + /// The topic in the database for this transaction. + pub fn topic(&self) -> Option { + #[allow(clippy::match_same_arms)] // This doesn't make semantic sense here + match self { + Transaction::RemoveParticipant { participant, .. } => { + Some(Topic::RemoveParticipant { participant: *participant }) + } + + Transaction::DkgParticipation { .. } => None, + Transaction::DkgConfirmationPreprocess { attempt, .. } => { + Some(Topic::DkgConfirmation { attempt: *attempt, round: SigningProtocolRound::Preprocess }) + } + Transaction::DkgConfirmationShare { attempt, .. } => { + Some(Topic::DkgConfirmation { attempt: *attempt, round: SigningProtocolRound::Share }) + } + + // Provided TXs + Transaction::Cosign { .. } | + Transaction::Cosigned { .. } | + Transaction::SubstrateBlock { .. } | + Transaction::Batch { .. } => None, + + Transaction::Sign { id, attempt, round, .. } => { + Some(Topic::Sign { id: *id, attempt: *attempt, round: *round }) + } + + Transaction::SlashReport { .. } => Some(Topic::SlashReport), } } - Ok(()) + /// Sign a transaction. + /// + /// Panics if signing a transaction whose type isn't `TransactionKind::Signed`. + pub fn sign( + &mut self, + rng: &mut R, + genesis: [u8; 32], + key: &Zeroizing<::F>, + ) { + fn signed(tx: &mut Transaction) -> &mut Signed { + #[allow(clippy::match_same_arms)] // This doesn't make semantic sense here + match tx { + Transaction::RemoveParticipant { ref mut signed, .. } | + Transaction::DkgParticipation { ref mut signed, .. } | + Transaction::DkgConfirmationPreprocess { ref mut signed, .. } => signed, + Transaction::DkgConfirmationShare { ref mut signed, .. } => signed, + + Transaction::Cosign { .. } => panic!("signing Cosign transaction (provided)"), + Transaction::Cosigned { .. } => panic!("signing Cosigned transaction (provided)"), + Transaction::SubstrateBlock { .. } => { + panic!("signing SubstrateBlock transaction (provided)") + } + Transaction::Batch { .. } => panic!("signing Batch transaction (provided)"), + + Transaction::Sign { ref mut signed, .. } => signed, + + Transaction::SlashReport { ref mut signed, .. 
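// Hypothetical usage sketch (this module's types are assumed in scope; `id`, `data`,
// `genesis`, and `key` are placeholders, not values from this patch): build one of the
// `TransactionKind::Signed` variants with a default signature, then let `sign` fill in the
// signer, nonce commitment, and signature. Calling this on a Provided variant such as
// `Cosign` would panic, per the match above.
use rand_core::OsRng;
use zeroize::Zeroizing;
use ciphersuite::{Ciphersuite, Ristretto};
use messages::sign::VariantSignId;

fn signed_preprocess(
  id: VariantSignId,
  data: Vec<Vec<u8>>,
  genesis: [u8; 32],
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
) -> Transaction {
  let mut tx = Transaction::Sign {
    id,
    attempt: 0,
    round: SigningProtocolRound::Preprocess,
    data,
    signed: Signed::default(),
  };
  tx.sign(&mut OsRng, genesis, key);
  tx
}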
} => signed, + } + } + + // Decide the nonce to sign with + let sig_nonce = Zeroizing::new(::F::random(rng)); + + { + // Set the signer and the nonce + let signed = signed(self); + signed.signer = Ristretto::generator() * key.deref(); + signed.signature.R = ::generator() * sig_nonce.deref(); + } + + // Get the signature hash (which now includes `R || A` making it valid as the challenge) + let sig_hash = self.sig_hash(genesis); + + // Sign the signature + signed(self).signature = SchnorrSignature::::sign(key, sig_nonce, sig_hash); + } } diff --git a/crypto/ciphersuite/Cargo.toml b/crypto/ciphersuite/Cargo.toml index 9fcf60a6..b666dbaa 100644 --- a/crypto/ciphersuite/Cargo.toml +++ b/crypto/ciphersuite/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ciphersuite authors = ["Luke Parker "] keywords = ["ciphersuite", "ff", "group"] edition = "2021" -rust-version = "1.74" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true diff --git a/crypto/ciphersuite/src/dalek.rs b/crypto/ciphersuite/src/dalek.rs index bd9c70c1..a04195b2 100644 --- a/crypto/ciphersuite/src/dalek.rs +++ b/crypto/ciphersuite/src/dalek.rs @@ -28,6 +28,12 @@ macro_rules! dalek_curve { $Point::generator() } + fn reduce_512(mut scalar: [u8; 64]) -> Self::F { + let res = Scalar::from_bytes_mod_order_wide(&scalar); + scalar.zeroize(); + res + } + fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F { Scalar::from_hash(Sha512::new_with_prefix(&[dst, data].concat())) } diff --git a/crypto/ciphersuite/src/ed448.rs b/crypto/ciphersuite/src/ed448.rs index 8a927251..0b19ffa5 100644 --- a/crypto/ciphersuite/src/ed448.rs +++ b/crypto/ciphersuite/src/ed448.rs @@ -66,6 +66,12 @@ impl Ciphersuite for Ed448 { Point::generator() } + fn reduce_512(mut scalar: [u8; 64]) -> Self::F { + let res = Self::hash_to_F(b"Ciphersuite-reduce_512", &scalar); + scalar.zeroize(); + res + } + fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F { Scalar::wide_reduce(Self::H::digest([dst, data].concat()).as_ref().try_into().unwrap()) } diff --git a/crypto/ciphersuite/src/kp256.rs b/crypto/ciphersuite/src/kp256.rs index 37fdb2e4..a1f64ae4 100644 --- a/crypto/ciphersuite/src/kp256.rs +++ b/crypto/ciphersuite/src/kp256.rs @@ -6,7 +6,7 @@ use group::ff::PrimeField; use elliptic_curve::{ generic_array::GenericArray, - bigint::{NonZero, CheckedAdd, Encoding, U384}, + bigint::{NonZero, CheckedAdd, Encoding, U384, U512}, hash2curve::{Expander, ExpandMsg, ExpandMsgXmd}, }; @@ -31,6 +31,22 @@ macro_rules! kp_curve { $lib::ProjectivePoint::GENERATOR } + fn reduce_512(scalar: [u8; 64]) -> Self::F { + let mut modulus = [0; 64]; + modulus[32 ..].copy_from_slice(&(Self::F::ZERO - Self::F::ONE).to_bytes()); + let modulus = U512::from_be_slice(&modulus).checked_add(&U512::ONE).unwrap(); + + let mut wide = + U512::from_be_bytes(scalar).rem(&NonZero::new(modulus).unwrap()).to_be_bytes(); + + let mut array = *GenericArray::from_slice(&wide[32 ..]); + let res = $lib::Scalar::from_repr(array).unwrap(); + + wide.zeroize(); + array.zeroize(); + res + } + fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F { // While one of these two libraries does support directly hashing to the Scalar field, the // other doesn't. 
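// Illustrative sketch (not from this patch): the new `reduce_512` turns 512 uniform bits
// into an unbiased scalar. For example, reducing 64 bytes of Blake2b-512 output with the
// Ristretto ciphersuite (any curve implementing the trait works the same way):
use blake2::{Blake2b512, Digest};
use ciphersuite::{Ciphersuite, Ristretto};

fn uniform_scalar(msg: &[u8]) -> <Ristretto as Ciphersuite>::F {
  let mut wide = [0u8; 64];
  wide.copy_from_slice(Blake2b512::digest(msg).as_slice());
  <Ristretto as Ciphersuite>::reduce_512(wide)
}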
While that's probably an oversight, this is a universally working method diff --git a/crypto/ciphersuite/src/lib.rs b/crypto/ciphersuite/src/lib.rs index e5ea6645..6519a413 100644 --- a/crypto/ciphersuite/src/lib.rs +++ b/crypto/ciphersuite/src/lib.rs @@ -62,6 +62,12 @@ pub trait Ciphersuite: // While group does provide this in its API, privacy coins may want to use a custom basepoint fn generator() -> Self::G; + /// Reduce 512 bits into a uniform scalar. + /// + /// If 512 bits is insufficient to perform a reduction into a uniform scalar, the ciphersuite + /// will perform a hash to sample the necessary bits. + fn reduce_512(scalar: [u8; 64]) -> Self::F; + /// Hash the provided domain-separation tag and message to a scalar. Ciphersuites MAY naively /// prefix the tag to the message, enabling transpotion between the two. Accordingly, this /// function should NOT be used in any scheme where one tag is a valid substring of another @@ -99,6 +105,9 @@ pub trait Ciphersuite: } /// Read a canonical point from something implementing std::io::Read. + /// + /// The provided implementation is safe so long as `GroupEncoding::to_bytes` always returns a + /// canonical serialization. #[cfg(any(feature = "alloc", feature = "std"))] #[allow(non_snake_case)] fn read_G(reader: &mut R) -> io::Result { diff --git a/crypto/dalek-ff-group/Cargo.toml b/crypto/dalek-ff-group/Cargo.toml index 29b8806c..b41e1f4e 100644 --- a/crypto/dalek-ff-group/Cargo.toml +++ b/crypto/dalek-ff-group/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dalek-ff-gr authors = ["Luke Parker "] keywords = ["curve25519", "ed25519", "ristretto", "dalek", "group"] edition = "2021" -rust-version = "1.66" +rust-version = "1.71" [package.metadata.docs.rs] all-features = true diff --git a/crypto/dalek-ff-group/src/field.rs b/crypto/dalek-ff-group/src/field.rs index b1af2711..10ca67d9 100644 --- a/crypto/dalek-ff-group/src/field.rs +++ b/crypto/dalek-ff-group/src/field.rs @@ -35,7 +35,7 @@ impl_modulus!( type ResidueType = Residue; /// A constant-time implementation of the Ed25519 field. -#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Default, Debug, Zeroize)] pub struct FieldElement(ResidueType); // Square root of -1. @@ -92,7 +92,7 @@ impl Neg for FieldElement { } } -impl<'a> Neg for &'a FieldElement { +impl Neg for &FieldElement { type Output = FieldElement; fn neg(self) -> Self::Output { (*self).neg() @@ -244,7 +244,16 @@ impl FieldElement { res *= res; } } - res *= table[usize::from(bits)]; + + let mut scale_by = FieldElement::ONE; + #[allow(clippy::needless_range_loop)] + for i in 0 .. 16 { + #[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 16 + { + scale_by = <_>::conditional_select(&scale_by, &table[i], bits.ct_eq(&(i as u8))); + } + } + res *= scale_by; bits = 0; } } diff --git a/crypto/dalek-ff-group/src/lib.rs b/crypto/dalek-ff-group/src/lib.rs index dcbcacc0..e6aad5b2 100644 --- a/crypto/dalek-ff-group/src/lib.rs +++ b/crypto/dalek-ff-group/src/lib.rs @@ -208,7 +208,16 @@ impl Scalar { res *= res; } } - res *= table[usize::from(bits)]; + + let mut scale_by = Scalar::ONE; + #[allow(clippy::needless_range_loop)] + for i in 0 .. 16 { + #[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 
16 + { + scale_by = <_>::conditional_select(&scale_by, &table[i], bits.ct_eq(&(i as u8))); + } + } + res *= scale_by; bits = 0; } } diff --git a/crypto/dkg/Cargo.toml b/crypto/dkg/Cargo.toml index 7ed301f5..a15a1d47 100644 --- a/crypto/dkg/Cargo.toml +++ b/crypto/dkg/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg" authors = ["Luke Parker "] keywords = ["dkg", "multisig", "threshold", "ff", "group"] edition = "2021" -rust-version = "1.79" +rust-version = "1.81" [package.metadata.docs.rs] all-features = true @@ -17,7 +17,7 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [dependencies] -thiserror = { version = "1", default-features = false, optional = true } +thiserror = { version = "2", default-features = false } rand_core = { version = "0.6", default-features = false } @@ -36,13 +36,29 @@ multiexp = { path = "../multiexp", version = "0.4", default-features = false } schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false } dleq = { path = "../dleq", version = "^0.4.1", default-features = false } +# eVRF DKG dependencies +generic-array = { version = "1", default-features = false, features = ["alloc"], optional = true } +blake2 = { version = "0.10", default-features = false, features = ["std"], optional = true } +rand_chacha = { version = "0.3", default-features = false, features = ["std"], optional = true } +generalized-bulletproofs = { path = "../evrf/generalized-bulletproofs", default-features = false, optional = true } +ec-divisors = { path = "../evrf/divisors", default-features = false, optional = true } +generalized-bulletproofs-circuit-abstraction = { path = "../evrf/circuit-abstraction", optional = true } +generalized-bulletproofs-ec-gadgets = { path = "../evrf/ec-gadgets", optional = true } + +secq256k1 = { path = "../evrf/secq256k1", optional = true } +embedwards25519 = { path = "../evrf/embedwards25519", optional = true } + [dev-dependencies] rand_core = { version = "0.6", default-features = false, features = ["getrandom"] } +rand = { version = "0.8", default-features = false, features = ["std"] } ciphersuite = { path = "../ciphersuite", default-features = false, features = ["ristretto"] } +generalized-bulletproofs = { path = "../evrf/generalized-bulletproofs", features = ["tests"] } +ec-divisors = { path = "../evrf/divisors", features = ["pasta"] } +pasta_curves = { git = "https://github.com/kayabaNerve/pasta_curves", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616" } [features] std = [ - "thiserror", + "thiserror/std", "rand_core/std", @@ -62,5 +78,21 @@ std = [ "dleq/serialize" ] borsh = ["dep:borsh"] +evrf = [ + "std", + + "dep:generic-array", + + "dep:blake2", + "dep:rand_chacha", + + "dep:generalized-bulletproofs", + "dep:ec-divisors", + "dep:generalized-bulletproofs-circuit-abstraction", + "dep:generalized-bulletproofs-ec-gadgets", +] +evrf-secp256k1 = ["evrf", "ciphersuite/secp256k1", "secq256k1"] +evrf-ed25519 = ["evrf", "ciphersuite/ed25519", "embedwards25519"] +evrf-ristretto = ["evrf", "ciphersuite/ristretto", "embedwards25519"] tests = ["rand_core/getrandom"] default = ["std"] diff --git a/crypto/dkg/src/encryption.rs b/crypto/dkg/src/encryption.rs index 51cf6b06..1ad721f6 100644 --- a/crypto/dkg/src/encryption.rs +++ b/crypto/dkg/src/encryption.rs @@ -98,11 +98,11 @@ fn ecdh(private: &Zeroizing, public: C::G) -> Zeroizing(context: &str, ecdh: &Zeroizing) -> ChaCha20 { +fn cipher(context: [u8; 32], ecdh: &Zeroizing) -> ChaCha20 { // Ideally, we'd box this 
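// Illustrative sketch (not from this patch) of the constant-time selection the
// dalek-ff-group changes above switch to: instead of indexing the window table with a
// secret-derived value, every entry is scanned and the matching one kept via
// `conditional_select`, avoiding a secret-dependent memory access.
use subtle::{ConditionallySelectable, ConstantTimeEq};

fn ct_lookup<T: ConditionallySelectable>(table: &[T; 16], index: u8) -> T {
  let mut res = table[0];
  for i in 1 .. 16u8 {
    res = T::conditional_select(&res, &table[usize::from(i)], index.ct_eq(&i));
  }
  res
}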
transcript with ZAlloc, yet that's only possible on nightly // TODO: https://github.com/serai-dex/serai/issues/151 let mut transcript = RecommendedTranscript::new(b"DKG Encryption v0.2"); - transcript.append_message(b"context", context.as_bytes()); + transcript.append_message(b"context", context); transcript.domain_separate(b"encryption_key"); @@ -134,7 +134,7 @@ fn cipher(context: &str, ecdh: &Zeroizing) -> ChaCha20 { fn encrypt( rng: &mut R, - context: &str, + context: [u8; 32], from: Participant, to: C::G, mut msg: Zeroizing, @@ -197,7 +197,7 @@ impl EncryptedMessage { pub(crate) fn invalidate_msg( &mut self, rng: &mut R, - context: &str, + context: [u8; 32], from: Participant, ) { // Invalidate the message by specifying a new key/Schnorr PoP @@ -219,7 +219,7 @@ impl EncryptedMessage { pub(crate) fn invalidate_share_serialization( &mut self, rng: &mut R, - context: &str, + context: [u8; 32], from: Participant, to: C::G, ) { @@ -243,7 +243,7 @@ impl EncryptedMessage { pub(crate) fn invalidate_share_value( &mut self, rng: &mut R, - context: &str, + context: [u8; 32], from: Participant, to: C::G, ) { @@ -300,14 +300,14 @@ impl EncryptionKeyProof { // This still doesn't mean the DKG offers an authenticated channel. The per-message keys have no // root of trust other than their existence in the assumed-to-exist external authenticated channel. fn pop_challenge( - context: &str, + context: [u8; 32], nonce: C::G, key: C::G, sender: Participant, msg: &[u8], ) -> C::F { let mut transcript = RecommendedTranscript::new(b"DKG Encryption Key Proof of Possession v0.2"); - transcript.append_message(b"context", context.as_bytes()); + transcript.append_message(b"context", context); transcript.domain_separate(b"proof_of_possession"); @@ -323,9 +323,9 @@ fn pop_challenge( C::hash_to_F(b"DKG-encryption-proof_of_possession", &transcript.challenge(b"schnorr")) } -fn encryption_key_transcript(context: &str) -> RecommendedTranscript { +fn encryption_key_transcript(context: [u8; 32]) -> RecommendedTranscript { let mut transcript = RecommendedTranscript::new(b"DKG Encryption Key Correctness Proof v0.2"); - transcript.append_message(b"context", context.as_bytes()); + transcript.append_message(b"context", context); transcript } @@ -337,58 +337,17 @@ pub(crate) enum DecryptionError { InvalidProof, } -// A simple box for managing encryption. -#[derive(Clone)] -pub(crate) struct Encryption { - context: String, - i: Option, - enc_key: Zeroizing, - enc_pub_key: C::G, +// A simple box for managing decryption. 
+#[derive(Clone, Debug)] +pub(crate) struct Decryption { + context: [u8; 32], enc_keys: HashMap, } -impl fmt::Debug for Encryption { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt - .debug_struct("Encryption") - .field("context", &self.context) - .field("i", &self.i) - .field("enc_pub_key", &self.enc_pub_key) - .field("enc_keys", &self.enc_keys) - .finish_non_exhaustive() +impl Decryption { + pub(crate) fn new(context: [u8; 32]) -> Self { + Self { context, enc_keys: HashMap::new() } } -} - -impl Zeroize for Encryption { - fn zeroize(&mut self) { - self.enc_key.zeroize(); - self.enc_pub_key.zeroize(); - for (_, mut value) in self.enc_keys.drain() { - value.zeroize(); - } - } -} - -impl Encryption { - pub(crate) fn new( - context: String, - i: Option, - rng: &mut R, - ) -> Self { - let enc_key = Zeroizing::new(C::random_nonzero_F(rng)); - Self { - context, - i, - enc_pub_key: C::generator() * enc_key.deref(), - enc_key, - enc_keys: HashMap::new(), - } - } - - pub(crate) fn registration(&self, msg: M) -> EncryptionKeyMessage { - EncryptionKeyMessage { msg, enc_key: self.enc_pub_key } - } - pub(crate) fn register( &mut self, participant: Participant, @@ -402,13 +361,109 @@ impl Encryption { msg.msg } + // Given a message, and the intended decryptor, and a proof for its key, decrypt the message. + // Returns None if the key was wrong. + pub(crate) fn decrypt_with_proof( + &self, + from: Participant, + decryptor: Participant, + mut msg: EncryptedMessage, + // There's no encryption key proof if the accusation is of an invalid signature + proof: Option>, + ) -> Result, DecryptionError> { + if !msg.pop.verify( + msg.key, + pop_challenge::(self.context, msg.pop.R, msg.key, from, msg.msg.deref().as_ref()), + ) { + Err(DecryptionError::InvalidSignature)?; + } + + if let Some(proof) = proof { + // Verify this is the decryption key for this message + proof + .dleq + .verify( + &mut encryption_key_transcript(self.context), + &[C::generator(), msg.key], + &[self.enc_keys[&decryptor], *proof.key], + ) + .map_err(|_| DecryptionError::InvalidProof)?; + + cipher::(self.context, &proof.key).apply_keystream(msg.msg.as_mut().as_mut()); + Ok(msg.msg) + } else { + Err(DecryptionError::InvalidProof) + } + } +} + +// A simple box for managing encryption. 
+#[derive(Clone)] +pub(crate) struct Encryption { + context: [u8; 32], + i: Participant, + enc_key: Zeroizing, + enc_pub_key: C::G, + decryption: Decryption, +} + +impl fmt::Debug for Encryption { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt + .debug_struct("Encryption") + .field("context", &self.context) + .field("i", &self.i) + .field("enc_pub_key", &self.enc_pub_key) + .field("decryption", &self.decryption) + .finish_non_exhaustive() + } +} + +impl Zeroize for Encryption { + fn zeroize(&mut self) { + self.enc_key.zeroize(); + self.enc_pub_key.zeroize(); + for (_, mut value) in self.decryption.enc_keys.drain() { + value.zeroize(); + } + } +} + +impl Encryption { + pub(crate) fn new( + context: [u8; 32], + i: Participant, + rng: &mut R, + ) -> Self { + let enc_key = Zeroizing::new(C::random_nonzero_F(rng)); + Self { + context, + i, + enc_pub_key: C::generator() * enc_key.deref(), + enc_key, + decryption: Decryption::new(context), + } + } + + pub(crate) fn registration(&self, msg: M) -> EncryptionKeyMessage { + EncryptionKeyMessage { msg, enc_key: self.enc_pub_key } + } + + pub(crate) fn register( + &mut self, + participant: Participant, + msg: EncryptionKeyMessage, + ) -> M { + self.decryption.register(participant, msg) + } + pub(crate) fn encrypt( &self, rng: &mut R, participant: Participant, msg: Zeroizing, ) -> EncryptedMessage { - encrypt(rng, &self.context, self.i.unwrap(), self.enc_keys[&participant], msg) + encrypt(rng, self.context, self.i, self.decryption.enc_keys[&participant], msg) } pub(crate) fn decrypt( @@ -426,18 +481,18 @@ impl Encryption { batch, batch_id, msg.key, - pop_challenge::(&self.context, msg.pop.R, msg.key, from, msg.msg.deref().as_ref()), + pop_challenge::(self.context, msg.pop.R, msg.key, from, msg.msg.deref().as_ref()), ); let key = ecdh::(&self.enc_key, msg.key); - cipher::(&self.context, &key).apply_keystream(msg.msg.as_mut().as_mut()); + cipher::(self.context, &key).apply_keystream(msg.msg.as_mut().as_mut()); ( msg.msg, EncryptionKeyProof { key, dleq: DLEqProof::prove( rng, - &mut encryption_key_transcript(&self.context), + &mut encryption_key_transcript(self.context), &[C::generator(), msg.key], &self.enc_key, ), @@ -445,38 +500,7 @@ impl Encryption { ) } - // Given a message, and the intended decryptor, and a proof for its key, decrypt the message. - // Returns None if the key was wrong. 
- pub(crate) fn decrypt_with_proof( - &self, - from: Participant, - decryptor: Participant, - mut msg: EncryptedMessage, - // There's no encryption key proof if the accusation is of an invalid signature - proof: Option>, - ) -> Result, DecryptionError> { - if !msg.pop.verify( - msg.key, - pop_challenge::(&self.context, msg.pop.R, msg.key, from, msg.msg.deref().as_ref()), - ) { - Err(DecryptionError::InvalidSignature)?; - } - - if let Some(proof) = proof { - // Verify this is the decryption key for this message - proof - .dleq - .verify( - &mut encryption_key_transcript(&self.context), - &[C::generator(), msg.key], - &[self.enc_keys[&decryptor], *proof.key], - ) - .map_err(|_| DecryptionError::InvalidProof)?; - - cipher::(&self.context, &proof.key).apply_keystream(msg.msg.as_mut().as_mut()); - Ok(msg.msg) - } else { - Err(DecryptionError::InvalidProof) - } + pub(crate) fn into_decryption(self) -> Decryption { + self.decryption } } diff --git a/crypto/dkg/src/evrf/mod.rs b/crypto/dkg/src/evrf/mod.rs new file mode 100644 index 00000000..d31f33f7 --- /dev/null +++ b/crypto/dkg/src/evrf/mod.rs @@ -0,0 +1,585 @@ +/* + We implement a DKG using an eVRF, as detailed in the eVRF paper. For the eVRF itself, we do not + use a Paillier-based construction, nor the detailed construction premised on a Bulletproof. + + For reference, the detailed construction premised on a Bulletproof involves two curves, notated + here as `C` and `E`, where the scalar field of `C` is the field of `E`. Accordingly, Bulletproofs + over `C` can efficiently perform group operations of points of curve `E`. Each participant has a + private point (`P_i`) on curve `E` committed to over curve `C`. The eVRF selects a pair of + scalars `a, b`, where the participant proves in-Bulletproof the points `A_i, B_i` are + `a * P_i, b * P_i`. The eVRF proceeds to commit to `A_i.x + B_i.x` in a Pedersen Commitment. + + Our eVRF uses + [Generalized Bulletproofs]( + https://repo.getmonero.org/monero-project/ccs-proposals + /uploads/a9baa50c38c6312efc0fea5c6a188bb9/gbp.pdf + ). + This allows us much larger witnesses without growing the reference string, and enables us to + efficiently sample challenges off in-circuit variables (via placing the variables in a vector + commitment, then challenging from a transcript of the commitments). We proceed to use + [elliptic curve divisors]( + https://repo.getmonero.org/-/project/54/ + uploads/eb1bf5b4d4855a3480c38abf895bd8e8/Veridise_Divisor_Proofs.pdf + ) + (which require the ability to sample a challenge off in-circuit variables) to prove discrete + logarithms efficiently. + + This is done via having a private scalar (`p_i`) on curve `E`, not a private point, and + publishing the public key for it (`P_i = p_i * G`, where `G` is a generator of `E`). The eVRF + samples two points with unknown discrete logarithms `A, B`, and the circuit proves a Pedersen + Commitment commits to `(p_i * A).x + (p_i * B).x`. + + With the eVRF established, we now detail our other novel aspect. The eVRF paper expects secret + shares to be sent to the other parties yet does not detail a precise way to do so. If we + encrypted the secret shares with some stream cipher, each recipient would have to attest validity + or accuse the sender of impropriety. We want an encryption scheme where anyone can verify the + secret shares were encrypted properly, without additional info, efficiently. + + Please note from the published commitments, it's possible to calculcate a commitment to the + secret share each party should receive (`V_i`). 
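To make the preceding sentence concrete (the notation here is illustrative, not lifted from the eVRF paper): if commitments `A_0, ..., A_{t-1}` to the coefficients of `f(x) = a_0 + a_1 x + ... + a_{t-1} x^(t-1)` are published, then a commitment to the share `s_i = f(i)` owed to participant `i` is the publicly computable combination `V_i = A_0 + i A_1 + i^2 A_2 + ... + i^(t-1) A_{t-1}`.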
+ + We have the sender sample two scalars per recipient, denoted `x_i, y_i` (where `i` is the + recipient index). They perform the eVRF to prove a Pedersen Commitment commits to + `z_i = (x_i * P_i).x + (y_i * P_i).x` and `x_i, y_i` are the discrete logarithms of `X_i, Y_i` + over `G`. They then publish the encrypted share `s_i + z_i` and `X_i, Y_i`. + + The recipient is able to decrypt the share via calculating + `s_i - ((p_i * X_i).x + (p_i * Y_i).x)`. + + To verify the secret share, we have the `F` terms of the Pedersen Commitments revealed (where + `F, H` are generators of `C`, `F` is used for binding and `H` for blinding). This already needs + to be done for the eVRF outputs used within the DKG, in order to obtain thecommitments to the + coefficients. When we have the commitment `Z_i = ((p_i * A).x + (p_i * B).x) * F`, we simply + check `s_i * F = Z_i + V_i`. + + In order to open the Pedersen Commitments to their `F` terms, we transcript the commitments and + the claimed openings, then assign random weights to each pair of `(commitment, opening). The + prover proves knowledge of the discrete logarithm of the sum weighted commitments, minus the sum + sum weighted openings, over `H`. + + The benefit to this construction is that given an broadcast channel which is reliable and + ordered, only `t` messages must be broadcast from honest parties in order to create a `t`-of-`n` + multisig. If the encrypted secret shares were not verifiable, one would need at least `t + n` + messages to ensure every participant has a correct dealing and can participate in future + reconstructions of the secret. This would also require all `n` parties be online, whereas this is + robust to threshold `t`. +*/ + +use core::ops::Deref; +use std::{ + io::{self, Read, Write}, + collections::{HashSet, HashMap}, +}; + +use rand_core::{RngCore, CryptoRng}; + +use zeroize::{Zeroize, Zeroizing}; + +use blake2::{Digest, Blake2s256}; +use ciphersuite::{ + group::{ + ff::{Field, PrimeField}, + Group, GroupEncoding, + }, + Ciphersuite, +}; +use multiexp::multiexp_vartime; + +use generalized_bulletproofs::{Generators, arithmetic_circuit_proof::*}; +use ec_divisors::DivisorCurve; + +use crate::{Participant, ThresholdParams, Interpolation, ThresholdCore, ThresholdKeys}; + +pub(crate) mod proof; +use proof::*; +pub use proof::{EvrfCurve, EvrfGenerators}; + +/// Participation in the DKG. +/// +/// `Participation` is meant to be broadcast to all other participants over an authenticated, +/// reliable broadcast channel. 
+#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Participation { + proof: Vec, + encrypted_secret_shares: HashMap, +} + +impl Participation { + pub fn read(reader: &mut R, n: u16) -> io::Result { + // TODO: Replace `len` with some calculation deterministic to the params + let mut len = [0; 4]; + reader.read_exact(&mut len)?; + let len = usize::try_from(u32::from_le_bytes(len)).expect("<32-bit platform?"); + + // Don't allocate a buffer for the claimed length + // Read chunks until we reach the claimed length + // This means if we were told to read GB, we must actually be sent GB before allocating as such + const CHUNK_SIZE: usize = 1024; + let mut proof = Vec::with_capacity(len.min(CHUNK_SIZE)); + while proof.len() < len { + let next_chunk = (len - proof.len()).min(CHUNK_SIZE); + let old_proof_len = proof.len(); + proof.resize(old_proof_len + next_chunk, 0); + reader.read_exact(&mut proof[old_proof_len ..])?; + } + + let mut encrypted_secret_shares = HashMap::with_capacity(usize::from(n)); + for i in (1 ..= n).map(Participant) { + encrypted_secret_shares.insert(i, C::read_F(reader)?); + } + + Ok(Self { proof, encrypted_secret_shares }) + } + + pub fn write(&self, writer: &mut W) -> io::Result<()> { + writer.write_all(&u32::try_from(self.proof.len()).unwrap().to_le_bytes())?; + writer.write_all(&self.proof)?; + for i in (1 ..= u16::try_from(self.encrypted_secret_shares.len()) + .expect("writing a Participation which has a n > u16::MAX")) + .map(Participant) + { + writer.write_all(self.encrypted_secret_shares[&i].to_repr().as_ref())?; + } + Ok(()) + } +} + +fn polynomial( + coefficients: &[Zeroizing], + l: Participant, +) -> Zeroizing { + let l = F::from(u64::from(u16::from(l))); + // This should never be reached since Participant is explicitly non-zero + assert!(l != F::ZERO, "zero participant passed to polynomial"); + let mut share = Zeroizing::new(F::ZERO); + for (idx, coefficient) in coefficients.iter().rev().enumerate() { + *share += coefficient.deref(); + if idx != (coefficients.len() - 1) { + *share *= l; + } + } + share +} + +#[allow(clippy::type_complexity)] +fn share_verification_statements( + rng: &mut (impl RngCore + CryptoRng), + commitments: &[C::G], + n: u16, + encryption_commitments: &[C::G], + encrypted_secret_shares: &HashMap, +) -> (C::F, Vec<(C::F, C::G)>) { + debug_assert_eq!(usize::from(n), encryption_commitments.len()); + debug_assert_eq!(usize::from(n), encrypted_secret_shares.len()); + + let mut g_scalar = C::F::ZERO; + let mut pairs = Vec::with_capacity(commitments.len() + encryption_commitments.len()); + for commitment in commitments { + pairs.push((C::F::ZERO, *commitment)); + } + + let mut weight; + for (i, enc_share) in encrypted_secret_shares { + let enc_commitment = encryption_commitments[usize::from(u16::from(*i)) - 1]; + + weight = C::F::random(&mut *rng); + + // s_i F + g_scalar += weight * enc_share; + // - Z_i + let weight = -weight; + pairs.push((weight, enc_commitment)); + // - V_i + { + let i = C::F::from(u64::from(u16::from(*i))); + // The first `commitments.len()` pairs are for the commitments + (0 .. commitments.len()).fold(weight, |exp, j| { + pairs[j].0 += exp; + exp * i + }); + } + } + + (g_scalar, pairs) +} + +/// Errors from the eVRF DKG. 
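// Illustrative check (not from this patch) of the evaluation order `polynomial` uses above:
// walking the coefficients from highest degree down is Horner's rule, so the loop computes
// a_0 + a_1*l + ... + a_{t-1}*l^(t-1) with one multiplication per coefficient.
fn horner(coefficients: &[u64], l: u64) -> u64 {
  let mut share = 0;
  for (idx, coefficient) in coefficients.iter().rev().enumerate() {
    share += coefficient;
    if idx != (coefficients.len() - 1) {
      share *= l;
    }
  }
  share
}

#[test]
fn horner_matches_direct_evaluation() {
  let (a0, a1, a2, l) = (7, 5, 3, 2);
  assert_eq!(horner(&[a0, a1, a2], l), a0 + (a1 * l) + (a2 * l * l));
}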
+#[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)] +pub enum EvrfError { + #[error("n, the amount of participants, exceeded a u16")] + TooManyParticipants, + #[error("the threshold t wasn't in range 1 <= t <= n")] + InvalidThreshold, + #[error("a public key was the identity point")] + PublicKeyWasIdentity, + #[error("participating in a DKG we aren't a participant in")] + NotAParticipant, + #[error("a participant with an unrecognized ID participated")] + NonExistentParticipant, + #[error("the passed in generators did not have enough generators for this DKG")] + NotEnoughGenerators, +} + +/// The result of calling EvrfDkg::verify. +pub enum VerifyResult { + Valid(EvrfDkg), + Invalid(Vec), + NotEnoughParticipants, +} + +/// Struct to perform/verify the DKG with. +#[derive(Debug)] +pub struct EvrfDkg { + t: u16, + n: u16, + evrf_public_keys: Vec<::G>, + group_key: C::G, + verification_shares: HashMap, + #[allow(clippy::type_complexity)] + encrypted_secret_shares: + HashMap::G; 2], C::F)>>, +} + +impl EvrfDkg { + // Form the initial transcript for the proofs. + fn initial_transcript( + invocation: [u8; 32], + evrf_public_keys: &[::G], + t: u16, + ) -> [u8; 32] { + let mut transcript = Blake2s256::new(); + transcript.update(invocation); + for key in evrf_public_keys { + transcript.update(key.to_bytes().as_ref()); + } + transcript.update(t.to_le_bytes()); + transcript.finalize().into() + } + + /// Participate in performing the DKG for the specified parameters. + /// + /// The context MUST be unique across invocations. Reuse of context will lead to sharing + /// prior-shared secrets. + /// + /// Public keys are not allowed to be the identity point. This will error if any are. + pub fn participate( + rng: &mut (impl RngCore + CryptoRng), + generators: &EvrfGenerators, + context: [u8; 32], + t: u16, + evrf_public_keys: &[::G], + evrf_private_key: &Zeroizing<::F>, + ) -> Result, EvrfError> { + let Ok(n) = u16::try_from(evrf_public_keys.len()) else { Err(EvrfError::TooManyParticipants)? 
}; + if (t == 0) || (t > n) { + Err(EvrfError::InvalidThreshold)?; + } + if evrf_public_keys.iter().any(|key| bool::from(key.is_identity())) { + Err(EvrfError::PublicKeyWasIdentity)?; + }; + // This also checks the private key is not 0 + let evrf_public_key = ::generator() * evrf_private_key.deref(); + if !evrf_public_keys.iter().any(|key| *key == evrf_public_key) { + Err(EvrfError::NotAParticipant)?; + }; + + let transcript = Self::initial_transcript(context, evrf_public_keys, t); + // Further bind to the participant index so each index gets unique generators + // This allows reusing eVRF public keys as the prover + let mut per_proof_transcript = Blake2s256::new(); + per_proof_transcript.update(transcript); + per_proof_transcript.update(evrf_public_key.to_bytes()); + + // The above transcript is expected to be binding to all arguments here + // The generators are constant to this ciphersuite's generator, and the parameters are + // transcripted + let EvrfProveResult { coefficients, encryption_masks, proof } = match Evrf::prove( + rng, + &generators.0, + per_proof_transcript.finalize().into(), + usize::from(t), + evrf_public_keys, + evrf_private_key, + ) { + Ok(res) => res, + Err(AcError::NotEnoughGenerators) => Err(EvrfError::NotEnoughGenerators)?, + Err( + AcError::DifferingLrLengths | + AcError::InconsistentAmountOfConstraints | + AcError::ConstrainedNonExistentTerm | + AcError::ConstrainedNonExistentCommitment | + AcError::InconsistentWitness | + AcError::Ip(_) | + AcError::IncompleteProof, + ) => { + panic!("failed to prove for the eVRF proof") + } + }; + + let mut encrypted_secret_shares = HashMap::with_capacity(usize::from(n)); + for (l, encryption_mask) in (1 ..= n).map(Participant).zip(encryption_masks) { + let share = polynomial::(&coefficients, l); + encrypted_secret_shares.insert(l, *share + *encryption_mask); + } + + Ok(Participation { proof, encrypted_secret_shares }) + } + + /// Check if a batch of `Participation`s are valid. + /// + /// If any `Participation` is invalid, the list of all invalid participants will be returned. + /// If all `Participation`s are valid and there's at least `t`, an instance of this struct + /// (usable to obtain a threshold share of generated key) is returned. If all are valid and + /// there's not at least `t`, `VerifyResult::NotEnoughParticipants` is returned. + /// + /// This DKG is unbiased if all `n` people participate. This DKG is biased if only a threshold + /// participate. + pub fn verify( + rng: &mut (impl RngCore + CryptoRng), + generators: &EvrfGenerators, + context: [u8; 32], + t: u16, + evrf_public_keys: &[::G], + participations: &HashMap>, + ) -> Result, EvrfError> { + let Ok(n) = u16::try_from(evrf_public_keys.len()) else { Err(EvrfError::TooManyParticipants)? 
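// Illustrative sketch (not from this patch) of the batching strategy `verify` uses below:
// every proof is first checked in a single batch, and only if that batch fails is each
// proof re-verified on its own to pin down exactly which participants were faulty.
fn find_faulty<T>(
  items: &[T],
  batch_ok: impl Fn(&[T]) -> bool,
  single_ok: impl Fn(&T) -> bool,
) -> Vec<usize> {
  if batch_ok(items) {
    return Vec::new();
  }
  items
    .iter()
    .enumerate()
    .filter(|(_, item)| !single_ok(item))
    .map(|(i, _)| i)
    .collect()
}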
}; + if (t == 0) || (t > n) { + Err(EvrfError::InvalidThreshold)?; + } + if evrf_public_keys.iter().any(|key| bool::from(key.is_identity())) { + Err(EvrfError::PublicKeyWasIdentity)?; + }; + for i in participations.keys() { + if u16::from(*i) > n { + Err(EvrfError::NonExistentParticipant)?; + } + } + + let mut valid = HashMap::with_capacity(participations.len()); + let mut faulty = HashSet::new(); + + let transcript = Self::initial_transcript(context, evrf_public_keys, t); + + let mut evrf_verifier = Generators::batch_verifier(); + for (i, participation) in participations { + let evrf_public_key = evrf_public_keys[usize::from(u16::from(*i)) - 1]; + + let mut per_proof_transcript = Blake2s256::new(); + per_proof_transcript.update(transcript); + per_proof_transcript.update(evrf_public_key.to_bytes()); + + // Clone the verifier so if this proof is faulty, it doesn't corrupt the verifier + let mut verifier_clone = evrf_verifier.clone(); + let Ok(data) = Evrf::::verify( + rng, + &generators.0, + &mut verifier_clone, + per_proof_transcript.finalize().into(), + usize::from(t), + evrf_public_keys, + evrf_public_key, + &participation.proof, + ) else { + faulty.insert(*i); + continue; + }; + evrf_verifier = verifier_clone; + + valid.insert(*i, (participation.encrypted_secret_shares.clone(), data)); + } + debug_assert_eq!(valid.len() + faulty.len(), participations.len()); + + // Perform the batch verification of the eVRFs + if !generators.0.verify(evrf_verifier) { + // If the batch failed, verify them each individually + for (i, participation) in participations { + if faulty.contains(i) { + continue; + } + let mut evrf_verifier = Generators::batch_verifier(); + Evrf::::verify( + rng, + &generators.0, + &mut evrf_verifier, + context, + usize::from(t), + evrf_public_keys, + evrf_public_keys[usize::from(u16::from(*i)) - 1], + &participation.proof, + ) + .expect("evrf failed basic checks yet prover wasn't prior marked faulty"); + if !generators.0.verify(evrf_verifier) { + valid.remove(i); + faulty.insert(*i); + } + } + } + debug_assert_eq!(valid.len() + faulty.len(), participations.len()); + + // Perform the batch verification of the shares + let mut sum_encrypted_secret_shares = HashMap::with_capacity(usize::from(n)); + let mut sum_masks = HashMap::with_capacity(usize::from(n)); + let mut all_encrypted_secret_shares = HashMap::with_capacity(usize::from(t)); + { + let mut share_verification_statements_actual = HashMap::with_capacity(valid.len()); + if !{ + let mut g_scalar = C::F::ZERO; + let mut pairs = Vec::with_capacity(valid.len() * (usize::from(t) + evrf_public_keys.len())); + for (i, (encrypted_secret_shares, data)) in &valid { + let (this_g_scalar, mut these_pairs) = share_verification_statements::( + &mut *rng, + &data.coefficients, + evrf_public_keys + .len() + .try_into() + .expect("n prior checked to be <= u16::MAX couldn't be converted to a u16"), + &data.encryption_commitments, + encrypted_secret_shares, + ); + // Queue this into our batch + g_scalar += this_g_scalar; + pairs.extend(&these_pairs); + + // Also push this g_scalar onto these_pairs so these_pairs can be verified individually + // upon error + these_pairs.push((this_g_scalar, generators.0.g())); + share_verification_statements_actual.insert(*i, these_pairs); + + // Also format this data as we'd need it upon success + let mut formatted_encrypted_secret_shares = HashMap::with_capacity(usize::from(n)); + for (j, enc_share) in encrypted_secret_shares { + /* + We calculcate verification shares as the sum of the encrypted scalars, 
minus their + masks. This only does one scalar multiplication, and `1+t` point additions (with + one negation), and is accordingly much cheaper than interpolating the commitments. + This is only possible because already interpolated the commitments to verify the + encrypted secret share. + */ + let sum_encrypted_secret_share = + sum_encrypted_secret_shares.get(j).copied().unwrap_or(C::F::ZERO); + let sum_mask = sum_masks.get(j).copied().unwrap_or(C::G::identity()); + sum_encrypted_secret_shares.insert(*j, sum_encrypted_secret_share + enc_share); + + let j_index = usize::from(u16::from(*j)) - 1; + sum_masks.insert(*j, sum_mask + data.encryption_commitments[j_index]); + + formatted_encrypted_secret_shares.insert(*j, (data.ecdh_keys[j_index], *enc_share)); + } + all_encrypted_secret_shares.insert(*i, formatted_encrypted_secret_shares); + } + pairs.push((g_scalar, generators.0.g())); + bool::from(multiexp_vartime(&pairs).is_identity()) + } { + // If the batch failed, verify them each individually + for (i, pairs) in share_verification_statements_actual { + if !bool::from(multiexp_vartime(&pairs).is_identity()) { + valid.remove(&i); + faulty.insert(i); + } + } + } + } + debug_assert_eq!(valid.len() + faulty.len(), participations.len()); + + let mut faulty = faulty.into_iter().collect::>(); + if !faulty.is_empty() { + faulty.sort_unstable(); + return Ok(VerifyResult::Invalid(faulty)); + } + + // We check at least t key shares of people have participated in contributing entropy + // Since the key shares of the participants exceed t, meaning if they're malicious they can + // reconstruct the key regardless, this is safe to the threshold + { + let mut participating_weight = 0; + let mut evrf_public_keys_mut = evrf_public_keys.to_vec(); + for i in valid.keys() { + let evrf_public_key = evrf_public_keys[usize::from(u16::from(*i)) - 1]; + + // Remove this key from the Vec to prevent double-counting + /* + Double-counting would be a risk if multiple participants shared an eVRF public key and + participated. This code does still allow such participants (in order to let participants + be weighted), and any one of them participating will count as all participating. This is + fine as any one such participant will be able to decrypt the shares for themselves and + all other participants, so this is still a key generated by an amount of participants who + could simply reconstruct the key. 
+ */ + let start_len = evrf_public_keys_mut.len(); + evrf_public_keys_mut.retain(|key| *key != evrf_public_key); + let end_len = evrf_public_keys_mut.len(); + let count = start_len - end_len; + + participating_weight += count; + } + if participating_weight < usize::from(t) { + return Ok(VerifyResult::NotEnoughParticipants); + } + } + + // If we now have >= t participations, calculate the group key and verification shares + + // The group key is the sum of the zero coefficients + let group_key = valid.values().map(|(_, evrf_data)| evrf_data.coefficients[0]).sum::(); + + // Calculate each user's verification share + let mut verification_shares = HashMap::with_capacity(usize::from(n)); + for i in (1 ..= n).map(Participant) { + verification_shares + .insert(i, (C::generator() * sum_encrypted_secret_shares[&i]) - sum_masks[&i]); + } + + Ok(VerifyResult::Valid(EvrfDkg { + t, + n, + evrf_public_keys: evrf_public_keys.to_vec(), + group_key, + verification_shares, + encrypted_secret_shares: all_encrypted_secret_shares, + })) + } + + pub fn keys( + &self, + evrf_private_key: &Zeroizing<::F>, + ) -> Vec> { + let evrf_public_key = ::generator() * evrf_private_key.deref(); + let mut is = Vec::with_capacity(1); + for (i, evrf_key) in self.evrf_public_keys.iter().enumerate() { + if *evrf_key == evrf_public_key { + let i = u16::try_from(i).expect("n <= u16::MAX yet i > u16::MAX?"); + let i = Participant(1 + i); + is.push(i); + } + } + + let mut res = Vec::with_capacity(is.len()); + for i in is { + let mut secret_share = Zeroizing::new(C::F::ZERO); + for shares in self.encrypted_secret_shares.values() { + let (ecdh_keys, enc_share) = shares[&i]; + + let mut ecdh = Zeroizing::new(C::F::ZERO); + for point in ecdh_keys { + let (mut x, mut y) = + ::G::to_xy(point * evrf_private_key.deref()).unwrap(); + *ecdh += x; + x.zeroize(); + y.zeroize(); + } + *secret_share += enc_share - ecdh.deref(); + } + + debug_assert_eq!(self.verification_shares[&i], C::generator() * secret_share.deref()); + + res.push(ThresholdKeys::from(ThresholdCore { + params: ThresholdParams::new(self.t, self.n, i).unwrap(), + interpolation: Interpolation::Lagrange, + secret_share, + group_key: self.group_key, + verification_shares: self.verification_shares.clone(), + })); + } + res + } +} diff --git a/crypto/dkg/src/evrf/proof.rs b/crypto/dkg/src/evrf/proof.rs new file mode 100644 index 00000000..9c16fec6 --- /dev/null +++ b/crypto/dkg/src/evrf/proof.rs @@ -0,0 +1,690 @@ +use core::{marker::PhantomData, ops::Deref, fmt}; + +use zeroize::{Zeroize, Zeroizing}; + +use rand_core::{RngCore, CryptoRng, SeedableRng}; +use rand_chacha::ChaCha20Rng; + +use generic_array::{typenum::Unsigned, ArrayLength, GenericArray}; + +use blake2::{Digest, Blake2s256}; +use ciphersuite::{ + group::{ff::Field, Group, GroupEncoding}, + Ciphersuite, +}; + +use generalized_bulletproofs::{ + *, + transcript::{Transcript as ProverTranscript, VerifierTranscript}, + arithmetic_circuit_proof::*, +}; +use generalized_bulletproofs_circuit_abstraction::*; + +use ec_divisors::{DivisorCurve, ScalarDecomposition}; +use generalized_bulletproofs_ec_gadgets::*; + +/// A pair of curves to perform the eVRF with. 
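Back in `EvrfDkg::verify` above, the verification shares fall out of already-verified data (notation illustrative): with each dealer `i` publishing encrypted shares `c_{i,j} = s_{i,j} + z_{i,j}` and mask commitments `Z_{i,j} = z_{i,j} * G`, participant `j`'s verification share is `VS_j = (sum_i c_{i,j}) * G - sum_i Z_{i,j} = (sum_i s_{i,j}) * G`, i.e. one scalar multiplication plus a handful of point additions per participant, with no interpolation of the coefficient commitments.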
+pub trait EvrfCurve: Ciphersuite { + type EmbeddedCurve: Ciphersuite::F>>; + type EmbeddedCurveParameters: DiscreteLogParameters; +} + +#[cfg(feature = "evrf-secp256k1")] +impl EvrfCurve for ciphersuite::Secp256k1 { + type EmbeddedCurve = secq256k1::Secq256k1; + type EmbeddedCurveParameters = secq256k1::Secq256k1; +} + +#[cfg(feature = "evrf-ed25519")] +impl EvrfCurve for ciphersuite::Ed25519 { + type EmbeddedCurve = embedwards25519::Embedwards25519; + type EmbeddedCurveParameters = embedwards25519::Embedwards25519; +} + +#[cfg(feature = "evrf-ristretto")] +impl EvrfCurve for ciphersuite::Ristretto { + type EmbeddedCurve = embedwards25519::Embedwards25519; + type EmbeddedCurveParameters = embedwards25519::Embedwards25519; +} + +fn sample_point(rng: &mut (impl RngCore + CryptoRng)) -> C::G { + let mut repr = ::Repr::default(); + loop { + rng.fill_bytes(repr.as_mut()); + if let Ok(point) = C::read_G(&mut repr.as_ref()) { + if bool::from(!point.is_identity()) { + return point; + } + } + } +} + +/// Generators for eVRF proof. +#[derive(Clone, Debug)] +pub struct EvrfGenerators(pub(crate) Generators); + +impl EvrfGenerators { + /// Create a new set of generators. + pub fn new(max_threshold: u16, max_participants: u16) -> EvrfGenerators { + let g = C::generator(); + let mut rng = ChaCha20Rng::from_seed(Blake2s256::digest(g.to_bytes()).into()); + let h = sample_point::(&mut rng); + let (_, generators) = + Evrf::::muls_and_generators_to_use(max_threshold.into(), max_participants.into()); + let mut g_bold = vec![]; + let mut h_bold = vec![]; + for _ in 0 .. generators { + g_bold.push(sample_point::(&mut rng)); + h_bold.push(sample_point::(&mut rng)); + } + Self(Generators::new(g, h, g_bold, h_bold).unwrap()) + } +} + +/// The result of proving for an eVRF. +pub(crate) struct EvrfProveResult { + /// The coefficients for use in the DKG. + pub(crate) coefficients: Vec>, + /// The masks to encrypt secret shares with. + pub(crate) encryption_masks: Vec>, + /// The proof itself. + pub(crate) proof: Vec, +} + +/// The result of verifying an eVRF. +pub(crate) struct EvrfVerifyResult { + /// The commitments to the coefficients for use in the DKG. + pub(crate) coefficients: Vec, + /// The ephemeral public keys to perform ECDHs with + pub(crate) ecdh_keys: Vec<[::G; 2]>, + /// The commitments to the masks used to encrypt secret shares with. + pub(crate) encryption_commitments: Vec, +} + +impl fmt::Debug for EvrfVerifyResult { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("EvrfVerifyResult").finish_non_exhaustive() + } +} + +/// A struct to prove/verify eVRFs with. +pub(crate) struct Evrf(PhantomData); +impl Evrf { + // Sample uniform points (via rejection-sampling) on the embedded elliptic curve + fn transcript_to_points( + seed: [u8; 32], + coefficients: usize, + ) -> Vec<::G> { + // We need to do two Diffie-Hellman's per coefficient in order to achieve an unbiased result + let quantity = 2 * coefficients; + + let mut rng = ChaCha20Rng::from_seed(seed); + let mut res = Vec::with_capacity(quantity); + for _ in 0 .. 
quantity { + res.push(sample_point::(&mut rng)); + } + res + } + + /// Read a Variable from a theoretical vector commitment tape + fn read_one_from_tape(generators_to_use: usize, start: &mut usize) -> Variable { + // Each commitment has twice as many variables as generators in use + let commitment = *start / generators_to_use; + // The index will be less than the amount of generators in use, as half are left and half are + // right + let index = *start % generators_to_use; + let res = Variable::CG { commitment, index }; + *start += 1; + res + } + + /// Read a set of variables from a theoretical vector commitment tape + fn read_from_tape( + generators_to_use: usize, + start: &mut usize, + ) -> GenericArray { + let mut buf = Vec::with_capacity(N::USIZE); + for _ in 0 .. N::USIZE { + buf.push(Self::read_one_from_tape(generators_to_use, start)); + } + GenericArray::from_slice(&buf).clone() + } + + /// Read `PointWithDlog`s, which share a discrete logarithm, from the theoretical vector + /// commitment tape. + fn point_with_dlogs( + start: &mut usize, + quantity: usize, + generators_to_use: usize, + ) -> Vec> { + // We define a serialized tape of the discrete logarithm, then for each divisor/point, we push: + // zero, x**i, y x**i, y, x_coord, y_coord + // We then chunk that into vector commitments + // Here, we take the assumed layout and generate the expected `Variable`s for this layout + + let dlog = Self::read_from_tape(generators_to_use, start); + + let mut res = Vec::with_capacity(quantity); + let mut read_point_with_dlog = || { + let zero = Self::read_one_from_tape(generators_to_use, start); + let x_from_power_of_2 = Self::read_from_tape(generators_to_use, start); + let yx = Self::read_from_tape(generators_to_use, start); + let y = Self::read_one_from_tape(generators_to_use, start); + let divisor = Divisor { zero, x_from_power_of_2, yx, y }; + + let point = ( + Self::read_one_from_tape(generators_to_use, start), + Self::read_one_from_tape(generators_to_use, start), + ); + + res.push(PointWithDlog { dlog: dlog.clone(), divisor, point }); + }; + + for _ in 0 .. 
quantity { + read_point_with_dlog(); + } + res + } + + fn muls_and_generators_to_use(coefficients: usize, ecdhs: usize) -> (usize, usize) { + const MULS_PER_DH: usize = 7; + // 1 DH to prove the discrete logarithm corresponds to the eVRF public key + // 2 DHs per generated coefficient + // 2 DHs per generated ECDH + let expected_muls = MULS_PER_DH * (1 + (2 * coefficients) + (2 * 2 * ecdhs)); + let generators_to_use = { + let mut padded_pow_of_2 = 1; + while padded_pow_of_2 < expected_muls { + padded_pow_of_2 <<= 1; + } + // This may be as small as 16, which would create an excessive amount of vector commitments + // We set a floor of 2048 rows for bandwidth reasons + padded_pow_of_2.max(2048) + }; + (expected_muls, generators_to_use) + } + + fn circuit( + curve_spec: &CurveSpec, + evrf_public_key: (C::F, C::F), + coefficients: usize, + ecdh_commitments: &[[(C::F, C::F); 2]], + generator_tables: &[&GeneratorTable], + circuit: &mut Circuit, + transcript: &mut impl Transcript, + ) { + let (expected_muls, generators_to_use) = + Self::muls_and_generators_to_use(coefficients, ecdh_commitments.len()); + let (challenge, challenged_generators) = + circuit.discrete_log_challenge(transcript, curve_spec, generator_tables); + debug_assert_eq!(challenged_generators.len(), 1 + (2 * coefficients) + ecdh_commitments.len()); + + // The generator tables/challenged generators are expected to have the following layouts + // G, coefficients * [A, B], ecdhs * [P] + #[allow(non_snake_case)] + let challenged_G = &challenged_generators[0]; + + // Execute the circuit for the coefficients + let mut tape_pos = 0; + { + let mut point_with_dlogs = + Self::point_with_dlogs(&mut tape_pos, 1 + (2 * coefficients), generators_to_use) + .into_iter(); + + // Verify the discrete logarithm is in fact the discrete logarithm of the eVRF public key + let point = circuit.discrete_log( + curve_spec, + point_with_dlogs.next().unwrap(), + &challenge, + challenged_G, + ); + circuit.equality(LinComb::from(point.x()), &LinComb::empty().constant(evrf_public_key.0)); + circuit.equality(LinComb::from(point.y()), &LinComb::empty().constant(evrf_public_key.1)); + + // Verify the DLog claims against the sampled points + for (i, pair) in challenged_generators[1 ..].chunks(2).take(coefficients).enumerate() { + let mut lincomb = LinComb::empty(); + debug_assert_eq!(pair.len(), 2); + for challenged_generator in pair { + let point = circuit.discrete_log( + curve_spec, + point_with_dlogs.next().unwrap(), + &challenge, + challenged_generator, + ); + // For each point in this pair, add its x coordinate to a lincomb + lincomb = lincomb.term(C::F::ONE, point.x()); + } + // Constrain the sum of the two x coordinates to be equal to the value in the Pedersen + // commitment + circuit.equality(lincomb, &LinComb::from(Variable::V(i))); + } + debug_assert!(point_with_dlogs.next().is_none()); + } + + // Now execute the circuit for the ECDHs + let mut challenged_generators = challenged_generators.iter().skip(1 + (2 * coefficients)); + for (i, ecdh) in ecdh_commitments.iter().enumerate() { + let challenged_generator = challenged_generators.next().unwrap(); + let mut lincomb = LinComb::empty(); + for ecdh in ecdh { + let mut point_with_dlogs = + Self::point_with_dlogs(&mut tape_pos, 2, generators_to_use).into_iter(); + + // One proof of the ECDH secret * G for the commitment published + let point = circuit.discrete_log( + curve_spec, + point_with_dlogs.next().unwrap(), + &challenge, + challenged_G, + ); + circuit.equality(LinComb::from(point.x()),
&LinComb::empty().constant(ecdh.0)); + circuit.equality(LinComb::from(point.y()), &LinComb::empty().constant(ecdh.1)); + + // One proof of the ECDH secret * P for the ECDH + let point = circuit.discrete_log( + curve_spec, + point_with_dlogs.next().unwrap(), + &challenge, + challenged_generator, + ); + // For each point in this pair, add its x coordinate to a lincomb + lincomb = lincomb.term(C::F::ONE, point.x()); + } + + // Constrain the sum of the two x coordinates to be equal to the value in the Pedersen + // commitment + circuit.equality(lincomb, &LinComb::from(Variable::V(coefficients + i))); + } + + debug_assert_eq!(expected_muls, circuit.muls()); + debug_assert!(challenged_generators.next().is_none()); + } + + /// Prove a point on an elliptic curve had its discrete logarithm generated via an eVRF. + pub(crate) fn prove( + rng: &mut (impl RngCore + CryptoRng), + generators: &Generators, + transcript: [u8; 32], + coefficients: usize, + ecdh_public_keys: &[<::EmbeddedCurve as Ciphersuite>::G], + evrf_private_key: &Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>, + ) -> Result, AcError> { + let curve_spec = CurveSpec { + a: <::EmbeddedCurve as Ciphersuite>::G::a(), + b: <::EmbeddedCurve as Ciphersuite>::G::b(), + }; + + // A tape of the discrete logarithm, then [zero, x**i, y x**i, y, x_coord, y_coord] + let mut vector_commitment_tape = vec![]; + + let mut generator_tables = Vec::with_capacity(1 + (2 * coefficients) + ecdh_public_keys.len()); + + // A function to calculate a divisor and push it onto the tape + // This defines a vec, divisor_points, outside of the fn to reuse its allocation + let mut divisor = + |vector_commitment_tape: &mut Vec<_>, + dlog: &ScalarDecomposition<<::EmbeddedCurve as Ciphersuite>::F>, + push_generator: bool, + generator: <::EmbeddedCurve as Ciphersuite>::G, + dh: <::EmbeddedCurve as Ciphersuite>::G| { + if push_generator { + let (x, y) = ::G::to_xy(generator).unwrap(); + generator_tables.push(GeneratorTable::new(&curve_spec, x, y)); + } + + let mut divisor = dlog.scalar_mul_divisor(generator).normalize_x_coefficient(); + + vector_commitment_tape.push(divisor.zero_coefficient); + + for coefficient in divisor.x_coefficients.iter().skip(1) { + vector_commitment_tape.push(*coefficient); + } + for _ in divisor.x_coefficients.len() .. + ::XCoefficientsMinusOne::USIZE + { + vector_commitment_tape.push(::F::ZERO); + } + + for coefficient in divisor.yx_coefficients.first().unwrap_or(&vec![]) { + vector_commitment_tape.push(*coefficient); + } + for _ in divisor.yx_coefficients.first().unwrap_or(&vec![]).len() .. 
+ ::YxCoefficients::USIZE + { + vector_commitment_tape.push(::F::ZERO); + } + + vector_commitment_tape + .push(divisor.y_coefficients.first().copied().unwrap_or(::F::ZERO)); + + divisor.zeroize(); + drop(divisor); + + let (x, y) = ::G::to_xy(dh).unwrap(); + vector_commitment_tape.push(x); + vector_commitment_tape.push(y); + + (x, y) + }; + + // Start with the coefficients + let evrf_public_key; + let mut actual_coefficients = Vec::with_capacity(coefficients); + { + // This is checked at a higher level + let dlog = + ScalarDecomposition::<::F>::new(**evrf_private_key) + .expect("eVRF private key was zero"); + let points = Self::transcript_to_points(transcript, coefficients); + + // Start by pushing the discrete logarithm onto the tape + for coefficient in dlog.decomposition() { + vector_commitment_tape.push(<_>::from(*coefficient)); + } + + // Push a divisor for proving that we're using the correct scalar + evrf_public_key = divisor( + &mut vector_commitment_tape, + &dlog, + true, + <::EmbeddedCurve as Ciphersuite>::generator(), + <::EmbeddedCurve as Ciphersuite>::generator() * evrf_private_key.deref(), + ); + + // Push a divisor for each point we use in the eVRF + for pair in points.chunks(2) { + let mut res = Zeroizing::new(C::F::ZERO); + for point in pair { + let (dh_x, _) = divisor( + &mut vector_commitment_tape, + &dlog, + true, + *point, + *point * evrf_private_key.deref(), + ); + *res += dh_x; + } + actual_coefficients.push(res); + } + debug_assert_eq!(actual_coefficients.len(), coefficients); + } + + // Now do the ECDHs for the encryption + let mut encryption_masks = Vec::with_capacity(ecdh_public_keys.len()); + let mut ecdh_commitments = Vec::with_capacity(2 * ecdh_public_keys.len()); + let mut ecdh_commitments_xy = Vec::with_capacity(ecdh_public_keys.len()); + for ecdh_public_key in ecdh_public_keys { + ecdh_commitments_xy.push([(C::F::ZERO, C::F::ZERO); 2]); + + let mut res = Zeroizing::new(C::F::ZERO); + for j in 0 .. 
2 { + let mut ecdh_private_key; + loop { + ecdh_private_key = ::F::random(&mut *rng); + // Generate a non-0 ECDH private key, as necessary to not produce an identity output + // Identity isn't representable with the divisors, hence the explicit effort + if bool::from(!ecdh_private_key.is_zero()) { + break; + } + } + let dlog = + ScalarDecomposition::<::F>::new(ecdh_private_key) + .expect("ECDH private key was zero"); + let ecdh_commitment = ::generator() * ecdh_private_key; + ecdh_commitments.push(ecdh_commitment); + ecdh_commitments_xy.last_mut().unwrap()[j] = + <::G as DivisorCurve>::to_xy(ecdh_commitment).unwrap(); + + // Start by pushing the discrete logarithm onto the tape + for coefficient in dlog.decomposition() { + vector_commitment_tape.push(<_>::from(*coefficient)); + } + + // Push a divisor for proving that we're using the correct scalar for the commitment + divisor( + &mut vector_commitment_tape, + &dlog, + false, + <::EmbeddedCurve as Ciphersuite>::generator(), + <::EmbeddedCurve as Ciphersuite>::generator() * ecdh_private_key, + ); + // Push a divisor for the key we're performing the ECDH with + let (dh_x, _) = divisor( + &mut vector_commitment_tape, + &dlog, + j == 0, + *ecdh_public_key, + *ecdh_public_key * ecdh_private_key, + ); + *res += dh_x; + + ecdh_private_key.zeroize(); + } + encryption_masks.push(res); + } + debug_assert_eq!(encryption_masks.len(), ecdh_public_keys.len()); + + // Now that we have the vector commitment tape, chunk it + let (_, generators_to_use) = + Self::muls_and_generators_to_use(coefficients, ecdh_public_keys.len()); + + let mut vector_commitments = + Vec::with_capacity(vector_commitment_tape.len().div_ceil(generators_to_use)); + for chunk in vector_commitment_tape.chunks(generators_to_use) { + let g_values = chunk[.. 
generators_to_use.min(chunk.len())].to_vec().into(); + vector_commitments.push(PedersenVectorCommitment { g_values, mask: C::F::random(&mut *rng) }); + } + + vector_commitment_tape.zeroize(); + drop(vector_commitment_tape); + + let mut commitments = Vec::with_capacity(coefficients + ecdh_public_keys.len()); + for coefficient in &actual_coefficients { + commitments.push(PedersenCommitment { value: **coefficient, mask: C::F::random(&mut *rng) }); + } + for enc_mask in &encryption_masks { + commitments.push(PedersenCommitment { value: **enc_mask, mask: C::F::random(&mut *rng) }); + } + + let mut transcript = ProverTranscript::new(transcript); + let commited_commitments = transcript.write_commitments( + vector_commitments + .iter() + .map(|commitment| { + commitment + .commit(generators.g_bold_slice(), generators.h()) + .ok_or(AcError::NotEnoughGenerators) + }) + .collect::>()?, + commitments + .iter() + .map(|commitment| commitment.commit(generators.g(), generators.h())) + .collect(), + ); + for ecdh_commitment in ecdh_commitments { + transcript.push_point(ecdh_commitment); + } + + let mut circuit = Circuit::prove(vector_commitments, commitments.clone()); + Self::circuit( + &curve_spec, + evrf_public_key, + coefficients, + &ecdh_commitments_xy, + &generator_tables.iter().collect::>(), + &mut circuit, + &mut transcript, + ); + + let (statement, Some(witness)) = circuit + .statement( + generators.reduce(generators_to_use).ok_or(AcError::NotEnoughGenerators)?, + commited_commitments, + ) + .unwrap() + else { + panic!("proving yet wasn't yielded the witness"); + }; + statement.prove(&mut *rng, &mut transcript, witness).unwrap(); + + // Push the reveal onto the transcript + for commitment in &commitments { + transcript.push_point(generators.g() * commitment.value); + } + + // Define a weight to aggregate the commitments with + let mut agg_weights = Vec::with_capacity(commitments.len()); + agg_weights.push(C::F::ONE); + while agg_weights.len() < commitments.len() { + agg_weights.push(transcript.challenge::()); + } + let mut x = commitments + .iter() + .zip(&agg_weights) + .map(|(commitment, weight)| commitment.mask * *weight) + .sum::(); + + // Do a Schnorr PoK for the randomness of the aggregated Pedersen commitment + let mut r = C::F::random(&mut *rng); + transcript.push_point(generators.h() * r); + let c = transcript.challenge::(); + transcript.push_scalar(r + (c * x)); + r.zeroize(); + x.zeroize(); + + Ok(EvrfProveResult { + coefficients: actual_coefficients, + encryption_masks, + proof: transcript.complete(), + }) + } + + /// Verify an eVRF proof, returning the commitments output. 
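+ /// + /// Note that this largely queues work into the provided `BatchVerifier`; the proof is only actually checked once the caller executes the batch (as the test code later in this diff does via `generators.verify(verifier)`). The Schnorr opening check at the end is the one part performed immediately.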
+ #[allow(clippy::too_many_arguments)] + pub(crate) fn verify( + rng: &mut (impl RngCore + CryptoRng), + generators: &Generators, + verifier: &mut BatchVerifier, + transcript: [u8; 32], + coefficients: usize, + ecdh_public_keys: &[<::EmbeddedCurve as Ciphersuite>::G], + evrf_public_key: <::EmbeddedCurve as Ciphersuite>::G, + proof: &[u8], + ) -> Result, ()> { + let curve_spec = CurveSpec { + a: <::EmbeddedCurve as Ciphersuite>::G::a(), + b: <::EmbeddedCurve as Ciphersuite>::G::b(), + }; + + let mut generator_tables = Vec::with_capacity(1 + (2 * coefficients) + ecdh_public_keys.len()); + { + let (x, y) = + ::G::to_xy(::generator()) + .unwrap(); + generator_tables.push(GeneratorTable::new(&curve_spec, x, y)); + } + let points = Self::transcript_to_points(transcript, coefficients); + for generator in points { + let (x, y) = ::G::to_xy(generator).unwrap(); + generator_tables.push(GeneratorTable::new(&curve_spec, x, y)); + } + for generator in ecdh_public_keys { + let (x, y) = ::G::to_xy(*generator).unwrap(); + generator_tables.push(GeneratorTable::new(&curve_spec, x, y)); + } + + let (_, generators_to_use) = + Self::muls_and_generators_to_use(coefficients, ecdh_public_keys.len()); + + let mut transcript = VerifierTranscript::new(transcript, proof); + + let dlog_len = ::ScalarBits::USIZE; + let divisor_len = 1 + + ::XCoefficientsMinusOne::USIZE + + ::YxCoefficients::USIZE + + 1; + let dlog_proof_len = divisor_len + 2; + + let coeffs_vc_variables = dlog_len + ((1 + (2 * coefficients)) * dlog_proof_len); + let ecdhs_vc_variables = ((2 * ecdh_public_keys.len()) * dlog_len) + + ((2 * 2 * ecdh_public_keys.len()) * dlog_proof_len); + let vcs = (coeffs_vc_variables + ecdhs_vc_variables).div_ceil(generators_to_use); + + let all_commitments = + transcript.read_commitments(vcs, coefficients + ecdh_public_keys.len()).map_err(|_| ())?; + let commitments = all_commitments.V().to_vec(); + + let mut ecdh_keys = Vec::with_capacity(ecdh_public_keys.len()); + let mut ecdh_keys_xy = Vec::with_capacity(ecdh_public_keys.len()); + for _ in 0 .. ecdh_public_keys.len() { + let ecdh_keys_i = [ + transcript.read_point::().map_err(|_| ())?, + transcript.read_point::().map_err(|_| ())?, + ]; + ecdh_keys.push(ecdh_keys_i); + // This bans zero ECDH keys + ecdh_keys_xy.push([ + <::G as DivisorCurve>::to_xy(ecdh_keys_i[0]).ok_or(())?, + <::G as DivisorCurve>::to_xy(ecdh_keys_i[1]).ok_or(())?, + ]); + } + + let mut circuit = Circuit::verify(); + Self::circuit( + &curve_spec, + ::G::to_xy(evrf_public_key).ok_or(())?, + coefficients, + &ecdh_keys_xy, + &generator_tables.iter().collect::>(), + &mut circuit, + &mut transcript, + ); + + let (statement, None) = + circuit.statement(generators.reduce(generators_to_use).ok_or(())?, all_commitments).unwrap() + else { + panic!("verifying yet was yielded a witness"); + }; + + statement.verify(rng, verifier, &mut transcript).map_err(|_| ())?; + + // Read the openings for the commitments + let mut openings = Vec::with_capacity(commitments.len()); + for _ in 0 .. 
commitments.len() { + openings.push(transcript.read_point::().map_err(|_| ())?); + } + + // Verify the openings of the commitments + let mut agg_weights = Vec::with_capacity(commitments.len()); + agg_weights.push(C::F::ONE); + while agg_weights.len() < commitments.len() { + agg_weights.push(transcript.challenge::()); + } + + let sum_points = + openings.iter().zip(&agg_weights).map(|(point, weight)| *point * *weight).sum::(); + let sum_commitments = + commitments.into_iter().zip(agg_weights).map(|(point, weight)| point * weight).sum::(); + #[allow(non_snake_case)] + let A = sum_commitments - sum_points; + + #[allow(non_snake_case)] + let R = transcript.read_point::().map_err(|_| ())?; + let c = transcript.challenge::(); + let s = transcript.read_scalar::().map_err(|_| ())?; + + // Doesn't batch verify this as we can't access the internals of the GBP batch verifier + if (R + (A * c)) != (generators.h() * s) { + Err(())?; + } + + if !transcript.complete().is_empty() { + Err(())? + }; + + let encryption_commitments = openings[coefficients ..].to_vec(); + let coefficients = openings[.. coefficients].to_vec(); + Ok(EvrfVerifyResult { coefficients, ecdh_keys, encryption_commitments }) + } +} diff --git a/crypto/dkg/src/lib.rs b/crypto/dkg/src/lib.rs index 478f400f..7ab29168 100644 --- a/crypto/dkg/src/lib.rs +++ b/crypto/dkg/src/lib.rs @@ -4,7 +4,6 @@ use core::fmt::{self, Debug}; -#[cfg(feature = "std")] use thiserror::Error; use zeroize::Zeroize; @@ -21,6 +20,10 @@ pub mod encryption; #[cfg(feature = "std")] pub mod pedpop; +/// The one-round DKG described in the [eVRF paper](https://eprint.iacr.org/2024/397). +#[cfg(all(feature = "std", feature = "evrf"))] +pub mod evrf; + /// Promote keys between ciphersuites. #[cfg(feature = "std")] pub mod promote; @@ -63,8 +66,7 @@ impl fmt::Display for Participant { } /// Various errors possible during key generation. -#[derive(Clone, PartialEq, Eq, Debug)] -#[cfg_attr(feature = "std", derive(Error))] +#[derive(Clone, PartialEq, Eq, Debug, Error)] pub enum DkgError { /// A parameter was zero. #[cfg_attr(feature = "std", error("a parameter was 0 (threshold {0}, participants {1})"))] @@ -205,25 +207,37 @@ mod lib { } } - /// Calculate the lagrange coefficient for a signing set. - pub fn lagrange(i: Participant, included: &[Participant]) -> F { - let i_f = F::from(u64::from(u16::from(i))); + #[derive(Clone, PartialEq, Eq, Debug, Zeroize)] + pub(crate) enum Interpolation { + Constant(Vec), + Lagrange, + } - let mut num = F::ONE; - let mut denom = F::ONE; - for l in included { - if i == *l { - continue; + impl Interpolation { + pub(crate) fn interpolation_factor(&self, i: Participant, included: &[Participant]) -> F { + match self { + Interpolation::Constant(c) => c[usize::from(u16::from(i) - 1)], + Interpolation::Lagrange => { + let i_f = F::from(u64::from(u16::from(i))); + + let mut num = F::ONE; + let mut denom = F::ONE; + for l in included { + if i == *l { + continue; + } + + let share = F::from(u64::from(u16::from(*l))); + num *= share; + denom *= share - i_f; + } + + // Safe as this will only be 0 if we're part of the above loop + // (which we have an if case to avoid) + num * denom.invert().unwrap() + } } - - let share = F::from(u64::from(u16::from(*l))); - num *= share; - denom *= share - i_f; } - - // Safe as this will only be 0 if we're part of the above loop - // (which we have an if case to avoid) - num * denom.invert().unwrap() } /// Keys and verification shares generated by a DKG. 
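The `Interpolation::Lagrange` branch above evaluates the standard Lagrange coefficient at zero, i.e. the product over all other included indices l of l / (l - i). A minimal standalone sketch of that arithmetic, generic over `ff::PrimeField` (the free-function name `lagrange_factor` is illustrative only):

fn lagrange_factor<F: ff::PrimeField>(i: u16, included: &[u16]) -> F {
  let i_f = F::from(u64::from(i));
  let mut num = F::ONE;
  let mut denom = F::ONE;
  for l in included {
    if *l == i {
      continue;
    }
    // Accumulate l in the numerator and (l - i) in the denominator
    let share = F::from(u64::from(*l));
    num *= share;
    denom *= share - i_f;
  }
  // denom is non-zero so long as `included` is duplicate-free, making the unwrap safe
  num * denom.invert().unwrap()
}

The `Interpolation::Constant` variant instead reads the factor straight out of the stored vector, which is what the MuSig code changed later in this diff relies on.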
@@ -232,6 +246,8 @@ mod lib { pub struct ThresholdCore { /// Threshold Parameters. pub(crate) params: ThresholdParams, + /// The interpolation method used. + pub(crate) interpolation: Interpolation, /// Secret share key. pub(crate) secret_share: Zeroizing, @@ -246,6 +262,7 @@ mod lib { fmt .debug_struct("ThresholdCore") .field("params", &self.params) + .field("interpolation", &self.interpolation) .field("group_key", &self.group_key) .field("verification_shares", &self.verification_shares) .finish_non_exhaustive() @@ -255,6 +272,7 @@ mod lib { impl Zeroize for ThresholdCore { fn zeroize(&mut self) { self.params.zeroize(); + self.interpolation.zeroize(); self.secret_share.zeroize(); self.group_key.zeroize(); for share in self.verification_shares.values_mut() { @@ -266,16 +284,14 @@ mod lib { impl ThresholdCore { pub(crate) fn new( params: ThresholdParams, + interpolation: Interpolation, secret_share: Zeroizing, verification_shares: HashMap, ) -> ThresholdCore { let t = (1 ..= params.t()).map(Participant).collect::>(); - ThresholdCore { - params, - secret_share, - group_key: t.iter().map(|i| verification_shares[i] * lagrange::(*i, &t)).sum(), - verification_shares, - } + let group_key = + t.iter().map(|i| verification_shares[i] * interpolation.interpolation_factor(*i, &t)).sum(); + ThresholdCore { params, interpolation, secret_share, group_key, verification_shares } } /// Parameters for these keys. @@ -304,6 +320,15 @@ mod lib { writer.write_all(&self.params.t.to_le_bytes())?; writer.write_all(&self.params.n.to_le_bytes())?; writer.write_all(&self.params.i.to_bytes())?; + match &self.interpolation { + Interpolation::Constant(c) => { + writer.write_all(&[0])?; + for c in c { + writer.write_all(c.to_repr().as_ref())?; + } + } + Interpolation::Lagrange => writer.write_all(&[1])?, + }; let mut share_bytes = self.secret_share.to_repr(); writer.write_all(share_bytes.as_ref())?; share_bytes.as_mut().zeroize(); @@ -352,6 +377,20 @@ mod lib { ) }; + let mut interpolation = [0]; + reader.read_exact(&mut interpolation)?; + let interpolation = match interpolation[0] { + 0 => Interpolation::Constant({ + let mut res = Vec::with_capacity(usize::from(n)); + for _ in 0 .. n { + res.push(C::read_F(reader)?); + } + res + }), + 1 => Interpolation::Lagrange, + _ => Err(io::Error::other("invalid interpolation method"))?, + }; + let secret_share = Zeroizing::new(C::read_F(reader)?); let mut verification_shares = HashMap::new(); @@ -361,6 +400,7 @@ mod lib { Ok(ThresholdCore::new( ThresholdParams::new(t, n, i).map_err(|_| io::Error::other("invalid parameters"))?, + interpolation, secret_share, verification_shares, )) @@ -383,6 +423,7 @@ mod lib { /// View of keys, interpolated and offset for usage. 
#[derive(Clone)] pub struct ThresholdView { + interpolation: Interpolation, offset: C::F, group_key: C::G, included: Vec, @@ -395,6 +436,7 @@ mod lib { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt .debug_struct("ThresholdView") + .field("interpolation", &self.interpolation) .field("offset", &self.offset) .field("group_key", &self.group_key) .field("included", &self.included) @@ -480,12 +522,13 @@ mod lib { included.sort(); let mut secret_share = Zeroizing::new( - lagrange::(self.params().i(), &included) * self.secret_share().deref(), + self.core.interpolation.interpolation_factor(self.params().i(), &included) * + self.secret_share().deref(), ); let mut verification_shares = self.verification_shares(); for (i, share) in &mut verification_shares { - *share *= lagrange::(*i, &included); + *share *= self.core.interpolation.interpolation_factor(*i, &included); } // The offset is included by adding it to the participant with the lowest ID @@ -496,6 +539,7 @@ mod lib { *verification_shares.get_mut(&included[0]).unwrap() += C::generator() * offset; Ok(ThresholdView { + interpolation: self.core.interpolation.clone(), offset, group_key: self.group_key(), secret_share, @@ -528,6 +572,14 @@ mod lib { &self.included } + /// Return the interpolation factor for a signer. + pub fn interpolation_factor(&self, participant: Participant) -> Option { + if !self.included.contains(&participant) { + None? + } + Some(self.interpolation.interpolation_factor(participant, &self.included)) + } + /// Return the interpolated, offset secret share. pub fn secret_share(&self) -> &Zeroizing { &self.secret_share diff --git a/crypto/dkg/src/musig.rs b/crypto/dkg/src/musig.rs index 4d6b54c8..82843272 100644 --- a/crypto/dkg/src/musig.rs +++ b/crypto/dkg/src/musig.rs @@ -7,8 +7,6 @@ use std_shims::collections::HashMap; #[cfg(feature = "std")] use zeroize::Zeroizing; -#[cfg(feature = "std")] -use ciphersuite::group::ff::Field; use ciphersuite::{ group::{Group, GroupEncoding}, Ciphersuite, @@ -16,7 +14,7 @@ use ciphersuite::{ use crate::DkgError; #[cfg(feature = "std")] -use crate::{Participant, ThresholdParams, ThresholdCore, lagrange}; +use crate::{Participant, ThresholdParams, Interpolation, ThresholdCore}; fn check_keys(keys: &[C::G]) -> Result> { if keys.is_empty() { @@ -67,6 +65,7 @@ pub fn musig_key(context: &[u8], keys: &[C::G]) -> Result(context, keys)?; let mut res = C::G::identity(); for i in 1 ..= keys_len { + // TODO: Calculate this with a multiexp res += keys[usize::from(i - 1)] * binding_factor::(transcript.clone(), i); } Ok(res) @@ -104,38 +103,26 @@ pub fn musig( binding.push(binding_factor::(transcript.clone(), i)); } - // Multiply our private key by our binding factor - let mut secret_share = private_key.clone(); - *secret_share *= binding[pos]; + // Our secret share is our private key + let secret_share = private_key.clone(); // Calculate verification shares let mut verification_shares = HashMap::new(); - // When this library offers a ThresholdView for a specific signing set, it applies the lagrange - // factor - // Since this is a n-of-n scheme, there's only one possible signing set, and one possible - // lagrange factor - // In the name of simplicity, we define the group key as the sum of all bound keys - // Accordingly, the secret share must be multiplied by the inverse of the lagrange factor, along - // with all verification shares - // This is less performant than simply defining the group key as the sum of all post-lagrange - // bound keys, yet the simplicity is preferred - let 
included = (1 ..= keys_len) - // This error also shouldn't be possible, for the same reasons as documented above - .map(|l| Participant::new(l).ok_or(DkgError::InvalidSigningSet)) - .collect::, _>>()?; let mut group_key = C::G::identity(); - for (l, p) in included.iter().enumerate() { - let bound = keys[l] * binding[l]; - group_key += bound; + for l in 1 ..= keys_len { + let key = keys[usize::from(l) - 1]; + group_key += key * binding[usize::from(l - 1)]; - let lagrange_inv = lagrange::(*p, &included).invert().unwrap(); - if params.i() == *p { - *secret_share *= lagrange_inv; - } - verification_shares.insert(*p, bound * lagrange_inv); + // These errors also shouldn't be possible, for the same reasons as documented above + verification_shares.insert(Participant::new(l).ok_or(DkgError::InvalidSigningSet)?, key); } debug_assert_eq!(C::generator() * secret_share.deref(), verification_shares[¶ms.i()]); debug_assert_eq!(musig_key::(context, keys).unwrap(), group_key); - Ok(ThresholdCore { params, secret_share, group_key, verification_shares }) + Ok(ThresholdCore::new( + params, + Interpolation::Constant(binding), + secret_share, + verification_shares, + )) } diff --git a/crypto/dkg/src/pedpop.rs b/crypto/dkg/src/pedpop.rs index 1faeebe5..adfc6958 100644 --- a/crypto/dkg/src/pedpop.rs +++ b/crypto/dkg/src/pedpop.rs @@ -22,9 +22,9 @@ use multiexp::{multiexp_vartime, BatchVerifier}; use schnorr::SchnorrSignature; use crate::{ - Participant, DkgError, ThresholdParams, ThresholdCore, validate_map, + Participant, DkgError, ThresholdParams, Interpolation, ThresholdCore, validate_map, encryption::{ - ReadWrite, EncryptionKeyMessage, EncryptedMessage, Encryption, EncryptionKeyProof, + ReadWrite, EncryptionKeyMessage, EncryptedMessage, Encryption, Decryption, EncryptionKeyProof, DecryptionError, }, }; @@ -32,10 +32,10 @@ use crate::{ type FrostError = DkgError>; #[allow(non_snake_case)] -fn challenge(context: &str, l: Participant, R: &[u8], Am: &[u8]) -> C::F { +fn challenge(context: [u8; 32], l: Participant, R: &[u8], Am: &[u8]) -> C::F { let mut transcript = RecommendedTranscript::new(b"DKG FROST v0.2"); transcript.domain_separate(b"schnorr_proof_of_knowledge"); - transcript.append_message(b"context", context.as_bytes()); + transcript.append_message(b"context", context); transcript.append_message(b"participant", l.to_bytes()); transcript.append_message(b"nonce", R); transcript.append_message(b"commitments", Am); @@ -86,15 +86,15 @@ impl ReadWrite for Commitments { #[derive(Debug, Zeroize)] pub struct KeyGenMachine { params: ThresholdParams, - context: String, + context: [u8; 32], _curve: PhantomData, } impl KeyGenMachine { /// Create a new machine to generate a key. /// - /// The context string should be unique among multisigs. - pub fn new(params: ThresholdParams, context: String) -> KeyGenMachine { + /// The context should be unique among multisigs. 
+ pub fn new(params: ThresholdParams, context: [u8; 32]) -> KeyGenMachine { KeyGenMachine { params, context, _curve: PhantomData } } @@ -129,11 +129,11 @@ impl KeyGenMachine { // There's no reason to spend the time and effort to make this deterministic besides a // general obsession with canonicity and determinism though r, - challenge::(&self.context, self.params.i(), nonce.to_bytes().as_ref(), &cached_msg), + challenge::(self.context, self.params.i(), nonce.to_bytes().as_ref(), &cached_msg), ); // Additionally create an encryption mechanism to protect the secret shares - let encryption = Encryption::new(self.context.clone(), Some(self.params.i), rng); + let encryption = Encryption::new(self.context, self.params.i, rng); // Step 4: Broadcast let msg = @@ -225,7 +225,7 @@ impl ReadWrite for SecretShare { #[derive(Zeroize)] pub struct SecretShareMachine { params: ThresholdParams, - context: String, + context: [u8; 32], coefficients: Vec>, our_commitments: Vec, encryption: Encryption, @@ -274,7 +274,7 @@ impl SecretShareMachine { &mut batch, l, msg.commitments[0], - challenge::(&self.context, l, msg.sig.R.to_bytes().as_ref(), &msg.cached_msg), + challenge::(self.context, l, msg.sig.R.to_bytes().as_ref(), &msg.cached_msg), ); commitments.insert(l, msg.commitments.drain(..).collect::>()); @@ -472,9 +472,10 @@ impl KeyMachine { let KeyMachine { commitments, encryption, params, secret } = self; Ok(BlameMachine { commitments, - encryption, + encryption: encryption.into_decryption(), result: Some(ThresholdCore { params, + interpolation: Interpolation::Lagrange, secret_share: secret, group_key: stripes[0], verification_shares, @@ -486,7 +487,7 @@ impl KeyMachine { /// A machine capable of handling blame proofs. pub struct BlameMachine { commitments: HashMap>, - encryption: Encryption, + encryption: Decryption, result: Option>, } @@ -505,7 +506,6 @@ impl Zeroize for BlameMachine { for commitments in self.commitments.values_mut() { commitments.zeroize(); } - self.encryption.zeroize(); self.result.zeroize(); } } @@ -598,14 +598,13 @@ impl AdditionalBlameMachine { /// authenticated as having come from the supposed party and verified as valid. Usage of invalid /// commitments is considered undefined behavior, and may cause everything from inaccurate blame /// to panics. - pub fn new( - rng: &mut R, - context: String, + pub fn new( + context: [u8; 32], n: u16, mut commitment_msgs: HashMap>>, ) -> Result> { let mut commitments = HashMap::new(); - let mut encryption = Encryption::new(context, None, rng); + let mut encryption = Decryption::new(context); for i in 1 ..= n { let i = Participant::new(i).unwrap(); let Some(msg) = commitment_msgs.remove(&i) else { Err(DkgError::MissingParticipant(i))? 
}; diff --git a/crypto/dkg/src/promote.rs b/crypto/dkg/src/promote.rs index 7cad4f23..c8dcaed0 100644 --- a/crypto/dkg/src/promote.rs +++ b/crypto/dkg/src/promote.rs @@ -113,6 +113,7 @@ impl> GeneratorPromotion< Ok(ThresholdKeys { core: Arc::new(ThresholdCore::new( params, + self.base.core.interpolation.clone(), self.base.secret_share().clone(), verification_shares, )), diff --git a/crypto/dkg/src/tests/evrf/mod.rs b/crypto/dkg/src/tests/evrf/mod.rs new file mode 100644 index 00000000..e6fd2230 --- /dev/null +++ b/crypto/dkg/src/tests/evrf/mod.rs @@ -0,0 +1,79 @@ +use std::collections::HashMap; + +use zeroize::Zeroizing; +use rand_core::OsRng; +use rand::seq::SliceRandom; + +use ciphersuite::{group::ff::Field, Ciphersuite}; + +use crate::{ + Participant, + evrf::*, + tests::{THRESHOLD, PARTICIPANTS, recover_key}, +}; + +mod proof; +use proof::{Pallas, Vesta}; + +#[test] +fn evrf_dkg() { + let generators = EvrfGenerators::::new(THRESHOLD, PARTICIPANTS); + let context = [0; 32]; + + let mut priv_keys = vec![]; + let mut pub_keys = vec![]; + for i in 0 .. PARTICIPANTS { + let priv_key = ::F::random(&mut OsRng); + pub_keys.push(::generator() * priv_key); + priv_keys.push((Participant::new(1 + i).unwrap(), Zeroizing::new(priv_key))); + } + + let mut participations = HashMap::new(); + // Shuffle the private keys so we iterate over a random subset of them + priv_keys.shuffle(&mut OsRng); + for (i, priv_key) in priv_keys.iter().take(usize::from(THRESHOLD)) { + participations.insert( + *i, + EvrfDkg::::participate( + &mut OsRng, + &generators, + context, + THRESHOLD, + &pub_keys, + priv_key, + ) + .unwrap(), + ); + } + + let VerifyResult::Valid(dkg) = EvrfDkg::::verify( + &mut OsRng, + &generators, + context, + THRESHOLD, + &pub_keys, + &participations, + ) + .unwrap() else { + panic!("verify didn't return VerifyResult::Valid") + }; + + let mut group_key = None; + let mut verification_shares = None; + let mut all_keys = HashMap::new(); + for (i, priv_key) in priv_keys { + let keys = dkg.keys(&priv_key).into_iter().next().unwrap(); + assert_eq!(keys.params().i(), i); + assert_eq!(keys.params().t(), THRESHOLD); + assert_eq!(keys.params().n(), PARTICIPANTS); + group_key = group_key.or(Some(keys.group_key())); + verification_shares = verification_shares.or(Some(keys.verification_shares())); + assert_eq!(Some(keys.group_key()), group_key); + assert_eq!(Some(keys.verification_shares()), verification_shares); + + all_keys.insert(i, keys); + } + + // TODO: Test for all possible combinations of keys + assert_eq!(Pallas::generator() * recover_key(&all_keys), group_key.unwrap()); +} diff --git a/crypto/dkg/src/tests/evrf/proof.rs b/crypto/dkg/src/tests/evrf/proof.rs new file mode 100644 index 00000000..cc2fb7f7 --- /dev/null +++ b/crypto/dkg/src/tests/evrf/proof.rs @@ -0,0 +1,124 @@ +use std::time::Instant; + +use rand_core::OsRng; + +use zeroize::{Zeroize, Zeroizing}; +use generic_array::typenum::{Sum, Diff, Quot, U, U1, U2}; +use blake2::{Digest, Blake2b512}; + +use ciphersuite::{ + group::{ + ff::{FromUniformBytes, Field, PrimeField}, + Group, + }, + Ciphersuite, Secp256k1, Ed25519, Ristretto, +}; +use pasta_curves::{Ep, Eq, Fp, Fq}; + +use generalized_bulletproofs::{Generators, tests::generators}; +use generalized_bulletproofs_ec_gadgets::DiscreteLogParameters; + +use crate::evrf::proof::*; + +#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)] +pub(crate) struct Pallas; +impl Ciphersuite for Pallas { + type F = Fq; + type G = Ep; + type H = Blake2b512; + const ID: &'static [u8] = b"Pallas"; + fn 
generator() -> Ep { + Ep::generator() + } + fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F { + // This naive concat may be insecure in a real world deployment + // This is solely test code so it's fine + Self::F::from_uniform_bytes(&Self::H::digest([dst, msg].concat()).into()) + } + fn reduce_512(scalar: [u8; 64]) -> Self::F { + Self::F::from_uniform_bytes(&scalar) + } +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)] +pub(crate) struct Vesta; +impl Ciphersuite for Vesta { + type F = Fp; + type G = Eq; + type H = Blake2b512; + const ID: &'static [u8] = b"Vesta"; + fn generator() -> Eq { + Eq::generator() + } + fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F { + // This naive concat may be insecure in a real world deployment + // This is solely test code so it's fine + Self::F::from_uniform_bytes(&Self::H::digest([dst, msg].concat()).into()) + } + fn reduce_512(scalar: [u8; 64]) -> Self::F { + Self::F::from_uniform_bytes(&scalar) + } +} + +pub struct VestaParams; +impl DiscreteLogParameters for VestaParams { + type ScalarBits = U<{ <::F as PrimeField>::NUM_BITS as usize }>; + type XCoefficients = Quot, U2>; + type XCoefficientsMinusOne = Diff; + type YxCoefficients = Diff, U1>, U2>, U2>; +} + +impl EvrfCurve for Pallas { + type EmbeddedCurve = Vesta; + type EmbeddedCurveParameters = VestaParams; +} + +fn evrf_proof_test() { + let generators = generators(2048); + let vesta_private_key = Zeroizing::new(::F::random(&mut OsRng)); + let ecdh_public_keys = [ + ::G::random(&mut OsRng), + ::G::random(&mut OsRng), + ]; + let time = Instant::now(); + let res = + Evrf::::prove(&mut OsRng, &generators, [0; 32], 1, &ecdh_public_keys, &vesta_private_key) + .unwrap(); + println!("Proving time: {:?}", time.elapsed()); + + let time = Instant::now(); + let mut verifier = Generators::batch_verifier(); + Evrf::::verify( + &mut OsRng, + &generators, + &mut verifier, + [0; 32], + 1, + &ecdh_public_keys, + C::EmbeddedCurve::generator() * *vesta_private_key, + &res.proof, + ) + .unwrap(); + assert!(generators.verify(verifier)); + println!("Verifying time: {:?}", time.elapsed()); +} + +#[test] +fn pallas_evrf_proof_test() { + evrf_proof_test::(); +} + +#[test] +fn secp256k1_evrf_proof_test() { + evrf_proof_test::(); +} + +#[test] +fn ed25519_evrf_proof_test() { + evrf_proof_test::(); +} + +#[test] +fn ristretto_evrf_proof_test() { + evrf_proof_test::(); +} diff --git a/crypto/dkg/src/tests/mod.rs b/crypto/dkg/src/tests/mod.rs index f21d7254..4399d72a 100644 --- a/crypto/dkg/src/tests/mod.rs +++ b/crypto/dkg/src/tests/mod.rs @@ -6,7 +6,7 @@ use rand_core::{RngCore, CryptoRng}; use ciphersuite::{group::ff::Field, Ciphersuite}; -use crate::{Participant, ThresholdCore, ThresholdKeys, lagrange, musig::musig as musig_fn}; +use crate::{Participant, ThresholdCore, ThresholdKeys, musig::musig as musig_fn}; mod musig; pub use musig::test_musig; @@ -19,6 +19,9 @@ use pedpop::pedpop_gen; mod promote; use promote::test_generator_promotion; +#[cfg(all(test, feature = "evrf"))] +mod evrf; + /// Constant amount of participants to use when testing. pub const PARTICIPANTS: u16 = 5; /// Constant threshold of participants to use when testing. 
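`evrf_proof_test` above queues a single proof into the batch verifier before executing it. Since `Evrf::verify` only accumulates the statement into the `BatchVerifier` it is handed, several proofs can plausibly share one final check; a rough sketch under that assumption, reusing the `Pallas`/`Vesta` test ciphersuites and this test module's imports (the `Evrf::<Pallas>` turbofish and the helper name `batch_two_evrf_proofs` are reconstructions for illustration):

fn batch_two_evrf_proofs() {
  let generators = generators(2048);
  let ecdh_public_keys = [
    <Vesta as Ciphersuite>::G::random(&mut OsRng),
    <Vesta as Ciphersuite>::G::random(&mut OsRng),
  ];

  let mut verifier = Generators::batch_verifier();
  for _ in 0 .. 2 {
    // Each iteration proves for an independent eVRF private key
    let key = Zeroizing::new(<Vesta as Ciphersuite>::F::random(&mut OsRng));
    let res =
      Evrf::<Pallas>::prove(&mut OsRng, &generators, [0; 32], 1, &ecdh_public_keys, &key).unwrap();
    Evrf::<Pallas>::verify(
      &mut OsRng,
      &generators,
      &mut verifier,
      [0; 32],
      1,
      &ecdh_public_keys,
      Vesta::generator() * *key,
      &res.proof,
    )
    .unwrap();
  }
  // A single batched check covers every queued proof
  assert!(generators.verify(verifier));
}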
@@ -43,7 +46,8 @@ pub fn recover_key(keys: &HashMap> let included = keys.keys().copied().collect::>(); let group_private = keys.iter().fold(C::F::ZERO, |accum, (i, keys)| { - accum + (lagrange::(*i, &included) * keys.secret_share().deref()) + accum + + (first.core.interpolation.interpolation_factor(*i, &included) * keys.secret_share().deref()) }); assert_eq!(C::generator() * group_private, first.group_key(), "failed to recover keys"); group_private diff --git a/crypto/dkg/src/tests/pedpop.rs b/crypto/dkg/src/tests/pedpop.rs index 3ae383e3..42d7af67 100644 --- a/crypto/dkg/src/tests/pedpop.rs +++ b/crypto/dkg/src/tests/pedpop.rs @@ -14,7 +14,7 @@ use crate::{ type PedPoPEncryptedMessage = EncryptedMessage::F>>; type PedPoPSecretShares = HashMap>; -const CONTEXT: &str = "DKG Test Key Generation"; +const CONTEXT: [u8; 32] = *b"DKG Test Key Generation "; // Commit, then return commitment messages, enc keys, and shares #[allow(clippy::type_complexity)] @@ -31,7 +31,7 @@ fn commit_enc_keys_and_shares( let mut enc_keys = HashMap::new(); for i in (1 ..= PARTICIPANTS).map(Participant) { let params = ThresholdParams::new(THRESHOLD, PARTICIPANTS, i).unwrap(); - let machine = KeyGenMachine::::new(params, CONTEXT.to_string()); + let machine = KeyGenMachine::::new(params, CONTEXT); let (machine, these_commitments) = machine.generate_coefficients(rng); machines.insert(i, machine); @@ -147,14 +147,12 @@ mod literal { // Verify machines constructed with AdditionalBlameMachine::new work assert_eq!( - AdditionalBlameMachine::new( - &mut OsRng, - CONTEXT.to_string(), - PARTICIPANTS, - commitment_msgs.clone() - ) - .unwrap() - .blame(ONE, TWO, msg.clone(), blame.clone()), + AdditionalBlameMachine::new(CONTEXT, PARTICIPANTS, commitment_msgs.clone()).unwrap().blame( + ONE, + TWO, + msg.clone(), + blame.clone() + ), ONE, ); } diff --git a/crypto/dkg/src/tests/promote.rs b/crypto/dkg/src/tests/promote.rs index 99c00433..242f085b 100644 --- a/crypto/dkg/src/tests/promote.rs +++ b/crypto/dkg/src/tests/promote.rs @@ -28,6 +28,10 @@ impl Ciphersuite for AltGenerator { C::G::generator() * ::hash_to_F(b"DKG Promotion Test", b"generator") } + fn reduce_512(scalar: [u8; 64]) -> Self::F { + ::reduce_512(scalar) + } + fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F { ::hash_to_F(dst, data) } diff --git a/crypto/dleq/Cargo.toml b/crypto/dleq/Cargo.toml index fc25899f..a2b8ad9e 100644 --- a/crypto/dleq/Cargo.toml +++ b/crypto/dleq/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dleq" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.79" +rust-version = "1.81" [package.metadata.docs.rs] all-features = true @@ -18,7 +18,7 @@ workspace = true [dependencies] rustversion = "1" -thiserror = { version = "1", optional = true } +thiserror = { version = "2", default-features = false, optional = true } rand_core = { version = "0.6", default-features = false } zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] } @@ -44,7 +44,7 @@ dalek-ff-group = { path = "../dalek-ff-group" } transcript = { package = "flexible-transcript", path = "../transcript", features = ["recommended"] } [features] -std = ["rand_core/std", "zeroize/std", "digest/std", "transcript/std", "ff/std", "multiexp?/std"] +std = ["thiserror?/std", "rand_core/std", "zeroize/std", "digest/std", "transcript/std", "ff/std", "multiexp?/std"] serialize = ["std"] # Needed for cross-group DLEqs diff --git a/crypto/dleq/src/cross_group/mod.rs 
b/crypto/dleq/src/cross_group/mod.rs index 8014ea9f..c530f60a 100644 --- a/crypto/dleq/src/cross_group/mod.rs +++ b/crypto/dleq/src/cross_group/mod.rs @@ -92,7 +92,7 @@ impl Generators { } /// Error for cross-group DLEq proofs. -#[derive(Error, PartialEq, Eq, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)] pub enum DLEqError { /// Invalid proof length. #[error("invalid proof length")] diff --git a/crypto/dleq/src/lib.rs b/crypto/dleq/src/lib.rs index a8958a2e..f6aed25a 100644 --- a/crypto/dleq/src/lib.rs +++ b/crypto/dleq/src/lib.rs @@ -37,11 +37,11 @@ pub(crate) fn challenge(transcript: &mut T) -> F { // Get a wide amount of bytes to safely reduce without bias // In most cases, <=1.5x bytes is enough. 2x is still standard and there's some theoretical // groups which may technically require more than 1.5x bytes for this to work as intended - let target_bytes = ((usize::try_from(F::NUM_BITS).unwrap() + 7) / 8) * 2; + let target_bytes = usize::try_from(F::NUM_BITS).unwrap().div_ceil(8) * 2; let mut challenge_bytes = transcript.challenge(b"challenge"); let challenge_bytes_len = challenge_bytes.as_ref().len(); // If the challenge is 32 bytes, and we need 64, we need two challenges - let needed_challenges = (target_bytes + (challenge_bytes_len - 1)) / challenge_bytes_len; + let needed_challenges = target_bytes.div_ceil(challenge_bytes_len); // The following algorithm should be equivalent to a wide reduction of the challenges, // interpreted as concatenated, big-endian byte string diff --git a/crypto/ed448/Cargo.toml b/crypto/ed448/Cargo.toml index b0d0026e..64c1b243 100644 --- a/crypto/ed448/Cargo.toml +++ b/crypto/ed448/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ed448" authors = ["Luke Parker "] keywords = ["ed448", "ff", "group"] edition = "2021" -rust-version = "1.66" +rust-version = "1.71" [package.metadata.docs.rs] all-features = true diff --git a/crypto/ed448/src/backend.rs b/crypto/ed448/src/backend.rs index db41e811..327fcf97 100644 --- a/crypto/ed448/src/backend.rs +++ b/crypto/ed448/src/backend.rs @@ -161,7 +161,16 @@ macro_rules! field { res *= res; } } - res *= table[usize::from(bits)]; + + let mut scale_by = $FieldName(Residue::ONE); + #[allow(clippy::needless_range_loop)] + for i in 0 .. 16 { + #[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 16 + { + scale_by = <_>::conditional_select(&scale_by, &table[i], bits.ct_eq(&(i as u8))); + } + } + res *= scale_by; bits = 0; } } diff --git a/crypto/ed448/src/point.rs b/crypto/ed448/src/point.rs index c3b10f79..cd49023f 100644 --- a/crypto/ed448/src/point.rs +++ b/crypto/ed448/src/point.rs @@ -242,7 +242,16 @@ impl Mul for Point { res = res.double(); } } - res += table[usize::from(bits)]; + + let mut add_by = Point::identity(); + #[allow(clippy::needless_range_loop)] + for i in 0 .. 16 { + #[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 
16 + { + add_by = <_>::conditional_select(&add_by, &table[i], bits.ct_eq(&(i as u8))); + } + } + res += add_by; bits = 0; } } diff --git a/crypto/evrf/circuit-abstraction/Cargo.toml b/crypto/evrf/circuit-abstraction/Cargo.toml new file mode 100644 index 00000000..64d4758c --- /dev/null +++ b/crypto/evrf/circuit-abstraction/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "generalized-bulletproofs-circuit-abstraction" +version = "0.1.0" +description = "An abstraction for arithmetic circuits over Generalized Bulletproofs" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/crypto/fcmps/circuit-abstraction" +authors = ["Luke Parker "] +keywords = ["bulletproofs", "circuit"] +edition = "2021" +rust-version = "1.69" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[dependencies] +std-shims = { path = "../../../common/std-shims", version = "^0.1.1", default-features = false } + +zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] } + +ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false } + +generalized-bulletproofs = { path = "../generalized-bulletproofs", default-features = false } + +[features] +std = ["std-shims/std", "zeroize/std", "ciphersuite/std", "generalized-bulletproofs/std"] +default = ["std"] diff --git a/crypto/evrf/circuit-abstraction/LICENSE b/crypto/evrf/circuit-abstraction/LICENSE new file mode 100644 index 00000000..659881f1 --- /dev/null +++ b/crypto/evrf/circuit-abstraction/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Luke Parker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crypto/evrf/circuit-abstraction/README.md b/crypto/evrf/circuit-abstraction/README.md new file mode 100644 index 00000000..95149d93 --- /dev/null +++ b/crypto/evrf/circuit-abstraction/README.md @@ -0,0 +1,3 @@ +# Generalized Bulletproofs Circuit Abstraction + +A circuit abstraction around `generalized-bulletproofs`. diff --git a/crypto/evrf/circuit-abstraction/src/gadgets.rs b/crypto/evrf/circuit-abstraction/src/gadgets.rs new file mode 100644 index 00000000..08e5214e --- /dev/null +++ b/crypto/evrf/circuit-abstraction/src/gadgets.rs @@ -0,0 +1,39 @@ +use ciphersuite::{group::ff::Field, Ciphersuite}; + +use crate::*; + +impl Circuit { + /// Constrain two linear combinations to be equal. + pub fn equality(&mut self, a: LinComb, b: &LinComb) { + self.constrain_equal_to_zero(a - b); + } + + /// Calculate (and constrain) the inverse of a value. 
+ /// + /// A linear combination may optionally be passed as a constraint for the value being inverted. + /// A reference to the inverted value and its inverse is returned. + /// + /// May panic if any linear combinations reference non-existent terms, the witness isn't provided + /// when proving/is provided when verifying, or if the witness is 0 (and accordingly doesn't have + /// an inverse). + pub fn inverse( + &mut self, + lincomb: Option>, + witness: Option, + ) -> (Variable, Variable) { + let (l, r, o) = self.mul(lincomb, None, witness.map(|f| (f, f.invert().unwrap()))); + // The output of a value multiplied by its inverse is 1 + // Constrain `1 o - 1 = 0` + self.constrain_equal_to_zero(LinComb::from(o).constant(-C::F::ONE)); + (l, r) + } + + /// Constrain two linear combinations as unequal. + /// + /// May panic if any linear combinations reference non-existent terms. + pub fn inequality(&mut self, a: LinComb, b: &LinComb, witness: Option<(C::F, C::F)>) { + let l_constraint = a - b; + // The existence of a multiplicative inverse means a-b != 0, which means a != b + self.inverse(Some(l_constraint), witness.map(|(a, b)| a - b)); + } +} diff --git a/crypto/evrf/circuit-abstraction/src/lib.rs b/crypto/evrf/circuit-abstraction/src/lib.rs new file mode 100644 index 00000000..8a4b826b --- /dev/null +++ b/crypto/evrf/circuit-abstraction/src/lib.rs @@ -0,0 +1,197 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![cfg_attr(not(feature = "std"), no_std)] +#![deny(missing_docs)] +#![allow(non_snake_case)] + +use std_shims::{vec, vec::Vec}; + +use zeroize::{Zeroize, ZeroizeOnDrop}; + +use ciphersuite::{group::ff::Field, Ciphersuite}; + +use generalized_bulletproofs::{ + ScalarVector, PedersenCommitment, PedersenVectorCommitment, ProofGenerators, + transcript::{Transcript as ProverTranscript, VerifierTranscript, Commitments}, + arithmetic_circuit_proof::{AcError, ArithmeticCircuitStatement, ArithmeticCircuitWitness}, +}; +pub use generalized_bulletproofs::arithmetic_circuit_proof::{Variable, LinComb}; + +mod gadgets; + +/// A trait for the transcript, whether proving or verifying, as necessary for sampling +/// challenges. +pub trait Transcript { + /// Sample a challenge from the transcript. + /// + /// It is the caller's responsibility to have properly transcripted all variables prior to + /// sampling this challenge. + fn challenge(&mut self) -> C::F; + + /// Sample a challenge as a byte array. + /// + /// It is the caller's responsibility to have properly transcripted all variables prior to + /// sampling this challenge. + fn challenge_bytes(&mut self) -> [u8; 64]; +} +impl Transcript for ProverTranscript { + fn challenge(&mut self) -> C::F { + self.challenge::() + } + fn challenge_bytes(&mut self) -> [u8; 64] { + self.challenge_bytes() + } +} +impl Transcript for VerifierTranscript<'_> { + fn challenge(&mut self) -> C::F { + self.challenge::() + } + fn challenge_bytes(&mut self) -> [u8; 64] { + self.challenge_bytes() + } +} + +/// The witness for the satisfaction of this circuit. +#[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)] +struct ProverData { + aL: Vec, + aR: Vec, + C: Vec>, + V: Vec>, +} + +/// A struct representing a circuit. +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Circuit { + muls: usize, + // A series of linear combinations which must evaluate to 0. + constraints: Vec>, + prover: Option>, +} + +impl Circuit { + /// Returns the amount of multiplications used by this circuit.
+ pub fn muls(&self) -> usize { + self.muls + } + + /// Create an instance to prove satisfaction of a circuit with. + #[allow(clippy::type_complexity)] + pub fn prove( + vector_commitments: Vec>, + commitments: Vec>, + ) -> Self { + Self { + muls: 0, + constraints: vec![], + prover: Some(ProverData { aL: vec![], aR: vec![], C: vector_commitments, V: commitments }), + } + } + + /// Create an instance to verify a proof with. + pub fn verify() -> Self { + Self { muls: 0, constraints: vec![], prover: None } + } + + /// Evaluate a linear combination. + /// + /// Yields WL aL + WR aR + WO aO + WCG CG + WV V + c. + /// + /// May panic if the linear combination references non-existent terms. + /// + /// Returns None if not a prover. + pub fn eval(&self, lincomb: &LinComb) -> Option { + self.prover.as_ref().map(|prover| { + let mut res = lincomb.c(); + for (index, weight) in lincomb.WL() { + res += prover.aL[*index] * weight; + } + for (index, weight) in lincomb.WR() { + res += prover.aR[*index] * weight; + } + for (index, weight) in lincomb.WO() { + res += prover.aL[*index] * prover.aR[*index] * weight; + } + for (WCG, C) in lincomb.WCG().iter().zip(&prover.C) { + for (j, weight) in WCG { + res += C.g_values[*j] * weight; + } + } + for (index, weight) in lincomb.WV() { + res += prover.V[*index].value * weight; + } + res + }) + } + + /// Multiply two values, optionally constrained, returning the constrainable left/right/out + /// terms. + /// + /// May panic if any linear combinations reference non-existent terms or if the witness isn't + /// provided when proving/is provided when verifying. + pub fn mul( + &mut self, + a: Option>, + b: Option>, + witness: Option<(C::F, C::F)>, + ) -> (Variable, Variable, Variable) { + let l = Variable::aL(self.muls); + let r = Variable::aR(self.muls); + let o = Variable::aO(self.muls); + self.muls += 1; + + debug_assert_eq!(self.prover.is_some(), witness.is_some()); + if let Some(witness) = witness { + let prover = self.prover.as_mut().unwrap(); + prover.aL.push(witness.0); + prover.aR.push(witness.1); + } + + if let Some(a) = a { + self.constrain_equal_to_zero(a.term(-C::F::ONE, l)); + } + if let Some(b) = b { + self.constrain_equal_to_zero(b.term(-C::F::ONE, r)); + } + + (l, r, o) + } + + /// Constrain a linear combination to be equal to 0. + /// + /// May panic if the linear combination references non-existent terms. + pub fn constrain_equal_to_zero(&mut self, lincomb: LinComb) { + self.constraints.push(lincomb); + } + + /// Obtain the statement for this circuit. + /// + /// If configured as the prover, the witness to use is also returned. 
+ #[allow(clippy::type_complexity)] + pub fn statement( + self, + generators: ProofGenerators<'_, C>, + commitments: Commitments, + ) -> Result<(ArithmeticCircuitStatement<'_, C>, Option>), AcError> { + let statement = ArithmeticCircuitStatement::new(generators, self.constraints, commitments)?; + + let witness = self + .prover + .map(|mut prover| { + // We can't deconstruct the witness as it implements Drop (per ZeroizeOnDrop) + // Accordingly, we take the values within it and move forward with those + let mut aL = vec![]; + core::mem::swap(&mut prover.aL, &mut aL); + let mut aR = vec![]; + core::mem::swap(&mut prover.aR, &mut aR); + let mut C = vec![]; + core::mem::swap(&mut prover.C, &mut C); + let mut V = vec![]; + core::mem::swap(&mut prover.V, &mut V); + ArithmeticCircuitWitness::new(ScalarVector::from(aL), ScalarVector::from(aR), C, V) + }) + .transpose()?; + + Ok((statement, witness)) + } +} diff --git a/crypto/evrf/divisors/Cargo.toml b/crypto/evrf/divisors/Cargo.toml new file mode 100644 index 00000000..af6da971 --- /dev/null +++ b/crypto/evrf/divisors/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "ec-divisors" +version = "0.1.0" +description = "A library for calculating elliptic curve divisors" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/crypto/divisors" +authors = ["Luke Parker "] +keywords = ["ciphersuite", "ff", "group"] +edition = "2021" +rust-version = "1.69" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[dependencies] +std-shims = { path = "../../../common/std-shims", version = "^0.1.1", default-features = false } + +rand_core = { version = "0.6", default-features = false } +zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] } + +subtle = { version = "2", default-features = false } +ff = { version = "0.13", default-features = false, features = ["bits"] } +group = { version = "0.13", default-features = false } + +hex = { version = "0.4", default-features = false, optional = true } +dalek-ff-group = { path = "../../dalek-ff-group", default-features = false, optional = true } +pasta_curves = { version = "0.5", git = "https://github.com/kayabaNerve/pasta_curves.git", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616", default-features = false, features = ["bits", "alloc"], optional = true } + +[dev-dependencies] +rand_core = { version = "0.6", features = ["getrandom"] } + +hex = "0.4" +dalek-ff-group = { path = "../../dalek-ff-group", features = ["std"] } +pasta_curves = { version = "0.5", git = "https://github.com/kayabaNerve/pasta_curves.git", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616", default-features = false, features = ["bits", "alloc"] } + +[features] +std = ["std-shims/std", "zeroize/std", "subtle/std", "ff/std", "dalek-ff-group?/std"] +ed25519 = ["hex/alloc", "dalek-ff-group"] +pasta = ["pasta_curves"] +default = ["std"] diff --git a/crypto/evrf/divisors/LICENSE b/crypto/evrf/divisors/LICENSE new file mode 100644 index 00000000..36fd4d60 --- /dev/null +++ b/crypto/evrf/divisors/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023-2024 Luke Parker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, 
subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crypto/evrf/divisors/README.md b/crypto/evrf/divisors/README.md new file mode 100644 index 00000000..51ba542a --- /dev/null +++ b/crypto/evrf/divisors/README.md @@ -0,0 +1,4 @@ +# Elliptic Curve Divisors + +An implementation of a representation for and construction of elliptic curve +divisors, intended for Eagen's [EC IP work](https://eprint.iacr.org/2022/596). diff --git a/crypto/evrf/divisors/src/lib.rs b/crypto/evrf/divisors/src/lib.rs new file mode 100644 index 00000000..8f6325da --- /dev/null +++ b/crypto/evrf/divisors/src/lib.rs @@ -0,0 +1,581 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![cfg_attr(not(feature = "std"), no_std)] +#![deny(missing_docs)] +#![allow(non_snake_case)] + +use std_shims::{vec, vec::Vec}; + +use subtle::{Choice, ConstantTimeEq, ConstantTimeGreater, ConditionallySelectable}; +use zeroize::{Zeroize, ZeroizeOnDrop}; + +use group::{ + ff::{Field, PrimeField, PrimeFieldBits}, + Group, +}; + +mod poly; +pub use poly::Poly; + +#[cfg(test)] +mod tests; + +/// A curve usable with this library. +pub trait DivisorCurve: Group + ConstantTimeEq + ConditionallySelectable + Zeroize { + /// An element of the field this curve is defined over. + type FieldElement: Zeroize + PrimeField + ConditionallySelectable; + + /// The A in the curve equation y^2 = x^3 + A x + B. + fn a() -> Self::FieldElement; + /// The B in the curve equation y^2 = x^3 + A x + B. + fn b() -> Self::FieldElement; + + /// y^2 - x^3 - A x - B + /// + /// Section 2 of the security proofs define this modulus. + /// + /// This MUST NOT be overriden. + // TODO: Move to an extension trait + fn divisor_modulus() -> Poly { + Poly { + // 0 y**1, 1 y*2 + y_coefficients: vec![Self::FieldElement::ZERO, Self::FieldElement::ONE], + yx_coefficients: vec![], + x_coefficients: vec![ + // - A x + -Self::a(), + // 0 x^2 + Self::FieldElement::ZERO, + // - x^3 + -Self::FieldElement::ONE, + ], + // - B + zero_coefficient: -Self::b(), + } + } + + /// Convert a point to its x and y coordinates. + /// + /// Returns None if passed the point at infinity. + /// + /// This function may run in time variable to if the point is the identity. + fn to_xy(point: Self) -> Option<(Self::FieldElement, Self::FieldElement)>; +} + +/// Calculate the slope and intercept between two points. +/// +/// This function panics when `a @ infinity`, `b @ infinity`, `a == b`, or when `a == -b`. 
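// An illustrative note: the returned line passes through `a` and `b` and, by the group law on a
// short Weierstrass curve, also through `-(a + b)`. Within this crate (where `slope_intercept`
// is visible), that property could be sanity-checked as follows, assuming a `DivisorCurve` `C`
// and `OsRng` as used by the crate's tests:
//
// let a = C::random(&mut OsRng);
// let b = C::random(&mut OsRng);
// let (slope, intercept) = slope_intercept::<C>(a, b);
// for point in [a, b, -(a + b)] {
//   let (x, y) = C::to_xy(point).unwrap();
//   assert_eq!(y - (slope * x) - intercept, C::FieldElement::ZERO);
// }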
+pub(crate) fn slope_intercept(a: C, b: C) -> (C::FieldElement, C::FieldElement) { + let (ax, ay) = C::to_xy(a).unwrap(); + debug_assert_eq!(C::divisor_modulus().eval(ax, ay), C::FieldElement::ZERO); + let (bx, by) = C::to_xy(b).unwrap(); + debug_assert_eq!(C::divisor_modulus().eval(bx, by), C::FieldElement::ZERO); + let slope = (by - ay) * + Option::::from((bx - ax).invert()) + .expect("trying to get slope/intercept of points sharing an x coordinate"); + let intercept = by - (slope * bx); + debug_assert!(bool::from((ay - (slope * ax) - intercept).is_zero())); + debug_assert!(bool::from((by - (slope * bx) - intercept).is_zero())); + (slope, intercept) +} + +// The line interpolating two points. +fn line(a: C, b: C) -> Poly { + #[derive(Clone, Copy)] + struct LinesRes { + y_coefficient: F, + x_coefficient: F, + zero_coefficient: F, + } + impl ConditionallySelectable for LinesRes { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Self { + y_coefficient: <_>::conditional_select(&a.y_coefficient, &b.y_coefficient, choice), + x_coefficient: <_>::conditional_select(&a.x_coefficient, &b.x_coefficient, choice), + zero_coefficient: <_>::conditional_select(&a.zero_coefficient, &b.zero_coefficient, choice), + } + } + } + + let a_is_identity = a.is_identity(); + let b_is_identity = b.is_identity(); + + // If they're both the point at infinity, we simply set the line to one + let both_are_identity = a_is_identity & b_is_identity; + let if_both_are_identity = LinesRes { + y_coefficient: C::FieldElement::ZERO, + x_coefficient: C::FieldElement::ZERO, + zero_coefficient: C::FieldElement::ONE, + }; + + // If either point is the point at infinity, or these are additive inverses, the line is + // `1 * x - x`. The first `x` is a term in the polynomial, the `x` is the `x` coordinate of these + // points (of which there is one, as the second point is either at infinity or has a matching `x` + // coordinate). + let one_is_identity = a_is_identity | b_is_identity; + let additive_inverses = a.ct_eq(&-b); + let one_is_identity_or_additive_inverses = one_is_identity | additive_inverses; + let if_one_is_identity_or_additive_inverses = { + // If both are identity, set `a` to the generator so we can safely evaluate the following + // (which we won't select at the end of this function) + let a = <_>::conditional_select(&a, &C::generator(), both_are_identity); + // If `a` is identity, this selects `b`. 
If `a` isn't identity, this selects `a` + let non_identity = <_>::conditional_select(&a, &b, a.is_identity()); + let (x, _) = C::to_xy(non_identity).unwrap(); + LinesRes { + y_coefficient: C::FieldElement::ZERO, + x_coefficient: C::FieldElement::ONE, + zero_coefficient: -x, + } + }; + + // The following calculation assumes neither point is the point at infinity + // If either are, we use a prior result + // To ensure we can calculcate a result here, set any points at infinity to the generator + let a = <_>::conditional_select(&a, &C::generator(), a_is_identity); + let b = <_>::conditional_select(&b, &C::generator(), b_is_identity); + // It also assumes a, b aren't additive inverses which is also covered by a prior result + let b = <_>::conditional_select(&b, &a.double(), additive_inverses); + + // If the points are equal, we use the line interpolating the sum of these points with the point + // at infinity + let b = <_>::conditional_select(&b, &-a.double(), a.ct_eq(&b)); + + let (slope, intercept) = slope_intercept::(a, b); + + // Section 4 of the proofs explicitly state the line `L = y - lambda * x - mu` + // y - (slope * x) - intercept + let mut res = LinesRes { + y_coefficient: C::FieldElement::ONE, + x_coefficient: -slope, + zero_coefficient: -intercept, + }; + + res = <_>::conditional_select( + &res, + &if_one_is_identity_or_additive_inverses, + one_is_identity_or_additive_inverses, + ); + res = <_>::conditional_select(&res, &if_both_are_identity, both_are_identity); + + Poly { + y_coefficients: vec![res.y_coefficient], + yx_coefficients: vec![], + x_coefficients: vec![res.x_coefficient], + zero_coefficient: res.zero_coefficient, + } +} + +/// Create a divisor interpolating the following points. +/// +/// Returns None if: +/// - No points were passed in +/// - The points don't sum to the point at infinity +/// - A passed in point was the point at infinity +/// +/// If the arguments were valid, this function executes in an amount of time constant to the amount +/// of points. +#[allow(clippy::new_ret_no_self)] +pub fn new_divisor(points: &[C]) -> Option> { + // No points were passed in, this is the point at infinity, or the single point isn't infinity + // and accordingly doesn't sum to infinity. All three cause us to return None + // Checks a bit other than the first bit is set, meaning this is >= 2 + let mut invalid_args = (points.len() & (!1)).ct_eq(&0); + // The points don't sum to the point at infinity + invalid_args |= !points.iter().sum::().is_identity(); + // A point was the point at identity + for point in points { + invalid_args |= point.is_identity(); + } + if bool::from(invalid_args) { + None?; + } + + let points_len = points.len(); + + // Create the initial set of divisors + let mut divs = vec![]; + let mut iter = points.iter().copied(); + while let Some(a) = iter.next() { + let b = iter.next(); + + // Draw the line between those points + // These unwraps are branching on the length of the iterator, not violating the constant-time + // priorites desired + divs.push((2, a + b.unwrap_or(C::identity()), line::(a, b.unwrap_or(-a)))); + } + + let modulus = C::divisor_modulus(); + + // Our Poly algorithm is leaky and will create an excessive amount of y x**j and x**j + // coefficients which are zero, yet as our implementation is constant time, still come with + // an immense performance cost. This code truncates the coefficients we know are zero. 
+ let trim = |divisor: &mut Poly<_>, points_len: usize| { + // We should only be trimming divisors reduced by the modulus + debug_assert!(divisor.yx_coefficients.len() <= 1); + if divisor.yx_coefficients.len() == 1 { + let truncate_to = ((points_len + 1) / 2).saturating_sub(2); + #[cfg(debug_assertions)] + for p in truncate_to .. divisor.yx_coefficients[0].len() { + debug_assert_eq!(divisor.yx_coefficients[0][p], ::ZERO); + } + divisor.yx_coefficients[0].truncate(truncate_to); + } + { + let truncate_to = points_len / 2; + #[cfg(debug_assertions)] + for p in truncate_to .. divisor.x_coefficients.len() { + debug_assert_eq!(divisor.x_coefficients[p], ::ZERO); + } + divisor.x_coefficients.truncate(truncate_to); + } + }; + + // Pair them off until only one remains + while divs.len() > 1 { + let mut next_divs = vec![]; + // If there's an odd amount of divisors, carry the odd one out to the next iteration + if (divs.len() % 2) == 1 { + next_divs.push(divs.pop().unwrap()); + } + + while let Some((a_points, a, a_div)) = divs.pop() { + let (b_points, b, b_div) = divs.pop().unwrap(); + let points = a_points + b_points; + + // Merge the two divisors + let numerator = a_div.mul_mod(&b_div, &modulus).mul_mod(&line::(a, b), &modulus); + let denominator = line::(a, -a).mul_mod(&line::(b, -b), &modulus); + let (mut q, r) = numerator.div_rem(&denominator); + debug_assert_eq!(r, Poly::zero()); + + trim(&mut q, 1 + points); + + next_divs.push((points, a + b, q)); + } + + divs = next_divs; + } + + // Return the unified divisor + let mut divisor = divs.remove(0).2; + trim(&mut divisor, points_len); + Some(divisor) +} + +/// The decomposition of a scalar. +/// +/// The decomposition ($d$) of a scalar ($s$) has the following two properties: +/// +/// - $\sum^{\mathsf{NUM_BITS} - 1}_{i=0} d_i * 2^i = s$ +/// - $\sum^{\mathsf{NUM_BITS} - 1}_{i=0} d_i = \mathsf{NUM_BITS}$ +#[derive(Clone, Zeroize, ZeroizeOnDrop)] +pub struct ScalarDecomposition { + scalar: F, + decomposition: Vec, +} + +impl ScalarDecomposition { + /// Decompose a non-zero scalar. + /// + /// Returns `None` if the scalar is zero. + /// + /// This function is constant time if the scalar is non-zero. + pub fn new(scalar: F) -> Option { + if bool::from(scalar.is_zero()) { + None?; + } + + /* + We need the sum of the coefficients to equal F::NUM_BITS. The scalar's bits will be less than + F::NUM_BITS. Accordingly, we need to increment the sum of the coefficients without + incrementing the scalar represented. We do this by finding the highest non-0 coefficient, + decrementing it, and increasing the immediately less significant coefficient by 2. This + increases the sum of the coefficients by 1 (-1+2=1). + */ + + let num_bits = u64::from(F::NUM_BITS); + + // Obtain the bits of the scalar + let num_bits_usize = usize::try_from(num_bits).unwrap(); + let mut decomposition = vec![0; num_bits_usize]; + for (i, bit) in scalar.to_le_bits().into_iter().take(num_bits_usize).enumerate() { + let bit = u64::from(u8::from(bit)); + decomposition[i] = bit; + } + + // The following algorithm only works if the value of the scalar exceeds num_bits + // If it isn't, we increase it by the modulus such that it does exceed num_bits + { + let mut less_than_num_bits = Choice::from(0); + for i in 0 .. 
num_bits { + less_than_num_bits |= scalar.ct_eq(&F::from(i)); + } + let mut decomposition_of_modulus = vec![0; num_bits_usize]; + // Decompose negative one + for (i, bit) in (-F::ONE).to_le_bits().into_iter().take(num_bits_usize).enumerate() { + let bit = u64::from(u8::from(bit)); + decomposition_of_modulus[i] = bit; + } + // Increment it by one + decomposition_of_modulus[0] += 1; + + // Add the decomposition onto the decomposition of the modulus + for i in 0 .. num_bits_usize { + let new_decomposition = <_>::conditional_select( + &decomposition[i], + &(decomposition[i] + decomposition_of_modulus[i]), + less_than_num_bits, + ); + decomposition[i] = new_decomposition; + } + } + + // Calculcate the sum of the coefficients + let mut sum_of_coefficients: u64 = 0; + for decomposition in &decomposition { + sum_of_coefficients += *decomposition; + } + + /* + Now, because we added a log2(k)-bit number to a k-bit number, we may have our sum of + coefficients be *too high*. We attempt to reduce the sum of the coefficients accordingly. + + This algorithm is guaranteed to complete as expected. Take the sequence `222`. `222` becomes + `032` becomes `013`. Even if the next coefficient in the sequence is `2`, the third + coefficient will be reduced once and the next coefficient (`2`, increased to `3`) will only + be eligible for reduction once. This demonstrates, even for a worst case of log2(k) `2`s + followed by `1`s (as possible if the modulus is a Mersenne prime), the log2(k) `2`s can be + reduced as necessary so long as there is a single coefficient after (requiring the entire + sequence be at least of length log2(k) + 1). For a 2-bit number, log2(k) + 1 == 2, so this + holds for any odd prime field. + + To fully type out the demonstration for the Mersenne prime 3, with scalar to encode 1 (the + highest value less than the number of bits): + + 10 - Little-endian bits of 1 + 21 - Little-endian bits of 1, plus the modulus + 02 - After one reduction, where the sum of the coefficients does in fact equal 2 (the target) + */ + { + let mut log2_num_bits = 0; + while (1 << log2_num_bits) < num_bits { + log2_num_bits += 1; + } + + for _ in 0 .. log2_num_bits { + // If the sum of coefficients is the amount of bits, we're done + let mut done = sum_of_coefficients.ct_eq(&num_bits); + + for i in 0 .. (num_bits_usize - 1) { + let should_act = (!done) & decomposition[i].ct_gt(&1); + // Subtract 2 from this coefficient + let amount_to_sub = <_>::conditional_select(&0, &2, should_act); + decomposition[i] -= amount_to_sub; + // Add 1 to the next coefficient + let amount_to_add = <_>::conditional_select(&0, &1, should_act); + decomposition[i + 1] += amount_to_add; + + // Also update the sum of coefficients + sum_of_coefficients -= <_>::conditional_select(&0, &1, should_act); + + // If we updated the coefficients this loop iter, we're done for this loop iter + done |= should_act; + } + } + } + + for _ in 0 .. num_bits { + // If the sum of coefficients is the amount of bits, we're done + let mut done = sum_of_coefficients.ct_eq(&num_bits); + + // Find the highest coefficient currently non-zero + for i in (1 .. 
decomposition.len()).rev() { + // If this is non-zero, we should decrement this coefficient if we haven't already + // decremented a coefficient this round + let is_non_zero = !(0.ct_eq(&decomposition[i])); + let should_act = (!done) & is_non_zero; + + // Update this coefficient and the prior coefficient + let amount_to_sub = <_>::conditional_select(&0, &1, should_act); + decomposition[i] -= amount_to_sub; + + let amount_to_add = <_>::conditional_select(&0, &2, should_act); + // i must be at least 1, so i - 1 will be at least 0 (meaning it's safe to index with) + decomposition[i - 1] += amount_to_add; + + // Also update the sum of coefficients + sum_of_coefficients += <_>::conditional_select(&0, &1, should_act); + + // If we updated the coefficients this loop iter, we're done for this loop iter + done |= should_act; + } + } + debug_assert!(bool::from(decomposition.iter().sum::().ct_eq(&num_bits))); + + Some(ScalarDecomposition { scalar, decomposition }) + } + + /// The scalar. + pub fn scalar(&self) -> &F { + &self.scalar + } + + /// The decomposition of the scalar. + pub fn decomposition(&self) -> &[u64] { + &self.decomposition + } + + /// A divisor to prove a scalar multiplication. + /// + /// The divisor will interpolate $-(s \cdot G)$ with $d_i$ instances of $2^i \cdot G$. + /// + /// This function executes in constant time with regards to the scalar. + /// + /// This function MAY panic if the generator is the point at infinity. + pub fn scalar_mul_divisor>( + &self, + mut generator: C, + ) -> Poly { + // 1 is used for the resulting point, NUM_BITS is used for the decomposition, and then we store + // one additional index in a usize for the points we shouldn't write at all (hence the +2) + let _ = usize::try_from(::NUM_BITS + 2) + .expect("NUM_BITS + 2 didn't fit in usize"); + let mut divisor_points = + vec![C::identity(); (::NUM_BITS + 1) as usize]; + + // Write the inverse of the resulting point + divisor_points[0] = -generator * self.scalar; + + // Write the decomposition + let mut write_above: u64 = 0; + for coefficient in &self.decomposition { + // Write the generator to every slot except the slots we have already written to. + for i in 1 ..= (::NUM_BITS as u64) { + divisor_points[i as usize].conditional_assign(&generator, i.ct_gt(&write_above)); + } + + // Increase the next write start by the coefficient. 
+ write_above += coefficient; + generator = generator.double(); + } + + // Create a divisor out of the points + let res = new_divisor(&divisor_points).unwrap(); + divisor_points.zeroize(); + res + } +} + +#[cfg(any(test, feature = "pasta"))] +mod pasta { + use group::{ff::Field, Curve}; + use pasta_curves::{ + arithmetic::{Coordinates, CurveAffine}, + Ep, Fp, Eq, Fq, + }; + use crate::DivisorCurve; + + impl DivisorCurve for Ep { + type FieldElement = Fp; + + fn a() -> Self::FieldElement { + Self::FieldElement::ZERO + } + fn b() -> Self::FieldElement { + Self::FieldElement::from(5u64) + } + + fn to_xy(point: Self) -> Option<(Self::FieldElement, Self::FieldElement)> { + Option::>::from(point.to_affine().coordinates()) + .map(|coords| (*coords.x(), *coords.y())) + } + } + + impl DivisorCurve for Eq { + type FieldElement = Fq; + + fn a() -> Self::FieldElement { + Self::FieldElement::ZERO + } + fn b() -> Self::FieldElement { + Self::FieldElement::from(5u64) + } + + fn to_xy(point: Self) -> Option<(Self::FieldElement, Self::FieldElement)> { + Option::>::from(point.to_affine().coordinates()) + .map(|coords| (*coords.x(), *coords.y())) + } + } +} + +#[cfg(any(test, feature = "ed25519"))] +mod ed25519 { + use subtle::{Choice, ConditionallySelectable}; + use group::{ + ff::{Field, PrimeField}, + Group, GroupEncoding, + }; + use dalek_ff_group::{FieldElement, EdwardsPoint}; + + impl crate::DivisorCurve for EdwardsPoint { + type FieldElement = FieldElement; + + // Wei25519 a/b + // https://www.ietf.org/archive/id/draft-ietf-lwig-curve-representations-02.pdf E.3 + fn a() -> Self::FieldElement { + let mut be_bytes = + hex::decode("2aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa984914a144").unwrap(); + be_bytes.reverse(); + let le_bytes = be_bytes; + Self::FieldElement::from_repr(le_bytes.try_into().unwrap()).unwrap() + } + fn b() -> Self::FieldElement { + let mut be_bytes = + hex::decode("7b425ed097b425ed097b425ed097b425ed097b425ed097b4260b5e9c7710c864").unwrap(); + be_bytes.reverse(); + let le_bytes = be_bytes; + + Self::FieldElement::from_repr(le_bytes.try_into().unwrap()).unwrap() + } + + // https://www.ietf.org/archive/id/draft-ietf-lwig-curve-representations-02.pdf E.2 + fn to_xy(point: Self) -> Option<(Self::FieldElement, Self::FieldElement)> { + if bool::from(point.is_identity()) { + None?; + } + + // Extract the y coordinate from the compressed point + let mut edwards_y = point.to_bytes(); + let x_is_odd = edwards_y[31] >> 7; + edwards_y[31] &= (1 << 7) - 1; + let edwards_y = Self::FieldElement::from_repr(edwards_y).unwrap(); + + // Recover the x coordinate + let edwards_y_sq = edwards_y * edwards_y; + let D = -Self::FieldElement::from(121665u64) * + Self::FieldElement::from(121666u64).invert().unwrap(); + let mut edwards_x = ((edwards_y_sq - Self::FieldElement::ONE) * + ((D * edwards_y_sq) + Self::FieldElement::ONE).invert().unwrap()) + .sqrt() + .unwrap(); + + // Negate the x coordinate if the sign doesn't match + edwards_x = <_>::conditional_select( + &edwards_x, + &-edwards_x, + edwards_x.is_odd() ^ Choice::from(x_is_odd), + ); + + // Calculate the x and y coordinates for Wei25519 + let edwards_y_plus_one = Self::FieldElement::ONE + edwards_y; + let one_minus_edwards_y = Self::FieldElement::ONE - edwards_y; + let wei_x = (edwards_y_plus_one * one_minus_edwards_y.invert().unwrap()) + + (Self::FieldElement::from(486662u64) * Self::FieldElement::from(3u64).invert().unwrap()); + let c = + (-(Self::FieldElement::from(486662u64) + Self::FieldElement::from(2u64))).sqrt().unwrap(); + let 
wei_y = c * edwards_y_plus_one * (one_minus_edwards_y * edwards_x).invert().unwrap(); + Some((wei_x, wei_y)) + } + } +} diff --git a/crypto/evrf/divisors/src/poly.rs b/crypto/evrf/divisors/src/poly.rs new file mode 100644 index 00000000..4ade0f79 --- /dev/null +++ b/crypto/evrf/divisors/src/poly.rs @@ -0,0 +1,744 @@ +use core::ops::{Add, Neg, Sub, Mul, Rem}; +use std_shims::{vec, vec::Vec}; + +use subtle::{Choice, ConstantTimeEq, ConstantTimeGreater, ConditionallySelectable}; +use zeroize::{Zeroize, ZeroizeOnDrop}; + +use group::ff::PrimeField; + +#[derive(Clone, Copy, PartialEq, Debug)] +struct CoefficientIndex { + y_pow: u64, + x_pow: u64, +} +impl ConditionallySelectable for CoefficientIndex { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Self { + y_pow: <_>::conditional_select(&a.y_pow, &b.y_pow, choice), + x_pow: <_>::conditional_select(&a.x_pow, &b.x_pow, choice), + } + } +} +impl ConstantTimeEq for CoefficientIndex { + fn ct_eq(&self, other: &Self) -> Choice { + self.y_pow.ct_eq(&other.y_pow) & self.x_pow.ct_eq(&other.x_pow) + } +} +impl ConstantTimeGreater for CoefficientIndex { + fn ct_gt(&self, other: &Self) -> Choice { + self.y_pow.ct_gt(&other.y_pow) | + (self.y_pow.ct_eq(&other.y_pow) & self.x_pow.ct_gt(&other.x_pow)) + } +} + +/// A structure representing a Polynomial with x^i, y^i, and y^i * x^j terms. +#[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)] +pub struct Poly + Zeroize + PrimeField> { + /// c\[i] * y^(i + 1) + pub y_coefficients: Vec, + /// c\[i]\[j] * y^(i + 1) x^(j + 1) + pub yx_coefficients: Vec>, + /// c\[i] * x^(i + 1) + pub x_coefficients: Vec, + /// Coefficient for x^0, y^0, and x^0 y^0 (the coefficient for 1) + pub zero_coefficient: F, +} + +impl + Zeroize + PrimeField> PartialEq for Poly { + // This is not constant time and is not meant to be + fn eq(&self, b: &Poly) -> bool { + { + let mutual_y_coefficients = self.y_coefficients.len().min(b.y_coefficients.len()); + if self.y_coefficients[.. mutual_y_coefficients] != b.y_coefficients[.. mutual_y_coefficients] + { + return false; + } + for coeff in &self.y_coefficients[mutual_y_coefficients ..] { + if *coeff != F::ZERO { + return false; + } + } + for coeff in &b.y_coefficients[mutual_y_coefficients ..] { + if *coeff != F::ZERO { + return false; + } + } + } + + { + for (i, yx_coeffs) in self.yx_coefficients.iter().enumerate() { + for (j, coeff) in yx_coeffs.iter().enumerate() { + if coeff != b.yx_coefficients.get(i).unwrap_or(&vec![]).get(j).unwrap_or(&F::ZERO) { + return false; + } + } + } + // Run from the other perspective in case other is longer than self + for (i, yx_coeffs) in b.yx_coefficients.iter().enumerate() { + for (j, coeff) in yx_coeffs.iter().enumerate() { + if coeff != self.yx_coefficients.get(i).unwrap_or(&vec![]).get(j).unwrap_or(&F::ZERO) { + return false; + } + } + } + } + + { + let mutual_x_coefficients = self.x_coefficients.len().min(b.x_coefficients.len()); + if self.x_coefficients[.. mutual_x_coefficients] != b.x_coefficients[.. mutual_x_coefficients] + { + return false; + } + for coeff in &self.x_coefficients[mutual_x_coefficients ..] { + if *coeff != F::ZERO { + return false; + } + } + for coeff in &b.x_coefficients[mutual_x_coefficients ..] { + if *coeff != F::ZERO { + return false; + } + } + } + + self.zero_coefficient == b.zero_coefficient + } +} + +impl + Zeroize + PrimeField> Poly { + /// A polynomial for zero. 
+ pub(crate) fn zero() -> Self { + Poly { + y_coefficients: vec![], + yx_coefficients: vec![], + x_coefficients: vec![], + zero_coefficient: F::ZERO, + } + } +} + +impl + Zeroize + PrimeField> Add<&Self> for Poly { + type Output = Self; + + fn add(mut self, other: &Self) -> Self { + // Expand to be the neeeded size + while self.y_coefficients.len() < other.y_coefficients.len() { + self.y_coefficients.push(F::ZERO); + } + while self.yx_coefficients.len() < other.yx_coefficients.len() { + self.yx_coefficients.push(vec![]); + } + for i in 0 .. other.yx_coefficients.len() { + while self.yx_coefficients[i].len() < other.yx_coefficients[i].len() { + self.yx_coefficients[i].push(F::ZERO); + } + } + while self.x_coefficients.len() < other.x_coefficients.len() { + self.x_coefficients.push(F::ZERO); + } + + // Perform the addition + for (i, coeff) in other.y_coefficients.iter().enumerate() { + self.y_coefficients[i] += coeff; + } + for (i, coeffs) in other.yx_coefficients.iter().enumerate() { + for (j, coeff) in coeffs.iter().enumerate() { + self.yx_coefficients[i][j] += coeff; + } + } + for (i, coeff) in other.x_coefficients.iter().enumerate() { + self.x_coefficients[i] += coeff; + } + self.zero_coefficient += other.zero_coefficient; + + self + } +} + +impl + Zeroize + PrimeField> Neg for Poly { + type Output = Self; + + fn neg(mut self) -> Self { + for y_coeff in self.y_coefficients.iter_mut() { + *y_coeff = -*y_coeff; + } + for yx_coeffs in self.yx_coefficients.iter_mut() { + for yx_coeff in yx_coeffs.iter_mut() { + *yx_coeff = -*yx_coeff; + } + } + for x_coeff in self.x_coefficients.iter_mut() { + *x_coeff = -*x_coeff; + } + self.zero_coefficient = -self.zero_coefficient; + + self + } +} + +impl + Zeroize + PrimeField> Sub for Poly { + type Output = Self; + + fn sub(self, other: Self) -> Self { + self + &-other + } +} + +impl + Zeroize + PrimeField> Mul for Poly { + type Output = Self; + + fn mul(mut self, scalar: F) -> Self { + for y_coeff in self.y_coefficients.iter_mut() { + *y_coeff *= scalar; + } + for coeffs in self.yx_coefficients.iter_mut() { + for coeff in coeffs.iter_mut() { + *coeff *= scalar; + } + } + for x_coeff in self.x_coefficients.iter_mut() { + *x_coeff *= scalar; + } + self.zero_coefficient *= scalar; + self + } +} + +impl + Zeroize + PrimeField> Poly { + #[must_use] + fn shift_by_x(mut self, power_of_x: usize) -> Self { + if power_of_x == 0 { + return self; + } + + // Shift up every x coefficient + for _ in 0 .. power_of_x { + self.x_coefficients.insert(0, F::ZERO); + for yx_coeffs in &mut self.yx_coefficients { + yx_coeffs.insert(0, F::ZERO); + } + } + + // Move the zero coefficient + self.x_coefficients[power_of_x - 1] = self.zero_coefficient; + self.zero_coefficient = F::ZERO; + + // Move the y coefficients + // Start by creating yx coefficients with the necessary powers of x + let mut yx_coefficients_to_push = vec![]; + while yx_coefficients_to_push.len() < power_of_x { + yx_coefficients_to_push.push(F::ZERO); + } + // Now, ensure the yx coefficients has the slots for the y coefficients we're moving + while self.yx_coefficients.len() < self.y_coefficients.len() { + self.yx_coefficients.push(yx_coefficients_to_push.clone()); + } + // Perform the move + for (i, y_coeff) in self.y_coefficients.drain(..).enumerate() { + self.yx_coefficients[i][power_of_x - 1] = y_coeff; + } + + self + } + + #[must_use] + fn shift_by_y(mut self, power_of_y: usize) -> Self { + if power_of_y == 0 { + return self; + } + + // Shift up every y coefficient + for _ in 0 .. 
power_of_y { + self.y_coefficients.insert(0, F::ZERO); + self.yx_coefficients.insert(0, vec![]); + } + + // Move the zero coefficient + self.y_coefficients[power_of_y - 1] = self.zero_coefficient; + self.zero_coefficient = F::ZERO; + + // Move the x coefficients + core::mem::swap(&mut self.yx_coefficients[power_of_y - 1], &mut self.x_coefficients); + self.x_coefficients = vec![]; + + self + } +} + +impl + Zeroize + PrimeField> Mul<&Poly> for Poly { + type Output = Self; + + fn mul(self, other: &Self) -> Self { + let mut res = self.clone() * other.zero_coefficient; + + for (i, y_coeff) in other.y_coefficients.iter().enumerate() { + let scaled = self.clone() * *y_coeff; + res = res + &scaled.shift_by_y(i + 1); + } + + for (y_i, yx_coeffs) in other.yx_coefficients.iter().enumerate() { + for (x_i, yx_coeff) in yx_coeffs.iter().enumerate() { + let scaled = self.clone() * *yx_coeff; + res = res + &scaled.shift_by_y(y_i + 1).shift_by_x(x_i + 1); + } + } + + for (i, x_coeff) in other.x_coefficients.iter().enumerate() { + let scaled = self.clone() * *x_coeff; + res = res + &scaled.shift_by_x(i + 1); + } + + res + } +} + +impl + Zeroize + PrimeField> Poly { + // The leading y coefficient and associated x coefficient. + fn leading_coefficient(&self) -> (usize, usize) { + if self.y_coefficients.len() > self.yx_coefficients.len() { + (self.y_coefficients.len(), 0) + } else if !self.yx_coefficients.is_empty() { + (self.yx_coefficients.len(), self.yx_coefficients.last().unwrap().len()) + } else { + (0, self.x_coefficients.len()) + } + } + + /// Returns the highest non-zero coefficient greater than the specified coefficient. + /// + /// If no non-zero coefficient is greater than the specified coefficient, this will return + /// (0, 0). + fn greater_than_or_equal_coefficient( + &self, + greater_than_or_equal: &CoefficientIndex, + ) -> CoefficientIndex { + let mut leading_coefficient = CoefficientIndex { y_pow: 0, x_pow: 0 }; + for (y_pow_sub_one, coeff) in self.y_coefficients.iter().enumerate() { + let y_pow = u64::try_from(y_pow_sub_one + 1).unwrap(); + let coeff_is_non_zero = !coeff.is_zero(); + let potential = CoefficientIndex { y_pow, x_pow: 0 }; + leading_coefficient = <_>::conditional_select( + &leading_coefficient, + &potential, + coeff_is_non_zero & + potential.ct_gt(&leading_coefficient) & + (potential.ct_gt(greater_than_or_equal) | potential.ct_eq(greater_than_or_equal)), + ); + } + for (y_pow_sub_one, yx_coefficients) in self.yx_coefficients.iter().enumerate() { + let y_pow = u64::try_from(y_pow_sub_one + 1).unwrap(); + for (x_pow_sub_one, coeff) in yx_coefficients.iter().enumerate() { + let x_pow = u64::try_from(x_pow_sub_one + 1).unwrap(); + let coeff_is_non_zero = !coeff.is_zero(); + let potential = CoefficientIndex { y_pow, x_pow }; + leading_coefficient = <_>::conditional_select( + &leading_coefficient, + &potential, + coeff_is_non_zero & + potential.ct_gt(&leading_coefficient) & + (potential.ct_gt(greater_than_or_equal) | potential.ct_eq(greater_than_or_equal)), + ); + } + } + for (x_pow_sub_one, coeff) in self.x_coefficients.iter().enumerate() { + let x_pow = u64::try_from(x_pow_sub_one + 1).unwrap(); + let coeff_is_non_zero = !coeff.is_zero(); + let potential = CoefficientIndex { y_pow: 0, x_pow }; + leading_coefficient = <_>::conditional_select( + &leading_coefficient, + &potential, + coeff_is_non_zero & + potential.ct_gt(&leading_coefficient) & + (potential.ct_gt(greater_than_or_equal) | potential.ct_eq(greater_than_or_equal)), + ); + } + leading_coefficient + } + + /// Perform 
multiplication mod `modulus`. + #[must_use] + pub(crate) fn mul_mod(self, other: &Self, modulus: &Self) -> Self { + (self * other) % modulus + } + + /// Perform division, returning the result and remainder. + /// + /// This function is constant time to the structure of the numerator and denominator. The actual + /// value of the coefficients will not introduce timing differences. + /// + /// Panics upon division by a polynomial where all coefficients are zero. + #[must_use] + pub(crate) fn div_rem(self, denominator: &Self) -> (Self, Self) { + // These functions have undefined behavior if this isn't a valid index for this poly + fn ct_get + Zeroize + PrimeField>(poly: &Poly, index: CoefficientIndex) -> F { + let mut res = poly.zero_coefficient; + for (y_pow_sub_one, coeff) in poly.y_coefficients.iter().enumerate() { + res = <_>::conditional_select( + &res, + coeff, + index + .ct_eq(&CoefficientIndex { y_pow: (y_pow_sub_one + 1).try_into().unwrap(), x_pow: 0 }), + ); + } + for (y_pow_sub_one, coeffs) in poly.yx_coefficients.iter().enumerate() { + for (x_pow_sub_one, coeff) in coeffs.iter().enumerate() { + res = <_>::conditional_select( + &res, + coeff, + index.ct_eq(&CoefficientIndex { + y_pow: (y_pow_sub_one + 1).try_into().unwrap(), + x_pow: (x_pow_sub_one + 1).try_into().unwrap(), + }), + ); + } + } + for (x_pow_sub_one, coeff) in poly.x_coefficients.iter().enumerate() { + res = <_>::conditional_select( + &res, + coeff, + index + .ct_eq(&CoefficientIndex { y_pow: 0, x_pow: (x_pow_sub_one + 1).try_into().unwrap() }), + ); + } + res + } + + fn ct_set + Zeroize + PrimeField>( + poly: &mut Poly, + index: CoefficientIndex, + value: F, + ) { + for (y_pow_sub_one, coeff) in poly.y_coefficients.iter_mut().enumerate() { + *coeff = <_>::conditional_select( + coeff, + &value, + index + .ct_eq(&CoefficientIndex { y_pow: (y_pow_sub_one + 1).try_into().unwrap(), x_pow: 0 }), + ); + } + for (y_pow_sub_one, coeffs) in poly.yx_coefficients.iter_mut().enumerate() { + for (x_pow_sub_one, coeff) in coeffs.iter_mut().enumerate() { + *coeff = <_>::conditional_select( + coeff, + &value, + index.ct_eq(&CoefficientIndex { + y_pow: (y_pow_sub_one + 1).try_into().unwrap(), + x_pow: (x_pow_sub_one + 1).try_into().unwrap(), + }), + ); + } + } + for (x_pow_sub_one, coeff) in poly.x_coefficients.iter_mut().enumerate() { + *coeff = <_>::conditional_select( + coeff, + &value, + index + .ct_eq(&CoefficientIndex { y_pow: 0, x_pow: (x_pow_sub_one + 1).try_into().unwrap() }), + ); + } + poly.zero_coefficient = <_>::conditional_select( + &poly.zero_coefficient, + &value, + index.ct_eq(&CoefficientIndex { y_pow: 0, x_pow: 0 }), + ); + } + + fn conditional_select_poly + Zeroize + PrimeField>( + mut a: Poly, + mut b: Poly, + choice: Choice, + ) -> Poly { + let pad_to = |a: &mut Poly, b: &Poly| { + while a.x_coefficients.len() < b.x_coefficients.len() { + a.x_coefficients.push(F::ZERO); + } + while a.yx_coefficients.len() < b.yx_coefficients.len() { + a.yx_coefficients.push(vec![]); + } + for (a, b) in a.yx_coefficients.iter_mut().zip(&b.yx_coefficients) { + while a.len() < b.len() { + a.push(F::ZERO); + } + } + while a.y_coefficients.len() < b.y_coefficients.len() { + a.y_coefficients.push(F::ZERO); + } + }; + // Pad these to be the same size/layout as each other + pad_to(&mut a, &b); + pad_to(&mut b, &a); + + let mut res = Poly::zero(); + for (a, b) in a.y_coefficients.iter().zip(&b.y_coefficients) { + res.y_coefficients.push(<_>::conditional_select(a, b, choice)); + } + for (a, b) in 
a.yx_coefficients.iter().zip(&b.yx_coefficients) { + let mut yx_coefficients = Vec::with_capacity(a.len()); + for (a, b) in a.iter().zip(b) { + yx_coefficients.push(<_>::conditional_select(a, b, choice)) + } + res.yx_coefficients.push(yx_coefficients); + } + for (a, b) in a.x_coefficients.iter().zip(&b.x_coefficients) { + res.x_coefficients.push(<_>::conditional_select(a, b, choice)); + } + res.zero_coefficient = + <_>::conditional_select(&a.zero_coefficient, &b.zero_coefficient, choice); + + res + } + + // The following long division algorithm only works if the denominator actually has a variable + // If the denominator isn't variable to anything, short-circuit to scalar 'division' + // This is safe as `leading_coefficient` is based on the structure, not the values, of the poly + let denominator_leading_coefficient = denominator.leading_coefficient(); + if denominator_leading_coefficient == (0, 0) { + return (self * denominator.zero_coefficient.invert().unwrap(), Poly::zero()); + } + + // The structure of the quotient, which is the the numerator with all coefficients set to 0 + let mut quotient_structure = Poly { + y_coefficients: vec![F::ZERO; self.y_coefficients.len()], + yx_coefficients: self.yx_coefficients.clone(), + x_coefficients: vec![F::ZERO; self.x_coefficients.len()], + zero_coefficient: F::ZERO, + }; + for coeff in quotient_structure + .yx_coefficients + .iter_mut() + .flat_map(|yx_coefficients| yx_coefficients.iter_mut()) + { + *coeff = F::ZERO; + } + + // Calculate the amount of iterations we need to perform + let iterations = self.y_coefficients.len() + + self.yx_coefficients.iter().map(|yx_coefficients| yx_coefficients.len()).sum::() + + self.x_coefficients.len(); + + // Find the highest non-zero coefficient in the denominator + // This is the coefficient which we actually perform division with + let denominator_dividing_coefficient = + denominator.greater_than_or_equal_coefficient(&CoefficientIndex { y_pow: 0, x_pow: 0 }); + let denominator_dividing_coefficient_inv = + ct_get(denominator, denominator_dividing_coefficient).invert().unwrap(); + + let mut quotient = quotient_structure.clone(); + let mut remainder = self.clone(); + for _ in 0 .. iterations { + // Find the numerator coefficient we're clearing + // This will be (0, 0) if we aren't clearing a coefficient + let numerator_coefficient = + remainder.greater_than_or_equal_coefficient(&denominator_dividing_coefficient); + + // We only apply the effects of this iteration if the numerator's coefficient is actually >= + let meaningful_iteration = numerator_coefficient.ct_gt(&denominator_dividing_coefficient) | + numerator_coefficient.ct_eq(&denominator_dividing_coefficient); + + // 1) Find the scalar `q` such that the leading coefficient of `q * denominator` is equal to + // the leading coefficient of self. 
+ let numerator_coefficient_value = ct_get(&remainder, numerator_coefficient); + let q = numerator_coefficient_value * denominator_dividing_coefficient_inv; + + // 2) Calculate the full term of the quotient by scaling with the necessary powers of y/x + let proper_powers_of_yx = CoefficientIndex { + y_pow: numerator_coefficient.y_pow.wrapping_sub(denominator_dividing_coefficient.y_pow), + x_pow: numerator_coefficient.x_pow.wrapping_sub(denominator_dividing_coefficient.x_pow), + }; + let fallback_powers_of_yx = CoefficientIndex { y_pow: 0, x_pow: 0 }; + let mut quotient_term = quotient_structure.clone(); + ct_set( + &mut quotient_term, + // If the numerator coefficient isn't >=, proper_powers_of_yx will have garbage in them + <_>::conditional_select(&fallback_powers_of_yx, &proper_powers_of_yx, meaningful_iteration), + q, + ); + + let quotient_if_meaningful = quotient.clone() + &quotient_term; + quotient = conditional_select_poly(quotient, quotient_if_meaningful, meaningful_iteration); + + // 3) Remove what we've divided out from self + let remainder_if_meaningful = remainder.clone() - (quotient_term * denominator); + remainder = conditional_select_poly(remainder, remainder_if_meaningful, meaningful_iteration); + } + + quotient = conditional_select_poly( + quotient, + // If the dividing coefficient was for y**0 x**0, we return the poly scaled by its inverse + self * denominator_dividing_coefficient_inv, + denominator_dividing_coefficient.ct_eq(&CoefficientIndex { y_pow: 0, x_pow: 0 }), + ); + remainder = conditional_select_poly( + remainder, + // If the dividing coefficient was for y**0 x**0, we're able to perfectly divide and there's + // no remainder + Poly::zero(), + denominator_dividing_coefficient.ct_eq(&CoefficientIndex { y_pow: 0, x_pow: 0 }), + ); + + // Clear any junk terms out of the remainder which are less than the denominator + let denominator_leading_coefficient = CoefficientIndex { + y_pow: denominator_leading_coefficient.0.try_into().unwrap(), + x_pow: denominator_leading_coefficient.1.try_into().unwrap(), + }; + if denominator_leading_coefficient != (CoefficientIndex { y_pow: 0, x_pow: 0 }) { + while { + let index = + CoefficientIndex { y_pow: remainder.y_coefficients.len().try_into().unwrap(), x_pow: 0 }; + bool::from( + index.ct_gt(&denominator_leading_coefficient) | + index.ct_eq(&denominator_leading_coefficient), + ) + } { + let popped = remainder.y_coefficients.pop(); + debug_assert_eq!(popped, Some(F::ZERO)); + } + while { + let index = CoefficientIndex { + y_pow: remainder.yx_coefficients.len().try_into().unwrap(), + x_pow: remainder + .yx_coefficients + .last() + .map(|yx_coefficients| yx_coefficients.len()) + .unwrap_or(0) + .try_into() + .unwrap(), + }; + bool::from( + index.ct_gt(&denominator_leading_coefficient) | + index.ct_eq(&denominator_leading_coefficient), + ) + } { + let popped = remainder.yx_coefficients.last_mut().unwrap().pop(); + // This may have been `vec![]` + if let Some(popped) = popped { + debug_assert_eq!(popped, F::ZERO); + } + if remainder.yx_coefficients.last().unwrap().is_empty() { + let popped = remainder.yx_coefficients.pop(); + debug_assert_eq!(popped, Some(vec![])); + } + } + while { + let index = + CoefficientIndex { y_pow: 0, x_pow: remainder.x_coefficients.len().try_into().unwrap() }; + bool::from( + index.ct_gt(&denominator_leading_coefficient) | + index.ct_eq(&denominator_leading_coefficient), + ) + } { + let popped = remainder.x_coefficients.pop(); + debug_assert_eq!(popped, Some(F::ZERO)); + } + } + + (quotient, remainder) + } +} + 
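// A minimal sketch of how this long division is expected to behave, using the `Poly` layout
// defined above and assuming `F` is a prime field such as `<Ep as DivisorCurve>::FieldElement`
// (as in this crate's tests). Since `div_rem` is `pub(crate)`, a check like this would live in
// the crate's test module.
//
// // (y + x) * x = yx + x^2, so dividing that product by x should return y + x with no remainder
// let x_term = Poly {
//   y_coefficients: vec![], yx_coefficients: vec![], x_coefficients: vec![F::ONE],
//   zero_coefficient: F::ZERO,
// };
// let y_plus_x = Poly {
//   y_coefficients: vec![F::ONE], yx_coefficients: vec![], x_coefficients: vec![F::ONE],
//   zero_coefficient: F::ZERO,
// };
// let numerator = y_plus_x.clone() * &x_term;
// let (quotient, remainder) = numerator.div_rem(&x_term);
// assert_eq!(quotient, y_plus_x);
// assert_eq!(remainder, Poly::zero());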
+impl + Zeroize + PrimeField> Rem<&Self> for Poly { + type Output = Self; + + fn rem(self, modulus: &Self) -> Self { + self.div_rem(modulus).1 + } +} + +impl + Zeroize + PrimeField> Poly { + /// Evaluate this polynomial with the specified x/y values. + /// + /// Panics on polynomials with terms whose powers exceed 2^64. + #[must_use] + pub fn eval(&self, x: F, y: F) -> F { + let mut res = self.zero_coefficient; + for (pow, coeff) in + self.y_coefficients.iter().enumerate().map(|(i, v)| (u64::try_from(i + 1).unwrap(), v)) + { + res += y.pow([pow]) * coeff; + } + for (y_pow, coeffs) in + self.yx_coefficients.iter().enumerate().map(|(i, v)| (u64::try_from(i + 1).unwrap(), v)) + { + let y_pow = y.pow([y_pow]); + for (x_pow, coeff) in + coeffs.iter().enumerate().map(|(i, v)| (u64::try_from(i + 1).unwrap(), v)) + { + res += y_pow * x.pow([x_pow]) * coeff; + } + } + for (pow, coeff) in + self.x_coefficients.iter().enumerate().map(|(i, v)| (u64::try_from(i + 1).unwrap(), v)) + { + res += x.pow([pow]) * coeff; + } + res + } + + /// Differentiate a polynomial, reduced by a modulus with a leading y term y^2 x^0, by x and y. + /// + /// This function has undefined behavior if unreduced. + #[must_use] + pub fn differentiate(&self) -> (Poly, Poly) { + // Differentation by x practically involves: + // - Dropping everything without an x component + // - Shifting everything down a power of x + // - Multiplying the new coefficient by the power it prior was used with + let diff_x = { + let mut diff_x = Poly { + y_coefficients: vec![], + yx_coefficients: vec![], + x_coefficients: vec![], + zero_coefficient: F::ZERO, + }; + if !self.x_coefficients.is_empty() { + let mut x_coeffs = self.x_coefficients.clone(); + diff_x.zero_coefficient = x_coeffs.remove(0); + diff_x.x_coefficients = x_coeffs; + + let mut prior_x_power = F::from(2); + for x_coeff in &mut diff_x.x_coefficients { + *x_coeff *= prior_x_power; + prior_x_power += F::ONE; + } + } + + if !self.yx_coefficients.is_empty() { + let mut yx_coeffs = self.yx_coefficients[0].clone(); + if !yx_coeffs.is_empty() { + diff_x.y_coefficients = vec![yx_coeffs.remove(0)]; + diff_x.yx_coefficients = vec![yx_coeffs]; + + let mut prior_x_power = F::from(2); + for yx_coeff in &mut diff_x.yx_coefficients[0] { + *yx_coeff *= prior_x_power; + prior_x_power += F::ONE; + } + } + } + + diff_x + }; + + // Differentation by y is trivial + // It's the y coefficient as the zero coefficient, and the yx coefficients as the x + // coefficients + // This is thanks to any y term over y^2 being reduced out + let diff_y = Poly { + y_coefficients: vec![], + yx_coefficients: vec![], + x_coefficients: self.yx_coefficients.first().cloned().unwrap_or(vec![]), + zero_coefficient: self.y_coefficients.first().cloned().unwrap_or(F::ZERO), + }; + + (diff_x, diff_y) + } + + /// Normalize the x coefficient to 1. + /// + /// Panics if there is no x coefficient to normalize or if it cannot be normalized to 1. 
+ #[must_use] + pub fn normalize_x_coefficient(self) -> Self { + let scalar = self.x_coefficients[0].invert().unwrap(); + self * scalar + } +} diff --git a/crypto/evrf/divisors/src/tests/mod.rs b/crypto/evrf/divisors/src/tests/mod.rs new file mode 100644 index 00000000..c7c95567 --- /dev/null +++ b/crypto/evrf/divisors/src/tests/mod.rs @@ -0,0 +1,237 @@ +use rand_core::OsRng; + +use group::{ff::Field, Group}; +use dalek_ff_group::EdwardsPoint; +use pasta_curves::{Ep, Eq}; + +use crate::{DivisorCurve, Poly, new_divisor}; + +mod poly; + +// Equation 4 in the security proofs +fn check_divisor(points: Vec) { + // Create the divisor + let divisor = new_divisor::(&points).unwrap(); + let eval = |c| { + let (x, y) = C::to_xy(c).unwrap(); + divisor.eval(x, y) + }; + + // Decide challgenges + let c0 = C::random(&mut OsRng); + let c1 = C::random(&mut OsRng); + let c2 = -(c0 + c1); + let (slope, intercept) = crate::slope_intercept::(c0, c1); + + let mut rhs = ::FieldElement::ONE; + for point in points { + let (x, y) = C::to_xy(point).unwrap(); + rhs *= intercept - (y - (slope * x)); + } + assert_eq!(eval(c0) * eval(c1) * eval(c2), rhs); +} + +fn test_divisor() { + for i in 1 ..= 255 { + println!("Test iteration {i}"); + + // Select points + let mut points = vec![]; + for _ in 0 .. i { + points.push(C::random(&mut OsRng)); + } + points.push(-points.iter().sum::()); + println!("Points {}", points.len()); + + // Perform the original check + check_divisor(points.clone()); + + // Create the divisor + let divisor = new_divisor::(&points).unwrap(); + + // For a divisor interpolating 256 points, as one does when interpreting a 255-bit discrete log + // with the result of its scalar multiplication against a fixed generator, the lengths of the + // yx/x coefficients shouldn't supersede the following bounds + assert!((divisor.yx_coefficients.first().unwrap_or(&vec![]).len()) <= 126); + assert!((divisor.x_coefficients.len() - 1) <= 127); + assert!( + (1 + divisor.yx_coefficients.first().unwrap_or(&vec![]).len() + + (divisor.x_coefficients.len() - 1) + + 1) <= + 255 + ); + + // Decide challgenges + let c0 = C::random(&mut OsRng); + let c1 = C::random(&mut OsRng); + let c2 = -(c0 + c1); + let (slope, intercept) = crate::slope_intercept::(c0, c1); + + // Perform the Logarithmic derivative check + { + let dx_over_dz = { + let dx = Poly { + y_coefficients: vec![], + yx_coefficients: vec![], + x_coefficients: vec![C::FieldElement::ZERO, C::FieldElement::from(3)], + zero_coefficient: C::a(), + }; + + let dy = Poly { + y_coefficients: vec![C::FieldElement::from(2)], + yx_coefficients: vec![], + x_coefficients: vec![], + zero_coefficient: C::FieldElement::ZERO, + }; + + let dz = (dy.clone() * -slope) + &dx; + + // We want dx/dz, and dz/dx is equal to dy/dx - slope + // Sagemath claims this, dy / dz, is the proper inverse + (dy, dz) + }; + + { + let sanity_eval = |c| { + let (x, y) = C::to_xy(c).unwrap(); + dx_over_dz.0.eval(x, y) * dx_over_dz.1.eval(x, y).invert().unwrap() + }; + let sanity = sanity_eval(c0) + sanity_eval(c1) + sanity_eval(c2); + // This verifies the dx/dz polynomial is correct + assert_eq!(sanity, C::FieldElement::ZERO); + } + + // Logarithmic derivative check + let test = |divisor: Poly<_>| { + let (dx, dy) = divisor.differentiate(); + + let lhs = |c| { + let (x, y) = C::to_xy(c).unwrap(); + + let n_0 = (C::FieldElement::from(3) * (x * x)) + C::a(); + let d_0 = (C::FieldElement::from(2) * y).invert().unwrap(); + let p_0_n_0 = n_0 * d_0; + + let n_1 = dy.eval(x, y); + let first = p_0_n_0 * n_1; + + let 
second = dx.eval(x, y); + + let d_1 = divisor.eval(x, y); + + let fraction_1_n = first + second; + let fraction_1_d = d_1; + + let fraction_2_n = dx_over_dz.0.eval(x, y); + let fraction_2_d = dx_over_dz.1.eval(x, y); + + fraction_1_n * fraction_2_n * (fraction_1_d * fraction_2_d).invert().unwrap() + }; + let lhs = lhs(c0) + lhs(c1) + lhs(c2); + + let mut rhs = C::FieldElement::ZERO; + for point in &points { + let (x, y) = ::to_xy(*point).unwrap(); + rhs += (intercept - (y - (slope * x))).invert().unwrap(); + } + + assert_eq!(lhs, rhs); + }; + // Test the divisor and the divisor with a normalized x coefficient + test(divisor.clone()); + test(divisor.normalize_x_coefficient()); + } + } +} + +fn test_same_point() { + let mut points = vec![C::random(&mut OsRng)]; + points.push(points[0]); + points.push(-points.iter().sum::()); + check_divisor(points); +} + +fn test_subset_sum_to_infinity() { + // Internally, a binary tree algorithm is used + // This executes the first pass to end up with [0, 0] for further reductions + { + let mut points = vec![C::random(&mut OsRng)]; + points.push(-points[0]); + + let next = C::random(&mut OsRng); + points.push(next); + points.push(-next); + check_divisor(points); + } + + // This executes the first pass to end up with [0, X, -X, 0] + { + let mut points = vec![C::random(&mut OsRng)]; + points.push(-points[0]); + + let x_1 = C::random(&mut OsRng); + let x_2 = C::random(&mut OsRng); + points.push(x_1); + points.push(x_2); + + points.push(-x_1); + points.push(-x_2); + + let next = C::random(&mut OsRng); + points.push(next); + points.push(-next); + check_divisor(points); + } +} + +#[test] +fn test_divisor_pallas() { + test_same_point::(); + test_subset_sum_to_infinity::(); + test_divisor::(); +} + +#[test] +fn test_divisor_vesta() { + test_same_point::(); + test_subset_sum_to_infinity::(); + test_divisor::(); +} + +#[test] +fn test_divisor_ed25519() { + // Since we're implementing Wei25519 ourselves, check the isomorphism works as expected + { + let incomplete_add = |p1, p2| { + let (x1, y1) = EdwardsPoint::to_xy(p1).unwrap(); + let (x2, y2) = EdwardsPoint::to_xy(p2).unwrap(); + + // mmadd-1998-cmo + let u = y2 - y1; + let uu = u * u; + let v = x2 - x1; + let vv = v * v; + let vvv = v * vv; + let R = vv * x1; + let A = uu - vvv - R.double(); + let x3 = v * A; + let y3 = (u * (R - A)) - (vvv * y1); + let z3 = vvv; + + // Normalize from XYZ to XY + let x3 = x3 * z3.invert().unwrap(); + let y3 = y3 * z3.invert().unwrap(); + + // Edwards addition -> Wei25519 coordinates should be equivalent to Wei25519 addition + assert_eq!(EdwardsPoint::to_xy(p1 + p2).unwrap(), (x3, y3)); + }; + + for _ in 0 .. 
256 { + incomplete_add(EdwardsPoint::random(&mut OsRng), EdwardsPoint::random(&mut OsRng)); + } + } + + test_same_point::(); + test_subset_sum_to_infinity::(); + test_divisor::(); +} diff --git a/crypto/evrf/divisors/src/tests/poly.rs b/crypto/evrf/divisors/src/tests/poly.rs new file mode 100644 index 00000000..63f73a96 --- /dev/null +++ b/crypto/evrf/divisors/src/tests/poly.rs @@ -0,0 +1,148 @@ +use rand_core::OsRng; + +use group::ff::Field; +use pasta_curves::Ep; + +use crate::{DivisorCurve, Poly}; + +type F = ::FieldElement; + +#[test] +fn test_poly() { + let zero = F::ZERO; + let one = F::ONE; + + { + let mut poly = Poly::zero(); + poly.y_coefficients = vec![zero, one]; + + let mut modulus = Poly::zero(); + modulus.y_coefficients = vec![one]; + assert_eq!( + poly.clone().div_rem(&modulus).0, + Poly { + y_coefficients: vec![one], + yx_coefficients: vec![], + x_coefficients: vec![], + zero_coefficient: zero + } + ); + assert_eq!( + poly % &modulus, + Poly { + y_coefficients: vec![], + yx_coefficients: vec![], + x_coefficients: vec![], + zero_coefficient: zero + } + ); + } + + { + let mut poly = Poly::zero(); + poly.y_coefficients = vec![zero, one]; + + let mut squared = Poly::zero(); + squared.y_coefficients = vec![zero, zero, zero, one]; + assert_eq!(poly.clone() * &poly, squared); + } + + { + let mut a = Poly::zero(); + a.zero_coefficient = F::from(2u64); + + let mut b = Poly::zero(); + b.zero_coefficient = F::from(3u64); + + let mut res = Poly::zero(); + res.zero_coefficient = F::from(6u64); + assert_eq!(a.clone() * &b, res); + + b.y_coefficients = vec![F::from(4u64)]; + res.y_coefficients = vec![F::from(8u64)]; + assert_eq!(a.clone() * &b, res); + assert_eq!(b.clone() * &a, res); + + a.x_coefficients = vec![F::from(5u64)]; + res.x_coefficients = vec![F::from(15u64)]; + res.yx_coefficients = vec![vec![F::from(20u64)]]; + assert_eq!(a.clone() * &b, res); + assert_eq!(b * &a, res); + + // res is now 20xy + 8*y + 15*x + 6 + // res ** 2 = + // 400*x^2*y^2 + 320*x*y^2 + 64*y^2 + 600*x^2*y + 480*x*y + 96*y + 225*x^2 + 180*x + 36 + + let mut squared = Poly::zero(); + squared.y_coefficients = vec![F::from(96u64), F::from(64u64)]; + squared.yx_coefficients = + vec![vec![F::from(480u64), F::from(600u64)], vec![F::from(320u64), F::from(400u64)]]; + squared.x_coefficients = vec![F::from(180u64), F::from(225u64)]; + squared.zero_coefficient = F::from(36u64); + assert_eq!(res.clone() * &res, squared); + } +} + +#[test] +fn test_differentation() { + let random = || F::random(&mut OsRng); + + let input = Poly { + y_coefficients: vec![random()], + yx_coefficients: vec![vec![random()]], + x_coefficients: vec![random(), random(), random()], + zero_coefficient: random(), + }; + let (diff_x, diff_y) = input.differentiate(); + assert_eq!( + diff_x, + Poly { + y_coefficients: vec![input.yx_coefficients[0][0]], + yx_coefficients: vec![], + x_coefficients: vec![ + F::from(2) * input.x_coefficients[1], + F::from(3) * input.x_coefficients[2] + ], + zero_coefficient: input.x_coefficients[0], + } + ); + assert_eq!( + diff_y, + Poly { + y_coefficients: vec![], + yx_coefficients: vec![], + x_coefficients: vec![input.yx_coefficients[0][0]], + zero_coefficient: input.y_coefficients[0], + } + ); + + let input = Poly { + y_coefficients: vec![random()], + yx_coefficients: vec![vec![random(), random()]], + x_coefficients: vec![random(), random(), random(), random()], + zero_coefficient: random(), + }; + let (diff_x, diff_y) = input.differentiate(); + assert_eq!( + diff_x, + Poly { + y_coefficients: 
vec![input.yx_coefficients[0][0]], + yx_coefficients: vec![vec![F::from(2) * input.yx_coefficients[0][1]]], + x_coefficients: vec![ + F::from(2) * input.x_coefficients[1], + F::from(3) * input.x_coefficients[2], + F::from(4) * input.x_coefficients[3], + ], + zero_coefficient: input.x_coefficients[0], + } + ); + assert_eq!( + diff_y, + Poly { + y_coefficients: vec![], + yx_coefficients: vec![], + x_coefficients: vec![input.yx_coefficients[0][0], input.yx_coefficients[0][1]], + zero_coefficient: input.y_coefficients[0], + } + ); +} diff --git a/crypto/evrf/ec-gadgets/Cargo.toml b/crypto/evrf/ec-gadgets/Cargo.toml new file mode 100644 index 00000000..f29cc4c4 --- /dev/null +++ b/crypto/evrf/ec-gadgets/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "generalized-bulletproofs-ec-gadgets" +version = "0.1.0" +description = "Gadgets for working with an embedded Elliptic Curve in a Generalized Bulletproofs circuit" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/crypto/fcmps/ec-gadgets" +authors = ["Luke Parker "] +keywords = ["bulletproofs", "circuit", "divisors"] +edition = "2021" +rust-version = "1.69" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[dependencies] +std-shims = { path = "../../../common/std-shims", version = "^0.1.1", default-features = false } + +generic-array = { version = "1", default-features = false, features = ["alloc"] } + +ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false } + +generalized-bulletproofs-circuit-abstraction = { path = "../circuit-abstraction", default-features = false } + +[features] +std = ["std-shims/std", "ciphersuite/std", "generalized-bulletproofs-circuit-abstraction/std"] +default = ["std"] diff --git a/crypto/evrf/ec-gadgets/LICENSE b/crypto/evrf/ec-gadgets/LICENSE new file mode 100644 index 00000000..659881f1 --- /dev/null +++ b/crypto/evrf/ec-gadgets/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Luke Parker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crypto/evrf/ec-gadgets/README.md b/crypto/evrf/ec-gadgets/README.md new file mode 100644 index 00000000..95149d93 --- /dev/null +++ b/crypto/evrf/ec-gadgets/README.md @@ -0,0 +1,3 @@ +# Generalized Bulletproofs Circuit Abstraction + +A circuit abstraction around `generalized-bulletproofs`. 
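Illustrative note (not part of the patch): the `dlog` module added next exposes these gadgets through an `EcDlogGadgets` trait on `Circuit`, where one challenge is sampled per transcript (after all commitments are written) and then reused across every discrete-log claim. A minimal sketch of that call order, assuming the `Circuit`, `Transcript`, `CurveSpec`, `GeneratorTable`, `PointWithDlog`, and `DiscreteLogParameters` items defined below (signatures approximated from this diff), could look like:

```rust
use ciphersuite::Ciphersuite;
use generalized_bulletproofs_circuit_abstraction::{Circuit, Transcript};
use generalized_bulletproofs_ec_gadgets::*;

// Sketch: sample the challenge once the commitments are in the transcript, then
// evaluate each discrete-log claim against that single, reusable challenge.
fn prove_dlog_claim<C: Ciphersuite, T: Transcript, P: DiscreteLogParameters>(
  circuit: &mut Circuit<C>,
  transcript: &mut T,
  curve: &CurveSpec<C::F>,
  generator: &GeneratorTable<C::F, P>,
  claim: PointWithDlog<P>,
) -> OnCurve {
  // One generator in, one challenged generator out
  let (challenge, challenged) = circuit.discrete_log_challenge(transcript, curve, &[generator]);
  circuit.discrete_log(curve, claim, &challenge, &challenged[0])
}
```

The returned `OnCurve` is the same committed point, now constrained to be on-curve and to have the claimed discrete logarithm.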
diff --git a/crypto/evrf/ec-gadgets/src/dlog.rs b/crypto/evrf/ec-gadgets/src/dlog.rs new file mode 100644 index 00000000..d124e07f --- /dev/null +++ b/crypto/evrf/ec-gadgets/src/dlog.rs @@ -0,0 +1,524 @@ +use core::fmt; +use std_shims::{vec, vec::Vec}; + +use ciphersuite::{ + group::ff::{Field, PrimeField, BatchInverter}, + Ciphersuite, +}; + +use generalized_bulletproofs_circuit_abstraction::*; + +use crate::*; + +/// Parameters for a discrete logarithm proof. +pub trait DiscreteLogParameters { + /// The amount of bits used to represent a scalar. + type ScalarBits: ArrayLength; + + /// The amount of x**i coefficients in a divisor. + /// + /// This is the amount of points in a divisor (the amount of bits in a scalar, plus one) divided + /// by two. + type XCoefficients: ArrayLength; + + /// The amount of x**i coefficients in a divisor, minus one. + type XCoefficientsMinusOne: ArrayLength; + + /// The amount of y x**i coefficients in a divisor. + /// + /// This is the amount of points in a divisor (the amount of bits in a scalar, plus one) divided + /// by two, minus two. + type YxCoefficients: ArrayLength; +} + +/// A tabled generator for proving/verifying discrete logarithm claims. +#[derive(Clone)] +pub struct GeneratorTable( + GenericArray<(F, F), Parameters::ScalarBits>, +); + +impl fmt::Debug + for GeneratorTable +{ + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt + .debug_struct("GeneratorTable") + .field("x", &self.0[0].0) + .field("y", &self.0[0].1) + .finish_non_exhaustive() + } +} + +impl GeneratorTable { + /// Create a new table for this generator. + /// + /// The generator is assumed to be well-formed and on-curve. This function may panic if it's not. + pub fn new(curve: &CurveSpec, generator_x: F, generator_y: F) -> Self { + // mdbl-2007-bl + fn dbl(a: F, x1: F, y1: F) -> (F, F) { + let xx = x1 * x1; + let w = a + (xx + xx.double()); + let y1y1 = y1 * y1; + let r = y1y1 + y1y1; + let sss = (y1 * r).double().double(); + let rr = r * r; + + let b = x1 + r; + let b = (b * b) - xx - rr; + + let h = (w * w) - b.double(); + let x3 = h.double() * y1; + let y3 = (w * (b - h)) - rr.double(); + let z3 = sss; + + // Normalize from XYZ to XY + let z3_inv = z3.invert().unwrap(); + let x3 = x3 * z3_inv; + let y3 = y3 * z3_inv; + + (x3, y3) + } + + let mut res = Self(GenericArray::default()); + res.0[0] = (generator_x, generator_y); + for i in 1 .. Parameters::ScalarBits::USIZE { + let last = res.0[i - 1]; + res.0[i] = dbl(curve.a, last.0, last.1); + } + + res + } +} + +/// A representation of the divisor. +/// +/// The coefficient for x**1 is explicitly excluded as it's expected to be normalized to 1. +#[derive(Clone)] +pub struct Divisor { + /// The coefficient for the `y` term of the divisor. + /// + /// There is never more than one `y**i x**0` coefficient as the leading term of the modulus is + /// `y**2`. It's assumed the coefficient is non-zero (and present) as it will be for any divisor + /// exceeding trivial complexity. + pub y: Variable, + /// The coefficients for the `y**1 x**i` terms of the polynomial. + pub yx: GenericArray, + /// The coefficients for the `x**i` terms of the polynomial, skipping x**1. + /// + /// x**1 is skipped as it's expected to be normalized to 1, and therefore constant, in order to + /// ensure the divisor is non-zero (as necessary for the proof to be complete). 
+ // Subtract 1 from the length due to skipping the coefficient for x**1 + pub x_from_power_of_2: GenericArray, + /// The constant term in the polynomial (alternatively, the coefficient for y**0 x**0). + pub zero: Variable, +} + +/// A point, its discrete logarithm, and the divisor to prove it. +#[derive(Clone)] +pub struct PointWithDlog { + /// The point which is supposedly the result of scaling the generator by the discrete logarithm. + pub point: (Variable, Variable), + /// The discrete logarithm, represented as its coefficients for the powers of two (nominally, its bits). + pub dlog: GenericArray, + /// The divisor interpolating the relevant doublings of the generator with the inverse of the point. + pub divisor: Divisor, +} + +/// A struct containing a point used for the evaluation of a divisor. +/// +/// Preprocesses and caches as much of the calculation as possible to minimize work upon reuse of +/// challenge points. +struct ChallengePoint { + y: F, + yx: GenericArray, + x: GenericArray, + p_0_n_0: F, + x_p_0_n_0: GenericArray, + p_1_n: F, + p_1_d: F, +} + +impl ChallengePoint { + fn new( + curve: &CurveSpec, + // The slope between all of the challenge points + slope: F, + // The x and y coordinates + x: F, + y: F, + // The inversion of twice the y coordinate + // We accept this as an argument so that the caller can calculate these with a batch inversion + inv_two_y: F, + ) -> Self { + // Powers of x, skipping x**0 + let divisor_x_len = Parameters::XCoefficients::USIZE; + let mut x_pows = GenericArray::default(); + x_pows[0] = x; + for i in 1 .. divisor_x_len { + let last = x_pows[i - 1]; + x_pows[i] = last * x; + } + + // Powers of x multiplied by y + let divisor_yx_len = Parameters::YxCoefficients::USIZE; + let mut yx = GenericArray::default(); + // Skips x**0 + yx[0] = y * x; + for i in 1 ..
divisor_yx_len { + let last = yx[i - 1]; + yx[i] = last * x; + } + + let x_sq = x.square(); + let three_x_sq = x_sq.double() + x_sq; + let three_x_sq_plus_a = three_x_sq + curve.a; + let two_y = y.double(); + + // p_0_n_0 from `DivisorChallenge` + let p_0_n_0 = three_x_sq_plus_a * inv_two_y; + let mut x_p_0_n_0 = GenericArray::default(); + // Since this iterates over x, which skips x**0, this also skips p_0_n_0 x**0 + for (i, x) in x_pows.iter().take(divisor_yx_len).enumerate() { + x_p_0_n_0[i] = p_0_n_0 * x; + } + + // p_1_n from `DivisorChallenge` + let p_1_n = two_y; + // p_1_d from `DivisorChallenge` + let p_1_d = (-slope * p_1_n) + three_x_sq_plus_a; + + ChallengePoint { x: x_pows, y, yx, p_0_n_0, x_p_0_n_0, p_1_n, p_1_d } + } +} + +// `DivisorChallenge` from the section `Discrete Log Proof` +fn divisor_challenge_eval( + circuit: &mut Circuit, + divisor: &Divisor, + challenge: &ChallengePoint, +) -> Variable { + // The evaluation of the divisor differentiated by y, further multiplied by p_0_n_0 + // Differentation drops everything without a y coefficient, and drops what remains by a power + // of y + // (y**1 -> y**0, yx**i -> x**i) + // This aligns with p_0_n_1 from `DivisorChallenge` + let p_0_n_1 = { + let mut p_0_n_1 = LinComb::empty().term(challenge.p_0_n_0, divisor.y); + for (j, var) in divisor.yx.iter().enumerate() { + // This does not raise by `j + 1` as x_p_0_n_0 omits x**0 + p_0_n_1 = p_0_n_1.term(challenge.x_p_0_n_0[j], *var); + } + p_0_n_1 + }; + + // The evaluation of the divisor differentiated by x + // This aligns with p_0_n_2 from `DivisorChallenge` + let p_0_n_2 = { + // The coefficient for x**1 is 1, so 1 becomes the new zero coefficient + let mut p_0_n_2 = LinComb::empty().constant(C::F::ONE); + + // Handle the new y coefficient + p_0_n_2 = p_0_n_2.term(challenge.y, divisor.yx[0]); + + // Handle the new yx coefficients + for (j, yx) in divisor.yx.iter().enumerate().skip(1) { + // For the power which was shifted down, we multiply this coefficient + // 3 x**2 -> 2 * 3 x**1 + let original_power_of_x = C::F::from(u64::try_from(j + 1).unwrap()); + // `j - 1` so `j = 1` indexes yx[0] as yx[0] is the y x**1 + // (yx omits y x**0) + let this_weight = original_power_of_x * challenge.yx[j - 1]; + p_0_n_2 = p_0_n_2.term(this_weight, *yx); + } + + // Handle the x coefficients + // We don't skip the first one as `x_from_power_of_2` already omits x**1 + for (i, x) in divisor.x_from_power_of_2.iter().enumerate() { + // i + 2 as the paper expects i to start from 1 and be + 1, yet we start from 0 + let original_power_of_x = C::F::from(u64::try_from(i + 2).unwrap()); + // Still x[i] as x[0] is x**1 + let this_weight = original_power_of_x * challenge.x[i]; + + p_0_n_2 = p_0_n_2.term(this_weight, *x); + } + + p_0_n_2 + }; + + // p_0_n from `DivisorChallenge` + let p_0_n = p_0_n_1 + &p_0_n_2; + + // Evaluation of the divisor + // p_0_d from `DivisorChallenge` + let p_0_d = { + let mut p_0_d = LinComb::empty().term(challenge.y, divisor.y); + + for (var, c_yx) in divisor.yx.iter().zip(&challenge.yx) { + p_0_d = p_0_d.term(*c_yx, *var); + } + + for (i, var) in divisor.x_from_power_of_2.iter().enumerate() { + // This `i+1` is preserved, despite most not being as x omits x**0, as this assumes we + // start with `i=1` + p_0_d = p_0_d.term(challenge.x[i + 1], *var); + } + + // Adding x effectively adds a `1 x` term, ensuring the divisor isn't 0 + p_0_d.term(C::F::ONE, divisor.zero).constant(challenge.x[0]) + }; + + // Calculate the joint numerator + // p_n from `DivisorChallenge` + let p_n = 
p_0_n * challenge.p_1_n; + // Calculate the joint denominator + // p_d from `DivisorChallenge` + let p_d = p_0_d * challenge.p_1_d; + + // We want `n / d = o` + // `n / d = o` == `n = d * o` + // These are safe unwraps as they're solely done by the prover and should always be non-zero + let witness = + circuit.eval(&p_d).map(|p_d| (p_d, circuit.eval(&p_n).unwrap() * p_d.invert().unwrap())); + let (_l, o, n_claim) = circuit.mul(Some(p_d), None, witness); + circuit.equality(p_n, &n_claim.into()); + o +} + +/// A challenge to evaluate divisors with. +/// +/// This challenge must be sampled after writing the commitments to the transcript. This challenge +/// is reusable across various divisors. +pub struct DiscreteLogChallenge { + c0: ChallengePoint, + c1: ChallengePoint, + c2: ChallengePoint, + slope: F, + intercept: F, +} + +/// A generator which has been challenged and is ready for use in evaluating discrete logarithm +/// claims. +pub struct ChallengedGenerator( + GenericArray, +); + +/// Gadgets for proving the discrete logarithm of points on an elliptic curve defined over the +/// scalar field of the curve of the Bulletproof. +pub trait EcDlogGadgets { + /// Sample a challenge for a series of discrete logarithm claims. + /// + /// This must be called after writing the commitments to the transcript. + /// + /// The generators are assumed to be non-empty. They are not transcripted. If your generators are + /// dynamic, they must be properly transcripted into the context. + /// + /// May panic/have undefined behavior if an assumption is broken. + #[allow(clippy::type_complexity)] + fn discrete_log_challenge( + &self, + transcript: &mut T, + curve: &CurveSpec, + generators: &[&GeneratorTable], + ) -> (DiscreteLogChallenge, Vec>); + + /// Prove this point has the specified discrete logarithm over the specified generator. + /// + /// The discrete logarithm is not validated to be in a canonical form. The only guarantee made on + /// it is that it's a consistent representation of _a_ discrete logarithm (reuse won't enable + /// re-interpretation as a distinct discrete logarithm). + /// + /// This does ensure the point is on-curve. + /// + /// This MUST only be called with `Variable`s present within commitments. + /// + /// May panic/have undefined behavior if an assumption is broken, or if passed an invalid + /// witness. 
+ fn discrete_log( + &mut self, + curve: &CurveSpec, + point: PointWithDlog, + challenge: &DiscreteLogChallenge, + challenged_generator: &ChallengedGenerator, + ) -> OnCurve; +} + +impl EcDlogGadgets for Circuit { + // This is part of `DiscreteLog` from `Discrete Log Proof`, specifically, the challenges and + // the calculations dependent solely on them + fn discrete_log_challenge( + &self, + transcript: &mut T, + curve: &CurveSpec, + generators: &[&GeneratorTable], + ) -> (DiscreteLogChallenge, Vec>) { + // Get the challenge points + let sign_of_points = transcript.challenge_bytes(); + let sign_of_point_0 = (sign_of_points[0] & 1) == 1; + let sign_of_point_1 = ((sign_of_points[0] >> 1) & 1) == 1; + let (c0_x, c0_y) = loop { + let c0_x = transcript.challenge::(); + let Some(c0_y) = + Option::::from(((c0_x.square() * c0_x) + (curve.a * c0_x) + curve.b).sqrt()) + else { + continue; + }; + // Takes the even y coordinate as to not be dependent on whatever root the above sqrt + // happens to returns + break (c0_x, if bool::from(c0_y.is_odd()) != sign_of_point_0 { -c0_y } else { c0_y }); + }; + let (c1_x, c1_y) = loop { + let c1_x = transcript.challenge::(); + let Some(c1_y) = + Option::::from(((c1_x.square() * c1_x) + (curve.a * c1_x) + curve.b).sqrt()) + else { + continue; + }; + break (c1_x, if bool::from(c1_y.is_odd()) != sign_of_point_1 { -c1_y } else { c1_y }); + }; + + // mmadd-1998-cmo + fn incomplete_add(x1: F, y1: F, x2: F, y2: F) -> Option<(F, F)> { + if x1 == x2 { + None? + } + + let u = y2 - y1; + let uu = u * u; + let v = x2 - x1; + let vv = v * v; + let vvv = v * vv; + let r = vv * x1; + let a = uu - vvv - r.double(); + let x3 = v * a; + let y3 = (u * (r - a)) - (vvv * y1); + let z3 = vvv; + + // Normalize from XYZ to XY + let z3_inv = Option::::from(z3.invert())?; + let x3 = x3 * z3_inv; + let y3 = y3 * z3_inv; + + Some((x3, y3)) + } + + let (c2_x, c2_y) = incomplete_add::(c0_x, c0_y, c1_x, c1_y) + .expect("randomly selected points shared an x coordinate"); + // We want C0, C1, C2 = -(C0 + C1) + let c2_y = -c2_y; + + // Calculate the slope and intercept + // Safe invert as these x coordinates must be distinct due to passing the above incomplete_add + let slope = (c1_y - c0_y) * (c1_x - c0_x).invert().unwrap(); + let intercept = c0_y - (slope * c0_x); + + // Calculate the inversions for 2 c_y (for each c) and all of the challenged generators + let mut inversions = vec![C::F::ZERO; 3 + (generators.len() * Parameters::ScalarBits::USIZE)]; + + // Needed for the left-hand side eval + { + inversions[0] = c0_y.double(); + inversions[1] = c1_y.double(); + inversions[2] = c2_y.double(); + } + + // Perform the inversions for the generators + for (i, generator) in generators.iter().enumerate() { + // Needed for the right-hand side eval + for (j, generator) in generator.0.iter().enumerate() { + // `DiscreteLog` has weights of `(mu - (G_i.y + (slope * G_i.x)))**-1` in its last line + inversions[3 + (i * Parameters::ScalarBits::USIZE) + j] = + intercept - (generator.1 - (slope * generator.0)); + } + } + for challenge_inversion in &inversions { + // This should be unreachable barring negligible probability + if challenge_inversion.is_zero().into() { + panic!("trying to invert 0"); + } + } + let mut scratch = vec![C::F::ZERO; inversions.len()]; + let _ = BatchInverter::invert_with_external_scratch(&mut inversions, &mut scratch); + + let mut inversions = inversions.into_iter(); + let inv_c0_two_y = inversions.next().unwrap(); + let inv_c1_two_y = inversions.next().unwrap(); + let inv_c2_two_y = 
inversions.next().unwrap(); + + let c0 = ChallengePoint::new(curve, slope, c0_x, c0_y, inv_c0_two_y); + let c1 = ChallengePoint::new(curve, slope, c1_x, c1_y, inv_c1_two_y); + let c2 = ChallengePoint::new(curve, slope, c2_x, c2_y, inv_c2_two_y); + + // Fill in the inverted values + let mut challenged_generators = Vec::with_capacity(generators.len()); + for _ in 0 .. generators.len() { + let mut challenged_generator = GenericArray::default(); + for i in 0 .. Parameters::ScalarBits::USIZE { + challenged_generator[i] = inversions.next().unwrap(); + } + challenged_generators.push(ChallengedGenerator(challenged_generator)); + } + + (DiscreteLogChallenge { c0, c1, c2, slope, intercept }, challenged_generators) + } + + // `DiscreteLog` from `Discrete Log Proof` + fn discrete_log( + &mut self, + curve: &CurveSpec, + point: PointWithDlog, + challenge: &DiscreteLogChallenge, + challenged_generator: &ChallengedGenerator, + ) -> OnCurve { + let PointWithDlog { divisor, dlog, point } = point; + + // Ensure this is being safely called + let arg_iter = [point.0, point.1, divisor.y, divisor.zero]; + let arg_iter = arg_iter.iter().chain(divisor.yx.iter()); + let arg_iter = arg_iter.chain(divisor.x_from_power_of_2.iter()); + let arg_iter = arg_iter.chain(dlog.iter()); + for variable in arg_iter { + debug_assert!( + matches!(variable, Variable::CG { .. } | Variable::V(_)), + "discrete log proofs requires all arguments belong to commitments", + ); + } + + // Check the point is on curve + let point = self.on_curve(curve, point); + + // The challenge has already been sampled so those lines aren't necessary + + // lhs from the paper, evaluating the divisor + let lhs_eval = LinComb::from(divisor_challenge_eval(self, &divisor, &challenge.c0)) + + &LinComb::from(divisor_challenge_eval(self, &divisor, &challenge.c1)) + + &LinComb::from(divisor_challenge_eval(self, &divisor, &challenge.c2)); + + // Interpolate the doublings of the generator + let mut rhs_eval = LinComb::empty(); + // We call this `bit` yet it's not constrained to being a bit + // It's presumed to be yet may be malleated + for (bit, weight) in dlog.into_iter().zip(&challenged_generator.0) { + rhs_eval = rhs_eval.term(*weight, bit); + } + + // Interpolate the output point + // intercept - (y - (slope * x)) + // intercept - y + (slope * x) + // -y + (slope * x) + intercept + // EXCEPT the output point we're proving the discrete log for isn't the one interpolated + // Its negative is, so -y becomes y + // y + (slope * x) + intercept + let output_interpolation = LinComb::empty() + .constant(challenge.intercept) + .term(C::F::ONE, point.y) + .term(challenge.slope, point.x); + let output_interpolation_eval = self.eval(&output_interpolation); + let (_output_interpolation, inverse) = + self.inverse(Some(output_interpolation), output_interpolation_eval); + rhs_eval = rhs_eval.term(C::F::ONE, inverse); + + self.equality(lhs_eval, &rhs_eval); + + point + } +} diff --git a/crypto/evrf/ec-gadgets/src/lib.rs b/crypto/evrf/ec-gadgets/src/lib.rs new file mode 100644 index 00000000..e9fb57fb --- /dev/null +++ b/crypto/evrf/ec-gadgets/src/lib.rs @@ -0,0 +1,131 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![cfg_attr(not(feature = "std"), no_std)] +#![deny(missing_docs)] +#![allow(non_snake_case)] + +use generic_array::{typenum::Unsigned, ArrayLength, GenericArray}; + +use ciphersuite::{group::ff::Field, Ciphersuite}; + +use generalized_bulletproofs_circuit_abstraction::*; + +mod dlog; +pub use dlog::*; + +/// The 
specification of a short Weierstrass curve over the field `F`. +/// +/// The short Weierstrass curve is defined via the formula `y**2 = x**3 + a*x + b`. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct CurveSpec { + /// The `a` constant in the curve formula. + pub a: F, + /// The `b` constant in the curve formula. + pub b: F, +} + +/// A struct for a point on a towered curve which has been confirmed to be on-curve. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct OnCurve { + pub(crate) x: Variable, + pub(crate) y: Variable, +} + +impl OnCurve { + /// The variable for the x-coordinate. + pub fn x(&self) -> Variable { + self.x + } + /// The variable for the y-coordinate. + pub fn y(&self) -> Variable { + self.y + } +} + +/// Gadgets for working with points on an elliptic curve defined over the scalar field of the curve +/// of the Bulletproof. +pub trait EcGadgets { + /// Constrain an x and y coordinate as being on the specified curve. + /// + /// The specified curve is defined over the scalar field of the curve this proof is performed + /// over, offering efficient arithmetic. + /// + /// May panic if this is the prover's circuit and the point is not actually on-curve. + fn on_curve(&mut self, curve: &CurveSpec, point: (Variable, Variable)) -> OnCurve; + + /// Perform incomplete addition for a fixed point and an on-curve point. + /// + /// `a` is the x and y coordinates of the fixed point, assumed to be on-curve. + /// + /// `b` is a point previously checked to be on-curve. + /// + /// `c` is a point previously checked to be on-curve, constrained to be the sum of `a` and `b`. + /// + /// `a` and `b` are checked to have distinct x coordinates. + /// + /// This function may panic if `a` is malformed, or if this is the prover's circuit and `c` is + /// not actually the sum of `a` and `b`.
+ fn incomplete_add_fixed(&mut self, a: (C::F, C::F), b: OnCurve, c: OnCurve) -> OnCurve; +} + +impl EcGadgets for Circuit { + fn on_curve(&mut self, curve: &CurveSpec, (x, y): (Variable, Variable)) -> OnCurve { + let x_eval = self.eval(&LinComb::from(x)); + let (_x, _x_2, x2) = + self.mul(Some(LinComb::from(x)), Some(LinComb::from(x)), x_eval.map(|x| (x, x))); + let (_x, _x_2, x3) = + self.mul(Some(LinComb::from(x2)), Some(LinComb::from(x)), x_eval.map(|x| (x * x, x))); + let expected_y2 = LinComb::from(x3).term(curve.a, x).constant(curve.b); + + let y_eval = self.eval(&LinComb::from(y)); + let (_y, _y_2, y2) = + self.mul(Some(LinComb::from(y)), Some(LinComb::from(y)), y_eval.map(|y| (y, y))); + + self.equality(y2.into(), &expected_y2); + + OnCurve { x, y } + } + + fn incomplete_add_fixed(&mut self, a: (C::F, C::F), b: OnCurve, c: OnCurve) -> OnCurve { + // Check b.x != a.0 + { + let bx_lincomb = LinComb::from(b.x); + let bx_eval = self.eval(&bx_lincomb); + self.inequality(bx_lincomb, &LinComb::empty().constant(a.0), bx_eval.map(|bx| (bx, a.0))); + } + + let (x0, y0) = (a.0, a.1); + let (x1, y1) = (b.x, b.y); + let (x2, y2) = (c.x, c.y); + + let slope_eval = self.eval(&LinComb::from(x1)).map(|x1| { + let y1 = self.eval(&LinComb::from(b.y)).unwrap(); + + (y1 - y0) * (x1 - x0).invert().unwrap() + }); + + // slope * (x1 - x0) = y1 - y0 + let x1_minus_x0 = LinComb::from(x1).constant(-x0); + let x1_minus_x0_eval = self.eval(&x1_minus_x0); + let (slope, _r, o) = + self.mul(None, Some(x1_minus_x0), slope_eval.map(|slope| (slope, x1_minus_x0_eval.unwrap()))); + self.equality(LinComb::from(o), &LinComb::from(y1).constant(-y0)); + + // slope * (x2 - x0) = -y2 - y0 + let x2_minus_x0 = LinComb::from(x2).constant(-x0); + let x2_minus_x0_eval = self.eval(&x2_minus_x0); + let (_slope, _x2_minus_x0, o) = self.mul( + Some(slope.into()), + Some(x2_minus_x0), + slope_eval.map(|slope| (slope, x2_minus_x0_eval.unwrap())), + ); + self.equality(o.into(), &LinComb::empty().term(-C::F::ONE, y2).constant(-y0)); + + // slope * slope = x0 + x1 + x2 + let (_slope, _slope_2, o) = + self.mul(Some(slope.into()), Some(slope.into()), slope_eval.map(|slope| (slope, slope))); + self.equality(o.into(), &LinComb::from(x1).term(C::F::ONE, x2).constant(x0)); + + OnCurve { x: x2, y: y2 } + } +} diff --git a/crypto/evrf/embedwards25519/Cargo.toml b/crypto/evrf/embedwards25519/Cargo.toml new file mode 100644 index 00000000..e45d06c0 --- /dev/null +++ b/crypto/evrf/embedwards25519/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "embedwards25519" +version = "0.1.0" +description = "A curve defined over the Ed25519 scalar field" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/crypto/evrf/embedwards25519" +authors = ["Luke Parker "] +keywords = ["curve25519", "ed25519", "ristretto255", "group"] +edition = "2021" +rust-version = "1.80" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[dependencies] +rustversion = "1" +hex-literal = { version = "0.4", default-features = false } + +std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false, optional = true } + +rand_core = { version = "0.6", default-features = false } + +zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] } +subtle = { version = "^2.4", default-features = false } + +generic-array = { version = "1", default-features = false } +crypto-bigint = { version = "0.5", default-features = false, features = ["zeroize"] } + +dalek-ff-group = { 
path = "../../dalek-ff-group", version = "0.4", default-features = false } + +blake2 = { version = "0.10", default-features = false } +ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false } +ec-divisors = { path = "../divisors", default-features = false } +generalized-bulletproofs-ec-gadgets = { path = "../ec-gadgets", default-features = false } + +[dev-dependencies] +hex = "0.4" + +rand_core = { version = "0.6", features = ["std"] } + +ff-group-tests = { path = "../../ff-group-tests" } + +[features] +alloc = ["std-shims", "zeroize/alloc", "ciphersuite/alloc"] +std = ["std-shims/std", "rand_core/std", "zeroize/std", "subtle/std", "blake2/std", "ciphersuite/std", "ec-divisors/std", "generalized-bulletproofs-ec-gadgets/std"] +default = ["std"] diff --git a/crypto/evrf/embedwards25519/LICENSE b/crypto/evrf/embedwards25519/LICENSE new file mode 100644 index 00000000..91d893c1 --- /dev/null +++ b/crypto/evrf/embedwards25519/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022-2024 Luke Parker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crypto/evrf/embedwards25519/README.md b/crypto/evrf/embedwards25519/README.md new file mode 100644 index 00000000..e282c063 --- /dev/null +++ b/crypto/evrf/embedwards25519/README.md @@ -0,0 +1,21 @@ +# embedwards25519 + +A curve defined over the Ed25519 scalar field. + +This curve was found via +[tevador's script](https://gist.github.com/tevador/4524c2092178df08996487d4e272b096) +for finding curves (specifically, curve cycles), modified to search for curves +whose field is the Ed25519 scalar field (not the Ed25519 field). + +```ignore +p = 0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed +q = 0x0fffffffffffffffffffffffffffffffe53f4debb78ff96877063f0306eef96b +D = -420435 +y^2 = x^3 - 3*x + 4188043517836764736459661287169077812555441231147410753119540549773825148767 +``` + +The embedding degree is `(q-1)/2`. + +This curve should not be used with single-coordinate ladders, and points should +always be represented in a compressed form (preventing receiving off-curve +points). 
diff --git a/crypto/evrf/embedwards25519/src/backend.rs b/crypto/evrf/embedwards25519/src/backend.rs new file mode 100644 index 00000000..304fa0bc --- /dev/null +++ b/crypto/evrf/embedwards25519/src/backend.rs @@ -0,0 +1,293 @@ +use zeroize::Zeroize; + +// Use black_box when possible +#[rustversion::since(1.66)] +use core::hint::black_box; +#[rustversion::before(1.66)] +fn black_box(val: T) -> T { + val +} + +pub(crate) fn u8_from_bool(bit_ref: &mut bool) -> u8 { + let bit_ref = black_box(bit_ref); + + let mut bit = black_box(*bit_ref); + let res = black_box(bit as u8); + bit.zeroize(); + debug_assert!((res | 1) == 1); + + bit_ref.zeroize(); + res +} + +macro_rules! math_op { + ( + $Value: ident, + $Other: ident, + $Op: ident, + $op_fn: ident, + $Assign: ident, + $assign_fn: ident, + $function: expr + ) => { + impl $Op<$Other> for $Value { + type Output = $Value; + fn $op_fn(self, other: $Other) -> Self::Output { + Self($function(self.0, other.0)) + } + } + impl $Assign<$Other> for $Value { + fn $assign_fn(&mut self, other: $Other) { + self.0 = $function(self.0, other.0); + } + } + impl<'a> $Op<&'a $Other> for $Value { + type Output = $Value; + fn $op_fn(self, other: &'a $Other) -> Self::Output { + Self($function(self.0, other.0)) + } + } + impl<'a> $Assign<&'a $Other> for $Value { + fn $assign_fn(&mut self, other: &'a $Other) { + self.0 = $function(self.0, other.0); + } + } + }; +} + +macro_rules! from_wrapper { + ($wrapper: ident, $inner: ident, $uint: ident) => { + impl From<$uint> for $wrapper { + fn from(a: $uint) -> $wrapper { + Self(Residue::new(&$inner::from(a))) + } + } + }; +} + +macro_rules! field { + ( + $FieldName: ident, + $ResidueType: ident, + + $MODULUS_STR: ident, + $MODULUS: ident, + $WIDE_MODULUS: ident, + + $NUM_BITS: literal, + $MULTIPLICATIVE_GENERATOR: literal, + $S: literal, + $ROOT_OF_UNITY: literal, + $DELTA: literal, + ) => { + use core::{ + ops::{DerefMut, Add, AddAssign, Neg, Sub, SubAssign, Mul, MulAssign}, + iter::{Sum, Product}, + }; + + use subtle::{Choice, CtOption, ConstantTimeEq, ConstantTimeLess, ConditionallySelectable}; + use rand_core::RngCore; + + use crypto_bigint::{Integer, NonZero, Encoding, impl_modulus}; + + use ciphersuite::group::ff::{ + Field, PrimeField, FieldBits, PrimeFieldBits, helpers::sqrt_ratio_generic, + }; + + use $crate::backend::u8_from_bool; + + fn reduce(x: U512) -> U256 { + U256::from_le_slice(&x.rem(&NonZero::new($WIDE_MODULUS).unwrap()).to_le_bytes()[.. 
32]) + } + + impl ConstantTimeEq for $FieldName { + fn ct_eq(&self, other: &Self) -> Choice { + self.0.ct_eq(&other.0) + } + } + + impl ConditionallySelectable for $FieldName { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + $FieldName(Residue::conditional_select(&a.0, &b.0, choice)) + } + } + + math_op!($FieldName, $FieldName, Add, add, AddAssign, add_assign, |x: $ResidueType, y| x + .add(&y)); + math_op!($FieldName, $FieldName, Sub, sub, SubAssign, sub_assign, |x: $ResidueType, y| x + .sub(&y)); + math_op!($FieldName, $FieldName, Mul, mul, MulAssign, mul_assign, |x: $ResidueType, y| x + .mul(&y)); + + from_wrapper!($FieldName, U256, u8); + from_wrapper!($FieldName, U256, u16); + from_wrapper!($FieldName, U256, u32); + from_wrapper!($FieldName, U256, u64); + from_wrapper!($FieldName, U256, u128); + + impl Neg for $FieldName { + type Output = $FieldName; + fn neg(self) -> $FieldName { + Self(self.0.neg()) + } + } + + impl<'a> Neg for &'a $FieldName { + type Output = $FieldName; + fn neg(self) -> Self::Output { + (*self).neg() + } + } + + impl $FieldName { + /// Perform an exponentation. + pub fn pow(&self, other: $FieldName) -> $FieldName { + let mut table = [Self(Residue::ONE); 16]; + table[1] = *self; + for i in 2 .. 16 { + table[i] = table[i - 1] * self; + } + + let mut res = Self(Residue::ONE); + let mut bits = 0; + for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() { + bits <<= 1; + let mut bit = u8_from_bool(bit.deref_mut()); + bits |= bit; + bit.zeroize(); + + if ((i + 1) % 4) == 0 { + if i != 3 { + for _ in 0 .. 4 { + res *= res; + } + } + + let mut factor = table[0]; + for (j, candidate) in table[1 ..].iter().enumerate() { + let j = j + 1; + factor = Self::conditional_select(&factor, &candidate, usize::from(bits).ct_eq(&j)); + } + res *= factor; + bits = 0; + } + } + res + } + } + + impl Field for $FieldName { + const ZERO: Self = Self(Residue::ZERO); + const ONE: Self = Self(Residue::ONE); + + fn random(mut rng: impl RngCore) -> Self { + let mut bytes = [0; 64]; + rng.fill_bytes(&mut bytes); + $FieldName(Residue::new(&reduce(U512::from_le_slice(bytes.as_ref())))) + } + + fn square(&self) -> Self { + Self(self.0.square()) + } + fn double(&self) -> Self { + *self + self + } + + fn invert(&self) -> CtOption { + let res = self.0.invert(); + CtOption::new(Self(res.0), res.1.into()) + } + + fn sqrt(&self) -> CtOption { + // (p + 1) // 4, as valid since p % 4 == 3 + let mod_plus_one_div_four = $MODULUS.saturating_add(&U256::ONE).wrapping_div(&(4u8.into())); + let res = self.pow(Self($ResidueType::new_checked(&mod_plus_one_div_four).unwrap())); + CtOption::new(res, res.square().ct_eq(self)) + } + + fn sqrt_ratio(num: &Self, div: &Self) -> (Choice, Self) { + sqrt_ratio_generic(num, div) + } + } + + impl PrimeField for $FieldName { + type Repr = [u8; 32]; + + const MODULUS: &'static str = $MODULUS_STR; + + const NUM_BITS: u32 = $NUM_BITS; + const CAPACITY: u32 = $NUM_BITS - 1; + + const TWO_INV: Self = $FieldName($ResidueType::new(&U256::from_u8(2)).invert().0); + + const MULTIPLICATIVE_GENERATOR: Self = + Self(Residue::new(&U256::from_u8($MULTIPLICATIVE_GENERATOR))); + const S: u32 = $S; + + const ROOT_OF_UNITY: Self = $FieldName(Residue::new(&U256::from_be_hex($ROOT_OF_UNITY))); + const ROOT_OF_UNITY_INV: Self = Self(Self::ROOT_OF_UNITY.0.invert().0); + + const DELTA: Self = $FieldName(Residue::new(&U256::from_be_hex($DELTA))); + + fn from_repr(bytes: Self::Repr) -> CtOption { + let res = U256::from_le_slice(&bytes); + 
CtOption::new($FieldName(Residue::new(&res)), res.ct_lt(&$MODULUS)) + } + fn to_repr(&self) -> Self::Repr { + let mut repr = [0; 32]; + repr.copy_from_slice(&self.0.retrieve().to_le_bytes()); + repr + } + + fn is_odd(&self) -> Choice { + self.0.retrieve().is_odd() + } + } + + impl PrimeFieldBits for $FieldName { + type ReprBits = [u8; 32]; + + fn to_le_bits(&self) -> FieldBits { + self.to_repr().into() + } + + fn char_le_bits() -> FieldBits { + let mut repr = [0; 32]; + repr.copy_from_slice(&MODULUS.to_le_bytes()); + repr.into() + } + } + + impl Sum<$FieldName> for $FieldName { + fn sum>(iter: I) -> $FieldName { + let mut res = $FieldName::ZERO; + for item in iter { + res += item; + } + res + } + } + + impl<'a> Sum<&'a $FieldName> for $FieldName { + fn sum>(iter: I) -> $FieldName { + iter.cloned().sum() + } + } + + impl Product<$FieldName> for $FieldName { + fn product>(iter: I) -> $FieldName { + let mut res = $FieldName::ONE; + for item in iter { + res *= item; + } + res + } + } + + impl<'a> Product<&'a $FieldName> for $FieldName { + fn product>(iter: I) -> $FieldName { + iter.cloned().product() + } + } + }; +} diff --git a/crypto/evrf/embedwards25519/src/lib.rs b/crypto/evrf/embedwards25519/src/lib.rs new file mode 100644 index 00000000..c3ee6e1d --- /dev/null +++ b/crypto/evrf/embedwards25519/src/lib.rs @@ -0,0 +1,70 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(any(feature = "alloc", feature = "std"))] +use std_shims::io::{self, Read}; + +use generic_array::typenum::{Sum, Diff, Quot, U, U1, U2}; +use ciphersuite::group::{ff::PrimeField, Group}; + +#[macro_use] +mod backend; + +mod scalar; +pub use scalar::Scalar; + +pub use dalek_ff_group::Scalar as FieldElement; + +mod point; +pub use point::Point; + +/// Ciphersuite for Embedwards25519. +/// +/// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition +/// between the two. This means `dst: b"abc", data: b"def"`, will produce the same scalar as +/// `dst: "abcdef", data: b""`. Please use carefully, not letting dsts be substrings of each other. 
+#[derive(Clone, Copy, PartialEq, Eq, Debug, zeroize::Zeroize)] +pub struct Embedwards25519; +impl ciphersuite::Ciphersuite for Embedwards25519 { + type F = Scalar; + type G = Point; + type H = blake2::Blake2b512; + + const ID: &'static [u8] = b"embedwards25519"; + + fn generator() -> Self::G { + Point::generator() + } + + fn reduce_512(scalar: [u8; 64]) -> Self::F { + Scalar::wide_reduce(scalar) + } + + fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F { + use blake2::Digest; + Scalar::wide_reduce(Self::H::digest([dst, data].concat()).as_slice().try_into().unwrap()) + } + + // We override the provided impl, which compares against the reserialization, because + // we already require canonicity + #[cfg(any(feature = "alloc", feature = "std"))] + #[allow(non_snake_case)] + fn read_G(reader: &mut R) -> io::Result { + use ciphersuite::group::GroupEncoding; + + let mut encoding = ::Repr::default(); + reader.read_exact(encoding.as_mut())?; + + let point = Option::::from(Self::G::from_bytes(&encoding)) + .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "invalid point"))?; + Ok(point) + } +} + +impl generalized_bulletproofs_ec_gadgets::DiscreteLogParameters for Embedwards25519 { + type ScalarBits = U<{ Scalar::NUM_BITS as usize }>; + type XCoefficients = Quot, U2>; + type XCoefficientsMinusOne = Diff; + type YxCoefficients = Diff, U1>, U2>, U2>; +} diff --git a/crypto/evrf/embedwards25519/src/point.rs b/crypto/evrf/embedwards25519/src/point.rs new file mode 100644 index 00000000..19f95c6a --- /dev/null +++ b/crypto/evrf/embedwards25519/src/point.rs @@ -0,0 +1,419 @@ +use core::{ + ops::{DerefMut, Add, AddAssign, Neg, Sub, SubAssign, Mul, MulAssign}, + iter::Sum, +}; + +use rand_core::RngCore; + +use zeroize::Zeroize; +use subtle::{Choice, CtOption, ConstantTimeEq, ConditionallySelectable}; + +use ciphersuite::group::{ + ff::{Field, PrimeField, PrimeFieldBits}, + Group, GroupEncoding, + prime::PrimeGroup, +}; + +use crate::{backend::u8_from_bool, Scalar, FieldElement}; + +#[allow(non_snake_case)] +fn B() -> FieldElement { + FieldElement::from_repr(hex_literal::hex!( + "5f07603a853f20370b682036210d463e64903a23ea669d07ca26cfc13f594209" + )) + .unwrap() +} + +fn recover_y(x: FieldElement) -> CtOption { + // x**3 - 3 * x + B + ((x.square() * x) - (x.double() + x) + B()).sqrt() +} + +/// Point. 
+#[derive(Clone, Copy, Debug, Zeroize)] +#[repr(C)] +pub struct Point { + x: FieldElement, // / Z + y: FieldElement, // / Z + z: FieldElement, +} + +impl ConstantTimeEq for Point { + fn ct_eq(&self, other: &Self) -> Choice { + let x1 = self.x * other.z; + let x2 = other.x * self.z; + + let y1 = self.y * other.z; + let y2 = other.y * self.z; + + // Both identity or equivalent over their denominators + (self.z.is_zero() & other.z.is_zero()) | (x1.ct_eq(&x2) & y1.ct_eq(&y2)) + } +} + +impl PartialEq for Point { + fn eq(&self, other: &Point) -> bool { + self.ct_eq(other).into() + } +} + +impl Eq for Point {} + +impl ConditionallySelectable for Point { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Point { + x: FieldElement::conditional_select(&a.x, &b.x, choice), + y: FieldElement::conditional_select(&a.y, &b.y, choice), + z: FieldElement::conditional_select(&a.z, &b.z, choice), + } + } +} + +impl Add for Point { + type Output = Point; + #[allow(non_snake_case)] + fn add(self, other: Self) -> Self { + // add-2015-rcb + + let a = -FieldElement::from(3u64); + let B = B(); + let b3 = B + B + B; + + let X1 = self.x; + let Y1 = self.y; + let Z1 = self.z; + let X2 = other.x; + let Y2 = other.y; + let Z2 = other.z; + + let t0 = X1 * X2; + let t1 = Y1 * Y2; + let t2 = Z1 * Z2; + let t3 = X1 + Y1; + let t4 = X2 + Y2; + let t3 = t3 * t4; + let t4 = t0 + t1; + let t3 = t3 - t4; + let t4 = X1 + Z1; + let t5 = X2 + Z2; + let t4 = t4 * t5; + let t5 = t0 + t2; + let t4 = t4 - t5; + let t5 = Y1 + Z1; + let X3 = Y2 + Z2; + let t5 = t5 * X3; + let X3 = t1 + t2; + let t5 = t5 - X3; + let Z3 = a * t4; + let X3 = b3 * t2; + let Z3 = X3 + Z3; + let X3 = t1 - Z3; + let Z3 = t1 + Z3; + let Y3 = X3 * Z3; + let t1 = t0 + t0; + let t1 = t1 + t0; + let t2 = a * t2; + let t4 = b3 * t4; + let t1 = t1 + t2; + let t2 = t0 - t2; + let t2 = a * t2; + let t4 = t4 + t2; + let t0 = t1 * t4; + let Y3 = Y3 + t0; + let t0 = t5 * t4; + let X3 = t3 * X3; + let X3 = X3 - t0; + let t0 = t3 * t1; + let Z3 = t5 * Z3; + let Z3 = Z3 + t0; + Point { x: X3, y: Y3, z: Z3 } + } +} + +impl AddAssign for Point { + fn add_assign(&mut self, other: Point) { + *self = *self + other; + } +} + +impl Add<&Point> for Point { + type Output = Point; + fn add(self, other: &Point) -> Point { + self + *other + } +} + +impl AddAssign<&Point> for Point { + fn add_assign(&mut self, other: &Point) { + *self += *other; + } +} + +impl Neg for Point { + type Output = Point; + fn neg(self) -> Self { + Point { x: self.x, y: -self.y, z: self.z } + } +} + +impl Sub for Point { + type Output = Point; + #[allow(clippy::suspicious_arithmetic_impl)] + fn sub(self, other: Self) -> Self { + self + other.neg() + } +} + +impl SubAssign for Point { + fn sub_assign(&mut self, other: Point) { + *self = *self - other; + } +} + +impl Sub<&Point> for Point { + type Output = Point; + fn sub(self, other: &Point) -> Point { + self - *other + } +} + +impl SubAssign<&Point> for Point { + fn sub_assign(&mut self, other: &Point) { + *self -= *other; + } +} + +impl Group for Point { + type Scalar = Scalar; + fn random(mut rng: impl RngCore) -> Self { + loop { + let mut bytes = [0; 32]; + rng.fill_bytes(bytes.as_mut()); + let opt = Self::from_bytes(&bytes); + if opt.is_some().into() { + return opt.unwrap(); + } + } + } + fn identity() -> Self { + Point { x: FieldElement::ZERO, y: FieldElement::ONE, z: FieldElement::ZERO } + } + fn generator() -> Self { + // Point with the lowest valid x-coordinate + Point { + x: FieldElement::from_repr(hex_literal::hex!( + 
"0100000000000000000000000000000000000000000000000000000000000000" + )) + .unwrap(), + y: FieldElement::from_repr(hex_literal::hex!( + "2e4118080a484a3dfbafe2199a0e36b7193581d676c0dadfa376b0265616020c" + )) + .unwrap(), + z: FieldElement::ONE, + } + } + fn is_identity(&self) -> Choice { + self.z.ct_eq(&FieldElement::ZERO) + } + #[allow(non_snake_case)] + fn double(&self) -> Self { + // dbl-2007-bl-2 + let X1 = self.x; + let Y1 = self.y; + let Z1 = self.z; + + let w = (X1 - Z1) * (X1 + Z1); + let w = w.double() + w; + let s = (Y1 * Z1).double(); + let ss = s.square(); + let sss = s * ss; + let R = Y1 * s; + let RR = R.square(); + let B_ = (X1 * R).double(); + let h = w.square() - B_.double(); + let X3 = h * s; + let Y3 = w * (B_ - h) - RR.double(); + let Z3 = sss; + + let res = Self { x: X3, y: Y3, z: Z3 }; + // If self is identity, res will not be well-formed + // Accordingly, we return self if self was the identity + Self::conditional_select(&res, self, self.is_identity()) + } +} + +impl Sum for Point { + fn sum>(iter: I) -> Point { + let mut res = Self::identity(); + for i in iter { + res += i; + } + res + } +} + +impl<'a> Sum<&'a Point> for Point { + fn sum>(iter: I) -> Point { + Point::sum(iter.cloned()) + } +} + +impl Mul for Point { + type Output = Point; + fn mul(self, mut other: Scalar) -> Point { + // Precompute the optimal amount that's a multiple of 2 + let mut table = [Point::identity(); 16]; + table[1] = self; + for i in 2 .. 16 { + table[i] = table[i - 1] + self; + } + + let mut res = Self::identity(); + let mut bits = 0; + for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() { + bits <<= 1; + let mut bit = u8_from_bool(bit.deref_mut()); + bits |= bit; + bit.zeroize(); + + if ((i + 1) % 4) == 0 { + if i != 3 { + for _ in 0 .. 
4 { + res = res.double(); + } + } + + let mut term = table[0]; + for (j, candidate) in table[1 ..].iter().enumerate() { + let j = j + 1; + term = Self::conditional_select(&term, candidate, usize::from(bits).ct_eq(&j)); + } + res += term; + bits = 0; + } + } + other.zeroize(); + res + } +} + +impl MulAssign for Point { + fn mul_assign(&mut self, other: Scalar) { + *self = *self * other; + } +} + +impl Mul<&Scalar> for Point { + type Output = Point; + fn mul(self, other: &Scalar) -> Point { + self * *other + } +} + +impl MulAssign<&Scalar> for Point { + fn mul_assign(&mut self, other: &Scalar) { + *self *= *other; + } +} + +impl GroupEncoding for Point { + type Repr = [u8; 32]; + + fn from_bytes(bytes: &Self::Repr) -> CtOption { + // Extract and clear the sign bit + let mut bytes = *bytes; + let sign = Choice::from(bytes[31] >> 7); + bytes[31] &= u8::MAX >> 1; + + // Parse x, recover y + FieldElement::from_repr(bytes).and_then(|x| { + let is_identity = x.is_zero(); + + let y = recover_y(x).map(|mut y| { + y = <_>::conditional_select(&y, &-y, y.is_odd().ct_eq(&!sign)); + y + }); + + // If this the identity, set y to 1 + let y = + CtOption::conditional_select(&y, &CtOption::new(FieldElement::ONE, 1.into()), is_identity); + // If this the identity, set y to 1 and z to 0 (instead of 1) + let z = <_>::conditional_select(&FieldElement::ONE, &FieldElement::ZERO, is_identity); + // Create the point if we have a y solution + let point = y.map(|y| Point { x, y, z }); + + let not_negative_zero = !(is_identity & sign); + // Only return the point if it isn't -0 + CtOption::conditional_select( + &CtOption::new(Point::identity(), 0.into()), + &point, + not_negative_zero, + ) + }) + } + + fn from_bytes_unchecked(bytes: &Self::Repr) -> CtOption { + Point::from_bytes(bytes) + } + + fn to_bytes(&self) -> Self::Repr { + let Some(z) = Option::::from(self.z.invert()) else { + return [0; 32]; + }; + let x = self.x * z; + let y = self.y * z; + + let mut res = [0; 32]; + res.as_mut().copy_from_slice(&x.to_repr()); + + // The following conditional select normalizes the sign to 0 when x is 0 + let y_sign = u8::conditional_select(&y.is_odd().unwrap_u8(), &0, x.ct_eq(&FieldElement::ZERO)); + res[31] |= y_sign << 7; + res + } +} + +impl PrimeGroup for Point {} + +impl ec_divisors::DivisorCurve for Point { + type FieldElement = FieldElement; + + fn a() -> Self::FieldElement { + -FieldElement::from(3u64) + } + fn b() -> Self::FieldElement { + B() + } + + fn to_xy(point: Self) -> Option<(Self::FieldElement, Self::FieldElement)> { + let z: Self::FieldElement = Option::from(point.z.invert())?; + Some((point.x * z, point.y * z)) + } +} + +#[test] +fn test_curve() { + ff_group_tests::group::test_prime_group_bits::<_, Point>(&mut rand_core::OsRng); +} + +#[test] +fn generator() { + assert_eq!( + Point::generator(), + Point::from_bytes(&hex_literal::hex!( + "0100000000000000000000000000000000000000000000000000000000000000" + )) + .unwrap() + ); +} + +#[test] +fn zero_x_is_invalid() { + assert!(Option::::from(recover_y(FieldElement::ZERO)).is_none()); +} + +// Checks random won't infinitely loop +#[test] +fn random() { + Point::random(&mut rand_core::OsRng); +} diff --git a/crypto/evrf/embedwards25519/src/scalar.rs b/crypto/evrf/embedwards25519/src/scalar.rs new file mode 100644 index 00000000..f2d6e61f --- /dev/null +++ b/crypto/evrf/embedwards25519/src/scalar.rs @@ -0,0 +1,52 @@ +use zeroize::{DefaultIsZeroes, Zeroize}; + +use crypto_bigint::{ + U256, U512, + modular::constant_mod::{ResidueParams, Residue}, +}; + +const 
MODULUS_STR: &str = "0fffffffffffffffffffffffffffffffe53f4debb78ff96877063f0306eef96b"; + +impl_modulus!(EmbedwardsQ, U256, MODULUS_STR); +type ResidueType = Residue; + +/// The Scalar field of Embedwards25519. +/// +/// This is the prime field with modulus `q` from the README, the order of the embedwards25519 group. +#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)] +#[repr(C)] +pub struct Scalar(pub(crate) ResidueType); + +impl DefaultIsZeroes for Scalar {} + +pub(crate) const MODULUS: U256 = U256::from_be_hex(MODULUS_STR); + +const WIDE_MODULUS: U512 = U512::from_be_hex(concat!( + "0000000000000000000000000000000000000000000000000000000000000000", + "0fffffffffffffffffffffffffffffffe53f4debb78ff96877063f0306eef96b", +)); + +field!( + Scalar, + ResidueType, + MODULUS_STR, + MODULUS, + WIDE_MODULUS, + 252, + 10, + 1, + "0fffffffffffffffffffffffffffffffe53f4debb78ff96877063f0306eef96a", + "0000000000000000000000000000000000000000000000000000000000000064", +); + +impl Scalar { + /// Perform a wide reduction, presumably to obtain a non-biased Scalar field element. + pub fn wide_reduce(bytes: [u8; 64]) -> Scalar { + Scalar(Residue::new(&reduce(U512::from_le_slice(bytes.as_ref())))) + } +} + +#[test] +fn test_scalar_field() { + ff_group_tests::prime_field::test_prime_field_bits::<_, Scalar>(&mut rand_core::OsRng); +} diff --git a/crypto/evrf/generalized-bulletproofs/Cargo.toml b/crypto/evrf/generalized-bulletproofs/Cargo.toml new file mode 100644 index 00000000..1b7ad7b0 --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "generalized-bulletproofs" +version = "0.1.0" +description = "Generalized Bulletproofs" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/crypto/generalized-bulletproofs" +authors = ["Luke Parker "] +keywords = ["ciphersuite", "ff", "group"] +edition = "2021" +rust-version = "1.69" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[dependencies] +std-shims = { path = "../../../common/std-shims", version = "^0.1.1", default-features = false } + +rand_core = { version = "0.6", default-features = false } + +zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] } + +blake2 = { version = "0.10", default-features = false } + +multiexp = { path = "../../multiexp", version = "0.4", default-features = false, features = ["batch"] } +ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false } + +[dev-dependencies] +rand_core = { version = "0.6", features = ["getrandom"] } + +transcript = { package = "flexible-transcript", path = "../../transcript", features = ["recommended"] } + +ciphersuite = { path = "../../ciphersuite", features = ["ristretto"] } + +[features] +std = ["std-shims/std", "rand_core/std", "zeroize/std", "blake2/std", "multiexp/std", "ciphersuite/std"] +tests = ["std"] +default = ["std"] diff --git a/crypto/evrf/generalized-bulletproofs/LICENSE b/crypto/evrf/generalized-bulletproofs/LICENSE new file mode 100644 index 00000000..ad3c2fd5 --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021-2024 Luke Parker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom
the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crypto/evrf/generalized-bulletproofs/README.md b/crypto/evrf/generalized-bulletproofs/README.md new file mode 100644 index 00000000..da588b8d --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/README.md @@ -0,0 +1,6 @@ +# Generalized Bulletproofs + +An implementation of +[Generalized Bulletproofs](https://repo.getmonero.org/monero-project/ccs-proposals/uploads/a9baa50c38c6312efc0fea5c6a188bb9/gbp.pdf), +a variant of the Bulletproofs arithmetic circuit statement to support Pedersen +vector commitments. diff --git a/crypto/evrf/generalized-bulletproofs/src/arithmetic_circuit_proof.rs b/crypto/evrf/generalized-bulletproofs/src/arithmetic_circuit_proof.rs new file mode 100644 index 00000000..32a071ce --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/src/arithmetic_circuit_proof.rs @@ -0,0 +1,663 @@ +use std_shims::{vec, vec::Vec}; + +use rand_core::{RngCore, CryptoRng}; + +use zeroize::{Zeroize, ZeroizeOnDrop}; + +use multiexp::{multiexp, multiexp_vartime}; +use ciphersuite::{group::ff::Field, Ciphersuite}; + +use crate::{ + ScalarVector, PointVector, ProofGenerators, PedersenCommitment, PedersenVectorCommitment, + BatchVerifier, + transcript::*, + lincomb::accumulate_vector, + inner_product::{IpError, IpStatement, IpWitness, P}, +}; +pub use crate::lincomb::{Variable, LinComb}; + +/// An Arithmetic Circuit Statement. +/// +/// Bulletproofs' constraints are of the form +/// `aL * aR = aO, WL * aL + WR * aR + WO * aO = WV * V + c`. +/// +/// Generalized Bulletproofs modifies this to +/// `aL * aR = aO, WL * aL + WR * aR + WO * aO + WCG * C_G = WV * V + c`. +/// +/// We implement the latter, yet represented (for simplicity) as +/// `aL * aR = aO, WL * aL + WR * aR + WO * aO + WCG * C_G + WV * V + c = 0`. +#[derive(Clone, Debug)] +pub struct ArithmeticCircuitStatement<'a, C: Ciphersuite> { + generators: ProofGenerators<'a, C>, + + constraints: Vec>, + C: PointVector, + V: PointVector, +} + +impl Zeroize for ArithmeticCircuitStatement<'_, C> { + fn zeroize(&mut self) { + self.constraints.zeroize(); + self.C.zeroize(); + self.V.zeroize(); + } +} + +/// The witness for an arithmetic circuit statement. +#[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)] +pub struct ArithmeticCircuitWitness { + aL: ScalarVector, + aR: ScalarVector, + aO: ScalarVector, + + c: Vec>, + v: Vec>, +} + +/// An error incurred during arithmetic circuit proof operations. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum AcError { + /// The vectors of scalars which are multiplied against each other were of different lengths. + DifferingLrLengths, + /// The matrices of constraints are of different lengths. + InconsistentAmountOfConstraints, + /// A constraint referred to a non-existent term. + ConstrainedNonExistentTerm, + /// A constraint referred to a non-existent commitment. 
+ ConstrainedNonExistentCommitment, + /// There weren't enough generators to prove for this statement. + NotEnoughGenerators, + /// The witness was inconsistent to the statement. + /// + /// Sanity checks on the witness are always performed. If the library is compiled with debug + /// assertions on, the satisfaction of all constraints and validity of the commitmentsd is + /// additionally checked. + InconsistentWitness, + /// There was an error from the inner-product proof. + Ip(IpError), + /// The proof wasn't complete and the necessary values could not be read from the transcript. + IncompleteProof, +} + +impl ArithmeticCircuitWitness { + /// Constructs a new witness instance. + pub fn new( + aL: ScalarVector, + aR: ScalarVector, + c: Vec>, + v: Vec>, + ) -> Result { + if aL.len() != aR.len() { + Err(AcError::DifferingLrLengths)?; + } + + // The Pedersen Vector Commitments don't have their variables' lengths checked as they aren't + // paired off with each other as aL, aR are + + // The PVC commit function ensures there's enough generators for their amount of terms + // If there aren't enough/the same generators when this is proven for, it'll trigger + // InconsistentWitness + + let aO = aL.clone() * &aR; + Ok(ArithmeticCircuitWitness { aL, aR, aO, c, v }) + } +} + +struct YzChallenges { + y_inv: ScalarVector, + z: ScalarVector, +} + +impl<'a, C: Ciphersuite> ArithmeticCircuitStatement<'a, C> { + // The amount of multiplications performed. + fn n(&self) -> usize { + self.generators.len() + } + + // The amount of constraints. + fn q(&self) -> usize { + self.constraints.len() + } + + // The amount of Pedersen vector commitments. + fn c(&self) -> usize { + self.C.len() + } + + // The amount of Pedersen commitments. + fn m(&self) -> usize { + self.V.len() + } + + /// Create a new ArithmeticCircuitStatement for the specified relationship. + /// + /// The `LinComb`s passed as `constraints` will be bound to evaluate to 0. + /// + /// The constraints are not transcripted. They're expected to be deterministic from the context + /// and higher-level statement. If your constraints are variable, you MUST transcript them before + /// calling prove/verify. + /// + /// The commitments are expected to have been transcripted extenally to this statement's + /// invocation. That's practically ensured by taking a `Commitments` struct here, which is only + /// obtainable via a transcript. + pub fn new( + generators: ProofGenerators<'a, C>, + constraints: Vec>, + commitments: Commitments, + ) -> Result { + let Commitments { C, V } = commitments; + + for constraint in &constraints { + if Some(generators.len()) <= constraint.highest_a_index { + Err(AcError::ConstrainedNonExistentTerm)?; + } + if Some(C.len()) <= constraint.highest_c_index { + Err(AcError::ConstrainedNonExistentCommitment)?; + } + if Some(V.len()) <= constraint.highest_v_index { + Err(AcError::ConstrainedNonExistentCommitment)?; + } + } + + Ok(Self { generators, constraints, C, V }) + } + + fn yz_challenges(&self, y: C::F, z_1: C::F) -> YzChallenges { + let y_inv = y.invert().unwrap(); + let y_inv = ScalarVector::powers(y_inv, self.n()); + + // Powers of z *starting with z**1* + // We could reuse powers and remove the first element, yet this is cheaper than the shift that + // would require + let q = self.q(); + let mut z = ScalarVector(Vec::with_capacity(q)); + z.0.push(z_1); + for _ in 1 .. q { + z.0.push(*z.0.last().unwrap() * z_1); + } + z.0.truncate(q); + + YzChallenges { y_inv, z } + } + + /// Prove for this statement/witness. 
+ pub fn prove( + self, + rng: &mut R, + transcript: &mut Transcript, + mut witness: ArithmeticCircuitWitness, + ) -> Result<(), AcError> { + let n = self.n(); + let c = self.c(); + let m = self.m(); + + // Check the witness length and pad it to the necessary power of two + if witness.aL.len() > n { + Err(AcError::NotEnoughGenerators)?; + } + while witness.aL.len() < n { + witness.aL.0.push(C::F::ZERO); + witness.aR.0.push(C::F::ZERO); + witness.aO.0.push(C::F::ZERO); + } + for c in &mut witness.c { + if c.g_values.len() > n { + Err(AcError::NotEnoughGenerators)?; + } + // The Pedersen vector commitments internally have n terms + while c.g_values.len() < n { + c.g_values.0.push(C::F::ZERO); + } + } + + // Check the witness's consistency with the statement + if (c != witness.c.len()) || (m != witness.v.len()) { + Err(AcError::InconsistentWitness)?; + } + + #[cfg(debug_assertions)] + { + for (commitment, opening) in self.V.0.iter().zip(witness.v.iter()) { + if *commitment != opening.commit(self.generators.g(), self.generators.h()) { + Err(AcError::InconsistentWitness)?; + } + } + for (commitment, opening) in self.C.0.iter().zip(witness.c.iter()) { + if Some(*commitment) != opening.commit(self.generators.g_bold_slice(), self.generators.h()) + { + Err(AcError::InconsistentWitness)?; + } + } + for constraint in &self.constraints { + let eval = + constraint + .WL + .iter() + .map(|(i, weight)| *weight * witness.aL[*i]) + .chain(constraint.WR.iter().map(|(i, weight)| *weight * witness.aR[*i])) + .chain(constraint.WO.iter().map(|(i, weight)| *weight * witness.aO[*i])) + .chain( + constraint.WCG.iter().zip(&witness.c).flat_map(|(weights, c)| { + weights.iter().map(|(j, weight)| *weight * c.g_values[*j]) + }), + ) + .chain(constraint.WV.iter().map(|(i, weight)| *weight * witness.v[*i].value)) + .chain(core::iter::once(constraint.c)) + .sum::(); + + if eval != C::F::ZERO { + Err(AcError::InconsistentWitness)?; + } + } + } + + let alpha = C::F::random(&mut *rng); + let beta = C::F::random(&mut *rng); + let rho = C::F::random(&mut *rng); + + let AI = { + let alg = witness.aL.0.iter().enumerate().map(|(i, aL)| (*aL, self.generators.g_bold(i))); + let arh = witness.aR.0.iter().enumerate().map(|(i, aR)| (*aR, self.generators.h_bold(i))); + let ah = core::iter::once((alpha, self.generators.h())); + let mut AI_terms = alg.chain(arh).chain(ah).collect::>(); + let AI = multiexp(&AI_terms); + AI_terms.zeroize(); + AI + }; + let AO = { + let aog = witness.aO.0.iter().enumerate().map(|(i, aO)| (*aO, self.generators.g_bold(i))); + let bh = core::iter::once((beta, self.generators.h())); + let mut AO_terms = aog.chain(bh).collect::>(); + let AO = multiexp(&AO_terms); + AO_terms.zeroize(); + AO + }; + + let mut sL = ScalarVector(Vec::with_capacity(n)); + let mut sR = ScalarVector(Vec::with_capacity(n)); + for _ in 0 .. 
n { + sL.0.push(C::F::random(&mut *rng)); + sR.0.push(C::F::random(&mut *rng)); + } + let S = { + let slg = sL.0.iter().enumerate().map(|(i, sL)| (*sL, self.generators.g_bold(i))); + let srh = sR.0.iter().enumerate().map(|(i, sR)| (*sR, self.generators.h_bold(i))); + let rh = core::iter::once((rho, self.generators.h())); + let mut S_terms = slg.chain(srh).chain(rh).collect::>(); + let S = multiexp(&S_terms); + S_terms.zeroize(); + S + }; + + transcript.push_point(AI); + transcript.push_point(AO); + transcript.push_point(S); + let y = transcript.challenge::(); + let z = transcript.challenge::(); + let YzChallenges { y_inv, z } = self.yz_challenges(y, z); + let y = ScalarVector::powers(y, n); + + // t is a n'-term polynomial + // While Bulletproofs discuss it as a 6-term polynomial, Generalized Bulletproofs re-defines it + // as `2(n' + 1)`-term, where `n'` is `2 (c + 1)`. + // When `c = 0`, `n' = 2`, and t is `6` (which lines up with Bulletproofs having a 6-term + // polynomial). + + // ni = n' + let ni = 2 + (2 * (c / 2)); + // These indexes are from the Generalized Bulletproofs paper + #[rustfmt::skip] + let ilr = ni / 2; // 1 if c = 0 + #[rustfmt::skip] + let io = ni; // 2 if c = 0 + #[rustfmt::skip] + let is = ni + 1; // 3 if c = 0 + #[rustfmt::skip] + let jlr = ni / 2; // 1 if c = 0 + #[rustfmt::skip] + let jo = 0; // 0 if c = 0 + #[rustfmt::skip] + let js = ni + 1; // 3 if c = 0 + + // If c = 0, these indexes perfectly align with the stated powers of X from the Bulletproofs + // paper for the following coefficients + + // Declare the l and r polynomials, assigning the traditional coefficients to their positions + let mut l = vec![]; + let mut r = vec![]; + for _ in 0 .. (is + 1) { + l.push(ScalarVector::new(0)); + r.push(ScalarVector::new(0)); + } + + let mut l_weights = ScalarVector::new(n); + let mut r_weights = ScalarVector::new(n); + let mut o_weights = ScalarVector::new(n); + for (constraint, z) in self.constraints.iter().zip(&z.0) { + accumulate_vector(&mut l_weights, &constraint.WL, *z); + accumulate_vector(&mut r_weights, &constraint.WR, *z); + accumulate_vector(&mut o_weights, &constraint.WO, *z); + } + + l[ilr] = (r_weights * &y_inv) + &witness.aL; + l[io] = witness.aO.clone(); + l[is] = sL; + r[jlr] = l_weights + &(witness.aR.clone() * &y); + r[jo] = o_weights - &y; + r[js] = sR * &y; + + // Pad as expected + for l in &mut l { + debug_assert!((l.len() == 0) || (l.len() == n)); + if l.len() == 0 { + *l = ScalarVector::new(n); + } + } + for r in &mut r { + debug_assert!((r.len() == 0) || (r.len() == n)); + if r.len() == 0 { + *r = ScalarVector::new(n); + } + } + + // We now fill in the vector commitments + // We use unused coefficients of l increasing from 0 (skipping ilr), and unused coefficients of + // r decreasing from n' (skipping jlr) + + let mut cg_weights = Vec::with_capacity(witness.c.len()); + for i in 0 .. 
witness.c.len() { + let mut cg = ScalarVector::new(n); + for (constraint, z) in self.constraints.iter().zip(&z.0) { + if let Some(WCG) = constraint.WCG.get(i) { + accumulate_vector(&mut cg, WCG, *z); + } + } + cg_weights.push(cg); + } + + for (mut i, (c, cg_weights)) in witness.c.iter().zip(cg_weights).enumerate() { + if i >= ilr { + i += 1; + } + // Because i has skipped ilr, j will skip jlr + let j = ni - i; + + l[i] = c.g_values.clone(); + r[j] = cg_weights; + } + + // Multiply them to obtain t + let mut t = ScalarVector::new(1 + (2 * (l.len() - 1))); + for (i, l) in l.iter().enumerate() { + for (j, r) in r.iter().enumerate() { + let new_coeff = i + j; + t[new_coeff] += l.inner_product(r.0.iter()); + } + } + + // Per Bulletproofs, calculate masks tau for each t where (i > 0) && (i != 2) + // Per Generalized Bulletproofs, calculate masks tau for each t where i != n' + // With Bulletproofs, t[0] is zero, hence its omission, yet Generalized Bulletproofs uses it + let mut tau_before_ni = vec![]; + for _ in 0 .. ni { + tau_before_ni.push(C::F::random(&mut *rng)); + } + let mut tau_after_ni = vec![]; + for _ in 0 .. t.0[(ni + 1) ..].len() { + tau_after_ni.push(C::F::random(&mut *rng)); + } + // Calculate commitments to the coefficients of t, blinded by tau + debug_assert_eq!(t.0[0 .. ni].len(), tau_before_ni.len()); + for (t, tau) in t.0[0 .. ni].iter().zip(tau_before_ni.iter()) { + transcript.push_point(multiexp(&[(*t, self.generators.g()), (*tau, self.generators.h())])); + } + debug_assert_eq!(t.0[(ni + 1) ..].len(), tau_after_ni.len()); + for (t, tau) in t.0[(ni + 1) ..].iter().zip(tau_after_ni.iter()) { + transcript.push_point(multiexp(&[(*t, self.generators.g()), (*tau, self.generators.h())])); + } + + let x: ScalarVector = ScalarVector::powers(transcript.challenge::(), t.len()); + + let poly_eval = |poly: &[ScalarVector], x: &ScalarVector<_>| -> ScalarVector<_> { + let mut res = ScalarVector::::new(poly[0].0.len()); + for (i, coeff) in poly.iter().enumerate() { + res = res + &(coeff.clone() * x[i]); + } + res + }; + let l = poly_eval(&l, &x); + let r = poly_eval(&r, &x); + + let t_caret = l.inner_product(r.0.iter()); + + let mut V_weights = ScalarVector::new(self.V.len()); + for (constraint, z) in self.constraints.iter().zip(&z.0) { + // We use `-z`, not `z`, as we write our constraint as `... 
+ WV V = 0` not `= WV V + ..` + // This means we need to subtract `WV V` from both sides, which we accomplish here + accumulate_vector(&mut V_weights, &constraint.WV, -*z); + } + + let tau_x = { + let mut tau_x_poly = vec![]; + tau_x_poly.extend(tau_before_ni); + tau_x_poly.push(V_weights.inner_product(witness.v.iter().map(|v| &v.mask))); + tau_x_poly.extend(tau_after_ni); + + let mut tau_x = C::F::ZERO; + for (i, coeff) in tau_x_poly.into_iter().enumerate() { + tau_x += coeff * x[i]; + } + tau_x + }; + + // Calculate u for the powers of x variable to ilr/io/is + let u = { + // Calculate the first part of u + let mut u = (alpha * x[ilr]) + (beta * x[io]) + (rho * x[is]); + + // Incorporate the commitment masks multiplied by the associated power of x + for (mut i, commitment) in witness.c.iter().enumerate() { + // If this index is ni / 2, skip it + if i >= (ni / 2) { + i += 1; + } + u += x[i] * commitment.mask; + } + u + }; + + // Use the Inner-Product argument to prove for this + // P = t_caret * g + l * g_bold + r * (y_inv * h_bold) + + let mut P_terms = Vec::with_capacity(1 + (2 * self.generators.len())); + debug_assert_eq!(l.len(), r.len()); + for (i, (l, r)) in l.0.iter().zip(r.0.iter()).enumerate() { + P_terms.push((*l, self.generators.g_bold(i))); + P_terms.push((y_inv[i] * r, self.generators.h_bold(i))); + } + + // Protocol 1, inlined, since our IpStatement is for Protocol 2 + transcript.push_scalar(tau_x); + transcript.push_scalar(u); + transcript.push_scalar(t_caret); + let ip_x = transcript.challenge::(); + P_terms.push((ip_x * t_caret, self.generators.g())); + IpStatement::new( + self.generators, + y_inv, + ip_x, + // Safe since IpStatement isn't a ZK proof + P::Prover(multiexp_vartime(&P_terms)), + ) + .unwrap() + .prove(transcript, IpWitness::new(l, r).unwrap()) + .map_err(AcError::Ip) + } + + /// Verify a proof for this statement. + /// + /// This solely queues the statement for batch verification. The resulting BatchVerifier MUST + /// still be verified. + /// + /// If this proof returns an error, the BatchVerifier MUST be assumed corrupted and discarded. 
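+  ///
+  /// A hedged sketch of the verification flow, mirroring the tests (names are illustrative):
+  ///
+  /// ```ignore
+  /// let mut verifier = Generators::batch_verifier();
+  /// let mut transcript = VerifierTranscript::new(context, &proof);
+  /// let commitments = transcript.read_commitments(amount_of_c, amount_of_v)?;
+  /// ArithmeticCircuitStatement::new(generators.reduce(n).unwrap(), constraints, commitments)?
+  ///   .verify(&mut OsRng, &mut verifier, &mut transcript)?;
+  /// assert!(generators.verify(verifier));
+  /// ```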
+ pub fn verify( + self, + rng: &mut R, + verifier: &mut BatchVerifier, + transcript: &mut VerifierTranscript, + ) -> Result<(), AcError> { + if verifier.g_bold.len() < self.generators.len() { + verifier.g_bold.resize(self.generators.len(), C::F::ZERO); + verifier.h_bold.resize(self.generators.len(), C::F::ZERO); + verifier.h_sum.resize(self.generators.len(), C::F::ZERO); + } + + let n = self.n(); + let c = self.c(); + + let ni = 2 + (2 * (c / 2)); + + let ilr = ni / 2; + let io = ni; + let is = ni + 1; + let jlr = ni / 2; + + let l_r_poly_len = 1 + ni + 1; + let t_poly_len = (2 * l_r_poly_len) - 1; + + let AI = transcript.read_point::().map_err(|_| AcError::IncompleteProof)?; + let AO = transcript.read_point::().map_err(|_| AcError::IncompleteProof)?; + let S = transcript.read_point::().map_err(|_| AcError::IncompleteProof)?; + let y = transcript.challenge::(); + let z = transcript.challenge::(); + let YzChallenges { y_inv, z } = self.yz_challenges(y, z); + + let mut l_weights = ScalarVector::new(n); + let mut r_weights = ScalarVector::new(n); + let mut o_weights = ScalarVector::new(n); + for (constraint, z) in self.constraints.iter().zip(&z.0) { + accumulate_vector(&mut l_weights, &constraint.WL, *z); + accumulate_vector(&mut r_weights, &constraint.WR, *z); + accumulate_vector(&mut o_weights, &constraint.WO, *z); + } + let r_weights = r_weights * &y_inv; + + let delta = r_weights.inner_product(l_weights.0.iter()); + + let mut T_before_ni = Vec::with_capacity(ni); + let mut T_after_ni = Vec::with_capacity(t_poly_len - ni - 1); + for _ in 0 .. ni { + T_before_ni.push(transcript.read_point::().map_err(|_| AcError::IncompleteProof)?); + } + for _ in 0 .. (t_poly_len - ni - 1) { + T_after_ni.push(transcript.read_point::().map_err(|_| AcError::IncompleteProof)?); + } + let x: ScalarVector = ScalarVector::powers(transcript.challenge::(), t_poly_len); + + let tau_x = transcript.read_scalar::().map_err(|_| AcError::IncompleteProof)?; + let u = transcript.read_scalar::().map_err(|_| AcError::IncompleteProof)?; + let t_caret = transcript.read_scalar::().map_err(|_| AcError::IncompleteProof)?; + + // Lines 88-90, modified per Generalized Bulletproofs as needed w.r.t. t + { + let verifier_weight = C::F::random(&mut *rng); + // lhs of the equation, weighted to enable batch verification + verifier.g += t_caret * verifier_weight; + verifier.h += tau_x * verifier_weight; + + let mut V_weights = ScalarVector::new(self.V.len()); + for (constraint, z) in self.constraints.iter().zip(&z.0) { + // We use `-z`, not `z`, as we write our constraint as `... 
+ WV V = 0` not `= WV V + ..` + // This means we need to subtract `WV V` from both sides, which we accomplish here + accumulate_vector(&mut V_weights, &constraint.WV, -*z); + } + V_weights = V_weights * x[ni]; + + // rhs of the equation, negated to cause a sum to zero + // `delta - z...`, instead of `delta + z...`, is done for the same reason as in the above WV + // matrix transform + verifier.g -= verifier_weight * + x[ni] * + (delta - z.inner_product(self.constraints.iter().map(|constraint| &constraint.c))); + for pair in V_weights.0.into_iter().zip(self.V.0) { + verifier.additional.push((-verifier_weight * pair.0, pair.1)); + } + for (i, T) in T_before_ni.into_iter().enumerate() { + verifier.additional.push((-verifier_weight * x[i], T)); + } + for (i, T) in T_after_ni.into_iter().enumerate() { + verifier.additional.push((-verifier_weight * x[ni + 1 + i], T)); + } + } + + let verifier_weight = C::F::random(&mut *rng); + // Multiply `x` by `verifier_weight` as this effects `verifier_weight` onto most scalars and + // saves a notable amount of operations + let x = x * verifier_weight; + + // This following block effectively calculates P, within the multiexp + { + verifier.additional.push((x[ilr], AI)); + verifier.additional.push((x[io], AO)); + // h' ** y is equivalent to h as h' is h ** y_inv + let mut log2_n = 0; + while (1 << log2_n) != n { + log2_n += 1; + } + verifier.h_sum[log2_n] -= verifier_weight; + verifier.additional.push((x[is], S)); + + // Lines 85-87 calculate WL, WR, WO + // We preserve them in terms of g_bold and h_bold for a more efficient multiexp + let mut h_bold_scalars = l_weights * x[jlr]; + for (i, wr) in (r_weights * x[jlr]).0.into_iter().enumerate() { + verifier.g_bold[i] += wr; + } + // WO is weighted by x**jo where jo == 0, hence why we can ignore the x term + h_bold_scalars = h_bold_scalars + &(o_weights * verifier_weight); + + let mut cg_weights = Vec::with_capacity(self.C.len()); + for i in 0 .. 
self.C.len() { + let mut cg = ScalarVector::new(n); + for (constraint, z) in self.constraints.iter().zip(&z.0) { + if let Some(WCG) = constraint.WCG.get(i) { + accumulate_vector(&mut cg, WCG, *z); + } + } + cg_weights.push(cg); + } + + // Push the terms for C, which increment from 0, and the terms for WC, which decrement from + // n' + for (mut i, (C, WCG)) in self.C.0.into_iter().zip(cg_weights).enumerate() { + if i >= (ni / 2) { + i += 1; + } + let j = ni - i; + verifier.additional.push((x[i], C)); + h_bold_scalars = h_bold_scalars + &(WCG * x[j]); + } + + // All terms for h_bold here have actually been for h_bold', h_bold * y_inv + h_bold_scalars = h_bold_scalars * &y_inv; + for (i, scalar) in h_bold_scalars.0.into_iter().enumerate() { + verifier.h_bold[i] += scalar; + } + + // Remove u * h from P + verifier.h -= verifier_weight * u; + } + + // Prove for lines 88, 92 with an Inner-Product statement + // This inlines Protocol 1, as our IpStatement implements Protocol 2 + let ip_x = transcript.challenge::(); + // P is amended with this additional term + verifier.g += verifier_weight * ip_x * t_caret; + IpStatement::new(self.generators, y_inv, ip_x, P::Verifier { verifier_weight }) + .unwrap() + .verify(verifier, transcript) + .map_err(AcError::Ip)?; + + Ok(()) + } +} diff --git a/crypto/evrf/generalized-bulletproofs/src/inner_product.rs b/crypto/evrf/generalized-bulletproofs/src/inner_product.rs new file mode 100644 index 00000000..e7127e00 --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/src/inner_product.rs @@ -0,0 +1,370 @@ +use std_shims::{vec, vec::Vec}; + +use multiexp::multiexp_vartime; +use ciphersuite::{group::ff::Field, Ciphersuite}; + +#[rustfmt::skip] +use crate::{ScalarVector, PointVector, ProofGenerators, BatchVerifier, transcript::*, padded_pow_of_2}; + +/// An error from proving/verifying Inner-Product statements. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum IpError { + /// An incorrect amount of generators was provided. + IncorrectAmountOfGenerators, + /// The witness was inconsistent to the statement. + /// + /// Sanity checks on the witness are always performed. If the library is compiled with debug + /// assertions on, whether or not this witness actually opens `P` is checked. + InconsistentWitness, + /// The proof wasn't complete and the necessary values could not be read from the transcript. + IncompleteProof, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) enum P { + Verifier { verifier_weight: C::F }, + Prover(C::G), +} + +/// The Bulletproofs Inner-Product statement. +/// +/// This is for usage with Protocol 2 from the Bulletproofs paper. +#[derive(Clone, Debug)] +pub(crate) struct IpStatement<'a, C: Ciphersuite> { + generators: ProofGenerators<'a, C>, + // Weights for h_bold + h_bold_weights: ScalarVector, + // u as the discrete logarithm of G + u: C::F, + // P + P: P, +} + +/// The witness for the Bulletproofs Inner-Product statement. +#[derive(Clone, Debug)] +pub(crate) struct IpWitness { + // a + a: ScalarVector, + // b + b: ScalarVector, +} + +impl IpWitness { + /// Construct a new witness for an Inner-Product statement. + /// + /// If the witness is less than a power of two, it is padded to the nearest power of two. + /// + /// This functions return None if the lengths of a, b are mismatched or either are empty. 
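+  ///
+  /// As an illustrative example of the padding, a witness whose vectors have length 3 is padded
+  /// with zero scalars to length 4 (the next power of two), so the generators the statement is
+  /// proven with must cover exactly 4 terms.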
+ pub(crate) fn new(mut a: ScalarVector, mut b: ScalarVector) -> Option { + if a.0.is_empty() || (a.len() != b.len()) { + None?; + } + + // Pad to the nearest power of 2 + let missing = padded_pow_of_2(a.len()) - a.len(); + a.0.reserve(missing); + b.0.reserve(missing); + for _ in 0 .. missing { + a.0.push(C::F::ZERO); + b.0.push(C::F::ZERO); + } + + Some(Self { a, b }) + } +} + +impl<'a, C: Ciphersuite> IpStatement<'a, C> { + /// Create a new Inner-Product statement. + /// + /// This does not perform any transcripting of any variables within this statement. They must be + /// deterministic to the existing transcript. + pub(crate) fn new( + generators: ProofGenerators<'a, C>, + h_bold_weights: ScalarVector, + u: C::F, + P: P, + ) -> Result { + if generators.h_bold_slice().len() != h_bold_weights.len() { + Err(IpError::IncorrectAmountOfGenerators)? + } + Ok(Self { generators, h_bold_weights, u, P }) + } + + /// Prove for this Inner-Product statement. + /// + /// Returns an error if this statement couldn't be proven for (such as if the witness isn't + /// consistent). + pub(crate) fn prove( + self, + transcript: &mut Transcript, + witness: IpWitness, + ) -> Result<(), IpError> { + let (mut g_bold, mut h_bold, u, mut P, mut a, mut b) = { + let IpStatement { generators, h_bold_weights, u, P } = self; + let u = generators.g() * u; + + // Ensure we have the exact amount of generators + if generators.g_bold_slice().len() != witness.a.len() { + Err(IpError::IncorrectAmountOfGenerators)?; + } + // Acquire a local copy of the generators + let g_bold = PointVector::(generators.g_bold_slice().to_vec()); + let h_bold = PointVector::(generators.h_bold_slice().to_vec()).mul_vec(&h_bold_weights); + + let IpWitness { a, b } = witness; + + let P = match P { + P::Prover(point) => point, + P::Verifier { .. 
} => { + panic!("prove called with a P specification which was for the verifier") + } + }; + + // Ensure this witness actually opens this statement + #[cfg(debug_assertions)] + { + let ag = a.0.iter().cloned().zip(g_bold.0.iter().cloned()); + let bh = b.0.iter().cloned().zip(h_bold.0.iter().cloned()); + let cu = core::iter::once((a.inner_product(b.0.iter()), u)); + if P != multiexp_vartime(&ag.chain(bh).chain(cu).collect::>()) { + Err(IpError::InconsistentWitness)?; + } + } + + (g_bold, h_bold, u, P, a, b) + }; + + // `else: (n > 1)` case, lines 18-35 of the Bulletproofs paper + // This interprets `g_bold.len()` as `n` + while g_bold.len() > 1 { + // Split a, b, g_bold, h_bold as needed for lines 20-24 + let (a1, a2) = a.clone().split(); + let (b1, b2) = b.clone().split(); + + let (g_bold1, g_bold2) = g_bold.split(); + let (h_bold1, h_bold2) = h_bold.split(); + + let n_hat = g_bold1.len(); + + // Sanity + debug_assert_eq!(a1.len(), n_hat); + debug_assert_eq!(a2.len(), n_hat); + debug_assert_eq!(b1.len(), n_hat); + debug_assert_eq!(b2.len(), n_hat); + debug_assert_eq!(g_bold1.len(), n_hat); + debug_assert_eq!(g_bold2.len(), n_hat); + debug_assert_eq!(h_bold1.len(), n_hat); + debug_assert_eq!(h_bold2.len(), n_hat); + + // cl, cr, lines 21-22 + let cl = a1.inner_product(b2.0.iter()); + let cr = a2.inner_product(b1.0.iter()); + + let L = { + let mut L_terms = Vec::with_capacity(1 + (2 * g_bold1.len())); + for (a, g) in a1.0.iter().zip(g_bold2.0.iter()) { + L_terms.push((*a, *g)); + } + for (b, h) in b2.0.iter().zip(h_bold1.0.iter()) { + L_terms.push((*b, *h)); + } + L_terms.push((cl, u)); + // Uses vartime since this isn't a ZK proof + multiexp_vartime(&L_terms) + }; + + let R = { + let mut R_terms = Vec::with_capacity(1 + (2 * g_bold1.len())); + for (a, g) in a2.0.iter().zip(g_bold1.0.iter()) { + R_terms.push((*a, *g)); + } + for (b, h) in b1.0.iter().zip(h_bold2.0.iter()) { + R_terms.push((*b, *h)); + } + R_terms.push((cr, u)); + multiexp_vartime(&R_terms) + }; + + // Now that we've calculate L, R, transcript them to receive x (26-27) + transcript.push_point(L); + transcript.push_point(R); + let x: C::F = transcript.challenge::(); + let x_inv = x.invert().unwrap(); + + // The prover and verifier now calculate the following (28-31) + g_bold = PointVector(Vec::with_capacity(g_bold1.len())); + for (a, b) in g_bold1.0.into_iter().zip(g_bold2.0.into_iter()) { + g_bold.0.push(multiexp_vartime(&[(x_inv, a), (x, b)])); + } + h_bold = PointVector(Vec::with_capacity(h_bold1.len())); + for (a, b) in h_bold1.0.into_iter().zip(h_bold2.0.into_iter()) { + h_bold.0.push(multiexp_vartime(&[(x, a), (x_inv, b)])); + } + P = (L * (x * x)) + P + (R * (x_inv * x_inv)); + + // 32-34 + a = (a1 * x) + &(a2 * x_inv); + b = (b1 * x_inv) + &(b2 * x); + } + + // `if n = 1` case from line 14-17 + + // Sanity + debug_assert_eq!(g_bold.len(), 1); + debug_assert_eq!(h_bold.len(), 1); + debug_assert_eq!(a.len(), 1); + debug_assert_eq!(b.len(), 1); + + // We simply send a/b + transcript.push_scalar(a[0]); + transcript.push_scalar(b[0]); + Ok(()) + } + + /* + This has room for optimization worth investigating further. It currently takes + an iterative approach. It can be optimized further via divide and conquer. + + Assume there are 4 challenges. + + Iterative approach (current): + 1. Do the optimal multiplications across challenge column 0 and 1. + 2. Do the optimal multiplications across that result and column 2. + 3. Do the optimal multiplications across that result and column 3. 
+ + Divide and conquer (worth investigating further): + 1. Do the optimal multiplications across challenge column 0 and 1. + 2. Do the optimal multiplications across challenge column 2 and 3. + 3. Multiply both results together. + + When there are 4 challenges (n=16), the iterative approach does 28 multiplications + versus divide and conquer's 24. + */ + fn challenge_products(challenges: &[(C::F, C::F)]) -> Vec { + let mut products = vec![C::F::ONE; 1 << challenges.len()]; + + if !challenges.is_empty() { + products[0] = challenges[0].1; + products[1] = challenges[0].0; + + for (j, challenge) in challenges.iter().enumerate().skip(1) { + let mut slots = (1 << (j + 1)) - 1; + while slots > 0 { + products[slots] = products[slots / 2] * challenge.0; + products[slots - 1] = products[slots / 2] * challenge.1; + + slots = slots.saturating_sub(2); + } + } + + // Sanity check since if the above failed to populate, it'd be critical + for product in &products { + debug_assert!(!bool::from(product.is_zero())); + } + } + + products + } + + /// Queue an Inner-Product proof for batch verification. + /// + /// This will return Err if there is an error. This will return Ok if the proof was successfully + /// queued for batch verification. The caller is required to verify the batch in order to ensure + /// the proof is actually correct. + /// + /// If this proof returns an error, the BatchVerifier MUST be assumed corrupted and discarded. + pub(crate) fn verify( + self, + verifier: &mut BatchVerifier, + transcript: &mut VerifierTranscript, + ) -> Result<(), IpError> { + if verifier.g_bold.len() < self.generators.len() { + verifier.g_bold.resize(self.generators.len(), C::F::ZERO); + verifier.h_bold.resize(self.generators.len(), C::F::ZERO); + verifier.h_sum.resize(self.generators.len(), C::F::ZERO); + } + + let IpStatement { generators, h_bold_weights, u, P } = self; + + // Calculate the discrete log w.r.t. 2 for the amount of generators present + let mut lr_len = 0; + while (1 << lr_len) < generators.g_bold_slice().len() { + lr_len += 1; + } + + let weight = match P { + P::Prover(_) => panic!("prove called with a P specification which was for the prover"), + P::Verifier { verifier_weight } => verifier_weight, + }; + + // Again, we start with the `else: (n > 1)` case + + // We need x, x_inv per lines 25-27 for lines 28-31 + let mut L = Vec::with_capacity(lr_len); + let mut R = Vec::with_capacity(lr_len); + let mut xs: Vec = Vec::with_capacity(lr_len); + for _ in 0 .. 
lr_len { + L.push(transcript.read_point::().map_err(|_| IpError::IncompleteProof)?); + R.push(transcript.read_point::().map_err(|_| IpError::IncompleteProof)?); + xs.push(transcript.challenge::()); + } + + // We calculate their inverse in batch + let mut x_invs = xs.clone(); + { + let mut scratch = vec![C::F::ZERO; x_invs.len()]; + ciphersuite::group::ff::BatchInverter::invert_with_external_scratch( + &mut x_invs, + &mut scratch, + ); + } + + // Now, with x and x_inv, we need to calculate g_bold', h_bold', P' + // + // For the sake of performance, we solely want to calculate all of these in terms of scalings + // for g_bold, h_bold, P, and don't want to actually perform intermediary scalings of the + // points + // + // L and R are easy, as it's simply x**2, x**-2 + // + // For the series of g_bold, h_bold, we use the `challenge_products` function + // For how that works, please see its own documentation + let product_cache = { + let mut challenges = Vec::with_capacity(lr_len); + + let x_iter = xs.into_iter().zip(x_invs); + let lr_iter = L.into_iter().zip(R); + for ((x, x_inv), (L, R)) in x_iter.zip(lr_iter) { + challenges.push((x, x_inv)); + verifier.additional.push((weight * x.square(), L)); + verifier.additional.push((weight * x_inv.square(), R)); + } + + Self::challenge_products(&challenges) + }; + + // And now for the `if n = 1` case + let a = transcript.read_scalar::().map_err(|_| IpError::IncompleteProof)?; + let b = transcript.read_scalar::().map_err(|_| IpError::IncompleteProof)?; + let c = a * b; + + // The multiexp of these terms equate to the final permutation of P + // We now add terms for a * g_bold' + b * h_bold' b + c * u, with the scalars negative such + // that the terms sum to 0 for an honest prover + + // The g_bold * a term case from line 16 + #[allow(clippy::needless_range_loop)] + for i in 0 .. generators.g_bold_slice().len() { + verifier.g_bold[i] -= weight * product_cache[i] * a; + } + // The h_bold * b term case from line 16 + for i in 0 .. generators.h_bold_slice().len() { + verifier.h_bold[i] -= + weight * product_cache[product_cache.len() - 1 - i] * b * h_bold_weights[i]; + } + // The c * u term case from line 16 + verifier.g -= weight * c * u; + + Ok(()) + } +} diff --git a/crypto/evrf/generalized-bulletproofs/src/lib.rs b/crypto/evrf/generalized-bulletproofs/src/lib.rs new file mode 100644 index 00000000..d02c9e9c --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/src/lib.rs @@ -0,0 +1,336 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![cfg_attr(not(feature = "std"), no_std)] +#![deny(missing_docs)] +#![allow(non_snake_case)] + +use core::fmt; +use std_shims::{vec, vec::Vec, collections::HashSet}; + +use zeroize::Zeroize; + +use multiexp::{multiexp, multiexp_vartime}; +use ciphersuite::{ + group::{ff::Field, Group, GroupEncoding}, + Ciphersuite, +}; + +mod scalar_vector; +pub use scalar_vector::ScalarVector; +mod point_vector; +pub use point_vector::PointVector; + +/// The transcript formats. +pub mod transcript; + +pub(crate) mod inner_product; + +pub(crate) mod lincomb; + +/// The arithmetic circuit proof. +pub mod arithmetic_circuit_proof; + +/// Functionlity useful when testing. +#[cfg(any(test, feature = "tests"))] +pub mod tests; + +/// Calculate the nearest power of two greater than or equivalent to the argument. 
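+///
+/// As examples, `padded_pow_of_2(5)` and `padded_pow_of_2(8)` both return `8`, while
+/// `padded_pow_of_2(0)` and `padded_pow_of_2(1)` both return `1`.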
+pub(crate) fn padded_pow_of_2(i: usize) -> usize { + let mut next_pow_of_2 = 1; + while next_pow_of_2 < i { + next_pow_of_2 <<= 1; + } + next_pow_of_2 +} + +/// An error from working with generators. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum GeneratorsError { + /// The provided list of generators for `g` (bold) was empty. + GBoldEmpty, + /// The provided list of generators for `h` (bold) did not match `g` (bold) in length. + DifferingGhBoldLengths, + /// The amount of provided generators were not a power of two. + NotPowerOfTwo, + /// A generator was used multiple times. + DuplicatedGenerator, +} + +/// A full set of generators. +#[derive(Clone)] +pub struct Generators { + g: C::G, + h: C::G, + + g_bold: Vec, + h_bold: Vec, + h_sum: Vec, +} + +/// A batch verifier of proofs. +#[must_use] +#[derive(Clone)] +pub struct BatchVerifier { + /// The summed scalar for the G generator. + pub g: C::F, + /// The summed scalar for the G generator. + pub h: C::F, + + /// The summed scalars for the G_bold generators. + pub g_bold: Vec, + /// The summed scalars for the H_bold generators. + pub h_bold: Vec, + /// The summed scalars for the sums of all H generators prior to the index. + /// + /// This is not populated with the full set of summed H generators. This is only populated with + /// the powers of 2. Accordingly, an index i specifies a scalar for the sum of all H generators + /// from H**2**0 ..= H**2**i. + pub h_sum: Vec, + + /// Additional (non-fixed) points to include in the multiexp. + /// + /// This is used for proof-specific elements. + pub additional: Vec<(C::F, C::G)>, +} + +impl fmt::Debug for Generators { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let g = self.g.to_bytes(); + let g: &[u8] = g.as_ref(); + + let h = self.h.to_bytes(); + let h: &[u8] = h.as_ref(); + + fmt.debug_struct("Generators").field("g", &g).field("h", &h).finish_non_exhaustive() + } +} + +/// The generators for a specific proof. +/// +/// This potentially have been reduced in size from the original set of generators, as beneficial +/// to performance. +#[derive(Copy, Clone)] +pub struct ProofGenerators<'a, C: Ciphersuite> { + g: &'a C::G, + h: &'a C::G, + + g_bold: &'a [C::G], + h_bold: &'a [C::G], +} + +impl fmt::Debug for ProofGenerators<'_, C> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let g = self.g.to_bytes(); + let g: &[u8] = g.as_ref(); + + let h = self.h.to_bytes(); + let h: &[u8] = h.as_ref(); + + fmt.debug_struct("ProofGenerators").field("g", &g).field("h", &h).finish_non_exhaustive() + } +} + +impl Generators { + /// Construct an instance of Generators for usage with Bulletproofs. 
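+  ///
+  /// `g_bold` and `h_bold` must be non-empty, of matching power-of-two length, and every
+  /// generator passed (including `g` and `h`) must be distinct and not the identity. A minimal
+  /// sketch, as done by the test-only `tests::generators` helper (randomly sampled generators,
+  /// which should not be considered secure):
+  ///
+  /// ```ignore
+  /// let gens = |n: usize| (0 .. n).map(|_| C::G::random(&mut OsRng)).collect::<Vec<_>>();
+  /// let generators =
+  ///   Generators::<C>::new(C::G::random(&mut OsRng), C::G::random(&mut OsRng), gens(16), gens(16))?;
+  /// ```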
+ pub fn new( + g: C::G, + h: C::G, + g_bold: Vec, + h_bold: Vec, + ) -> Result { + if g_bold.is_empty() { + Err(GeneratorsError::GBoldEmpty)?; + } + if g_bold.len() != h_bold.len() { + Err(GeneratorsError::DifferingGhBoldLengths)?; + } + if padded_pow_of_2(g_bold.len()) != g_bold.len() { + Err(GeneratorsError::NotPowerOfTwo)?; + } + + let mut set = HashSet::new(); + let mut add_generator = |generator: &C::G| { + assert!(!bool::from(generator.is_identity())); + let bytes = generator.to_bytes(); + !set.insert(bytes.as_ref().to_vec()) + }; + + assert!(!add_generator(&g), "g was prior present in empty set"); + if add_generator(&h) { + Err(GeneratorsError::DuplicatedGenerator)?; + } + for g in &g_bold { + if add_generator(g) { + Err(GeneratorsError::DuplicatedGenerator)?; + } + } + for h in &h_bold { + if add_generator(h) { + Err(GeneratorsError::DuplicatedGenerator)?; + } + } + + let mut running_h_sum = C::G::identity(); + let mut h_sum = vec![]; + let mut next_pow_of_2 = 1; + for (i, h) in h_bold.iter().enumerate() { + running_h_sum += h; + if (i + 1) == next_pow_of_2 { + h_sum.push(running_h_sum); + next_pow_of_2 *= 2; + } + } + + Ok(Generators { g, h, g_bold, h_bold, h_sum }) + } + + /// Create a BatchVerifier for proofs which use a consistent set of generators. + pub fn batch_verifier() -> BatchVerifier { + BatchVerifier { + g: C::F::ZERO, + h: C::F::ZERO, + + g_bold: vec![], + h_bold: vec![], + h_sum: vec![], + + additional: Vec::with_capacity(128), + } + } + + /// Verify all proofs queued for batch verification in this BatchVerifier. + #[must_use] + pub fn verify(&self, verifier: BatchVerifier) -> bool { + multiexp_vartime( + &[(verifier.g, self.g), (verifier.h, self.h)] + .into_iter() + .chain(verifier.g_bold.into_iter().zip(self.g_bold.iter().cloned())) + .chain(verifier.h_bold.into_iter().zip(self.h_bold.iter().cloned())) + .chain(verifier.h_sum.into_iter().zip(self.h_sum.iter().cloned())) + .chain(verifier.additional) + .collect::>(), + ) + .is_identity() + .into() + } + + /// The `g` generator. + pub fn g(&self) -> C::G { + self.g + } + + /// The `h` generator. + pub fn h(&self) -> C::G { + self.h + } + + /// A slice to view the `g` (bold) generators. + pub fn g_bold_slice(&self) -> &[C::G] { + &self.g_bold + } + + /// A slice to view the `h` (bold) generators. + pub fn h_bold_slice(&self) -> &[C::G] { + &self.h_bold + } + + /// Reduce a set of generators to the quantity necessary to support a certain amount of + /// in-circuit multiplications/terms in a Pedersen vector commitment. + /// + /// Returns None if reducing to 0 or if the generators reduced are insufficient to provide this + /// many generators. + pub fn reduce(&self, generators: usize) -> Option> { + if generators == 0 { + None?; + } + + // Round to the nearest power of 2 + let generators = padded_pow_of_2(generators); + if generators > self.g_bold.len() { + None?; + } + + Some(ProofGenerators { + g: &self.g, + h: &self.h, + + g_bold: &self.g_bold[.. generators], + h_bold: &self.h_bold[.. 
generators], + }) + } +} + +impl ProofGenerators<'_, C> { + pub(crate) fn len(&self) -> usize { + self.g_bold.len() + } + + pub(crate) fn g(&self) -> C::G { + *self.g + } + + pub(crate) fn h(&self) -> C::G { + *self.h + } + + pub(crate) fn g_bold(&self, i: usize) -> C::G { + self.g_bold[i] + } + + pub(crate) fn h_bold(&self, i: usize) -> C::G { + self.h_bold[i] + } + + pub(crate) fn g_bold_slice(&self) -> &[C::G] { + self.g_bold + } + + pub(crate) fn h_bold_slice(&self) -> &[C::G] { + self.h_bold + } +} + +/// The opening of a Pedersen commitment. +#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)] +pub struct PedersenCommitment { + /// The value committed to. + pub value: C::F, + /// The mask blinding the value committed to. + pub mask: C::F, +} + +impl PedersenCommitment { + /// Commit to this value, yielding the Pedersen commitment. + pub fn commit(&self, g: C::G, h: C::G) -> C::G { + multiexp(&[(self.value, g), (self.mask, h)]) + } +} + +/// The opening of a Pedersen vector commitment. +#[derive(Clone, PartialEq, Eq, Debug, Zeroize)] +pub struct PedersenVectorCommitment { + /// The values committed to across the `g` (bold) generators. + pub g_values: ScalarVector, + /// The mask blinding the values committed to. + pub mask: C::F, +} + +impl PedersenVectorCommitment { + /// Commit to the vectors of values. + /// + /// This function returns None if the amount of generators is less than the amount of values + /// within the relevant vector. + pub fn commit(&self, g_bold: &[C::G], h: C::G) -> Option { + if g_bold.len() < self.g_values.len() { + None?; + }; + + let mut terms = vec![(self.mask, h)]; + for pair in self.g_values.0.iter().cloned().zip(g_bold.iter().cloned()) { + terms.push(pair); + } + let res = multiexp(&terms); + terms.zeroize(); + Some(res) + } +} diff --git a/crypto/evrf/generalized-bulletproofs/src/lincomb.rs b/crypto/evrf/generalized-bulletproofs/src/lincomb.rs new file mode 100644 index 00000000..e08a6d48 --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/src/lincomb.rs @@ -0,0 +1,227 @@ +use core::ops::{Add, Sub, Mul}; +use std_shims::{vec, vec::Vec}; + +use zeroize::Zeroize; + +use ciphersuite::group::ff::PrimeField; + +use crate::ScalarVector; + +/// A reference to a variable usable within linear combinations. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +#[allow(non_camel_case_types)] +pub enum Variable { + /// A variable within the left vector of vectors multiplied against each other. + aL(usize), + /// A variable within the right vector of vectors multiplied against each other. + aR(usize), + /// A variable within the output vector of the left vector multiplied by the right vector. + aO(usize), + /// A variable within a Pedersen vector commitment, committed to with a generator from `g` (bold). + CG { + /// The commitment being indexed. + commitment: usize, + /// The index of the variable. + index: usize, + }, + /// A variable within a Pedersen commitment. + V(usize), +} + +// Does a NOP as there shouldn't be anything critical here +impl Zeroize for Variable { + fn zeroize(&mut self) {} +} + +/// A linear combination. +/// +/// Specifically, `WL aL + WR aR + WO aO + WCG C_G + WV V + c`. 
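+///
+/// As a hedged example, a constraint requiring `aL[0] + 2 aR[1] = 5` (i.e. a linear combination
+/// which must evaluate to zero) may be built as:
+///
+/// ```ignore
+/// let constraint = LinComb::empty()
+///   .term(F::ONE, Variable::aL(0))
+///   .term(F::from(2u64), Variable::aR(1))
+///   .constant(-F::from(5u64));
+/// ```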
+#[derive(Clone, PartialEq, Eq, Debug, Zeroize)] +#[must_use] +pub struct LinComb { + pub(crate) highest_a_index: Option, + pub(crate) highest_c_index: Option, + pub(crate) highest_v_index: Option, + + // Sparse representation of WL/WR/WO + pub(crate) WL: Vec<(usize, F)>, + pub(crate) WR: Vec<(usize, F)>, + pub(crate) WO: Vec<(usize, F)>, + // Sparse representation once within a commitment + pub(crate) WCG: Vec>, + // Sparse representation of WV + pub(crate) WV: Vec<(usize, F)>, + pub(crate) c: F, +} + +impl From for LinComb { + fn from(constrainable: Variable) -> LinComb { + LinComb::empty().term(F::ONE, constrainable) + } +} + +impl Add<&LinComb> for LinComb { + type Output = Self; + + fn add(mut self, constraint: &Self) -> Self { + self.highest_a_index = self.highest_a_index.max(constraint.highest_a_index); + self.highest_c_index = self.highest_c_index.max(constraint.highest_c_index); + self.highest_v_index = self.highest_v_index.max(constraint.highest_v_index); + + self.WL.extend(&constraint.WL); + self.WR.extend(&constraint.WR); + self.WO.extend(&constraint.WO); + while self.WCG.len() < constraint.WCG.len() { + self.WCG.push(vec![]); + } + for (sWC, cWC) in self.WCG.iter_mut().zip(&constraint.WCG) { + sWC.extend(cWC); + } + self.WV.extend(&constraint.WV); + self.c += constraint.c; + self + } +} + +impl Sub<&LinComb> for LinComb { + type Output = Self; + + fn sub(mut self, constraint: &Self) -> Self { + self.highest_a_index = self.highest_a_index.max(constraint.highest_a_index); + self.highest_c_index = self.highest_c_index.max(constraint.highest_c_index); + self.highest_v_index = self.highest_v_index.max(constraint.highest_v_index); + + self.WL.extend(constraint.WL.iter().map(|(i, weight)| (*i, -*weight))); + self.WR.extend(constraint.WR.iter().map(|(i, weight)| (*i, -*weight))); + self.WO.extend(constraint.WO.iter().map(|(i, weight)| (*i, -*weight))); + while self.WCG.len() < constraint.WCG.len() { + self.WCG.push(vec![]); + } + for (sWC, cWC) in self.WCG.iter_mut().zip(&constraint.WCG) { + sWC.extend(cWC.iter().map(|(i, weight)| (*i, -*weight))); + } + self.WV.extend(constraint.WV.iter().map(|(i, weight)| (*i, -*weight))); + self.c -= constraint.c; + self + } +} + +impl Mul for LinComb { + type Output = Self; + + fn mul(mut self, scalar: F) -> Self { + for (_, weight) in self.WL.iter_mut() { + *weight *= scalar; + } + for (_, weight) in self.WR.iter_mut() { + *weight *= scalar; + } + for (_, weight) in self.WO.iter_mut() { + *weight *= scalar; + } + for WC in self.WCG.iter_mut() { + for (_, weight) in WC { + *weight *= scalar; + } + } + for (_, weight) in self.WV.iter_mut() { + *weight *= scalar; + } + self.c *= scalar; + self + } +} + +impl LinComb { + /// Create an empty linear combination. + pub fn empty() -> Self { + Self { + highest_a_index: None, + highest_c_index: None, + highest_v_index: None, + WL: vec![], + WR: vec![], + WO: vec![], + WCG: vec![], + WV: vec![], + c: F::ZERO, + } + } + + /// Add a new instance of a term to this linear combination. 
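+  ///
+  /// An index may be specified by multiple terms; when the constraint is accumulated into the
+  /// proof's weight vectors, the weights of repeated indexes are effectively summed.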
+ pub fn term(mut self, scalar: F, constrainable: Variable) -> Self { + match constrainable { + Variable::aL(i) => { + self.highest_a_index = self.highest_a_index.max(Some(i)); + self.WL.push((i, scalar)) + } + Variable::aR(i) => { + self.highest_a_index = self.highest_a_index.max(Some(i)); + self.WR.push((i, scalar)) + } + Variable::aO(i) => { + self.highest_a_index = self.highest_a_index.max(Some(i)); + self.WO.push((i, scalar)) + } + Variable::CG { commitment: i, index: j } => { + self.highest_c_index = self.highest_c_index.max(Some(i)); + self.highest_a_index = self.highest_a_index.max(Some(j)); + while self.WCG.len() <= i { + self.WCG.push(vec![]); + } + self.WCG[i].push((j, scalar)) + } + Variable::V(i) => { + self.highest_v_index = self.highest_v_index.max(Some(i)); + self.WV.push((i, scalar)); + } + }; + self + } + + /// Add to the constant c. + pub fn constant(mut self, scalar: F) -> Self { + self.c += scalar; + self + } + + /// View the current weights for aL. + pub fn WL(&self) -> &[(usize, F)] { + &self.WL + } + + /// View the current weights for aR. + pub fn WR(&self) -> &[(usize, F)] { + &self.WR + } + + /// View the current weights for aO. + pub fn WO(&self) -> &[(usize, F)] { + &self.WO + } + + /// View the current weights for CG. + pub fn WCG(&self) -> &[Vec<(usize, F)>] { + &self.WCG + } + + /// View the current weights for V. + pub fn WV(&self) -> &[(usize, F)] { + &self.WV + } + + /// View the current constant. + pub fn c(&self) -> F { + self.c + } +} + +pub(crate) fn accumulate_vector( + accumulator: &mut ScalarVector, + values: &[(usize, F)], + weight: F, +) { + for (i, coeff) in values { + accumulator[*i] += *coeff * weight; + } +} diff --git a/crypto/evrf/generalized-bulletproofs/src/point_vector.rs b/crypto/evrf/generalized-bulletproofs/src/point_vector.rs new file mode 100644 index 00000000..16bf6989 --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/src/point_vector.rs @@ -0,0 +1,122 @@ +use core::ops::{Index, IndexMut}; +use std_shims::vec::Vec; + +use zeroize::Zeroize; + +use ciphersuite::Ciphersuite; + +#[cfg(test)] +use multiexp::multiexp; + +use crate::ScalarVector; + +/// A point vector struct with the functionality necessary for Bulletproofs. +/// +/// The math operations for this panic upon any invalid operation, such as if vectors of different +/// lengths are added. The full extent of invalidity is not fully defined. Only field access is +/// guaranteed to have a safe, public API. 
+#[derive(Clone, PartialEq, Eq, Debug, Zeroize)] +pub struct PointVector(pub(crate) Vec); + +impl Index for PointVector { + type Output = C::G; + fn index(&self, index: usize) -> &C::G { + &self.0[index] + } +} + +impl IndexMut for PointVector { + fn index_mut(&mut self, index: usize) -> &mut C::G { + &mut self.0[index] + } +} + +impl PointVector { + /* + pub(crate) fn add(&self, point: impl AsRef) -> Self { + let mut res = self.clone(); + for val in res.0.iter_mut() { + *val += point.as_ref(); + } + res + } + pub(crate) fn sub(&self, point: impl AsRef) -> Self { + let mut res = self.clone(); + for val in res.0.iter_mut() { + *val -= point.as_ref(); + } + res + } + + pub(crate) fn mul(&self, scalar: impl core::borrow::Borrow) -> Self { + let mut res = self.clone(); + for val in res.0.iter_mut() { + *val *= scalar.borrow(); + } + res + } + + pub(crate) fn add_vec(&self, vector: &Self) -> Self { + debug_assert_eq!(self.len(), vector.len()); + let mut res = self.clone(); + for (i, val) in res.0.iter_mut().enumerate() { + *val += vector.0[i]; + } + res + } + + pub(crate) fn sub_vec(&self, vector: &Self) -> Self { + debug_assert_eq!(self.len(), vector.len()); + let mut res = self.clone(); + for (i, val) in res.0.iter_mut().enumerate() { + *val -= vector.0[i]; + } + res + } + */ + + pub(crate) fn mul_vec(&self, vector: &ScalarVector) -> Self { + debug_assert_eq!(self.len(), vector.len()); + let mut res = self.clone(); + for (i, val) in res.0.iter_mut().enumerate() { + *val *= vector.0[i]; + } + res + } + + #[cfg(test)] + pub(crate) fn multiexp(&self, vector: &crate::ScalarVector) -> C::G { + debug_assert_eq!(self.len(), vector.len()); + let mut res = Vec::with_capacity(self.len()); + for (point, scalar) in self.0.iter().copied().zip(vector.0.iter().copied()) { + res.push((scalar, point)); + } + multiexp(&res) + } + + /* + pub(crate) fn multiexp_vartime(&self, vector: &ScalarVector) -> C::G { + debug_assert_eq!(self.len(), vector.len()); + let mut res = Vec::with_capacity(self.len()); + for (point, scalar) in self.0.iter().copied().zip(vector.0.iter().copied()) { + res.push((scalar, point)); + } + multiexp_vartime(&res) + } + + pub(crate) fn sum(&self) -> C::G { + self.0.iter().sum() + } + */ + + pub(crate) fn len(&self) -> usize { + self.0.len() + } + + pub(crate) fn split(mut self) -> (Self, Self) { + assert!(self.len() > 1); + let r = self.0.split_off(self.0.len() / 2); + debug_assert_eq!(self.len(), r.len()); + (self, PointVector(r)) + } +} diff --git a/crypto/evrf/generalized-bulletproofs/src/scalar_vector.rs b/crypto/evrf/generalized-bulletproofs/src/scalar_vector.rs new file mode 100644 index 00000000..18c4f619 --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/src/scalar_vector.rs @@ -0,0 +1,147 @@ +use core::ops::{Index, IndexMut, Add, Sub, Mul}; +use std_shims::{vec, vec::Vec}; + +use zeroize::Zeroize; + +use ciphersuite::group::ff::PrimeField; + +/// A scalar vector struct with the functionality necessary for Bulletproofs. +/// +/// The math operations for this panic upon any invalid operation, such as if vectors of different +/// lengths are added. The full extent of invalidity is not fully defined. Only `new`, `len`, +/// and field access is guaranteed to have a safe, public API. 
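+///
+/// Arithmetic with a scalar applies element-wise, and arithmetic with another `ScalarVector`
+/// (which must be of the same length) pairs elements up, so `*` between two vectors is the
+/// Hadamard product, not the inner product.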
+#[derive(Clone, PartialEq, Eq, Debug)] +pub struct ScalarVector(pub(crate) Vec); + +impl Zeroize for ScalarVector { + fn zeroize(&mut self) { + self.0.zeroize() + } +} + +impl Index for ScalarVector { + type Output = F; + fn index(&self, index: usize) -> &F { + &self.0[index] + } +} +impl IndexMut for ScalarVector { + fn index_mut(&mut self, index: usize) -> &mut F { + &mut self.0[index] + } +} + +impl Add for ScalarVector { + type Output = ScalarVector; + fn add(mut self, scalar: F) -> Self { + for s in &mut self.0 { + *s += scalar; + } + self + } +} +impl Sub for ScalarVector { + type Output = ScalarVector; + fn sub(mut self, scalar: F) -> Self { + for s in &mut self.0 { + *s -= scalar; + } + self + } +} +impl Mul for ScalarVector { + type Output = ScalarVector; + fn mul(mut self, scalar: F) -> Self { + for s in &mut self.0 { + *s *= scalar; + } + self + } +} + +impl Add<&ScalarVector> for ScalarVector { + type Output = ScalarVector; + fn add(mut self, other: &ScalarVector) -> Self { + assert_eq!(self.len(), other.len()); + for (s, o) in self.0.iter_mut().zip(other.0.iter()) { + *s += o; + } + self + } +} +impl Sub<&ScalarVector> for ScalarVector { + type Output = ScalarVector; + fn sub(mut self, other: &ScalarVector) -> Self { + assert_eq!(self.len(), other.len()); + for (s, o) in self.0.iter_mut().zip(other.0.iter()) { + *s -= o; + } + self + } +} +impl Mul<&ScalarVector> for ScalarVector { + type Output = ScalarVector; + fn mul(mut self, other: &ScalarVector) -> Self { + assert_eq!(self.len(), other.len()); + for (s, o) in self.0.iter_mut().zip(other.0.iter()) { + *s *= o; + } + self + } +} + +impl ScalarVector { + /// Create a new scalar vector, initialized with `len` zero scalars. + pub fn new(len: usize) -> Self { + ScalarVector(vec![F::ZERO; len]) + } + + pub(crate) fn powers(x: F, len: usize) -> Self { + assert!(len != 0); + + let mut res = Vec::with_capacity(len); + res.push(F::ONE); + res.push(x); + for i in 2 .. len { + res.push(res[i - 1] * x); + } + res.truncate(len); + ScalarVector(res) + } + + /// The length of this scalar vector. 
+ #[allow(clippy::len_without_is_empty)] + pub fn len(&self) -> usize { + self.0.len() + } + + /* + pub(crate) fn sum(mut self) -> F { + self.0.drain(..).sum() + } + */ + + pub(crate) fn inner_product<'a, V: Iterator>(&self, vector: V) -> F { + let mut count = 0; + let mut res = F::ZERO; + for (a, b) in self.0.iter().zip(vector) { + res += *a * b; + count += 1; + } + debug_assert_eq!(self.len(), count); + res + } + + pub(crate) fn split(mut self) -> (Self, Self) { + assert!(self.len() > 1); + let r = self.0.split_off(self.0.len() / 2); + debug_assert_eq!(self.len(), r.len()); + (self, ScalarVector(r)) + } +} + +impl From> for ScalarVector { + fn from(vec: Vec) -> Self { + Self(vec) + } +} diff --git a/crypto/evrf/generalized-bulletproofs/src/tests/arithmetic_circuit_proof.rs b/crypto/evrf/generalized-bulletproofs/src/tests/arithmetic_circuit_proof.rs new file mode 100644 index 00000000..388c3aca --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/src/tests/arithmetic_circuit_proof.rs @@ -0,0 +1,222 @@ +use rand_core::{RngCore, OsRng}; + +use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto}; + +use crate::{ + ScalarVector, PedersenCommitment, PedersenVectorCommitment, Generators, + transcript::*, + arithmetic_circuit_proof::{ + Variable, LinComb, ArithmeticCircuitStatement, ArithmeticCircuitWitness, + }, + tests::generators, +}; + +#[test] +fn test_zero_arithmetic_circuit() { + let generators = generators(1); + + let value = ::F::random(&mut OsRng); + let gamma = ::F::random(&mut OsRng); + let commitment = (generators.g() * value) + (generators.h() * gamma); + let V = vec![commitment]; + + let aL = ScalarVector::<::F>(vec![::F::ZERO]); + let aR = aL.clone(); + + let mut transcript = Transcript::new([0; 32]); + let commitments = transcript.write_commitments(vec![], V); + let statement = ArithmeticCircuitStatement::::new( + generators.reduce(1).unwrap(), + vec![], + commitments.clone(), + ) + .unwrap(); + let witness = ArithmeticCircuitWitness::::new( + aL, + aR, + vec![], + vec![PedersenCommitment { value, mask: gamma }], + ) + .unwrap(); + + let proof = { + statement.clone().prove(&mut OsRng, &mut transcript, witness).unwrap(); + transcript.complete() + }; + let mut verifier = Generators::batch_verifier(); + + let mut transcript = VerifierTranscript::new([0; 32], &proof); + let verifier_commmitments = transcript.read_commitments(0, 1); + assert_eq!(commitments, verifier_commmitments.unwrap()); + statement.verify(&mut OsRng, &mut verifier, &mut transcript).unwrap(); + assert!(generators.verify(verifier)); +} + +#[test] +fn test_vector_commitment_arithmetic_circuit() { + let generators = generators(2); + let reduced = generators.reduce(2).unwrap(); + + let v1 = ::F::random(&mut OsRng); + let v2 = ::F::random(&mut OsRng); + let gamma = ::F::random(&mut OsRng); + let commitment = (reduced.g_bold(0) * v1) + (reduced.g_bold(1) * v2) + (generators.h() * gamma); + let V = vec![]; + let C = vec![commitment]; + + let zero_vec = + || ScalarVector::<::F>(vec![::F::ZERO]); + + let aL = zero_vec(); + let aR = zero_vec(); + + let mut transcript = Transcript::new([0; 32]); + let commitments = transcript.write_commitments(C, V); + let statement = ArithmeticCircuitStatement::::new( + reduced, + vec![LinComb::empty() + .term(::F::ONE, Variable::CG { commitment: 0, index: 0 }) + .term(::F::from(2u64), Variable::CG { commitment: 0, index: 1 }) + .constant(-(v1 + (v2 + v2)))], + commitments.clone(), + ) + .unwrap(); + let witness = ArithmeticCircuitWitness::::new( + aL, + aR, + 
vec![PedersenVectorCommitment { g_values: ScalarVector(vec![v1, v2]), mask: gamma }], + vec![], + ) + .unwrap(); + + let proof = { + statement.clone().prove(&mut OsRng, &mut transcript, witness).unwrap(); + transcript.complete() + }; + let mut verifier = Generators::batch_verifier(); + + let mut transcript = VerifierTranscript::new([0; 32], &proof); + let verifier_commmitments = transcript.read_commitments(1, 0); + assert_eq!(commitments, verifier_commmitments.unwrap()); + statement.verify(&mut OsRng, &mut verifier, &mut transcript).unwrap(); + assert!(generators.verify(verifier)); +} + +#[test] +fn fuzz_test_arithmetic_circuit() { + let generators = generators(32); + + for i in 0 .. 100 { + dbg!(i); + + // Create aL, aR, aO + let mut aL = ScalarVector(vec![]); + let mut aR = ScalarVector(vec![]); + while aL.len() < ((OsRng.next_u64() % 8) + 1).try_into().unwrap() { + aL.0.push(::F::random(&mut OsRng)); + } + while aR.len() < aL.len() { + aR.0.push(::F::random(&mut OsRng)); + } + let aO = aL.clone() * &aR; + + // Create C + let mut C = vec![]; + while C.len() < (OsRng.next_u64() % 16).try_into().unwrap() { + let mut g_values = ScalarVector(vec![]); + while g_values.0.len() < ((OsRng.next_u64() % 8) + 1).try_into().unwrap() { + g_values.0.push(::F::random(&mut OsRng)); + } + C.push(PedersenVectorCommitment { + g_values, + mask: ::F::random(&mut OsRng), + }); + } + + // Create V + let mut V = vec![]; + while V.len() < (OsRng.next_u64() % 4).try_into().unwrap() { + V.push(PedersenCommitment { + value: ::F::random(&mut OsRng), + mask: ::F::random(&mut OsRng), + }); + } + + // Generate random constraints + let mut constraints = vec![]; + for _ in 0 .. (OsRng.next_u64() % 8).try_into().unwrap() { + let mut eval = ::F::ZERO; + let mut constraint = LinComb::empty(); + + for _ in 0 .. (OsRng.next_u64() % 4) { + let index = usize::try_from(OsRng.next_u64()).unwrap() % aL.len(); + let weight = ::F::random(&mut OsRng); + constraint = constraint.term(weight, Variable::aL(index)); + eval += weight * aL[index]; + } + + for _ in 0 .. (OsRng.next_u64() % 4) { + let index = usize::try_from(OsRng.next_u64()).unwrap() % aR.len(); + let weight = ::F::random(&mut OsRng); + constraint = constraint.term(weight, Variable::aR(index)); + eval += weight * aR[index]; + } + + for _ in 0 .. (OsRng.next_u64() % 4) { + let index = usize::try_from(OsRng.next_u64()).unwrap() % aO.len(); + let weight = ::F::random(&mut OsRng); + constraint = constraint.term(weight, Variable::aO(index)); + eval += weight * aO[index]; + } + + for (commitment, C) in C.iter().enumerate() { + for _ in 0 .. (OsRng.next_u64() % 4) { + let index = usize::try_from(OsRng.next_u64()).unwrap() % C.g_values.len(); + let weight = ::F::random(&mut OsRng); + constraint = constraint.term(weight, Variable::CG { commitment, index }); + eval += weight * C.g_values[index]; + } + } + + if !V.is_empty() { + for _ in 0 .. 
(OsRng.next_u64() % 4) { + let index = usize::try_from(OsRng.next_u64()).unwrap() % V.len(); + let weight = ::F::random(&mut OsRng); + constraint = constraint.term(weight, Variable::V(index)); + eval += weight * V[index].value; + } + } + + constraint = constraint.constant(-eval); + + constraints.push(constraint); + } + + let mut transcript = Transcript::new([0; 32]); + let commitments = transcript.write_commitments( + C.iter().map(|C| C.commit(generators.g_bold_slice(), generators.h()).unwrap()).collect(), + V.iter().map(|V| V.commit(generators.g(), generators.h())).collect(), + ); + + let statement = ArithmeticCircuitStatement::::new( + generators.reduce(16).unwrap(), + constraints, + commitments.clone(), + ) + .unwrap(); + + let witness = ArithmeticCircuitWitness::::new(aL, aR, C.clone(), V.clone()).unwrap(); + + let proof = { + statement.clone().prove(&mut OsRng, &mut transcript, witness).unwrap(); + transcript.complete() + }; + let mut verifier = Generators::batch_verifier(); + + let mut transcript = VerifierTranscript::new([0; 32], &proof); + let verifier_commmitments = transcript.read_commitments(C.len(), V.len()); + assert_eq!(commitments, verifier_commmitments.unwrap()); + statement.verify(&mut OsRng, &mut verifier, &mut transcript).unwrap(); + assert!(generators.verify(verifier)); + } +} diff --git a/crypto/evrf/generalized-bulletproofs/src/tests/inner_product.rs b/crypto/evrf/generalized-bulletproofs/src/tests/inner_product.rs new file mode 100644 index 00000000..63bc9f92 --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/src/tests/inner_product.rs @@ -0,0 +1,113 @@ +// The inner product relation is P = sum(g_bold * a, h_bold * b, g * (a * b)) + +use rand_core::OsRng; + +use ciphersuite::{ + group::{ff::Field, Group}, + Ciphersuite, Ristretto, +}; + +use crate::{ + ScalarVector, PointVector, Generators, + transcript::*, + inner_product::{P, IpStatement, IpWitness}, + tests::generators, +}; + +#[test] +fn test_zero_inner_product() { + let P = ::G::identity(); + + let generators = generators::(1); + let reduced = generators.reduce(1).unwrap(); + let witness = IpWitness::::new( + ScalarVector::<::F>::new(1), + ScalarVector::<::F>::new(1), + ) + .unwrap(); + + let proof = { + let mut transcript = Transcript::new([0; 32]); + IpStatement::::new( + reduced, + ScalarVector(vec![::F::ONE; 1]), + ::F::ONE, + P::Prover(P), + ) + .unwrap() + .clone() + .prove(&mut transcript, witness) + .unwrap(); + transcript.complete() + }; + + let mut verifier = Generators::batch_verifier(); + IpStatement::::new( + reduced, + ScalarVector(vec![::F::ONE; 1]), + ::F::ONE, + P::Verifier { verifier_weight: ::F::ONE }, + ) + .unwrap() + .verify(&mut verifier, &mut VerifierTranscript::new([0; 32], &proof)) + .unwrap(); + assert!(generators.verify(verifier)); +} + +#[test] +fn test_inner_product() { + // P = sum(g_bold * a, h_bold * b) + let generators = generators::(32); + let mut verifier = Generators::batch_verifier(); + for i in [1, 2, 4, 8, 16, 32] { + let generators = generators.reduce(i).unwrap(); + let g = generators.g(); + assert_eq!(generators.len(), i); + let mut g_bold = vec![]; + let mut h_bold = vec![]; + for i in 0 .. i { + g_bold.push(generators.g_bold(i)); + h_bold.push(generators.h_bold(i)); + } + let g_bold = PointVector::(g_bold); + let h_bold = PointVector::(h_bold); + + let mut a = ScalarVector::<::F>::new(i); + let mut b = ScalarVector::<::F>::new(i); + + for i in 0 .. 
i { + a[i] = ::F::random(&mut OsRng); + b[i] = ::F::random(&mut OsRng); + } + + let P = g_bold.multiexp(&a) + h_bold.multiexp(&b) + (g * a.inner_product(b.0.iter())); + + let witness = IpWitness::::new(a, b).unwrap(); + + let proof = { + let mut transcript = Transcript::new([0; 32]); + IpStatement::::new( + generators, + ScalarVector(vec![::F::ONE; i]), + ::F::ONE, + P::Prover(P), + ) + .unwrap() + .prove(&mut transcript, witness) + .unwrap(); + transcript.complete() + }; + + verifier.additional.push((::F::ONE, P)); + IpStatement::::new( + generators, + ScalarVector(vec![::F::ONE; i]), + ::F::ONE, + P::Verifier { verifier_weight: ::F::ONE }, + ) + .unwrap() + .verify(&mut verifier, &mut VerifierTranscript::new([0; 32], &proof)) + .unwrap(); + } + assert!(generators.verify(verifier)); +} diff --git a/crypto/evrf/generalized-bulletproofs/src/tests/mod.rs b/crypto/evrf/generalized-bulletproofs/src/tests/mod.rs new file mode 100644 index 00000000..1b64d378 --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/src/tests/mod.rs @@ -0,0 +1,27 @@ +use rand_core::OsRng; + +use ciphersuite::{group::Group, Ciphersuite}; + +use crate::{Generators, padded_pow_of_2}; + +#[cfg(test)] +mod inner_product; + +#[cfg(test)] +mod arithmetic_circuit_proof; + +/// Generate a set of generators for testing purposes. +/// +/// This should not be considered secure. +pub fn generators(n: usize) -> Generators { + assert_eq!(padded_pow_of_2(n), n, "amount of generators wasn't a power of 2"); + + let gens = || { + let mut res = Vec::with_capacity(n); + for _ in 0 .. n { + res.push(C::G::random(&mut OsRng)); + } + res + }; + Generators::new(C::G::random(&mut OsRng), C::G::random(&mut OsRng), gens(), gens()).unwrap() +} diff --git a/crypto/evrf/generalized-bulletproofs/src/transcript.rs b/crypto/evrf/generalized-bulletproofs/src/transcript.rs new file mode 100644 index 00000000..70fe9f8d --- /dev/null +++ b/crypto/evrf/generalized-bulletproofs/src/transcript.rs @@ -0,0 +1,211 @@ +use std_shims::{vec::Vec, io}; + +use blake2::{Digest, Blake2b512}; + +use ciphersuite::{ + group::{ + ff::{Field, PrimeField}, + GroupEncoding, + }, + Ciphersuite, +}; + +use crate::PointVector; + +const SCALAR: u8 = 0; +const POINT: u8 = 1; +const CHALLENGE: u8 = 2; + +fn challenge(digest: &mut Blake2b512) -> C::F { + digest.update([CHALLENGE]); + let chl = digest.clone().finalize().into(); + + let res = C::reduce_512(chl); + + // Negligible probability + if bool::from(res.is_zero()) { + panic!("zero challenge"); + } + + res +} + +/// Commitments written to/read from a transcript. +// We use a dedicated type for this to coerce the caller into transcripting the commitments as +// expected. +#[cfg_attr(test, derive(Clone, PartialEq, Debug))] +pub struct Commitments { + pub(crate) C: PointVector, + pub(crate) V: PointVector, +} + +impl Commitments { + /// The vector commitments. + pub fn C(&self) -> &[C::G] { + &self.C.0 + } + /// The non-vector commitments. + pub fn V(&self) -> &[C::G] { + &self.V.0 + } +} + +/// A transcript for proving proofs. +pub struct Transcript { + digest: Blake2b512, + transcript: Vec, +} + +/* + We define our proofs as Vec and derive our transcripts from the values we deserialize from + them. This format assumes the order of the values read, their size, and their quantity are + constant to the context. +*/ +impl Transcript { + /// Create a new transcript off some context. 
+ pub fn new(context: [u8; 32]) -> Self { + let mut digest = Blake2b512::new(); + digest.update(context); + Self { digest, transcript: Vec::with_capacity(1024) } + } + + /// Push a scalar onto the transcript. + /// + /// The order and layout of this must be constant to the context. + pub fn push_scalar(&mut self, scalar: impl PrimeField) { + self.digest.update([SCALAR]); + let bytes = scalar.to_repr(); + self.digest.update(bytes); + self.transcript.extend(bytes.as_ref()); + } + + /// Push a point onto the transcript. + /// + /// The order and layout of this must be constant to the context. + pub fn push_point(&mut self, point: impl GroupEncoding) { + self.digest.update([POINT]); + let bytes = point.to_bytes(); + self.digest.update(bytes); + self.transcript.extend(bytes.as_ref()); + } + + /// Write the Pedersen (vector) commitments to this transcript. + pub fn write_commitments( + &mut self, + C: Vec, + V: Vec, + ) -> Commitments { + self.digest.update(u32::try_from(C.len()).unwrap().to_le_bytes()); + for C in &C { + self.push_point(*C); + } + self.digest.update(u32::try_from(V.len()).unwrap().to_le_bytes()); + for V in &V { + self.push_point(*V); + } + Commitments { C: PointVector(C), V: PointVector(V) } + } + + /// Sample a challenge. + pub fn challenge(&mut self) -> C::F { + challenge::(&mut self.digest) + } + + /// Sample a challenge as a byte array. + pub fn challenge_bytes(&mut self) -> [u8; 64] { + self.digest.update([CHALLENGE]); + self.digest.clone().finalize().into() + } + + /// Complete a transcript, yielding the fully serialized proof. + pub fn complete(self) -> Vec { + self.transcript + } +} + +/// A transcript for verifying proofs. +pub struct VerifierTranscript<'a> { + digest: Blake2b512, + transcript: &'a [u8], +} + +impl<'a> VerifierTranscript<'a> { + /// Create a new transcript to verify a proof with. + pub fn new(context: [u8; 32], proof: &'a [u8]) -> Self { + let mut digest = Blake2b512::new(); + digest.update(context); + Self { digest, transcript: proof } + } + + /// Read a scalar from the transcript. + /// + /// The order and layout of this must be constant to the context. + pub fn read_scalar(&mut self) -> io::Result { + // Read the scalar onto the transcript using the serialization present in the transcript + self.digest.update([SCALAR]); + let scalar_len = ::Repr::default().as_ref().len(); + if self.transcript.len() < scalar_len { + Err(io::Error::new(io::ErrorKind::Other, "not enough bytes to read_scalar"))?; + } + self.digest.update(&self.transcript[.. scalar_len]); + + // Read the actual scalar, where `read_F` ensures its canonically serialized + let scalar = C::read_F(&mut self.transcript)?; + Ok(scalar) + } + + /// Read a point from the transcript. + /// + /// The order and layout of this must be constant to the context. + pub fn read_point(&mut self) -> io::Result { + // Read the point onto the transcript using the serialization present in the transcript + self.digest.update([POINT]); + let point_len = ::Repr::default().as_ref().len(); + if self.transcript.len() < point_len { + Err(io::Error::new(io::ErrorKind::Other, "not enough bytes to read_point"))?; + } + self.digest.update(&self.transcript[.. point_len]); + + // Read the actual point, where `read_G` ensures its canonically serialized + let point = C::read_G(&mut self.transcript)?; + Ok(point) + } + + /// Read the Pedersen (Vector) Commitments from the transcript. + /// + /// The lengths of the vectors are not transcripted. 
+ #[allow(clippy::type_complexity)] + pub fn read_commitments( + &mut self, + C: usize, + V: usize, + ) -> io::Result> { + self.digest.update(u32::try_from(C).unwrap().to_le_bytes()); + let mut C_vec = Vec::with_capacity(C); + for _ in 0 .. C { + C_vec.push(self.read_point::()?); + } + self.digest.update(u32::try_from(V).unwrap().to_le_bytes()); + let mut V_vec = Vec::with_capacity(V); + for _ in 0 .. V { + V_vec.push(self.read_point::()?); + } + Ok(Commitments { C: PointVector(C_vec), V: PointVector(V_vec) }) + } + + /// Sample a challenge. + pub fn challenge(&mut self) -> C::F { + challenge::(&mut self.digest) + } + + /// Sample a challenge as a byte array. + pub fn challenge_bytes(&mut self) -> [u8; 64] { + self.digest.update([CHALLENGE]); + self.digest.clone().finalize().into() + } + + /// Complete the transcript transcript, yielding what remains. + pub fn complete(self) -> &'a [u8] { + self.transcript + } +} diff --git a/crypto/evrf/secq256k1/Cargo.toml b/crypto/evrf/secq256k1/Cargo.toml new file mode 100644 index 00000000..4b5ec5ac --- /dev/null +++ b/crypto/evrf/secq256k1/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "secq256k1" +version = "0.1.0" +description = "An implementation of the curve secp256k1 cycles with" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/crypto/evrf/secq256k1" +authors = ["Luke Parker "] +keywords = ["secp256k1", "secq256k1", "group"] +edition = "2021" +rust-version = "1.80" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[dependencies] +rustversion = "1" +hex-literal = { version = "0.4", default-features = false } + +std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false, optional = true } + +rand_core = { version = "0.6", default-features = false } + +zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] } +subtle = { version = "^2.4", default-features = false } + +generic-array = { version = "0.14", default-features = false } +crypto-bigint = { version = "0.5", default-features = false, features = ["zeroize"] } + +k256 = { version = "0.13", default-features = false, features = ["arithmetic"] } + +blake2 = { version = "0.10", default-features = false } +ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false } +ec-divisors = { path = "../divisors", default-features = false } +generalized-bulletproofs-ec-gadgets = { path = "../ec-gadgets", default-features = false } + +[dev-dependencies] +hex = "0.4" + +rand_core = { version = "0.6", features = ["std"] } + +ff-group-tests = { path = "../../ff-group-tests" } + +[features] +alloc = ["std-shims", "zeroize/alloc", "ciphersuite/alloc"] +std = ["std-shims/std", "rand_core/std", "zeroize/std", "subtle/std", "blake2/std", "ciphersuite/std", "ec-divisors/std", "generalized-bulletproofs-ec-gadgets/std"] +default = ["std"] diff --git a/crypto/evrf/secq256k1/LICENSE b/crypto/evrf/secq256k1/LICENSE new file mode 100644 index 00000000..91d893c1 --- /dev/null +++ b/crypto/evrf/secq256k1/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022-2024 Luke Parker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished 
to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crypto/evrf/secq256k1/README.md b/crypto/evrf/secq256k1/README.md new file mode 100644 index 00000000..b20ee31f --- /dev/null +++ b/crypto/evrf/secq256k1/README.md @@ -0,0 +1,5 @@ +# secq256k1 + +An implementation of the curve secp256k1 cycles with. + +Scalars and field elements are encoded in their big-endian formats. diff --git a/crypto/evrf/secq256k1/src/backend.rs b/crypto/evrf/secq256k1/src/backend.rs new file mode 100644 index 00000000..6f8653c8 --- /dev/null +++ b/crypto/evrf/secq256k1/src/backend.rs @@ -0,0 +1,295 @@ +use zeroize::Zeroize; + +// Use black_box when possible +#[rustversion::since(1.66)] +use core::hint::black_box; +#[rustversion::before(1.66)] +fn black_box(val: T) -> T { + val +} + +pub(crate) fn u8_from_bool(bit_ref: &mut bool) -> u8 { + let bit_ref = black_box(bit_ref); + + let mut bit = black_box(*bit_ref); + let res = black_box(bit as u8); + bit.zeroize(); + debug_assert!((res | 1) == 1); + + bit_ref.zeroize(); + res +} + +macro_rules! math_op { + ( + $Value: ident, + $Other: ident, + $Op: ident, + $op_fn: ident, + $Assign: ident, + $assign_fn: ident, + $function: expr + ) => { + impl $Op<$Other> for $Value { + type Output = $Value; + fn $op_fn(self, other: $Other) -> Self::Output { + Self($function(self.0, other.0)) + } + } + impl $Assign<$Other> for $Value { + fn $assign_fn(&mut self, other: $Other) { + self.0 = $function(self.0, other.0); + } + } + impl<'a> $Op<&'a $Other> for $Value { + type Output = $Value; + fn $op_fn(self, other: &'a $Other) -> Self::Output { + Self($function(self.0, other.0)) + } + } + impl<'a> $Assign<&'a $Other> for $Value { + fn $assign_fn(&mut self, other: &'a $Other) { + self.0 = $function(self.0, other.0); + } + } + }; +} + +macro_rules! from_wrapper { + ($wrapper: ident, $inner: ident, $uint: ident) => { + impl From<$uint> for $wrapper { + fn from(a: $uint) -> $wrapper { + Self(Residue::new(&$inner::from(a))) + } + } + }; +} + +macro_rules! field { + ( + $FieldName: ident, + $ResidueType: ident, + + $MODULUS_STR: ident, + $MODULUS: ident, + $WIDE_MODULUS: ident, + + $NUM_BITS: literal, + $MULTIPLICATIVE_GENERATOR: literal, + $S: literal, + $ROOT_OF_UNITY: literal, + $DELTA: literal, + ) => { + use core::{ + ops::{DerefMut, Add, AddAssign, Neg, Sub, SubAssign, Mul, MulAssign}, + iter::{Sum, Product}, + }; + + use subtle::{Choice, CtOption, ConstantTimeEq, ConstantTimeLess, ConditionallySelectable}; + use rand_core::RngCore; + + use crypto_bigint::{Integer, NonZero, Encoding, impl_modulus}; + + use ciphersuite::group::ff::{ + Field, PrimeField, FieldBits, PrimeFieldBits, helpers::sqrt_ratio_generic, + }; + + use $crate::backend::u8_from_bool; + + fn reduce(x: U512) -> U256 { + U256::from_le_slice(&x.rem(&NonZero::new($WIDE_MODULUS).unwrap()).to_le_bytes()[.. 
32]) + } + + impl ConstantTimeEq for $FieldName { + fn ct_eq(&self, other: &Self) -> Choice { + self.0.ct_eq(&other.0) + } + } + + impl ConditionallySelectable for $FieldName { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + $FieldName(Residue::conditional_select(&a.0, &b.0, choice)) + } + } + + math_op!($FieldName, $FieldName, Add, add, AddAssign, add_assign, |x: $ResidueType, y| x + .add(&y)); + math_op!($FieldName, $FieldName, Sub, sub, SubAssign, sub_assign, |x: $ResidueType, y| x + .sub(&y)); + math_op!($FieldName, $FieldName, Mul, mul, MulAssign, mul_assign, |x: $ResidueType, y| x + .mul(&y)); + + from_wrapper!($FieldName, U256, u8); + from_wrapper!($FieldName, U256, u16); + from_wrapper!($FieldName, U256, u32); + from_wrapper!($FieldName, U256, u64); + from_wrapper!($FieldName, U256, u128); + + impl Neg for $FieldName { + type Output = $FieldName; + fn neg(self) -> $FieldName { + Self(self.0.neg()) + } + } + + impl<'a> Neg for &'a $FieldName { + type Output = $FieldName; + fn neg(self) -> Self::Output { + (*self).neg() + } + } + + impl $FieldName { + /// Perform an exponentation. + pub fn pow(&self, other: $FieldName) -> $FieldName { + let mut table = [Self(Residue::ONE); 16]; + table[1] = *self; + for i in 2 .. 16 { + table[i] = table[i - 1] * self; + } + + let mut res = Self(Residue::ONE); + let mut bits = 0; + for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() { + bits <<= 1; + let mut bit = u8_from_bool(bit.deref_mut()); + bits |= bit; + bit.zeroize(); + + if ((i + 1) % 4) == 0 { + if i != 3 { + for _ in 0 .. 4 { + res *= res; + } + } + + let mut factor = table[0]; + for (j, candidate) in table[1 ..].iter().enumerate() { + let j = j + 1; + factor = Self::conditional_select(&factor, &candidate, usize::from(bits).ct_eq(&j)); + } + res *= factor; + bits = 0; + } + } + res + } + } + + impl Field for $FieldName { + const ZERO: Self = Self(Residue::ZERO); + const ONE: Self = Self(Residue::ONE); + + fn random(mut rng: impl RngCore) -> Self { + let mut bytes = [0; 64]; + rng.fill_bytes(&mut bytes); + $FieldName(Residue::new(&reduce(U512::from_be_slice(bytes.as_ref())))) + } + + fn square(&self) -> Self { + Self(self.0.square()) + } + fn double(&self) -> Self { + *self + self + } + + fn invert(&self) -> CtOption { + let res = self.0.invert(); + CtOption::new(Self(res.0), res.1.into()) + } + + fn sqrt(&self) -> CtOption { + // (p + 1) // 4, as valid since p % 4 == 3 + let mod_plus_one_div_four = $MODULUS.saturating_add(&U256::ONE).wrapping_div(&(4u8.into())); + let res = self.pow(Self($ResidueType::new_checked(&mod_plus_one_div_four).unwrap())); + CtOption::new(res, res.square().ct_eq(self)) + } + + fn sqrt_ratio(num: &Self, div: &Self) -> (Choice, Self) { + sqrt_ratio_generic(num, div) + } + } + + impl PrimeField for $FieldName { + type Repr = [u8; 32]; + + const MODULUS: &'static str = $MODULUS_STR; + + const NUM_BITS: u32 = $NUM_BITS; + const CAPACITY: u32 = $NUM_BITS - 1; + + const TWO_INV: Self = $FieldName($ResidueType::new(&U256::from_u8(2)).invert().0); + + const MULTIPLICATIVE_GENERATOR: Self = + Self(Residue::new(&U256::from_u8($MULTIPLICATIVE_GENERATOR))); + const S: u32 = $S; + + const ROOT_OF_UNITY: Self = $FieldName(Residue::new(&U256::from_be_hex($ROOT_OF_UNITY))); + const ROOT_OF_UNITY_INV: Self = Self(Self::ROOT_OF_UNITY.0.invert().0); + + const DELTA: Self = $FieldName(Residue::new(&U256::from_be_hex($DELTA))); + + fn from_repr(bytes: Self::Repr) -> CtOption { + let res = U256::from_be_slice(&bytes); + 
CtOption::new($FieldName(Residue::new(&res)), res.ct_lt(&$MODULUS)) + } + fn to_repr(&self) -> Self::Repr { + let mut repr = [0; 32]; + repr.copy_from_slice(&self.0.retrieve().to_be_bytes()); + repr + } + + fn is_odd(&self) -> Choice { + self.0.retrieve().is_odd() + } + } + + impl PrimeFieldBits for $FieldName { + type ReprBits = [u8; 32]; + + fn to_le_bits(&self) -> FieldBits { + let mut repr = [0; 32]; + repr.copy_from_slice(&self.0.retrieve().to_le_bytes()); + repr.into() + } + + fn char_le_bits() -> FieldBits { + let mut repr = [0; 32]; + repr.copy_from_slice(&MODULUS.to_le_bytes()); + repr.into() + } + } + + impl Sum<$FieldName> for $FieldName { + fn sum>(iter: I) -> $FieldName { + let mut res = $FieldName::ZERO; + for item in iter { + res += item; + } + res + } + } + + impl<'a> Sum<&'a $FieldName> for $FieldName { + fn sum>(iter: I) -> $FieldName { + iter.cloned().sum() + } + } + + impl Product<$FieldName> for $FieldName { + fn product>(iter: I) -> $FieldName { + let mut res = $FieldName::ONE; + for item in iter { + res *= item; + } + res + } + } + + impl<'a> Product<&'a $FieldName> for $FieldName { + fn product>(iter: I) -> $FieldName { + iter.cloned().product() + } + } + }; +} diff --git a/crypto/evrf/secq256k1/src/lib.rs b/crypto/evrf/secq256k1/src/lib.rs new file mode 100644 index 00000000..40656db8 --- /dev/null +++ b/crypto/evrf/secq256k1/src/lib.rs @@ -0,0 +1,70 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(any(feature = "alloc", feature = "std"))] +use std_shims::io::{self, Read}; + +use generic_array::typenum::{Sum, Diff, Quot, U, U1, U2}; +use ciphersuite::group::{ff::PrimeField, Group}; + +#[macro_use] +mod backend; + +mod scalar; +pub use scalar::Scalar; + +pub use k256::Scalar as FieldElement; + +mod point; +pub use point::Point; + +/// Ciphersuite for Secq256k1. +/// +/// hash_to_F is implemented with a naive concatenation of the dst and data, allowing transposition +/// between the two. This means `dst: b"abc", data: b"def"`, will produce the same scalar as +/// `dst: "abcdef", data: b""`. Please use carefully, not letting dsts be substrings of each other. 
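A brief illustration of the caveat above (not part of this diff): because `hash_to_F` naively concatenates `dst` and `data`, moving bytes from the front of `data` onto the end of `dst` yields the same scalar, so distinct domain-separation tags must never be prefixes of one another. A minimal sketch, assuming the `secq256k1` crate added by this diff:

```rust
// Illustrative sketch of the hash_to_F transposition caveat documented above.
// Not part of this diff; assumes the secq256k1 crate and the Ciphersuite trait it implements.
use ciphersuite::Ciphersuite;
use secq256k1::Secq256k1;

fn main() {
  // dst || data is byte-identical in both calls, so the derived scalars collide.
  let a = <Secq256k1 as Ciphersuite>::hash_to_F(b"abc", b"def");
  let b = <Secq256k1 as Ciphersuite>::hash_to_F(b"abcdef", b"");
  assert_eq!(a, b);
}
```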
+#[derive(Clone, Copy, PartialEq, Eq, Debug, zeroize::Zeroize)] +pub struct Secq256k1; +impl ciphersuite::Ciphersuite for Secq256k1 { + type F = Scalar; + type G = Point; + type H = blake2::Blake2b512; + + const ID: &'static [u8] = b"secq256k1"; + + fn generator() -> Self::G { + Point::generator() + } + + fn reduce_512(scalar: [u8; 64]) -> Self::F { + Scalar::wide_reduce(scalar) + } + + fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F { + use blake2::Digest; + Scalar::wide_reduce(Self::H::digest([dst, data].concat()).as_slice().try_into().unwrap()) + } + + // We override the provided impl, which compares against the reserialization, because + // we already require canonicity + #[cfg(any(feature = "alloc", feature = "std"))] + #[allow(non_snake_case)] + fn read_G(reader: &mut R) -> io::Result { + use ciphersuite::group::GroupEncoding; + + let mut encoding = ::Repr::default(); + reader.read_exact(encoding.as_mut())?; + + let point = Option::::from(Self::G::from_bytes(&encoding)) + .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "invalid point"))?; + Ok(point) + } +} + +impl generalized_bulletproofs_ec_gadgets::DiscreteLogParameters for Secq256k1 { + type ScalarBits = U<{ Scalar::NUM_BITS as usize }>; + type XCoefficients = Quot, U2>; + type XCoefficientsMinusOne = Diff; + type YxCoefficients = Diff, U1>, U2>, U2>; +} diff --git a/crypto/evrf/secq256k1/src/point.rs b/crypto/evrf/secq256k1/src/point.rs new file mode 100644 index 00000000..9b590cdf --- /dev/null +++ b/crypto/evrf/secq256k1/src/point.rs @@ -0,0 +1,418 @@ +use core::{ + ops::{DerefMut, Add, AddAssign, Neg, Sub, SubAssign, Mul, MulAssign}, + iter::Sum, +}; + +use rand_core::RngCore; + +use zeroize::Zeroize; +use subtle::{Choice, CtOption, ConstantTimeEq, ConditionallySelectable, ConditionallyNegatable}; + +use generic_array::{typenum::U33, GenericArray}; + +use ciphersuite::group::{ + ff::{Field, PrimeField, PrimeFieldBits}, + Group, GroupEncoding, + prime::PrimeGroup, +}; + +use crate::{backend::u8_from_bool, Scalar, FieldElement}; + +fn recover_y(x: FieldElement) -> CtOption { + // x**3 + B since a = 0 + ((x.square() * x) + FieldElement::from(7u64)).sqrt() +} + +/// Point. 
+#[derive(Clone, Copy, Debug, Zeroize)] +#[repr(C)] +pub struct Point { + x: FieldElement, // / Z + y: FieldElement, // / Z + z: FieldElement, +} + +impl ConstantTimeEq for Point { + fn ct_eq(&self, other: &Self) -> Choice { + let x1 = self.x * other.z; + let x2 = other.x * self.z; + + let y1 = self.y * other.z; + let y2 = other.y * self.z; + + // Identity or equivalent + (self.z.is_zero() & other.z.is_zero()) | (x1.ct_eq(&x2) & y1.ct_eq(&y2)) + } +} + +impl PartialEq for Point { + fn eq(&self, other: &Point) -> bool { + self.ct_eq(other).into() + } +} + +impl Eq for Point {} + +impl ConditionallySelectable for Point { + fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { + Point { + x: FieldElement::conditional_select(&a.x, &b.x, choice), + y: FieldElement::conditional_select(&a.y, &b.y, choice), + z: FieldElement::conditional_select(&a.z, &b.z, choice), + } + } +} + +impl Add for Point { + type Output = Point; + #[allow(non_snake_case)] + fn add(self, other: Self) -> Self { + // add-2015-rcb + + let a = FieldElement::ZERO; + let B = FieldElement::from(7u64); + let b3 = B + B + B; + + let X1 = self.x; + let Y1 = self.y; + let Z1 = self.z; + let X2 = other.x; + let Y2 = other.y; + let Z2 = other.z; + + let t0 = X1 * X2; + let t1 = Y1 * Y2; + let t2 = Z1 * Z2; + let t3 = X1 + Y1; + let t4 = X2 + Y2; + let t3 = t3 * t4; + let t4 = t0 + t1; + let t3 = t3 - t4; + let t4 = X1 + Z1; + let t5 = X2 + Z2; + let t4 = t4 * t5; + let t5 = t0 + t2; + let t4 = t4 - t5; + let t5 = Y1 + Z1; + let X3 = Y2 + Z2; + let t5 = t5 * X3; + let X3 = t1 + t2; + let t5 = t5 - X3; + let Z3 = a * t4; + let X3 = b3 * t2; + let Z3 = X3 + Z3; + let X3 = t1 - Z3; + let Z3 = t1 + Z3; + let Y3 = X3 * Z3; + let t1 = t0 + t0; + let t1 = t1 + t0; + let t2 = a * t2; + let t4 = b3 * t4; + let t1 = t1 + t2; + let t2 = t0 - t2; + let t2 = a * t2; + let t4 = t4 + t2; + let t0 = t1 * t4; + let Y3 = Y3 + t0; + let t0 = t5 * t4; + let X3 = t3 * X3; + let X3 = X3 - t0; + let t0 = t3 * t1; + let Z3 = t5 * Z3; + let Z3 = Z3 + t0; + Point { x: X3, y: Y3, z: Z3 } + } +} + +impl AddAssign for Point { + fn add_assign(&mut self, other: Point) { + *self = *self + other; + } +} + +impl Add<&Point> for Point { + type Output = Point; + fn add(self, other: &Point) -> Point { + self + *other + } +} + +impl AddAssign<&Point> for Point { + fn add_assign(&mut self, other: &Point) { + *self += *other; + } +} + +impl Neg for Point { + type Output = Point; + fn neg(self) -> Self { + Point { x: self.x, y: -self.y, z: self.z } + } +} + +impl Sub for Point { + type Output = Point; + #[allow(clippy::suspicious_arithmetic_impl)] + fn sub(self, other: Self) -> Self { + self + other.neg() + } +} + +impl SubAssign for Point { + fn sub_assign(&mut self, other: Point) { + *self = *self - other; + } +} + +impl Sub<&Point> for Point { + type Output = Point; + fn sub(self, other: &Point) -> Point { + self - *other + } +} + +impl SubAssign<&Point> for Point { + fn sub_assign(&mut self, other: &Point) { + *self -= *other; + } +} + +impl Group for Point { + type Scalar = Scalar; + fn random(mut rng: impl RngCore) -> Self { + loop { + let mut bytes = GenericArray::default(); + rng.fill_bytes(bytes.as_mut()); + let opt = Self::from_bytes(&bytes); + if opt.is_some().into() { + return opt.unwrap(); + } + } + } + fn identity() -> Self { + Point { x: FieldElement::ZERO, y: FieldElement::ONE, z: FieldElement::ZERO } + } + fn generator() -> Self { + // Point with the lowest valid x-coordinate + Point { + x: FieldElement::from_repr( + 
hex_literal::hex!("0000000000000000000000000000000000000000000000000000000000000001") + .into(), + ) + .unwrap(), + y: FieldElement::from_repr( + hex_literal::hex!("0C7C97045A2074634909ABDF82C9BD0248916189041F2AF0C1B800D1FFC278C0") + .into(), + ) + .unwrap(), + z: FieldElement::ONE, + } + } + fn is_identity(&self) -> Choice { + self.z.ct_eq(&FieldElement::ZERO) + } + #[allow(non_snake_case)] + fn double(&self) -> Self { + // dbl-2007-bl + + let a = FieldElement::ZERO; + + let X1 = self.x; + let Y1 = self.y; + let Z1 = self.z; + + let XX = X1 * X1; + let ZZ = Z1 * Z1; + let w = (a * ZZ) + XX.double() + XX; + let s = (Y1 * Z1).double(); + let ss = s * s; + let sss = s * ss; + let R = Y1 * s; + let RR = R * R; + let B = X1 + R; + let B = (B * B) - XX - RR; + let h = (w * w) - B.double(); + let X3 = h * s; + let Y3 = w * (B - h) - RR.double(); + let Z3 = sss; + + let res = Self { x: X3, y: Y3, z: Z3 }; + // If self is identity, res will not be well-formed + // Accordingly, we return self if self was the identity + Self::conditional_select(&res, self, self.is_identity()) + } +} + +impl Sum for Point { + fn sum>(iter: I) -> Point { + let mut res = Self::identity(); + for i in iter { + res += i; + } + res + } +} + +impl<'a> Sum<&'a Point> for Point { + fn sum>(iter: I) -> Point { + Point::sum(iter.cloned()) + } +} + +impl Mul for Point { + type Output = Point; + fn mul(self, mut other: Scalar) -> Point { + // Precompute the optimal amount that's a multiple of 2 + let mut table = [Point::identity(); 16]; + table[1] = self; + for i in 2 .. 16 { + table[i] = table[i - 1] + self; + } + + let mut res = Self::identity(); + let mut bits = 0; + for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() { + bits <<= 1; + let mut bit = u8_from_bool(bit.deref_mut()); + bits |= bit; + bit.zeroize(); + + if ((i + 1) % 4) == 0 { + if i != 3 { + for _ in 0 .. 
4 { + res = res.double(); + } + } + + let mut term = table[0]; + for (j, candidate) in table[1 ..].iter().enumerate() { + let j = j + 1; + term = Self::conditional_select(&term, candidate, usize::from(bits).ct_eq(&j)); + } + res += term; + bits = 0; + } + } + other.zeroize(); + res + } +} + +impl MulAssign for Point { + fn mul_assign(&mut self, other: Scalar) { + *self = *self * other; + } +} + +impl Mul<&Scalar> for Point { + type Output = Point; + fn mul(self, other: &Scalar) -> Point { + self * *other + } +} + +impl MulAssign<&Scalar> for Point { + fn mul_assign(&mut self, other: &Scalar) { + *self *= *other; + } +} + +impl GroupEncoding for Point { + type Repr = GenericArray; + + fn from_bytes(bytes: &Self::Repr) -> CtOption { + // Extract and clear the sign bit + let sign = Choice::from(bytes[0] & 1); + + // Parse x, recover y + FieldElement::from_repr(*GenericArray::from_slice(&bytes[1 ..])).and_then(|x| { + let is_identity = x.is_zero(); + + let y = recover_y(x).map(|mut y| { + y.conditional_negate(y.is_odd().ct_eq(&!sign)); + y + }); + + // If this the identity, set y to 1 + let y = + CtOption::conditional_select(&y, &CtOption::new(FieldElement::ONE, 1.into()), is_identity); + // If this the identity, set y to 1 and z to 0 (instead of 1) + let z = <_>::conditional_select(&FieldElement::ONE, &FieldElement::ZERO, is_identity); + // Create the point if we have a y solution + let point = y.map(|y| Point { x, y, z }); + + let not_negative_zero = !(is_identity & sign); + // Only return the point if it isn't -0 and the sign byte wasn't malleated + CtOption::conditional_select( + &CtOption::new(Point::identity(), 0.into()), + &point, + not_negative_zero & ((bytes[0] & 1).ct_eq(&bytes[0])), + ) + }) + } + + fn from_bytes_unchecked(bytes: &Self::Repr) -> CtOption { + Point::from_bytes(bytes) + } + + fn to_bytes(&self) -> Self::Repr { + let Some(z) = Option::::from(self.z.invert()) else { + return *GenericArray::from_slice(&[0; 33]); + }; + let x = self.x * z; + let y = self.y * z; + + let mut res = *GenericArray::from_slice(&[0; 33]); + res[1 ..].as_mut().copy_from_slice(&x.to_repr()); + + // The following conditional select normalizes the sign to 0 when x is 0 + let y_sign = u8::conditional_select(&y.is_odd().unwrap_u8(), &0, x.ct_eq(&FieldElement::ZERO)); + res[0] |= y_sign; + res + } +} + +impl PrimeGroup for Point {} + +impl ec_divisors::DivisorCurve for Point { + type FieldElement = FieldElement; + + fn a() -> Self::FieldElement { + FieldElement::from(0u64) + } + fn b() -> Self::FieldElement { + FieldElement::from(7u64) + } + + fn to_xy(point: Self) -> Option<(Self::FieldElement, Self::FieldElement)> { + let z: Self::FieldElement = Option::from(point.z.invert())?; + Some((point.x * z, point.y * z)) + } +} + +#[test] +fn test_curve() { + ff_group_tests::group::test_prime_group_bits::<_, Point>(&mut rand_core::OsRng); +} + +#[test] +fn generator() { + assert_eq!( + Point::generator(), + Point::from_bytes(GenericArray::from_slice(&hex_literal::hex!( + "000000000000000000000000000000000000000000000000000000000000000001" + ))) + .unwrap() + ); +} + +#[test] +fn zero_x_is_invalid() { + assert!(Option::::from(recover_y(FieldElement::ZERO)).is_none()); +} + +// Checks random won't infinitely loop +#[test] +fn random() { + Point::random(&mut rand_core::OsRng); +} diff --git a/crypto/evrf/secq256k1/src/scalar.rs b/crypto/evrf/secq256k1/src/scalar.rs new file mode 100644 index 00000000..1bc930a2 --- /dev/null +++ b/crypto/evrf/secq256k1/src/scalar.rs @@ -0,0 +1,52 @@ +use 
zeroize::{DefaultIsZeroes, Zeroize}; + +use crypto_bigint::{ + U256, U512, + modular::constant_mod::{ResidueParams, Residue}, +}; + +const MODULUS_STR: &str = "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F"; + +impl_modulus!(SecQ, U256, MODULUS_STR); +type ResidueType = Residue; + +/// The Scalar field of secq256k1. +/// +/// This is equivalent to the field secp256k1 is defined over. +#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)] +#[repr(C)] +pub struct Scalar(pub(crate) ResidueType); + +impl DefaultIsZeroes for Scalar {} + +pub(crate) const MODULUS: U256 = U256::from_be_hex(MODULUS_STR); + +const WIDE_MODULUS: U512 = U512::from_be_hex(concat!( + "0000000000000000000000000000000000000000000000000000000000000000", + "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", +)); + +field!( + Scalar, + ResidueType, + MODULUS_STR, + MODULUS, + WIDE_MODULUS, + 256, + 3, + 1, + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e", + "0000000000000000000000000000000000000000000000000000000000000009", +); + +impl Scalar { + /// Perform a wide reduction, presumably to obtain a non-biased Scalar field element. + pub fn wide_reduce(bytes: [u8; 64]) -> Scalar { + Scalar(Residue::new(&reduce(U512::from_le_slice(bytes.as_ref())))) + } +} + +#[test] +fn test_scalar_field() { + ff_group_tests::prime_field::test_prime_field_bits::<_, Scalar>(&mut rand_core::OsRng); +} diff --git a/crypto/ff-group-tests/Cargo.toml b/crypto/ff-group-tests/Cargo.toml index aa328fa1..6b25a2b9 100644 --- a/crypto/ff-group-tests/Cargo.toml +++ b/crypto/ff-group-tests/Cargo.toml @@ -30,4 +30,4 @@ p256 = { version = "^0.13.1", default-features = false, features = ["std", "arit bls12_381 = "0.8" -pasta_curves = "0.5" +pasta_curves = { git = "https://github.com/kayabaNerve/pasta_curves", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616" } diff --git a/crypto/ff-group-tests/src/group.rs b/crypto/ff-group-tests/src/group.rs index 0f0aab4e..f2b69acc 100644 --- a/crypto/ff-group-tests/src/group.rs +++ b/crypto/ff-group-tests/src/group.rs @@ -154,18 +154,20 @@ pub fn test_group(rng: &mut R) { /// Test encoding and decoding of group elements. 
pub fn test_encoding() { - let test = |point: G, msg| { + let test = |point: G, msg| -> G { let bytes = point.to_bytes(); let mut repr = G::Repr::default(); repr.as_mut().copy_from_slice(bytes.as_ref()); - assert_eq!(point, G::from_bytes(&repr).unwrap(), "{msg} couldn't be encoded and decoded"); + let decoded = G::from_bytes(&repr).unwrap(); + assert_eq!(point, decoded, "{msg} couldn't be encoded and decoded"); assert_eq!( point, G::from_bytes_unchecked(&repr).unwrap(), "{msg} couldn't be encoded and decoded", ); + decoded }; - test(G::identity(), "identity"); + assert!(bool::from(test(G::identity(), "identity").is_identity())); test(G::generator(), "generator"); test(G::generator() + G::generator(), "(generator * 2)"); } diff --git a/crypto/frost/Cargo.toml b/crypto/frost/Cargo.toml index 7c32b6f0..1210c9ed 100644 --- a/crypto/frost/Cargo.toml +++ b/crypto/frost/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/frost" authors = ["Luke Parker "] keywords = ["frost", "multisig", "threshold"] edition = "2021" -rust-version = "1.79" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true @@ -17,7 +17,7 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [dependencies] -thiserror = "1" +thiserror = { version = "2", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false, features = ["std"] } rand_chacha = { version = "0.3", default-features = false, features = ["std"] } diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index 5115244f..0351584a 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -203,14 +203,15 @@ pub trait SignMachine: Send + Sync + Sized { /// SignatureMachine this SignMachine turns into. type SignatureMachine: SignatureMachine; - /// Cache this preprocess for usage later. This cached preprocess MUST only be used once. Reuse - /// of it enables recovery of your private key share. Third-party recovery of a cached preprocess - /// also enables recovery of your private key share, so this MUST be treated with the same - /// security as your private key share. + /// Cache this preprocess for usage later. + /// + /// This cached preprocess MUST only be used once. Reuse of it enables recovery of your private + /// key share. Third-party recovery of a cached preprocess also enables recovery of your private + /// key share, so this MUST be treated with the same security as your private key share. fn cache(self) -> CachedPreprocess; /// Create a sign machine from a cached preprocess. - + /// /// After this, the preprocess must be deleted so it's never reused. Any reuse will presumably /// cause the signer to leak their secret share. fn from_cache( @@ -219,11 +220,14 @@ pub trait SignMachine: Send + Sync + Sized { cache: CachedPreprocess, ) -> (Self, Self::Preprocess); - /// Read a Preprocess message. Despite taking self, this does not save the preprocess. - /// It must be externally cached and passed into sign. + /// Read a Preprocess message. + /// + /// Despite taking self, this does not save the preprocess. It must be externally cached and + /// passed into sign. fn read_preprocess(&self, reader: &mut R) -> io::Result; /// Sign a message. + /// /// Takes in the participants' preprocess messages. Returns the signature share to be broadcast /// to all participants, over an authenticated channel. The parties who participate here will /// become the signing set for this session. 
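The warning above, that reusing a cached preprocess enables recovery of the private key share, follows from simple algebra. The sketch below is illustrative only (not part of this diff) and uses plain Ristretto scalars to show why two Schnorr signatures sharing a nonce leak the signing scalar; the same argument applies to a FROST key share.

```rust
// Minimal sketch (not part of this diff) of why a cached preprocess must never be reused:
// two Schnorr signatures sharing a nonce r, s1 = r + c1 * x and s2 = r + c2 * x, let anyone
// solve for the private scalar x.
use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};
use rand_core::OsRng;

fn main() {
  type F = <Ristretto as Ciphersuite>::F;
  let x = F::random(&mut OsRng); // private key (share)
  let r = F::random(&mut OsRng); // nonce reused across two signatures
  let (c1, c2) = (F::random(&mut OsRng), F::random(&mut OsRng)); // distinct challenges
  let (s1, s2) = (r + (c1 * x), r + (c2 * x));
  // (s1 - s2) / (c1 - c2) = ((c1 - c2) * x) / (c1 - c2) = x
  let recovered = (s1 - s2) * (c1 - c2).invert().unwrap();
  assert_eq!(recovered, x);
}
```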
diff --git a/crypto/frost/src/tests/vectors.rs b/crypto/frost/src/tests/vectors.rs index 7be6478a..dc0453a1 100644 --- a/crypto/frost/src/tests/vectors.rs +++ b/crypto/frost/src/tests/vectors.rs @@ -122,6 +122,7 @@ fn vectors_to_multisig_keys(vectors: &Vectors) -> HashMap"] keywords = ["multiexp", "ff", "group"] edition = "2021" -rust-version = "1.79" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true diff --git a/crypto/multiexp/src/lib.rs b/crypto/multiexp/src/lib.rs index dfd8e033..604d0fd6 100644 --- a/crypto/multiexp/src/lib.rs +++ b/crypto/multiexp/src/lib.rs @@ -59,7 +59,7 @@ pub(crate) fn prep_bits>( for pair in pairs { let p = groupings.len(); let mut bits = pair.0.to_le_bits(); - groupings.push(vec![0; (bits.len() + (w_usize - 1)) / w_usize]); + groupings.push(vec![0; bits.len().div_ceil(w_usize)]); for (i, mut bit) in bits.iter_mut().enumerate() { let mut bit = u8_from_bool(&mut bit); diff --git a/crypto/schnorr/Cargo.toml b/crypto/schnorr/Cargo.toml index 2ea04f5b..06a9710e 100644 --- a/crypto/schnorr/Cargo.toml +++ b/crypto/schnorr/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/schnorr" authors = ["Luke Parker "] keywords = ["schnorr", "ff", "group"] edition = "2021" -rust-version = "1.79" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true diff --git a/crypto/schnorrkel/Cargo.toml b/crypto/schnorrkel/Cargo.toml index 2508bef0..68be2135 100644 --- a/crypto/schnorrkel/Cargo.toml +++ b/crypto/schnorrkel/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/schnorrkel" authors = ["Luke Parker "] keywords = ["frost", "multisig", "threshold", "schnorrkel"] edition = "2021" -rust-version = "1.79" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true diff --git a/crypto/transcript/Cargo.toml b/crypto/transcript/Cargo.toml index 84e08abf..566ad56b 100644 --- a/crypto/transcript/Cargo.toml +++ b/crypto/transcript/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/transcript" authors = ["Luke Parker "] keywords = ["transcript"] edition = "2021" -rust-version = "1.79" +rust-version = "1.73" [package.metadata.docs.rs] all-features = true diff --git a/deny.toml b/deny.toml index c5fe2808..c284fc0f 100644 --- a/deny.toml +++ b/deny.toml @@ -11,6 +11,7 @@ ignore = [ "RUSTSEC-2021-0139", # https://github.com/serai-dex/serai/228 "RUSTSEC-2022-0061", # https://github.com/serai-dex/serai/227 "RUSTSEC-2024-0370", # proc-macro-error is unmaintained + "RUSTSEC-2024-0384", # instant is unmaintained ] [licenses] @@ -27,7 +28,8 @@ allow = [ "BSD-2-Clause", "BSD-3-Clause", "ISC", - "Unicode-DFS-2016", + "Zlib", + "Unicode-3.0", "OpenSSL", # Non-invasive copyleft @@ -39,16 +41,43 @@ allow = [ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-env" }, + { allow = ["AGPL-3.0"], name = "serai-task" }, - { allow = ["AGPL-3.0"], name = "ethereum-serai" }, + { allow = ["AGPL-3.0"], name = "ethereum-schnorr-contract" }, { allow = ["AGPL-3.0"], name = "serai-ethereum-relayer" }, { allow = ["AGPL-3.0"], name = "serai-message-queue" }, { allow = ["AGPL-3.0"], name = "serai-processor-messages" }, - { allow = ["AGPL-3.0"], name = "serai-processor" }, + { allow = ["AGPL-3.0"], name = "serai-processor-primitives" }, - { allow = ["AGPL-3.0"], name = "tributary-chain" }, + { allow = ["AGPL-3.0"], name = "serai-processor-key-gen" }, + { allow = ["AGPL-3.0"], name = "serai-processor-frost-attempt-manager" }, + + { allow = 
["AGPL-3.0"], name = "serai-processor-scanner" }, + { allow = ["AGPL-3.0"], name = "serai-processor-scheduler-primitives" }, + { allow = ["AGPL-3.0"], name = "serai-processor-utxo-scheduler-primitives" }, + { allow = ["AGPL-3.0"], name = "serai-processor-utxo-scheduler" }, + { allow = ["AGPL-3.0"], name = "serai-processor-transaction-chaining-scheduler" }, + { allow = ["AGPL-3.0"], name = "serai-processor-smart-contract-scheduler" }, + { allow = ["AGPL-3.0"], name = "serai-processor-signers" }, + + { allow = ["AGPL-3.0"], name = "serai-bitcoin-processor" }, + { allow = ["AGPL-3.0"], name = "serai-processor-bin" }, + { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-primitives" }, + { allow = ["AGPL-3.0"], name = "serai-ethereum-test-primitives" }, + { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-deployer" }, + { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-router" }, + { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-erc20" }, + { allow = ["AGPL-3.0"], name = "serai-ethereum-processor" }, + { allow = ["AGPL-3.0"], name = "serai-monero-processor" }, + + { allow = ["AGPL-3.0"], name = "tributary-sdk" }, + { allow = ["AGPL-3.0"], name = "serai-cosign" }, + { allow = ["AGPL-3.0"], name = "serai-coordinator-substrate" }, + { allow = ["AGPL-3.0"], name = "serai-coordinator-tributary" }, + { allow = ["AGPL-3.0"], name = "serai-coordinator-p2p" }, + { allow = ["AGPL-3.0"], name = "serai-coordinator-libp2p-p2p" }, { allow = ["AGPL-3.0"], name = "serai-coordinator" }, { allow = ["AGPL-3.0"], name = "serai-coins-pallet" }, @@ -105,4 +134,5 @@ allow-git = [ "https://github.com/rust-lang-nursery/lazy-static.rs", "https://github.com/serai-dex/substrate-bip39", "https://github.com/serai-dex/substrate", + "https://github.com/kayabaNerve/pasta_curves", ] diff --git a/message-queue/Cargo.toml b/message-queue/Cargo.toml index 9eeaa5ce..7f180933 100644 --- a/message-queue/Cargo.toml +++ b/message-queue/Cargo.toml @@ -8,6 +8,7 @@ authors = ["Luke Parker "] keywords = [] edition = "2021" publish = false +rust-version = "1.80" [package.metadata.docs.rs] all-features = true diff --git a/message-queue/src/client.rs b/message-queue/src/client.rs index 3aaf5a24..b503c232 100644 --- a/message-queue/src/client.rs +++ b/message-queue/src/client.rs @@ -64,22 +64,20 @@ impl MessageQueue { Self::new(service, url, priv_key) } - #[must_use] - async fn send(socket: &mut TcpStream, msg: MessageQueueRequest) -> bool { + async fn send(socket: &mut TcpStream, msg: MessageQueueRequest) -> Result<(), String> { let msg = borsh::to_vec(&msg).unwrap(); - let Ok(()) = socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await else { - log::warn!("couldn't send the message len"); - return false; + match socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await { + Ok(()) => {} + Err(e) => Err(format!("couldn't send the message len: {e:?}"))?, }; - let Ok(()) = socket.write_all(&msg).await else { - log::warn!("couldn't write the message"); - return false; - }; - true + match socket.write_all(&msg).await { + Ok(()) => {} + Err(e) => Err(format!("couldn't write the message: {e:?}"))?, + } + Ok(()) } - pub async fn queue(&self, metadata: Metadata, msg: Vec) { - // TODO: Should this use OsRng? Deterministic or deterministic + random may be better. 
+ pub async fn queue(&self, metadata: Metadata, msg: Vec) -> Result<(), String> { let nonce = Zeroizing::new(::F::random(&mut OsRng)); let nonce_pub = Ristretto::generator() * nonce.deref(); let sig = SchnorrSignature::::sign( @@ -97,6 +95,21 @@ impl MessageQueue { .serialize(); let msg = MessageQueueRequest::Queue { meta: metadata, msg, sig }; + + let mut socket = match TcpStream::connect(&self.url).await { + Ok(socket) => socket, + Err(e) => Err(format!("failed to connect to the message-queue service: {e:?}"))?, + }; + Self::send(&mut socket, msg.clone()).await?; + match socket.read_u8().await { + Ok(1) => {} + Ok(b) => Err(format!("message-queue didn't return for 1 for its ack, recieved: {b}"))?, + Err(e) => Err(format!("failed to read the response from the message-queue service: {e:?}"))?, + } + Ok(()) + } + + pub async fn queue_with_retry(&self, metadata: Metadata, msg: Vec) { let mut first = true; loop { // Sleep, so we don't hammer re-attempts @@ -105,14 +118,9 @@ impl MessageQueue { } first = false; - let Ok(mut socket) = TcpStream::connect(&self.url).await else { continue }; - if !Self::send(&mut socket, msg.clone()).await { - continue; + if self.queue(metadata.clone(), msg.clone()).await.is_ok() { + break; } - if socket.read_u8().await.ok() != Some(1) { - continue; - } - break; } } @@ -136,7 +144,7 @@ impl MessageQueue { log::trace!("opened socket for next"); loop { - if !Self::send(&mut socket, msg.clone()).await { + if Self::send(&mut socket, msg.clone()).await.is_err() { continue 'outer; } let status = match socket.read_u8().await { @@ -224,7 +232,7 @@ impl MessageQueue { first = false; let Ok(mut socket) = TcpStream::connect(&self.url).await else { continue }; - if !Self::send(&mut socket, msg.clone()).await { + if Self::send(&mut socket, msg.clone()).await.is_err() { continue; } if socket.read_u8().await.ok() != Some(1) { diff --git a/message-queue/src/main.rs b/message-queue/src/main.rs index b1c6e85b..39bbdd02 100644 --- a/message-queue/src/main.rs +++ b/message-queue/src/main.rs @@ -72,6 +72,10 @@ pub(crate) fn queue_message( // Assert one, and only one of these, is the coordinator assert!(matches!(meta.from, Service::Coordinator) ^ matches!(meta.to, Service::Coordinator)); + // Lock the queue + let queue_lock = QUEUES.read().unwrap(); + let mut queue_lock = queue_lock[&(meta.from, meta.to)].write().unwrap(); + // Verify (from, to, intent) hasn't been prior seen fn key(domain: &'static [u8], key: impl AsRef<[u8]>) -> Vec { [&[u8::try_from(domain.len()).unwrap()], domain, key.as_ref()].concat() @@ -93,7 +97,7 @@ pub(crate) fn queue_message( DbTxn::put(&mut txn, intent_key, []); // Queue it - let id = QUEUES.read().unwrap()[&(meta.from, meta.to)].write().unwrap().queue_message( + let id = queue_lock.queue_message( &mut txn, QueuedMessage { from: meta.from, diff --git a/mini/Cargo.toml b/mini/Cargo.toml index dfef7e56..075c2887 100644 --- a/mini/Cargo.toml +++ b/mini/Cargo.toml @@ -8,6 +8,7 @@ authors = ["Luke Parker "] keywords = [] edition = "2021" publish = false +rust-version = "1.71" [package.metadata.docs.rs] all-features = true diff --git a/networks/bitcoin/Cargo.toml b/networks/bitcoin/Cargo.toml index 5ab44cc6..5a3ce471 100644 --- a/networks/bitcoin/Cargo.toml +++ b/networks/bitcoin/Cargo.toml @@ -18,7 +18,7 @@ workspace = true [dependencies] std-shims = { version = "0.1.1", path = "../../common/std-shims", default-features = false } -thiserror = { version = "1", default-features = false, optional = true } +thiserror = { version = "2", default-features = false } 
zeroize = { version = "^1.5", default-features = false } rand_core = { version = "0.6", default-features = false } @@ -44,7 +44,7 @@ tokio = { version = "1", features = ["macros"] } std = [ "std-shims/std", - "thiserror", + "thiserror/std", "zeroize/std", "rand_core/std", diff --git a/networks/ethereum/.gitignore b/networks/ethereum/.gitignore deleted file mode 100644 index 2dccdce9..00000000 --- a/networks/ethereum/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -# Solidity build outputs -cache -artifacts diff --git a/networks/ethereum/Cargo.toml b/networks/ethereum/Cargo.toml deleted file mode 100644 index cfe792b2..00000000 --- a/networks/ethereum/Cargo.toml +++ /dev/null @@ -1,49 +0,0 @@ -[package] -name = "ethereum-serai" -version = "0.1.0" -description = "An Ethereum library supporting Schnorr signing and on-chain verification" -license = "AGPL-3.0-only" -repository = "https://github.com/serai-dex/serai/tree/develop/networks/ethereum" -authors = ["Luke Parker ", "Elizabeth Binks "] -edition = "2021" -publish = false -rust-version = "1.79" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] - -[lints] -workspace = true - -[dependencies] -thiserror = { version = "1", default-features = false } - -rand_core = { version = "0.6", default-features = false, features = ["std"] } - -transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["recommended"] } - -group = { version = "0.13", default-features = false } -k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa", "arithmetic"] } -frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["secp256k1"] } - -alloy-core = { version = "0.8", default-features = false } -alloy-sol-types = { version = "0.8", default-features = false, features = ["json"] } -alloy-consensus = { version = "0.4", default-features = false, features = ["k256"] } -alloy-network = { version = "0.4", default-features = false } -alloy-rpc-types-eth = { version = "0.4", default-features = false } -alloy-rpc-client = { version = "0.4", default-features = false } -alloy-simple-request-transport = { path = "./alloy-simple-request-transport", default-features = false } -alloy-provider = { version = "0.4", default-features = false } - -alloy-node-bindings = { version = "0.4", default-features = false, optional = true } - -[dev-dependencies] -frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["tests"] } - -tokio = { version = "1", features = ["macros"] } - -alloy-node-bindings = { version = "0.4", default-features = false } - -[features] -tests = ["alloy-node-bindings", "frost/tests"] diff --git a/networks/ethereum/README.md b/networks/ethereum/README.md deleted file mode 100644 index 0090b26b..00000000 --- a/networks/ethereum/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# Ethereum - -This package contains Ethereum-related functionality, specifically deploying and -interacting with Serai contracts. - -While `monero-serai` and `bitcoin-serai` are general purpose libraries, -`ethereum-serai` is Serai specific. If any of the utilities are generally -desired, please fork and maintain your own copy to ensure the desired -functionality is preserved, or open an issue to request we make this library -general purpose. 
- -### Dependencies - -- solc -- [Foundry](https://github.com/foundry-rs/foundry) diff --git a/networks/ethereum/alloy-simple-request-transport/Cargo.toml b/networks/ethereum/alloy-simple-request-transport/Cargo.toml index d0cad613..de5925df 100644 --- a/networks/ethereum/alloy-simple-request-transport/Cargo.toml +++ b/networks/ethereum/alloy-simple-request-transport/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "alloy-simple-request-transport" -version = "0.1.0" +version = "0.1.1" description = "A transport for alloy based off simple-request" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/networks/ethereum/alloy-simple-request-transport" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.74" +rust-version = "1.81" [package.metadata.docs.rs] all-features = true @@ -19,10 +19,10 @@ workspace = true tower = "0.5" serde_json = { version = "1", default-features = false } -simple-request = { path = "../../../common/request", default-features = false } +simple-request = { path = "../../../common/request", version = "0.1", default-features = false } -alloy-json-rpc = { version = "0.4", default-features = false } -alloy-transport = { version = "0.4", default-features = false } +alloy-json-rpc = { version = "0.9", default-features = false } +alloy-transport = { version = "0.9", default-features = false } [features] default = ["tls"] diff --git a/networks/ethereum/build-contracts/Cargo.toml b/networks/ethereum/build-contracts/Cargo.toml new file mode 100644 index 00000000..6b288d72 --- /dev/null +++ b/networks/ethereum/build-contracts/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "build-solidity-contracts" +version = "0.1.1" +description = "A helper function to build Solidity contracts" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/networks/ethereum/build-contracts" +authors = ["Luke Parker "] +edition = "2021" +rust-version = "1.81" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/networks/ethereum/build-contracts/LICENSE b/networks/ethereum/build-contracts/LICENSE new file mode 100644 index 00000000..41d5a261 --- /dev/null +++ b/networks/ethereum/build-contracts/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/networks/ethereum/build-contracts/README.md b/networks/ethereum/build-contracts/README.md new file mode 100644 index 00000000..437f15c2 --- /dev/null +++ b/networks/ethereum/build-contracts/README.md @@ -0,0 +1,4 @@ +# Build Solidity Contracts + +A helper function to build Solidity contracts. This is intended to be called +from within build scripts. 
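Since the README states this helper is intended to be called from build scripts, a hedged sketch of such a `build.rs` may be useful. The paths below are purely illustrative; the `build` signature and the solc 0.8.26 requirement are those of the `src/lib.rs` added further down.

```rust
// Illustrative build.rs for a crate consuming build-solidity-contracts (not part of this diff).
// The contract/artifact paths are hypothetical; build() requires solc 0.8.26 on the PATH.
fn main() {
  build_solidity_contracts::build(
    &[],           // extra --include-path entries, if any
    "./contracts", // directory walked for *.sol files
    "./artifacts", // where solc writes its --bin/--abi outputs
  )
  .expect("failed to build the Solidity contracts");
}
```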
diff --git a/networks/ethereum/build-contracts/src/lib.rs b/networks/ethereum/build-contracts/src/lib.rs new file mode 100644 index 00000000..b1c9c87f --- /dev/null +++ b/networks/ethereum/build-contracts/src/lib.rs @@ -0,0 +1,103 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use std::{path::PathBuf, fs, process::Command}; + +/// Build contracts from the specified path, outputting the artifacts to the specified path. +/// +/// Requires solc 0.8.26. +pub fn build( + include_paths: &[&str], + contracts_path: &str, + artifacts_path: &str, +) -> Result<(), String> { + if !fs::exists(artifacts_path) + .map_err(|e| format!("couldn't check if artifacts directory already exists: {e:?}"))? + { + fs::create_dir(artifacts_path) + .map_err(|e| format!("couldn't create the non-existent artifacts directory: {e:?}"))?; + } + + println!("cargo:rerun-if-changed={contracts_path}/*"); + println!("cargo:rerun-if-changed={artifacts_path}/*"); + + for line in String::from_utf8( + Command::new("solc") + .args(["--version"]) + .output() + .map_err(|_| "couldn't fetch solc output".to_string())? + .stdout, + ) + .map_err(|_| "solc stdout wasn't UTF-8")? + .lines() + { + if let Some(version) = line.strip_prefix("Version: ") { + let version = + version.split('+').next().ok_or_else(|| "no value present on line".to_string())?; + if version != "0.8.26" { + Err(format!("version was {version}, 0.8.26 required"))? + } + } + } + + #[rustfmt::skip] + let mut args = vec![ + "--base-path", ".", + "-o", artifacts_path, "--overwrite", + "--bin", "--bin-runtime", "--abi", + "--via-ir", "--optimize", + "--no-color", + ]; + for include_path in include_paths { + args.push("--include-path"); + args.push(include_path); + } + let mut args = args.into_iter().map(str::to_string).collect::>(); + + let mut queue = vec![PathBuf::from(contracts_path)]; + while let Some(folder) = queue.pop() { + for entry in fs::read_dir(folder).map_err(|e| format!("couldn't read directory: {e:?}"))? { + let entry = entry.map_err(|e| format!("couldn't read directory in entry: {e:?}"))?; + let kind = entry.file_type().map_err(|e| format!("couldn't fetch file type: {e:?}"))?; + if kind.is_dir() { + queue.push(entry.path()); + } + + if kind.is_file() && + entry + .file_name() + .into_string() + .map_err(|_| "file name wasn't a valid UTF-8 string".to_string())? 
+ .ends_with(".sol") + { + args.push( + entry + .path() + .into_os_string() + .into_string() + .map_err(|_| "file path wasn't a valid UTF-8 string".to_string())?, + ); + } + + // We on purposely ignore symlinks to avoid recursive structures + } + } + + let solc = Command::new("solc") + .args(args.clone()) + .output() + .map_err(|_| "couldn't fetch solc output".to_string())?; + let stderr = + String::from_utf8(solc.stderr).map_err(|_| "solc stderr wasn't UTF-8".to_string())?; + if !solc.status.success() { + Err(format!("solc (`{}`) didn't successfully execute: {stderr}", args.join(" ")))?; + } + for line in stderr.lines() { + if line.contains("Error:") { + Err(format!("solc (`{}`) output had error: {stderr}", args.join(" ")))?; + } + } + + Ok(()) +} diff --git a/networks/ethereum/build.rs b/networks/ethereum/build.rs deleted file mode 100644 index 38fcfe00..00000000 --- a/networks/ethereum/build.rs +++ /dev/null @@ -1,41 +0,0 @@ -use std::process::Command; - -fn main() { - println!("cargo:rerun-if-changed=contracts/*"); - println!("cargo:rerun-if-changed=artifacts/*"); - - for line in String::from_utf8(Command::new("solc").args(["--version"]).output().unwrap().stdout) - .unwrap() - .lines() - { - if let Some(version) = line.strip_prefix("Version: ") { - let version = version.split('+').next().unwrap(); - assert_eq!(version, "0.8.25"); - } - } - - #[rustfmt::skip] - let args = [ - "--base-path", ".", - "-o", "./artifacts", "--overwrite", - "--bin", "--abi", - "--via-ir", "--optimize", - - "./contracts/IERC20.sol", - - "./contracts/Schnorr.sol", - "./contracts/Deployer.sol", - "./contracts/Sandbox.sol", - "./contracts/Router.sol", - - "./src/tests/contracts/Schnorr.sol", - "./src/tests/contracts/ERC20.sol", - - "--no-color", - ]; - let solc = Command::new("solc").args(args).output().unwrap(); - assert!(solc.status.success()); - for line in String::from_utf8(solc.stderr).unwrap().lines() { - assert!(!line.starts_with("Error:")); - } -} diff --git a/networks/ethereum/contracts/Deployer.sol b/networks/ethereum/contracts/Deployer.sol deleted file mode 100644 index 475be4c1..00000000 --- a/networks/ethereum/contracts/Deployer.sol +++ /dev/null @@ -1,52 +0,0 @@ -// SPDX-License-Identifier: AGPLv3 -pragma solidity ^0.8.0; - -/* -The expected deployment process of the Router is as follows: - -1) A transaction deploying Deployer is made. Then, a deterministic signature is - created such that an account with an unknown private key is the creator of - the contract. Anyone can fund this address, and once anyone does, the - transaction deploying Deployer can be published by anyone. No other - transaction may be made from that account. - -2) Anyone deploys the Router through the Deployer. This uses a sequential nonce - such that meet-in-the-middle attacks, with complexity 2**80, aren't feasible. - While such attacks would still be feasible if the Deployer's address was - controllable, the usage of a deterministic signature with a NUMS method - prevents that. - -This doesn't have any denial-of-service risks and will resolve once anyone steps -forward as deployer. This does fail to guarantee an identical address across -every chain, though it enables letting anyone efficiently ask the Deployer for -the address (with the Deployer having an identical address on every chain). - -Unfortunately, guaranteeing identical addresses aren't feasible. We'd need the -Deployer contract to use a consistent salt for the Router, yet the Router must -be deployed with a specific public key for Serai. 
Since Ethereum isn't able to -determine a valid public key (one the result of a Serai DKG) from a dishonest -public key, we have to allow multiple deployments with Serai being the one to -determine which to use. - -The alternative would be to have a council publish the Serai key on-Ethereum, -with Serai verifying the published result. This would introduce a DoS risk in -the council not publishing the correct key/not publishing any key. -*/ - -contract Deployer { - event Deployment(bytes32 indexed init_code_hash, address created); - - error DeploymentFailed(); - - function deploy(bytes memory init_code) external { - address created; - assembly { - created := create(0, add(init_code, 0x20), mload(init_code)) - } - if (created == address(0)) { - revert DeploymentFailed(); - } - // These may be emitted out of order upon re-entrancy - emit Deployment(keccak256(init_code), created); - } -} diff --git a/networks/ethereum/contracts/Router.sol b/networks/ethereum/contracts/Router.sol deleted file mode 100644 index c5e1efa2..00000000 --- a/networks/ethereum/contracts/Router.sol +++ /dev/null @@ -1,222 +0,0 @@ -// SPDX-License-Identifier: AGPLv3 -pragma solidity ^0.8.0; - -import "./IERC20.sol"; - -import "./Schnorr.sol"; -import "./Sandbox.sol"; - -contract Router { - // Nonce is incremented for each batch of transactions executed/key update - uint256 public nonce; - - // Current public key's x-coordinate - // This key must always have the parity defined within the Schnorr contract - bytes32 public seraiKey; - - struct OutInstruction { - address to; - Call[] calls; - - uint256 value; - } - - struct Signature { - bytes32 c; - bytes32 s; - } - - event SeraiKeyUpdated( - uint256 indexed nonce, - bytes32 indexed key, - Signature signature - ); - event InInstruction( - address indexed from, - address indexed coin, - uint256 amount, - bytes instruction - ); - // success is a uint256 representing a bitfield of transaction successes - event Executed( - uint256 indexed nonce, - bytes32 indexed batch, - uint256 success, - Signature signature - ); - - // error types - error InvalidKey(); - error InvalidSignature(); - error InvalidAmount(); - error FailedTransfer(); - error TooManyTransactions(); - - modifier _updateSeraiKeyAtEndOfFn( - uint256 _nonce, - bytes32 key, - Signature memory sig - ) { - if ( - (key == bytes32(0)) || - ((bytes32(uint256(key) % Schnorr.Q)) != key) - ) { - revert InvalidKey(); - } - - _; - - seraiKey = key; - emit SeraiKeyUpdated(_nonce, key, sig); - } - - constructor(bytes32 _seraiKey) _updateSeraiKeyAtEndOfFn( - 0, - _seraiKey, - Signature({ c: bytes32(0), s: bytes32(0) }) - ) { - nonce = 1; - } - - // updateSeraiKey validates the given Schnorr signature against the current - // public key, and if successful, updates the contract's public key to the - // given one. 
- function updateSeraiKey( - bytes32 _seraiKey, - Signature calldata sig - ) external _updateSeraiKeyAtEndOfFn(nonce, _seraiKey, sig) { - bytes memory message = - abi.encodePacked("updateSeraiKey", block.chainid, nonce, _seraiKey); - nonce++; - - if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) { - revert InvalidSignature(); - } - } - - function inInstruction( - address coin, - uint256 amount, - bytes memory instruction - ) external payable { - if (coin == address(0)) { - if (amount != msg.value) { - revert InvalidAmount(); - } - } else { - (bool success, bytes memory res) = - address(coin).call( - abi.encodeWithSelector( - IERC20.transferFrom.selector, - msg.sender, - address(this), - amount - ) - ); - - // Require there was nothing returned, which is done by some non-standard - // tokens, or that the ERC20 contract did in fact return true - bool nonStandardResOrTrue = - (res.length == 0) || abi.decode(res, (bool)); - if (!(success && nonStandardResOrTrue)) { - revert FailedTransfer(); - } - } - - /* - Due to fee-on-transfer tokens, emitting the amount directly is frowned upon. - The amount instructed to transfer may not actually be the amount - transferred. - - If we add nonReentrant to every single function which can effect the - balance, we can check the amount exactly matches. This prevents transfers of - less value than expected occurring, at least, not without an additional - transfer to top up the difference (which isn't routed through this contract - and accordingly isn't trying to artificially create events). - - If we don't add nonReentrant, a transfer can be started, and then a new - transfer for the difference can follow it up (again and again until a - rounding error is reached). This contract would believe all transfers were - done in full, despite each only being done in part (except for the last - one). - - Given fee-on-transfer tokens aren't intended to be supported, the only - token planned to be supported is Dai and it doesn't have any fee-on-transfer - logic, fee-on-transfer tokens aren't even able to be supported at this time, - we simply classify this entire class of tokens as non-standard - implementations which induce undefined behavior. It is the Serai network's - role not to add support for any non-standard implementations. - */ - emit InInstruction(msg.sender, coin, amount, instruction); - } - - // execute accepts a list of transactions to execute as well as a signature. - // if signature verification passes, the given transactions are executed. - // if signature verification fails, this function will revert. - function execute( - OutInstruction[] calldata transactions, - Signature calldata sig - ) external { - if (transactions.length > 256) { - revert TooManyTransactions(); - } - - bytes memory message = - abi.encode("execute", block.chainid, nonce, transactions); - uint256 executed_with_nonce = nonce; - // This prevents re-entrancy from causing double spends yet does allow - // out-of-order execution via re-entrancy - nonce++; - - if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) { - revert InvalidSignature(); - } - - uint256 successes; - for (uint256 i = 0; i < transactions.length; i++) { - bool success; - - // If there are no calls, send to `to` the value - if (transactions[i].calls.length == 0) { - (success, ) = transactions[i].to.call{ - value: transactions[i].value, - gas: 5_000 - }(""); - } else { - // If there are calls, ignore `to`. 
Deploy a new Sandbox and proxy the - // calls through that - // - // We could use a single sandbox in order to reduce gas costs, yet that - // risks one person creating an approval that's hooked before another - // user's intended action executes, in order to drain their coins - // - // While technically, that would be a flaw in the sandboxed flow, this - // is robust and prevents such flaws from being possible - // - // We also don't want people to set state via the Sandbox and expect it - // future available when anyone else could set a distinct value - Sandbox sandbox = new Sandbox(); - (success, ) = address(sandbox).call{ - value: transactions[i].value, - // TODO: Have the Call specify the gas up front - gas: 350_000 - }( - abi.encodeWithSelector( - Sandbox.sandbox.selector, - transactions[i].calls - ) - ); - } - - assembly { - successes := or(successes, shl(i, success)) - } - } - emit Executed( - executed_with_nonce, - keccak256(message), - successes, - sig - ); - } -} diff --git a/networks/ethereum/contracts/Sandbox.sol b/networks/ethereum/contracts/Sandbox.sol deleted file mode 100644 index a82a3afd..00000000 --- a/networks/ethereum/contracts/Sandbox.sol +++ /dev/null @@ -1,48 +0,0 @@ -// SPDX-License-Identifier: AGPLv3 -pragma solidity ^0.8.24; - -struct Call { - address to; - uint256 value; - bytes data; -} - -// A minimal sandbox focused on gas efficiency. -// -// The first call is executed if any of the calls fail, making it a fallback. -// All other calls are executed sequentially. -contract Sandbox { - error AlreadyCalled(); - error CallsFailed(); - - function sandbox(Call[] calldata calls) external payable { - // Prevent re-entrancy due to this executing arbitrary calls from anyone - // and anywhere - bool called; - assembly { called := tload(0) } - if (called) { - revert AlreadyCalled(); - } - assembly { tstore(0, 1) } - - // Execute the calls, starting from 1 - for (uint256 i = 1; i < calls.length; i++) { - (bool success, ) = - calls[i].to.call{ value: calls[i].value }(calls[i].data); - - // If this call failed, execute the fallback (call 0) - if (!success) { - (success, ) = - calls[0].to.call{ value: address(this).balance }(calls[0].data); - // If this call also failed, revert entirely - if (!success) { - revert CallsFailed(); - } - return; - } - } - - // We don't clear the re-entrancy guard as this contract should never be - // called again, so there's no reason to spend the effort - } -} diff --git a/networks/ethereum/contracts/Schnorr.sol b/networks/ethereum/contracts/Schnorr.sol deleted file mode 100644 index 8edcdffd..00000000 --- a/networks/ethereum/contracts/Schnorr.sol +++ /dev/null @@ -1,44 +0,0 @@ -// SPDX-License-Identifier: AGPLv3 -pragma solidity ^0.8.0; - -// see https://github.com/noot/schnorr-verify for implementation details -library Schnorr { - // secp256k1 group order - uint256 constant public Q = - 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141; - - // Fixed parity for the public keys used in this contract - // This avoids spending a word passing the parity in a similar style to - // Bitcoin's Taproot - uint8 constant public KEY_PARITY = 27; - - error InvalidSOrA(); - error MalformedSignature(); - - // px := public key x-coord, where the public key has a parity of KEY_PARITY - // message := 32-byte hash of the message - // c := schnorr signature challenge - // s := schnorr signature - function verify( - bytes32 px, - bytes memory message, - bytes32 c, - bytes32 s - ) internal pure returns (bool) { - // ecrecover = (m, v, r, s) -> key 
- // We instead pass the following to obtain the nonce (not the key) - // Then we hash it and verify it matches the challenge - bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q)); - bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q)); - - // For safety, we want each input to ecrecover to be 0 (sa, px, ca) - // The ecreover precomple checks `r` and `s` (`px` and `ca`) are non-zero - // That leaves us to check `sa` are non-zero - if (sa == 0) revert InvalidSOrA(); - address R = ecrecover(sa, KEY_PARITY, px, ca); - if (R == address(0)) revert MalformedSignature(); - - // Check the signature is correct by rebuilding the challenge - return c == keccak256(abi.encodePacked(R, px, message)); - } -} diff --git a/networks/ethereum/relayer/Cargo.toml b/networks/ethereum/relayer/Cargo.toml index 89d8e99e..37b99827 100644 --- a/networks/ethereum/relayer/Cargo.toml +++ b/networks/ethereum/relayer/Cargo.toml @@ -8,6 +8,7 @@ authors = ["Luke Parker "] keywords = [] edition = "2021" publish = false +rust-version = "1.72" [package.metadata.docs.rs] all-features = true diff --git a/networks/ethereum/relayer/README.md b/networks/ethereum/relayer/README.md index beed4b72..fc2d36fd 100644 --- a/networks/ethereum/relayer/README.md +++ b/networks/ethereum/relayer/README.md @@ -1,4 +1,4 @@ # Ethereum Transaction Relayer -This server collects Ethereum router commands to be published, offering an RPC -to fetch them. +This server collects Ethereum transactions to be published, offering an RPC to +fetch them. diff --git a/networks/ethereum/relayer/src/main.rs b/networks/ethereum/relayer/src/main.rs index 54593004..6424c90f 100644 --- a/networks/ethereum/relayer/src/main.rs +++ b/networks/ethereum/relayer/src/main.rs @@ -40,8 +40,8 @@ async fn main() { db }; - // Start command recipience server - // This should not be publicly exposed + // Start transaction recipience server + // This MUST NOT be publicly exposed // TODO: Add auth tokio::spawn({ let db = db.clone(); @@ -58,25 +58,27 @@ async fn main() { let mut buf = vec![0; usize::try_from(msg_len).unwrap()]; let Ok(_) = socket.read_exact(&mut buf).await else { break }; - if buf.len() < 5 { + if buf.len() < (4 + 1) { break; } let nonce = u32::from_le_bytes(buf[.. 4].try_into().unwrap()); let mut txn = db.txn(); + // Save the transaction txn.put(nonce.to_le_bytes(), &buf[4 ..]); txn.commit(); let Ok(()) = socket.write_all(&[1]).await else { break }; - log::info!("received signed command #{nonce}"); + log::info!("received transaction to publish (nonce {nonce})"); } }); } } }); - // Start command fetch server + // Start transaction fetch server // 5132 ^ ((b'E' << 8) | b'R') + 1 + // TODO: JSON-RPC server which returns this as JSON? let server = TcpListener::bind("0.0.0.0:20831").await.unwrap(); loop { let (mut socket, _) = server.accept().await.unwrap(); @@ -84,16 +86,17 @@ async fn main() { tokio::spawn(async move { let db = db.clone(); loop { - // Nonce to get the router comamnd for + // Nonce to get the unsigned transaction for let mut buf = vec![0; 4]; let Ok(_) = socket.read_exact(&mut buf).await else { break }; - let command = db.get(&buf[.. 4]).unwrap_or(vec![]); - let Ok(()) = socket.write_all(&u32::try_from(command.len()).unwrap().to_le_bytes()).await + let transaction = db.get(&buf[.. 
4]).unwrap_or(vec![]); + let Ok(()) = + socket.write_all(&u32::try_from(transaction.len()).unwrap().to_le_bytes()).await else { break; }; - let Ok(()) = socket.write_all(&command).await else { break }; + let Ok(()) = socket.write_all(&transaction).await else { break }; } }); } diff --git a/networks/ethereum/schnorr/Cargo.toml b/networks/ethereum/schnorr/Cargo.toml new file mode 100644 index 00000000..42797bb7 --- /dev/null +++ b/networks/ethereum/schnorr/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "ethereum-schnorr-contract" +version = "0.1.0" +description = "A Solidity contract to verify Schnorr signatures" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/networks/ethereum/schnorr" +authors = ["Luke Parker ", "Elizabeth Binks "] +edition = "2021" +rust-version = "1.81" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +subtle = { version = "2", default-features = false, features = ["std"] } +sha3 = { version = "0.10", default-features = false, features = ["std"] } +group = { version = "0.13", default-features = false, features = ["alloc"] } +k256 = { version = "^0.13.1", default-features = false, features = ["std", "arithmetic"] } + +[build-dependencies] +build-solidity-contracts = { path = "../build-contracts", version = "0.1" } + +[dev-dependencies] +rand_core = { version = "0.6", default-features = false, features = ["std"] } + +k256 = { version = "^0.13.1", default-features = false, features = ["ecdsa"] } + +alloy-core = { version = "0.8", default-features = false } +alloy-sol-types = { version = "0.8", default-features = false } + +alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } +alloy-rpc-types-eth = { version = "0.9", default-features = false } +alloy-rpc-client = { version = "0.9", default-features = false } +alloy-provider = { version = "0.9", default-features = false } + +alloy-node-bindings = { version = "0.9", default-features = false } + +tokio = { version = "1", default-features = false, features = ["macros"] } diff --git a/networks/ethereum/schnorr/LICENSE b/networks/ethereum/schnorr/LICENSE new file mode 100644 index 00000000..41d5a261 --- /dev/null +++ b/networks/ethereum/schnorr/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/networks/ethereum/schnorr/README.md b/networks/ethereum/schnorr/README.md new file mode 100644 index 00000000..896c92c1 --- /dev/null +++ b/networks/ethereum/schnorr/README.md @@ -0,0 +1,6 @@ +# Ethereum Schnorr Contract + +An Ethereum contract to verify Schnorr signatures. + +This crate will fail to build if the expected version of `solc` is not +installed and available. 
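For context before the implementation files that follow, the new `ethereum-schnorr-contract` crate can be exercised off-chain roughly as below. This is a minimal sketch mirroring the crate's own tests later in this diff: the `PublicKey`/`Signature` types and their methods are the ones introduced here, while the key and nonce handling around them is illustrative only.

use rand_core::OsRng;
use group::ff::Field;
use k256::{Scalar, ProjectivePoint};
use ethereum_schnorr_contract::{PublicKey, Signature};

fn main() {
  // Sample a secp256k1 key, retrying until the point is representable as a
  // `PublicKey` (even y-coordinate, x-coordinate mutual to both fields)
  let (x, public_key) = loop {
    let x = Scalar::random(&mut OsRng);
    if let Some(public_key) = PublicKey::new(ProjectivePoint::GENERATOR * x) {
      break (x, public_key);
    }
  };

  // Schnorr sign: R = rG, c = H(address(R) || A.x || keccak256(m)), s = r + cx
  let message: &[u8] = b"example message";
  let r = Scalar::random(&mut OsRng);
  let c = Signature::challenge(ProjectivePoint::GENERATOR * r, &public_key, message);
  let s = r + (c * x);

  // `Signature::new` rejects a zero challenge
  let signature = Signature::new(c, s).unwrap();
  // Verification recomputes R = sG - cA and compares challenges
  assert!(signature.verify(&public_key, message));

  // The 64-byte (c || s) encoding; these are the two words the Solidity
  // library's `verify` takes alongside the key and the message hash
  let _encoding: [u8; 64] = signature.to_bytes();
}

The same `c`/`s` words, together with the key's 32-byte Ethereum representation and the keccak256 hash of the message, are what the on-chain `Schnorr.verify` added below consumes.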
diff --git a/networks/ethereum/schnorr/build.rs b/networks/ethereum/schnorr/build.rs new file mode 100644 index 00000000..cf12f948 --- /dev/null +++ b/networks/ethereum/schnorr/build.rs @@ -0,0 +1,4 @@ +fn main() { + let artifacts_path = std::env::var("OUT_DIR").unwrap().to_string() + "/ethereum-schnorr-contract"; + build_solidity_contracts::build(&[], "contracts", &artifacts_path).unwrap(); +} diff --git a/networks/ethereum/schnorr/contracts/Schnorr.sol b/networks/ethereum/schnorr/contracts/Schnorr.sol new file mode 100644 index 00000000..69e8d95c --- /dev/null +++ b/networks/ethereum/schnorr/contracts/Schnorr.sol @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: AGPL-3.0-only +pragma solidity ^0.8.26; + +/// @title A library for verifying Schnorr signatures +/// @author Luke Parker +/// @author Elizabeth Binks +/// @notice Verifies a Schnorr signature for a specified public key +/** + * @dev This contract is not complete (in the cryptographic sense). Only a subset of potential + * public keys are representable here. + * + * See https://github.com/serai-dex/serai/blob/next/networks/ethereum/schnorr/src/tests/premise.rs + * for implementation details + */ +// TODO: Pin above link to a specific branch/commit once `next` is merged into `develop` +library Schnorr { + // secp256k1 group order + uint256 private constant Q = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141; + + // We fix the key to have: + // 1) An even y-coordinate + // 2) An x-coordinate < Q + uint8 private constant KEY_PARITY = 27; + + /// @notice Verifies a Schnorr signature for the specified public key + /** + * @dev The y-coordinate of the public key is assumed to be even. The x-coordinate of the public + * key is assumed to be less than the order of secp256k1. + * + * The challenge is calculated as `keccak256(abi.encodePacked(address(R), publicKey, message))` + * where `R` is the commitment to the Schnorr signature's nonce. + */ + /// @param publicKey The x-coordinate of the public key + /// @param message The (hash of the) message signed + /// @param c The challenge for the Schnorr signature + /// @param s The response to the challenge for the Schnorr signature + /// @return If the signature is valid + function verify(bytes32 publicKey, bytes32 message, bytes32 c, bytes32 s) + internal + pure + returns (bool) + { + // ecrecover = (m, v, r, s) -> key + // We instead pass the following to recover the Schnorr signature's nonce (not a public key) + bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(publicKey), Q)); + bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(publicKey), Q)); + + /* + The ecrecover precompile checks `r` and `s` (`publicKey` and `ca`) are non-zero, banning the + two keys with zero for their x-coordinate and zero challenges. Each already only had a + negligible probability of occuring (assuming zero x-coordinates are even on-curve in the first + place). + + `sa` is not checked to be non-zero yet it does not need to be. The inverse of it is never + taken. 
+ */ + address R = ecrecover(sa, KEY_PARITY, publicKey, ca); + // The ecrecover failed + if (R == address(0)) return false; + + // Check the signature is correct by rebuilding the challenge + return c == keccak256(abi.encodePacked(R, publicKey, message)); + } +} diff --git a/networks/ethereum/schnorr/contracts/tests/Schnorr.sol b/networks/ethereum/schnorr/contracts/tests/Schnorr.sol new file mode 100644 index 00000000..1a19371a --- /dev/null +++ b/networks/ethereum/schnorr/contracts/tests/Schnorr.sol @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: AGPL-3.0-only +pragma solidity ^0.8.26; + +import "../Schnorr.sol"; + +/// @title A thin wrapper around the library for verifying Schnorr signatures to test it with +/// @author Luke Parker +/// @author Elizabeth Binks +contract TestSchnorr { + /// @notice Verifies a Schnorr signature for the specified public key + /** + * @dev The y-coordinate of the public key is assumed to be even. The x-coordinate of the public + * key is assumed to be less than the order of secp256k1. + * + * The challenge is calculated as `keccak256(abi.encodePacked(address(R), publicKey, message))` + * where `R` is the commitment to the Schnorr signature's nonce. + */ + /// @param publicKey The x-coordinate of the public key + /// @param message The (hash of the) message signed + /// @param c The challenge for the Schnorr signature + /// @param s The response to the challenge for the Schnorr signature + /// @return If the signature is valid + function verify(bytes32 publicKey, bytes calldata message, bytes32 c, bytes32 s) + external + pure + returns (bool) + { + return Schnorr.verify(publicKey, keccak256(message), c, s); + } +} diff --git a/networks/ethereum/schnorr/src/lib.rs b/networks/ethereum/schnorr/src/lib.rs new file mode 100644 index 00000000..4e2d6883 --- /dev/null +++ b/networks/ethereum/schnorr/src/lib.rs @@ -0,0 +1,12 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] +#![allow(non_snake_case)] + +mod public_key; +pub use public_key::PublicKey; +mod signature; +pub use signature::Signature; + +#[cfg(test)] +mod tests; diff --git a/networks/ethereum/schnorr/src/public_key.rs b/networks/ethereum/schnorr/src/public_key.rs new file mode 100644 index 00000000..fbf00584 --- /dev/null +++ b/networks/ethereum/schnorr/src/public_key.rs @@ -0,0 +1,73 @@ +use subtle::Choice; +use group::ff::PrimeField; +use k256::{ + elliptic_curve::{ + ops::Reduce, + point::{AffineCoordinates, DecompressPoint}, + }, + AffinePoint, ProjectivePoint, Scalar, U256 as KU256, +}; + +/// A public key for the Schnorr Solidity library. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct PublicKey { + A: ProjectivePoint, + x_coordinate: [u8; 32], +} + +impl PublicKey { + /// Construct a new `PublicKey`. + /// + /// This will return None if the provided point isn't eligible to be a public key (due to + /// bounds such as parity). 
+ #[must_use] + pub fn new(A: ProjectivePoint) -> Option { + let affine = A.to_affine(); + + // Only allow even keys to save a word within Ethereum + if bool::from(affine.y_is_odd()) { + None?; + } + + let x_coordinate = affine.x(); + // Return None if the x-coordinate isn't mutual to both fields + // The trivial amount of public keys this makes non-representable aren't considered a concern + if >::reduce_bytes(&x_coordinate).to_repr() != x_coordinate { + None?; + } + + let x_coordinate: [u8; 32] = x_coordinate.into(); + // Returns None if the x-coordinate is 0 + // Such keys will never have their signatures able to be verified + if x_coordinate == [0; 32] { + None?; + } + Some(PublicKey { A, x_coordinate }) + } + + /// The point for this public key. + #[must_use] + pub fn point(&self) -> ProjectivePoint { + self.A + } + + /// The Ethereum representation of this public key. + #[must_use] + pub fn eth_repr(&self) -> [u8; 32] { + // We only encode the x-coordinate due to fixing the sign of the y-coordinate + self.x_coordinate + } + + /// Construct a PublicKey from its Ethereum representation. + // This wouldn't be possible if the x-coordinate had been reduced + #[must_use] + pub fn from_eth_repr(repr: [u8; 32]) -> Option { + let x_coordinate = repr; + + let y_is_odd = Choice::from(0); + let A_affine = + Option::::from(AffinePoint::decompress(&x_coordinate.into(), y_is_odd))?; + let A = ProjectivePoint::from(A_affine); + Some(PublicKey { A, x_coordinate }) + } +} diff --git a/networks/ethereum/schnorr/src/signature.rs b/networks/ethereum/schnorr/src/signature.rs new file mode 100644 index 00000000..105e6d4d --- /dev/null +++ b/networks/ethereum/schnorr/src/signature.rs @@ -0,0 +1,101 @@ +use std::io; + +use sha3::{Digest, Keccak256}; + +use group::ff::PrimeField; +use k256::{ + elliptic_curve::{ops::Reduce, sec1::ToEncodedPoint}, + ProjectivePoint, Scalar, U256 as KU256, +}; + +use crate::PublicKey; + +/// A signature for the Schnorr Solidity library. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct Signature { + c: Scalar, + s: Scalar, +} + +impl Signature { + /// Construct a new `Signature`. + #[must_use] + pub fn new(c: Scalar, s: Scalar) -> Option { + if bool::from(c.is_zero()) { + None?; + } + Some(Signature { c, s }) + } + + /// The challenge for a signature. + /// + /// With negligible probability, this MAY return 0 which will create an invalid/unverifiable + /// signature. + #[must_use] + pub fn challenge(R: ProjectivePoint, key: &PublicKey, message: &[u8]) -> Scalar { + // H(R || A || m) + let mut hash = Keccak256::new(); + // We transcript the nonce as an address since ecrecover yields an address + hash.update({ + let uncompressed_encoded_point = R.to_encoded_point(false); + // Skip the prefix byte marking this as uncompressed + let x_and_y_coordinates = &uncompressed_encoded_point.as_ref()[1 ..]; + // Last 20 bytes of the hash of the x and y coordinates + &Keccak256::digest(x_and_y_coordinates)[12 ..] + }); + hash.update(key.eth_repr()); + hash.update(Keccak256::digest(message)); + >::reduce_bytes(&hash.finalize()) + } + + /// Verify a signature. + #[must_use] + pub fn verify(&self, key: &PublicKey, message: &[u8]) -> bool { + // Recover the nonce + let R = (ProjectivePoint::GENERATOR * self.s) - (key.point() * self.c); + // Check the challenge + Self::challenge(R, key, message) == self.c + } + + /// The challenge present within this signature. + pub fn c(&self) -> Scalar { + self.c + } + + /// The signature solution present within this signature. 
+ pub fn s(&self) -> Scalar { + self.s + } + + /// Convert the signature to bytes. + #[must_use] + pub fn to_bytes(&self) -> [u8; 64] { + let mut res = [0; 64]; + res[.. 32].copy_from_slice(self.c.to_repr().as_ref()); + res[32 ..].copy_from_slice(self.s.to_repr().as_ref()); + res + } + + /// Write the signature. + pub fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + writer.write_all(&self.to_bytes()) + } + + /// Read a signature. + pub fn read(reader: &mut impl io::Read) -> io::Result { + let mut read_F = || -> io::Result { + let mut bytes = [0; 32]; + reader.read_exact(&mut bytes)?; + Option::::from(Scalar::from_repr(bytes.into())) + .ok_or_else(|| io::Error::other("invalid scalar")) + }; + let c = read_F()?; + let s = read_F()?; + Ok(Signature { c, s }) + } + + /// Read a signature from bytes. + pub fn from_bytes(bytes: [u8; 64]) -> io::Result { + Self::read(&mut bytes.as_slice()) + } +} diff --git a/networks/ethereum/schnorr/src/tests/mod.rs b/networks/ethereum/schnorr/src/tests/mod.rs new file mode 100644 index 00000000..4b23a0ba --- /dev/null +++ b/networks/ethereum/schnorr/src/tests/mod.rs @@ -0,0 +1,136 @@ +use std::sync::Arc; + +use rand_core::{RngCore, OsRng}; + +use group::ff::{Field, PrimeField}; +use k256::{Scalar, ProjectivePoint}; + +use alloy_core::primitives::Address; +use alloy_sol_types::SolCall; + +use alloy_simple_request_transport::SimpleRequest; +use alloy_rpc_types_eth::{TransactionInput, TransactionRequest}; +use alloy_rpc_client::ClientBuilder; +use alloy_provider::{Provider, RootProvider}; + +use alloy_node_bindings::{Anvil, AnvilInstance}; + +use crate::{PublicKey, Signature}; + +mod public_key; +pub(crate) use public_key::test_key; +mod signature; +mod premise; + +#[expect(warnings)] +#[expect(needless_pass_by_value)] +#[expect(clippy::all)] +#[expect(clippy::ignored_unit_patterns)] +#[expect(clippy::redundant_closure_for_method_calls)] +mod abi { + alloy_sol_types::sol!("contracts/tests/Schnorr.sol"); + pub(crate) use TestSchnorr::*; +} + +async fn setup_test() -> (AnvilInstance, Arc>, Address) { + let anvil = Anvil::new().spawn(); + + let provider = Arc::new(RootProvider::new( + ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true), + )); + + let mut address = [0; 20]; + OsRng.fill_bytes(&mut address); + let address = Address::from(address); + let _: () = provider + .raw_request( + "anvil_setCode".into(), + [ + address.to_string(), + include_str!(concat!( + env!("OUT_DIR"), + "/ethereum-schnorr-contract/TestSchnorr.bin-runtime" + )) + .to_string(), + ], + ) + .await + .unwrap(); + + (anvil, provider, address) +} + +async fn call_verify( + provider: &RootProvider, + address: Address, + public_key: &PublicKey, + message: &[u8], + signature: &Signature, +) -> bool { + let public_key: [u8; 32] = public_key.eth_repr(); + let c_bytes: [u8; 32] = signature.c().to_repr().into(); + let s_bytes: [u8; 32] = signature.s().to_repr().into(); + let call = TransactionRequest::default().to(address).input(TransactionInput::new( + abi::verifyCall::new(( + public_key.into(), + message.to_vec().into(), + c_bytes.into(), + s_bytes.into(), + )) + .abi_encode() + .into(), + )); + let bytes = provider.call(&call).await.unwrap(); + let res = abi::verifyCall::abi_decode_returns(&bytes, true).unwrap(); + + res._0 +} + +#[tokio::test] +async fn test_verify() { + let (_anvil, provider, address) = setup_test().await; + + for _ in 0 .. 
100 { + let (key, public_key) = test_key(); + + let nonce = Scalar::random(&mut OsRng); + let mut message = vec![0; 1 + usize::try_from(OsRng.next_u32() % 256).unwrap()]; + OsRng.fill_bytes(&mut message); + + let c = Signature::challenge(ProjectivePoint::GENERATOR * nonce, &public_key, &message); + let s = nonce + (c * key); + + let sig = Signature::new(c, s).unwrap(); + assert!(sig.verify(&public_key, &message)); + assert!(call_verify(&provider, address, &public_key, &message, &sig).await); + + // Test setting `s = 0` doesn't pass verification + { + let zero_s = Signature::new(c, Scalar::ZERO).unwrap(); + assert!(!zero_s.verify(&public_key, &message)); + assert!(!call_verify(&provider, address, &public_key, &message, &zero_s).await); + } + + // Mutate the message and make sure the signature now fails to verify + { + let mut message = message.clone(); + message[0] = message[0].wrapping_add(1); + assert!(!sig.verify(&public_key, &message)); + assert!(!call_verify(&provider, address, &public_key, &message, &sig).await); + } + + // Mutate c and make sure the signature now fails to verify + { + let mutated_c = Signature::new(c + Scalar::ONE, s).unwrap(); + assert!(!mutated_c.verify(&public_key, &message)); + assert!(!call_verify(&provider, address, &public_key, &message, &mutated_c).await); + } + + // Mutate s and make sure the signature now fails to verify + { + let mutated_s = Signature::new(c, s + Scalar::ONE).unwrap(); + assert!(!mutated_s.verify(&public_key, &message)); + assert!(!call_verify(&provider, address, &public_key, &message, &mutated_s).await); + } + } +} diff --git a/networks/ethereum/schnorr/src/tests/premise.rs b/networks/ethereum/schnorr/src/tests/premise.rs new file mode 100644 index 00000000..dee78e44 --- /dev/null +++ b/networks/ethereum/schnorr/src/tests/premise.rs @@ -0,0 +1,104 @@ +use rand_core::{RngCore, OsRng}; + +use sha3::{Digest, Keccak256}; +use group::ff::{Field, PrimeField}; +use k256::{ + elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint}, + ecdsa::{ + self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey, + }, + U256, Scalar, ProjectivePoint, +}; + +use alloy_core::primitives::Address; + +use crate::{Signature, tests::test_key}; + +// The ecrecover opcode, yet with if the y is odd replacing v +fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> { + let sig = ecdsa::Signature::from_scalars(r, s).ok()?; + let message: [u8; 32] = message.to_repr().into(); + alloy_core::primitives::PrimitiveSignature::from_signature_and_parity(sig, odd_y) + .recover_address_from_prehash(&alloy_core::primitives::B256::from(message)) + .ok() + .map(Into::into) +} + +// Test ecrecover behaves as expected +#[test] +fn test_ecrecover() { + let private = SigningKey::random(&mut OsRng); + let public = VerifyingKey::from(&private); + + // Sign the signature + const MESSAGE: &[u8] = b"Hello, World!"; + let (sig, recovery_id) = private + .as_nonzero_scalar() + .try_sign_prehashed(Scalar::random(&mut OsRng), &Keccak256::digest(MESSAGE)) + .unwrap(); + + // Sanity check the signature verifies + #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result + { + assert_eq!(public.verify_prehash(&Keccak256::digest(MESSAGE), &sig).unwrap(), ()); + } + + // Perform the ecrecover + assert_eq!( + ecrecover( + >::reduce_bytes(&Keccak256::digest(MESSAGE)), + u8::from(recovery_id.unwrap().is_y_odd()) == 1, + *sig.r(), + *sig.s() + ) + .unwrap(), + 
Address::from_raw_public_key(&public.to_encoded_point(false).as_ref()[1 ..]), + ); +} + +// Test that we can recover the nonce from a Schnorr signature via a call to ecrecover, the premise +// of efficiently verifying Schnorr signatures in an Ethereum contract +#[test] +fn nonce_recovery_via_ecrecover() { + let (key, public_key) = test_key(); + + let nonce = Scalar::random(&mut OsRng); + let R = ProjectivePoint::GENERATOR * nonce; + + let mut message = vec![0; 1 + usize::try_from(OsRng.next_u32() % 256).unwrap()]; + OsRng.fill_bytes(&mut message); + + let c = Signature::challenge(R, &public_key, &message); + let s = nonce + (c * key); + + /* + An ECDSA signature is `(r, s)` with `s = (m + (r * x)) / k`, where: + - `m` is the hash of the message + - `r` is the x-coordinate of the nonce, reduced into a scalar + - `x` is the private key + - `k` is the nonce + + We fix the recovery ID to be for the even key with an x-coordinate < the order. Accordingly, + `k * G = Point::from(Even, r)`. This enables recovering the public key via + `((s * Point::from(Even, r)) - (m * G)) / r`. + + We want to calculate `R` from `(c, s)` where `s = r + cx`. That means we need to calculate + `(s * G) - (c * X)`. + + We can calculate `(s * G) - (c * X)` with `((s * Point::from(Even, r)) - (m * G)) / r` if: + - ECDSA `r` = `X.x`, the x-coordinate of the Schnorr public key + - ECDSA `s` = `c`, the Schnorr signature's challenge + - ECDSA `m` = Schnorr `s` + This gets us to `((c * X) - (s * G)) / X.x`. If we additionally scale the ECDSA `s, m` values + (the Schnorr `c, s` values) by `X.x`, we get `(c * X) - (s * G)`. This just requires negating + to achieve `(s * G) - (c * X)`. + + With `R`, we can recalculate and compare the challenges to confirm the signature is valid. + */ + let x_scalar = >::reduce_bytes(&public_key.point().to_affine().x()); + let sa = -(s * x_scalar); + let ca = -(c * x_scalar); + + let q = ecrecover(sa, false, x_scalar, ca).unwrap(); + assert_eq!(q, Address::from_raw_public_key(&R.to_encoded_point(false).as_ref()[1 ..])); +} diff --git a/networks/ethereum/schnorr/src/tests/public_key.rs b/networks/ethereum/schnorr/src/tests/public_key.rs new file mode 100644 index 00000000..9294cbac --- /dev/null +++ b/networks/ethereum/schnorr/src/tests/public_key.rs @@ -0,0 +1,69 @@ +use rand_core::OsRng; + +use subtle::Choice; +use group::ff::{Field, PrimeField}; +use k256::{ + elliptic_curve::{ + FieldBytesEncoding, + ops::Reduce, + point::{AffineCoordinates, DecompressPoint}, + }, + AffinePoint, ProjectivePoint, Scalar, U256 as KU256, +}; + +use crate::PublicKey; + +// Generates a key usable within tests +pub(crate) fn test_key() -> (Scalar, PublicKey) { + loop { + let key = Scalar::random(&mut OsRng); + let point = ProjectivePoint::GENERATOR * key; + if let Some(public_key) = PublicKey::new(point) { + // While here, test `PublicKey::point` and its serialization functions + assert_eq!(point, public_key.point()); + assert_eq!(PublicKey::from_eth_repr(public_key.eth_repr()).unwrap(), public_key); + return (key, public_key); + } + } +} + +#[test] +fn test_odd_key() { + // We generate a valid key to ensure there's not some distinct reason this key is invalid + let (_, key) = test_key(); + // We then take its point and negate it so its y-coordinate is odd + let odd = -key.point(); + assert!(PublicKey::new(odd).is_none()); +} + +#[test] +fn test_non_mutual_key() { + let mut x_coordinate = KU256::from(-(Scalar::ONE)).saturating_add(&KU256::ONE); + + let y_is_odd = Choice::from(0); + let non_mutual = loop { + if let 
Some(point) = Option::::from(AffinePoint::decompress( + &FieldBytesEncoding::encode_field_bytes(&x_coordinate), + y_is_odd, + )) { + break point; + } + x_coordinate = x_coordinate.saturating_add(&KU256::ONE); + }; + let x_coordinate = non_mutual.x(); + assert!(>::reduce_bytes(&x_coordinate).to_repr() != x_coordinate); + + // Even point whose x-coordinate isn't mutual to both fields (making it non-zero) + assert!(PublicKey::new(non_mutual.into()).is_none()); +} + +#[test] +fn test_zero_key() { + let y_is_odd = Choice::from(0); + if let Some(A_affine) = + Option::::from(AffinePoint::decompress(&[0; 32].into(), y_is_odd)) + { + let A = ProjectivePoint::from(A_affine); + assert!(PublicKey::new(A).is_none()); + } +} diff --git a/networks/ethereum/schnorr/src/tests/signature.rs b/networks/ethereum/schnorr/src/tests/signature.rs new file mode 100644 index 00000000..27c640f8 --- /dev/null +++ b/networks/ethereum/schnorr/src/tests/signature.rs @@ -0,0 +1,33 @@ +use rand_core::OsRng; + +use group::ff::Field; +use k256::Scalar; + +use crate::Signature; + +#[test] +fn test_zero_challenge() { + assert!(Signature::new(Scalar::ZERO, Scalar::random(&mut OsRng)).is_none()); +} + +#[test] +fn test_signature_serialization() { + let c = Scalar::random(&mut OsRng); + let s = Scalar::random(&mut OsRng); + let sig = Signature::new(c, s).unwrap(); + assert_eq!(sig.c(), c); + assert_eq!(sig.s(), s); + + let sig_bytes = sig.to_bytes(); + assert_eq!(Signature::from_bytes(sig_bytes).unwrap(), sig); + + { + let mut sig_written_bytes = vec![]; + sig.write(&mut sig_written_bytes).unwrap(); + assert_eq!(sig_bytes.as_slice(), &sig_written_bytes); + } + + let mut sig_read_slice = sig_bytes.as_slice(); + assert_eq!(Signature::read(&mut sig_read_slice).unwrap(), sig); + assert!(sig_read_slice.is_empty()); +} diff --git a/networks/ethereum/src/abi/mod.rs b/networks/ethereum/src/abi/mod.rs deleted file mode 100644 index 1ae23374..00000000 --- a/networks/ethereum/src/abi/mod.rs +++ /dev/null @@ -1,37 +0,0 @@ -use alloy_sol_types::sol; - -#[rustfmt::skip] -#[allow(warnings)] -#[allow(needless_pass_by_value)] -#[allow(clippy::all)] -#[allow(clippy::ignored_unit_patterns)] -#[allow(clippy::redundant_closure_for_method_calls)] -mod erc20_container { - use super::*; - sol!("contracts/IERC20.sol"); -} -pub use erc20_container::IERC20 as erc20; - -#[rustfmt::skip] -#[allow(warnings)] -#[allow(needless_pass_by_value)] -#[allow(clippy::all)] -#[allow(clippy::ignored_unit_patterns)] -#[allow(clippy::redundant_closure_for_method_calls)] -mod deployer_container { - use super::*; - sol!("contracts/Deployer.sol"); -} -pub use deployer_container::Deployer as deployer; - -#[rustfmt::skip] -#[allow(warnings)] -#[allow(needless_pass_by_value)] -#[allow(clippy::all)] -#[allow(clippy::ignored_unit_patterns)] -#[allow(clippy::redundant_closure_for_method_calls)] -mod router_container { - use super::*; - sol!(Router, "artifacts/Router.abi"); -} -pub use router_container::Router as router; diff --git a/networks/ethereum/src/crypto.rs b/networks/ethereum/src/crypto.rs deleted file mode 100644 index 6ea6a0b0..00000000 --- a/networks/ethereum/src/crypto.rs +++ /dev/null @@ -1,188 +0,0 @@ -use group::ff::PrimeField; -use k256::{ - elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint}, - ProjectivePoint, Scalar, U256 as KU256, -}; -#[cfg(test)] -use k256::{elliptic_curve::point::DecompressPoint, AffinePoint}; - -use frost::{ - algorithm::{Hram, SchnorrSignature}, - curve::{Ciphersuite, Secp256k1}, -}; - -use 
alloy_core::primitives::{Parity, Signature as AlloySignature}; -use alloy_consensus::{SignableTransaction, Signed, TxLegacy}; - -use crate::abi::router::{Signature as AbiSignature}; - -pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] { - alloy_core::primitives::keccak256(data).into() -} - -pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar { - >::reduce_bytes(&keccak256(data).into()) -} - -pub fn address(point: &ProjectivePoint) -> [u8; 20] { - let encoded_point = point.to_encoded_point(false); - // Last 20 bytes of the hash of the concatenated x and y coordinates - // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point - keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap() -} - -/// Deterministically sign a transaction. -/// -/// This function panics if passed a transaction with a non-None chain ID. -pub fn deterministically_sign(tx: &TxLegacy) -> Signed { - assert!( - tx.chain_id.is_none(), - "chain ID was Some when deterministically signing a TX (causing a non-deterministic signer)" - ); - - let sig_hash = tx.signature_hash().0; - let mut r = hash_to_scalar(&[sig_hash.as_slice(), b"r"].concat()); - let mut s = hash_to_scalar(&[sig_hash.as_slice(), b"s"].concat()); - loop { - let r_bytes: [u8; 32] = r.to_repr().into(); - let s_bytes: [u8; 32] = s.to_repr().into(); - let v = Parity::NonEip155(false); - let signature = - AlloySignature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), v).unwrap(); - let tx = tx.clone().into_signed(signature); - if tx.recover_signer().is_ok() { - return tx; - } - - // Re-hash until valid - r = hash_to_scalar(r_bytes.as_ref()); - s = hash_to_scalar(s_bytes.as_ref()); - } -} - -/// The public key for a Schnorr-signing account. -#[allow(non_snake_case)] -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct PublicKey { - pub(crate) A: ProjectivePoint, - pub(crate) px: Scalar, -} - -impl PublicKey { - /// Construct a new `PublicKey`. - /// - /// This will return None if the provided point isn't eligible to be a public key (due to - /// bounds such as parity). - #[allow(non_snake_case)] - pub fn new(A: ProjectivePoint) -> Option { - let affine = A.to_affine(); - // Only allow even keys to save a word within Ethereum - let is_odd = bool::from(affine.y_is_odd()); - if is_odd { - None?; - } - - let x_coord = affine.x(); - let x_coord_scalar = >::reduce_bytes(&x_coord); - // Return None if a reduction would occur - // Reductions would be incredibly unlikely and shouldn't be an issue, yet it's one less - // headache/concern to have - // This does ban a trivial amoount of public keys - if x_coord_scalar.to_repr() != x_coord { - None?; - } - - Some(PublicKey { A, px: x_coord_scalar }) - } - - pub fn point(&self) -> ProjectivePoint { - self.A - } - - pub(crate) fn eth_repr(&self) -> [u8; 32] { - self.px.to_repr().into() - } - - #[cfg(test)] - pub(crate) fn from_eth_repr(repr: [u8; 32]) -> Option { - #[allow(non_snake_case)] - let A = Option::::from(AffinePoint::decompress(&repr.into(), 0.into()))?.into(); - Option::from(Scalar::from_repr(repr.into())).map(|px| PublicKey { A, px }) - } -} - -/// The HRAm to use for the Schnorr contract. 
-#[derive(Clone, Default)] -pub struct EthereumHram {} -impl Hram for EthereumHram { - #[allow(non_snake_case)] - fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar { - let x_coord = A.to_affine().x(); - - let mut data = address(R).to_vec(); - data.extend(x_coord.as_slice()); - data.extend(m); - - >::reduce_bytes(&keccak256(&data).into()) - } -} - -/// A signature for the Schnorr contract. -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct Signature { - pub(crate) c: Scalar, - pub(crate) s: Scalar, -} -impl Signature { - pub fn verify(&self, public_key: &PublicKey, message: &[u8]) -> bool { - #[allow(non_snake_case)] - let R = (Secp256k1::generator() * self.s) - (public_key.A * self.c); - EthereumHram::hram(&R, &public_key.A, message) == self.c - } - - /// Construct a new `Signature`. - /// - /// This will return None if the signature is invalid. - pub fn new( - public_key: &PublicKey, - message: &[u8], - signature: SchnorrSignature, - ) -> Option { - let c = EthereumHram::hram(&signature.R, &public_key.A, message); - if !signature.verify(public_key.A, c) { - None?; - } - - let res = Signature { c, s: signature.s }; - assert!(res.verify(public_key, message)); - Some(res) - } - - pub fn c(&self) -> Scalar { - self.c - } - pub fn s(&self) -> Scalar { - self.s - } - - pub fn to_bytes(&self) -> [u8; 64] { - let mut res = [0; 64]; - res[.. 32].copy_from_slice(self.c.to_repr().as_ref()); - res[32 ..].copy_from_slice(self.s.to_repr().as_ref()); - res - } - - pub fn from_bytes(bytes: [u8; 64]) -> std::io::Result { - let mut reader = bytes.as_slice(); - let c = Secp256k1::read_F(&mut reader)?; - let s = Secp256k1::read_F(&mut reader)?; - Ok(Signature { c, s }) - } -} -impl From<&Signature> for AbiSignature { - fn from(sig: &Signature) -> AbiSignature { - let c: [u8; 32] = sig.c.to_repr().into(); - let s: [u8; 32] = sig.s.to_repr().into(); - AbiSignature { c: c.into(), s: s.into() } - } -} diff --git a/networks/ethereum/src/deployer.rs b/networks/ethereum/src/deployer.rs deleted file mode 100644 index c9f30968..00000000 --- a/networks/ethereum/src/deployer.rs +++ /dev/null @@ -1,113 +0,0 @@ -use std::sync::Arc; - -use alloy_core::primitives::{hex::FromHex, Address, B256, U256, Bytes, TxKind}; -use alloy_consensus::{Signed, TxLegacy}; - -use alloy_sol_types::{SolCall, SolEvent}; - -use alloy_rpc_types_eth::{BlockNumberOrTag, Filter}; -use alloy_simple_request_transport::SimpleRequest; -use alloy_provider::{Provider, RootProvider}; - -use crate::{ - Error, - crypto::{self, keccak256, PublicKey}, - router::Router, -}; -pub use crate::abi::deployer as abi; - -/// The Deployer contract for the Router contract. -/// -/// This Deployer has a deterministic address, letting it be immediately identified on any -/// compatible chain. It then supports retrieving the Router contract's address (which isn't -/// deterministic) using a single log query. -#[derive(Clone, Debug)] -pub struct Deployer; -impl Deployer { - /// Obtain the transaction to deploy this contract, already signed. - /// - /// The account this transaction is sent from (which is populated in `from`) must be sufficiently - /// funded for this transaction to be submitted. This account has no known private key to anyone, - /// so ETH sent can be neither misappropriated nor returned. 
- pub fn deployment_tx() -> Signed { - let bytecode = include_str!("../artifacts/Deployer.bin"); - let bytecode = - Bytes::from_hex(bytecode).expect("compiled-in Deployer bytecode wasn't valid hex"); - - let tx = TxLegacy { - chain_id: None, - nonce: 0, - gas_price: 100_000_000_000u128, - // TODO: Use a more accurate gas limit - gas_limit: 1_000_000, - to: TxKind::Create, - value: U256::ZERO, - input: bytecode, - }; - - crypto::deterministically_sign(&tx) - } - - /// Obtain the deterministic address for this contract. - pub fn address() -> [u8; 20] { - let deployer_deployer = - Self::deployment_tx().recover_signer().expect("deployment_tx didn't have a valid signature"); - **Address::create(&deployer_deployer, 0) - } - - /// Construct a new view of the `Deployer`. - pub async fn new(provider: Arc>) -> Result, Error> { - let address = Self::address(); - let code = provider.get_code_at(address.into()).await.map_err(|_| Error::ConnectionError)?; - // Contract has yet to be deployed - if code.is_empty() { - return Ok(None); - } - Ok(Some(Self)) - } - - /// Yield the `ContractCall` necessary to deploy the Router. - pub fn deploy_router(&self, key: &PublicKey) -> TxLegacy { - TxLegacy { - to: TxKind::Call(Self::address().into()), - input: abi::deployCall::new((Router::init_code(key).into(),)).abi_encode().into(), - gas_limit: 1_000_000, - ..Default::default() - } - } - - /// Find the first Router deployed with the specified key as its first key. - /// - /// This is the Router Serai will use, and is the only way to construct a `Router`. - pub async fn find_router( - &self, - provider: Arc>, - key: &PublicKey, - ) -> Result, Error> { - let init_code = Router::init_code(key); - let init_code_hash = keccak256(&init_code); - - #[cfg(not(test))] - let to_block = BlockNumberOrTag::Finalized; - #[cfg(test)] - let to_block = BlockNumberOrTag::Latest; - - // Find the first log using this init code (where the init code is binding to the key) - // TODO: Make an abstraction for event filtering (de-duplicating common code) - let filter = - Filter::new().from_block(0).to_block(to_block).address(Address::from(Self::address())); - let filter = filter.event_signature(abi::Deployment::SIGNATURE_HASH); - let filter = filter.topic1(B256::from(init_code_hash)); - let logs = provider.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; - - let Some(first_log) = logs.first() else { return Ok(None) }; - let router = first_log - .log_decode::() - .map_err(|_| Error::ConnectionError)? - .inner - .data - .created; - - Ok(Some(Router::new(provider, router))) - } -} diff --git a/networks/ethereum/src/erc20.rs b/networks/ethereum/src/erc20.rs deleted file mode 100644 index 6a32f7cc..00000000 --- a/networks/ethereum/src/erc20.rs +++ /dev/null @@ -1,105 +0,0 @@ -use std::{sync::Arc, collections::HashSet}; - -use alloy_core::primitives::{Address, B256, U256}; - -use alloy_sol_types::{SolInterface, SolEvent}; - -use alloy_rpc_types_eth::Filter; -use alloy_simple_request_transport::SimpleRequest; -use alloy_provider::{Provider, RootProvider}; - -use crate::Error; -pub use crate::abi::erc20 as abi; -use abi::{IERC20Calls, Transfer, transferCall, transferFromCall}; - -#[derive(Clone, Debug)] -pub struct TopLevelErc20Transfer { - pub id: [u8; 32], - pub from: [u8; 20], - pub amount: U256, - pub data: Vec, -} - -/// A view for an ERC20 contract. -#[derive(Clone, Debug)] -pub struct Erc20(Arc>, Address); -impl Erc20 { - /// Construct a new view of the specified ERC20 contract. 
- pub fn new(provider: Arc>, address: [u8; 20]) -> Self { - Self(provider, Address::from(&address)) - } - - pub async fn top_level_transfers( - &self, - block: u64, - to: [u8; 20], - ) -> Result, Error> { - let filter = Filter::new().from_block(block).to_block(block).address(self.1); - let filter = filter.event_signature(Transfer::SIGNATURE_HASH); - let mut to_topic = [0; 32]; - to_topic[12 ..].copy_from_slice(&to); - let filter = filter.topic2(B256::from(to_topic)); - let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; - - let mut handled = HashSet::new(); - - let mut top_level_transfers = vec![]; - for log in logs { - // Double check the address which emitted this log - if log.address() != self.1 { - Err(Error::ConnectionError)?; - } - - let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?; - let tx = - self.0.get_transaction_by_hash(tx_id).await.ok().flatten().ok_or(Error::ConnectionError)?; - - // If this is a top-level call... - if tx.to == Some(self.1) { - // And we recognize the call... - // Don't validate the encoding as this can't be re-encoded to an identical bytestring due - // to the InInstruction appended - if let Ok(call) = IERC20Calls::abi_decode(&tx.input, false) { - // Extract the top-level call's from/to/value - let (from, call_to, value) = match call { - IERC20Calls::transfer(transferCall { to: call_to, value }) => (tx.from, call_to, value), - IERC20Calls::transferFrom(transferFromCall { from, to: call_to, value }) => { - (from, call_to, value) - } - // Treat any other function selectors as unrecognized - _ => continue, - }; - - let log = log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; - - // Ensure the top-level transfer is equivalent, and this presumably isn't a log for an - // internal transfer - if (log.from != from) || (call_to != to) || (value != log.value) { - continue; - } - - // Now that the top-level transfer is confirmed to be equivalent to the log, ensure it's - // the only log we handle - if handled.contains(&tx_id) { - continue; - } - handled.insert(tx_id); - - // Read the data appended after - let encoded = call.abi_encode(); - let data = tx.input.as_ref()[encoded.len() ..].to_vec(); - - // Push the transfer - top_level_transfers.push(TopLevelErc20Transfer { - // Since we'll only handle one log for this TX, set the ID to the TX ID - id: *tx_id, - from: *log.from.0, - amount: log.value, - data, - }); - } - } - } - Ok(top_level_transfers) - } -} diff --git a/networks/ethereum/src/lib.rs b/networks/ethereum/src/lib.rs deleted file mode 100644 index 38bd79e7..00000000 --- a/networks/ethereum/src/lib.rs +++ /dev/null @@ -1,35 +0,0 @@ -use thiserror::Error; - -pub mod alloy { - pub use alloy_core::primitives; - pub use alloy_core as core; - pub use alloy_sol_types as sol_types; - - pub use alloy_consensus as consensus; - pub use alloy_network as network; - pub use alloy_rpc_types_eth as rpc_types; - pub use alloy_simple_request_transport as simple_request_transport; - pub use alloy_rpc_client as rpc_client; - pub use alloy_provider as provider; -} - -pub mod crypto; - -pub(crate) mod abi; - -pub mod erc20; -pub mod deployer; -pub mod router; - -pub mod machine; - -#[cfg(any(test, feature = "tests"))] -pub mod tests; - -#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)] -pub enum Error { - #[error("failed to verify Schnorr signature")] - InvalidSignature, - #[error("couldn't make call/send TX")] - ConnectionError, -} diff --git a/networks/ethereum/src/machine.rs b/networks/ethereum/src/machine.rs deleted 
file mode 100644 index 0d5dc7a5..00000000 --- a/networks/ethereum/src/machine.rs +++ /dev/null @@ -1,414 +0,0 @@ -use std::{ - io::{self, Read}, - collections::HashMap, -}; - -use rand_core::{RngCore, CryptoRng}; - -use transcript::{Transcript, RecommendedTranscript}; - -use group::GroupEncoding; -use frost::{ - curve::{Ciphersuite, Secp256k1}, - Participant, ThresholdKeys, FrostError, - algorithm::Schnorr, - sign::*, -}; - -use alloy_core::primitives::U256; - -use crate::{ - crypto::{PublicKey, EthereumHram, Signature}, - router::{ - abi::{Call as AbiCall, OutInstruction as AbiOutInstruction}, - Router, - }, -}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Call { - pub to: [u8; 20], - pub value: U256, - pub data: Vec, -} -impl Call { - pub fn read(reader: &mut R) -> io::Result { - let mut to = [0; 20]; - reader.read_exact(&mut to)?; - - let value = { - let mut value_bytes = [0; 32]; - reader.read_exact(&mut value_bytes)?; - U256::from_le_slice(&value_bytes) - }; - - let mut data_len = { - let mut data_len = [0; 4]; - reader.read_exact(&mut data_len)?; - usize::try_from(u32::from_le_bytes(data_len)).expect("u32 couldn't fit within a usize") - }; - - // A valid DoS would be to claim a 4 GB data is present for only 4 bytes - // We read this in 1 KB chunks to only read data actually present (with a max DoS of 1 KB) - let mut data = vec![]; - while data_len > 0 { - let chunk_len = data_len.min(1024); - let mut chunk = vec![0; chunk_len]; - reader.read_exact(&mut chunk)?; - data.extend(&chunk); - data_len -= chunk_len; - } - - Ok(Call { to, value, data }) - } - - fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(&self.to)?; - writer.write_all(&self.value.as_le_bytes())?; - - let data_len = u32::try_from(self.data.len()) - .map_err(|_| io::Error::other("call data length exceeded 2**32"))?; - writer.write_all(&data_len.to_le_bytes())?; - writer.write_all(&self.data) - } -} -impl From for AbiCall { - fn from(call: Call) -> AbiCall { - AbiCall { to: call.to.into(), value: call.value, data: call.data.into() } - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum OutInstructionTarget { - Direct([u8; 20]), - Calls(Vec), -} -impl OutInstructionTarget { - fn read(reader: &mut R) -> io::Result { - let mut kind = [0xff]; - reader.read_exact(&mut kind)?; - - match kind[0] { - 0 => { - let mut addr = [0; 20]; - reader.read_exact(&mut addr)?; - Ok(OutInstructionTarget::Direct(addr)) - } - 1 => { - let mut calls_len = [0; 4]; - reader.read_exact(&mut calls_len)?; - let calls_len = u32::from_le_bytes(calls_len); - - let mut calls = vec![]; - for _ in 0 .. 
calls_len { - calls.push(Call::read(reader)?); - } - Ok(OutInstructionTarget::Calls(calls)) - } - _ => Err(io::Error::other("unrecognized OutInstructionTarget"))?, - } - } - - fn write(&self, writer: &mut W) -> io::Result<()> { - match self { - OutInstructionTarget::Direct(addr) => { - writer.write_all(&[0])?; - writer.write_all(addr)?; - } - OutInstructionTarget::Calls(calls) => { - writer.write_all(&[1])?; - let call_len = u32::try_from(calls.len()) - .map_err(|_| io::Error::other("amount of calls exceeded 2**32"))?; - writer.write_all(&call_len.to_le_bytes())?; - for call in calls { - call.write(writer)?; - } - } - } - Ok(()) - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct OutInstruction { - pub target: OutInstructionTarget, - pub value: U256, -} -impl OutInstruction { - fn read(reader: &mut R) -> io::Result { - let target = OutInstructionTarget::read(reader)?; - - let value = { - let mut value_bytes = [0; 32]; - reader.read_exact(&mut value_bytes)?; - U256::from_le_slice(&value_bytes) - }; - - Ok(OutInstruction { target, value }) - } - fn write(&self, writer: &mut W) -> io::Result<()> { - self.target.write(writer)?; - writer.write_all(&self.value.as_le_bytes()) - } -} -impl From for AbiOutInstruction { - fn from(instruction: OutInstruction) -> AbiOutInstruction { - match instruction.target { - OutInstructionTarget::Direct(addr) => { - AbiOutInstruction { to: addr.into(), calls: vec![], value: instruction.value } - } - OutInstructionTarget::Calls(calls) => AbiOutInstruction { - to: [0; 20].into(), - calls: calls.into_iter().map(Into::into).collect(), - value: instruction.value, - }, - } - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum RouterCommand { - UpdateSeraiKey { chain_id: U256, nonce: U256, key: PublicKey }, - Execute { chain_id: U256, nonce: U256, outs: Vec }, -} - -impl RouterCommand { - pub fn msg(&self) -> Vec { - match self { - RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => { - Router::update_serai_key_message(*chain_id, *nonce, key) - } - RouterCommand::Execute { chain_id, nonce, outs } => Router::execute_message( - *chain_id, - *nonce, - outs.iter().map(|out| out.clone().into()).collect(), - ), - } - } - - pub fn read(reader: &mut R) -> io::Result { - let mut kind = [0xff]; - reader.read_exact(&mut kind)?; - - match kind[0] { - 0 => { - let mut chain_id = [0; 32]; - reader.read_exact(&mut chain_id)?; - - let mut nonce = [0; 32]; - reader.read_exact(&mut nonce)?; - - let key = PublicKey::new(Secp256k1::read_G(reader)?) - .ok_or(io::Error::other("key for RouterCommand doesn't have an eth representation"))?; - Ok(RouterCommand::UpdateSeraiKey { - chain_id: U256::from_le_slice(&chain_id), - nonce: U256::from_le_slice(&nonce), - key, - }) - } - 1 => { - let mut chain_id = [0; 32]; - reader.read_exact(&mut chain_id)?; - let chain_id = U256::from_le_slice(&chain_id); - - let mut nonce = [0; 32]; - reader.read_exact(&mut nonce)?; - let nonce = U256::from_le_slice(&nonce); - - let mut outs_len = [0; 4]; - reader.read_exact(&mut outs_len)?; - let outs_len = u32::from_le_bytes(outs_len); - - let mut outs = vec![]; - for _ in 0 .. 
outs_len { - outs.push(OutInstruction::read(reader)?); - } - - Ok(RouterCommand::Execute { chain_id, nonce, outs }) - } - _ => Err(io::Error::other("reading unknown type of RouterCommand"))?, - } - } - - pub fn write(&self, writer: &mut W) -> io::Result<()> { - match self { - RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => { - writer.write_all(&[0])?; - writer.write_all(&chain_id.as_le_bytes())?; - writer.write_all(&nonce.as_le_bytes())?; - writer.write_all(&key.A.to_bytes()) - } - RouterCommand::Execute { chain_id, nonce, outs } => { - writer.write_all(&[1])?; - writer.write_all(&chain_id.as_le_bytes())?; - writer.write_all(&nonce.as_le_bytes())?; - writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?; - for out in outs { - out.write(writer)?; - } - Ok(()) - } - } - } - - pub fn serialize(&self) -> Vec { - let mut res = vec![]; - self.write(&mut res).unwrap(); - res - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct SignedRouterCommand { - command: RouterCommand, - signature: Signature, -} - -impl SignedRouterCommand { - pub fn new(key: &PublicKey, command: RouterCommand, signature: &[u8; 64]) -> Option { - let c = Secp256k1::read_F(&mut &signature[.. 32]).ok()?; - let s = Secp256k1::read_F(&mut &signature[32 ..]).ok()?; - let signature = Signature { c, s }; - - if !signature.verify(key, &command.msg()) { - None? - } - Some(SignedRouterCommand { command, signature }) - } - - pub fn command(&self) -> &RouterCommand { - &self.command - } - - pub fn signature(&self) -> &Signature { - &self.signature - } - - pub fn read(reader: &mut R) -> io::Result { - let command = RouterCommand::read(reader)?; - - let mut sig = [0; 64]; - reader.read_exact(&mut sig)?; - let signature = Signature::from_bytes(sig)?; - - Ok(SignedRouterCommand { command, signature }) - } - - pub fn write(&self, writer: &mut W) -> io::Result<()> { - self.command.write(writer)?; - writer.write_all(&self.signature.to_bytes()) - } -} - -pub struct RouterCommandMachine { - key: PublicKey, - command: RouterCommand, - machine: AlgorithmMachine>, -} - -impl RouterCommandMachine { - pub fn new(keys: ThresholdKeys, command: RouterCommand) -> Option { - // The Schnorr algorithm should be fine without this, even when using the IETF variant - // If this is better and more comprehensive, we should do it, even if not necessary - let mut transcript = RecommendedTranscript::new(b"ethereum-serai RouterCommandMachine v0.1"); - let key = keys.group_key(); - transcript.append_message(b"key", key.to_bytes()); - transcript.append_message(b"command", command.serialize()); - - Some(Self { - key: PublicKey::new(key)?, - command, - machine: AlgorithmMachine::new(Schnorr::new(transcript), keys), - }) - } -} - -impl PreprocessMachine for RouterCommandMachine { - type Preprocess = Preprocess; - type Signature = SignedRouterCommand; - type SignMachine = RouterCommandSignMachine; - - fn preprocess( - self, - rng: &mut R, - ) -> (Self::SignMachine, Self::Preprocess) { - let (machine, preprocess) = self.machine.preprocess(rng); - - (RouterCommandSignMachine { key: self.key, command: self.command, machine }, preprocess) - } -} - -pub struct RouterCommandSignMachine { - key: PublicKey, - command: RouterCommand, - machine: AlgorithmSignMachine>, -} - -impl SignMachine for RouterCommandSignMachine { - type Params = (); - type Keys = ThresholdKeys; - type Preprocess = Preprocess; - type SignatureShare = SignatureShare; - type SignatureMachine = RouterCommandSignatureMachine; - - fn cache(self) -> CachedPreprocess { - 
unimplemented!( - "RouterCommand machines don't support caching their preprocesses due to {}", - "being already bound to a specific command" - ); - } - - fn from_cache( - (): (), - _: ThresholdKeys, - _: CachedPreprocess, - ) -> (Self, Self::Preprocess) { - unimplemented!( - "RouterCommand machines don't support caching their preprocesses due to {}", - "being already bound to a specific command" - ); - } - - fn read_preprocess(&self, reader: &mut R) -> io::Result { - self.machine.read_preprocess(reader) - } - - fn sign( - self, - commitments: HashMap, - msg: &[u8], - ) -> Result<(RouterCommandSignatureMachine, Self::SignatureShare), FrostError> { - if !msg.is_empty() { - panic!("message was passed to a RouterCommand machine when it generates its own"); - } - - let (machine, share) = self.machine.sign(commitments, &self.command.msg())?; - - Ok((RouterCommandSignatureMachine { key: self.key, command: self.command, machine }, share)) - } -} - -pub struct RouterCommandSignatureMachine { - key: PublicKey, - command: RouterCommand, - machine: - AlgorithmSignatureMachine>, -} - -impl SignatureMachine for RouterCommandSignatureMachine { - type SignatureShare = SignatureShare; - - fn read_share(&self, reader: &mut R) -> io::Result { - self.machine.read_share(reader) - } - - fn complete( - self, - shares: HashMap, - ) -> Result { - let sig = self.machine.complete(shares)?; - let signature = Signature::new(&self.key, &self.command.msg(), sig) - .expect("machine produced an invalid signature"); - Ok(SignedRouterCommand { command: self.command, signature }) - } -} diff --git a/networks/ethereum/src/router.rs b/networks/ethereum/src/router.rs deleted file mode 100644 index 62f95a67..00000000 --- a/networks/ethereum/src/router.rs +++ /dev/null @@ -1,443 +0,0 @@ -use std::{sync::Arc, io, collections::HashSet}; - -use k256::{ - elliptic_curve::{group::GroupEncoding, sec1}, - ProjectivePoint, -}; - -use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind}; -#[cfg(test)] -use alloy_core::primitives::B256; -use alloy_consensus::TxLegacy; - -use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent}; - -use alloy_rpc_types_eth::Filter; -#[cfg(test)] -use alloy_rpc_types_eth::{BlockId, TransactionRequest, TransactionInput}; -use alloy_simple_request_transport::SimpleRequest; -use alloy_provider::{Provider, RootProvider}; - -pub use crate::{ - Error, - crypto::{PublicKey, Signature}, - abi::{erc20::Transfer, router as abi}, -}; -use abi::{SeraiKeyUpdated, InInstruction as InInstructionEvent, Executed as ExecutedEvent}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum Coin { - Ether, - Erc20([u8; 20]), -} - -impl Coin { - pub fn read(reader: &mut R) -> io::Result { - let mut kind = [0xff]; - reader.read_exact(&mut kind)?; - Ok(match kind[0] { - 0 => Coin::Ether, - 1 => { - let mut address = [0; 20]; - reader.read_exact(&mut address)?; - Coin::Erc20(address) - } - _ => Err(io::Error::other("unrecognized Coin type"))?, - }) - } - - pub fn write(&self, writer: &mut W) -> io::Result<()> { - match self { - Coin::Ether => writer.write_all(&[0]), - Coin::Erc20(token) => { - writer.write_all(&[1])?; - writer.write_all(token) - } - } - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct InInstruction { - pub id: ([u8; 32], u64), - pub from: [u8; 20], - pub coin: Coin, - pub amount: U256, - pub data: Vec, - pub key_at_end_of_block: ProjectivePoint, -} - -impl InInstruction { - pub fn read(reader: &mut R) -> io::Result { - let id = { - let mut id_hash = [0; 32]; - reader.read_exact(&mut 
id_hash)?; - let mut id_pos = [0; 8]; - reader.read_exact(&mut id_pos)?; - let id_pos = u64::from_le_bytes(id_pos); - (id_hash, id_pos) - }; - - let mut from = [0; 20]; - reader.read_exact(&mut from)?; - - let coin = Coin::read(reader)?; - let mut amount = [0; 32]; - reader.read_exact(&mut amount)?; - let amount = U256::from_le_slice(&amount); - - let mut data_len = [0; 4]; - reader.read_exact(&mut data_len)?; - let data_len = usize::try_from(u32::from_le_bytes(data_len)) - .map_err(|_| io::Error::other("InInstruction data exceeded 2**32 in length"))?; - let mut data = vec![0; data_len]; - reader.read_exact(&mut data)?; - - let mut key_at_end_of_block = ::Repr::default(); - reader.read_exact(&mut key_at_end_of_block)?; - let key_at_end_of_block = Option::from(ProjectivePoint::from_bytes(&key_at_end_of_block)) - .ok_or(io::Error::other("InInstruction had key at end of block which wasn't valid"))?; - - Ok(InInstruction { id, from, coin, amount, data, key_at_end_of_block }) - } - - pub fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(&self.id.0)?; - writer.write_all(&self.id.1.to_le_bytes())?; - - writer.write_all(&self.from)?; - - self.coin.write(writer)?; - writer.write_all(&self.amount.as_le_bytes())?; - - writer.write_all( - &u32::try_from(self.data.len()) - .map_err(|_| { - io::Error::other("InInstruction being written had data exceeding 2**32 in length") - })? - .to_le_bytes(), - )?; - writer.write_all(&self.data)?; - - writer.write_all(&self.key_at_end_of_block.to_bytes()) - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Executed { - pub tx_id: [u8; 32], - pub nonce: u64, - pub signature: [u8; 64], -} - -/// The contract Serai uses to manage its state. -#[derive(Clone, Debug)] -pub struct Router(Arc>, Address); -impl Router { - pub(crate) fn code() -> Vec { - let bytecode = include_str!("../artifacts/Router.bin"); - Bytes::from_hex(bytecode).expect("compiled-in Router bytecode wasn't valid hex").to_vec() - } - - pub(crate) fn init_code(key: &PublicKey) -> Vec { - let mut bytecode = Self::code(); - // Append the constructor arguments - bytecode.extend((abi::constructorCall { _seraiKey: key.eth_repr().into() }).abi_encode()); - bytecode - } - - // This isn't pub in order to force users to use `Deployer::find_router`. - pub(crate) fn new(provider: Arc>, address: Address) -> Self { - Self(provider, address) - } - - pub fn address(&self) -> [u8; 20] { - **self.1 - } - - /// Get the key for Serai at the specified block. - #[cfg(test)] - pub async fn serai_key(&self, at: [u8; 32]) -> Result { - let call = TransactionRequest::default() - .to(self.1) - .input(TransactionInput::new(abi::seraiKeyCall::new(()).abi_encode().into())); - let bytes = self - .0 - .call(&call) - .block(BlockId::Hash(B256::from(at).into())) - .await - .map_err(|_| Error::ConnectionError)?; - let res = - abi::seraiKeyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?; - PublicKey::from_eth_repr(res._0.0).ok_or(Error::ConnectionError) - } - - /// Get the message to be signed in order to update the key for Serai. - pub(crate) fn update_serai_key_message(chain_id: U256, nonce: U256, key: &PublicKey) -> Vec { - let mut buffer = b"updateSeraiKey".to_vec(); - buffer.extend(&chain_id.to_be_bytes::<32>()); - buffer.extend(&nonce.to_be_bytes::<32>()); - buffer.extend(&key.eth_repr()); - buffer - } - - /// Update the key representing Serai. 
- pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy { - // TODO: Set a more accurate gas - TxLegacy { - to: TxKind::Call(self.1), - input: abi::updateSeraiKeyCall::new((public_key.eth_repr().into(), sig.into())) - .abi_encode() - .into(), - gas_limit: 100_000, - ..Default::default() - } - } - - /// Get the current nonce for the published batches. - #[cfg(test)] - pub async fn nonce(&self, at: [u8; 32]) -> Result { - let call = TransactionRequest::default() - .to(self.1) - .input(TransactionInput::new(abi::nonceCall::new(()).abi_encode().into())); - let bytes = self - .0 - .call(&call) - .block(BlockId::Hash(B256::from(at).into())) - .await - .map_err(|_| Error::ConnectionError)?; - let res = - abi::nonceCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?; - Ok(res._0) - } - - /// Get the message to be signed in order to update the key for Serai. - pub(crate) fn execute_message( - chain_id: U256, - nonce: U256, - outs: Vec, - ) -> Vec { - ("execute".to_string(), chain_id, nonce, outs).abi_encode_params() - } - - /// Execute a batch of `OutInstruction`s. - pub fn execute(&self, outs: &[abi::OutInstruction], sig: &Signature) -> TxLegacy { - TxLegacy { - to: TxKind::Call(self.1), - input: abi::executeCall::new((outs.to_vec(), sig.into())).abi_encode().into(), - // TODO - gas_limit: 100_000 + ((200_000 + 10_000) * u64::try_from(outs.len()).unwrap()), - ..Default::default() - } - } - - pub async fn key_at_end_of_block(&self, block: u64) -> Result, Error> { - let filter = Filter::new().from_block(0).to_block(block).address(self.1); - let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH); - let all_keys = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; - if all_keys.is_empty() { - return Ok(None); - }; - - let last_key_x_coordinate_log = all_keys.last().ok_or(Error::ConnectionError)?; - let last_key_x_coordinate = last_key_x_coordinate_log - .log_decode::() - .map_err(|_| Error::ConnectionError)? - .inner - .data - .key; - - let mut compressed_point = ::Repr::default(); - compressed_point[0] = u8::from(sec1::Tag::CompressedEvenY); - compressed_point[1 ..].copy_from_slice(last_key_x_coordinate.as_slice()); - - let key = - Option::from(ProjectivePoint::from_bytes(&compressed_point)).ok_or(Error::ConnectionError)?; - Ok(Some(key)) - } - - pub async fn in_instructions( - &self, - block: u64, - allowed_tokens: &HashSet<[u8; 20]>, - ) -> Result, Error> { - let Some(key_at_end_of_block) = self.key_at_end_of_block(block).await? 
else { - return Ok(vec![]); - }; - - let filter = Filter::new().from_block(block).to_block(block).address(self.1); - let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH); - let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; - - let mut transfer_check = HashSet::new(); - let mut in_instructions = vec![]; - for log in logs { - // Double check the address which emitted this log - if log.address() != self.1 { - Err(Error::ConnectionError)?; - } - - let id = ( - log.block_hash.ok_or(Error::ConnectionError)?.into(), - log.log_index.ok_or(Error::ConnectionError)?, - ); - - let tx_hash = log.transaction_hash.ok_or(Error::ConnectionError)?; - let tx = self - .0 - .get_transaction_by_hash(tx_hash) - .await - .ok() - .flatten() - .ok_or(Error::ConnectionError)?; - - let log = - log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; - - let coin = if log.coin.0 == [0; 20] { - Coin::Ether - } else { - let token = *log.coin.0; - - if !allowed_tokens.contains(&token) { - continue; - } - - // If this also counts as a top-level transfer via the token, drop it - // - // Necessary in order to handle a potential edge case with some theoretical token - // implementations - // - // This will either let it be handled by the top-level transfer hook or will drop it - // entirely on the side of caution - if tx.to == Some(token.into()) { - continue; - } - - // Get all logs for this TX - let receipt = self - .0 - .get_transaction_receipt(tx_hash) - .await - .map_err(|_| Error::ConnectionError)? - .ok_or(Error::ConnectionError)?; - let tx_logs = receipt.inner.logs(); - - // Find a matching transfer log - let mut found_transfer = false; - for tx_log in tx_logs { - let log_index = tx_log.log_index.ok_or(Error::ConnectionError)?; - // Ensure we didn't already use this transfer to check a distinct InInstruction event - if transfer_check.contains(&log_index) { - continue; - } - - // Check if this log is from the token we expected to be transferred - if tx_log.address().0 != token { - continue; - } - // Check if this is a transfer log - // https://github.com/alloy-rs/core/issues/589 - if tx_log.topics()[0] != Transfer::SIGNATURE_HASH { - continue; - } - let Ok(transfer) = Transfer::decode_log(&tx_log.inner.clone(), true) else { continue }; - // Check if this is a transfer to us for the expected amount - if (transfer.to == self.1) && (transfer.value == log.amount) { - transfer_check.insert(log_index); - found_transfer = true; - break; - } - } - if !found_transfer { - // This shouldn't be a ConnectionError - // This is an exploit, a non-conforming ERC20, or an invalid connection - // This should halt the process which is sufficient, yet this is sub-optimal - // TODO - Err(Error::ConnectionError)?; - } - - Coin::Erc20(token) - }; - - in_instructions.push(InInstruction { - id, - from: *log.from.0, - coin, - amount: log.amount, - data: log.instruction.as_ref().to_vec(), - key_at_end_of_block, - }); - } - - Ok(in_instructions) - } - - pub async fn executed_commands(&self, block: u64) -> Result, Error> { - let mut res = vec![]; - - { - let filter = Filter::new().from_block(block).to_block(block).address(self.1); - let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH); - let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; - - for log in logs { - // Double check the address which emitted this log - if log.address() != self.1 { - Err(Error::ConnectionError)?; - } - - let tx_id = 
log.transaction_hash.ok_or(Error::ConnectionError)?.into(); - - let log = - log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; - - let mut signature = [0; 64]; - signature[.. 32].copy_from_slice(log.signature.c.as_ref()); - signature[32 ..].copy_from_slice(log.signature.s.as_ref()); - res.push(Executed { - tx_id, - nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?, - signature, - }); - } - } - - { - let filter = Filter::new().from_block(block).to_block(block).address(self.1); - let filter = filter.event_signature(ExecutedEvent::SIGNATURE_HASH); - let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; - - for log in logs { - // Double check the address which emitted this log - if log.address() != self.1 { - Err(Error::ConnectionError)?; - } - - let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into(); - - let log = log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; - - let mut signature = [0; 64]; - signature[.. 32].copy_from_slice(log.signature.c.as_ref()); - signature[32 ..].copy_from_slice(log.signature.s.as_ref()); - res.push(Executed { - tx_id, - nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?, - signature, - }); - } - } - - Ok(res) - } - - #[cfg(feature = "tests")] - pub fn key_updated_filter(&self) -> Filter { - Filter::new().address(self.1).event_signature(SeraiKeyUpdated::SIGNATURE_HASH) - } - #[cfg(feature = "tests")] - pub fn executed_filter(&self) -> Filter { - Filter::new().address(self.1).event_signature(ExecutedEvent::SIGNATURE_HASH) - } -} diff --git a/networks/ethereum/src/tests/abi/mod.rs b/networks/ethereum/src/tests/abi/mod.rs deleted file mode 100644 index 57ea8811..00000000 --- a/networks/ethereum/src/tests/abi/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -use alloy_sol_types::sol; - -#[rustfmt::skip] -#[allow(warnings)] -#[allow(needless_pass_by_value)] -#[allow(clippy::all)] -#[allow(clippy::ignored_unit_patterns)] -#[allow(clippy::redundant_closure_for_method_calls)] -mod schnorr_container { - use super::*; - sol!("src/tests/contracts/Schnorr.sol"); -} -pub(crate) use schnorr_container::TestSchnorr as schnorr; diff --git a/networks/ethereum/src/tests/contracts/Schnorr.sol b/networks/ethereum/src/tests/contracts/Schnorr.sol deleted file mode 100644 index 832cd2fe..00000000 --- a/networks/ethereum/src/tests/contracts/Schnorr.sol +++ /dev/null @@ -1,15 +0,0 @@ -// SPDX-License-Identifier: AGPLv3 -pragma solidity ^0.8.0; - -import "../../../contracts/Schnorr.sol"; - -contract TestSchnorr { - function verify( - bytes32 px, - bytes calldata message, - bytes32 c, - bytes32 s - ) external pure returns (bool) { - return Schnorr.verify(px, message, c, s); - } -} diff --git a/networks/ethereum/src/tests/crypto.rs b/networks/ethereum/src/tests/crypto.rs deleted file mode 100644 index a668b2d6..00000000 --- a/networks/ethereum/src/tests/crypto.rs +++ /dev/null @@ -1,105 +0,0 @@ -use rand_core::OsRng; - -use group::ff::{Field, PrimeField}; -use k256::{ - ecdsa::{ - self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey, - }, - Scalar, ProjectivePoint, -}; - -use frost::{ - curve::{Ciphersuite, Secp256k1}, - algorithm::{Hram, IetfSchnorr}, - tests::{algorithm_machines, sign}, -}; - -use crate::{crypto::*, tests::key_gen}; - -// The ecrecover opcode, yet with parity replacing v -pub(crate) fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> { - let sig = ecdsa::Signature::from_scalars(r, s).ok()?; - let message: [u8; 
32] = message.to_repr().into(); - alloy_core::primitives::Signature::from_signature_and_parity( - sig, - alloy_core::primitives::Parity::Parity(odd_y), - ) - .ok()? - .recover_address_from_prehash(&alloy_core::primitives::B256::from(message)) - .ok() - .map(Into::into) -} - -#[test] -fn test_ecrecover() { - let private = SigningKey::random(&mut OsRng); - let public = VerifyingKey::from(&private); - - // Sign the signature - const MESSAGE: &[u8] = b"Hello, World!"; - let (sig, recovery_id) = private - .as_nonzero_scalar() - .try_sign_prehashed( - ::F::random(&mut OsRng), - &keccak256(MESSAGE).into(), - ) - .unwrap(); - - // Sanity check the signature verifies - #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result - { - assert_eq!(public.verify_prehash(&keccak256(MESSAGE), &sig).unwrap(), ()); - } - - // Perform the ecrecover - assert_eq!( - ecrecover( - hash_to_scalar(MESSAGE), - u8::from(recovery_id.unwrap().is_y_odd()) == 1, - *sig.r(), - *sig.s() - ) - .unwrap(), - address(&ProjectivePoint::from(public.as_affine())) - ); -} - -// Run the sign test with the EthereumHram -#[test] -fn test_signing() { - let (keys, _) = key_gen(); - - const MESSAGE: &[u8] = b"Hello, World!"; - - let algo = IetfSchnorr::::ietf(); - let _sig = - sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); -} - -#[allow(non_snake_case)] -pub fn preprocess_signature_for_ecrecover( - R: ProjectivePoint, - public_key: &PublicKey, - m: &[u8], - s: Scalar, -) -> (Scalar, Scalar) { - let c = EthereumHram::hram(&R, &public_key.A, m); - let sa = -(s * public_key.px); - let ca = -(c * public_key.px); - (sa, ca) -} - -#[test] -fn test_ecrecover_hack() { - let (keys, public_key) = key_gen(); - - const MESSAGE: &[u8] = b"Hello, World!"; - - let algo = IetfSchnorr::::ietf(); - let sig = - sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); - - let (sa, ca) = preprocess_signature_for_ecrecover(sig.R, &public_key, MESSAGE, sig.s); - let q = ecrecover(sa, false, public_key.px, ca).unwrap(); - assert_eq!(q, address(&sig.R)); -} diff --git a/networks/ethereum/src/tests/mod.rs b/networks/ethereum/src/tests/mod.rs deleted file mode 100644 index dcdbedce..00000000 --- a/networks/ethereum/src/tests/mod.rs +++ /dev/null @@ -1,131 +0,0 @@ -use std::{sync::Arc, collections::HashMap}; - -use rand_core::OsRng; - -use k256::{Scalar, ProjectivePoint}; -use frost::{curve::Secp256k1, Participant, ThresholdKeys, tests::key_gen as frost_key_gen}; - -use alloy_core::{ - primitives::{Address, U256, Bytes, Signature, TxKind}, - hex::FromHex, -}; -use alloy_consensus::{SignableTransaction, TxLegacy}; - -use alloy_rpc_types_eth::TransactionReceipt; -use alloy_simple_request_transport::SimpleRequest; -use alloy_provider::{Provider, RootProvider}; - -use crate::crypto::{address, deterministically_sign, PublicKey}; - -#[cfg(test)] -mod crypto; - -#[cfg(test)] -mod abi; -#[cfg(test)] -mod schnorr; -#[cfg(test)] -mod router; - -pub fn key_gen() -> (HashMap>, PublicKey) { - let mut keys = frost_key_gen::<_, Secp256k1>(&mut OsRng); - let mut group_key = keys[&Participant::new(1).unwrap()].group_key(); - - let mut offset = Scalar::ZERO; - while PublicKey::new(group_key).is_none() { - offset += Scalar::ONE; - group_key += ProjectivePoint::GENERATOR; - } - for keys in keys.values_mut() { - *keys = keys.offset(offset); - } - let public_key = PublicKey::new(group_key).unwrap(); - - (keys, public_key) -} - -// TODO: Use a proper error here -pub async fn send( - 
provider: &RootProvider, - wallet: &k256::ecdsa::SigningKey, - mut tx: TxLegacy, -) -> Option { - let verifying_key = *wallet.verifying_key().as_affine(); - let address = Address::from(address(&verifying_key.into())); - - // https://github.com/alloy-rs/alloy/issues/539 - // let chain_id = provider.get_chain_id().await.unwrap(); - // tx.chain_id = Some(chain_id); - tx.chain_id = None; - tx.nonce = provider.get_transaction_count(address).await.unwrap(); - // 100 gwei - tx.gas_price = 100_000_000_000u128; - - let sig = wallet.sign_prehash_recoverable(tx.signature_hash().as_ref()).unwrap(); - assert_eq!(address, tx.clone().into_signed(sig.into()).recover_signer().unwrap()); - assert!( - provider.get_balance(address).await.unwrap() > - ((U256::from(tx.gas_price) * U256::from(tx.gas_limit)) + tx.value) - ); - - let mut bytes = vec![]; - tx.encode_with_signature_fields(&Signature::from(sig), &mut bytes); - let pending_tx = provider.send_raw_transaction(&bytes).await.ok()?; - pending_tx.get_receipt().await.ok() -} - -pub async fn fund_account( - provider: &RootProvider, - wallet: &k256::ecdsa::SigningKey, - to_fund: Address, - value: U256, -) -> Option<()> { - let funding_tx = - TxLegacy { to: TxKind::Call(to_fund), gas_limit: 21_000, value, ..Default::default() }; - assert!(send(provider, wallet, funding_tx).await.unwrap().status()); - - Some(()) -} - -// TODO: Use a proper error here -pub async fn deploy_contract( - client: Arc>, - wallet: &k256::ecdsa::SigningKey, - name: &str, -) -> Option

{ - let hex_bin_buf = std::fs::read_to_string(format!("./artifacts/{name}.bin")).unwrap(); - let hex_bin = - if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf }; - let bin = Bytes::from_hex(hex_bin).unwrap(); - - let deployment_tx = TxLegacy { - chain_id: None, - nonce: 0, - // 100 gwei - gas_price: 100_000_000_000u128, - gas_limit: 1_000_000, - to: TxKind::Create, - value: U256::ZERO, - input: bin, - }; - - let deployment_tx = deterministically_sign(&deployment_tx); - - // Fund the deployer address - fund_account( - &client, - wallet, - deployment_tx.recover_signer().unwrap(), - U256::from(deployment_tx.tx().gas_limit) * U256::from(deployment_tx.tx().gas_price), - ) - .await?; - - let (deployment_tx, sig, _) = deployment_tx.into_parts(); - let mut bytes = vec![]; - deployment_tx.encode_with_signature_fields(&sig, &mut bytes); - let pending_tx = client.send_raw_transaction(&bytes).await.ok()?; - let receipt = pending_tx.get_receipt().await.ok()?; - assert!(receipt.status()); - - Some(receipt.contract_address.unwrap()) -} diff --git a/networks/ethereum/src/tests/router.rs b/networks/ethereum/src/tests/router.rs deleted file mode 100644 index 724348cc..00000000 --- a/networks/ethereum/src/tests/router.rs +++ /dev/null @@ -1,183 +0,0 @@ -use std::{convert::TryFrom, sync::Arc, collections::HashMap}; - -use rand_core::OsRng; - -use group::Group; -use k256::ProjectivePoint; -use frost::{ - curve::Secp256k1, - Participant, ThresholdKeys, - algorithm::IetfSchnorr, - tests::{algorithm_machines, sign}, -}; - -use alloy_core::primitives::{Address, U256}; - -use alloy_simple_request_transport::SimpleRequest; -use alloy_rpc_types_eth::BlockTransactionsKind; -use alloy_rpc_client::ClientBuilder; -use alloy_provider::{Provider, RootProvider}; - -use alloy_node_bindings::{Anvil, AnvilInstance}; - -use crate::{ - crypto::*, - deployer::Deployer, - router::{Router, abi as router}, - tests::{key_gen, send, fund_account}, -}; - -async fn setup_test() -> ( - AnvilInstance, - Arc>, - u64, - Router, - HashMap>, - PublicKey, -) { - let anvil = Anvil::new().spawn(); - - let provider = RootProvider::new( - ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true), - ); - let chain_id = provider.get_chain_id().await.unwrap(); - let wallet = anvil.keys()[0].clone().into(); - let client = Arc::new(provider); - - // Make sure the Deployer constructor returns None, as it doesn't exist yet - assert!(Deployer::new(client.clone()).await.unwrap().is_none()); - - // Deploy the Deployer - let tx = Deployer::deployment_tx(); - fund_account( - &client, - &wallet, - tx.recover_signer().unwrap(), - U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price), - ) - .await - .unwrap(); - - let (tx, sig, _) = tx.into_parts(); - let mut bytes = vec![]; - tx.encode_with_signature_fields(&sig, &mut bytes); - - let pending_tx = client.send_raw_transaction(&bytes).await.unwrap(); - let receipt = pending_tx.get_receipt().await.unwrap(); - assert!(receipt.status()); - let deployer = - Deployer::new(client.clone()).await.expect("network error").expect("deployer wasn't deployed"); - - let (keys, public_key) = key_gen(); - - // Verify the Router constructor returns None, as it doesn't exist yet - assert!(deployer.find_router(client.clone(), &public_key).await.unwrap().is_none()); - - // Deploy the router - let receipt = send(&client, &anvil.keys()[0].clone().into(), deployer.deploy_router(&public_key)) - .await - .unwrap(); - assert!(receipt.status()); - let contract = 
deployer.find_router(client.clone(), &public_key).await.unwrap().unwrap(); - - (anvil, client, chain_id, contract, keys, public_key) -} - -async fn latest_block_hash(client: &RootProvider) -> [u8; 32] { - client - .get_block(client.get_block_number().await.unwrap().into(), BlockTransactionsKind::Hashes) - .await - .unwrap() - .unwrap() - .header - .hash - .0 -} - -#[tokio::test] -async fn test_deploy_contract() { - let (_anvil, client, _, router, _, public_key) = setup_test().await; - - let block_hash = latest_block_hash(&client).await; - assert_eq!(router.serai_key(block_hash).await.unwrap(), public_key); - assert_eq!(router.nonce(block_hash).await.unwrap(), U256::try_from(1u64).unwrap()); - // TODO: Check it emitted SeraiKeyUpdated(public_key) at its genesis -} - -pub fn hash_and_sign( - keys: &HashMap>, - public_key: &PublicKey, - message: &[u8], -) -> Signature { - let algo = IetfSchnorr::::ietf(); - let sig = - sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, keys), message); - - Signature::new(public_key, message, sig).unwrap() -} - -#[tokio::test] -async fn test_router_update_serai_key() { - let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await; - - let next_key = loop { - let point = ProjectivePoint::random(&mut OsRng); - let Some(next_key) = PublicKey::new(point) else { continue }; - break next_key; - }; - - let message = Router::update_serai_key_message( - U256::try_from(chain_id).unwrap(), - U256::try_from(1u64).unwrap(), - &next_key, - ); - let sig = hash_and_sign(&keys, &public_key, &message); - - let first_block_hash = latest_block_hash(&client).await; - assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key); - - let receipt = - send(&client, &anvil.keys()[0].clone().into(), contract.update_serai_key(&next_key, &sig)) - .await - .unwrap(); - assert!(receipt.status()); - - let second_block_hash = latest_block_hash(&client).await; - assert_eq!(contract.serai_key(second_block_hash).await.unwrap(), next_key); - // Check this does still offer the historical state - assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key); - // TODO: Check logs - - println!("gas used: {:?}", receipt.gas_used); - // println!("logs: {:?}", receipt.logs); -} - -#[tokio::test] -async fn test_router_execute() { - let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await; - - let to = Address::from([0; 20]); - let value = U256::ZERO; - let tx = router::OutInstruction { to, value, calls: vec![] }; - let txs = vec![tx]; - - let first_block_hash = latest_block_hash(&client).await; - let nonce = contract.nonce(first_block_hash).await.unwrap(); - assert_eq!(nonce, U256::try_from(1u64).unwrap()); - - let message = Router::execute_message(U256::try_from(chain_id).unwrap(), nonce, txs.clone()); - let sig = hash_and_sign(&keys, &public_key, &message); - - let receipt = - send(&client, &anvil.keys()[0].clone().into(), contract.execute(&txs, &sig)).await.unwrap(); - assert!(receipt.status()); - - let second_block_hash = latest_block_hash(&client).await; - assert_eq!(contract.nonce(second_block_hash).await.unwrap(), U256::try_from(2u64).unwrap()); - // Check this does still offer the historical state - assert_eq!(contract.nonce(first_block_hash).await.unwrap(), U256::try_from(1u64).unwrap()); - // TODO: Check logs - - println!("gas used: {:?}", receipt.gas_used); - // println!("logs: {:?}", receipt.logs); -} diff --git a/networks/ethereum/src/tests/schnorr.rs b/networks/ethereum/src/tests/schnorr.rs 
deleted file mode 100644 index 2c72ed19..00000000 --- a/networks/ethereum/src/tests/schnorr.rs +++ /dev/null @@ -1,93 +0,0 @@ -use std::sync::Arc; - -use rand_core::OsRng; - -use group::ff::PrimeField; -use k256::Scalar; - -use frost::{ - curve::Secp256k1, - algorithm::IetfSchnorr, - tests::{algorithm_machines, sign}, -}; - -use alloy_core::primitives::Address; - -use alloy_sol_types::SolCall; - -use alloy_rpc_types_eth::{TransactionInput, TransactionRequest}; -use alloy_simple_request_transport::SimpleRequest; -use alloy_rpc_client::ClientBuilder; -use alloy_provider::{Provider, RootProvider}; - -use alloy_node_bindings::{Anvil, AnvilInstance}; - -use crate::{ - Error, - crypto::*, - tests::{key_gen, deploy_contract, abi::schnorr as abi}, -}; - -async fn setup_test() -> (AnvilInstance, Arc>, Address) { - let anvil = Anvil::new().spawn(); - - let provider = RootProvider::new( - ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true), - ); - let wallet = anvil.keys()[0].clone().into(); - let client = Arc::new(provider); - - let address = deploy_contract(client.clone(), &wallet, "TestSchnorr").await.unwrap(); - (anvil, client, address) -} - -#[tokio::test] -async fn test_deploy_contract() { - setup_test().await; -} - -pub async fn call_verify( - provider: &RootProvider, - contract: Address, - public_key: &PublicKey, - message: &[u8], - signature: &Signature, -) -> Result<(), Error> { - let px: [u8; 32] = public_key.px.to_repr().into(); - let c_bytes: [u8; 32] = signature.c.to_repr().into(); - let s_bytes: [u8; 32] = signature.s.to_repr().into(); - let call = TransactionRequest::default().to(contract).input(TransactionInput::new( - abi::verifyCall::new((px.into(), message.to_vec().into(), c_bytes.into(), s_bytes.into())) - .abi_encode() - .into(), - )); - let bytes = provider.call(&call).await.map_err(|_| Error::ConnectionError)?; - let res = - abi::verifyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?; - - if res._0 { - Ok(()) - } else { - Err(Error::InvalidSignature) - } -} - -#[tokio::test] -async fn test_ecrecover_hack() { - let (_anvil, client, contract) = setup_test().await; - - let (keys, public_key) = key_gen(); - - const MESSAGE: &[u8] = b"Hello, World!"; - - let algo = IetfSchnorr::::ietf(); - let sig = - sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); - let sig = Signature::new(&public_key, MESSAGE, sig).unwrap(); - - call_verify(&client, contract, &public_key, MESSAGE, &sig).await.unwrap(); - // Test an invalid signature fails - let mut sig = sig; - sig.s += Scalar::ONE; - assert!(call_verify(&client, contract, &public_key, MESSAGE, &sig).await.is_err()); -} diff --git a/networks/monero/generators/Cargo.toml b/networks/monero/generators/Cargo.toml index af8cbcd9..11a33897 100644 --- a/networks/monero/generators/Cargo.toml +++ b/networks/monero/generators/Cargo.toml @@ -6,6 +6,7 @@ license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/networks/monero/generators" authors = ["Luke Parker "] edition = "2021" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true diff --git a/networks/monero/ringct/bulletproofs/Cargo.toml b/networks/monero/ringct/bulletproofs/Cargo.toml index 9c807193..03a23f19 100644 --- a/networks/monero/ringct/bulletproofs/Cargo.toml +++ b/networks/monero/ringct/bulletproofs/Cargo.toml @@ -18,7 +18,7 @@ workspace = true [dependencies] std-shims = { path = "../../../../common/std-shims", version = "^0.1.1", default-features = false } 
-thiserror = { version = "1", default-features = false, optional = true } +thiserror = { version = "2", default-features = false } rand_core = { version = "0.6", default-features = false } zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] } @@ -42,7 +42,7 @@ hex-literal = "0.4" std = [ "std-shims/std", - "thiserror", + "thiserror/std", "rand_core/std", "zeroize/std", diff --git a/networks/monero/ringct/bulletproofs/src/lib.rs b/networks/monero/ringct/bulletproofs/src/lib.rs index 2a789575..5ca8ec36 100644 --- a/networks/monero/ringct/bulletproofs/src/lib.rs +++ b/networks/monero/ringct/bulletproofs/src/lib.rs @@ -45,14 +45,13 @@ use crate::plus::{ mod tests; /// An error from proving/verifying Bulletproofs(+). -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -#[cfg_attr(feature = "std", derive(thiserror::Error))] +#[derive(Clone, Copy, PartialEq, Eq, Debug, thiserror::Error)] pub enum BulletproofError { /// Proving/verifying a Bulletproof(+) range proof with no commitments. - #[cfg_attr(feature = "std", error("no commitments to prove the range for"))] + #[error("no commitments to prove the range for")] NoCommitments, /// Proving/verifying a Bulletproof(+) range proof with more commitments than supported. - #[cfg_attr(feature = "std", error("too many commitments to prove the range for"))] + #[error("too many commitments to prove the range for")] TooManyCommitments, } diff --git a/networks/monero/ringct/bulletproofs/src/original/mod.rs b/networks/monero/ringct/bulletproofs/src/original/mod.rs index f001bc9b..e1b494d2 100644 --- a/networks/monero/ringct/bulletproofs/src/original/mod.rs +++ b/networks/monero/ringct/bulletproofs/src/original/mod.rs @@ -56,7 +56,7 @@ impl AggregateRangeWitness { } } -impl<'a> AggregateRangeStatement<'a> { +impl AggregateRangeStatement<'_> { fn initial_transcript(&self) -> (Scalar, Vec) { let V = self.commitments.iter().map(|c| c * INV_EIGHT()).collect::>(); (keccak256_to_scalar(V.iter().flat_map(|V| V.compress().to_bytes()).collect::>()), V) diff --git a/networks/monero/ringct/clsag/Cargo.toml b/networks/monero/ringct/clsag/Cargo.toml index 801717c7..647125cd 100644 --- a/networks/monero/ringct/clsag/Cargo.toml +++ b/networks/monero/ringct/clsag/Cargo.toml @@ -18,7 +18,7 @@ workspace = true [dependencies] std-shims = { path = "../../../../common/std-shims", version = "^0.1.1", default-features = false } -thiserror = { version = "1", default-features = false, optional = true } +thiserror = { version = "2", default-features = false } rand_core = { version = "0.6", default-features = false } zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] } @@ -46,7 +46,7 @@ frost = { package = "modular-frost", path = "../../../../crypto/frost", default- std = [ "std-shims/std", - "thiserror", + "thiserror/std", "rand_core/std", "zeroize/std", diff --git a/networks/monero/ringct/clsag/src/lib.rs b/networks/monero/ringct/clsag/src/lib.rs index 0aab537b..d21067ae 100644 --- a/networks/monero/ringct/clsag/src/lib.rs +++ b/networks/monero/ringct/clsag/src/lib.rs @@ -36,29 +36,28 @@ pub use multisig::{ClsagMultisigMaskSender, ClsagAddendum, ClsagMultisig}; mod tests; /// Errors when working with CLSAGs. -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -#[cfg_attr(feature = "std", derive(thiserror::Error))] +#[derive(Clone, Copy, PartialEq, Eq, Debug, thiserror::Error)] pub enum ClsagError { /// The ring was invalid (such as being too small or too large). 
- #[cfg_attr(feature = "std", error("invalid ring"))] + #[error("invalid ring")] InvalidRing, /// The discrete logarithm of the key, scaling G, wasn't equivalent to the signing ring member. - #[cfg_attr(feature = "std", error("invalid commitment"))] + #[error("invalid commitment")] InvalidKey, /// The commitment opening provided did not match the ring member's. - #[cfg_attr(feature = "std", error("invalid commitment"))] + #[error("invalid commitment")] InvalidCommitment, /// The key image was invalid (such as being identity or torsioned) - #[cfg_attr(feature = "std", error("invalid key image"))] + #[error("invalid key image")] InvalidImage, /// The `D` component was invalid. - #[cfg_attr(feature = "std", error("invalid D"))] + #[error("invalid D")] InvalidD, /// The `s` vector was invalid. - #[cfg_attr(feature = "std", error("invalid s"))] + #[error("invalid s")] InvalidS, /// The `c1` variable was invalid. - #[cfg_attr(feature = "std", error("invalid c1"))] + #[error("invalid c1")] InvalidC1, } diff --git a/networks/monero/ringct/clsag/src/multisig.rs b/networks/monero/ringct/clsag/src/multisig.rs index bfbb8fc5..70cba19e 100644 --- a/networks/monero/ringct/clsag/src/multisig.rs +++ b/networks/monero/ringct/clsag/src/multisig.rs @@ -20,7 +20,6 @@ use group::{ use transcript::{Transcript, RecommendedTranscript}; use dalek_ff_group as dfg; use frost::{ - dkg::lagrange, curve::Ed25519, Participant, FrostError, ThresholdKeys, ThresholdView, algorithm::{WriteAddendum, Algorithm}, @@ -233,8 +232,10 @@ impl Algorithm for ClsagMultisig { .append_message(b"key_image_share", addendum.key_image_share.compress().to_bytes()); // Accumulate the interpolated share - let interpolated_key_image_share = - addendum.key_image_share * lagrange::(l, view.included()); + let interpolated_key_image_share = addendum.key_image_share * + view + .interpolation_factor(l) + .ok_or(FrostError::InternalError("processing addendum of non-participant"))?; *self.image.as_mut().unwrap() += interpolated_key_image_share; self diff --git a/networks/monero/ringct/mlsag/Cargo.toml b/networks/monero/ringct/mlsag/Cargo.toml index d666ebfa..9c2bd568 100644 --- a/networks/monero/ringct/mlsag/Cargo.toml +++ b/networks/monero/ringct/mlsag/Cargo.toml @@ -18,7 +18,7 @@ workspace = true [dependencies] std-shims = { path = "../../../../common/std-shims", version = "^0.1.1", default-features = false } -thiserror = { version = "1", default-features = false, optional = true } +thiserror = { version = "2", default-features = false } zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] } @@ -34,7 +34,7 @@ monero-primitives = { path = "../../primitives", version = "0.1", default-featur std = [ "std-shims/std", - "thiserror", + "thiserror/std", "zeroize/std", diff --git a/networks/monero/ringct/mlsag/src/lib.rs b/networks/monero/ringct/mlsag/src/lib.rs index f5164b88..bda8060e 100644 --- a/networks/monero/ringct/mlsag/src/lib.rs +++ b/networks/monero/ringct/mlsag/src/lib.rs @@ -19,23 +19,22 @@ use monero_generators::{H, hash_to_point}; use monero_primitives::keccak256_to_scalar; /// Errors when working with MLSAGs. -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -#[cfg_attr(feature = "std", derive(thiserror::Error))] +#[derive(Clone, Copy, PartialEq, Eq, Debug, thiserror::Error)] pub enum MlsagError { /// Invalid ring (such as too small or too large). - #[cfg_attr(feature = "std", error("invalid ring"))] + #[error("invalid ring")] InvalidRing, /// Invalid amount of key images. 
- #[cfg_attr(feature = "std", error("invalid amount of key images"))] + #[error("invalid amount of key images")] InvalidAmountOfKeyImages, /// Invalid ss matrix. - #[cfg_attr(feature = "std", error("invalid ss"))] + #[error("invalid ss")] InvalidSs, /// Invalid key image. - #[cfg_attr(feature = "std", error("invalid key image"))] + #[error("invalid key image")] InvalidKeyImage, /// Invalid ci vector. - #[cfg_attr(feature = "std", error("invalid ci"))] + #[error("invalid ci")] InvalidCi, } diff --git a/networks/monero/rpc/Cargo.toml b/networks/monero/rpc/Cargo.toml index e5e6a650..8f1cfd8e 100644 --- a/networks/monero/rpc/Cargo.toml +++ b/networks/monero/rpc/Cargo.toml @@ -18,7 +18,7 @@ workspace = true [dependencies] std-shims = { path = "../../../common/std-shims", version = "^0.1.1", default-features = false } -thiserror = { version = "1", default-features = false, optional = true } +thiserror = { version = "2", default-features = false } zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] } hex = { version = "0.4", default-features = false, features = ["alloc"] } @@ -34,7 +34,7 @@ monero-address = { path = "../wallet/address", default-features = false } std = [ "std-shims/std", - "thiserror", + "thiserror/std", "zeroize/std", "hex/std", diff --git a/networks/monero/rpc/src/lib.rs b/networks/monero/rpc/src/lib.rs index 3c8d337a..8ae8ef01 100644 --- a/networks/monero/rpc/src/lib.rs +++ b/networks/monero/rpc/src/lib.rs @@ -42,34 +42,33 @@ const GRACE_BLOCKS_FOR_FEE_ESTIMATE: u64 = 10; const TXS_PER_REQUEST: usize = 100; /// An error from the RPC. -#[derive(Clone, PartialEq, Eq, Debug)] -#[cfg_attr(feature = "std", derive(thiserror::Error))] +#[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)] pub enum RpcError { /// An internal error. - #[cfg_attr(feature = "std", error("internal error ({0})"))] + #[error("internal error ({0})")] InternalError(String), /// A connection error with the node. - #[cfg_attr(feature = "std", error("connection error ({0})"))] + #[error("connection error ({0})")] ConnectionError(String), /// The node is invalid per the expected protocol. - #[cfg_attr(feature = "std", error("invalid node ({0})"))] + #[error("invalid node ({0})")] InvalidNode(String), /// Requested transactions weren't found. - #[cfg_attr(feature = "std", error("transactions not found"))] + #[error("transactions not found")] TransactionsNotFound(Vec<[u8; 32]>), /// The transaction was pruned. /// /// Pruned transactions are not supported at this time. - #[cfg_attr(feature = "std", error("pruned transaction"))] + #[error("pruned transaction")] PrunedTransaction, /// A transaction (sent or received) was invalid. - #[cfg_attr(feature = "std", error("invalid transaction ({0:?})"))] + #[error("invalid transaction ({0:?})")] InvalidTransaction([u8; 32]), /// The returned fee was unusable. - #[cfg_attr(feature = "std", error("unexpected fee response"))] + #[error("unexpected fee response")] InvalidFee, /// The priority intended for use wasn't usable. 
- #[cfg_attr(feature = "std", error("invalid priority"))] + #[error("invalid priority")] InvalidPriority, } diff --git a/networks/monero/verify-chain/Cargo.toml b/networks/monero/verify-chain/Cargo.toml index e1aba16e..f7164357 100644 --- a/networks/monero/verify-chain/Cargo.toml +++ b/networks/monero/verify-chain/Cargo.toml @@ -6,8 +6,8 @@ license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/networks/monero/verify-chain" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.80" publish = false +rust-version = "1.80" [package.metadata.docs.rs] all-features = true diff --git a/networks/monero/wallet/Cargo.toml b/networks/monero/wallet/Cargo.toml index af787e49..0e151e0f 100644 --- a/networks/monero/wallet/Cargo.toml +++ b/networks/monero/wallet/Cargo.toml @@ -11,7 +11,6 @@ rust-version = "1.80" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] -rust-version = "1.80" [package.metadata.cargo-machete] ignored = ["monero-clsag"] @@ -22,7 +21,7 @@ workspace = true [dependencies] std-shims = { path = "../../../common/std-shims", version = "^0.1.1", default-features = false } -thiserror = { version = "1", default-features = false, optional = true } +thiserror = { version = "2", default-features = false } zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] } @@ -62,7 +61,7 @@ monero-simple-request-rpc = { path = "../rpc/simple-request", default-features = std = [ "std-shims/std", - "thiserror", + "thiserror/std", "zeroize/std", diff --git a/networks/monero/wallet/address/Cargo.toml b/networks/monero/wallet/address/Cargo.toml index a86ff73c..e2899b50 100644 --- a/networks/monero/wallet/address/Cargo.toml +++ b/networks/monero/wallet/address/Cargo.toml @@ -18,7 +18,7 @@ workspace = true [dependencies] std-shims = { path = "../../../../common/std-shims", version = "^0.1.1", default-features = false } -thiserror = { version = "1", default-features = false, optional = true } +thiserror = { version = "2", default-features = false } zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] } @@ -40,7 +40,7 @@ serde_json = { version = "1", default-features = false, features = ["alloc"] } std = [ "std-shims/std", - "thiserror", + "thiserror/std", "zeroize/std", diff --git a/networks/monero/wallet/address/src/lib.rs b/networks/monero/wallet/address/src/lib.rs index 194d4469..24dba053 100644 --- a/networks/monero/wallet/address/src/lib.rs +++ b/networks/monero/wallet/address/src/lib.rs @@ -199,29 +199,25 @@ pub enum Network { } /// Errors when decoding an address. -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -#[cfg_attr(feature = "std", derive(thiserror::Error))] +#[derive(Clone, Copy, PartialEq, Eq, Debug, thiserror::Error)] pub enum AddressError { /// The address had an invalid (network, type) byte. - #[cfg_attr(feature = "std", error("invalid byte for the address's network/type ({0})"))] + #[error("invalid byte for the address's network/type ({0})")] InvalidTypeByte(u8), /// The address wasn't a valid Base58Check (as defined by Monero) string. - #[cfg_attr(feature = "std", error("invalid address encoding"))] + #[error("invalid address encoding")] InvalidEncoding, /// The data encoded wasn't the proper length. - #[cfg_attr(feature = "std", error("invalid length"))] + #[error("invalid length")] InvalidLength, /// The address had an invalid key. 
- #[cfg_attr(feature = "std", error("invalid key"))] + #[error("invalid key")] InvalidKey, /// The address was featured with unrecognized features. - #[cfg_attr(feature = "std", error("unknown features"))] + #[error("unknown features")] UnknownFeatures(u64), /// The network was for a different network than expected. - #[cfg_attr( - feature = "std", - error("different network ({actual:?}) than expected ({expected:?})") - )] + #[error("different network ({actual:?}) than expected ({expected:?})")] DifferentNetwork { /// The Network expected. expected: Network, diff --git a/networks/monero/wallet/src/scan.rs b/networks/monero/wallet/src/scan.rs index 19f4d50f..095a5f1a 100644 --- a/networks/monero/wallet/src/scan.rs +++ b/networks/monero/wallet/src/scan.rs @@ -67,14 +67,13 @@ impl Timelocked { } /// Errors when scanning a block. -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -#[cfg_attr(feature = "std", derive(thiserror::Error))] +#[derive(Clone, Copy, PartialEq, Eq, Debug, thiserror::Error)] pub enum ScanError { /// The block was for an unsupported protocol version. - #[cfg_attr(feature = "std", error("unsupported protocol version ({0})"))] + #[error("unsupported protocol version ({0})")] UnsupportedProtocol(u8), /// The ScannableBlock was invalid. - #[cfg_attr(feature = "std", error("invalid scannable block ({0})"))] + #[error("invalid scannable block ({0})")] InvalidScannableBlock(&'static str), } diff --git a/networks/monero/wallet/src/send/mod.rs b/networks/monero/wallet/src/send/mod.rs index ca92961f..3f1ae7d1 100644 --- a/networks/monero/wallet/src/send/mod.rs +++ b/networks/monero/wallet/src/send/mod.rs @@ -141,48 +141,44 @@ impl InternalPayment { } /// An error while sending Monero. -#[derive(Clone, PartialEq, Eq, Debug)] -#[cfg_attr(feature = "std", derive(thiserror::Error))] +#[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)] pub enum SendError { /// The RingCT type to produce proofs for this transaction with weren't supported. - #[cfg_attr(feature = "std", error("this library doesn't yet support that RctType"))] + #[error("this library doesn't yet support that RctType")] UnsupportedRctType, /// The transaction had no inputs specified. - #[cfg_attr(feature = "std", error("no inputs"))] + #[error("no inputs")] NoInputs, /// The decoy quantity was invalid for the specified RingCT type. - #[cfg_attr(feature = "std", error("invalid number of decoys"))] + #[error("invalid number of decoys")] InvalidDecoyQuantity, /// The transaction had no outputs specified. - #[cfg_attr(feature = "std", error("no outputs"))] + #[error("no outputs")] NoOutputs, /// The transaction had too many outputs specified. - #[cfg_attr(feature = "std", error("too many outputs"))] + #[error("too many outputs")] TooManyOutputs, /// The transaction did not have a change output, and did not have two outputs. /// /// Monero requires all transactions have at least two outputs, assuming one payment and one /// change (or at least one dummy and one change). Accordingly, specifying no change and only /// one payment prevents creating a valid transaction - #[cfg_attr(feature = "std", error("only one output and no change address"))] + #[error("only one output and no change address")] NoChange, /// Multiple addresses had payment IDs specified. /// /// Only one payment ID is allowed per transaction. - #[cfg_attr(feature = "std", error("multiple addresses with payment IDs"))] + #[error("multiple addresses with payment IDs")] MultiplePaymentIds, /// Too much arbitrary data was specified.
- #[cfg_attr(feature = "std", error("too much data"))] + #[error("too much data")] TooMuchArbitraryData, /// The created transaction was too large. - #[cfg_attr(feature = "std", error("too large of a transaction"))] + #[error("too large of a transaction")] TooLargeTransaction, /// This transaction could not pay for itself. - #[cfg_attr( - feature = "std", - error( - "not enough funds (inputs {inputs}, outputs {outputs}, necessary_fee {necessary_fee:?})" - ) + #[error( + "not enough funds (inputs {inputs}, outputs {outputs}, necessary_fee {necessary_fee:?})" )] NotEnoughFunds { /// The amount of funds the inputs contributed. @@ -196,20 +192,17 @@ pub enum SendError { necessary_fee: Option, }, /// This transaction is being signed with the wrong private key. - #[cfg_attr(feature = "std", error("wrong spend private key"))] + #[error("wrong spend private key")] WrongPrivateKey, /// This transaction was read from a bytestream which was malicious. - #[cfg_attr( - feature = "std", - error("this SignableTransaction was created by deserializing a malicious serialization") - )] + #[error("this SignableTransaction was created by deserializing a malicious serialization")] MaliciousSerialization, /// There was an error when working with the CLSAGs. - #[cfg_attr(feature = "std", error("clsag error ({0})"))] + #[error("clsag error ({0})")] ClsagError(ClsagError), /// There was an error when working with FROST. #[cfg(feature = "multisig")] - #[cfg_attr(feature = "std", error("frost error {0}"))] + #[error("frost error {0}")] FrostError(FrostError), } diff --git a/networks/monero/wallet/src/send/multisig.rs b/networks/monero/wallet/src/send/multisig.rs index b3d58ba5..d60c5a33 100644 --- a/networks/monero/wallet/src/send/multisig.rs +++ b/networks/monero/wallet/src/send/multisig.rs @@ -14,7 +14,6 @@ use transcript::{Transcript, RecommendedTranscript}; use frost::{ curve::Ed25519, Participant, FrostError, ThresholdKeys, - dkg::lagrange, sign::{ Preprocess, CachedPreprocess, SignatureShare, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine, @@ -34,7 +33,7 @@ use crate::send::{SendError, SignableTransaction, key_image_sort}; pub struct TransactionMachine { signable: SignableTransaction, - i: Participant, + keys: ThresholdKeys, // The key image generator, and the scalar offset from the spend key key_image_generators_and_offsets: Vec<(EdwardsPoint, Scalar)>, @@ -45,7 +44,7 @@ pub struct TransactionMachine { pub struct TransactionSignMachine { signable: SignableTransaction, - i: Participant, + keys: ThresholdKeys, key_image_generators_and_offsets: Vec<(EdwardsPoint, Scalar)>, clsags: Vec<(ClsagMultisigMaskSender, AlgorithmSignMachine)>, @@ -61,7 +60,7 @@ pub struct TransactionSignatureMachine { impl SignableTransaction { /// Create a FROST signing machine out of this signable transaction. 
- pub fn multisig(self, keys: &ThresholdKeys) -> Result { + pub fn multisig(self, keys: ThresholdKeys) -> Result { let mut clsags = vec![]; let mut key_image_generators_and_offsets = vec![]; @@ -85,12 +84,7 @@ impl SignableTransaction { clsags.push((clsag_mask_send, AlgorithmMachine::new(clsag, offset))); } - Ok(TransactionMachine { - signable: self, - i: keys.params().i(), - key_image_generators_and_offsets, - clsags, - }) + Ok(TransactionMachine { signable: self, keys, key_image_generators_and_offsets, clsags }) } } @@ -120,7 +114,7 @@ impl PreprocessMachine for TransactionMachine { TransactionSignMachine { signable: self.signable, - i: self.i, + keys: self.keys, key_image_generators_and_offsets: self.key_image_generators_and_offsets, clsags, @@ -173,12 +167,12 @@ impl SignMachine for TransactionSignMachine { // We do not need to be included here, yet this set of signers has yet to be validated // We explicitly remove ourselves to ensure we aren't included twice, if we were redundantly // included - commitments.remove(&self.i); + commitments.remove(&self.keys.params().i()); // Find out who's included let mut included = commitments.keys().copied().collect::>(); // This push won't duplicate due to the above removal - included.push(self.i); + included.push(self.keys.params().i()); // unstable sort may reorder elements of equal order // Given our lack of duplicates, we should have no elements of equal order included.sort_unstable(); @@ -192,12 +186,15 @@ impl SignMachine for TransactionSignMachine { } // Convert the serialized nonces commitments to a parallelized Vec + let view = self.keys.view(included.clone()).map_err(|_| { + FrostError::InvalidSigningSet("couldn't form an interpolated view of the key") + })?; let mut commitments = (0 .. self.clsags.len()) .map(|c| { included .iter() .map(|l| { - let preprocess = if *l == self.i { + let preprocess = if *l == self.keys.params().i() { self.our_preprocess[c].clone() } else { commitments.get_mut(l).ok_or(FrostError::MissingParticipant(*l))?[c].clone() @@ -206,7 +203,7 @@ impl SignMachine for TransactionSignMachine { // While here, calculate the key image as needed to call sign // The CLSAG algorithm will independently calculate the key image/verify these shares key_images[c] += - preprocess.addendum.key_image_share().0 * lagrange::(*l, &included).0; + preprocess.addendum.key_image_share().0 * view.interpolation_factor(*l).unwrap().0; Ok((*l, preprocess)) }) @@ -217,7 +214,7 @@ impl SignMachine for TransactionSignMachine { // The above inserted our own preprocess into these maps (which is unnecessary) // Remove it now for map in &mut commitments { - map.remove(&self.i); + map.remove(&self.keys.params().i()); } // The actual TX will have sorted its inputs by key image diff --git a/networks/monero/wallet/src/tests/extra.rs b/networks/monero/wallet/src/tests/extra.rs index 497602ce..1d21490a 100644 --- a/networks/monero/wallet/src/tests/extra.rs +++ b/networks/monero/wallet/src/tests/extra.rs @@ -8,7 +8,8 @@ use crate::{ // Tests derived from // https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/ // tests/unit_tests/test_tx_utils.cpp -// which is licensed as follows: +// which is licensed +#[allow(clippy::empty_line_after_outer_attr)] // rustfmt is for the comment, not for the const #[rustfmt::skip] /* Copyright (c) 2014-2022, The Monero Project @@ -105,15 +106,13 @@ fn padding_only_max_size() { #[test] fn padding_only_exceed_max_size() { let buf: Vec = vec![0; MAX_TX_EXTRA_PADDING_COUNT + 1]; - let extra = 
Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap(); - assert!(extra.0.is_empty()); + Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap_err(); } #[test] fn invalid_padding_only() { let buf: Vec = vec![0, 42]; - let extra = Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap(); - assert!(extra.0.is_empty()); + Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap_err(); } #[test] @@ -137,8 +136,7 @@ fn extra_nonce_only_wrong_size() { let mut buf: Vec = vec![0; 20]; buf[0] = 2; buf[1] = 255; - let extra = Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap(); - assert!(extra.0.is_empty()); + Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap_err(); } #[test] @@ -158,8 +156,7 @@ fn pub_key_and_padding() { fn pub_key_and_invalid_padding() { let mut buf: Vec = PUB_KEY_BYTES.to_vec(); buf.extend([0, 1]); - let extra = Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap(); - assert_eq!(extra.0, vec![ExtraField::PublicKey(pub_key())]); + Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap_err(); } #[test] @@ -185,8 +182,7 @@ fn extra_mysterious_minergate_only_wrong_size() { let mut buf: Vec = vec![0; 20]; buf[0] = 222; buf[1] = 255; - let extra = Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap(); - assert!(extra.0.is_empty()); + Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap_err(); } #[test] diff --git a/networks/monero/wallet/src/view_pair.rs b/networks/monero/wallet/src/view_pair.rs index 3b09f088..c09f2965 100644 --- a/networks/monero/wallet/src/view_pair.rs +++ b/networks/monero/wallet/src/view_pair.rs @@ -10,8 +10,7 @@ use crate::{ }; /// An error while working with a ViewPair. -#[derive(Clone, PartialEq, Eq, Debug)] -#[cfg_attr(feature = "std", derive(thiserror::Error))] +#[derive(Clone, PartialEq, Eq, Debug, thiserror::Error)] pub enum ViewPairError { /// The spend key was torsioned. /// @@ -20,7 +19,7 @@ pub enum ViewPairError { // CLSAG seems to support it if the challenge does a torsion clear, FCMP++ should ship with a // torsion clear, yet it's not worth it to modify CLSAG sign to generate challenges until the // torsion clears and ensure spendability (nor can we reasonably guarantee that in the future) - #[cfg_attr(feature = "std", error("torsioned spend key"))] + #[error("torsioned spend key")] TorsionedSpendKey, } diff --git a/networks/monero/wallet/tests/runner/mod.rs b/networks/monero/wallet/tests/runner/mod.rs index b83f939a..5678ba1b 100644 --- a/networks/monero/wallet/tests/runner/mod.rs +++ b/networks/monero/wallet/tests/runner/mod.rs @@ -285,7 +285,7 @@ macro_rules! 
test { { let mut machines = HashMap::new(); for i in (1 ..= THRESHOLD).map(|i| Participant::new(i).unwrap()) { - machines.insert(i, tx.clone().multisig(&keys[&i]).unwrap()); + machines.insert(i, tx.clone().multisig(keys[&i].clone()).unwrap()); } frost::tests::sign_without_caching(&mut OsRng, machines, &[]) diff --git a/orchestration/Cargo.toml b/orchestration/Cargo.toml index fca38066..00b0d99b 100644 --- a/orchestration/Cargo.toml +++ b/orchestration/Cargo.toml @@ -7,6 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/orchestration/" authors = ["Luke Parker "] keywords = [] edition = "2021" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true @@ -24,6 +25,8 @@ rand_chacha = { version = "0.3", default-features = false, features = ["std"] } transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] } ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] } +embedwards25519 = { path = "../crypto/evrf/embedwards25519" } +secq256k1 = { path = "../crypto/evrf/secq256k1" } zalloc = { path = "../common/zalloc" } diff --git a/orchestration/dev/networks/bitcoin/run.sh b/orchestration/dev/networks/bitcoin/run.sh index da7c95a8..bec89fa9 100755 --- a/orchestration/dev/networks/bitcoin/run.sh +++ b/orchestration/dev/networks/bitcoin/run.sh @@ -3,7 +3,7 @@ RPC_USER="${RPC_USER:=serai}" RPC_PASS="${RPC_PASS:=seraidex}" -bitcoind -txindex -regtest --port=8333 \ +bitcoind -regtest --port=8333 \ -rpcuser=$RPC_USER -rpcpassword=$RPC_PASS \ -rpcbind=0.0.0.0 -rpcallowip=0.0.0.0/0 -rpcport=8332 \ - $1 + $@ diff --git a/orchestration/dev/networks/ethereum-relayer/.folder b/orchestration/dev/networks/ethereum-relayer/.folder index 675d4438..e69de29b 100644 --- a/orchestration/dev/networks/ethereum-relayer/.folder +++ b/orchestration/dev/networks/ethereum-relayer/.folder @@ -1,11 +0,0 @@ -#!/bin/sh - -RPC_USER="${RPC_USER:=serai}" -RPC_PASS="${RPC_PASS:=seraidex}" - -# Run Monero -monerod --non-interactive --regtest --offline --fixed-difficulty=1 \ - --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \ - --rpc-access-control-origins "*" --disable-rpc-ban \ - --rpc-login=$RPC_USER:$RPC_PASS \ - $1 diff --git a/orchestration/dev/networks/monero/run.sh b/orchestration/dev/networks/monero/run.sh index 75a93e46..1186c4d1 100755 --- a/orchestration/dev/networks/monero/run.sh +++ b/orchestration/dev/networks/monero/run.sh @@ -8,4 +8,4 @@ monerod --non-interactive --regtest --offline --fixed-difficulty=1 \ --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \ --rpc-access-control-origins "*" --disable-rpc-ban \ --rpc-login=$RPC_USER:$RPC_PASS --log-level 2 \ - $1 + $@ diff --git a/orchestration/src/main.rs b/orchestration/src/main.rs index 4655be01..7afec67d 100644 --- a/orchestration/src/main.rs +++ b/orchestration/src/main.rs @@ -25,6 +25,8 @@ use ciphersuite::{ }, Ciphersuite, Ristretto, }; +use embedwards25519::Embedwards25519; +use secq256k1::Secq256k1; mod mimalloc; use mimalloc::mimalloc; @@ -267,6 +269,55 @@ fn infrastructure_keys(network: Network) -> InfrastructureKeys { ]) } +struct EmbeddedCurveKeys { + embedwards25519: (Zeroizing>, Vec), + secq256k1: (Zeroizing>, Vec), +} + +fn embedded_curve_keys(network: Network) -> EmbeddedCurveKeys { + // Generate entropy for the embedded curve keys + + let entropy = { + let path = home::home_dir() + .unwrap() + .join(".serai") + .join(network.label()) + 
.join("embedded_curve_keys_entropy"); + // Check if there's existing entropy + if let Ok(entropy) = fs::read(&path).map(Zeroizing::new) { + assert_eq!(entropy.len(), 32, "entropy saved to disk wasn't 32 bytes"); + let mut res = Zeroizing::new([0; 32]); + res.copy_from_slice(entropy.as_ref()); + res + } else { + // If there isn't, generate fresh entropy + let mut res = Zeroizing::new([0; 32]); + OsRng.fill_bytes(res.as_mut()); + fs::write(&path, &res).unwrap(); + res + } + }; + + let mut transcript = + RecommendedTranscript::new(b"Serai Orchestrator Embedded Curve Keys Transcript"); + transcript.append_message(b"network", network.label().as_bytes()); + transcript.append_message(b"entropy", entropy); + let mut rng = ChaCha20Rng::from_seed(transcript.rng_seed(b"embedded_curve_keys")); + + EmbeddedCurveKeys { + embedwards25519: { + let key = Zeroizing::new(::F::random(&mut rng)); + let pub_key = Embedwards25519::generator() * key.deref(); + (Zeroizing::new(key.to_repr().as_slice().to_vec()), pub_key.to_bytes().to_vec()) + }, + secq256k1: { + let key = Zeroizing::new(::F::random(&mut rng)); + let pub_key = Secq256k1::generator() * key.deref(); + (Zeroizing::new(key.to_repr().as_slice().to_vec()), pub_key.to_bytes().to_vec()) + }, + } +} + fn dockerfiles(network: Network) { let orchestration_path = orchestration_path(network); @@ -294,18 +345,15 @@ fn dockerfiles(network: Network) { monero_key.1, ); - let new_entropy = || { - let mut res = Zeroizing::new([0; 32]); - OsRng.fill_bytes(res.as_mut()); - res - }; + let embedded_curve_keys = embedded_curve_keys(network); processor( &orchestration_path, network, "bitcoin", coordinator_key.1, bitcoin_key.0, - new_entropy(), + embedded_curve_keys.embedwards25519.0.clone(), + embedded_curve_keys.secq256k1.0.clone(), ); processor( &orchestration_path, @@ -313,9 +361,18 @@ fn dockerfiles(network: Network) { "ethereum", coordinator_key.1, ethereum_key.0, - new_entropy(), + embedded_curve_keys.embedwards25519.0.clone(), + embedded_curve_keys.secq256k1.0.clone(), + ); + processor( + &orchestration_path, + network, + "monero", + coordinator_key.1, + monero_key.0, + embedded_curve_keys.embedwards25519.0.clone(), + embedded_curve_keys.embedwards25519.0.clone(), ); - processor(&orchestration_path, network, "monero", coordinator_key.1, monero_key.0, new_entropy()); let serai_key = { let serai_key = Zeroizing::new( @@ -346,6 +403,7 @@ fn key_gen(network: Network) { let _ = fs::create_dir_all(&serai_dir); fs::write(key_file, key.to_repr()).expect("couldn't write key"); + // TODO: Move embedded curve key gen here, and print them println!( "Public Key: {}", hex::encode((::generator() * key).to_bytes()) diff --git a/orchestration/src/processor.rs b/orchestration/src/processor.rs index cefe6455..00f9243d 100644 --- a/orchestration/src/processor.rs +++ b/orchestration/src/processor.rs @@ -12,16 +12,17 @@ pub fn processor( network: Network, coin: &'static str, _coordinator_key: ::G, - coin_key: Zeroizing<::F>, - entropy: Zeroizing<[u8; 32]>, + processor_key: Zeroizing<::F>, + substrate_evrf_key: Zeroizing>, + network_evrf_key: Zeroizing>, ) { let setup = mimalloc(Os::Debian).to_string() + &build_serai_service( if coin == "ethereum" { r#" RUN cargo install svm-rs -RUN svm install 0.8.25 -RUN svm use 0.8.25 +RUN svm install 0.8.26 +RUN svm use 0.8.26 "# } else { "" @@ -53,8 +54,9 @@ RUN apt install -y ca-certificates let mut env_vars = vec![ ("MESSAGE_QUEUE_RPC", format!("serai-{}-message-queue", network.label())), - ("MESSAGE_QUEUE_KEY", hex::encode(coin_key.to_repr())), - 
("ENTROPY", hex::encode(entropy.as_ref())), + ("MESSAGE_QUEUE_KEY", hex::encode(processor_key.to_repr())), + ("SUBSTRATE_EVRF_KEY", hex::encode(substrate_evrf_key)), + ("NETWORK_EVRF_KEY", hex::encode(network_evrf_key)), ("NETWORK", coin.to_string()), ("NETWORK_RPC_LOGIN", format!("{RPC_USER}:{RPC_PASS}")), ("NETWORK_RPC_HOSTNAME", hostname), diff --git a/orchestration/testnet/networks/bitcoin/run.sh b/orchestration/testnet/networks/bitcoin/run.sh index dbec375a..6544243b 100755 --- a/orchestration/testnet/networks/bitcoin/run.sh +++ b/orchestration/testnet/networks/bitcoin/run.sh @@ -3,7 +3,7 @@ RPC_USER="${RPC_USER:=serai}" RPC_PASS="${RPC_PASS:=seraidex}" -bitcoind -txindex -testnet -port=8333 \ +bitcoind -testnet -port=8333 \ -rpcuser=$RPC_USER -rpcpassword=$RPC_PASS \ -rpcbind=0.0.0.0 -rpcallowip=0.0.0.0/0 -rpcport=8332 \ --datadir=/volume diff --git a/orchestration/testnet/networks/ethereum-relayer/.folder b/orchestration/testnet/networks/ethereum-relayer/.folder index 675d4438..e69de29b 100644 --- a/orchestration/testnet/networks/ethereum-relayer/.folder +++ b/orchestration/testnet/networks/ethereum-relayer/.folder @@ -1,11 +0,0 @@ -#!/bin/sh - -RPC_USER="${RPC_USER:=serai}" -RPC_PASS="${RPC_PASS:=seraidex}" - -# Run Monero -monerod --non-interactive --regtest --offline --fixed-difficulty=1 \ - --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \ - --rpc-access-control-origins "*" --disable-rpc-ban \ - --rpc-login=$RPC_USER:$RPC_PASS \ - $1 diff --git a/patches/directories-next/Cargo.toml b/patches/directories-next/Cargo.toml index 8c2b21dc..3ffcb6ce 100644 --- a/patches/directories-next/Cargo.toml +++ b/patches/directories-next/Cargo.toml @@ -7,7 +7,6 @@ repository = "https://github.com/serai-dex/serai/tree/develop/patches/directorie authors = ["Luke Parker "] keywords = [] edition = "2021" -rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git a/patches/option-ext/Cargo.toml b/patches/option-ext/Cargo.toml index 6f039c31..64bf3838 100644 --- a/patches/option-ext/Cargo.toml +++ b/patches/option-ext/Cargo.toml @@ -7,7 +7,6 @@ repository = "https://github.com/serai-dex/serai/tree/develop/patches/option-ext authors = ["Luke Parker "] keywords = [] edition = "2021" -rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git a/patches/parking_lot/Cargo.toml b/patches/parking_lot/Cargo.toml index 957b19bf..20cdd271 100644 --- a/patches/parking_lot/Cargo.toml +++ b/patches/parking_lot/Cargo.toml @@ -7,7 +7,6 @@ repository = "https://github.com/serai-dex/serai/tree/develop/patches/parking_lo authors = ["Luke Parker "] keywords = [] edition = "2021" -rust-version = "1.70" [package.metadata.docs.rs] all-features = true diff --git a/patches/parking_lot_core/Cargo.toml b/patches/parking_lot_core/Cargo.toml index 37dcc703..cafd432b 100644 --- a/patches/parking_lot_core/Cargo.toml +++ b/patches/parking_lot_core/Cargo.toml @@ -7,7 +7,6 @@ repository = "https://github.com/serai-dex/serai/tree/develop/patches/parking_lo authors = ["Luke Parker "] keywords = [] edition = "2021" -rust-version = "1.70" [package.metadata.docs.rs] all-features = true diff --git a/patches/rocksdb/Cargo.toml b/patches/rocksdb/Cargo.toml index 3a92fafc..6622694a 100644 --- a/patches/rocksdb/Cargo.toml +++ b/patches/rocksdb/Cargo.toml @@ -7,17 +7,16 @@ repository = "https://github.com/serai-dex/serai/tree/develop/patches/rocksdb" authors = ["Luke Parker "] keywords = [] edition = "2021" -rust-version = "1.70" [package.metadata.docs.rs] all-features = 
true rustdoc-args = ["--cfg", "docsrs"] [dependencies] -rocksdb = { version = "0.22", default-features = false } +rocksdb = { version = "0.23", default-features = false, features = ["bindgen-runtime"] } [features] -jemalloc = [] +jemalloc = [] # Dropped as this causes a compilation failure on windows snappy = ["rocksdb/snappy"] lz4 = ["rocksdb/lz4"] zstd = ["rocksdb/zstd"] diff --git a/patches/tiny-bip39/Cargo.toml b/patches/tiny-bip39/Cargo.toml deleted file mode 100644 index ff9b8a61..00000000 --- a/patches/tiny-bip39/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "tiny-bip39" -version = "1.0.2" -description = "tiny-bip39 which patches to the latest update" -license = "MIT" -repository = "https://github.com/serai-dex/serai/tree/develop/patches/tiny-bip39" -authors = ["Luke Parker "] -keywords = [] -edition = "2021" -rust-version = "1.70" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] - -[package.metadata.cargo-machete] -ignored = ["tiny-bip39"] - -[lib] -name = "bip39" -path = "src/lib.rs" - -[dependencies] -tiny-bip39 = "2" diff --git a/patches/tiny-bip39/src/lib.rs b/patches/tiny-bip39/src/lib.rs deleted file mode 100644 index 3890f5ae..00000000 --- a/patches/tiny-bip39/src/lib.rs +++ /dev/null @@ -1 +0,0 @@ -pub use bip39::*; diff --git a/patches/zstd/Cargo.toml b/patches/zstd/Cargo.toml index 0d1368e4..488c80ff 100644 --- a/patches/zstd/Cargo.toml +++ b/patches/zstd/Cargo.toml @@ -7,7 +7,6 @@ repository = "https://github.com/serai-dex/serai/tree/develop/patches/zstd" authors = ["Luke Parker "] keywords = [] edition = "2021" -rust-version = "1.70" [package.metadata.docs.rs] all-features = true diff --git a/processor/Cargo.toml b/processor/Cargo.toml deleted file mode 100644 index 9d29bc7c..00000000 --- a/processor/Cargo.toml +++ /dev/null @@ -1,94 +0,0 @@ -[package] -name = "serai-processor" -version = "0.1.0" -description = "Multichain processor premised on canonicity to reach distributed consensus automatically" -license = "AGPL-3.0-only" -repository = "https://github.com/serai-dex/serai/tree/develop/processor" -authors = ["Luke Parker "] -keywords = [] -edition = "2021" -publish = false - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] - -[lints] -workspace = true - -[dependencies] -# Macros -async-trait = { version = "0.1", default-features = false } -zeroize = { version = "1", default-features = false, features = ["std"] } -thiserror = { version = "1", default-features = false } - -# Libs -rand_core = { version = "0.6", default-features = false, features = ["std", "getrandom"] } -rand_chacha = { version = "0.3", default-features = false, features = ["std"] } - -# Encoders -const-hex = { version = "1", default-features = false } -hex = { version = "0.4", default-features = false, features = ["std"] } -scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } -borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } -serde_json = { version = "1", default-features = false, features = ["std"] } - -# Cryptography -ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] } - -transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std"] } -frost = { package = "modular-frost", path = "../crypto/frost", default-features = false, features = ["ristretto"] } -frost-schnorrkel = { path = 
"../crypto/schnorrkel", default-features = false } - -# Bitcoin/Ethereum -k256 = { version = "^0.13.1", default-features = false, features = ["std"], optional = true } - -# Bitcoin -secp256k1 = { version = "0.29", default-features = false, features = ["std", "global-context", "rand-std"], optional = true } -bitcoin-serai = { path = "../networks/bitcoin", default-features = false, features = ["std"], optional = true } - -# Ethereum -ethereum-serai = { path = "../networks/ethereum", default-features = false, optional = true } - -# Monero -dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"], optional = true } -monero-simple-request-rpc = { path = "../networks/monero/rpc/simple-request", default-features = false, optional = true } -monero-wallet = { path = "../networks/monero/wallet", default-features = false, features = ["std", "multisig", "compile-time-generators"], optional = true } - -# Application -log = { version = "0.4", default-features = false, features = ["std"] } -env_logger = { version = "0.10", default-features = false, features = ["humantime"], optional = true } -tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } - -zalloc = { path = "../common/zalloc" } -serai-db = { path = "../common/db" } -serai-env = { path = "../common/env", optional = true } -# TODO: Replace with direct usage of primitives -serai-client = { path = "../substrate/client", default-features = false, features = ["serai"] } - -messages = { package = "serai-processor-messages", path = "./messages" } - -message-queue = { package = "serai-message-queue", path = "../message-queue", optional = true } - -[dev-dependencies] -frost = { package = "modular-frost", path = "../crypto/frost", features = ["tests"] } - -sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] } - -ethereum-serai = { path = "../networks/ethereum", default-features = false, features = ["tests"] } - -dockertest = "0.5" -serai-docker-tests = { path = "../tests/docker" } - -[features] -secp256k1 = ["k256", "frost/secp256k1"] -bitcoin = ["dep:secp256k1", "secp256k1", "bitcoin-serai", "serai-client/bitcoin"] - -ethereum = ["secp256k1", "ethereum-serai/tests"] - -ed25519 = ["dalek-ff-group", "frost/ed25519"] -monero = ["ed25519", "monero-simple-request-rpc", "monero-wallet", "serai-client/monero"] - -binaries = ["env_logger", "serai-env", "message-queue"] -parity-db = ["serai-db/parity-db"] -rocksdb = ["serai-db/rocksdb"] diff --git a/processor/README.md b/processor/README.md index 37d11e0d..e942f557 100644 --- a/processor/README.md +++ b/processor/README.md @@ -1,5 +1,5 @@ # Processor -The Serai processor scans a specified external network, communicating with the -coordinator. For details on its exact messaging flow, and overall policies, -please view `docs/processor`. +The Serai processors, built from the libraries here, scan an external network +and report the indexed data to the coordinator. For details on its exact +messaging flow, and overall policies, please view `docs/processor`. 
diff --git a/processor/TODO/main.rs b/processor/TODO/main.rs new file mode 100644 index 00000000..1585ea61 --- /dev/null +++ b/processor/TODO/main.rs @@ -0,0 +1,38 @@ +async fn handle_coordinator_msg( + txn: &mut D::Transaction<'_>, + network: &N, + coordinator: &mut Co, + tributary_mutable: &mut TributaryMutable, + substrate_mutable: &mut SubstrateMutable, + msg: &Message, +) { + match msg.msg.clone() { + CoordinatorMessage::Substrate(msg) => { + match msg { + messages::substrate::CoordinatorMessage::SubstrateBlock { + context, + block: substrate_block, + burns, + batches, + } => { + // Send SubstrateBlockAck, with relevant plan IDs, before we trigger the signing of these + // plans + if !tributary_mutable.signers.is_empty() { + coordinator + .send(messages::coordinator::ProcessorMessage::SubstrateBlockAck { + block: substrate_block, + plans: to_sign + .iter() + .filter_map(|signable| { + SessionDb::get(txn, signable.0.to_bytes().as_ref()) + .map(|session| PlanMeta { session, id: signable.1 }) + }) + .collect(), + }) + .await; + } + } + } + } + } +} diff --git a/processor/src/tests/addresses.rs b/processor/TODO/tests/addresses.rs similarity index 99% rename from processor/src/tests/addresses.rs rename to processor/TODO/tests/addresses.rs index 3d4d6d4c..1a06963a 100644 --- a/processor/src/tests/addresses.rs +++ b/processor/TODO/tests/addresses.rs @@ -1,3 +1,5 @@ +// TODO + use core::{time::Duration, pin::Pin, future::Future}; use std::collections::HashMap; diff --git a/processor/src/tests/batch_signer.rs b/processor/TODO/tests/batch_signer.rs similarity index 99% rename from processor/src/tests/batch_signer.rs rename to processor/TODO/tests/batch_signer.rs index 8da67ef1..84bd5e17 100644 --- a/processor/src/tests/batch_signer.rs +++ b/processor/TODO/tests/batch_signer.rs @@ -1,3 +1,5 @@ +// TODO + use std::collections::HashMap; use rand_core::{RngCore, OsRng}; diff --git a/processor/src/tests/cosigner.rs b/processor/TODO/tests/cosigner.rs similarity index 99% rename from processor/src/tests/cosigner.rs rename to processor/TODO/tests/cosigner.rs index a66161bf..98116bc3 100644 --- a/processor/src/tests/cosigner.rs +++ b/processor/TODO/tests/cosigner.rs @@ -1,3 +1,5 @@ +// TODO + use std::collections::HashMap; use rand_core::{RngCore, OsRng}; diff --git a/processor/TODO/tests/key_gen.rs b/processor/TODO/tests/key_gen.rs new file mode 100644 index 00000000..116db11e --- /dev/null +++ b/processor/TODO/tests/key_gen.rs @@ -0,0 +1,143 @@ +// TODO + +use std::collections::HashMap; + +use zeroize::Zeroizing; + +use rand_core::OsRng; + +use ciphersuite::{ + group::{ff::Field, GroupEncoding}, + Ciphersuite, Ristretto, +}; +use dkg::{Participant, ThresholdParams, evrf::*}; + +use serai_db::{DbTxn, Db, MemDb}; + +use sp_application_crypto::sr25519; +use serai_client::validator_sets::primitives::{Session, KeyPair}; + +use messages::key_gen::*; +use crate::{ + networks::Network, + key_gen::{KeyConfirmed, KeyGen}, +}; + +const SESSION: Session = Session(1); + +pub fn test_key_gen() { + let mut dbs = HashMap::new(); + let mut substrate_evrf_keys = HashMap::new(); + let mut network_evrf_keys = HashMap::new(); + let mut evrf_public_keys = vec![]; + let mut key_gens = HashMap::new(); + for i in 1 ..= 5 { + let db = MemDb::new(); + dbs.insert(i, db.clone()); + + let substrate_evrf_key = Zeroizing::new( + <::EmbeddedCurve as Ciphersuite>::F::random(&mut OsRng), + ); + substrate_evrf_keys.insert(i, substrate_evrf_key.clone()); + let network_evrf_key = Zeroizing::new( + <::EmbeddedCurve as 
Ciphersuite>::F::random(&mut OsRng), + ); + network_evrf_keys.insert(i, network_evrf_key.clone()); + + evrf_public_keys.push(( + (<::EmbeddedCurve as Ciphersuite>::generator() * *substrate_evrf_key) + .to_bytes(), + (<::EmbeddedCurve as Ciphersuite>::generator() * *network_evrf_key) + .to_bytes() + .as_ref() + .to_vec(), + )); + key_gens + .insert(i, KeyGen::::new(db, substrate_evrf_key.clone(), network_evrf_key.clone())); + } + + let mut participations = HashMap::new(); + for i in 1 ..= 5 { + let key_gen = key_gens.get_mut(&i).unwrap(); + let mut txn = dbs.get_mut(&i).unwrap().txn(); + let mut msgs = key_gen.handle( + &mut txn, + CoordinatorMessage::GenerateKey { + session: SESSION, + threshold: 3, + evrf_public_keys: evrf_public_keys.clone(), + }, + ); + assert_eq!(msgs.len(), 1); + let ProcessorMessage::Participation { session, participation } = msgs.swap_remove(0) else { + panic!("didn't get a participation") + }; + assert_eq!(session, SESSION); + participations.insert(i, participation); + txn.commit(); + } + + let mut res = None; + for i in 1 ..= 5 { + let key_gen = key_gens.get_mut(&i).unwrap(); + let mut txn = dbs.get_mut(&i).unwrap().txn(); + for j in 1 ..= 5 { + let mut msgs = key_gen.handle( + &mut txn, + CoordinatorMessage::Participation { + session: SESSION, + participant: Participant::new(u16::try_from(j).unwrap()).unwrap(), + participation: participations[&j].clone(), + }, + ); + if j != 3 { + assert!(msgs.is_empty()); + } + if j == 3 { + assert_eq!(msgs.len(), 1); + let ProcessorMessage::GeneratedKeyPair { session, substrate_key, network_key } = + msgs.swap_remove(0) + else { + panic!("didn't get a generated key pair") + }; + assert_eq!(session, SESSION); + + if res.is_none() { + res = Some((substrate_key, network_key.clone())); + } + assert_eq!(res.as_ref().unwrap(), &(substrate_key, network_key)); + } + } + + txn.commit(); + } + let res = res.unwrap(); + + for i in 1 ..= 5 { + let key_gen = key_gens.get_mut(&i).unwrap(); + let mut txn = dbs.get_mut(&i).unwrap().txn(); + let KeyConfirmed { mut substrate_keys, mut network_keys } = key_gen.confirm( + &mut txn, + SESSION, + &KeyPair(sr25519::Public(res.0), res.1.clone().try_into().unwrap()), + ); + txn.commit(); + + assert_eq!(substrate_keys.len(), 1); + let substrate_keys = substrate_keys.swap_remove(0); + assert_eq!(network_keys.len(), 1); + let network_keys = network_keys.swap_remove(0); + + let params = + ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap()).unwrap(); + assert_eq!(substrate_keys.params(), params); + assert_eq!(network_keys.params(), params); + assert_eq!( + ( + substrate_keys.group_key().to_bytes(), + network_keys.group_key().to_bytes().as_ref().to_vec() + ), + res + ); + } +} diff --git a/processor/src/tests/literal/mod.rs b/processor/TODO/tests/literal/mod.rs similarity index 99% rename from processor/src/tests/literal/mod.rs rename to processor/TODO/tests/literal/mod.rs index 2e8160ec..86229e48 100644 --- a/processor/src/tests/literal/mod.rs +++ b/processor/TODO/tests/literal/mod.rs @@ -1,3 +1,5 @@ +// TODO + use dockertest::{ PullPolicy, StartPolicy, LogOptions, LogAction, LogPolicy, LogSource, Image, TestBodySpecification, DockerOperations, DockerTest, diff --git a/processor/src/tests/mod.rs b/processor/TODO/tests/mod.rs similarity index 99% rename from processor/src/tests/mod.rs rename to processor/TODO/tests/mod.rs index 7ab57bde..4691e523 100644 --- a/processor/src/tests/mod.rs +++ b/processor/TODO/tests/mod.rs @@ -1,3 +1,5 @@ +// TODO + use std::sync::OnceLock; mod 
key_gen; diff --git a/processor/src/tests/scanner.rs b/processor/TODO/tests/scanner.rs similarity index 99% rename from processor/src/tests/scanner.rs rename to processor/TODO/tests/scanner.rs index 6421c499..6ad87f78 100644 --- a/processor/src/tests/scanner.rs +++ b/processor/TODO/tests/scanner.rs @@ -1,3 +1,5 @@ +// TODO + use core::{pin::Pin, time::Duration, future::Future}; use std::sync::Arc; @@ -71,7 +73,7 @@ pub async fn test_scanner( let block_id = block.id(); // Verify the Scanner picked them up - let verify_event = |mut scanner: ScannerHandle| async { + let verify_event = |mut scanner: ScannerHandle| async move { let outputs = match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { ScannerEvent::Block { is_retirement_block, block, outputs } => { diff --git a/processor/src/tests/signer.rs b/processor/TODO/tests/signer.rs similarity index 99% rename from processor/src/tests/signer.rs rename to processor/TODO/tests/signer.rs index 26b26b35..68cb66e0 100644 --- a/processor/src/tests/signer.rs +++ b/processor/TODO/tests/signer.rs @@ -1,3 +1,5 @@ +// TODO + use core::{pin::Pin, future::Future}; use std::collections::HashMap; @@ -184,7 +186,6 @@ pub async fn test_signer( let mut scheduler = N::Scheduler::new::(&mut txn, key, N::NETWORK); let payments = vec![Payment { address: N::external_address(&network, key).await, - data: None, balance: ExternalBalance { coin: match N::NETWORK { ExternalNetworkId::Bitcoin => ExternalCoin::Bitcoin, diff --git a/processor/src/tests/wallet.rs b/processor/TODO/tests/wallet.rs similarity index 99% rename from processor/src/tests/wallet.rs rename to processor/TODO/tests/wallet.rs index 74d7ccc0..89c1be56 100644 --- a/processor/src/tests/wallet.rs +++ b/processor/TODO/tests/wallet.rs @@ -1,3 +1,5 @@ +// TODO + use core::{time::Duration, pin::Pin, future::Future}; use std::collections::HashMap; @@ -88,7 +90,6 @@ pub async fn test_wallet( outputs.clone(), vec![Payment { address: N::external_address(&network, key).await, - data: None, balance: ExternalBalance { coin: match N::NETWORK { ExternalNetworkId::Bitcoin => ExternalCoin::Bitcoin, @@ -115,7 +116,6 @@ pub async fn test_wallet( plans[0].payments, vec![Payment { address: N::external_address(&network, key).await, - data: None, balance: ExternalBalance { coin: match N::NETWORK { ExternalNetworkId::Bitcoin => ExternalCoin::Bitcoin, diff --git a/processor/bin/Cargo.toml b/processor/bin/Cargo.toml new file mode 100644 index 00000000..164036a0 --- /dev/null +++ b/processor/bin/Cargo.toml @@ -0,0 +1,52 @@ +[package] +name = "serai-processor-bin" +version = "0.1.0" +description = "Framework for Serai processor binaries" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/bin" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false +rust-version = "1.80" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +zeroize = { version = "1", default-features = false, features = ["std"] } + +hex = { version = "0.4", default-features = false, features = ["std"] } +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std"] } +dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", 
"evrf-ristretto"] } + +serai-client = { path = "../../substrate/client", default-features = false } +serai-cosign = { path = "../../coordinator/cosign" } + +log = { version = "0.4", default-features = false, features = ["std"] } +env_logger = { version = "0.10", default-features = false, features = ["humantime"] } +tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } + +serai-env = { path = "../../common/env" } +serai-db = { path = "../../common/db" } + +messages = { package = "serai-processor-messages", path = "../messages" } +key-gen = { package = "serai-processor-key-gen", path = "../key-gen" } + +primitives = { package = "serai-processor-primitives", path = "../primitives" } +scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } +scanner = { package = "serai-processor-scanner", path = "../scanner" } +signers = { package = "serai-processor-signers", path = "../signers" } + +message-queue = { package = "serai-message-queue", path = "../../message-queue" } + +[features] +parity-db = ["serai-db/parity-db"] +rocksdb = ["serai-db/rocksdb"] diff --git a/processor/bin/LICENSE b/processor/bin/LICENSE new file mode 100644 index 00000000..41d5a261 --- /dev/null +++ b/processor/bin/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/bin/README.md b/processor/bin/README.md new file mode 100644 index 00000000..858a2925 --- /dev/null +++ b/processor/bin/README.md @@ -0,0 +1,3 @@ +# Serai Processor Bin + +The framework for Serai processor binaries, common to the Serai processors. diff --git a/processor/bin/src/coordinator.rs b/processor/bin/src/coordinator.rs new file mode 100644 index 00000000..62eb1097 --- /dev/null +++ b/processor/bin/src/coordinator.rs @@ -0,0 +1,225 @@ +use core::future::Future; +use std::sync::{LazyLock, Arc, Mutex}; + +use tokio::sync::mpsc; + +use serai_client::{ + primitives::Signature, + validator_sets::primitives::{Session, SlashReport}, + in_instructions::primitives::SignedBatch, +}; + +use serai_cosign::SignedCosign; + +use serai_db::{Get, DbTxn, Db, create_db, db_channel}; + +use scanner::ScannerFeed; + +use message_queue::{Service, Metadata, client::MessageQueue}; + +create_db! { + ProcessorBinCoordinator { + SavedMessages: () -> u64, + } +} + +db_channel! { + ProcessorBinCoordinator { + ReceivedCoordinatorMessages: () -> Vec, + } +} + +// A lock to access SentCoordinatorMessages::send +static SEND_LOCK: LazyLock> = LazyLock::new(|| Mutex::new(())); + +db_channel! 
{ + ProcessorBinCoordinator { + SentCoordinatorMessages: () -> Vec, + } +} + +#[derive(Clone)] +pub(crate) struct CoordinatorSend { + db: crate::Db, + sent_message: mpsc::UnboundedSender<()>, +} + +impl CoordinatorSend { + fn send(&mut self, msg: &messages::ProcessorMessage) { + let _lock = SEND_LOCK.lock().unwrap(); + let mut txn = self.db.txn(); + SentCoordinatorMessages::send(&mut txn, &borsh::to_vec(msg).unwrap()); + txn.commit(); + self + .sent_message + .send(()) + .expect("failed to tell the Coordinator tasks there's a new message to send"); + } +} + +pub(crate) struct Coordinator { + received_message: mpsc::UnboundedReceiver<()>, + send: CoordinatorSend, +} + +impl Coordinator { + pub(crate) fn new(db: crate::Db) -> Self { + let (received_message_send, received_message_recv) = mpsc::unbounded_channel(); + let (sent_message_send, mut sent_message_recv) = mpsc::unbounded_channel(); + + let service = Service::Processor(S::NETWORK); + let message_queue = Arc::new(MessageQueue::from_env(service)); + + // Spawn a task to move messages from the message-queue to our database so we can achieve + // atomicity. This is the only place we read/ack messages from + tokio::spawn({ + let mut db = db.clone(); + let message_queue = message_queue.clone(); + async move { + loop { + let msg = message_queue.next(Service::Coordinator).await; + + let prior_msg = msg.id.checked_sub(1); + let saved_messages = SavedMessages::get(&db); + /* + This should either be: + A) The message after the message we just saved (as normal) + B) The message we just saved (if we rebooted and failed to ack it) + */ + assert!((saved_messages == prior_msg) || (saved_messages == Some(msg.id))); + if saved_messages < Some(msg.id) { + let mut txn = db.txn(); + ReceivedCoordinatorMessages::send(&mut txn, &msg.msg); + SavedMessages::set(&mut txn, &msg.id); + txn.commit(); + } + // Acknowledge this message + message_queue.ack(Service::Coordinator, msg.id).await; + + // Fire that there's a new message + // This assumes the success path, not the just-rebooted-path + received_message_send + .send(()) + .expect("failed to tell the Coordinator there's a new message"); + } + } + }); + + // Spawn a task to send messages to the message-queue + // TODO: Define a proper task for this and remove use of queue_with_retry + tokio::spawn({ + let mut db = db.clone(); + async move { + loop { + let mut txn = db.txn(); + match SentCoordinatorMessages::try_recv(&mut txn) { + Some(msg) => { + let metadata = Metadata { + from: service, + to: Service::Coordinator, + intent: borsh::from_slice::(&msg).unwrap().intent(), + }; + message_queue.queue_with_retry(metadata, msg).await; + txn.commit(); + } + None => { + let _ = + tokio::time::timeout(core::time::Duration::from_secs(6), sent_message_recv.recv()) + .await; + } + } + } + } + }); + + let send = CoordinatorSend { db, sent_message: sent_message_send }; + Coordinator { received_message: received_message_recv, send } + } + + pub(crate) fn coordinator_send(&self) -> CoordinatorSend { + self.send.clone() + } + + /// Fetch the next message from the Coordinator. + /// + /// This message is guaranteed to have never been handled before, where handling is defined as + /// this `txn` being committed. 
+ pub(crate) async fn next_message( + &mut self, + txn: &mut impl DbTxn, + ) -> messages::CoordinatorMessage { + loop { + match ReceivedCoordinatorMessages::try_recv(txn) { + Some(msg) => { + return borsh::from_slice(&msg) + .expect("message wasn't a borsh-encoded CoordinatorMessage") + } + None => { + let _ = + tokio::time::timeout(core::time::Duration::from_secs(60), self.received_message.recv()) + .await; + } + } + } + } + + pub(crate) fn send_message(&mut self, msg: &messages::ProcessorMessage) { + self.send.send(msg); + } +} + +impl signers::Coordinator for CoordinatorSend { + type EphemeralError = (); + + fn send( + &mut self, + msg: messages::sign::ProcessorMessage, + ) -> impl Send + Future> { + async move { + self.send(&messages::ProcessorMessage::Sign(msg)); + Ok(()) + } + } + + fn publish_cosign( + &mut self, + cosign: SignedCosign, + ) -> impl Send + Future> { + async move { + self.send(&messages::ProcessorMessage::Coordinator( + messages::coordinator::ProcessorMessage::CosignedBlock { cosign }, + )); + Ok(()) + } + } + + fn publish_signed_batch( + &mut self, + batch: SignedBatch, + ) -> impl Send + Future> { + async move { + self.send(&messages::ProcessorMessage::Coordinator( + messages::coordinator::ProcessorMessage::SignedBatch { batch }, + )); + Ok(()) + } + } + + fn publish_slash_report_signature( + &mut self, + session: Session, + slash_report: SlashReport, + signature: Signature, + ) -> impl Send + Future> { + async move { + self.send(&messages::ProcessorMessage::Coordinator( + messages::coordinator::ProcessorMessage::SignedSlashReport { + session, + slash_report, + signature: signature.0, + }, + )); + Ok(()) + } + } +} diff --git a/processor/bin/src/lib.rs b/processor/bin/src/lib.rs new file mode 100644 index 00000000..5109dcbc --- /dev/null +++ b/processor/bin/src/lib.rs @@ -0,0 +1,308 @@ +use core::cmp::Ordering; + +use zeroize::{Zeroize, Zeroizing}; + +use ciphersuite::{ + group::{ff::PrimeField, GroupEncoding}, + Ciphersuite, Ristretto, +}; +use dkg::evrf::EvrfCurve; + +use serai_client::validator_sets::primitives::Session; + +use serai_env as env; +use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel}; + +use primitives::EncodableG; +use ::key_gen::{KeyGenParams, KeyGen}; +use scheduler::{SignableTransaction, TransactionFor}; +use scanner::{ScannerFeed, Scanner, KeyFor, Scheduler}; +use signers::{TransactionPublisher, Signers}; + +mod coordinator; +use coordinator::Coordinator; + +create_db! { + ProcessorBin { + ExternalKeyForSessionForSigners: (session: Session) -> EncodableG, + } +} + +db_channel! { + ProcessorBin { + KeyToActivate: () -> EncodableG + } +} + +/// The type used for the database. +#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))] +pub type Db = std::sync::Arc; +/// The type used for the database. +#[cfg(feature = "rocksdb")] +pub type Db = serai_db::RocksDB; + +/// Initialize the processor. +/// +/// Yields the database. 
+#[allow(unused_variables, unreachable_code)] +pub fn init() -> Db { + // Override the panic handler with one which will panic if any tokio task panics + { + let existing = std::panic::take_hook(); + std::panic::set_hook(Box::new(move |panic| { + existing(panic); + const MSG: &str = "exiting the process due to a task panicking"; + println!("{MSG}"); + log::error!("{MSG}"); + std::process::exit(1); + })); + } + + if std::env::var("RUST_LOG").is_err() { + std::env::set_var("RUST_LOG", serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_string())); + } + env_logger::init(); + + #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))] + let db = + serai_db::new_parity_db(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); + #[cfg(feature = "rocksdb")] + let db = serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); + db +} + +/// The URL for the external network's node. +pub fn url() -> String { + let login = env::var("NETWORK_RPC_LOGIN").expect("network RPC login wasn't specified"); + let hostname = env::var("NETWORK_RPC_HOSTNAME").expect("network RPC hostname wasn't specified"); + let port = env::var("NETWORK_RPC_PORT").expect("network RPC port wasn't specified"); + "http://".to_string() + &login + "@" + &hostname + ":" + &port +} + +fn key_gen() -> KeyGen { + fn read_key_from_env(label: &'static str) -> Zeroizing { + let key_hex = + Zeroizing::new(env::var(label).unwrap_or_else(|| panic!("{label} wasn't provided"))); + let bytes = Zeroizing::new( + hex::decode(key_hex).unwrap_or_else(|_| panic!("{label} wasn't a valid hex string")), + ); + + let mut repr = ::Repr::default(); + if repr.as_ref().len() != bytes.len() { + panic!("{label} wasn't the correct length"); + } + repr.as_mut().copy_from_slice(bytes.as_slice()); + let res = Zeroizing::new( + Option::from(::from_repr(repr)) + .unwrap_or_else(|| panic!("{label} wasn't a valid scalar")), + ); + repr.as_mut().zeroize(); + res + } + KeyGen::new( + read_key_from_env::<::EmbeddedCurve>("SUBSTRATE_EVRF_KEY"), + read_key_from_env::<::EmbeddedCurve>( + "NETWORK_EVRF_KEY", + ), + ) +} + +async fn first_block_after_time(feed: &S, serai_time: u64) -> u64 { + async fn first_block_after_time_iteration( + feed: &S, + serai_time: u64, + ) -> Result, S::EphemeralError> { + let latest = feed.latest_finalized_block_number().await?; + let latest_time = feed.time_of_block(latest).await?; + if latest_time < serai_time { + tokio::time::sleep(core::time::Duration::from_secs(serai_time - latest_time)).await; + return Ok(None); + } + + // A finalized block has a time greater than or equal to the time we want to start at + // Find the first such block with a binary search + // start_search and end_search are inclusive + let mut start_search = 0; + let mut end_search = latest; + while start_search != end_search { + // This purposely chooses the earlier block in the case two blocks are both in the middle + let to_check = start_search + ((end_search - start_search) / 2); + let block_time = feed.time_of_block(to_check).await?; + match block_time.cmp(&serai_time) { + Ordering::Less => { + start_search = to_check + 1; + assert!(start_search <= end_search); + } + Ordering::Equal | Ordering::Greater => { + // This holds true since we pick the earlier block upon an even search distance + // If it didn't, this would cause an infinite loop + assert!(to_check < end_search); + end_search = to_check; + } + } + } + Ok(Some(start_search)) + } + loop { + match first_block_after_time_iteration(feed, serai_time).await {
+ Ok(Some(block)) => return block, + Ok(None) => { + log::info!("waiting for block to activate at (a block with timestamp >= {serai_time})"); + } + Err(e) => { + log::error!("couldn't find the first block Serai should scan due to an RPC error: {e:?}"); + } + } + tokio::time::sleep(core::time::Duration::from_secs(5)).await; + } +} + +/// Hooks to run during the main loop. +pub trait Hooks { + /// A hook to run upon receiving a message. + fn on_message(txn: &mut impl DbTxn, msg: &messages::CoordinatorMessage); +} +impl Hooks for () { + fn on_message(_: &mut impl DbTxn, _: &messages::CoordinatorMessage) {} +} + +/// The main loop of a Processor, interacting with the Coordinator. +pub async fn main_loop< + H: Hooks, + S: ScannerFeed, + K: KeyGenParams>>, + Sch: Clone + + Scheduler< + S, + SignableTransaction: SignableTransaction, + >, +>( + mut db: Db, + feed: S, + scheduler: Sch, + publisher: impl TransactionPublisher>, +) { + let mut coordinator = Coordinator::new::(db.clone()); + + let mut key_gen = key_gen::(); + let mut scanner = Scanner::new(db.clone(), feed.clone(), scheduler.clone()).await; + let mut signers = + Signers::::new(db.clone(), coordinator.coordinator_send(), publisher); + + loop { + let db_clone = db.clone(); + let mut txn = db.txn(); + let msg = coordinator.next_message(&mut txn).await; + H::on_message(&mut txn, &msg); + let mut txn = Some(txn); + match msg { + messages::CoordinatorMessage::KeyGen(msg) => { + let txn = txn.as_mut().unwrap(); + let mut new_key = None; + // This is a computationally expensive call yet it happens infrequently + for msg in key_gen.handle(txn, msg) { + if let messages::key_gen::ProcessorMessage::GeneratedKeyPair { session, .. } = &msg { + new_key = Some(*session) + } + coordinator.send_message(&messages::ProcessorMessage::KeyGen(msg)); + } + + // If we were yielded a key, register it in the signers + if let Some(session) = new_key { + let (substrate_keys, network_keys) = KeyGen::::key_shares(txn, session) + .expect("generated key pair yet couldn't get key shares"); + signers.register_keys(txn, session, substrate_keys, network_keys); + } + } + + // These are cheap calls which are fine to be here in this loop + messages::CoordinatorMessage::Sign(msg) => { + let txn = txn.as_mut().unwrap(); + signers.queue_message(txn, &msg) + } + messages::CoordinatorMessage::Coordinator( + messages::coordinator::CoordinatorMessage::CosignSubstrateBlock { session, cosign }, + ) => { + let txn = txn.take().unwrap(); + signers.cosign_block(txn, session, &cosign) + } + messages::CoordinatorMessage::Coordinator( + messages::coordinator::CoordinatorMessage::SignSlashReport { session, slash_report }, + ) => { + let txn = txn.take().unwrap(); + signers.sign_slash_report(txn, session, &slash_report) + } + + messages::CoordinatorMessage::Substrate(msg) => match msg { + messages::substrate::CoordinatorMessage::SetKeys { serai_time, session, key_pair } => { + let txn = txn.as_mut().unwrap(); + let key = + EncodableG(K::decode_key(key_pair.1.as_ref()).expect("invalid key set on serai")); + + // Queue the key to be activated upon the next Batch + KeyToActivate::>::send(txn, &key); + + // Set the external key, as needed by the signers + ExternalKeyForSessionForSigners::>::set(txn, session, &key); + + // This is presumed extremely expensive, potentially blocking for several minutes, yet + // only happens for the very first set of keys + if session == Session(0) { + assert!(scanner.is_none()); + let start_block = first_block_after_time(&feed, serai_time).await; + scanner = Some( 
+ Scanner::initialize(db_clone, feed.clone(), scheduler.clone(), start_block, key.0) + .await, + ); + } + } + messages::substrate::CoordinatorMessage::SlashesReported { session } => { + let txn = txn.as_mut().unwrap(); + + // Since this session had its slashes reported, it has finished all its signature + // protocols and has been fully retired. We retire it from the signers accordingly + let key = ExternalKeyForSessionForSigners::>::take(txn, session).unwrap().0; + + // This is a cheap call + signers.retire_session(txn, session, &key) + } + messages::substrate::CoordinatorMessage::Block { + serai_block_number: _, + batch, + mut burns, + } => { + let scanner = scanner.as_mut().unwrap(); + + if let Some(batch) = batch { + let key_to_activate = + KeyToActivate::>::try_recv(txn.as_mut().unwrap()).map(|key| key.0); + + // This is a cheap call as it internally just queues this to be done later + let _: () = scanner.acknowledge_batch( + txn.take().unwrap(), + batch, + /* + `acknowledge_batch` takes burns to optimize handling returns with standard + payments. That's why handling these with a Batch (and not waiting until the + following potential `queue_burns` call makes sense. As for which Batch, the first + is equally valid unless we want to start introspecting (and should be our only + Batch anyways). + */ + std::mem::take(&mut burns), + key_to_activate, + ); + } + + // This is a cheap call as it internally just queues this to be done later + if !burns.is_empty() { + let _: () = scanner.queue_burns(txn.take().unwrap(), burns); + } + } + }, + }; + // If the txn wasn't already consumed and committed, commit it + if let Some(txn) = txn { + txn.commit(); + } + } +} diff --git a/processor/bitcoin/Cargo.toml b/processor/bitcoin/Cargo.toml new file mode 100644 index 00000000..bc3a1dd0 --- /dev/null +++ b/processor/bitcoin/Cargo.toml @@ -0,0 +1,55 @@ +[package] +name = "serai-bitcoin-processor" +version = "0.1.0" +description = "Serai Bitcoin Processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/bitcoin" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false +rust-version = "1.80" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +rand_core = { version = "0.6", default-features = false } + +hex = { version = "0.4", default-features = false, features = ["std"] } +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "secp256k1"] } +dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-secp256k1"] } +frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } + +secp256k1 = { version = "0.29", default-features = false, features = ["std", "global-context", "rand-std"] } +bitcoin-serai = { path = "../../networks/bitcoin", default-features = false, features = ["std"] } + +serai-client = { path = "../../substrate/client", default-features = false, features = ["bitcoin"] } + +zalloc = { path = "../../common/zalloc" } +log = { version = "0.4", default-features = false, features = ["std"] } +tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } + +serai-db = { path = "../../common/db" 
} + +key-gen = { package = "serai-processor-key-gen", path = "../key-gen" } + +primitives = { package = "serai-processor-primitives", path = "../primitives" } +scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } +scanner = { package = "serai-processor-scanner", path = "../scanner" } +utxo-scheduler = { package = "serai-processor-utxo-scheduler-primitives", path = "../scheduler/utxo/primitives" } +transaction-chaining-scheduler = { package = "serai-processor-transaction-chaining-scheduler", path = "../scheduler/utxo/transaction-chaining" } +signers = { package = "serai-processor-signers", path = "../signers" } + +bin = { package = "serai-processor-bin", path = "../bin" } + +[features] +parity-db = ["bin/parity-db"] +rocksdb = ["bin/rocksdb"] diff --git a/processor/bitcoin/LICENSE b/processor/bitcoin/LICENSE new file mode 100644 index 00000000..41d5a261 --- /dev/null +++ b/processor/bitcoin/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/bitcoin/README.md b/processor/bitcoin/README.md new file mode 100644 index 00000000..79d1cedd --- /dev/null +++ b/processor/bitcoin/README.md @@ -0,0 +1 @@ +# Serai Bitcoin Processor diff --git a/processor/bitcoin/src/db.rs b/processor/bitcoin/src/db.rs new file mode 100644 index 00000000..1d73ebfe --- /dev/null +++ b/processor/bitcoin/src/db.rs @@ -0,0 +1,8 @@ +use serai_db::{Get, DbTxn, create_db}; + +create_db! 
{ + BitcoinProcessor { + LatestBlockToYieldAsFinalized: () -> u64, + ScriptPubKey: (tx: [u8; 32], vout: u32) -> Vec, + } +} diff --git a/processor/bitcoin/src/key_gen.rs b/processor/bitcoin/src/key_gen.rs new file mode 100644 index 00000000..41544134 --- /dev/null +++ b/processor/bitcoin/src/key_gen.rs @@ -0,0 +1,28 @@ +use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1}; +use frost::ThresholdKeys; + +use crate::{primitives::x_coord_to_even_point, scan::scanner}; + +pub(crate) struct KeyGenParams; +impl key_gen::KeyGenParams for KeyGenParams { + const ID: &'static str = "Bitcoin"; + + type ExternalNetworkCiphersuite = Secp256k1; + + fn tweak_keys(keys: &mut ThresholdKeys) { + *keys = bitcoin_serai::wallet::tweak_keys(keys); + // Also create a scanner to assert these keys, and all expected paths, are usable + scanner(keys.group_key()); + } + + fn encode_key(key: ::G) -> Vec { + let key = key.to_bytes(); + let key: &[u8] = key.as_ref(); + // Skip the parity encoding as we know this key is even + key[1 ..].to_vec() + } + + fn decode_key(key: &[u8]) -> Option<::G> { + x_coord_to_even_point(key) + } +} diff --git a/processor/bitcoin/src/main.rs b/processor/bitcoin/src/main.rs new file mode 100644 index 00000000..302f670b --- /dev/null +++ b/processor/bitcoin/src/main.rs @@ -0,0 +1,286 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +#[global_allocator] +static ALLOCATOR: zalloc::ZeroizingAlloc = + zalloc::ZeroizingAlloc(std::alloc::System); + +use bitcoin_serai::rpc::Rpc as BRpc; + +use ::primitives::task::{Task, ContinuallyRan}; + +mod primitives; +pub(crate) use crate::primitives::*; + +// Internal utilities for scanning transactions +mod scan; + +// App-logic trait satisfactions +mod key_gen; +use crate::key_gen::KeyGenParams; +mod rpc; +use rpc::Rpc; +mod scheduler; +use scheduler::{Planner, Scheduler}; + +// Our custom code for Bitcoin +mod db; +mod txindex; +use txindex::TxIndexTask; + +pub(crate) fn hash_bytes(hash: bitcoin_serai::bitcoin::hashes::sha256d::Hash) -> [u8; 32] { + use bitcoin_serai::bitcoin::hashes::Hash; + + let mut res = hash.to_byte_array(); + res.reverse(); + res +} + +#[tokio::main] +async fn main() { + let db = bin::init(); + let feed = Rpc { + db: db.clone(), + rpc: loop { + match BRpc::new(bin::url()).await { + Ok(rpc) => break rpc, + Err(e) => { + log::error!("couldn't connect to the Bitcoin node: {e:?}"); + tokio::time::sleep(core::time::Duration::from_secs(5)).await; + } + } + }, + }; + + let (index_task, index_handle) = Task::new(); + tokio::spawn(TxIndexTask(feed.clone()).continually_run(index_task, vec![])); + core::mem::forget(index_handle); + + bin::main_loop::<(), _, KeyGenParams, _>(db, feed.clone(), Scheduler::new(Planner), feed).await; +} + +/* +use bitcoin_serai::{ + bitcoin::{ + hashes::Hash as HashTrait, + key::{Parity, XOnlyPublicKey}, + consensus::{Encodable, Decodable}, + script::Instruction, + Transaction, Block, ScriptBuf, + opcodes::all::{OP_SHA256, OP_EQUALVERIFY}, + }, + wallet::{ + tweak_keys, p2tr_script_buf, ReceivedOutput, Scanner, TransactionError, + SignableTransaction as BSignableTransaction, TransactionMachine, + }, + rpc::{RpcError, Rpc}, +}; + +#[cfg(test)] +use bitcoin_serai::bitcoin::{ + secp256k1::{SECP256K1, SecretKey, Message}, + PrivateKey, PublicKey, + sighash::{EcdsaSighashType, SighashCache}, + script::PushBytesBuf, + absolute::LockTime, + Amount as BAmount, Sequence, Script, Witness, OutPoint, + transaction::Version, + 
blockdata::transaction::{TxIn, TxOut}, +}; + +use serai_client::{ + primitives::{MAX_DATA_LEN, ExternalNetworkId, ExternalCoin, Amount, Balance}, + networks::bitcoin::Address, +}; +*/ + +/* +impl TransactionTrait for Transaction { + #[cfg(test)] + async fn fee(&self, network: &Bitcoin) -> u64 { + let mut value = 0; + for input in &self.input { + let output = input.previous_output; + let mut hash = *output.txid.as_raw_hash().as_byte_array(); + hash.reverse(); + value += network.rpc.get_transaction(&hash).await.unwrap().output + [usize::try_from(output.vout).unwrap()] + .value + .to_sat(); + } + for output in &self.output { + value -= output.value.to_sat(); + } + value + } +} + +impl Bitcoin { + pub(crate) async fn new(url: String) -> Bitcoin { + let mut res = Rpc::new(url.clone()).await; + while let Err(e) = res { + log::error!("couldn't connect to Bitcoin node: {e:?}"); + sleep(Duration::from_secs(5)).await; + res = Rpc::new(url.clone()).await; + } + Bitcoin { rpc: res.unwrap() } + } + + #[cfg(test)] + pub(crate) async fn fresh_chain(&self) { + if self.rpc.get_latest_block_number().await.unwrap() > 0 { + self + .rpc + .rpc_call( + "invalidateblock", + serde_json::json!([hex::encode(self.rpc.get_block_hash(1).await.unwrap())]), + ) + .await + .unwrap() + } + } + + // This function panics on a node which doesn't follow the Bitcoin protocol, which is deemed fine + async fn median_fee(&self, block: &Block) -> Result { + let mut fees = vec![]; + if block.txdata.len() > 1 { + for tx in &block.txdata[1 ..] { + let mut in_value = 0; + for input in &tx.input { + let mut input_tx = input.previous_output.txid.to_raw_hash().to_byte_array(); + input_tx.reverse(); + in_value += self + .rpc + .get_transaction(&input_tx) + .await + .map_err(|_| NetworkError::ConnectionError)? + .output[usize::try_from(input.previous_output.vout).unwrap()] + .value + .to_sat(); + } + let out = tx.output.iter().map(|output| output.value.to_sat()).sum::(); + fees.push((in_value - out) / u64::try_from(tx.vsize()).unwrap()); + } + } + fees.sort(); + let fee = fees.get(fees.len() / 2).copied().unwrap_or(0); + + // The DUST constant documentation notes a relay rule practically enforcing a + // 1000 sat/kilo-vbyte minimum fee. 
+ Ok(Fee(fee.max(1))) + } + + #[cfg(test)] + pub(crate) fn sign_btc_input_for_p2pkh( + tx: &Transaction, + input_index: usize, + private_key: &PrivateKey, + ) -> ScriptBuf { + use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress}; + + let public_key = PublicKey::from_private_key(SECP256K1, private_key); + let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest); + + let mut der = SECP256K1 + .sign_ecdsa_low_r( + &Message::from_digest_slice( + SighashCache::new(tx) + .legacy_signature_hash( + input_index, + &main_addr.script_pubkey(), + EcdsaSighashType::All.to_u32(), + ) + .unwrap() + .to_raw_hash() + .as_ref(), + ) + .unwrap(), + &private_key.inner, + ) + .serialize_der() + .to_vec(); + der.push(1); + + ScriptBuf::builder() + .push_slice(PushBytesBuf::try_from(der).unwrap()) + .push_key(&public_key) + .into_script() + } +} + +impl Network for Bitcoin { + // 2 inputs should be 2 * 230 = 460 weight units + // The output should be ~36 bytes, or 144 weight units + // The overhead should be ~20 bytes at most, or 80 weight units + // 684 weight units, 171 vbytes, round up to 200 + // 200 vbytes at 1 sat/weight (our current minimum fee, 4 sat/vbyte) = 800 sat fee for the + // aggregation TX + const COST_TO_AGGREGATE: u64 = 800; + + #[cfg(test)] + async fn get_block_number(&self, id: &[u8; 32]) -> usize { + self.rpc.get_block_number(id).await.unwrap() + } + + #[cfg(test)] + async fn get_transaction_by_eventuality(&self, _: usize, id: &Eventuality) -> Transaction { + self.rpc.get_transaction(&id.0).await.unwrap() + } + + #[cfg(test)] + async fn mine_block(&self) { + use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress}; + + self + .rpc + .rpc_call::>( + "generatetoaddress", + serde_json::json!([1, BAddress::p2sh(Script::new(), BNetwork::Regtest).unwrap()]), + ) + .await + .unwrap(); + } + + #[cfg(test)] + async fn test_send(&self, address: Address) -> Block { + use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress}; + + let secret_key = SecretKey::new(&mut rand_core::OsRng); + let private_key = PrivateKey::new(secret_key, BNetwork::Regtest); + let public_key = PublicKey::from_private_key(SECP256K1, &private_key); + let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest); + + let new_block = self.get_latest_block_number().await.unwrap() + 1; + self + .rpc + .rpc_call::>("generatetoaddress", serde_json::json!([100, main_addr])) + .await + .unwrap(); + + let tx = self.get_block(new_block).await.unwrap().txdata.swap_remove(0); + let mut tx = Transaction { + version: Version(2), + lock_time: LockTime::ZERO, + input: vec![TxIn { + previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 }, + script_sig: Script::new().into(), + sequence: Sequence(u32::MAX), + witness: Witness::default(), + }], + output: vec![TxOut { + value: tx.output[0].value - BAmount::from_sat(10000), + script_pubkey: address.clone().into(), + }], + }; + tx.input[0].script_sig = Self::sign_btc_input_for_p2pkh(&tx, 0, &private_key); + + let block = self.get_latest_block_number().await.unwrap() + 1; + self.rpc.send_raw_transaction(&tx).await.unwrap(); + for _ in 0 .. 
Self::CONFIRMATIONS { + self.mine_block().await; + } + self.get_block(block).await.unwrap() + } +} +*/ diff --git a/processor/bitcoin/src/primitives/block.rs b/processor/bitcoin/src/primitives/block.rs new file mode 100644 index 00000000..02b8e595 --- /dev/null +++ b/processor/bitcoin/src/primitives/block.rs @@ -0,0 +1,80 @@ +use core::fmt; +use std::collections::HashMap; + +use ciphersuite::{Ciphersuite, Secp256k1}; + +use bitcoin_serai::bitcoin::block::{Header, Block as BBlock}; + +use serai_client::networks::bitcoin::Address; + +use serai_db::Db; +use primitives::{ReceivedOutput, EventualityTracker}; + +use crate::{hash_bytes, scan::scanner, output::Output, transaction::Eventuality}; + +#[derive(Clone, Debug)] +pub(crate) struct BlockHeader(pub(crate) Header); +impl primitives::BlockHeader for BlockHeader { + fn id(&self) -> [u8; 32] { + hash_bytes(self.0.block_hash().to_raw_hash()) + } + fn parent(&self) -> [u8; 32] { + hash_bytes(self.0.prev_blockhash.to_raw_hash()) + } +} + +#[derive(Clone)] +pub(crate) struct Block(pub(crate) D, pub(crate) BBlock); +impl fmt::Debug for Block { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Block").field("1", &self.1).finish_non_exhaustive() + } +} + +impl primitives::Block for Block { + type Header = BlockHeader; + + type Key = ::G; + type Address = Address; + type Output = Output; + type Eventuality = Eventuality; + + fn id(&self) -> [u8; 32] { + primitives::BlockHeader::id(&BlockHeader(self.1.header)) + } + + fn scan_for_outputs_unordered( + &self, + _latest_active_key: Self::Key, + key: Self::Key, + ) -> Vec { + let scanner = scanner(key); + + let mut res = vec![]; + // We skip the coinbase transaction as its burdened by maturity + for tx in &self.1.txdata[1 ..] { + for output in scanner.scan_transaction(tx) { + res.push(Output::new(&self.0, key, tx, output)); + } + } + res + } + + #[allow(clippy::type_complexity)] + fn check_for_eventuality_resolutions( + &self, + eventualities: &mut EventualityTracker, + ) -> HashMap< + >::TransactionId, + Self::Eventuality, + > { + let mut res = HashMap::new(); + for tx in &self.1.txdata[1 ..] { + let id = hash_bytes(tx.compute_txid().to_raw_hash()); + if let Some(eventuality) = eventualities.active_eventualities.remove(id.as_slice()) { + res.insert(id, eventuality); + } + } + res + } +} diff --git a/processor/bitcoin/src/primitives/mod.rs b/processor/bitcoin/src/primitives/mod.rs new file mode 100644 index 00000000..e089c623 --- /dev/null +++ b/processor/bitcoin/src/primitives/mod.rs @@ -0,0 +1,20 @@ +use ciphersuite::{Ciphersuite, Secp256k1}; + +use bitcoin_serai::bitcoin::key::{Parity, XOnlyPublicKey}; + +pub(crate) mod output; +pub(crate) mod transaction; +pub(crate) mod block; + +pub(crate) fn x_coord_to_even_point(key: &[u8]) -> Option<::G> { + if key.len() != 32 { + None? 
+ }; + + // Read the x-only public key + let key = XOnlyPublicKey::from_slice(key).ok()?; + // Convert to a full public key + let key = key.public_key(Parity::Even); + // Convert to k256 (from libsecp256k1) + Secp256k1::read_G(&mut key.serialize().as_slice()).ok() +} diff --git a/processor/bitcoin/src/primitives/output.rs b/processor/bitcoin/src/primitives/output.rs new file mode 100644 index 00000000..44f422c2 --- /dev/null +++ b/processor/bitcoin/src/primitives/output.rs @@ -0,0 +1,170 @@ +use std::io; + +use ciphersuite::{Ciphersuite, Secp256k1}; + +use bitcoin_serai::{ + bitcoin::{ + hashes::Hash as HashTrait, consensus::Encodable, script::Instruction, transaction::Transaction, + }, + wallet::ReceivedOutput as WalletOutput, +}; + +use scale::{Encode, Decode, IoReader}; +use borsh::{BorshSerialize, BorshDeserialize}; +use serai_db::Get; + +use serai_client::{ + primitives::{ExternalCoin, Amount, ExternalBalance, ExternalAddress}, + networks::bitcoin::Address, +}; + +use primitives::{OutputType, ReceivedOutput}; + +use crate::{ + primitives::x_coord_to_even_point, + scan::{offsets_for_key, presumed_origin, extract_serai_data}, +}; + +#[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)] +pub(crate) struct OutputId([u8; 36]); +impl Default for OutputId { + fn default() -> Self { + Self([0; 36]) + } +} +impl AsRef<[u8]> for OutputId { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} +impl AsMut<[u8]> for OutputId { + fn as_mut(&mut self) -> &mut [u8] { + self.0.as_mut() + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) struct Output { + kind: OutputType, + presumed_origin: Option
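As context for the 36-byte `OutputId` above: `id()` fills it with the consensus-encoded `OutPoint`, which is the 32-byte txid (in internal byte order) followed by the output index as a little-endian `u32`. A small illustrative sketch of that layout (this helper isn't part of the diff; the real code relies on `consensus_encode`):

// txid (32 bytes) || vout (4-byte little-endian u32) = 36 bytes, the length of OutputId's array
fn encode_outpoint(txid: [u8; 32], vout: u32) -> [u8; 36] {
  let mut res = [0; 36];
  res[.. 32].copy_from_slice(&txid);
  res[32 ..].copy_from_slice(&vout.to_le_bytes());
  res
}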
, + pub(crate) output: WalletOutput, + data: Vec, +} + +impl Output { + pub(crate) fn new( + getter: &impl Get, + key: ::G, + tx: &Transaction, + output: WalletOutput, + ) -> Self { + Self { + kind: offsets_for_key(key) + .into_iter() + .find_map(|(kind, offset)| (offset == output.offset()).then_some(kind)) + .expect("scanned output for unknown offset"), + presumed_origin: presumed_origin(getter, tx), + output, + data: extract_serai_data(tx), + } + } + + pub(crate) fn new_with_presumed_origin( + key: ::G, + tx: &Transaction, + presumed_origin: Option
, + output: WalletOutput, + ) -> Self { + Self { + kind: offsets_for_key(key) + .into_iter() + .find_map(|(kind, offset)| (offset == output.offset()).then_some(kind)) + .expect("scanned output for unknown offset"), + presumed_origin, + output, + data: extract_serai_data(tx), + } + } +} + +impl ReceivedOutput<::G, Address> for Output { + type Id = OutputId; + type TransactionId = [u8; 32]; + + fn kind(&self) -> OutputType { + self.kind + } + + fn id(&self) -> Self::Id { + let mut id = OutputId::default(); + self.output.outpoint().consensus_encode(&mut id.as_mut()).unwrap(); + id + } + + fn transaction_id(&self) -> Self::TransactionId { + let mut res = self.output.outpoint().txid.to_raw_hash().to_byte_array(); + res.reverse(); + res + } + + fn key(&self) -> ::G { + // We read the key from the script pubkey so we don't have to independently store it + let script = &self.output.output().script_pubkey; + + // These assumptions are safe since it's an output we successfully scanned + assert!(script.is_p2tr()); + let Instruction::PushBytes(key) = script.instructions_minimal().last().unwrap().unwrap() else { + panic!("last item in v1 Taproot script wasn't bytes") + }; + let key = x_coord_to_even_point(key.as_ref()) + .expect("last item in scanned v1 Taproot script wasn't a valid x-only public key"); + + // The output's key minus the output's offset is the root key + key - (::G::GENERATOR * self.output.offset()) + } + + fn presumed_origin(&self) -> Option
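The `key()` method above leans on a simple group identity: a scanned output's key was derived as the root group key plus a registered offset times the generator, so subtracting `offset * G` recovers the root key. A minimal sketch of that identity with random values, assuming only the `ciphersuite` API already used throughout this file:

use rand_core::OsRng;

use ciphersuite::{
  group::{ff::Field, Group},
  Ciphersuite, Secp256k1,
};

#[test]
fn output_key_minus_offset_is_root_key() {
  let root = <Secp256k1 as Ciphersuite>::G::random(&mut OsRng);
  let offset = <Secp256k1 as Ciphersuite>::F::random(&mut OsRng);
  // Addresses are derived as root key + offset * G ...
  let output_key = root + (<Secp256k1 as Ciphersuite>::G::generator() * offset);
  // ... so the root key is recoverable exactly as `key()` does with the scanned output's offset
  assert_eq!(output_key - (<Secp256k1 as Ciphersuite>::G::generator() * offset), root);
}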
{ + self.presumed_origin.clone() + } + + fn balance(&self) -> ExternalBalance { + ExternalBalance { coin: ExternalCoin::Bitcoin, amount: Amount(self.output.value()) } + } + + fn data(&self) -> &[u8] { + &self.data + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + self.kind.write(writer)?; + let presumed_origin: Option = self.presumed_origin.clone().map(Into::into); + writer.write_all(&presumed_origin.encode())?; + self.output.write(writer)?; + writer.write_all(&u16::try_from(self.data.len()).unwrap().to_le_bytes())?; + writer.write_all(&self.data) + } + + fn read(mut reader: &mut R) -> io::Result { + Ok(Output { + kind: OutputType::read(reader)?, + presumed_origin: { + Option::::decode(&mut IoReader(&mut reader)) + .map_err(|e| io::Error::other(format!("couldn't decode ExternalAddress: {e:?}")))? + .map(|address| { + Address::try_from(address) + .map_err(|()| io::Error::other("couldn't decode Address from ExternalAddress")) + }) + .transpose()? + }, + output: WalletOutput::read(reader)?, + data: { + let mut data_len = [0; 2]; + reader.read_exact(&mut data_len)?; + + let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))]; + reader.read_exact(&mut data)?; + data + }, + }) + } +} diff --git a/processor/bitcoin/src/primitives/transaction.rs b/processor/bitcoin/src/primitives/transaction.rs new file mode 100644 index 00000000..9b81d2f0 --- /dev/null +++ b/processor/bitcoin/src/primitives/transaction.rs @@ -0,0 +1,171 @@ +use std::io; + +use rand_core::{RngCore, CryptoRng}; + +use ciphersuite::Secp256k1; +use frost::{dkg::ThresholdKeys, sign::PreprocessMachine}; + +use bitcoin_serai::{ + bitcoin::{ + consensus::{Encodable, Decodable}, + ScriptBuf, Transaction as BTransaction, + }, + wallet::{ + ReceivedOutput, TransactionError, SignableTransaction as BSignableTransaction, + TransactionMachine, + }, +}; + +use borsh::{BorshSerialize, BorshDeserialize}; + +use serai_client::networks::bitcoin::Address; + +use crate::output::OutputId; + +#[derive(Clone, Debug)] +pub(crate) struct Transaction(pub(crate) BTransaction); + +impl From for Transaction { + fn from(tx: BTransaction) -> Self { + Self(tx) + } +} + +impl scheduler::Transaction for Transaction { + fn read(reader: &mut impl io::Read) -> io::Result { + let tx = + BTransaction::consensus_decode(&mut io::BufReader::new(reader)).map_err(io::Error::other)?; + Ok(Self(tx)) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + let mut writer = io::BufWriter::new(writer); + self.0.consensus_encode(&mut writer)?; + writer.into_inner()?; + Ok(()) + } +} + +#[derive(Clone, Debug)] +pub(crate) struct SignableTransaction { + pub(crate) inputs: Vec, + pub(crate) payments: Vec<(ScriptBuf, u64)>, + pub(crate) change: Option
, + pub(crate) fee_per_vbyte: u64, +} + +impl SignableTransaction { + fn signable(self) -> Result { + BSignableTransaction::new( + self.inputs, + &self.payments, + self.change.map(ScriptBuf::from), + None, + self.fee_per_vbyte, + ) + } +} + +#[derive(Clone)] +pub(crate) struct ClonableTransctionMachine(SignableTransaction, ThresholdKeys); +impl PreprocessMachine for ClonableTransctionMachine { + type Preprocess = ::Preprocess; + type Signature = ::Signature; + type SignMachine = ::SignMachine; + + fn preprocess( + self, + rng: &mut R, + ) -> (Self::SignMachine, Self::Preprocess) { + self + .0 + .signable() + .expect("signing an invalid SignableTransaction") + .multisig(&self.1) + .expect("incorrect keys used for SignableTransaction") + .preprocess(rng) + } +} + +impl scheduler::SignableTransaction for SignableTransaction { + type Transaction = Transaction; + type Ciphersuite = Secp256k1; + type PreprocessMachine = ClonableTransctionMachine; + + fn read(reader: &mut impl io::Read) -> io::Result { + let inputs = { + let mut input_len = [0; 4]; + reader.read_exact(&mut input_len)?; + let mut inputs = vec![]; + for _ in 0 .. u32::from_le_bytes(input_len) { + inputs.push(ReceivedOutput::read(reader)?); + } + inputs + }; + + let payments = Vec::<(Vec, u64)>::deserialize_reader(reader)?; + let change = <_>::deserialize_reader(reader)?; + let fee_per_vbyte = <_>::deserialize_reader(reader)?; + + Ok(Self { + inputs, + payments: payments + .into_iter() + .map(|(address, amount)| (ScriptBuf::from_bytes(address), amount)) + .collect(), + change, + fee_per_vbyte, + }) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + writer.write_all(&u32::try_from(self.inputs.len()).unwrap().to_le_bytes())?; + for input in &self.inputs { + input.write(writer)?; + } + + for payment in &self.payments { + (payment.0.as_script().as_bytes(), payment.1).serialize(writer)?; + } + self.change.serialize(writer)?; + self.fee_per_vbyte.serialize(writer)?; + + Ok(()) + } + + fn id(&self) -> [u8; 32] { + self.clone().signable().unwrap().txid() + } + + fn sign(self, keys: ThresholdKeys) -> Self::PreprocessMachine { + ClonableTransctionMachine(self, keys) + } +} + +#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] +pub(crate) struct Eventuality { + pub(crate) txid: [u8; 32], + pub(crate) singular_spent_output: Option, +} + +impl primitives::Eventuality for Eventuality { + type OutputId = OutputId; + + fn id(&self) -> [u8; 32] { + self.txid + } + + // We define the lookup as our ID since the resolving transaction only has a singular possible ID + fn lookup(&self) -> Vec { + self.txid.to_vec() + } + + fn singular_spent_output(&self) -> Option { + self.singular_spent_output.clone() + } + + fn read(reader: &mut impl io::Read) -> io::Result { + Self::deserialize_reader(reader) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + self.serialize(writer) + } +} diff --git a/processor/bitcoin/src/rpc.rs b/processor/bitcoin/src/rpc.rs new file mode 100644 index 00000000..4289c714 --- /dev/null +++ b/processor/bitcoin/src/rpc.rs @@ -0,0 +1,181 @@ +use core::future::Future; + +use bitcoin_serai::rpc::{RpcError, Rpc as BRpc}; + +use serai_client::primitives::{ExternalNetworkId, ExternalCoin, Amount}; + +use serai_db::Db; +use scanner::ScannerFeed; +use signers::TransactionPublisher; + +use crate::{ + db, + transaction::Transaction, + block::{BlockHeader, Block}, +}; + +#[derive(Clone)] +pub(crate) struct Rpc { + pub(crate) db: D, + pub(crate) rpc: BRpc, +} + +impl ScannerFeed for 
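Since the `SignableTransaction` serialization above is hand-rolled (a little-endian `u32` input count followed by each input, then borsh for the remaining fields), the property it has to uphold is a lossless round trip. A sketch of how that could be checked, assuming an already-constructed valid `signable` value (the struct doesn't derive `PartialEq`, so the txid serves as the observable):

use scheduler::SignableTransaction as _;

fn assert_round_trips(signable: &SignableTransaction) {
  let mut buf = vec![];
  signable.write(&mut buf).unwrap();
  let read = SignableTransaction::read(&mut buf.as_slice()).unwrap();
  // Identical signable transactions produce identical txids
  assert_eq!(signable.id(), read.id());
}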
Rpc { + const NETWORK: ExternalNetworkId = ExternalNetworkId::Bitcoin; + // 6 confirmations is widely accepted as secure and shouldn't occur + const CONFIRMATIONS: u64 = 6; + // The window length should be roughly an hour + const WINDOW_LENGTH: u64 = 6; + + const TEN_MINUTES: u64 = 1; + + type Block = Block; + + type EphemeralError = RpcError; + + fn latest_finalized_block_number( + &self, + ) -> impl Send + Future> { + async move { db::LatestBlockToYieldAsFinalized::get(&self.db).ok_or(RpcError::ConnectionError) } + } + + fn time_of_block( + &self, + number: u64, + ) -> impl Send + Future> { + async move { + let number = usize::try_from(number).unwrap(); + + /* + The block time isn't guaranteed to be monotonic. It is guaranteed to be greater than the + median time of prior blocks, as detailed in BIP-0113 (a BIP which used that fact to improve + CLTV). This creates a monotonic median time which we use as the block time. + */ + // This implements `GetMedianTimePast` + let median = { + const MEDIAN_TIMESPAN: usize = 11; + let mut timestamps = Vec::with_capacity(MEDIAN_TIMESPAN); + for i in number.saturating_sub(MEDIAN_TIMESPAN) .. number { + timestamps + .push(self.rpc.get_block(&self.rpc.get_block_hash(i).await?).await?.header.time); + } + timestamps.sort(); + timestamps[timestamps.len() / 2] + }; + + /* + This block's timestamp is guaranteed to be greater than this median: + https://github.com/bitcoin/bitcoin/blob/0725a374941355349bb4bc8a79dad1affb27d3b9 + /src/validation.cpp#L4182-L4184 + + This does not guarantee the median always increases however. Take the following trivial + example, as the window is initially built: + + 0 block has time 0 // Prior blocks: [] + 1 block has time 1 // Prior blocks: [0] + 2 block has time 2 // Prior blocks: [0, 1] + 3 block has time 2 // Prior blocks: [0, 1, 2] + + These two blocks have the same time (both greater than the median of their prior blocks) and + the same median. + + The median will never decrease however. The values pushed onto the window will always be + greater than the median. If a value greater than the median is popped, the median will + remain the same (due to the counterbalance of the pushed value). If a value less than the + median is popped, the median will increase (either to another instance of the same value, + yet one closer to the end of the repeating sequence, or to a higher value). + */ + Ok(median.into()) + } + } + + fn unchecked_block_header_by_number( + &self, + number: u64, + ) -> impl Send + + Future::Header, Self::EphemeralError>> + { + async move { + Ok(BlockHeader( + self + .rpc + .get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?) + .await? + .header, + )) + } + } + + fn unchecked_block_by_number( + &self, + number: u64, + ) -> impl Send + Future> { + async move { + Ok(Block( + self.db.clone(), + self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?, + )) + } + } + + fn dust(coin: ExternalCoin) -> Amount { + assert_eq!(coin, ExternalCoin::Bitcoin); + + /* + A Taproot input is: + - 36 bytes for the OutPoint + - 0 bytes for the script (+1 byte for the length) + - 4 bytes for the sequence + Per https://developer.bitcoin.org/reference/transactions.html#raw-transaction-format + + There's also: + - 1 byte for the witness length + - 1 byte for the signature length + - 64 bytes for the signature + which have the SegWit discount. 
+ + (4 * (36 + 1 + 4)) + (1 + 1 + 64) = 164 + 66 = 230 weight units + 230 ceil div 4 = 57 vbytes + + Bitcoin defines multiple minimum feerate constants *per kilo-vbyte*. Currently, these are: + - 1000 sat/kilo-vbyte for a transaction to be relayed + - Each output's value must exceed the fee of the TX spending it at 3000 sat/kilo-vbyte + The DUST constant needs to be determined by the latter. + Since these are solely relay rules, and may be raised, we require all outputs be spendable + under a 5000 sat/kilo-vbyte fee rate. + + 5000 sat/kilo-vbyte = 5 sat/vbyte + 5 * 57 = 285 sats/spent-output + + Even if an output took 100 bytes (it should be just ~29-43), taking 400 weight units, adding + 100 vbytes, tripling the transaction size, then the sats/tx would be < 1000. + + Increase by an order of magnitude, in order to ensure this is actually worth our time, and we + get 10,000 satoshis. This is $5 if 1 BTC = 50,000 USD. + */ + Amount(10_000) + } + + fn cost_to_aggregate( + &self, + coin: ExternalCoin, + _reference_block: &Self::Block, + ) -> impl Send + Future> { + async move { + assert_eq!(coin, ExternalCoin::Bitcoin); + // TODO + Ok(Amount(0)) + } + } +} + +impl TransactionPublisher for Rpc { + type EphemeralError = RpcError; + + fn publish( + &self, + tx: Transaction, + ) -> impl Send + Future> { + async move { self.rpc.send_raw_transaction(&tx.0).await.map(|_| ()) } + } +} diff --git a/processor/bitcoin/src/scan.rs b/processor/bitcoin/src/scan.rs new file mode 100644 index 00000000..6d7fab88 --- /dev/null +++ b/processor/bitcoin/src/scan.rs @@ -0,0 +1,125 @@ +use std::{sync::LazyLock, collections::HashMap}; + +use ciphersuite::{Ciphersuite, Secp256k1}; + +use bitcoin_serai::{ + bitcoin::{ + blockdata::opcodes, + script::{Instruction, ScriptBuf}, + Transaction, + }, + wallet::Scanner, +}; + +use serai_client::networks::bitcoin::Address; + +use serai_db::Get; +use primitives::OutputType; + +use crate::hash_bytes; + +const KEY_DST: &[u8] = b"Serai Bitcoin Processor Key Offset"; +static BRANCH_BASE_OFFSET: LazyLock<::F> = + LazyLock::new(|| Secp256k1::hash_to_F(KEY_DST, b"branch")); +static CHANGE_BASE_OFFSET: LazyLock<::F> = + LazyLock::new(|| Secp256k1::hash_to_F(KEY_DST, b"change")); +static FORWARD_BASE_OFFSET: LazyLock<::F> = + LazyLock::new(|| Secp256k1::hash_to_F(KEY_DST, b"forward")); + +// Unfortunately, we have per-key offsets as it's the root key plus the base offset may not be +// even. While we could tweak the key until all derivations are even, that'd require significantly +// more tweaking. This algorithmic complexity is preferred. 
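A usage sketch for the function defined just below: the returned map gives, per output type, the offset actually registered with the scanner (already tweaked so the derived key stays even), with the external output type mapping to a zero offset. Assuming a `key` that has already been through `tweak_keys`:

use ciphersuite::{
  group::{ff::Field, Group},
  Ciphersuite, Secp256k1,
};
use primitives::OutputType;

fn derived_change_key(key: <Secp256k1 as Ciphersuite>::G) -> <Secp256k1 as Ciphersuite>::G {
  let offsets = offsets_for_key(key);
  // The external output type is the key itself, hence the zero offset
  assert_eq!(offsets[&OutputType::External], <Secp256k1 as Ciphersuite>::F::ZERO);
  // Every other output type is reached by adding its tweaked offset times the generator
  key + (<Secp256k1 as Ciphersuite>::G::generator() * offsets[&OutputType::Change])
}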
+pub(crate) fn offsets_for_key( + key: ::G, +) -> HashMap::F> { + let mut offsets = HashMap::from([(OutputType::External, ::F::ZERO)]); + + // We create an actual Bitcoin scanner as upon adding an offset, it yields the tweaked offset + // actually used + let mut scanner = Scanner::new(key).unwrap(); + let mut register = |kind, offset| { + let tweaked_offset = scanner.register_offset(offset).expect("offset collision"); + offsets.insert(kind, tweaked_offset); + }; + + register(OutputType::Branch, *BRANCH_BASE_OFFSET); + register(OutputType::Change, *CHANGE_BASE_OFFSET); + register(OutputType::Forwarded, *FORWARD_BASE_OFFSET); + + offsets +} + +pub(crate) fn scanner(key: ::G) -> Scanner { + let mut scanner = Scanner::new(key).unwrap(); + for (_, offset) in offsets_for_key(key) { + let tweaked_offset = scanner.register_offset(offset).unwrap(); + assert_eq!(tweaked_offset, offset); + } + scanner +} + +pub(crate) fn presumed_origin(getter: &impl Get, tx: &Transaction) -> Option
{ + for input in &tx.input { + let txid = hash_bytes(input.previous_output.txid.to_raw_hash()); + let vout = input.previous_output.vout; + if let Some(address) = + Address::new(crate::txindex::script_pubkey_for_on_chain_output(getter, txid, vout)) + { + return Some(address); + } + } + None? +} + +// Checks if this script matches SHA256 PUSH MSG_HASH OP_EQUALVERIFY .. +fn matches_segwit_data(script: &ScriptBuf) -> Option { + let mut ins = script.instructions(); + + // first item should be SHA256 code + if ins.next()?.ok()?.opcode()? != opcodes::all::OP_SHA256 { + return Some(false); + } + + // next should be a data push + ins.next()?.ok()?.push_bytes()?; + + // next should be a equality check + if ins.next()?.ok()?.opcode()? != opcodes::all::OP_EQUALVERIFY { + return Some(false); + } + + Some(true) +} + +// Extract the data for Serai from a transaction +pub(crate) fn extract_serai_data(tx: &Transaction) -> Vec { + // Check for an OP_RETURN output + let mut data = (|| { + for output in &tx.output { + if output.script_pubkey.is_op_return() { + match output.script_pubkey.instructions_minimal().last() { + Some(Ok(Instruction::PushBytes(data))) => return Some(data.as_bytes().to_vec()), + _ => continue, + } + } + } + None + })(); + + // Check the inputs + if data.is_none() { + for input in &tx.input { + let witness = input.witness.to_vec(); + // The witness has to have at least 2 items, msg and the redeem script + if witness.len() >= 2 { + let redeem_script = ScriptBuf::from_bytes(witness.last().unwrap().clone()); + if matches_segwit_data(&redeem_script) == Some(true) { + data = Some(witness[witness.len() - 2].clone()); // len() - 1 is the redeem_script + break; + } + } + } + } + + data.unwrap_or(vec![]) +} diff --git a/processor/bitcoin/src/scheduler.rs b/processor/bitcoin/src/scheduler.rs new file mode 100644 index 00000000..00f4a072 --- /dev/null +++ b/processor/bitcoin/src/scheduler.rs @@ -0,0 +1,213 @@ +use core::future::Future; + +use ciphersuite::{Ciphersuite, Secp256k1}; + +use bitcoin_serai::{ + bitcoin::ScriptBuf, + wallet::{TransactionError, SignableTransaction as BSignableTransaction, p2tr_script_buf}, +}; + +use serai_client::{ + primitives::{ExternalCoin, Amount}, + networks::bitcoin::Address, +}; + +use serai_db::Db; +use primitives::{OutputType, ReceivedOutput, Payment}; +use scanner::{KeyFor, AddressFor, OutputFor, BlockFor}; +use utxo_scheduler::{PlannedTransaction, TransactionPlanner}; +use transaction_chaining_scheduler::{EffectedReceivedOutputs, Scheduler as GenericScheduler}; + +use crate::{ + scan::{offsets_for_key, scanner}, + output::Output, + transaction::{SignableTransaction, Eventuality}, + rpc::Rpc, +}; + +fn address_from_serai_key(key: ::G, kind: OutputType) -> Address { + let offset = ::G::GENERATOR * offsets_for_key(key)[&kind]; + Address::new( + p2tr_script_buf(key + offset) + .expect("creating address from Serai key which wasn't properly tweaked"), + ) + .expect("couldn't create Serai-representable address for P2TR script") +} + +fn signable_transaction( + _reference_block: &BlockFor>, + inputs: Vec>>, + payments: Vec>>>, + change: Option>>, +) -> Result<(SignableTransaction, BSignableTransaction), TransactionError> { + assert!( + inputs.len() < + , EffectedReceivedOutputs>>>::MAX_INPUTS + ); + assert!( + (payments.len() + usize::from(u8::from(change.is_some()))) < + , EffectedReceivedOutputs>>>::MAX_OUTPUTS + ); + + // TODO + let fee_per_vbyte = 1; + + let inputs = inputs.into_iter().map(|input| input.output).collect::>(); + + let mut payments = payments + 
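Relating this back to `extract_serai_data` above: the primary path it checks is an OP_RETURN output whose final push carries the Serai-bound data. A sketch of building such an output script with the same builder API this crate already uses elsewhere (the data itself is an arbitrary placeholder):

use bitcoin_serai::bitcoin::{
  opcodes::all::OP_RETURN,
  script::{PushBytesBuf, ScriptBuf},
};

fn op_return_script(data: Vec<u8>) -> ScriptBuf {
  // OP_RETURN followed by a single push; `extract_serai_data` reads back that final push
  ScriptBuf::builder()
    .push_opcode(OP_RETURN)
    .push_slice(PushBytesBuf::try_from(data).expect("data exceeded the maximum push size"))
    .into_script()
}

Any script built this way satisfies the `is_op_return` branch checked first, before the witness-based fallback.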
.into_iter() + .map(|payment| { + (ScriptBuf::from(payment.address().clone()), { + let balance = payment.balance(); + assert_eq!(balance.coin, ExternalCoin::Bitcoin); + balance.amount.0 + }) + }) + .collect::>(); + /* + Push a payment to a key with a known private key which anyone can spend. If this transaction + gets stuck, this lets anyone create a child transaction spending this output, raising the fee, + getting the transaction unstuck (via CPFP). + */ + payments.push(( + // The generator is even so this is valid + p2tr_script_buf(::G::GENERATOR).unwrap(), + // This uses the minimum output value allowed, as defined as a constant in bitcoin-serai + // TODO: Add a test for this comparing to bitcoin's `minimal_non_dust` + bitcoin_serai::wallet::DUST, + )); + + let change = change + .map(, EffectedReceivedOutputs>>>::change_address); + + BSignableTransaction::new( + inputs.clone(), + &payments, + change.clone().map(ScriptBuf::from), + None, + fee_per_vbyte, + ) + .map(|bst| (SignableTransaction { inputs, payments, change, fee_per_vbyte }, bst)) +} + +#[derive(Clone)] +pub(crate) struct Planner; +impl TransactionPlanner, EffectedReceivedOutputs>> for Planner { + type EphemeralError = (); + + type SignableTransaction = SignableTransaction; + + /* + Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT). + + A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes. While + our inputs are entirely SegWit, such fine tuning is not necessary and could create issues in + the future (if the size decreases or we misevaluate it). It also offers a minimal amount of + benefit when we are able to logarithmically accumulate inputs/fulfill payments. + + For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and + 64-byte outputs (40-byte script, 8-byte amount, whatever overhead), they together take up 192 + bytes. + + 100,000 / 192 = 520 + 520 * 192 leaves 160 bytes of overhead for the transaction structure itself. + */ + const MAX_INPUTS: usize = 520; + // We always reserve one output to create an anyone-can-spend output enabling anyone to use CPFP + // to unstick any transactions which had too low of a fee. + const MAX_OUTPUTS: usize = 519; + + fn branch_address(key: KeyFor>) -> AddressFor> { + address_from_serai_key(key, OutputType::Branch) + } + fn change_address(key: KeyFor>) -> AddressFor> { + address_from_serai_key(key, OutputType::Change) + } + fn forwarding_address(key: KeyFor>) -> AddressFor> { + address_from_serai_key(key, OutputType::Forwarded) + } + + fn calculate_fee( + &self, + reference_block: &BlockFor>, + inputs: Vec>>, + payments: Vec>>>, + change: Option>>, + ) -> impl Send + Future> { + async move { + Ok(match signable_transaction::(reference_block, inputs, payments, change) { + Ok(tx) => Amount(tx.1.needed_fee()), + Err( + TransactionError::NoInputs | TransactionError::NoOutputs | TransactionError::DustPayment, + ) => panic!("malformed arguments to calculate_fee"), + // No data, we have a minimum fee rate, we checked the amount of inputs/outputs + Err( + TransactionError::TooMuchData | + TransactionError::TooLowFee | + TransactionError::TooLargeTransaction, + ) => unreachable!(), + Err(TransactionError::NotEnoughFunds { fee, .. 
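The `MAX_INPUTS` figure above is plain integer arithmetic over the stated size assumptions; a small sketch making the same numbers mechanically checkable (illustrative constants only, not used by the scheduler):

// 400,000 weight units / 4 weight units per byte = 100,000 bytes for a standard transaction
const MAX_STANDARD_TX_BYTES: usize = 400_000 / 4;
// 128 bytes assumed per input plus 64 bytes per output, considered as a pair
const BYTES_PER_INPUT_AND_OUTPUT: usize = 128 + 64;
// 100,000 / 192 = 520 pairs, leaving 160 bytes of slack for the transaction structure itself
const MAX_PAIRS: usize = MAX_STANDARD_TX_BYTES / BYTES_PER_INPUT_AND_OUTPUT;
const SLACK: usize = MAX_STANDARD_TX_BYTES - (MAX_PAIRS * BYTES_PER_INPUT_AND_OUTPUT);
const _: () = assert!((MAX_PAIRS == 520) && (SLACK == 160));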
}) => Amount(fee), + }) + } + } + + fn plan( + &self, + reference_block: &BlockFor>, + inputs: Vec>>, + payments: Vec>>>, + change: Option>>, + ) -> impl Send + + Future< + Output = Result< + PlannedTransaction, Self::SignableTransaction, EffectedReceivedOutputs>>, + Self::EphemeralError, + >, + > { + async move { + let key = inputs.first().unwrap().key(); + for input in &inputs { + assert_eq!(key, input.key()); + } + + let singular_spent_output = (inputs.len() == 1).then(|| inputs[0].id()); + match signable_transaction::(reference_block, inputs.clone(), payments, change) { + Ok(tx) => Ok(PlannedTransaction { + signable: tx.0, + eventuality: Eventuality { txid: tx.1.txid(), singular_spent_output }, + auxilliary: EffectedReceivedOutputs({ + let tx = tx.1.transaction(); + let scanner = scanner(key); + + let mut res = vec![]; + for output in scanner.scan_transaction(tx) { + res.push(Output::new_with_presumed_origin( + key, + tx, + // It shouldn't matter if this is wrong as we should never try to return these + // We still provide an accurate value to ensure a lack of discrepancies + Some(Address::new(inputs[0].output.output().script_pubkey.clone()).unwrap()), + output, + )); + } + res + }), + }), + Err( + TransactionError::NoInputs | TransactionError::NoOutputs | TransactionError::DustPayment, + ) => panic!("malformed arguments to plan"), + // No data, we have a minimum fee rate, we checked the amount of inputs/outputs + Err( + TransactionError::TooMuchData | + TransactionError::TooLowFee | + TransactionError::TooLargeTransaction, + ) => unreachable!(), + Err(TransactionError::NotEnoughFunds { .. }) => { + panic!("plan called for a transaction without enough funds") + } + } + } + } +} + +pub(crate) type Scheduler = GenericScheduler, Planner>; diff --git a/processor/bitcoin/src/txindex.rs b/processor/bitcoin/src/txindex.rs new file mode 100644 index 00000000..2ba40ca8 --- /dev/null +++ b/processor/bitcoin/src/txindex.rs @@ -0,0 +1,110 @@ +use core::future::Future; + +use bitcoin_serai::bitcoin::ScriptBuf; + +use serai_db::{Get, DbTxn, Db}; + +use primitives::task::ContinuallyRan; +use scanner::ScannerFeed; + +use crate::{db, rpc::Rpc, hash_bytes}; + +pub(crate) fn script_pubkey_for_on_chain_output( + getter: &impl Get, + txid: [u8; 32], + vout: u32, +) -> ScriptBuf { + // We index every single output on the blockchain, so this shouldn't be possible + ScriptBuf::from_bytes( + db::ScriptPubKey::get(getter, txid, vout) + .expect("requested script public key for unknown output"), + ) +} + +/* + We want to be able to return received outputs. We do that by iterating over the inputs to find an + address format we recognize, then setting that address as the address to return to. + + Since inputs only contain the script signatures, yet addresses are for script public keys, we + need to pull up the output spent by an input and read the script public key from that. While we + could use `txindex=1`, and an asynchronous call to the Bitcoin node, we: + + 1) Can maintain a much smaller index ourselves + 2) Don't want the asynchronous call (which would require the flow be async, allowed to + potentially error, and more latent) + 3) Don't want to risk Bitcoin's `txindex` corruptions (frequently observed on testnet) + + This task builds that index. 
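A minimal sketch of the round trip this index provides, using the same `db::ScriptPubKey` accessors the task below writes through (the database and arguments here are placeholders):

use serai_db::{Db, DbTxn};

fn index_round_trip(database: &mut impl Db, txid: [u8; 32], vout: u32, script_pubkey: Vec<u8>) {
  // Written once per on-chain output, exactly as the indexing loop below does per block
  let mut txn = database.txn();
  db::ScriptPubKey::set(&mut txn, txid, vout, &script_pubkey);
  txn.commit();

  // Any later scan resolves the spent output's script public key without an RPC round trip
  assert_eq!(script_pubkey_for_on_chain_output(&*database, txid, vout).into_bytes(), script_pubkey);
}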
+*/ +pub(crate) struct TxIndexTask(pub(crate) Rpc); + +impl ContinuallyRan for TxIndexTask { + type Error = String; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let latest_block_number = self + .0 + .rpc + .get_latest_block_number() + .await + .map_err(|e| format!("couldn't fetch latest block number: {e:?}"))?; + let latest_block_number = u64::try_from(latest_block_number).unwrap(); + // `CONFIRMATIONS - 1` as any on-chain block inherently has one confirmation (itself) + let finalized_block_number = + latest_block_number.checked_sub(Rpc::::CONFIRMATIONS - 1).ok_or(format!( + "blockchain only just started and doesn't have {} blocks yet", + Rpc::::CONFIRMATIONS + ))?; + + /* + `finalized_block_number` is the latest block number minus confirmations. The blockchain may + undetectably re-organize though, as while the scanner will maintain an index of finalized + blocks and panics on reorganization, this runs prior to the scanner and that index. + + A reorganization of `CONFIRMATIONS` blocks is still an invariant. Even if that occurs, this + saves the script public keys *by the transaction hash an output index*. Accordingly, it + isn't invalidated on reorganization. The only risk would be if the new chain reorganized to + include a transaction to Serai which we didn't index the parents of. If that happens, we'll + panic when we scan the transaction, causing the invariant to be detected. + */ + + let finalized_block_number_in_db = db::LatestBlockToYieldAsFinalized::get(&self.0.db); + let next_block = finalized_block_number_in_db.map_or(0, |block| block + 1); + + let mut iterated = false; + for b in next_block ..= finalized_block_number { + iterated = true; + + // Fetch the block + let block_hash = self + .0 + .rpc + .get_block_hash(b.try_into().unwrap()) + .await + .map_err(|e| format!("couldn't fetch block hash for block {b}: {e:?}"))?; + let block = self + .0 + .rpc + .get_block(&block_hash) + .await + .map_err(|e| format!("couldn't fetch block {b}: {e:?}"))?; + + let mut txn = self.0.db.txn(); + + for tx in &block.txdata { + let txid = hash_bytes(tx.compute_txid().to_raw_hash()); + for (o, output) in tx.output.iter().enumerate() { + let o = u32::try_from(o).unwrap(); + // Set the script public key for this transaction + db::ScriptPubKey::set(&mut txn, txid, o, &output.script_pubkey.clone().into_bytes()); + } + } + + db::LatestBlockToYieldAsFinalized::set(&mut txn, &b); + txn.commit(); + } + Ok(iterated) + } + } +} diff --git a/processor/ethereum/Cargo.toml b/processor/ethereum/Cargo.toml new file mode 100644 index 00000000..94594b93 --- /dev/null +++ b/processor/ethereum/Cargo.toml @@ -0,0 +1,70 @@ +[package] +name = "serai-ethereum-processor" +version = "0.1.0" +description = "Serai Ethereum Processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false +rust-version = "1.81" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +rand_core = { version = "0.6", default-features = false } + +const-hex = { version = "1", default-features = false, features = ["std"] } +hex = { version = "0.4", default-features = false, features = ["std"] } +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + 
+ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "secp256k1"] } +dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-secp256k1"] } +frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["secp256k1"] } + +k256 = { version = "^0.13.1", default-features = false, features = ["std"] } + +alloy-core = { version = "0.8", default-features = false } +alloy-rlp = { version = "0.3", default-features = false } + +alloy-rpc-types-eth = { version = "0.9", default-features = false } +alloy-transport = { version = "0.9", default-features = false } +alloy-simple-request-transport = { path = "../../networks/ethereum/alloy-simple-request-transport", default-features = false } +alloy-rpc-client = { version = "0.9", default-features = false } +alloy-provider = { version = "0.9", default-features = false } + +serai-client = { path = "../../substrate/client", default-features = false, features = ["ethereum"] } + +zalloc = { path = "../../common/zalloc" } +log = { version = "0.4", default-features = false, features = ["std"] } +tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } + +serai-env = { path = "../../common/env" } +serai-db = { path = "../../common/db" } + +messages = { package = "serai-processor-messages", path = "../messages" } +key-gen = { package = "serai-processor-key-gen", path = "../key-gen" } + +primitives = { package = "serai-processor-primitives", path = "../primitives" } +scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } +scanner = { package = "serai-processor-scanner", path = "../scanner" } +smart-contract-scheduler = { package = "serai-processor-smart-contract-scheduler", path = "../scheduler/smart-contract" } +signers = { package = "serai-processor-signers", path = "../signers" } + +ethereum-schnorr = { package = "ethereum-schnorr-contract", path = "../../networks/ethereum/schnorr" } +ethereum-primitives = { package = "serai-processor-ethereum-primitives", path = "./primitives" } +ethereum-router = { package = "serai-processor-ethereum-router", path = "./router" } +ethereum-erc20 = { package = "serai-processor-ethereum-erc20", path = "./erc20" } + +bin = { package = "serai-processor-bin", path = "../bin" } + +[features] +parity-db = ["bin/parity-db"] +rocksdb = ["bin/rocksdb"] diff --git a/processor/ethereum/LICENSE b/processor/ethereum/LICENSE new file mode 100644 index 00000000..41d5a261 --- /dev/null +++ b/processor/ethereum/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
diff --git a/processor/ethereum/README.md b/processor/ethereum/README.md new file mode 100644 index 00000000..5301c64b --- /dev/null +++ b/processor/ethereum/README.md @@ -0,0 +1 @@ +# Serai Ethereum Processor diff --git a/processor/ethereum/TODO/old_processor.rs b/processor/ethereum/TODO/old_processor.rs new file mode 100644 index 00000000..f95b8225 --- /dev/null +++ b/processor/ethereum/TODO/old_processor.rs @@ -0,0 +1,164 @@ +TODO + + async fn publish_completion( + &self, + completion: &::Completion, + ) -> Result<(), NetworkError> { + // Publish this to the dedicated TX server for a solver to actually publish + #[cfg(not(test))] + { + } + + // Publish this using a dummy account we fund with magic RPC commands + #[cfg(test)] + { + let router = self.router().await; + let router = router.as_ref().unwrap(); + + let mut tx = match completion.command() { + RouterCommand::UpdateSeraiKey { key, .. } => { + router.update_serai_key(key, completion.signature()) + } + RouterCommand::Execute { outs, .. } => router.execute( + &outs.iter().cloned().map(Into::into).collect::>(), + completion.signature(), + ), + }; + tx.gas_limit = 1_000_000u64.into(); + tx.gas_price = 1_000_000_000u64.into(); + let tx = ethereum_serai::crypto::deterministically_sign(tx); + + if self.provider.get_transaction_by_hash(*tx.hash()).await.unwrap().is_none() { + self + .provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [ + tx.recover_signer().unwrap().to_string(), + (U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price)).to_string(), + ], + ) + .await + .unwrap(); + + let (tx, sig, _) = tx.into_parts(); + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig, &mut bytes); + let pending_tx = self.provider.send_raw_transaction(&bytes).await.unwrap(); + self.mine_block().await; + assert!(pending_tx.get_receipt().await.unwrap().status()); + } + + Ok(()) + } + } + + #[cfg(test)] + async fn get_transaction_by_eventuality( + &self, + block: usize, + eventuality: &Self::Eventuality, + ) -> Self::Transaction { + // We mine 96 blocks to ensure the 32 blocks relevant are finalized + // Back-check the prior two epochs in response to this + // TODO: Review why this is sub(3) and not sub(2) + for block in block.saturating_sub(3) ..= block { + match eventuality.1 { + RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. 
} => { + let router = self.router().await; + let router = router.as_ref().unwrap(); + + let block = u64::try_from(block).unwrap(); + let filter = router + .key_updated_filter() + .from_block(block * 32) + .to_block(((block + 1) * 32) - 1) + .topic1(nonce); + let logs = self.provider.get_logs(&filter).await.unwrap(); + if let Some(log) = logs.first() { + return self + .provider + .get_transaction_by_hash(log.clone().transaction_hash.unwrap()) + .await + .unwrap() + .unwrap(); + }; + + let filter = router + .executed_filter() + .from_block(block * 32) + .to_block(((block + 1) * 32) - 1) + .topic1(nonce); + let logs = self.provider.get_logs(&filter).await.unwrap(); + if logs.is_empty() { + continue; + } + return self + .provider + .get_transaction_by_hash(logs[0].transaction_hash.unwrap()) + .await + .unwrap() + .unwrap(); + } + } + } + panic!("couldn't find completion in any three of checked blocks"); + } + + #[cfg(test)] + async fn mine_block(&self) { + self.provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); + } + + #[cfg(test)] + async fn test_send(&self, send_to: Self::Address) -> Self::Block { + use rand_core::OsRng; + use ciphersuite::group::ff::Field; + use ethereum_serai::alloy::sol_types::SolCall; + + let key = ::F::random(&mut OsRng); + let address = ethereum_serai::crypto::address(&(Secp256k1::generator() * key)); + + // Set a 1.1 ETH balance + self + .provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [Address(address).to_string(), "1100000000000000000".into()], + ) + .await + .unwrap(); + + let value = U256::from_str_radix("1000000000000000000", 10).unwrap(); + let tx = ethereum_serai::alloy::consensus::TxLegacy { + chain_id: None, + nonce: 0, + gas_price: 1_000_000_000u128, + gas_limit: 200_000u128, + to: ethereum_serai::alloy::primitives::TxKind::Call(send_to.0.into()), + // 1 ETH + value, + input: ethereum_serai::router::abi::inInstructionCall::new(( + [0; 20].into(), + value, + vec![].into(), + )) + .abi_encode() + .into(), + }; + + use ethereum_serai::alloy::{primitives::Signature, consensus::SignableTransaction}; + let sig = k256::ecdsa::SigningKey::from(k256::elliptic_curve::NonZeroScalar::new(key).unwrap()) + .sign_prehash_recoverable(tx.signature_hash().as_ref()) + .unwrap(); + + let mut bytes = vec![]; + tx.encode_with_signature_fields(&Signature::from(sig), &mut bytes); + let pending_tx = self.provider.send_raw_transaction(&bytes).await.ok().unwrap(); + + // Mine an epoch containing this TX + self.mine_block().await; + assert!(pending_tx.get_receipt().await.unwrap().status()); + // Yield the freshly mined block + self.get_block(self.get_latest_block_number().await.unwrap()).await.unwrap() + } diff --git a/processor/ethereum/TODO/tests/crypto.rs b/processor/ethereum/TODO/tests/crypto.rs new file mode 100644 index 00000000..20ba40b8 --- /dev/null +++ b/processor/ethereum/TODO/tests/crypto.rs @@ -0,0 +1,31 @@ +// TODO + +use rand_core::OsRng; + +use group::ff::{Field, PrimeField}; +use k256::{ + ecdsa::{ + self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey, + }, + Scalar, ProjectivePoint, +}; + +use frost::{ + curve::{Ciphersuite, Secp256k1}, + algorithm::{Hram, IetfSchnorr}, + tests::{algorithm_machines, sign}, +}; + +use crate::{crypto::*, tests::key_gen}; + +// Run the sign test with the EthereumHram +#[test] +fn test_signing() { + let (keys, _) = key_gen(); + + const MESSAGE: &[u8] = b"Hello, World!"; + + let algo = IetfSchnorr::::ietf(); + let _sig = + sign(&mut OsRng, &algo, keys.clone(), 
algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); +} diff --git a/processor/ethereum/TODO/tests/mod.rs b/processor/ethereum/TODO/tests/mod.rs new file mode 100644 index 00000000..2e3e22b1 --- /dev/null +++ b/processor/ethereum/TODO/tests/mod.rs @@ -0,0 +1,45 @@ +// TODO + +use std::{sync::Arc, collections::HashMap}; + +use rand_core::OsRng; + +use k256::{Scalar, ProjectivePoint}; +use frost::{curve::Secp256k1, Participant, ThresholdKeys, tests::key_gen as frost_key_gen}; + +use alloy_core::{ + primitives::{Address, U256, Bytes, Signature, TxKind}, + hex::FromHex, +}; +use alloy_consensus::{SignableTransaction, TxLegacy}; + +use alloy_rpc_types_eth::TransactionReceipt; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +use crate::crypto::{address, deterministically_sign, PublicKey}; + +#[cfg(test)] +mod crypto; + +#[cfg(test)] +use contracts::tests as abi; +#[cfg(test)] +mod router; + +pub fn key_gen() -> (HashMap>, PublicKey) { + let mut keys = frost_key_gen::<_, Secp256k1>(&mut OsRng); + let mut group_key = keys[&Participant::new(1).unwrap()].group_key(); + + let mut offset = Scalar::ZERO; + while PublicKey::new(group_key).is_none() { + offset += Scalar::ONE; + group_key += ProjectivePoint::GENERATOR; + } + for keys in keys.values_mut() { + *keys = keys.offset(offset); + } + let public_key = PublicKey::new(group_key).unwrap(); + + (keys, public_key) +} diff --git a/processor/ethereum/deployer/Cargo.toml b/processor/ethereum/deployer/Cargo.toml new file mode 100644 index 00000000..3e0f7d5b --- /dev/null +++ b/processor/ethereum/deployer/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "serai-processor-ethereum-deployer" +version = "0.1.0" +description = "The deployer for Serai's Ethereum contracts" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum/deployer" +authors = ["Luke Parker "] +edition = "2021" +publish = false +rust-version = "1.81" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +alloy-core = { version = "0.8", default-features = false } + +alloy-sol-types = { version = "0.8", default-features = false } +alloy-sol-macro = { version = "0.8", default-features = false } + +alloy-consensus = { version = "0.9", default-features = false } + +alloy-rpc-types-eth = { version = "0.9", default-features = false } +alloy-transport = { version = "0.9", default-features = false } +alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } +alloy-provider = { version = "0.9", default-features = false } + +ethereum-primitives = { package = "serai-processor-ethereum-primitives", path = "../primitives", default-features = false } + +[build-dependencies] +build-solidity-contracts = { path = "../../../networks/ethereum/build-contracts", default-features = false } + +[dev-dependencies] +alloy-rpc-client = { version = "0.9", default-features = false } +alloy-node-bindings = { version = "0.9", default-features = false } + +tokio = { version = "1.0", default-features = false, features = ["rt-multi-thread", "macros"] } + +ethereum-test-primitives = { package = "serai-ethereum-test-primitives", path = "../test-primitives" } diff --git a/processor/ethereum/deployer/LICENSE b/processor/ethereum/deployer/LICENSE new file mode 100644 index 00000000..41d5a261 --- /dev/null +++ b/processor/ethereum/deployer/LICENSE @@ -0,0 +1,15 @@ 
+AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/ethereum/deployer/README.md b/processor/ethereum/deployer/README.md new file mode 100644 index 00000000..f2ea6fae --- /dev/null +++ b/processor/ethereum/deployer/README.md @@ -0,0 +1,29 @@ +# Ethereum Smart Contracts Deployer + +The deployer for Serai's Ethereum contracts. + +## Goals + +It should be possible to efficiently locate the Serai Router on a blockchain +with the EVM, without relying on any centralized (or even federated) entities. +While deploying and locating an instance of the Router would be trivial, by +using a fixed signature for the deployment transaction, the Router must be +constructed with the correct key for the Serai network (or set to have the +correct key post-construction). Since this cannot be guaranteed to occur, the +process must be retryable and the first successful invocation must be +efficiently findable. + +## Methodology + +We define a contract, the Deployer, to deploy the Router. This contract could +use `CREATE2` with the key representing Serai as the salt, yet this would be +open to collision attacks with just 2\*\*80 complexity. Instead, we use +`CREATE` which would require 2\*\*80 on-chain transactions (infeasible) to use +as the basis of a collision. + +In order to efficiently find the contract for a key, the Deployer contract +saves the addresses of deployed contracts (indexed by the initialization code's +hash). This allows using a single call to a contract with a known address to +find the proper Router. Saving the address to the state enables finding the +Router's address even if the connected-to node's logs have been pruned for +historical blocks. diff --git a/processor/ethereum/deployer/build.rs b/processor/ethereum/deployer/build.rs new file mode 100644 index 00000000..1906f1df --- /dev/null +++ b/processor/ethereum/deployer/build.rs @@ -0,0 +1,5 @@ +fn main() { + let artifacts_path = + std::env::var("OUT_DIR").unwrap().to_string() + "/serai-processor-ethereum-deployer"; + build_solidity_contracts::build(&[], "contracts", &artifacts_path).unwrap(); +} diff --git a/processor/ethereum/deployer/contracts/Deployer.sol b/processor/ethereum/deployer/contracts/Deployer.sol new file mode 100644 index 00000000..862a27fd --- /dev/null +++ b/processor/ethereum/deployer/contracts/Deployer.sol @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: AGPL-3.0-only +pragma solidity ^0.8.26; + +/* + The expected deployment process of Serai's Router is as follows: + + 1) A transaction deploying Deployer is made. Then, a deterministic signature + is created such that an account with an unknown private key is the creator + of the contract. Anyone can fund this address, and once anyone does, the + transaction deploying Deployer can be published by anyone. No other + transaction may be made from that account. + + 2) Anyone deploys the Router through the Deployer. 
This uses a sequential + nonce such that meet-in-the-middle attacks, with complexity 2**80, aren't + feasible. While such attacks would still be feasible if the Deployer's + address was controllable, the usage of a deterministic signature with a + NUMS method prevents that. + + This doesn't have any denial-of-service risks and will resolve once anyone + steps forward as deployer. This does fail to guarantee an identical address + for the Router across every chain, though it enables anyone to efficiently + ask the Deployer for the address (with the Deployer having an identical + address on every chain). + + Unfortunately, guaranteeing identical addresses for the Router isn't + feasible. We'd need the Deployer contract to use a consistent salt for the + Router, yet the Router must be deployed with a specific public key for Serai. + Since Ethereum isn't able to determine a valid public key (one the result of + a Serai DKG) from a dishonest public key (one arbitrary), we have to allow + multiple deployments with Serai being the one to determine which to use. + + The alternative would be to have a council publish the Serai key on-Ethereum, + with Serai verifying the published result. This would introduce a DoS risk in + the council not publishing the correct key/not publishing any key. + + This design does not work (well) with contracts expecting initialization due + to only allowing deploying init code once (which assumes contracts are + distinct via their constructors). Such designs are unused by Serai so that is + accepted. +*/ + +/// @title Deployer of contracts for the Serai network +/// @author Luke Parker +contract Deployer { + /// @return The deployment for some (hashed) init code + mapping(bytes32 => address) public deployments; + + /// @notice Raised if the provided init code was already prior deployed + error PriorDeployed(); + /// @notice Raised if the deployment fails + error DeploymentFailed(); + + /// @notice Deploy the specified init code with `CREATE` + /// @dev This init code is expected to be unique and not prior deployed + /// @param initCode The init code to pass to `CREATE` + function deploy(bytes memory initCode) external { + // Deploy the contract + address createdContract; + // slither-disable-next-line assembly + assembly { + createdContract := create(0, add(initCode, 0x20), mload(initCode)) + } + if (createdContract == address(0)) { + revert DeploymentFailed(); + } + + bytes32 initCodeHash = keccak256(initCode); + + /* + Check this wasn't prior deployed. + + This is a post-check, not a pre-check (in violation of the CEI pattern). + If we used a pre-check, a deployed contract could re-enter the Deployer + to deploy the same contract multiple times due to the inner call updating + state and then the outer call overwriting it. The post-check causes the + outer call to error once the inner call updates state. + + This does mean contract deployment may fail if deployment causes + arbitrary execution which maliciously nests deployment of the + being-deployed contract. Such an inner call won't fail, yet the outer + call would. The usage of a re-entrancy guard would cause the inner call + to fail while the outer call succeeds. This is considered so edge-case it + isn't worth handling. 
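On the consumer side, the `deployments` mapping above is what lets Serai locate the Router with a single `eth_call`, keyed by the Router's init code hash. A sketch of that lookup using the Rust `Deployer` wrapper and the `ethereum_primitives::keccak256` helper defined later in this diff (the provider construction is assumed to have happened elsewhere):

use std::sync::Arc;

use alloy_core::primitives::Address;
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::RootProvider;

async fn find_router(
  provider: Arc<RootProvider<SimpleRequest>>,
  router_init_code: &[u8],
) -> Option<Address> {
  // `new` returns Ok(None) until the Deployer itself has been deployed on this chain
  let deployer = Deployer::new(provider).await.ok()??;
  // A single view call against the `deployments` mapping, keyed by the init code's hash
  deployer.find_deployment(ethereum_primitives::keccak256(router_init_code)).await.ok()?
}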
+ */ + if (deployments[initCodeHash] != address(0)) { + revert PriorDeployed(); + } + + // Write the deployment to storage + deployments[initCodeHash] = createdContract; + } +} diff --git a/processor/ethereum/deployer/src/lib.rs b/processor/ethereum/deployer/src/lib.rs new file mode 100644 index 00000000..10140303 --- /dev/null +++ b/processor/ethereum/deployer/src/lib.rs @@ -0,0 +1,155 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use std::sync::Arc; + +use alloy_core::primitives::{hex, Address, U256, Bytes, TxKind}; +use alloy_consensus::{Signed, TxLegacy}; + +use alloy_sol_types::SolCall; + +use alloy_rpc_types_eth::{TransactionInput, TransactionRequest}; +use alloy_transport::{TransportErrorKind, RpcError}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +#[cfg(test)] +mod tests; + +#[rustfmt::skip] +#[expect(warnings)] +#[expect(needless_pass_by_value)] +#[expect(clippy::all)] +#[expect(clippy::ignored_unit_patterns)] +#[expect(clippy::redundant_closure_for_method_calls)] +mod abi { + alloy_sol_macro::sol!("contracts/Deployer.sol"); +} + +const INITCODE: &[u8] = { + const INITCODE_HEX: &[u8] = + include_bytes!(concat!(env!("OUT_DIR"), "/serai-processor-ethereum-deployer/Deployer.bin")); + const INITCODE: [u8; INITCODE_HEX.len() / 2] = + match hex::const_decode_to_array::<{ INITCODE_HEX.len() / 2 }>(INITCODE_HEX) { + Ok(initcode) => initcode, + Err(_) => panic!("Deployer.bin did not contain valid hex"), + }; + &INITCODE +}; + +/// The Deployer contract for the Serai Router contract. +/// +/// This Deployer has a deterministic address, letting it be immediately identified on any instance +/// of the EVM. It then supports retrieving the deployed contracts addresses (which aren't +/// deterministic) using a single call. +#[derive(Clone, Debug)] +pub struct Deployer(Arc>); +impl Deployer { + /// Obtain the transaction to deploy this contract, already signed. + /// + /// The account this transaction is sent from (which is populated in `from`) must be sufficiently + /// funded for this transaction to be submitted. This account has no known private key to anyone + /// so ETH sent can be neither misappropriated nor returned. + pub fn deployment_tx() -> Signed { + let initcode = Bytes::from_static(INITCODE); + + // Legacy transactions are used to ensure the widest possible degree of support across EVMs + let tx = TxLegacy { + chain_id: None, + nonce: 0, + /* + This needs to use a fixed gas price to achieve a deterministic address. The gas price is + fixed to 100 gwei, which should be generous, in order to make this unlikely to get stuck. + While potentially expensive, this only has to occur per chain this is deployed on. + + If this is too low of a gas price, private mempools can be used, with other transactions in + the bundle raising the gas price to acceptable levels. While this strategy could be + entirely relied upon, allowing the gas price paid to reflect the network's actual gas + price, that wouldn't work for EVM networks without private mempools. + + That leaves this as failing only if it violates a protocol constant, or if the gas price is + too low on a network without private mempools to publish via. In that case, this code + should to be forked to accept an enum of which network the deployment is for (with the gas + price derivative of that, as common as possible across networks to minimize the amount of + addresses representing the Deployer). 
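A concrete consequence of the fixed gas price and generous gas limit chosen for this transaction: the funding the one-shot deployer account needs is simply their product, since the transaction itself transfers no value. A sketch of that check (the exact figure assumes the constants below remain 100 gwei and 300,698 gas):

use alloy_core::primitives::U256;

#[test]
fn deployment_funding_requirement() {
  let tx = Deployer::deployment_tx();
  // For a legacy transaction, gas_price * gas_limit bounds what the sender can be charged
  let funding = U256::from(tx.tx().gas_price) * U256::from(tx.tx().gas_limit);
  // 100 gwei * 300,698 gas = 0.0300698 ETH
  assert_eq!(funding, U256::from(30_069_800_000_000_000u128));
}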
+ */ + gas_price: 100_000_000_000u128, + /* + This is twice the cost of deployment as of Ethereum's Cancun upgrade. The wide margin is to + increase the likelihood of surviving changes to the cost of contract deployment (notably + the gas cost of calldata). While wasteful, this only has to be done once per chain and is + accepted accordingly. + + If this is ever unacceptable, the parameterization suggested in case the `gas_price` is + unacceptable should be implemented. + */ + gas_limit: 300_698, + to: TxKind::Create, + value: U256::ZERO, + input: initcode, + }; + + ethereum_primitives::deterministically_sign(tx) + } + + /// Obtain the deterministic address for this contract. + pub fn address() -> Address { + let deployer_deployer = + Self::deployment_tx().recover_signer().expect("deployment_tx didn't have a valid signature"); + Address::create(&deployer_deployer, 0) + } + + /// Obtain the unsigned transaction to deploy a contract. + /// + /// This will not have its `nonce`, `gas_price`, nor `gas_limit` filled out. + pub fn deploy_tx(init_code: Vec) -> TxLegacy { + TxLegacy { + chain_id: None, + nonce: 0, + gas_price: 0, + gas_limit: 0, + to: TxKind::Call(Self::address()), + value: U256::ZERO, + input: abi::Deployer::deployCall::new((init_code.into(),)).abi_encode().into(), + } + } + + /// Construct a new view of the Deployer. + /// + /// This will return `None` if the Deployer has yet to be deployed on-chain. + pub async fn new( + provider: Arc>, + ) -> Result, RpcError> { + let address = Self::address(); + let code = provider.get_code_at(address).await?; + // Contract has yet to be deployed + if code.is_empty() { + return Ok(None); + } + Ok(Some(Self(provider))) + } + + /// Find the deployment of a contract. + pub async fn find_deployment( + &self, + init_code_hash: [u8; 32], + ) -> Result, RpcError> { + let call = TransactionRequest::default().to(Self::address()).input(TransactionInput::new( + abi::Deployer::deploymentsCall::new((init_code_hash.into(),)).abi_encode().into(), + )); + let bytes = self.0.call(&call).await?; + let deployment = abi::Deployer::deploymentsCall::abi_decode_returns(&bytes, true) + .map_err(|e| { + TransportErrorKind::Custom( + format!("node returned a non-address for function returning address: {e:?}").into(), + ) + })? 
+ ._0; + + if **deployment == [0; 20] { + return Ok(None); + } + Ok(Some(deployment)) + } +} diff --git a/processor/ethereum/deployer/src/tests.rs b/processor/ethereum/deployer/src/tests.rs new file mode 100644 index 00000000..6e4570ff --- /dev/null +++ b/processor/ethereum/deployer/src/tests.rs @@ -0,0 +1,107 @@ +use std::sync::Arc; + +use alloy_rpc_types_eth::{TransactionInput, TransactionRequest}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_rpc_client::ClientBuilder; +use alloy_provider::{Provider, RootProvider}; + +use alloy_node_bindings::Anvil; + +use crate::{ + abi::Deployer::{PriorDeployed, DeploymentFailed, DeployerErrors}, + Deployer, +}; + +#[tokio::test] +async fn test_deployer() { + const CANCUN: &str = "cancun"; + const LATEST: &str = "latest"; + + for network in [CANCUN, LATEST] { + let anvil = Anvil::new().arg("--hardfork").arg(network).spawn(); + + let provider = Arc::new(RootProvider::new( + ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true), + )); + + // Deploy the Deployer + { + let deployment_tx = Deployer::deployment_tx(); + let gas_programmed = deployment_tx.tx().gas_limit; + let receipt = ethereum_test_primitives::publish_tx(&provider, deployment_tx).await; + assert!(receipt.status()); + assert_eq!(receipt.contract_address.unwrap(), Deployer::address()); + + if network == CANCUN { + // Check the gas programmed was twice the gas used + // We only check this for cancun as the constant was programmed per cancun's gas pricing + assert_eq!(2 * receipt.gas_used, gas_programmed); + } + } + + // Deploy the deployer with the deployer + let mut deploy_tx = Deployer::deploy_tx(crate::INITCODE.to_vec()); + deploy_tx.gas_price = 100_000_000_000u128; + deploy_tx.gas_limit = 1_000_000; + { + let deploy_tx = ethereum_primitives::deterministically_sign(deploy_tx.clone()); + let receipt = ethereum_test_primitives::publish_tx(&provider, deploy_tx).await; + assert!(receipt.status()); + } + + // Verify we can now find the deployer + { + let deployer = Deployer::new(provider.clone()).await.unwrap().unwrap(); + let deployed_deployer = deployer + .find_deployment(ethereum_primitives::keccak256(crate::INITCODE)) + .await + .unwrap() + .unwrap(); + assert_eq!( + provider.get_code_at(deployed_deployer).await.unwrap(), + provider.get_code_at(Deployer::address()).await.unwrap(), + ); + assert!(deployed_deployer != Deployer::address()); + } + + // Verify deploying the same init code multiple times fails + { + let mut deploy_tx = deploy_tx; + // Change the gas price to cause a distinct message, and with it, a distinct signer + deploy_tx.gas_price += 1; + let deploy_tx = ethereum_primitives::deterministically_sign(deploy_tx); + let receipt = ethereum_test_primitives::publish_tx(&provider, deploy_tx.clone()).await; + assert!(!receipt.status()); + + let call = TransactionRequest::default() + .to(Deployer::address()) + .input(TransactionInput::new(deploy_tx.tx().input.clone())); + let call_err = provider.call(&call).await.unwrap_err(); + assert!(matches!( + call_err.as_error_resp().unwrap().as_decoded_error::(true).unwrap(), + DeployerErrors::PriorDeployed(PriorDeployed {}), + )); + } + + // Verify deployment failures yield errors properly + { + // 0xfe is an invalid opcode which is guaranteed to remain invalid + let mut deploy_tx = Deployer::deploy_tx(vec![0xfe]); + deploy_tx.gas_price = 100_000_000_000u128; + deploy_tx.gas_limit = 1_000_000; + + let deploy_tx = ethereum_primitives::deterministically_sign(deploy_tx); + let receipt = 
ethereum_test_primitives::publish_tx(&provider, deploy_tx.clone()).await; + assert!(!receipt.status()); + + let call = TransactionRequest::default() + .to(Deployer::address()) + .input(TransactionInput::new(deploy_tx.tx().input.clone())); + let call_err = provider.call(&call).await.unwrap_err(); + assert!(matches!( + call_err.as_error_resp().unwrap().as_decoded_error::(true).unwrap(), + DeployerErrors::DeploymentFailed(DeploymentFailed {}), + )); + } + } +} diff --git a/processor/ethereum/erc20/Cargo.toml b/processor/ethereum/erc20/Cargo.toml new file mode 100644 index 00000000..078192a4 --- /dev/null +++ b/processor/ethereum/erc20/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "serai-processor-ethereum-erc20" +version = "0.1.0" +description = "A library for the Serai Processor to interact with ERC20s" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum/erc20" +authors = ["Luke Parker "] +edition = "2021" +publish = false +rust-version = "1.81" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +alloy-core = { version = "0.8", default-features = false } + +alloy-sol-types = { version = "0.8", default-features = false } +alloy-sol-macro = { version = "0.8", default-features = false } + +alloy-rpc-types-eth = { version = "0.9", default-features = false } +alloy-transport = { version = "0.9", default-features = false } +alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } +alloy-provider = { version = "0.9", default-features = false } + +ethereum-primitives = { package = "serai-processor-ethereum-primitives", path = "../primitives", default-features = false } + +futures-util = { version = "0.3", default-features = false, features = ["std"] } diff --git a/processor/ethereum/erc20/LICENSE b/processor/ethereum/erc20/LICENSE new file mode 100644 index 00000000..41d5a261 --- /dev/null +++ b/processor/ethereum/erc20/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/ethereum/erc20/README.md b/processor/ethereum/erc20/README.md new file mode 100644 index 00000000..f1e447b0 --- /dev/null +++ b/processor/ethereum/erc20/README.md @@ -0,0 +1,3 @@ +# ERC20 + +A library for the Serai Processor to interact with ERC20s. 
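+
+As a minimal usage sketch (assuming the crate is imported under its package name, a Tokio
+runtime, and hypothetical RPC endpoint, token, recipient, and block range; the provider setup
+mirrors this repository's tests):
+
+```rust
+use alloy_core::primitives::Address;
+use alloy_rpc_client::ClientBuilder;
+use alloy_simple_request_transport::SimpleRequest;
+use alloy_provider::RootProvider;
+
+use serai_processor_ethereum_erc20::Erc20;
+
+#[tokio::main]
+async fn main() {
+  // Hypothetical node endpoint and addresses, for illustration only
+  let provider = RootProvider::new(
+    ClientBuilder::default()
+      .transport(SimpleRequest::new("http://127.0.0.1:8545".to_string()), true),
+  );
+  let token: Address = "0x0000000000000000000000000000000000000010".parse().unwrap();
+  let recipient: Address = "0x0000000000000000000000000000000000000011".parse().unwrap();
+
+  // Scan a block range for top-level transfers of `token` to `recipient`
+  let transfers =
+    Erc20::top_level_transfers_unordered(&provider, 1_000_000 ..= 1_000_100, token, recipient)
+      .await
+      .unwrap();
+  for transfer in transfers.transfers {
+    println!("{} sent {}, with {} bytes of data", transfer.from, transfer.amount, transfer.data.len());
+  }
+}
+```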
diff --git a/networks/ethereum/contracts/IERC20.sol b/processor/ethereum/erc20/contracts/IERC20.sol similarity index 69% rename from networks/ethereum/contracts/IERC20.sol rename to processor/ethereum/erc20/contracts/IERC20.sol index 70f1f93c..6298592a 100644 --- a/networks/ethereum/contracts/IERC20.sol +++ b/processor/ethereum/erc20/contracts/IERC20.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: CC0 -pragma solidity ^0.8.0; +pragma solidity ^0.8.26; interface IERC20 { event Transfer(address indexed from, address indexed to, uint256 value); @@ -18,3 +18,17 @@ interface IERC20 { function approve(address spender, uint256 value) external returns (bool); function allowance(address owner, address spender) external view returns (uint256); } + +interface SeraiIERC20 { + function transferWithInInstruction01BB244A8A( + address to, + uint256 value, + bytes calldata inInstruction + ) external returns (bool); + function transferFromWithInInstruction00081948E0( + address from, + address to, + uint256 value, + bytes calldata inInstruction + ) external returns (bool); +} diff --git a/processor/ethereum/erc20/src/lib.rs b/processor/ethereum/erc20/src/lib.rs new file mode 100644 index 00000000..20e086aa --- /dev/null +++ b/processor/ethereum/erc20/src/lib.rs @@ -0,0 +1,252 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::ops::RangeInclusive; +use std::collections::HashMap; + +use alloy_core::primitives::{Address, U256}; + +use alloy_sol_types::{SolInterface, SolEvent}; + +use alloy_rpc_types_eth::{Log, Filter, TransactionTrait}; +use alloy_transport::{TransportErrorKind, RpcError}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +use ethereum_primitives::LogIndex; + +use futures_util::stream::{StreamExt, FuturesUnordered}; + +#[rustfmt::skip] +#[expect(warnings)] +#[expect(needless_pass_by_value)] +#[expect(missing_docs)] +#[expect(clippy::all)] +#[expect(clippy::ignored_unit_patterns)] +#[expect(clippy::redundant_closure_for_method_calls)] +mod abi { + alloy_sol_macro::sol!("contracts/IERC20.sol"); +} +use abi::IERC20::{IERC20Calls, transferCall, transferFromCall}; +use abi::SeraiIERC20::SeraiIERC20Calls; +pub use abi::IERC20::Transfer; +pub use abi::SeraiIERC20::{ + transferWithInInstruction01BB244A8ACall as transferWithInInstructionCall, + transferFromWithInInstruction00081948E0Call as transferFromWithInInstructionCall, +}; + +#[cfg(test)] +mod tests; + +/// A top-level ERC20 transfer +/// +/// This does not include `token`, `to` fields. Those are assumed contextual to the creation of +/// this. +#[derive(Clone, Debug)] +pub struct TopLevelTransfer { + /// The ID of the event for this transfer. + pub id: LogIndex, + /// The hash of the transaction which caused this transfer. + pub transaction_hash: [u8; 32], + /// The address which made the transfer. + pub from: Address, + /// The amount transferred. + pub amount: U256, + /// The data appended after the call itself. + pub data: Vec, +} + +/// The result of `Erc20::top_level_transfers_unordered`. +pub struct TopLevelTransfers { + /// Every `Transfer` log of the contextual ERC20 to the contextual account, indexed by + /// their transaction. + /// + /// The ERC20/account is labelled contextual as it isn't directly named here. Instead, they're + /// assumed contextual to how this was created. + pub logs: HashMap<[u8; 32], Vec>, + /// All of the top-level transfers of the contextual ERC20 to the contextual account. 
+ /// + /// The ERC20/account is labelled contextual as it isn't directly named here. Instead, they're + /// assumed contextual to how this was created. + pub transfers: Vec, +} + +/// A view for an ERC20 contract. +#[derive(Clone, Debug)] +pub struct Erc20; +impl Erc20 { + /// The filter for transfer logs of the specified ERC20, to the specified recipient. + fn transfer_filter(blocks: RangeInclusive, erc20: Address, to: Address) -> Filter { + let filter = Filter::new().select(blocks); + filter.address(erc20).event_signature(Transfer::SIGNATURE_HASH).topic2(to.into_word()) + } + + /// Yield the top-level transfer for the specified transaction (if one exists). + /// + /// The passed-in logs MUST be the logs for this transaction. The logs MUST be filtered to the + /// `Transfer` events of the intended token and the intended `to` transferred to. These + /// properties are completely unchecked and assumed to be the case. + /// + /// This does NOT yield THE top-level transfer. If multiple `Transfer` events have identical + /// structure to the top-level transfer call, the first `Transfer` event present in the logs is + /// considered the top-level transfer. + // Yielding THE top-level transfer would require tracing the transaction execution and isn't + // worth the effort. + async fn top_level_transfer( + provider: &RootProvider, + erc20: Address, + transaction_hash: [u8; 32], + transfer_logs: &[Log], + ) -> Result, RpcError> { + // Fetch the transaction + let transaction = + provider.get_transaction_by_hash(transaction_hash.into()).await?.ok_or_else(|| { + TransportErrorKind::Custom( + "node didn't have the transaction which emitted a log it had".to_string().into(), + ) + })?; + + // If this transaction didn't call this ERC20 at a top-level, return + if transaction.inner.to() != Some(erc20) { + return Ok(None); + } + + // Don't validate the encoding as this can't be re-encoded to an identical bytestring due + // to the additional data appended after the call itself + let Ok(call) = IERC20Calls::abi_decode(transaction.inner.input(), false) else { + return Ok(None); + }; + + // Extract the top-level call's from/to/value + let (from, to, value) = match call { + IERC20Calls::transfer(transferCall { to, value }) => (transaction.from, to, value), + IERC20Calls::transferFrom(transferFromCall { from, to, value }) => (from, to, value), + // Treat any other function selectors as unrecognized + _ => return Ok(None), + }; + + // Find the log for this top-level transfer + for log in transfer_logs { + // Since the caller is responsible for filtering these to `Transfer` events, we can assume + // this is a non-compliant ERC20 or an error with the logs fetched. 
We assume ERC20 + // compliance here, making this an RPC error + let log = log.log_decode::().map_err(|_| { + TransportErrorKind::Custom("log didn't include a valid transfer event".to_string().into()) + })?; + + let block_hash = log.block_hash.ok_or_else(|| { + TransportErrorKind::Custom("log didn't have its block hash set".to_string().into()) + })?; + let log_index = log.log_index.ok_or_else(|| { + TransportErrorKind::Custom("log didn't have its index set".to_string().into()) + })?; + let log = log.inner.data; + + // Ensure the top-level transfer is equivalent to the transfer this log represents + if !((log.from == from) && (log.to == to) && (log.value == value)) { + continue; + } + + // Read the data appended after + let data = if let Ok(call) = SeraiIERC20Calls::abi_decode(transaction.inner.input(), true) { + match call { + SeraiIERC20Calls::transferWithInInstruction01BB244A8A( + transferWithInInstructionCall { inInstruction, .. }, + ) | + SeraiIERC20Calls::transferFromWithInInstruction00081948E0( + transferFromWithInInstructionCall { inInstruction, .. }, + ) => Vec::from(inInstruction), + } + } else { + // If there was no additional data appended, use an empty Vec (which has no data) + // This has a slight information loss in that it's None -> Some(vec![]), but it's fine + vec![] + }; + + return Ok(Some(TopLevelTransfer { + id: LogIndex { block_hash: *block_hash, index_within_block: log_index }, + transaction_hash, + from: log.from, + amount: log.value, + data, + })); + } + + Ok(None) + } + + /// Fetch all top-level transfers to the specified address for this token. + /// + /// The `transfers` in the result are unordered. The `logs` are sorted by index. + pub async fn top_level_transfers_unordered( + provider: &RootProvider, + blocks: RangeInclusive, + erc20: Address, + to: Address, + ) -> Result> { + let mut logs = { + // Get all transfers within these blocks + let logs = provider.get_logs(&Self::transfer_filter(blocks, erc20, to)).await?; + + // The logs, indexed by their transactions + let mut transaction_logs = HashMap::new(); + // Index the logs by their transactions + for log in logs { + // Double check the address which emitted this log + if log.address() != erc20 { + Err(TransportErrorKind::Custom( + "node returned logs for a different address than requested".to_string().into(), + ))?; + } + // Double check the event signature for this log + if log.topics().first() != Some(&Transfer::SIGNATURE_HASH) { + Err(TransportErrorKind::Custom( + "node returned a log for a different topic than filtered to".to_string().into(), + ))?; + } + // Double check the `to` topic + if log.topics().get(2) != Some(&to.into_word()) { + Err(TransportErrorKind::Custom( + "node returned a transfer for a different `to` than filtered to".to_string().into(), + ))?; + } + + let tx_id = log + .transaction_hash + .ok_or_else(|| { + TransportErrorKind::Custom("log didn't specify its transaction hash".to_string().into()) + })? 
+ .0; + + transaction_logs.entry(tx_id).or_insert_with(|| Vec::with_capacity(1)).push(log); + } + + transaction_logs + }; + + let mut transfers = vec![]; + { + // Use `FuturesUnordered` so these RPC calls run in parallel + let mut futures = FuturesUnordered::new(); + for (tx_id, transfer_logs) in &mut logs { + // Sort the logs to ensure the the earliest logs are first + transfer_logs.sort_by_key(|log| log.log_index); + futures.push(Self::top_level_transfer(provider, erc20, *tx_id, transfer_logs)); + } + + while let Some(transfer) = futures.next().await { + match transfer { + // Top-level transfer + Ok(Some(transfer)) => transfers.push(transfer), + // Not a top-level transfer + Ok(None) => continue, + // Failed to get this transaction's information so abort + Err(e) => Err(e)?, + } + } + } + + Ok(TopLevelTransfers { logs, transfers }) + } +} diff --git a/processor/ethereum/erc20/src/tests.rs b/processor/ethereum/erc20/src/tests.rs new file mode 100644 index 00000000..037c7862 --- /dev/null +++ b/processor/ethereum/erc20/src/tests.rs @@ -0,0 +1,15 @@ +use alloy_sol_types::SolCall; + +#[test] +fn selector_collisions() { + assert_eq!( + crate::abi::IERC20::transferCall::SELECTOR, + crate::abi::SeraiIERC20::transferWithInInstruction01BB244A8ACall::SELECTOR + ); + assert_eq!( + crate::abi::IERC20::transferFromCall::SELECTOR, + crate::abi::SeraiIERC20::transferFromWithInInstruction00081948E0Call::SELECTOR + ); +} + +// This is primarily tested via serai-processor-ethereum-router diff --git a/processor/ethereum/primitives/Cargo.toml b/processor/ethereum/primitives/Cargo.toml new file mode 100644 index 00000000..89869cb8 --- /dev/null +++ b/processor/ethereum/primitives/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "serai-processor-ethereum-primitives" +version = "0.1.0" +description = "Primitives for Serai's Ethereum Processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum/primitives" +authors = ["Luke Parker "] +edition = "2021" +publish = false +rust-version = "1.81" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +group = { version = "0.13", default-features = false } +k256 = { version = "^0.13.1", default-features = false, features = ["std", "arithmetic"] } + +alloy-primitives = { version = "0.8", default-features = false } +alloy-consensus = { version = "0.9", default-features = false, features = ["k256"] } diff --git a/processor/ethereum/primitives/LICENSE b/processor/ethereum/primitives/LICENSE new file mode 100644 index 00000000..41d5a261 --- /dev/null +++ b/processor/ethereum/primitives/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
diff --git a/processor/ethereum/primitives/README.md b/processor/ethereum/primitives/README.md new file mode 100644 index 00000000..90da68c6 --- /dev/null +++ b/processor/ethereum/primitives/README.md @@ -0,0 +1,3 @@ +# Ethereum Processor Primitives + +This library contains miscellaneous primitives and helper functions. diff --git a/processor/ethereum/primitives/src/borsh.rs b/processor/ethereum/primitives/src/borsh.rs new file mode 100644 index 00000000..d7f30dbf --- /dev/null +++ b/processor/ethereum/primitives/src/borsh.rs @@ -0,0 +1,24 @@ +use ::borsh::{io, BorshSerialize, BorshDeserialize}; + +use alloy_primitives::{U256, Address}; + +/// Serialize a U256 with a borsh-compatible API. +pub fn serialize_u256(value: &U256, writer: &mut impl io::Write) -> io::Result<()> { + let value: [u8; 32] = value.to_be_bytes(); + value.serialize(writer) +} + +/// Deserialize an address with a borsh-compatible API. +pub fn deserialize_u256(reader: &mut impl io::Read) -> io::Result { + <[u8; 32]>::deserialize_reader(reader).map(|value| U256::from_be_bytes(value)) +} + +/// Serialize an address with a borsh-compatible API. +pub fn serialize_address(address: &Address, writer: &mut impl io::Write) -> io::Result<()> { + <[u8; 20]>::from(address.0).serialize(writer) +} + +/// Deserialize an address with a borsh-compatible API. +pub fn deserialize_address(reader: &mut impl io::Read) -> io::Result
{ + <[u8; 20]>::deserialize_reader(reader).map(|address| Address(address.into())) +} diff --git a/processor/ethereum/primitives/src/lib.rs b/processor/ethereum/primitives/src/lib.rs new file mode 100644 index 00000000..2727ea22 --- /dev/null +++ b/processor/ethereum/primitives/src/lib.rs @@ -0,0 +1,95 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use ::borsh::{BorshSerialize, BorshDeserialize}; + +use group::ff::PrimeField; +use k256::Scalar; + +use alloy_primitives::PrimitiveSignature; +use alloy_consensus::{SignableTransaction, Signed, TxLegacy}; + +mod borsh; +pub use borsh::*; + +/// An index of a log within a block. +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)] +#[borsh(crate = "::borsh")] +pub struct LogIndex { + /// The hash of the block which produced this log. + pub block_hash: [u8; 32], + /// The index of this log within the execution of the block. + pub index_within_block: u64, +} + +/// The Keccak256 hash function. +pub fn keccak256(data: impl AsRef<[u8]>) -> [u8; 32] { + alloy_primitives::keccak256(data.as_ref()).into() +} + +/// Deterministically sign a transaction. +/// +/// This signs a transaction via setting a signature of `r = 1, s = 1`. The purpose of this is to +/// be able to send a transaction from an account which no one knows the private key for and no +/// other messages may be signed for from. +/// +/// This function panics if passed a transaction with a non-None chain ID. This is because the +/// signer for this transaction is only singular across any/all EVM instances if it isn't binding +/// to an instance. +pub fn deterministically_sign(tx: TxLegacy) -> Signed { + assert!( + tx.chain_id.is_none(), + "chain ID was Some when deterministically signing a TX (causing a non-singular signer)" + ); + + /* + ECDSA signatures are: + - x = private key + - k = rand() + - R = k * G + - r = R.x() + - s = (H(m) + (r * x)) * k.invert() + + Key recovery is performed via: + - a = s * R = (H(m) + (r * x)) * G + - b = a - (H(m) * G) = (r * x) * G + - X = b / r = x * G + - X = ((s * R) - (H(m) * G)) * r.invert() + + This requires `r` be non-zero and `R` be recoverable from `r` and the parity byte. For + `r = 1, s = 1`, this sets `X` to `R - (H(m) * G)`. Since there is an `R` recoverable for + `r = 1`, since the `R` is a point with an unknown discrete logarithm w.r.t. the generator, and + since the resulting key is dependent on the message signed for, this will always work to + the specification. 
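+
+    Concretely, substituting `r = 1, s = 1` into the recovery equation above:
+
+      X = ((s * R) - (H(m) * G)) * r.invert()
+        = (R - (H(m) * G)) * 1
+        = R - (H(m) * G)
+
+    so the recovered signer is fully determined by the message signed for (given the fixed `R`
+    recovered from `r = 1` and the parity byte), and its discrete logarithm is unknown.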
+ */ + + let r = Scalar::ONE; + let s = Scalar::ONE; + let r_bytes: [u8; 32] = r.to_repr().into(); + let s_bytes: [u8; 32] = s.to_repr().into(); + let signature = + PrimitiveSignature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), false); + + let res = tx.into_signed(signature); + debug_assert!(res.recover_signer().is_ok()); + res +} + +#[test] +fn test_deterministically_sign() { + let tx = TxLegacy { chain_id: None, ..Default::default() }; + let signed = deterministically_sign(tx.clone()); + + assert!(signed.recover_signer().is_ok()); + let one = alloy_primitives::U256::from(1u64); + assert_eq!(signed.signature().r(), one); + assert_eq!(signed.signature().s(), one); + + let mut other_tx = tx.clone(); + other_tx.nonce += 1; + // Signing a distinct message should yield a distinct signer + assert!( + signed.recover_signer().unwrap() != deterministically_sign(other_tx).recover_signer().unwrap() + ); +} diff --git a/processor/ethereum/router/Cargo.toml b/processor/ethereum/router/Cargo.toml new file mode 100644 index 00000000..5cdd0b3a --- /dev/null +++ b/processor/ethereum/router/Cargo.toml @@ -0,0 +1,70 @@ +[package] +name = "serai-processor-ethereum-router" +version = "0.1.0" +description = "The Router used by the Serai Processor for Ethereum" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum/router" +authors = ["Luke Parker "] +edition = "2021" +publish = false +rust-version = "1.81" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +group = { version = "0.13", default-features = false } +k256 = { version = "0.13", default-features = false, features = ["std", "arithmetic"] } + +alloy-core = { version = "0.8", default-features = false } + +alloy-sol-types = { version = "0.8", default-features = false } +alloy-sol-macro = { version = "0.8", default-features = false } + +alloy-consensus = { version = "0.9", default-features = false } + +alloy-rpc-types-eth = { version = "0.9", default-features = false } +alloy-transport = { version = "0.9", default-features = false } +alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } +alloy-provider = { version = "0.9", default-features = false } + +revm = { version = "19", default-features = false, features = ["std"] } + +ethereum-schnorr = { package = "ethereum-schnorr-contract", path = "../../../networks/ethereum/schnorr", default-features = false } + +ethereum-primitives = { package = "serai-processor-ethereum-primitives", path = "../primitives", default-features = false } +ethereum-deployer = { package = "serai-processor-ethereum-deployer", path = "../deployer", default-features = false } +erc20 = { package = "serai-processor-ethereum-erc20", path = "../erc20", default-features = false } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +serai-client = { path = "../../../substrate/client", default-features = false, features = ["ethereum"] } + +futures-util = { version = "0.3", default-features = false, features = ["std"] } + +[build-dependencies] +build-solidity-contracts = { path = "../../../networks/ethereum/build-contracts", default-features = false } + +syn = { version = "2", default-features = false, features = ["proc-macro"] } + +syn-solidity = { version = 
"0.8", default-features = false } +alloy-sol-macro-input = { version = "0.8", default-features = false } +alloy-sol-macro-expander = { version = "0.8", default-features = false } + +[dev-dependencies] +rand_core = { version = "0.6", default-features = false, features = ["std"] } + +k256 = { version = "0.13", default-features = false, features = ["std"] } + +alloy-provider = { version = "0.9", default-features = false, features = ["debug-api", "trace-api"] } +alloy-rpc-client = { version = "0.9", default-features = false } +alloy-node-bindings = { version = "0.9", default-features = false } + +tokio = { version = "1.0", default-features = false, features = ["rt-multi-thread", "macros"] } + +ethereum-test-primitives = { package = "serai-ethereum-test-primitives", path = "../test-primitives" } diff --git a/processor/ethereum/router/LICENSE b/processor/ethereum/router/LICENSE new file mode 100644 index 00000000..41d5a261 --- /dev/null +++ b/processor/ethereum/router/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/ethereum/router/README.md b/processor/ethereum/router/README.md new file mode 100644 index 00000000..efb4d0a4 --- /dev/null +++ b/processor/ethereum/router/README.md @@ -0,0 +1,5 @@ +# Ethereum Router + +The [Router contract](./contracts/Router.sol) is extensively documented to ensure clarity and +understanding of the design decisions made. Please refer to it for understanding of why/what this +is. 
diff --git a/processor/ethereum/router/build.rs b/processor/ethereum/router/build.rs new file mode 100644 index 00000000..f80a0b77 --- /dev/null +++ b/processor/ethereum/router/build.rs @@ -0,0 +1,68 @@ +use std::{env, fs}; + +use alloy_sol_macro_input::SolInputKind; + +fn write(sol: syn_solidity::File, file: &str) { + let sol = alloy_sol_macro_expander::expand::expand(sol).unwrap(); + fs::write(file, sol.to_string()).unwrap(); +} + +fn sol(sol_files: &[&str], file: &str) { + let mut sol = String::new(); + for sol_file in sol_files { + sol += &fs::read_to_string(sol_file).unwrap(); + } + let SolInputKind::Sol(sol) = syn::parse_str(&sol).unwrap() else { + panic!("parsed .sols file wasn't SolInputKind::Sol"); + }; + write(sol, file); +} + +fn main() { + let artifacts_path = + env::var("OUT_DIR").unwrap().to_string() + "/serai-processor-ethereum-router"; + + if !fs::exists(&artifacts_path).unwrap() { + fs::create_dir(&artifacts_path).unwrap(); + } + + build_solidity_contracts::build( + &["../../../networks/ethereum/schnorr/contracts", "../erc20/contracts", "contracts"], + "contracts", + &artifacts_path, + ) + .unwrap(); + // These are detected multiple times and distinguished, hence their renaming to canonical forms + let router_bin = artifacts_path.clone() + "/Router.bin"; + let _ = fs::remove_file(&router_bin); // Remove the file if it already exists, if we can + fs::rename(artifacts_path.clone() + "/Router_sol_Router.bin", &router_bin).unwrap(); + + let router_bin_runtime = artifacts_path.clone() + "/Router.bin-runtime"; + let _ = fs::remove_file(&router_bin_runtime); + fs::rename(artifacts_path.clone() + "/Router_sol_Router.bin-runtime", router_bin_runtime) + .unwrap(); + + // This cannot be handled with the sol! macro. The Router requires an import + // https://github.com/alloy-rs/core/issues/602 + sol( + &[ + "../../../networks/ethereum/schnorr/contracts/Schnorr.sol", + "contracts/IRouter.sol", + "contracts/Router.sol", + ], + &(artifacts_path.clone() + "/router.rs"), + ); + + let test_artifacts_path = artifacts_path + "/tests"; + if !fs::exists(&test_artifacts_path).unwrap() { + fs::create_dir(&test_artifacts_path).unwrap(); + } + + // Build the test contracts + build_solidity_contracts::build( + &["../../../networks/ethereum/schnorr/contracts", "../erc20/contracts", "contracts"], + "contracts/tests", + &test_artifacts_path, + ) + .unwrap(); +} diff --git a/processor/ethereum/router/contracts/IRouter.sol b/processor/ethereum/router/contracts/IRouter.sol new file mode 100644 index 00000000..1cf61f8e --- /dev/null +++ b/processor/ethereum/router/contracts/IRouter.sol @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.26; + +/// @title Serai Router (without functions overriden by selector collisions) +/// @author Luke Parker +/// @notice Intakes coins for the Serai network and handles relaying batches of transfers out +interface IRouterWithoutCollisions { + /// @notice Emitted when the next key for Serai's Ethereum validators is set + /// @param nonce The nonce consumed to update this key + /// @param key The key updated to + event NextSeraiKeySet(uint256 indexed nonce, bytes32 indexed key); + + /// @notice Emitted when the key for Serai's Ethereum validators is updated + /// @param nonce The nonce consumed to update this key + /// @param key The key updated to + event SeraiKeyUpdated(uint256 indexed nonce, bytes32 indexed key); + + /// @notice Emitted when an InInstruction occurs + /// @param from The address which called `inInstruction` and caused this event to be 
emitted + /// @param coin The coin transferred in + /// @param amount The amount of the coin transferred in + /// @param instruction The Shorthand-encoded InInstruction for Serai to decode and handle + event InInstruction( + address indexed from, address indexed coin, uint256 amount, bytes instruction + ); + + /// @notice Emitted when a batch of `OutInstruction`s occurs + /// @param nonce The nonce consumed to execute this batch of transactions + /// @param messageHash The hash of the message signed for the executed batch + /// @param resultsLength The length of the results bitvec (represented as bytes) + /** + * @param results The result of each `OutInstruction` executed. This is a bitvec with true + * representing success and false representing failure. The low bit in the first byte is used + * for the first `OutInstruction`, before the next bit, and so on, before the next byte. An + * `OutInstruction` is considered as having succeeded if the call transferring ETH doesn't fail, + * the ERC20 transfer doesn't fail, and any executed code doesn't revert. + */ + event Batch( + uint256 indexed nonce, bytes32 indexed messageHash, uint256 resultsLength, bytes results + ); + + /// @notice Emitted when `escapeHatch` is invoked + /// @param escapeTo The address to escape to + event EscapeHatch(uint256 indexed nonce, address indexed escapeTo); + + /// @notice Emitted when coins escape through the escape hatch + /// @param coin The coin which escaped + /// @param amount The amount which escaped + event Escaped(address indexed coin, uint256 amount); + + /// @notice The Serai key verifying the signature wasn't set + error SeraiKeyWasNone(); + /// @notice The key for Serai was invalid + /// @dev This is incomplete and not always guaranteed to be thrown upon an invalid key + error InvalidSeraiKey(); + /// @notice The contract has had its escape hatch invoked and won't accept further actions + error EscapeHatchInvoked(); + /// @notice The signature was invalid + error InvalidSignature(); + + /// @notice The amount specified didn't match `msg.value` + error AmountMismatchesMsgValue(); + /// @notice The call to an ERC20's `transferFrom` failed + error TransferFromFailed(); + + /// @notice The code wasn't to-be-executed by self + error CodeNotBySelf(); + /// @notice A non-reentrant function was re-entered + error Reentered(); + + /// @notice An invalid address to escape to was specified. + error InvalidEscapeAddress(); + /// @notice The escape address wasn't a contract. + error EscapeAddressWasNotAContract(); + /// @notice Escaping when escape hatch wasn't invoked. + error EscapeHatchNotInvoked(); + /// @notice Escaping failed to transfer out. + error EscapeFailed(); + + /// @notice Transfer coins into Serai with an instruction + /// @param coin The coin to transfer in (address(0) if Ether) + /// @param amount The amount to transfer in (msg.value if Ether) + /** + * @param instruction The Shorthand-encoded InInstruction for Serai to associate with this + * transfer in + */ + // Re-entrancy doesn't bork this function + // slither-disable-next-line reentrancy-events + function inInstruction(address coin, uint256 amount, bytes memory instruction) external payable; + + /// @notice Execute some arbitrary code within a secure sandbox + /** + * @dev This performs sandboxing by deploying this code with `CREATE`. This is an external + * function as we can't meter `CREATE`/internal functions. We work around this by calling this + * function with `CALL` (which we can meter). 
This does forward `msg.value` to the newly + * deployed contract. + */ + /// @param code The code to execute + function executeArbitraryCode(bytes memory code) external payable; + + /// @notice Escape coins after the escape hatch has been invoked + /// @param coin The coin to escape + function escape(address coin) external; + + /// @notice Fetch the next nonce to use by an action published to this contract + /// return The next nonce to use by an action published to this contract + function nextNonce() external view returns (uint256); + + /// @notice Fetch the next key for Serai's Ethereum validator set + /// @return The next key for Serai's Ethereum validator set or bytes32(0) if none is currently set + function nextSeraiKey() external view returns (bytes32); + + /// @notice Fetch the current key for Serai's Ethereum validator set + /** + * @return The current key for Serai's Ethereum validator set or bytes32(0) if none is currently + * set + */ + function seraiKey() external view returns (bytes32); + + /// @notice Fetch the address escaped to + /// @return The address which was escaped to (address(0) if the escape hatch hasn't been invoked) + function escapedTo() external view returns (address); +} + +/// @title Serai Router +/// @author Luke Parker +/// @notice Intakes coins for the Serai network and handles relaying batches of transfers out +interface IRouter is IRouterWithoutCollisions { + /// @title A signature + /// @dev Thin wrapper around `c, s` to simplify the API + struct Signature { + bytes32 c; + bytes32 s; + } + + /// @title The type of destination + /** + * @dev A destination is either an ABI-encoded address or an ABI-encoded `CodeDestination` + * containing code to deploy (invoking its constructor). + */ + enum DestinationType { + Address, + Code + } + + /// @title A code destination + /** + * @dev If transferring an ERC20 to this destination, it will be transferred to the address the + * code will be deployed to. If transferring ETH, it will be transferred with the deployment of + * the code. `code` is deployed with CREATE (calling its constructor). The entire deployment + * (and associated sandboxing) must consume less than `gasLimit` units of gas or it will revert. + */ + struct CodeDestination { + uint32 gasLimit; + bytes code; + } + + /// @title An instruction to transfer coins out + /// @dev Specifies a destination and amount but not the coin as that's assumed to be contextual + struct OutInstruction { + DestinationType destinationType; + bytes destination; + uint256 amount; + } + + /// @notice Update the key representing Serai's Ethereum validators + /** + * @dev This does not validate the passed-in key as much as possible. This is accepted as the key + * won't actually be rotated to until it provides a signature confirming the update however + * (proving signatures can be made by the key in question and verified via our Schnorr + * contract). 
+ */ + // @param signature The signature by the current key authorizing this update + /// @param signature The signature by the current key authorizing this update + /// @param nextSeraiKeyVar The key to update to, once it confirms the update + function updateSeraiKey(Signature calldata signature, bytes32 nextSeraiKeyVar) external; + + /// @notice Confirm the next key representing Serai's Ethereum validators, updating to it + /// @param signature The signature by the next key confirming its validity + function confirmNextSeraiKey(Signature calldata signature) external; + + /// @notice Execute a batch of `OutInstruction`s + /** + * @dev All `OutInstruction`s in a batch are only for a single coin to simplify handling of the + * fee + */ + /// @param signature The signature by the current key for Serai's Ethereum validators + /// @param coin The coin all of these `OutInstruction`s are for + /// @param fee The fee to pay (in coin) to the caller for their relaying of this batch + /// @param outs The `OutInstruction`s to act on + function execute( + Signature calldata signature, + address coin, + uint256 fee, + OutInstruction[] calldata outs + ) external; + + /// @notice Escapes to a new smart contract + /// @dev This should be used upon an invariant being reached or new functionality being needed + /// @param signature The signature by the current key for Serai's Ethereum validators + /// @param escapeTo The address to escape to + function escapeHatch(Signature calldata signature, address escapeTo) external; +} diff --git a/processor/ethereum/router/contracts/Router.sol b/processor/ethereum/router/contracts/Router.sol new file mode 100644 index 00000000..03eaac0e --- /dev/null +++ b/processor/ethereum/router/contracts/Router.sol @@ -0,0 +1,759 @@ +// SPDX-License-Identifier: AGPL-3.0-only +pragma solidity ^0.8.26; + +import "IERC20.sol"; + +import "Schnorr.sol"; + +import "IRouter.sol"; + +/* + The Router directly performs low-level calls in order to have direct control over gas. Since this + contract is meant to relay an entire batch of outs in a single transaction, the ability to exactly + meter individual outs is critical. + + We don't check the return values as we don't care if the calls succeeded. We solely care we made + them. If someone configures an external contract in a way which borks, we explicitly define that + as their fault and out-of-scope to this contract. + + If an actual invariant within Serai exists, an escape hatch exists to move to a new contract. Any + improperly handled actions can be re-signed and re-executed at that point in time. + + Historically, the call-stack-depth limit would've made this design untenable. Due to EIP-150, even + with 1 billion gas transactions, the call-stack-depth limit remains unreachable. + + The `execute` function pays a relayer, as expected for use in the account-abstraction model. Other + functions also expect relayers, yet do not explicitly pay fees. Those calls are expected to be + justified via the backpressure of transactions with fees. 
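+
+  (As a concrete illustration of the EIP-150 point: each nested call forwards at most 63/64 of
+  the remaining gas, so after 1024 nested calls only roughly (63/64)**1024, on the order of one
+  ten-millionth, of the original gas remains. Even a 1 billion gas transaction would have on the
+  order of 100 gas left at the depth limit, far less than the cost of making a further call.)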
+*/ +// slither-disable-start low-level-calls,unchecked-lowlevel + +/// @title Serai Router +/// @author Luke Parker +/// @notice Intakes coins for the Serai network and handles relaying batches of transfers out +contract Router is IRouterWithoutCollisions { + /// @dev The code hash for a non-empty account without code + bytes32 constant ACCOUNT_WITHOUT_CODE_CODEHASH = keccak256(""); + + /// @dev The address in transient storage used for the reentrancy guard + bytes32 constant REENTRANCY_GUARD_SLOT = bytes32(uint256(keccak256("ReentrancyGuard Router")) - 1); + + /** + * @dev The amount of gas to use when interacting with ERC20s + * + * The ERC20s integrated are presumed to have a constant gas cost, meaning this fixed gas cost + * can only be insufficient if: + * + * A) An integrated ERC20 uses more gas than this limit (presumed not to be the case) + * B) An integrated ERC20 is upgraded (integrated ERC20s are presumed to not be upgradeable) + * C) The ERC20 call has a variable gas cost and the user set a hook on receive which caused + * this (in which case, we accept such interactions failing) + * D) The user was blacklisted and any transfers to them cause out of gas errors (in which + * case, we again accept dropping this) + * E) Other extreme edge cases, for which such tokens are assumed to not be integrated + * F) Ethereum opcodes are repriced in a sufficiently breaking fashion + * + * This should be in such excess of the gas requirements of integrated tokens we'll survive + * repricing, so long as the repricing doesn't revolutionize EVM gas costs as we know it. In such + * a case, Serai would have to migrate to a new smart contract using `escapeHatch`. That also + * covers all other potential exceptional cases. + */ + uint256 constant ERC20_GAS = 100_000; + + /** + * @dev The next nonce used to determine the address of contracts deployed with CREATE. This is + * used to predict the addresses of deployed contracts ahead of time. + */ + /* + We don't expose a getter for this as it shouldn't be expected to have any specific value at a + given moment in time. If someone wants to know the address of their deployed contract, they can + have it emit an event and verify the emitting contract is the expected one. + */ + uint256 private _smartContractNonce; + + /** + * @dev The nonce to verify the next signature with, incremented upon an action to prevent + * replays/out-of-order execution + */ + uint256 private _nextNonce; + + /** + * @dev The next public key for Serai's Ethereum validator set, in the form the Schnorr library + * expects + */ + bytes32 private _nextSeraiKey; + + /** + * @dev The current public key for Serai's Ethereum validator set, in the form the Schnorr library + * expects + */ + bytes32 private _seraiKey; + + /// @dev The address escaped to + address private _escapedTo; + + /// @dev Acquire the re-entrancy lock for the lifetime of this transaction + modifier nonReentrant() { + bytes32 reentrancyGuardSlot = REENTRANCY_GUARD_SLOT; + bytes32 priorEntered; + // slither-disable-next-line assembly + assembly { + priorEntered := tload(reentrancyGuardSlot) + tstore(reentrancyGuardSlot, 1) + } + if (priorEntered != bytes32(0)) { + revert Reentered(); + } + + _; + + // Clear the re-entrancy guard to allow multiple transactions to non-re-entrant functions within + // a transaction + // slither-disable-next-line assembly + assembly { + tstore(reentrancyGuardSlot, 0) + } + } + + /// @dev Set the next Serai key. 
This does not read from/write to `_nextNonce` + /// @param nonceUpdatedWith The nonce used to set the next key + /// @param nextSeraiKeyVar The key to set as next + function _setNextSeraiKey(uint256 nonceUpdatedWith, bytes32 nextSeraiKeyVar) private { + // Explicitly disallow 0 so we can always consider 0 as None and non-zero as Some + if (nextSeraiKeyVar == bytes32(0)) { + revert InvalidSeraiKey(); + } + _nextSeraiKey = nextSeraiKeyVar; + emit NextSeraiKeySet(nonceUpdatedWith, nextSeraiKeyVar); + } + + /// @notice The constructor for the relayer + /// @param initialSeraiKey The initial key for Serai's Ethereum validators + constructor(bytes32 initialSeraiKey) { + // Nonces are incremented by 1 upon account creation, prior to any code execution, per EIP-161 + // This is incompatible with any networks which don't have their nonces start at 0 + _smartContractNonce = 1; + + // Set the next Serai key + _setNextSeraiKey(0, initialSeraiKey); + // Set the current Serai key to None + _seraiKey = bytes32(0); + + // We just consumed nonce 0 when setting the initial Serai key + _nextNonce = 1; + + // We haven't escaped to any address yet + _escapedTo = address(0); + } + + /** + * @dev Verify a signature of the calldata, placed immediately after the function selector. The + * calldata should be signed with the nonce taking the place of the signature's commitment to + * its nonce, and the signature solution zeroed. + */ + /// @param key The key to verify the signature with + function verifySignature(bytes32 key) + private + returns (uint256 nonceUsed, bytes memory message, bytes32 messageHash) + { + // If the escape hatch was triggered, reject further signatures + if (_escapedTo != address(0)) { + revert EscapeHatchInvoked(); + } + + /* + If this key isn't set, reject it. + + The Schnorr contract should already reject this public key yet it's best to be explicit. + */ + if (key == bytes32(0)) { + revert SeraiKeyWasNone(); + } + + message = msg.data; + uint256 messageLen = message.length; + /* + function selector, signature + + This check means we don't read memory, and as we attempt to clear portions, write past it + (triggering undefined behavior). + */ + if (messageLen < 68) { + revert InvalidSignature(); + } + + // Read _nextNonce into memory as the nonce we'll use + nonceUsed = _nextNonce; + + // Declare memory to copy the signature out to + bytes32 signatureC; + bytes32 signatureS; + + uint256 chainID = block.chainid; + // slither-disable-next-line assembly + assembly { + // Read the signature (placed after the function signature) + signatureC := mload(add(message, 36)) + signatureS := mload(add(message, 68)) + + // Overwrite the signature challenge with the chain ID + mstore(add(message, 36), chainID) + // Overwrite the signature response with the nonce + mstore(add(message, 68), nonceUsed) + + // Calculate the message hash + messageHash := keccak256(add(message, 32), messageLen) + } + + // Verify the signature + if (!Schnorr.verify(key, messageHash, signatureC, signatureS)) { + revert InvalidSignature(); + } + + // Set the next nonce + unchecked { + _nextNonce = nonceUsed + 1; + } + + /* + Advance the message past the function selector, enabling decoding the arguments. Ideally, we'd + also advance past the signature (to simplify decoding arguments and save some memory). 
This + would transfrom message from: + + message (pointer) + v + ------------------------------------------------------------ + | 32-byte length | 4-byte selector | Signature | Arguments | + ------------------------------------------------------------ + + to: + + message (pointer) + v + ---------------------------------------------- + | Junk 68 bytes | 32-byte length | Arguments | + ---------------------------------------------- + + Unfortunately, doing so corrupts the offsets defined within the ABI itself. We settle for a + transform to: + + message (pointer) + v + --------------------------------------------------------- + | Junk 4 bytes | 32-byte length | Signature | Arguments | + --------------------------------------------------------- + */ + // slither-disable-next-line assembly + assembly { + message := add(message, 4) + mstore(message, sub(messageLen, 4)) + } + } + + /// @notice Start updating the key representing Serai's Ethereum validators + /** + * @dev This does not validate the passed-in key as much as possible. This is accepted as the key + * won't actually be rotated to until it provides a signature confirming the update however + * (proving signatures can be made by the key in question and verified via our Schnorr + * contract). + * + * The hex bytes are to cause a collision with `IRouter.updateSeraiKey`. + */ + // @param signature The signature by the current key authorizing this update + // @param nextSeraiKey The key to update to + function updateSeraiKey5A8542A2() external { + (uint256 nonceUsed, bytes memory args,) = verifySignature(_seraiKey); + /* + We could replace this with a length check (if we don't simply assume the calldata is valid as + it was properly signed) + mload to save 24 gas but it's not worth the complexity. + */ + (,, bytes32 nextSeraiKeyVar) = abi.decode(args, (bytes32, bytes32, bytes32)); + _setNextSeraiKey(nonceUsed, nextSeraiKeyVar); + } + + /// @notice Confirm the next key representing Serai's Ethereum validators, updating to it + /// @dev The hex bytes are to cause a collision with `IRouter.confirmSeraiKey`. 
+ // @param signature The signature by the next key confirming its validity + function confirmNextSeraiKey34AC53AC() external { + // Checks + bytes32 nextSeraiKeyVar = _nextSeraiKey; + (uint256 nonceUsed,,) = verifySignature(nextSeraiKeyVar); + // Effects + _nextSeraiKey = bytes32(0); + _seraiKey = nextSeraiKeyVar; + emit SeraiKeyUpdated(nonceUsed, nextSeraiKeyVar); + } + + /// @notice Transfer coins into Serai with an instruction + /// @param coin The coin to transfer in (address(0) if Ether) + /// @param amount The amount to transfer in (msg.value if Ether) + /** + * @param instruction The Shorthand-encoded InInstruction for Serai to associate with this + * transfer in + */ + // This function doesn't require nonReentrant as re-entrancy isn't an issue with this function + // slither-disable-next-line reentrancy-events + function inInstruction(address coin, uint256 amount, bytes memory instruction) external payable { + // Check there is an active key + if (_seraiKey == bytes32(0)) { + revert SeraiKeyWasNone(); + } + + // Don't allow further InInstructions once the escape hatch has been invoked + if (_escapedTo != address(0)) { + revert EscapeHatchInvoked(); + } + + // Check the transfer + if (coin == address(0)) { + if (amount != msg.value) revert AmountMismatchesMsgValue(); + } else { + (bool success, bytes memory res) = address(coin).call( + abi.encodeWithSelector(IERC20.transferFrom.selector, msg.sender, address(this), amount) + ); + + /* + Require there was nothing returned, which is done by some non-standard tokens, or that the + ERC20 contract did in fact return true + */ + bool nonStandardResOrTrue = + (res.length == 0) || ((res.length == 32) && abi.decode(res, (bool))); + if (!(success && nonStandardResOrTrue)) revert TransferFromFailed(); + } + + /* + Due to fee-on-transfer tokens, emitting the amount directly is frowned upon. The amount + instructed to be transferred may not actually be the amount transferred. + + If we add nonReentrant to every single function which can effect the balance, we can check the + amount exactly matches. This prevents transfers of less value than expected occurring, at + least, not without an additional transfer to top up the difference (which isn't routed through + this contract and accordingly isn't trying to artificially create events from this contract). + + If we don't add nonReentrant, a transfer can be started, and then a new transfer for the + difference can follow it up (again and again until a rounding error is reached). This contract + would believe all transfers were done in full, despite each only being done in part (except + for the last one). + + Given fee-on-transfer tokens aren't intended to be supported, the only token actively planned + to be supported is Dai and it doesn't have any fee-on-transfer logic, and how fee-on-transfer + tokens aren't even able to be supported at this time by the larger Serai network, we simply + classify this entire class of tokens as non-standard implementations which induce undefined + behavior. + + It is the Serai network's role not to add support for any non-standard implementations. 
+ */ + emit InInstruction(msg.sender, coin, amount, instruction); + } + + /// @dev Perform an Ether/ERC20 transfer out + /// @param to The address to transfer the coins to + /// @param coin The coin to transfer (address(0) if Ether) + /// @param amount The amount of the coin to transfer + /// @param contractDestination If we're transferring to a contract we just deployed + /** + * @return success If the coins were successfully transferred out. For Ethereum, this is if the + * call succeeded. For the ERC20, it's if the call succeeded and returned true or nothing. + */ + // execute has this annotation yet this still flags (even when it doesn't have its own loop) + // slither-disable-next-line calls-loop + function transferOut(address to, address coin, uint256 amount, bool contractDestination) + private + returns (bool success) + { + if (coin == address(0)) { + // This uses assembly to prevent return bombs + // slither-disable-next-line assembly + assembly { + success := + call( + // explicit gas + 0, + to, + amount, + // calldata + 0, + 0, + // return data + 0, + 0 + ) + } + } else { + bytes4 selector; + if (contractDestination) { + /* + If this is an out of DestinationType::Contract, we only grant an approval. We don't + perform a transfer. This allows the contract, or our expectation of the contract as far as + our obligation to it, to be borked and for Serai to potentially it accordingly. + + Unfortunately, this isn't a feasible flow for Ether unless we set Ether approvals within + our contract (for entities to collect later) which is of sufficient complexity to not be + worth the effort. We also don't have the `CREATE` complexity when transferring Ether to + contracts we deploy. + */ + selector = IERC20.approve.selector; + } else { + /* + For non-contracts, we don't place the burden of the transferFrom flow and directly + transfer. + */ + selector = IERC20.transfer.selector; + } + + /* + `coin` is either signed (from `execute`) or called from `escape` (which can safely be + arbitrarily called). We accordingly don't need to be worried about return bombs here. + */ + // slither-disable-next-line return-bomb + (bool erc20Success, bytes memory res) = + address(coin).call{ gas: ERC20_GAS }(abi.encodeWithSelector(selector, to, amount)); + + /* + Require there was nothing returned, which is done by some non-standard tokens, or that the + ERC20 contract did in fact return true. + */ + // slither-disable-next-line incorrect-equality + bool nonStandardResOrTrue = + (res.length == 0) || ((res.length == 32) && abi.decode(res, (bool))); + success = erc20Success && nonStandardResOrTrue; + } + } + + /// @notice The header for an address, when encoded with RLP for the purposes of CREATE + /// @dev 0x80 + 20, shifted left 30 bytes + uint256 constant ADDRESS_HEADER = (0x80 + 20) << (30 * 8); + + /// @notice Calculate the next address which will be deployed to by CREATE + /** + * @dev While CREATE2 is preferable inside smart contracts, CREATE2 is fundamentally vulnerable to + * collisions. Our usage of CREATE forces an incremental nonce infeasible to brute force. While + * addresses are still variable to the Router address, the Router address itself is the product + * of an incremental nonce (the Deployer's). The Deployer's address is constant (generated via + * NUMS methods), finally ensuring the security of this. + * + * This is written to be constant-gas, allowing state-independent gas prediction. + * + * This has undefined behavior when `nonce` is zero (EIP-161 makes this irrelevant). 
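+   *
+   * As a worked example (informative only): for a Router at address `A` with `nonce = 1`, the
+   * RLP encoding of `[A, 1]` is the 23 bytes `0xd6 0x94 <the 20 bytes of A> 0x01`, where
+   * `0xd6 = 0xc0 + 22` is the list header and `0x94 = 0x80 + 20` is the address header. The
+   * deployed contract's address is then the low 20 bytes of `keccak256` of those 23 bytes.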
+ */ + /// @param nonce The nonce to use for CREATE + function createAddress(uint256 nonce) internal view returns (address) { + unchecked { + // The amount of bytes needed to represent the nonce + uint256 bitsNeeded = 0; + // This only iterates up to 64-bits as this will never exceed 2**64 as a matter of + // practicality + for (uint256 bits = 0; bits <= 64; bits += 8) { + bool valueFits = nonce < (uint256(1) << bits); + bool notPriorSet = bitsNeeded == 0; + // If the value fits, and the bits weren't prior set, we should set the bits now + uint256 shouldSet; + // slither-disable-next-line assembly + assembly { + shouldSet := and(valueFits, notPriorSet) + } + // Carry the existing bitsNeeded value, set bits if should set + bitsNeeded += (shouldSet * bits); + } + uint256 bytesNeeded = bitsNeeded / 8; + + // if the nonce is an RLP string or not + bool nonceIsNotStringBool = nonce <= 0x7f; + uint256 nonceIsNotString; + // slither-disable-next-line assembly + assembly { + nonceIsNotString := nonceIsNotStringBool + } + // slither-disable-next-line incorrect-exp This is meant to be a xor + uint256 nonceIsString = nonceIsNotString ^ 1; + + // Define the RLP length + // slither-disable-next-line divide-before-multiply + uint256 rlpEncodingLen = 23 + (nonceIsString * bytesNeeded); + + uint256 rlpEncoding = + // The header, which does not include itself in its length, shifted into the first byte + ((0xc0 + (rlpEncodingLen - 1)) << 248) + // The address header, which is constant + | ADDRESS_HEADER + // Shift the address from bytes 12 .. 32 to 2 .. 22 + | (uint256(uint160(address(this))) << 80) + // Shift the nonce (one byte) or the nonce's header from byte 31 to byte 22 + | (((nonceIsNotString * nonce) + (nonceIsString * (0x80 + bytesNeeded))) << 72) + // Shift past the unnecessary bytes + | (nonce * nonceIsString) << (72 - bitsNeeded); + + // Store this to the scratch space + bytes memory rlp; + // slither-disable-next-line assembly + assembly { + mstore(0, rlpEncodingLen) + mstore(32, rlpEncoding) + rlp := 0 + } + + return address(uint160(uint256(keccak256(rlp)))); + } + } + + /// @notice Execute some arbitrary code within a secure sandbox + /** + * @dev This performs sandboxing by deploying this code with `CREATE`. This is an external + * function as we can't meter `CREATE`/internal functions. We work around this by calling this + * function with `CALL` (which we can meter). This does forward `msg.value` to the newly + * deployed contract. + */ + /// @param code The code to execute + function executeArbitraryCode(bytes memory code) external payable { + /* + execute assumes that from the time it reads `_smartContractNonce` until the time it calls this + function, no mutations to it will occur. If any mutations could occur, it'd lead to a fault + where tokens could be sniped by: + + 1) An out occurring, transferring tokens to an about-to-be-deployed smart contract + 2) The token contract re-entering the Router to deploy a new smart contract which claims the + tokens + 3) The Router then deploying the intended smart contract (ignoring whatever result it may + have) + + This does assume a malicious token, or a token with callbacks which can be set by a malicious + adversary, yet the way to ensure it's a non-issue is to not allow other entities to mutate + `_smartContractNonce`. 
+ */ + if (msg.sender != address(this)) { + revert CodeNotBySelf(); + } + + // Because we're creating a contract, increment our nonce + _smartContractNonce += 1; + + uint256 msgValue = msg.value; + address contractAddress; + // We need to use assembly here because Solidity doesn't expose CREATE + // slither-disable-next-line assembly + assembly { + contractAddress := create(msgValue, add(code, 0x20), mload(code)) + } + } + + /// @notice Execute a batch of `OutInstruction`s + /** + * @dev All `OutInstruction`s in a batch are only for a single coin to simplify handling of the + * fee. + * + * The hex bytes are to cause a function selector collision with `IRouter.execute`. + * + * Re-entrancy is prevented because we emit a bitmask of which `OutInstruction`s succeeded. Doing + * that requires executing the `OutInstruction`s, which may re-enter here. While our application + * of CEI with `verifySignature` prevents replays, re-entrancy would allow out-of-order + * completion for the execution of batches (despite their in-order start of execution) which + * isn't a headache worth dealing with. + * + * Re-entrancy is also explicitly required due to how `_smartContractNonce` is handled. + */ + // @param signature The signature by the current key for Serai's Ethereum validators + // @param coin The coin all of these `OutInstruction`s are for + // @param fee The fee to pay (in coin) to the caller for their relaying of this batch + // @param outs The `OutInstruction`s to act on + // Each individual call is explicitly metered to ensure there isn't a DoS here + // slither-disable-next-line calls-loop,reentrancy-events + function execute4DE42904() external nonReentrant { + (uint256 nonceUsed, bytes memory args, bytes32 message) = verifySignature(_seraiKey); + (,, address coin, uint256 fee, IRouter.OutInstruction[] memory outs) = + abi.decode(args, (bytes32, bytes32, address, uint256, IRouter.OutInstruction[])); + + // Define a bitmask to store the results of all following `OutInstruction`s + bytes memory results = new bytes((outs.length + 7) / 8); + + // slither-disable-next-line reentrancy-events + for (uint256 i = 0; i < outs.length; i++) { + bool success = true; + + // If the destination is an address, we perform a direct transfer + if (outs[i].destinationType == IRouter.DestinationType.Address) { + /* + This may cause a revert if the destination isn't actually a valid address. Serai is + trusted to not pass a malformed destination, yet if it ever did, it could simply re-sign a + corrected batch using this nonce. + */ + address destination = abi.decode(outs[i].destination, (address)); + success = transferOut(destination, coin, outs[i].amount, false); + } else { + // Prepare the transfer + uint256 ethValue = 0; + if (coin == address(0)) { + // If it's Ether, we transfer the amount with the call + ethValue = outs[i].amount; + } else { + /* + If it's an ERC20, we calculate the address of the will-be contract and transfer to it + before deployment. This avoids needing to deploy the contract, then call transfer, then + call the contract again. + + We use CREATE, not CREATE2, despite the difficulty in calculating the address + in-contract, for reasons explained within `createAddress`'s documentation. + + If this is ever borked, the fact we only set an approval allows recovery. + */ + address nextAddress = createAddress(_smartContractNonce); + success = transferOut(nextAddress, coin, outs[i].amount, true); + } + + /* + If success is false, we presume it a fault with an ERC20, not with us, and move on. 
If we + reverted here, we'd halt the execution of every single batch (now and future). By simply + moving on, we may have reached some invariant with this specific ERC20, yet the project + entire isn't put into a halted state. + + Since the recipient is a fresh account, this presumably isn't the recipient being + blacklisted (the most likely invariant upon the integration of a popular, + otherwise-standard ERC20). That means there likely is some invariant with this integration + to be resolved later. Given our ability to sign new batches with the necessary + corrections, this is accepted. + */ + if (success) { + (IRouter.CodeDestination memory destination) = + abi.decode(outs[i].destination, (IRouter.CodeDestination)); + + /* + Perform the deployment with the defined gas budget. + + We don't care if the following call fails as we don't want to block/retry if it does. + Failures are considered the recipient's fault. We explicitly do not want the surface + area/inefficiency of caching these for later attempted retires. + + We don't have to worry about a return bomb here as this is our own function which + doesn't return any data. + */ + (success,) = address(this).call{ gas: destination.gasLimit, value: ethValue }( + abi.encodeWithSelector(Router.executeArbitraryCode.selector, destination.code) + ); + } + } + + if (success) { + results[i / 8] |= bytes1(uint8(1 << (i % 8))); + } + } + + /* + Emit batch execution with the status of all included events. + + This is an effect after interactions yet we have a reentrancy guard making this safe. + */ + emit Batch(nonceUsed, message, outs.length, results); + + // Transfer the fee to the relayer + transferOut(msg.sender, coin, fee, false); + } + + /// @notice Escapes to a new smart contract + /** + * @dev This should be used upon an invariant being reached or new functionality being needed. + * + * The hex bytes are to cause a collision with `IRouter.escapeHatch`. + */ + // @param signature The signature by the current key for Serai's Ethereum validators + // @param escapeTo The address to escape to + function escapeHatchDCDD91CC() external { + // Verify the signature + (uint256 nonceUsed, bytes memory args,) = verifySignature(_seraiKey); + + (,, address escapeTo) = abi.decode(args, (bytes32, bytes32, address)); + + if (escapeTo == address(0)) { + revert InvalidEscapeAddress(); + } + + /* + We could define the escape hatch as having its own confirmation flow, as new keys do, but new + contracts don't face all of the cryptographic concerns faced by new keys. New contracts also + would presumably be moved to after strict review, making the chance of specifying the wrong + contract incredibly unlikely. + + The only check performed accordingly (with no confirmation flow) is that the new contract is + in fact a contract. This is done to confirm the contract was successfully deployed on this + blockchain. + + This check is also comprehensive to the zero-address case, but this function doesn't have to + be perfectly optimized and it's better to explicitly handle that due to it being its own + invariant. + */ + { + bytes32 codehash = escapeTo.codehash; + if ((codehash == bytes32(0)) || (codehash == ACCOUNT_WITHOUT_CODE_CODEHASH)) { + revert EscapeAddressWasNotAContract(); + } + } + + /* + We want to define the escape hatch so coins here now, and latently received, can be forwarded. + If the last Serai key set could update the escape hatch, they could siphon off latently + received coins without penalty (if they update the escape hatch after unstaking). 
+ */ + if (_escapedTo != address(0)) { + revert EscapeHatchInvoked(); + } + + _escapedTo = escapeTo; + emit EscapeHatch(nonceUsed, escapeTo); + } + + /// @notice Escape coins after the escape hatch has been invoked + /// @param coin The coin to escape + // slither-disable-next-line reentrancy-events Out-of-order events aren't an issue here + function escape(address coin) external { + if (_escapedTo == address(0)) { + revert EscapeHatchNotInvoked(); + } + + // Fetch the amount to escape + uint256 amount = address(this).balance; + if (coin != address(0)) { + amount = IERC20(coin).balanceOf(address(this)); + } + + // Perform the transfer + // While this can be re-entered to try escaping our balance twice, the outer call will fail + /* + We don't flag the escape hatch as a contract destination, despite being a contract, as the + escape hatch's invocation is permanent. If the coins do not go through the escape hatch, they + will never go anywhere (ignoring any unspent approvals voided by this action). + */ + if (!transferOut(_escapedTo, coin, amount, false)) { + revert EscapeFailed(); + } + + // Since we successfully escaped this amount, emit the event for it + emit Escaped(coin, amount); + } + + /// @notice Fetch the next nonce to use by an action published to this contract + /// return The next nonce to use by an action published to this contract + function nextNonce() external view returns (uint256) { + return _nextNonce; + } + + /// @notice Fetch the next key for Serai's Ethereum validator set + /// @return The next key for Serai's Ethereum validator set or bytes32(0) if none is currently set + function nextSeraiKey() external view returns (bytes32) { + return _nextSeraiKey; + } + + /// @notice Fetch the current key for Serai's Ethereum validator set + /** + * @return The current key for Serai's Ethereum validator set or bytes32(0) if none is currently + * set + */ + function seraiKey() external view returns (bytes32) { + return _seraiKey; + } + + /// @notice Fetch the address escaped to + /// @return The address which was escaped to (address(0) if the escape hatch hasn't been invoked) + function escapedTo() external view returns (address) { + return _escapedTo; + } +} + +// slither-disable-end low-level-calls,unchecked-lowlevel diff --git a/processor/ethereum/router/contracts/tests/CreateAddress.sol b/processor/ethereum/router/contracts/tests/CreateAddress.sol new file mode 100644 index 00000000..6aa57629 --- /dev/null +++ b/processor/ethereum/router/contracts/tests/CreateAddress.sol @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: AGPL-3.0-only +pragma solidity ^0.8.26; + +import "Router.sol"; + +// Wrap the Router with a contract which exposes the createAddress function +contract CreateAddress is Router { + constructor() Router(bytes32(uint256(1))) { } + + function createAddressForSelf(uint256 nonce) external returns (address) { + return Router.createAddress(nonce); + } +} diff --git a/networks/ethereum/src/tests/contracts/ERC20.sol b/processor/ethereum/router/contracts/tests/ERC20.sol similarity index 69% rename from networks/ethereum/src/tests/contracts/ERC20.sol rename to processor/ethereum/router/contracts/tests/ERC20.sol index e157974c..f10ac0cd 100644 --- a/networks/ethereum/src/tests/contracts/ERC20.sol +++ b/processor/ethereum/router/contracts/tests/ERC20.sol @@ -1,5 +1,5 @@ -// SPDX-License-Identifier: AGPLv3 -pragma solidity ^0.8.0; +// SPDX-License-Identifier: AGPL-3.0-only +pragma solidity ^0.8.26; contract TestERC20 { event Transfer(address indexed from, address indexed to, 
uint256 value); @@ -8,44 +8,57 @@ contract TestERC20 { function name() public pure returns (string memory) { return "Test ERC20"; } + function symbol() public pure returns (string memory) { return "TEST"; } + function decimals() public pure returns (uint8) { return 18; } - function totalSupply() public pure returns (uint256) { - return 1_000_000 * 10e18; - } + uint256 public totalSupply; mapping(address => uint256) balances; mapping(address => mapping(address => uint256)) allowances; - constructor() { - balances[msg.sender] = totalSupply(); - } - function balanceOf(address owner) public view returns (uint256) { return balances[owner]; } + function transfer(address to, uint256 value) public returns (bool) { balances[msg.sender] -= value; balances[to] += value; + emit Transfer(msg.sender, to, value); return true; } + function transferFrom(address from, address to, uint256 value) public returns (bool) { allowances[from][msg.sender] -= value; balances[from] -= value; balances[to] += value; + emit Transfer(from, to, value); return true; } function approve(address spender, uint256 value) public returns (bool) { allowances[msg.sender][spender] = value; + emit Approval(msg.sender, spender, value); return true; } + function allowance(address owner, address spender) public view returns (uint256) { return allowances[owner][spender]; } + + function mint(address owner, uint256 value) external { + balances[owner] += value; + totalSupply += value; + emit Transfer(address(0), owner, value); + } + + function magicApprove(address owner, address spender, uint256 value) external { + allowances[owner][spender] = value; + emit Approval(owner, spender, value); + } } diff --git a/processor/ethereum/router/contracts/tests/Reentrancy.sol b/processor/ethereum/router/contracts/tests/Reentrancy.sol new file mode 100644 index 00000000..979fd74d --- /dev/null +++ b/processor/ethereum/router/contracts/tests/Reentrancy.sol @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: AGPL-3.0-only +pragma solidity ^0.8.26; + +import "Router.sol"; + +// This inherits from the Router for visibility over Reentered +contract Reentrancy { + error Reentered(); + + constructor() { + (bool success, bytes memory res) = + msg.sender.call(abi.encodeWithSelector(Router.execute4DE42904.selector, "")); + require(!success); + // We can't compare `bytes memory` so we hash them and compare the hashes + require(keccak256(res) == keccak256(abi.encode(Reentered.selector))); + } +} diff --git a/processor/ethereum/router/src/gas.rs b/processor/ethereum/router/src/gas.rs new file mode 100644 index 00000000..266ad586 --- /dev/null +++ b/processor/ethereum/router/src/gas.rs @@ -0,0 +1,358 @@ +use k256::{Scalar, ProjectivePoint}; + +use alloy_core::primitives::{Address, U160, U256}; +use alloy_sol_types::SolCall; + +use revm::{ + primitives::*, + interpreter::{gas::*, opcode::InstructionTables, *}, + db::{emptydb::EmptyDB, in_memory_db::InMemoryDB}, + Handler, Context, EvmBuilder, Evm, +}; + +use ethereum_schnorr::{PublicKey, Signature}; + +use crate::*; + +// The chain ID used for gas estimation +const CHAIN_ID: U256 = U256::from_be_slice(&[1]); + +/// The object used for estimating gas. +/// +/// Due to `execute` heavily branching, we locally simulate calls with revm. 
+pub(crate) type GasEstimator = Evm<'static, (), InMemoryDB>;
+
+impl Router {
+  const SMART_CONTRACT_NONCE_STORAGE_SLOT: U256 = U256::from_be_slice(&[0]);
+  const NONCE_STORAGE_SLOT: U256 = U256::from_be_slice(&[1]);
+  const SERAI_KEY_STORAGE_SLOT: U256 = U256::from_be_slice(&[3]);
+
+  // Gas allocated for ERC20 calls
+  #[cfg(test)]
+  pub(crate) const GAS_FOR_ERC20_CALL: u64 = 100_000;
+
+  /*
+    The gas limits to use for non-Execute transactions.
+
+    These don't branch on the success path, allowing constants to be used outright. These
+    constants target the Cancun network upgrade and are validated by the tests.
+
+    While whoever publishes these transactions may be able to query a gas estimate, it may not be
+    reasonable to do so. If the signing context is a distributed group, as Serai frequently
+    employs, a non-deterministic gas (such as estimates from the local nodes) would require a
+    consensus protocol to determine which to use.
+
+    These gas limits may break if/when gas opcodes undergo repricing. In that case, this library is
+    expected to be modified so these constants become parameters. The caller would then be expected
+    to pass the correct set of prices for the network they're operating on.
+  */
+  /// The gas used by `confirmNextSeraiKey`.
+  pub const CONFIRM_NEXT_SERAI_KEY_GAS: u64 = 57_736;
+  /// The gas used by `updateSeraiKey`.
+  pub const UPDATE_SERAI_KEY_GAS: u64 = 60_045;
+  /// The gas used by `escapeHatch`.
+  pub const ESCAPE_HATCH_GAS: u64 = 61_094;
+
+  /// The key to use when performing gas estimations.
+  ///
+  /// There has to be a key to verify the signatures of the messages signed.
+  fn gas_estimation_key() -> (Scalar, PublicKey) {
+    (Scalar::ONE, PublicKey::new(ProjectivePoint::GENERATOR).unwrap())
+  }
+
+  pub(crate) fn gas_estimator(&self, erc20: Option<Address>
) -> GasEstimator { + // The DB to use + let db = { + const BYTECODE: &[u8] = { + const BYTECODE_HEX: &[u8] = include_bytes!(concat!( + env!("OUT_DIR"), + "/serai-processor-ethereum-router/Router.bin-runtime" + )); + const BYTECODE: [u8; BYTECODE_HEX.len() / 2] = + match hex::const_decode_to_array::<{ BYTECODE_HEX.len() / 2 }>(BYTECODE_HEX) { + Ok(bytecode) => bytecode, + Err(_) => panic!("Router.bin-runtime did not contain valid hex"), + }; + &BYTECODE + }; + let bytecode = Bytecode::new_legacy(Bytes::from_static(BYTECODE)); + + let mut db = InMemoryDB::new(EmptyDB::new()); + // Insert the Router into the state + db.insert_account_info( + self.address, + AccountInfo { + balance: U256::from(0), + // Per EIP-161 + nonce: 1, + code_hash: bytecode.hash_slow(), + code: Some(bytecode), + }, + ); + + // Insert the value for _smartContractNonce set in the constructor + // All operations w.r.t. execute in constant-time, making the actual value irrelevant + db.insert_account_storage( + self.address, + Self::SMART_CONTRACT_NONCE_STORAGE_SLOT, + U256::from(1), + ) + .unwrap(); + + // Insert a non-zero nonce, as the zero nonce will update to the initial key and never be + // used for any gas estimations of `execute`, the only function estimated + db.insert_account_storage(self.address, Self::NONCE_STORAGE_SLOT, U256::from(1)).unwrap(); + + // Insert the public key to verify with + db.insert_account_storage( + self.address, + Self::SERAI_KEY_STORAGE_SLOT, + U256::from_be_bytes(Self::gas_estimation_key().1.eth_repr()), + ) + .unwrap(); + + db + }; + + // Create a custom handler so we can assume every CALL is the worst-case + let handler = { + let mut instructions = InstructionTables::<'_, _>::new_plain::(); + instructions.update_boxed(revm::interpreter::opcode::CALL, { + move |call_op, interpreter, host: &mut Context<_, _>| { + let (address_called, value, return_addr, return_len) = { + let stack = &mut interpreter.stack; + + let address = stack.peek(1).unwrap(); + let value = stack.peek(2).unwrap(); + let return_addr = stack.peek(5).unwrap(); + let return_len = stack.peek(6).unwrap(); + + ( + address, + value, + usize::try_from(return_addr).unwrap(), + usize::try_from(return_len).unwrap(), + ) + }; + let address_called = + Address::from(U160::from_be_slice(&address_called.to_be_bytes::<32>()[12 ..])); + + // Have the original call op incur its costs as programmed + call_op(interpreter, host); + + /* + Unfortunately, the call opcode executed only sets itself up, it doesn't handle the + entire inner call for us. We manually do so here by shimming the intended result. The + other option, on this path chosen, would be to shim the call-frame execution ourselves + and only then manipulate the result. + + Ideally, we wouldn't override CALL, yet STOP/RETURN (the tail of the CALL) to avoid all + of this. Those overrides weren't being successfully hit in initial experiments, and + while this solution does appear overly complicated, it's sufficiently tested to justify + itself. + + revm does cost the entire gas limit during the call setup. After the call completes, + it refunds whatever was unused. Since we manually complete the call here ourselves, + but don't implement that refund logic as we want the worst-case scenario, we do + successfully implement complete costing of the gas limit. 
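+
+            In short: with this override, every CALL is costed at its full gas limit, performs its
+            value transfer (warming the callee), and reports success with worst-case return data
+            (`true` for the ERC20, nothing otherwise), without the callee's code ever running.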
+ */ + + // Perform the call value transfer, which also marks the recipient as warm + assert!(host + .evm + .inner + .journaled_state + .transfer( + &interpreter.contract.target_address, + &address_called, + value, + &mut host.evm.inner.db + ) + .unwrap() + .is_none()); + + // Clear the call-to-be + debug_assert!(matches!(interpreter.next_action, InterpreterAction::Call { .. })); + interpreter.next_action = InterpreterAction::None; + interpreter.instruction_result = InstructionResult::Continue; + + // Clear the existing return data + interpreter.return_data_buffer.clear(); + + /* + If calling an ERC20, trigger the return data's worst-case by returning `true` + (as expected by compliant ERC20s). Else return none, as we expect none or won't bother + copying/decoding the return data. + + This doesn't affect calls to ecrecover as those use STATICCALL and this overrides CALL + alone. + */ + if Some(address_called) == erc20 { + interpreter.return_data_buffer = true.abi_encode().into(); + } + // Also copy the return data into memory + let return_len = return_len.min(interpreter.return_data_buffer.len()); + let needed_memory_size = return_addr + return_len; + if interpreter.shared_memory.len() < needed_memory_size { + assert!(interpreter.resize_memory(needed_memory_size)); + } + interpreter + .shared_memory + .slice_mut(return_addr, return_len) + .copy_from_slice(&interpreter.return_data_buffer[.. return_len]); + + // Finally, push the result of the call onto the stack + interpreter.stack.push(U256::from(1)).unwrap(); + } + }); + let mut handler = Handler::mainnet::(); + handler.set_instruction_table(instructions); + + handler + }; + + EvmBuilder::default() + .with_db(db) + .with_handler(handler) + .modify_cfg_env(|cfg| { + cfg.chain_id = CHAIN_ID.try_into().unwrap(); + }) + .modify_tx_env(|tx| { + tx.gas_limit = u64::MAX; + tx.transact_to = self.address.into(); + }) + .build() + } + + /// The worst-case gas cost for a legacy transaction which executes this batch. + /// + /// This assumes the fee will be non-zero. 
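+  ///
+  /// A minimal usage sketch (illustrative only: `router`, `coin`, `fee_per_gas`, `outs`, and a
+  /// `sig` over the corresponding `execute_message` are assumed to already exist):
+  ///
+  /// ```ignore
+  /// // The worst-case gas for this batch at the given fee per gas
+  /// let gas = router.execute_gas(coin, fee_per_gas, &outs);
+  /// // The fee repaid to the relayer, denominated in the batch's coin
+  /// let fee = fee_per_gas * U256::from(gas);
+  /// // Build the transaction, then set the gas fields left to the caller
+  /// let mut tx = router.execute(coin, fee, outs, &sig);
+  /// tx.gas_limit = gas;
+  /// ```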
+ pub fn execute_gas(&self, coin: Coin, fee_per_gas: U256, outs: &OutInstructions) -> u64 { + // Unfortunately, we can't cache this in self, despite the following code being written such + // that a common EVM instance could be used, as revm's types aren't Send/Sync and we expect the + // Router to be send/sync + let mut gas_estimator = self.gas_estimator(match coin { + Coin::Ether => None, + Coin::Erc20(erc20) => Some(erc20), + }); + + let fee = match coin { + Coin::Ether => { + // Use a fee of 1 so the fee payment is recognized as positive-value + let fee = U256::from(1); + + // Set a balance of the amount sent out to ensure we don't error on that premise + { + let db = gas_estimator.db_mut(); + let account = db.load_account(self.address).unwrap(); + account.info.balance = fee + outs.0.iter().map(|out| out.amount).sum::(); + } + + fee + } + Coin::Erc20(_) => U256::from(0), + }; + + // Sign a dummy signature + let (private_key, public_key) = Self::gas_estimation_key(); + let c = Signature::challenge( + // Use a nonce of 1 + ProjectivePoint::GENERATOR, + &public_key, + &Self::execute_message(CHAIN_ID, 1, coin, fee, outs.clone()), + ); + let s = Scalar::ONE + (c * private_key); + let sig = Signature::new(c, s).unwrap(); + + // Write the current transaction + /* + revm has poor documentation on if the EVM instance can be dirtied, which would be the concern + if we shared a mutable reference to a singular instance across invocations, but our + consistent use of nonce #1 shows storage read/writes aren't being persisted. They're solely + returned upon execution in a `state` field we ignore. + */ + { + let tx = gas_estimator.tx_mut(); + tx.caller = Address::from({ + /* + We assume the transaction sender is not the destination of any `OutInstruction`, making + all transfers to destinations cold. A malicious adversary could create an + `OutInstruction` whose destination is the caller stubbed here, however, to make us + under-estimate. + + We prevent this by defining the caller as the hash of the `OutInstruction`s, forcing a + hash collision to cause an `OutInstruction` destination to be warm when it wasn't warmed + by either being the Router, being the ERC20, or by being the destination of a distinct + `OutInstruction`. All of those cases will affect the gas used in reality accordingly. + */ + let hash = ethereum_primitives::keccak256(outs.0.abi_encode()); + <[u8; 20]>::try_from(&hash[12 ..]).unwrap() + }); + tx.data = abi::executeCall::new(( + abi::Signature::from(&sig), + Address::from(coin), + fee, + outs.0.clone(), + )) + .abi_encode() + .into(); + } + + // Execute the transaction + let mut gas = match gas_estimator.transact().unwrap().result { + ExecutionResult::Success { gas_used, gas_refunded, .. } => { + assert_eq!(gas_refunded, 0); + gas_used + } + res => panic!("estimated execute transaction failed: {res:?}"), + }; + + // The transaction uses gas based on the amount of non-zero bytes in the calldata, which is + // variable to the fee, which is variable to the gad used. 
This iterates until parity + let initial_gas = |fee, sig| { + let gas = calculate_initial_tx_gas( + SpecId::CANCUN, + &abi::executeCall::new((sig, Address::from(coin), fee, outs.0.clone())).abi_encode(), + false, + &[], + 0, + ); + assert_eq!(gas.floor_gas, 0); + gas.initial_gas + }; + let mut current_initial_gas = initial_gas(fee, abi::Signature::from(&sig)); + loop { + let fee = fee_per_gas * U256::from(gas); + let new_initial_gas = + initial_gas(fee, abi::Signature { c: [0xff; 32].into(), s: [0xff; 32].into() }); + if current_initial_gas >= new_initial_gas { + return gas; + } + + gas += new_initial_gas - current_initial_gas; + current_initial_gas = new_initial_gas; + } + } + + /// The estimated fee for this `OutInstruction`. + /// + /// This does not model the quadratic costs incurred when in a batch, nor other misc costs such + /// as the potential to cause one less zero byte in the fee's encoding. This is intended to + /// produce a per-`OutInstruction` fee to deduct from each `OutInstruction`, before all + /// `OutInstruction`s incur an amortized fee of what remains for the batch itself. + pub fn execute_out_instruction_gas_estimate( + &mut self, + coin: Coin, + instruction: abi::OutInstruction, + ) -> u64 { + #[allow(clippy::map_entry)] // clippy doesn't realize the multiple mutable borrows + if !self.empty_execute_gas.contains_key(&coin) { + // This can't be de-duplicated across ERC20s due to the zero bytes in the address + let gas = self.execute_gas(coin, U256::from(0), &OutInstructions(vec![])); + self.empty_execute_gas.insert(coin, gas); + } + + let gas = self.execute_gas(coin, U256::from(0), &OutInstructions(vec![instruction])); + gas - self.empty_execute_gas[&coin] + } +} diff --git a/processor/ethereum/router/src/lib.rs b/processor/ethereum/router/src/lib.rs new file mode 100644 index 00000000..f052763e --- /dev/null +++ b/processor/ethereum/router/src/lib.rs @@ -0,0 +1,803 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::ops::RangeInclusive; +use std::{ + sync::Arc, + collections::{HashSet, HashMap}, +}; + +use borsh::{BorshSerialize, BorshDeserialize}; + +use group::ff::PrimeField; + +use alloy_core::primitives::{hex, Address, U256, TxKind}; +use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent}; + +use alloy_consensus::TxLegacy; + +use alloy_rpc_types_eth::{BlockId, Log, Filter, TransactionInput, TransactionRequest}; +use alloy_transport::{TransportErrorKind, RpcError}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +use scale::Encode; +use serai_client::{ + in_instructions::primitives::Shorthand, networks::ethereum::Address as SeraiAddress, +}; + +use ethereum_primitives::LogIndex; +use ethereum_schnorr::{PublicKey, Signature}; +use ethereum_deployer::Deployer; +use erc20::{Transfer, TopLevelTransfer, TopLevelTransfers, Erc20}; + +use futures_util::stream::{StreamExt, FuturesUnordered}; + +#[rustfmt::skip] +#[expect(warnings)] +#[expect(needless_pass_by_value)] +#[expect(clippy::all)] +#[expect(clippy::ignored_unit_patterns)] +#[expect(clippy::redundant_closure_for_method_calls)] +mod _irouter_abi { + alloy_sol_macro::sol!("contracts/IRouter.sol"); +} + +#[rustfmt::skip] +#[expect(warnings)] +#[expect(needless_pass_by_value)] +#[expect(clippy::all)] +#[expect(clippy::ignored_unit_patterns)] +#[expect(clippy::redundant_closure_for_method_calls)] +mod _router_abi { + include!(concat!(env!("OUT_DIR"), 
"/serai-processor-ethereum-router/router.rs")); +} + +mod abi { + pub use super::_router_abi::IRouterWithoutCollisions::*; + pub use super::_router_abi::IRouter::*; + pub use super::_router_abi::Router::constructorCall; +} +use abi::{ + NextSeraiKeySet as NextSeraiKeySetEvent, SeraiKeyUpdated as SeraiKeyUpdatedEvent, + InInstruction as InInstructionEvent, Batch as BatchEvent, EscapeHatch as EscapeHatchEvent, + Escaped as EscapedEvent, +}; + +mod gas; + +#[cfg(test)] +mod tests; + +impl From<&Signature> for abi::Signature { + fn from(signature: &Signature) -> Self { + Self { + c: <[u8; 32]>::from(signature.c().to_repr()).into(), + s: <[u8; 32]>::from(signature.s().to_repr()).into(), + } + } +} + +/// A coin on Ethereum. +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)] +pub enum Coin { + /// Ether, the native coin of Ethereum. + Ether, + /// An ERC20 token. + Erc20( + #[borsh( + serialize_with = "ethereum_primitives::serialize_address", + deserialize_with = "ethereum_primitives::deserialize_address" + )] + Address, + ), +} +impl From for Address { + fn from(coin: Coin) -> Address { + match coin { + Coin::Ether => Address::ZERO, + Coin::Erc20(address) => address, + } + } +} +impl From
for Coin { + fn from(address: Address) -> Coin { + if address == Address::ZERO { + Coin::Ether + } else { + Coin::Erc20(address) + } + } +} + +/// An InInstruction from the Router. +#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] +pub struct InInstruction { + /// The ID for this `InInstruction`. + pub id: LogIndex, + /// The hash of the transaction which caused this. + pub transaction_hash: [u8; 32], + /// The address which transferred these coins to Serai. + #[borsh( + serialize_with = "ethereum_primitives::serialize_address", + deserialize_with = "ethereum_primitives::deserialize_address" + )] + pub from: Address, + /// The coin transferred. + pub coin: Coin, + /// The amount transferred. + #[borsh( + serialize_with = "ethereum_primitives::serialize_u256", + deserialize_with = "ethereum_primitives::deserialize_u256" + )] + pub amount: U256, + /// The data associated with the transfer. + pub data: Vec, +} + +impl From<&(SeraiAddress, U256)> for abi::OutInstruction { + fn from((address, amount): &(SeraiAddress, U256)) -> Self { + #[allow(non_snake_case)] + let (destinationType, destination) = match address { + SeraiAddress::Address(address) => { + // Per the documentation, `DestinationType::Address`'s value is an ABI-encoded address + (abi::DestinationType::Address, (Address::from(address)).abi_encode()) + } + SeraiAddress::Contract(contract) => ( + abi::DestinationType::Code, + (abi::CodeDestination { + gasLimit: contract.gas_limit(), + code: contract.code().to_vec().into(), + }) + .abi_encode(), + ), + }; + abi::OutInstruction { destinationType, destination: destination.into(), amount: *amount } + } +} + +/// A list of `OutInstruction`s. +#[derive(Clone)] +pub struct OutInstructions(Vec); +impl From<&[(SeraiAddress, U256)]> for OutInstructions { + fn from(outs: &[(SeraiAddress, U256)]) -> Self { + Self(outs.iter().map(Into::into).collect()) + } +} + +/// An action which was executed by the Router. +#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] +pub enum Executed { + /// Next key was set. + NextSeraiKeySet { + /// The nonce this was done with. + nonce: u64, + /// The key set. + key: [u8; 32], + }, + /// The next key was updated to. + SeraiKeyUpdated { + /// The nonce this was done with. + nonce: u64, + /// The key set. + key: [u8; 32], + }, + /// Executed batch of `OutInstruction`s. + Batch { + /// The nonce this was done with. + nonce: u64, + /// The hash of the signed message for the Batch executed. + message_hash: [u8; 32], + /// The results of the `OutInstruction`s executed. + results: Vec, + }, + /// The escape hatch was set. + EscapeHatch { + /// The nonce this was done with. + nonce: u64, + /// The address set to escape to. + #[borsh( + serialize_with = "ethereum_primitives::serialize_address", + deserialize_with = "ethereum_primitives::deserialize_address" + )] + escape_to: Address, + }, +} + +impl Executed { + /// The nonce consumed by this executed event. + /// + /// This is a `u64` despite the contract defining the nonce as a `u256`. Since the nonce is + /// incremental, the u64 will never be exhausted. + pub fn nonce(&self) -> u64 { + match self { + Executed::NextSeraiKeySet { nonce, .. } | + Executed::SeraiKeyUpdated { nonce, .. } | + Executed::Batch { nonce, .. } | + Executed::EscapeHatch { nonce, .. } => *nonce, + } + } +} + +/// An Escape from the Router. +#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] +pub struct Escape { + /// The coin escaped. + pub coin: Coin, + /// The amount escaped. 
+ #[borsh( + serialize_with = "ethereum_primitives::serialize_u256", + deserialize_with = "ethereum_primitives::deserialize_u256" + )] + pub amount: U256, +} + +/// A view of the Router for Serai. +#[derive(Clone, Debug)] +pub struct Router { + provider: Arc>, + address: Address, + empty_execute_gas: HashMap, +} +impl Router { + fn init_code(key: &PublicKey) -> Vec { + const INITCODE: &[u8] = { + const INITCODE_HEX: &[u8] = + include_bytes!(concat!(env!("OUT_DIR"), "/serai-processor-ethereum-router/Router.bin")); + const INITCODE: [u8; INITCODE_HEX.len() / 2] = + match hex::const_decode_to_array::<{ INITCODE_HEX.len() / 2 }>(INITCODE_HEX) { + Ok(bytecode) => bytecode, + Err(_) => panic!("Router.bin did not contain valid hex"), + }; + &INITCODE + }; + + // Append the constructor arguments + let mut initcode = INITCODE.to_vec(); + initcode.extend((abi::constructorCall { initialSeraiKey: key.eth_repr().into() }).abi_encode()); + initcode + } + + /// Obtain the transaction to deploy this contract. + /// + /// This transaction assumes the `Deployer` has already been deployed. The gas limit and gas + /// price are not set and are left to the caller. + pub fn deployment_tx(initial_serai_key: &PublicKey) -> TxLegacy { + Deployer::deploy_tx(Self::init_code(initial_serai_key)) + } + + /// Create a new view of the Router. + /// + /// This performs an on-chain lookup for the first deployed Router constructed with this public + /// key. This lookup is of a constant amount of calls and does not read any logs. + pub async fn new( + provider: Arc>, + initial_serai_key: &PublicKey, + ) -> Result, RpcError> { + let Some(deployer) = Deployer::new(provider.clone()).await? else { + return Ok(None); + }; + let Some(address) = deployer + .find_deployment(ethereum_primitives::keccak256(Self::init_code(initial_serai_key))) + .await? + else { + return Ok(None); + }; + Ok(Some(Self { provider, address, empty_execute_gas: HashMap::new() })) + } + + /// The address of the router. + pub fn address(&self) -> Address { + self.address + } + + /// Get the message to be signed in order to confirm the next key for Serai. + pub fn confirm_next_serai_key_message(chain_id: U256, nonce: u64) -> Vec { + abi::confirmNextSeraiKeyCall::new((abi::Signature { + c: chain_id.into(), + s: U256::try_from(nonce).unwrap().into(), + },)) + .abi_encode() + } + + /// Construct a transaction to confirm the next key representing Serai. + /// + /// The gas limit and gas price are not set and are left to the caller. + pub fn confirm_next_serai_key(&self, sig: &Signature) -> TxLegacy { + TxLegacy { + to: TxKind::Call(self.address), + input: abi::confirmNextSeraiKeyCall::new((abi::Signature::from(sig),)).abi_encode().into(), + ..Default::default() + } + } + + /// Get the message to be signed in order to update the key for Serai. + pub fn update_serai_key_message(chain_id: U256, nonce: u64, key: &PublicKey) -> Vec { + abi::updateSeraiKeyCall::new(( + abi::Signature { c: chain_id.into(), s: U256::try_from(nonce).unwrap().into() }, + key.eth_repr().into(), + )) + .abi_encode() + } + + /// Construct a transaction to update the key representing Serai. + /// + /// The gas limit and gas price are not set and are left to the caller. 
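+  ///
+  /// A rotation sketch (illustrative only; `sig` is the current key's signature over the returned
+  /// message, produced out of band by Serai's validators):
+  ///
+  /// ```ignore
+  /// // The message the current key signs to set `new_key` as the next key
+  /// let msg = Router::update_serai_key_message(chain_id, nonce, &new_key);
+  /// let mut tx = router.update_serai_key(&new_key, &sig);
+  /// // The caller sets the gas fields, e.g. the validated constant plus a small margin
+  /// tx.gas_limit = Router::UPDATE_SERAI_KEY_GAS + 5_000;
+  /// ```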
+ pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy { + TxLegacy { + to: TxKind::Call(self.address), + input: abi::updateSeraiKeyCall::new(( + abi::Signature::from(sig), + public_key.eth_repr().into(), + )) + .abi_encode() + .into(), + ..Default::default() + } + } + + /// Construct a transaction to send coins with an InInstruction to Serai. + /// + /// If coin is an ERC20, this will not create a transaction calling the Router but will create a + /// top-level transfer of the ERC20 to the Router. This avoids needing to call `approve` before + /// publishing the transaction calling the Router. + /// + /// The gas limit and gas price are not set and are left to the caller. + pub fn in_instruction(&self, coin: Coin, amount: U256, in_instruction: &Shorthand) -> TxLegacy { + match coin { + Coin::Ether => TxLegacy { + to: self.address.into(), + input: abi::inInstructionCall::new((coin.into(), amount, in_instruction.encode().into())) + .abi_encode() + .into(), + value: amount, + ..Default::default() + }, + Coin::Erc20(erc20) => TxLegacy { + to: erc20.into(), + input: erc20::transferWithInInstructionCall::new(( + self.address, + amount, + in_instruction.encode().into(), + )) + .abi_encode() + .into(), + ..Default::default() + }, + } + } + + /// Get the message to be signed in order to execute a series of `OutInstruction`s. + pub fn execute_message( + chain_id: U256, + nonce: u64, + coin: Coin, + fee: U256, + outs: OutInstructions, + ) -> Vec { + abi::executeCall::new(( + abi::Signature { c: chain_id.into(), s: U256::try_from(nonce).unwrap().into() }, + Address::from(coin), + fee, + outs.0, + )) + .abi_encode() + } + + /// Construct a transaction to execute a batch of `OutInstruction`s. + /// + /// The gas limit and gas price are not set and are left to the caller. + pub fn execute(&self, coin: Coin, fee: U256, outs: OutInstructions, sig: &Signature) -> TxLegacy { + TxLegacy { + to: TxKind::Call(self.address), + input: abi::executeCall::new((abi::Signature::from(sig), Address::from(coin), fee, outs.0)) + .abi_encode() + .into(), + ..Default::default() + } + } + + /// Get the message to be signed in order to trigger the escape hatch. + pub fn escape_hatch_message(chain_id: U256, nonce: u64, escape_to: Address) -> Vec { + abi::escapeHatchCall::new(( + abi::Signature { c: chain_id.into(), s: U256::try_from(nonce).unwrap().into() }, + escape_to, + )) + .abi_encode() + } + + /// Construct a transaction to trigger the escape hatch. + /// + /// The gas limit and gas price are not set and are left to the caller. + pub fn escape_hatch(&self, escape_to: Address, sig: &Signature) -> TxLegacy { + TxLegacy { + to: TxKind::Call(self.address), + input: abi::escapeHatchCall::new((abi::Signature::from(sig), escape_to)).abi_encode().into(), + ..Default::default() + } + } + + /// Construct a transaction to escape coins via the escape hatch. + /// + /// The gas limit and gas price are not set and are left to the caller. + pub fn escape(&self, coin: Coin) -> TxLegacy { + TxLegacy { + to: TxKind::Call(self.address), + input: abi::escapeCall::new((Address::from(coin),)).abi_encode().into(), + ..Default::default() + } + } + + /// Fetch the `InInstruction`s for the Router for the specified inclusive range of blocks. + /// + /// This includes all `InInstruction` events from the Router and all top-level transfers to the + /// Router. + /// + /// This is not guaranteed to return them in any order. 
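+  ///
+  /// A scanning sketch (illustrative only; `router`, the block range, and the `dai` address are
+  /// assumed, and `process` is a hypothetical handler):
+  ///
+  /// ```ignore
+  /// let allowed_erc20s = HashSet::from([dai]);
+  /// // Scan an inclusive range of blocks for transfers into Serai
+  /// for in_instruction in
+  ///   router.in_instructions_unordered(start_block ..= end_block, &allowed_erc20s).await?
+  /// {
+  ///   process(in_instruction);
+  /// }
+  /// ```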
+  pub async fn in_instructions_unordered(
+    &self,
+    blocks: RangeInclusive<u64>,
+    allowed_erc20s: &HashSet<Address>
, + ) -> Result, RpcError> { + // The InInstruction events for this block + let in_instruction_logs = { + // https://github.com/rust-lang/rust/issues/27186 + let filter = Filter::new().select(blocks.clone()).address(self.address); + let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH); + self.provider.get_logs(&filter).await? + }; + + // Define the Vec for the result now that we have the logs as a size hint + let mut in_instructions = Vec::with_capacity(in_instruction_logs.len()); + + // Handle the top-level transfers for this block + let mut justifying_erc20_transfer_logs = HashSet::new(); + let erc20_transfer_logs = { + let mut transfers = FuturesUnordered::new(); + for erc20 in allowed_erc20s { + transfers.push({ + // https://github.com/rust-lang/rust/issues/27186 + let blocks: RangeInclusive = blocks.clone(); + async move { + let transfers = + Erc20::top_level_transfers_unordered(&self.provider, blocks, *erc20, self.address) + .await; + (erc20, transfers) + } + }); + } + + let mut logs = HashMap::with_capacity(allowed_erc20s.len()); + while let Some((token, transfers)) = transfers.next().await { + let TopLevelTransfers { logs: token_logs, transfers } = transfers?; + logs.insert(token, token_logs); + // Map the top-level transfer to an InInstruction + for transfer in transfers { + let TopLevelTransfer { id, transaction_hash, from, amount, data } = transfer; + justifying_erc20_transfer_logs.insert(transfer.id); + let in_instruction = + InInstruction { id, transaction_hash, from, coin: Coin::Erc20(*token), amount, data }; + in_instructions.push(in_instruction); + } + } + logs + }; + + // Now handle the InInstruction events + for log in in_instruction_logs { + // Double check the address which emitted this log + if log.address() != self.address { + Err(TransportErrorKind::Custom( + "node returned a log from a different address than requested".to_string().into(), + ))?; + } + // Double check this is a InInstruction log + if log.topics().first() != Some(&InInstructionEvent::SIGNATURE_HASH) { + continue; + } + + let log_index = |log: &Log| -> Result { + Ok(LogIndex { + block_hash: log + .block_hash + .ok_or_else(|| { + TransportErrorKind::Custom("log didn't have its block hash set".to_string().into()) + })? + .into(), + index_within_block: log.log_index.ok_or_else(|| { + TransportErrorKind::Custom("log didn't have its index set".to_string().into()) + })?, + }) + }; + + let id = log_index(&log)?; + + let transaction_hash = log.transaction_hash.ok_or_else(|| { + TransportErrorKind::Custom("log didn't have its transaction hash set".to_string().into()) + })?; + let transaction_hash = *transaction_hash; + + let log = log + .log_decode::() + .map_err(|e| { + TransportErrorKind::Custom( + format!("filtered to InInstructionEvent yet couldn't decode log: {e:?}").into(), + ) + })? + .inner + .data; + + let coin = Coin::from(log.coin); + + let in_instruction = InInstruction { + id, + transaction_hash, + from: log.from, + coin, + amount: log.amount, + data: log.instruction.as_ref().to_vec(), + }; + + match coin { + Coin::Ether => {} + Coin::Erc20(token) => { + // Check this is an allowed token + if !allowed_erc20s.contains(&token) { + continue; + } + + /* + We check that for all InInstructions for ERC20s emitted, a corresponding transfer + occurred. + + We don't do this for ETH as it'd require tracing the transaction, which is non-trivial. + It also isn't necessary as all of this is solely defense in depth. 
+ */ + let mut justified = false; + // These logs are returned from `top_level_transfers_unordered` and we don't require any + // ordering of them + for log in erc20_transfer_logs[&token].get(&transaction_hash).unwrap_or(&vec![]) { + let log_index = log_index(log)?; + + // Ensure we didn't already use this transfer to justify a distinct InInstruction + if justifying_erc20_transfer_logs.contains(&log_index) { + continue; + } + + // Check if this log is from the token we expected to be transferred + if log.address() != Address::from(in_instruction.coin) { + continue; + } + // Check if this is a transfer log + if log.topics().first() != Some(&Transfer::SIGNATURE_HASH) { + continue; + } + let Ok(transfer) = Transfer::decode_log(&log.inner.clone(), true) else { continue }; + // Check if this aligns with the InInstruction + if (transfer.from == in_instruction.from) && + (transfer.to == self.address) && + (transfer.value == in_instruction.amount) + { + justifying_erc20_transfer_logs.insert(log_index); + justified = true; + break; + } + } + if !justified { + // This is an exploit, a non-conforming ERC20, or an invalid connection + Err(TransportErrorKind::Custom( + "ERC20 InInstruction with no matching transfer log".to_string().into(), + ))?; + } + } + } + in_instructions.push(in_instruction); + } + + Ok(in_instructions) + } + + /// Fetch the executed actions for the specified range of blocks. + pub async fn executed( + &self, + blocks: RangeInclusive, + ) -> Result, RpcError> { + fn decode(log: &Log) -> Result> { + Ok( + log + .log_decode::() + .map_err(|e| { + TransportErrorKind::Custom( + format!("filtered to event yet couldn't decode log: {e:?}").into(), + ) + })? + .inner + .data, + ) + } + + let filter = Filter::new().select(blocks).address(self.address); + let mut logs = self.provider.get_logs(&filter).await?; + logs.sort_by_key(|log| (log.block_number, log.log_index)); + + let mut res = vec![]; + for log in logs { + // Double check the address which emitted this log + if log.address() != self.address { + Err(TransportErrorKind::Custom( + "node returned a log from a different address than requested".to_string().into(), + ))?; + } + + match log.topics().first() { + Some(&NextSeraiKeySetEvent::SIGNATURE_HASH) => { + let event = decode::(&log)?; + res.push(Executed::NextSeraiKeySet { + nonce: event.nonce.try_into().map_err(|e| { + TransportErrorKind::Custom(format!("failed to convert nonce to u64: {e:?}").into()) + })?, + key: event.key.into(), + }); + } + Some(&SeraiKeyUpdatedEvent::SIGNATURE_HASH) => { + let event = decode::(&log)?; + res.push(Executed::SeraiKeyUpdated { + nonce: event.nonce.try_into().map_err(|e| { + TransportErrorKind::Custom(format!("failed to convert nonce to u64: {e:?}").into()) + })?, + key: event.key.into(), + }); + } + Some(&BatchEvent::SIGNATURE_HASH) => { + let event = decode::(&log)?; + res.push(Executed::Batch { + nonce: event.nonce.try_into().map_err(|e| { + TransportErrorKind::Custom(format!("failed to convert nonce to u64: {e:?}").into()) + })?, + message_hash: event.messageHash.into(), + results: { + let results_len = usize::try_from(event.resultsLength).map_err(|e| { + TransportErrorKind::Custom( + format!("failed to convert resultsLength to usize: {e:?}").into(), + ) + })?; + if results_len.div_ceil(8) != event.results.len() { + Err(TransportErrorKind::Custom( + "resultsLength didn't align with results length".to_string().into(), + ))?; + } + let mut results = Vec::with_capacity(results_len); + for b in 0 .. 
results_len { + let byte = event.results[b / 8]; + results.push(((byte >> (b % 8)) & 1) == 1); + } + results + }, + }); + } + Some(&EscapeHatchEvent::SIGNATURE_HASH) => { + let event = decode::(&log)?; + res.push(Executed::EscapeHatch { + nonce: event.nonce.try_into().map_err(|e| { + TransportErrorKind::Custom(format!("failed to convert nonce to u64: {e:?}").into()) + })?, + escape_to: event.escapeTo, + }); + } + Some(&InInstructionEvent::SIGNATURE_HASH | &EscapedEvent::SIGNATURE_HASH) => {} + unrecognized => Err(TransportErrorKind::Custom( + format!("unrecognized event yielded by the Router: {:?}", unrecognized.map(hex::encode)) + .into(), + ))?, + } + } + + Ok(res) + } + + /// Fetch the `Escape`s from the smart contract through the escape hatch. + pub async fn escapes( + &self, + blocks: RangeInclusive, + ) -> Result, RpcError> { + let filter = Filter::new().select(blocks).address(self.address); + let mut logs = + self.provider.get_logs(&filter.event_signature(EscapedEvent::SIGNATURE_HASH)).await?; + logs.sort_by_key(|log| (log.block_number, log.log_index)); + + let mut res = vec![]; + for log in logs { + // Double check the address which emitted this log + if log.address() != self.address { + Err(TransportErrorKind::Custom( + "node returned a log from a different address than requested".to_string().into(), + ))?; + } + // Double check the topic + if log.topics().first() != Some(&EscapedEvent::SIGNATURE_HASH) { + Err(TransportErrorKind::Custom( + "node returned a log for a different topic than filtered to".to_string().into(), + ))?; + } + + let log = log + .log_decode::() + .map_err(|e| { + TransportErrorKind::Custom( + format!("filtered to event yet couldn't decode log: {e:?}").into(), + ) + })? + .inner + .data; + res.push(Escape { coin: Coin::from(log.coin), amount: log.amount }); + } + + Ok(res) + } + + async fn fetch_key( + &self, + block: BlockId, + call: Vec, + ) -> Result, RpcError> { + let call = + TransactionRequest::default().to(self.address).input(TransactionInput::new(call.into())); + let bytes = self.provider.call(&call).block(block).await?; + // This is fine as both key calls share a return type + let res = abi::nextSeraiKeyCall::abi_decode_returns(&bytes, true) + .map_err(|e| TransportErrorKind::Custom(format!("failed to decode key: {e:?}").into()))?; + let eth_repr = <[u8; 32]>::from(res._0); + Ok(if eth_repr == [0; 32] { + None + } else { + Some(PublicKey::from_eth_repr(eth_repr).ok_or_else(|| { + TransportErrorKind::Custom("invalid key set on router".to_string().into()) + })?) 
+ }) + } + + /// Fetch the next key for Serai's Ethereum validators + pub async fn next_key( + &self, + block: BlockId, + ) -> Result, RpcError> { + self.fetch_key(block, abi::nextSeraiKeyCall::new(()).abi_encode()).await + } + + /// Fetch the current key for Serai's Ethereum validators + pub async fn key( + &self, + block: BlockId, + ) -> Result, RpcError> { + self.fetch_key(block, abi::seraiKeyCall::new(()).abi_encode()).await + } + + /// Fetch the nonce of the next action to execute + pub async fn next_nonce(&self, block: BlockId) -> Result> { + let call = TransactionRequest::default() + .to(self.address) + .input(TransactionInput::new(abi::nextNonceCall::new(()).abi_encode().into())); + let bytes = self.provider.call(&call).block(block).await?; + let res = abi::nextNonceCall::abi_decode_returns(&bytes, true) + .map_err(|e| TransportErrorKind::Custom(format!("failed to decode nonce: {e:?}").into()))?; + Ok(u64::try_from(res._0).map_err(|_| { + TransportErrorKind::Custom("nonce returned exceeded 2**64".to_string().into()) + })?) + } + + /// Fetch the address the escape hatch was set to + pub async fn escaped_to( + &self, + block: BlockId, + ) -> Result, RpcError> { + let call = TransactionRequest::default() + .to(self.address) + .input(TransactionInput::new(abi::escapedToCall::new(()).abi_encode().into())); + let bytes = self.provider.call(&call).block(block).await?; + let res = abi::escapedToCall::abi_decode_returns(&bytes, true).map_err(|e| { + TransportErrorKind::Custom(format!("failed to decode the address escaped to: {e:?}").into()) + })?; + Ok(if res._0 == Address([0; 20].into()) { None } else { Some(res._0) }) + } +} diff --git a/processor/ethereum/router/src/tests/constants.rs b/processor/ethereum/router/src/tests/constants.rs new file mode 100644 index 00000000..db24971f --- /dev/null +++ b/processor/ethereum/router/src/tests/constants.rs @@ -0,0 +1,21 @@ +use alloy_sol_types::SolCall; + +#[test] +fn selector_collisions() { + assert_eq!( + crate::_irouter_abi::IRouter::confirmNextSeraiKeyCall::SELECTOR, + crate::_router_abi::Router::confirmNextSeraiKey34AC53ACCall::SELECTOR + ); + assert_eq!( + crate::_irouter_abi::IRouter::updateSeraiKeyCall::SELECTOR, + crate::_router_abi::Router::updateSeraiKey5A8542A2Call::SELECTOR + ); + assert_eq!( + crate::_irouter_abi::IRouter::executeCall::SELECTOR, + crate::_router_abi::Router::execute4DE42904Call::SELECTOR + ); + assert_eq!( + crate::_irouter_abi::IRouter::escapeHatchCall::SELECTOR, + crate::_router_abi::Router::escapeHatchDCDD91CCCall::SELECTOR + ); +} diff --git a/processor/ethereum/router/src/tests/create_address.rs b/processor/ethereum/router/src/tests/create_address.rs new file mode 100644 index 00000000..339c44b2 --- /dev/null +++ b/processor/ethereum/router/src/tests/create_address.rs @@ -0,0 +1,85 @@ +use alloy_core::primitives::{hex, U256, Bytes, TxKind}; +use alloy_sol_types::SolCall; + +use alloy_consensus::TxLegacy; + +use alloy_rpc_types_eth::{TransactionInput, TransactionRequest}; +use alloy_provider::Provider; + +use revm::{primitives::SpecId, interpreter::gas::calculate_initial_tx_gas}; + +use crate::tests::Test; + +#[rustfmt::skip] +#[expect(warnings)] +#[expect(needless_pass_by_value)] +#[expect(clippy::all)] +#[expect(clippy::ignored_unit_patterns)] +#[expect(clippy::redundant_closure_for_method_calls)] +mod abi { + alloy_sol_macro::sol!("contracts/tests/CreateAddress.sol"); +} + +#[tokio::test] +async fn test_create_address() { + let test = Test::new().await; + + let address = { + const BYTECODE: &[u8] = { + const 
BYTECODE_HEX: &[u8] = include_bytes!(concat!( + env!("OUT_DIR"), + "/serai-processor-ethereum-router/tests/CreateAddress.bin" + )); + const BYTECODE: [u8; BYTECODE_HEX.len() / 2] = + match hex::const_decode_to_array::<{ BYTECODE_HEX.len() / 2 }>(BYTECODE_HEX) { + Ok(bytecode) => bytecode, + Err(_) => panic!("CreateAddress.bin did not contain valid hex"), + }; + &BYTECODE + }; + + let tx = TxLegacy { + chain_id: None, + nonce: 0, + gas_price: 100_000_000_000u128, + gas_limit: 1_100_000, + to: TxKind::Create, + value: U256::ZERO, + input: Bytes::from_static(BYTECODE), + }; + let tx = ethereum_primitives::deterministically_sign(tx); + let receipt = ethereum_test_primitives::publish_tx(&test.provider, tx).await; + receipt.contract_address.unwrap() + }; + + // Check `createAddress` correctly encodes the nonce for every single meaningful bit pattern + // The only meaningful patterns are < 0x80, == 0x80, and then each length greater > 0x80 + // The following covers all three + let mut nonce = 1u64; + let mut gas = None; + while nonce.checked_add(nonce).is_some() { + let input = + (abi::CreateAddress::createAddressForSelfCall { nonce: U256::from(nonce) }).abi_encode(); + + // Make sure the function works as expected + let call = + TransactionRequest::default().to(address).input(TransactionInput::new(input.clone().into())); + assert_eq!( + &test.provider.call(&call).await.unwrap().as_ref()[12 ..], + address.create(nonce).as_slice(), + ); + + // Check the function is constant-gas + let gas_used = test.provider.estimate_gas(&call).await.unwrap(); + let initial_gas = calculate_initial_tx_gas(SpecId::CANCUN, &input, false, &[], 0).initial_gas; + let this_call = gas_used - initial_gas; + if gas.is_none() { + gas = Some(this_call); + } + assert_eq!(gas, Some(this_call)); + + nonce <<= 1; + } + + println!("createAddress gas: {}", gas.unwrap()); +} diff --git a/processor/ethereum/router/src/tests/erc20.rs b/processor/ethereum/router/src/tests/erc20.rs new file mode 100644 index 00000000..02dc957e --- /dev/null +++ b/processor/ethereum/router/src/tests/erc20.rs @@ -0,0 +1,98 @@ +use alloy_core::primitives::{hex, Address, U256, Bytes, TxKind}; +use alloy_sol_types::{SolValue, SolCall}; + +use alloy_consensus::TxLegacy; + +use alloy_rpc_types_eth::{TransactionInput, TransactionRequest}; +use alloy_provider::Provider; + +use crate::tests::Test; + +#[rustfmt::skip] +#[expect(warnings)] +#[expect(needless_pass_by_value)] +#[expect(clippy::all)] +#[expect(clippy::ignored_unit_patterns)] +#[expect(clippy::redundant_closure_for_method_calls)] +mod abi { + alloy_sol_macro::sol!("contracts/tests/ERC20.sol"); +} + +pub struct Erc20(Address); +impl Erc20 { + pub(crate) async fn deploy(test: &Test) -> Self { + const BYTECODE: &[u8] = { + const BYTECODE_HEX: &[u8] = include_bytes!(concat!( + env!("OUT_DIR"), + "/serai-processor-ethereum-router/tests/TestERC20.bin" + )); + const BYTECODE: [u8; BYTECODE_HEX.len() / 2] = + match hex::const_decode_to_array::<{ BYTECODE_HEX.len() / 2 }>(BYTECODE_HEX) { + Ok(bytecode) => bytecode, + Err(_) => panic!("TestERC20.bin did not contain valid hex"), + }; + &BYTECODE + }; + + let tx = TxLegacy { + chain_id: None, + nonce: 0, + gas_price: 100_000_000_000u128, + gas_limit: 1_000_000, + to: TxKind::Create, + value: U256::ZERO, + input: Bytes::from_static(BYTECODE), + }; + let tx = ethereum_primitives::deterministically_sign(tx); + let receipt = ethereum_test_primitives::publish_tx(&test.provider, tx).await; + Self(receipt.contract_address.unwrap()) + } + + pub(crate) fn address(&self) -> 
Address { + self.0 + } + + pub(crate) async fn approve(&self, test: &Test, owner: Address, spender: Address, amount: U256) { + let tx = TxLegacy { + chain_id: None, + nonce: 0, + gas_price: 100_000_000_000u128, + gas_limit: 1_000_000, + to: self.0.into(), + value: U256::ZERO, + input: abi::TestERC20::magicApproveCall::new((owner, spender, amount)).abi_encode().into(), + }; + let tx = ethereum_primitives::deterministically_sign(tx); + let receipt = ethereum_test_primitives::publish_tx(&test.provider, tx).await; + assert!(receipt.status()); + } + + pub(crate) async fn mint(&self, test: &Test, account: Address, amount: U256) { + let tx = TxLegacy { + chain_id: None, + nonce: 0, + gas_price: 100_000_000_000u128, + gas_limit: 1_000_000, + to: self.0.into(), + value: U256::ZERO, + input: abi::TestERC20::mintCall::new((account, amount)).abi_encode().into(), + }; + let tx = ethereum_primitives::deterministically_sign(tx); + let receipt = ethereum_test_primitives::publish_tx(&test.provider, tx).await; + assert!(receipt.status()); + } + + pub(crate) async fn balance_of(&self, test: &Test, account: Address) -> U256 { + let call = TransactionRequest::default().to(self.0).input(TransactionInput::new( + abi::TestERC20::balanceOfCall::new((account,)).abi_encode().into(), + )); + U256::abi_decode(&test.provider.call(&call).await.unwrap(), true).unwrap() + } + + pub(crate) async fn router_approval(&self, test: &Test, account: Address) -> U256 { + let call = TransactionRequest::default().to(self.0).input(TransactionInput::new( + abi::TestERC20::allowanceCall::new((test.router.address(), account)).abi_encode().into(), + )); + U256::abi_decode(&test.provider.call(&call).await.unwrap(), true).unwrap() + } +} diff --git a/processor/ethereum/router/src/tests/escape_hatch.rs b/processor/ethereum/router/src/tests/escape_hatch.rs new file mode 100644 index 00000000..28be1a64 --- /dev/null +++ b/processor/ethereum/router/src/tests/escape_hatch.rs @@ -0,0 +1,172 @@ +use alloy_core::primitives::{Address, U256}; + +use alloy_consensus::TxLegacy; + +use alloy_provider::Provider; + +use crate::tests::*; + +impl Test { + pub(crate) fn escape_hatch_tx(&self, escape_to: Address) -> TxLegacy { + let msg = Router::escape_hatch_message(self.chain_id, self.state.next_nonce, escape_to); + let sig = sign(self.state.key.unwrap(), &msg); + let mut tx = self.router.escape_hatch(escape_to, &sig); + tx.gas_limit = Router::ESCAPE_HATCH_GAS + 5_000; + tx + } + + pub(crate) async fn escape_hatch(&mut self) { + let mut escape_to = [0; 20]; + OsRng.fill_bytes(&mut escape_to); + let escape_to = Address(escape_to.into()); + + // Set the code of the address to escape to so it isn't flagged as a non-contract + let () = self.provider.raw_request("anvil_setCode".into(), (escape_to, [0])).await.unwrap(); + + let mut tx = self.escape_hatch_tx(escape_to); + tx.gas_price = 100_000_000_000; + let tx = ethereum_primitives::deterministically_sign(tx); + let receipt = ethereum_test_primitives::publish_tx(&self.provider, tx.clone()).await; + assert!(receipt.status()); + // This encodes an address which has 12 bytes of padding + assert_eq!( + CalldataAgnosticGas::calculate(tx.tx().input.as_ref(), 12, receipt.gas_used), + Router::ESCAPE_HATCH_GAS + ); + + { + let block = receipt.block_number.unwrap(); + let executed = self.router.executed(block ..= block).await.unwrap(); + assert_eq!(executed.len(), 1); + assert_eq!(executed[0], Executed::EscapeHatch { nonce: self.state.next_nonce, escape_to }); + } + + self.state.next_nonce += 1; + self.state.escaped_to = 
Some(escape_to); + self.verify_state().await; + } + + pub(crate) fn escape_tx(&self, coin: Coin) -> TxLegacy { + let mut tx = self.router.escape(coin); + tx.gas_limit = 100_000; + tx.gas_price = 100_000_000_000; + tx + } +} + +#[tokio::test] +async fn test_escape_hatch() { + let mut test = Test::new().await; + test.confirm_next_serai_key().await; + + // Queue another key so the below test cases can run + test.update_serai_key().await; + + { + // The zero address should be invalid to escape to + assert!(matches!( + test.call_and_decode_err(test.escape_hatch_tx([0; 20].into())).await, + IRouterErrors::InvalidEscapeAddress(IRouter::InvalidEscapeAddress {}) + )); + // Empty addresses should be invalid to escape to + assert!(matches!( + test.call_and_decode_err(test.escape_hatch_tx([1; 20].into())).await, + IRouterErrors::EscapeAddressWasNotAContract(IRouter::EscapeAddressWasNotAContract {}) + )); + // Non-empty addresses without code should be invalid to escape to + let tx = ethereum_primitives::deterministically_sign(TxLegacy { + to: Address([1; 20].into()).into(), + gas_limit: 21_000, + gas_price: 100_000_000_000, + value: U256::from(1), + ..Default::default() + }); + let receipt = ethereum_test_primitives::publish_tx(&test.provider, tx.clone()).await; + assert!(receipt.status()); + assert!(matches!( + test.call_and_decode_err(test.escape_hatch_tx([1; 20].into())).await, + IRouterErrors::EscapeAddressWasNotAContract(IRouter::EscapeAddressWasNotAContract {}) + )); + + // Escaping at this point in time should fail + assert!(matches!( + test.call_and_decode_err(test.router.escape(Coin::Ether)).await, + IRouterErrors::EscapeHatchNotInvoked(IRouter::EscapeHatchNotInvoked {}) + )); + } + + // Invoke the escape hatch + test.escape_hatch().await; + + // Now that the escape hatch has been invoked, all of the following calls should fail + { + assert!(matches!( + test.call_and_decode_err(test.update_serai_key_tx().1).await, + IRouterErrors::EscapeHatchInvoked(IRouter::EscapeHatchInvoked {}) + )); + assert!(matches!( + test.call_and_decode_err(test.confirm_next_serai_key_tx()).await, + IRouterErrors::EscapeHatchInvoked(IRouter::EscapeHatchInvoked {}) + )); + assert!(matches!( + test.call_and_decode_err(test.eth_in_instruction_tx().3).await, + IRouterErrors::EscapeHatchInvoked(IRouter::EscapeHatchInvoked {}) + )); + assert!(matches!( + test + .call_and_decode_err(test.execute_tx(Coin::Ether, U256::from(0), [].as_slice().into()).1) + .await, + IRouterErrors::EscapeHatchInvoked(IRouter::EscapeHatchInvoked {}) + )); + // We reject further attempts to update the escape hatch to prevent the last key from being + // able to switch from the honest escape hatch to siphoning via a malicious escape hatch (such + // as after the validators represented unstake) + assert!(matches!( + test.call_and_decode_err(test.escape_hatch_tx(test.state.escaped_to.unwrap())).await, + IRouterErrors::EscapeHatchInvoked(IRouter::EscapeHatchInvoked {}) + )); + } + + // Check the escape fn itself + + // ETH + { + let () = test + .provider + .raw_request("anvil_setBalance".into(), (test.router.address(), 1)) + .await + .unwrap(); + let tx = ethereum_primitives::deterministically_sign(test.escape_tx(Coin::Ether)); + let receipt = ethereum_test_primitives::publish_tx(&test.provider, tx.clone()).await; + assert!(receipt.status()); + + let block = receipt.block_number.unwrap(); + assert_eq!( + test.router.escapes(block ..= block).await.unwrap(), + vec![Escape { coin: Coin::Ether, amount: U256::from(1) }], + ); + + 
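+ // The escape should have drained the Router's ETH balance (set above) to the escape address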
assert_eq!(test.provider.get_balance(test.router.address()).await.unwrap(), U256::from(0)); + assert_eq!( + test.provider.get_balance(test.state.escaped_to.unwrap()).await.unwrap(), + U256::from(1) + ); + } + + // ERC20 + { + let erc20 = Erc20::deploy(&test).await; + let coin = Coin::Erc20(erc20.address()); + let amount = U256::from(1); + erc20.mint(&test, test.router.address(), amount).await; + + let tx = ethereum_primitives::deterministically_sign(test.escape_tx(coin)); + let receipt = ethereum_test_primitives::publish_tx(&test.provider, tx.clone()).await; + assert!(receipt.status()); + + let block = receipt.block_number.unwrap(); + assert_eq!(test.router.escapes(block ..= block).await.unwrap(), vec![Escape { coin, amount }],); + assert_eq!(erc20.balance_of(&test, test.router.address()).await, U256::from(0)); + assert_eq!(erc20.balance_of(&test, test.state.escaped_to.unwrap()).await, amount); + } +} diff --git a/processor/ethereum/router/src/tests/in_instruction.rs b/processor/ethereum/router/src/tests/in_instruction.rs new file mode 100644 index 00000000..20ddfd02 --- /dev/null +++ b/processor/ethereum/router/src/tests/in_instruction.rs @@ -0,0 +1,182 @@ +use std::collections::HashSet; + +use alloy_core::primitives::U256; +use alloy_sol_types::SolCall; + +use alloy_consensus::{TxLegacy, Signed}; + +use scale::Encode; +use serai_client::{ + primitives::SeraiAddress, + in_instructions::primitives::{ + InInstruction as SeraiInInstruction, RefundableInInstruction, Shorthand, + }, +}; + +use ethereum_primitives::LogIndex; + +use crate::{InInstruction, tests::*}; + +impl Test { + pub(crate) fn in_instruction() -> Shorthand { + Shorthand::Raw(RefundableInInstruction { + origin: None, + instruction: SeraiInInstruction::Transfer(SeraiAddress([0xff; 32])), + }) + } + + pub(crate) fn eth_in_instruction_tx(&self) -> (Coin, U256, Shorthand, TxLegacy) { + let coin = Coin::Ether; + let amount = U256::from(1); + let shorthand = Self::in_instruction(); + + let mut tx = self.router.in_instruction(coin, amount, &shorthand); + tx.gas_limit = 1_000_000; + tx.gas_price = 100_000_000_000; + + (coin, amount, shorthand, tx) + } + + pub(crate) async fn publish_in_instruction_tx( + &self, + tx: Signed, + coin: Coin, + amount: U256, + shorthand: &Shorthand, + ) { + let receipt = ethereum_test_primitives::publish_tx(&self.provider, tx.clone()).await; + assert!(receipt.status()); + + let block = receipt.block_number.unwrap(); + + if matches!(coin, Coin::Erc20(_)) { + // If we don't whitelist this token, we shouldn't be yielded an InInstruction + let in_instructions = + self.router.in_instructions_unordered(block ..= block, &HashSet::new()).await.unwrap(); + assert!(in_instructions.is_empty()); + } + + let in_instructions = self + .router + .in_instructions_unordered( + block ..= block, + &if let Coin::Erc20(token) = coin { HashSet::from([token]) } else { HashSet::new() }, + ) + .await + .unwrap(); + assert_eq!(in_instructions.len(), 1); + + let in_instruction_log_index = receipt.inner.logs().iter().find_map(|log| { + (log.topics().first() == Some(&crate::InInstructionEvent::SIGNATURE_HASH)) + .then(|| log.log_index.unwrap()) + }); + // If this isn't an InInstruction event, it'll be a top-level transfer event + let log_index = in_instruction_log_index.unwrap_or(0); + + assert_eq!( + in_instructions[0], + InInstruction { + id: LogIndex { block_hash: *receipt.block_hash.unwrap(), index_within_block: log_index }, + transaction_hash: **tx.hash(), + from: tx.recover_signer().unwrap(), + coin, + amount, + data: 
shorthand.encode(), + } + ); + } +} + +#[tokio::test] +async fn test_no_in_instruction_before_key() { + let test = Test::new().await; + + // We shouldn't be able to publish `InInstruction`s before publishing a key + let (_coin, _amount, _shorthand, tx) = test.eth_in_instruction_tx(); + assert!(matches!( + test.call_and_decode_err(tx).await, + IRouterErrors::SeraiKeyWasNone(IRouter::SeraiKeyWasNone {}) + )); +} + +#[tokio::test] +async fn test_eth_in_instruction() { + let mut test = Test::new().await; + test.confirm_next_serai_key().await; + + let (coin, amount, shorthand, tx) = test.eth_in_instruction_tx(); + + // This should fail if the value mismatches the amount + { + let mut tx = tx.clone(); + tx.value = U256::ZERO; + assert!(matches!( + test.call_and_decode_err(tx).await, + IRouterErrors::AmountMismatchesMsgValue(IRouter::AmountMismatchesMsgValue {}) + )); + } + + let tx = ethereum_primitives::deterministically_sign(tx); + test.publish_in_instruction_tx(tx, coin, amount, &shorthand).await; +} + +#[tokio::test] +async fn test_erc20_router_in_instruction() { + let mut test = Test::new().await; + test.confirm_next_serai_key().await; + + let erc20 = Erc20::deploy(&test).await; + + let coin = Coin::Erc20(erc20.address()); + let amount = U256::from(1); + let shorthand = Test::in_instruction(); + + // The provided `in_instruction` function will use a top-level transfer for ERC20 InInstructions, + // so we have to manually write this call + let tx = TxLegacy { + chain_id: None, + nonce: 0, + gas_price: 100_000_000_000, + gas_limit: 1_000_000, + to: test.router.address().into(), + value: U256::ZERO, + input: crate::abi::inInstructionCall::new((coin.into(), amount, shorthand.encode().into())) + .abi_encode() + .into(), + }; + + // If no `approve` was granted, this should fail + assert!(matches!( + test.call_and_decode_err(tx.clone()).await, + IRouterErrors::TransferFromFailed(IRouter::TransferFromFailed {}) + )); + + let tx = ethereum_primitives::deterministically_sign(tx); + { + let signer = tx.recover_signer().unwrap(); + erc20.mint(&test, signer, amount).await; + erc20.approve(&test, signer, test.router.address(), amount).await; + } + + test.publish_in_instruction_tx(tx, coin, amount, &shorthand).await; +} + +#[tokio::test] +async fn test_erc20_top_level_transfer_in_instruction() { + let mut test = Test::new().await; + test.confirm_next_serai_key().await; + + let erc20 = Erc20::deploy(&test).await; + + let coin = Coin::Erc20(erc20.address()); + let amount = U256::from(1); + let shorthand = Test::in_instruction(); + + let mut tx = test.router.in_instruction(coin, amount, &shorthand); + tx.gas_price = 100_000_000_000; + tx.gas_limit = 1_000_000; + + let tx = ethereum_primitives::deterministically_sign(tx); + erc20.mint(&test, tx.recover_signer().unwrap(), amount).await; + test.publish_in_instruction_tx(tx, coin, amount, &shorthand).await; +} diff --git a/processor/ethereum/router/src/tests/mod.rs b/processor/ethereum/router/src/tests/mod.rs new file mode 100644 index 00000000..403e871e --- /dev/null +++ b/processor/ethereum/router/src/tests/mod.rs @@ -0,0 +1,869 @@ +use std::sync::Arc; + +use rand_core::{RngCore, OsRng}; + +use group::ff::Field; +use k256::{Scalar, ProjectivePoint}; + +use alloy_core::primitives::{Address, U256}; +use alloy_sol_types::{SolValue, SolCall, SolEvent}; + +use alloy_consensus::{TxLegacy, Signed}; + +use alloy_rpc_types_eth::{BlockNumberOrTag, TransactionInput, TransactionRequest}; +use alloy_simple_request_transport::SimpleRequest; +use 
alloy_rpc_client::ClientBuilder; +use alloy_provider::{ + Provider, RootProvider, + ext::{DebugApi, TraceApi}, +}; + +use alloy_node_bindings::{Anvil, AnvilInstance}; + +use serai_client::networks::ethereum::{ContractDeployment, Address as SeraiEthereumAddress}; + +use ethereum_schnorr::{PublicKey, Signature}; +use ethereum_deployer::Deployer; + +use crate::{ + _irouter_abi::IRouterWithoutCollisions::{ + self as IRouter, IRouterWithoutCollisionsErrors as IRouterErrors, + }, + Coin, OutInstructions, Router, Executed, Escape, +}; + +mod constants; + +mod erc20; +use erc20::Erc20; + +mod create_address; +mod in_instruction; +mod escape_hatch; + +pub(crate) fn test_key() -> (Scalar, PublicKey) { + loop { + let key = Scalar::random(&mut OsRng); + let point = ProjectivePoint::GENERATOR * key; + if let Some(public_key) = PublicKey::new(point) { + return (key, public_key); + } + } +} + +fn sign(key: (Scalar, PublicKey), msg: &[u8]) -> Signature { + let nonce = Scalar::random(&mut OsRng); + let c = Signature::challenge(ProjectivePoint::GENERATOR * nonce, &key.1, msg); + let s = nonce + (c * key.0); + Signature::new(c, s).unwrap() +} + +/// Calculate the gas used by a transaction if none of its calldata's bytes were zero +struct CalldataAgnosticGas; +impl CalldataAgnosticGas { + #[must_use] + fn calculate(input: &[u8], mut constant_zero_bytes: usize, gas_used: u64) -> u64 { + use revm::{primitives::SpecId, interpreter::gas::calculate_initial_tx_gas}; + + let mut without_variable_zero_bytes = Vec::with_capacity(input.len()); + for byte in input { + if (constant_zero_bytes > 0) && (*byte == 0) { + constant_zero_bytes -= 1; + without_variable_zero_bytes.push(0); + } else { + // If this is a variably zero byte, or a non-zero byte, push a non-zero byte + without_variable_zero_bytes.push(0xff); + } + } + gas_used + + (calculate_initial_tx_gas(SpecId::CANCUN, &without_variable_zero_bytes, false, &[], 0) + .initial_gas - + calculate_initial_tx_gas(SpecId::CANCUN, input, false, &[], 0).initial_gas) + } +} + +struct RouterState { + next_key: Option<(Scalar, PublicKey)>, + key: Option<(Scalar, PublicKey)>, + next_nonce: u64, + escaped_to: Option
, +} + +struct Test { + #[allow(unused)] + anvil: AnvilInstance, + provider: Arc>, + chain_id: U256, + router: Router, + state: RouterState, +} + +impl Test { + async fn verify_state(&self) { + assert_eq!( + self.router.next_key(BlockNumberOrTag::Latest.into()).await.unwrap(), + self.state.next_key.map(|key| key.1) + ); + assert_eq!( + self.router.key(BlockNumberOrTag::Latest.into()).await.unwrap(), + self.state.key.map(|key| key.1) + ); + assert_eq!( + self.router.next_nonce(BlockNumberOrTag::Latest.into()).await.unwrap(), + self.state.next_nonce + ); + assert_eq!( + self.router.escaped_to(BlockNumberOrTag::Latest.into()).await.unwrap(), + self.state.escaped_to, + ); + } + + async fn new() -> Self { + // The following is explicitly only evaluated against the cancun network upgrade at this time + let anvil = Anvil::new() + .arg("--hardfork") + .arg("cancun") + .arg("--tracing") + .arg("--no-request-size-limit") + .arg("--disable-block-gas-limit") + .spawn(); + + let provider = Arc::new(RootProvider::new( + ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true), + )); + let chain_id = U256::from(provider.get_chain_id().await.unwrap()); + + let (private_key, public_key) = test_key(); + assert!(Router::new(provider.clone(), &public_key).await.unwrap().is_none()); + + // Deploy the Deployer + let receipt = ethereum_test_primitives::publish_tx(&provider, Deployer::deployment_tx()).await; + assert!(receipt.status()); + + let mut tx = Router::deployment_tx(&public_key); + tx.gas_limit = 1_100_000; + tx.gas_price = 100_000_000_000; + let tx = ethereum_primitives::deterministically_sign(tx); + let receipt = ethereum_test_primitives::publish_tx(&provider, tx).await; + assert!(receipt.status()); + + let router = Router::new(provider.clone(), &public_key).await.unwrap().unwrap(); + let state = RouterState { + next_key: Some((private_key, public_key)), + key: None, + // Nonce 0 should've been consumed by setting the next key to the key initialized with + next_nonce: 1, + escaped_to: None, + }; + + // Confirm nonce 0 was used as such + { + let block = receipt.block_number.unwrap(); + let executed = router.executed(block ..= block).await.unwrap(); + assert_eq!(executed.len(), 1); + assert_eq!(executed[0], Executed::NextSeraiKeySet { nonce: 0, key: public_key.eth_repr() }); + } + + let res = Test { anvil, provider, chain_id, router, state }; + res.verify_state().await; + res + } + + async fn call_and_decode_err(&self, tx: TxLegacy) -> IRouterErrors { + let call = TransactionRequest::default() + .to(self.router.address()) + .input(TransactionInput::new(tx.input)); + let call_err = self.provider.call(&call).await.unwrap_err(); + call_err.as_error_resp().unwrap().as_decoded_error::(true).unwrap() + } + + fn confirm_next_serai_key_tx(&self) -> TxLegacy { + let msg = Router::confirm_next_serai_key_message(self.chain_id, self.state.next_nonce); + let sig = sign(self.state.next_key.unwrap(), &msg); + + self.router.confirm_next_serai_key(&sig) + } + + async fn confirm_next_serai_key(&mut self) { + let mut tx = self.confirm_next_serai_key_tx(); + tx.gas_limit = Router::CONFIRM_NEXT_SERAI_KEY_GAS + 5_000; + tx.gas_price = 100_000_000_000; + let tx = ethereum_primitives::deterministically_sign(tx); + let receipt = ethereum_test_primitives::publish_tx(&self.provider, tx.clone()).await; + assert!(receipt.status()); + // Only check the gas is equal when writing to a previously unallocated storage slot, as this + // is the highest possible gas cost and what the constant is derived from + if 
self.state.key.is_none() { + assert_eq!( + CalldataAgnosticGas::calculate(tx.tx().input.as_ref(), 0, receipt.gas_used), + Router::CONFIRM_NEXT_SERAI_KEY_GAS, + ); + } else { + assert!( + CalldataAgnosticGas::calculate(tx.tx().input.as_ref(), 0, receipt.gas_used) < + Router::CONFIRM_NEXT_SERAI_KEY_GAS + ); + } + + { + let block = receipt.block_number.unwrap(); + let executed = self.router.executed(block ..= block).await.unwrap(); + assert_eq!(executed.len(), 1); + assert_eq!( + executed[0], + Executed::SeraiKeyUpdated { + nonce: self.state.next_nonce, + key: self.state.next_key.unwrap().1.eth_repr() + } + ); + } + + self.state.next_nonce += 1; + self.state.key = self.state.next_key; + self.state.next_key = None; + self.verify_state().await; + } + + fn update_serai_key_tx(&self) -> ((Scalar, PublicKey), TxLegacy) { + let next_key = test_key(); + + let msg = Router::update_serai_key_message(self.chain_id, self.state.next_nonce, &next_key.1); + let sig = sign(self.state.key.unwrap(), &msg); + + (next_key, self.router.update_serai_key(&next_key.1, &sig)) + } + + async fn update_serai_key(&mut self) { + let (next_key, mut tx) = self.update_serai_key_tx(); + tx.gas_limit = Router::UPDATE_SERAI_KEY_GAS + 5_000; + tx.gas_price = 100_000_000_000; + let tx = ethereum_primitives::deterministically_sign(tx); + let receipt = ethereum_test_primitives::publish_tx(&self.provider, tx.clone()).await; + assert!(receipt.status()); + if self.state.next_key.is_none() { + assert_eq!( + CalldataAgnosticGas::calculate(tx.tx().input.as_ref(), 0, receipt.gas_used), + Router::UPDATE_SERAI_KEY_GAS, + ); + } else { + assert!( + CalldataAgnosticGas::calculate(tx.tx().input.as_ref(), 0, receipt.gas_used) < + Router::UPDATE_SERAI_KEY_GAS + ); + } + + { + let block = receipt.block_number.unwrap(); + let executed = self.router.executed(block ..= block).await.unwrap(); + assert_eq!(executed.len(), 1); + assert_eq!( + executed[0], + Executed::NextSeraiKeySet { nonce: self.state.next_nonce, key: next_key.1.eth_repr() } + ); + } + + self.state.next_nonce += 1; + self.state.next_key = Some(next_key); + self.verify_state().await; + } + + fn execute_tx( + &self, + coin: Coin, + fee: U256, + out_instructions: OutInstructions, + ) -> ([u8; 32], TxLegacy) { + let msg = Router::execute_message( + self.chain_id, + self.state.next_nonce, + coin, + fee, + out_instructions.clone(), + ); + let msg_hash = ethereum_primitives::keccak256(&msg); + let sig = loop { + let sig = sign(self.state.key.unwrap(), &msg); + // Standardize the zero bytes in the signature for calldata gas reasons + let has_zero_byte = sig.to_bytes().iter().filter(|b| **b == 0).count() != 0; + if has_zero_byte { + continue; + } + break sig; + }; + + let tx = self.router.execute(coin, fee, out_instructions, &sig); + (msg_hash, tx) + } + + async fn execute( + &mut self, + coin: Coin, + fee: U256, + out_instructions: OutInstructions, + results: Vec, + ) -> (Signed, u64) { + let (message_hash, mut tx) = self.execute_tx(coin, fee, out_instructions); + tx.gas_limit = 100_000_000; + tx.gas_price = 100_000_000_000; + let tx = ethereum_primitives::deterministically_sign(tx); + let receipt = ethereum_test_primitives::publish_tx(&self.provider, tx.clone()).await; + assert!(receipt.status()); + + // We don't check the gas for `execute` here, instead at the call-sites where we have + // beneficial context + + { + let block = receipt.block_number.unwrap(); + let executed = self.router.executed(block ..= block).await.unwrap(); + assert_eq!(executed.len(), 1); + assert_eq!( + executed[0], 
+ Executed::Batch { nonce: self.state.next_nonce, message_hash, results } + ); + } + + self.state.next_nonce += 1; + self.verify_state().await; + + (tx.clone(), receipt.gas_used) + } + + async fn gas_unused_by_calls(&self, tx: &Signed) -> u64 { + let mut unused_gas = 0; + + // Handle the difference between the gas limits and gas used values + let traces = self.provider.trace_transaction(*tx.hash()).await.unwrap(); + // Skip the initial call to the Router and the call to ecrecover + let mut traces = traces.iter().skip(2); + while let Some(trace) = traces.next() { + let trace = &trace.trace; + // We're tracing the Router's immediate actions, and it doesn't immediately call CREATE + // It only makes a call to itself which calls CREATE + let gas_provided = trace.action.as_call().as_ref().unwrap().gas; + let gas_spent = trace.result.as_ref().unwrap().gas_used(); + unused_gas += gas_provided - gas_spent; + + let mut subtraces = trace.subtraces; + while subtraces != 0 { + // Skip the subtraces (and their subtraces) for this call (such as CREATE) + subtraces += traces.next().unwrap().trace.subtraces; + subtraces -= 1; + } + } + + // Also handle any refunds + { + let trace = + self.provider.debug_trace_transaction(*tx.hash(), Default::default()).await.unwrap(); + let refund = + trace.try_into_default_frame().unwrap().struct_logs.last().unwrap().refund_counter; + // This isn't capped to 1/5th of the TX's gas usage yet that's fine as none of our tests are + // so refund intensive + unused_gas += refund.unwrap_or(0) + } + + unused_gas + } +} + +#[tokio::test] +async fn test_constructor() { + // `Test::new` internalizes all checks on initial state + Test::new().await; +} + +#[tokio::test] +async fn test_confirm_next_serai_key() { + let mut test = Test::new().await; + test.confirm_next_serai_key().await; +} + +#[tokio::test] +async fn test_no_serai_key() { + // Before we confirm a key, any operations requiring a signature shouldn't work + { + let mut test = Test::new().await; + + // Corrupt the test's state so we can obtain signed TXs + test.state.key = Some(test_key()); + + assert!(matches!( + test.call_and_decode_err(test.update_serai_key_tx().1).await, + IRouterErrors::SeraiKeyWasNone(IRouter::SeraiKeyWasNone {}) + )); + assert!(matches!( + test + .call_and_decode_err(test.execute_tx(Coin::Ether, U256::from(0), [].as_slice().into()).1) + .await, + IRouterErrors::SeraiKeyWasNone(IRouter::SeraiKeyWasNone {}) + )); + assert!(matches!( + test.call_and_decode_err(test.escape_hatch_tx(Address::ZERO)).await, + IRouterErrors::SeraiKeyWasNone(IRouter::SeraiKeyWasNone {}) + )); + } + + // And if there's no key to confirm, any operations requiring a signature shouldn't work + { + let mut test = Test::new().await; + test.confirm_next_serai_key().await; + test.state.next_key = Some(test_key()); + assert!(matches!( + test.call_and_decode_err(test.confirm_next_serai_key_tx()).await, + IRouterErrors::SeraiKeyWasNone(IRouter::SeraiKeyWasNone {}) + )); + } +} + +#[tokio::test] +async fn test_invalid_signature() { + let mut test = Test::new().await; + + { + let mut tx = test.confirm_next_serai_key_tx(); + // Cut it down to the function signature + tx.input = tx.input.as_ref()[.. 
4].to_vec().into(); + assert!(matches!( + test.call_and_decode_err(tx).await, + IRouterErrors::InvalidSignature(IRouter::InvalidSignature {}) + )); + } + + { + let mut tx = test.confirm_next_serai_key_tx(); + // Mutate the signature + let mut input = Vec::::from(tx.input); + *input.last_mut().unwrap() = input.last().unwrap().wrapping_add(1); + tx.input = input.into(); + assert!(matches!( + test.call_and_decode_err(tx).await, + IRouterErrors::InvalidSignature(IRouter::InvalidSignature {}) + )); + } + + test.confirm_next_serai_key().await; + + { + let mut tx = test.update_serai_key_tx().1; + // Mutate the message + let mut input = Vec::::from(tx.input); + *input.last_mut().unwrap() = input.last().unwrap().wrapping_add(1); + tx.input = input.into(); + assert!(matches!( + test.call_and_decode_err(tx).await, + IRouterErrors::InvalidSignature(IRouter::InvalidSignature {}) + )); + } +} + +#[tokio::test] +async fn test_update_serai_key() { + let mut test = Test::new().await; + test.confirm_next_serai_key().await; + test.update_serai_key().await; + + // We should be able to update while an update is pending as well (in case the new key never + // confirms) + test.update_serai_key().await; + + // But we shouldn't be able to update the key to None + { + let msg = crate::abi::updateSeraiKeyCall::new(( + crate::abi::Signature { + c: test.chain_id.into(), + s: U256::try_from(test.state.next_nonce).unwrap().into(), + }, + [0; 32].into(), + )) + .abi_encode(); + let sig = sign(test.state.key.unwrap(), &msg); + + assert!(matches!( + test + .call_and_decode_err(TxLegacy { + input: crate::abi::updateSeraiKeyCall::new(( + crate::abi::Signature::from(&sig), + [0; 32].into(), + )) + .abi_encode() + .into(), + ..Default::default() + }) + .await, + IRouterErrors::InvalidSeraiKey(IRouter::InvalidSeraiKey {}) + )); + } + + // Once we update to a new key, we should, of course, be able to continue to rotate keys + test.confirm_next_serai_key().await; +} + +#[tokio::test] +async fn test_execute_arbitrary_code() { + let test = Test::new().await; + + assert!(matches!( + test + .call_and_decode_err(TxLegacy { + chain_id: None, + nonce: 0, + gas_price: 100_000_000_000, + gas_limit: 1_000_000, + to: test.router.address().into(), + value: U256::ZERO, + input: crate::abi::executeArbitraryCodeCall::new((vec![].into(),)).abi_encode().into(), + }) + .await, + IRouterErrors::CodeNotBySelf(IRouter::CodeNotBySelf {}) + )); +} + +// Code which returns true +#[rustfmt::skip] +fn return_true_code() -> Vec { + vec![ + 0x60, // push 1 byte | 3 gas + 0x01, // the value 1 + 0x5f, // push 0 | 2 gas + 0x52, // mstore to offset 0 the value 1 | 3 gas + 0x60, // push 1 byte | 3 gas + 0x20, // the value 32 + 0x5f, // push 0 | 2 gas + 0xf3, // return from offset 0 1 word | 0 gas + // 13 gas for the execution plus a single word of memory for 16 gas total + ] +} + +#[tokio::test] +async fn test_empty_execute() { + let mut test = Test::new().await; + test.confirm_next_serai_key().await; + + { + let gas = test.router.execute_gas(Coin::Ether, U256::from(1), &[].as_slice().into()); + let fee = U256::from(gas); + + let () = test + .provider + .raw_request("anvil_setBalance".into(), (test.router.address(), fee)) + .await + .unwrap(); + + let (tx, gas_used) = test.execute(Coin::Ether, fee, [].as_slice().into(), vec![]).await; + // We don't use the call gas stipend here + const UNUSED_GAS: u64 = revm::interpreter::gas::CALL_STIPEND; + assert_eq!(gas_used + UNUSED_GAS, gas); + + assert_eq!(test.provider.get_balance(test.router.address()).await.unwrap(), 
U256::from(0)); + let minted_to_sender = u128::from(tx.tx().gas_limit) * tx.tx().gas_price; + let spent_by_sender = u128::from(gas_used) * tx.tx().gas_price; + assert_eq!( + test.provider.get_balance(tx.recover_signer().unwrap()).await.unwrap() - + U256::from(minted_to_sender - spent_by_sender), + U256::from(fee) + ); + } + + { + let token = Address::from([0xff; 20]); + { + let code = return_true_code(); + // Deploy our 'token' + let () = test.provider.raw_request("anvil_setCode".into(), (token, code)).await.unwrap(); + let call = + TransactionRequest::default().to(token).input(TransactionInput::new(vec![].into())); + // Check it returns the expected result + assert_eq!( + test.provider.call(&call).await.unwrap().as_ref(), + U256::from(1).abi_encode().as_slice() + ); + // Check it has the expected gas cost (16 is documented in `return_true_code`) + assert_eq!(test.provider.estimate_gas(&call).await.unwrap(), 21_000 + 16); + } + + let gas = test.router.execute_gas(Coin::Erc20(token), U256::from(0), &[].as_slice().into()); + let fee = U256::from(0); + let (_tx, gas_used) = test.execute(Coin::Erc20(token), fee, [].as_slice().into(), vec![]).await; + const UNUSED_GAS: u64 = Router::GAS_FOR_ERC20_CALL - 16; + assert_eq!(gas_used + UNUSED_GAS, gas); + } +} + +#[tokio::test] +async fn test_eth_address_out_instruction() { + let mut test = Test::new().await; + test.confirm_next_serai_key().await; + + let mut rand_address = [0xff; 20]; + OsRng.fill_bytes(&mut rand_address); + let amount_out = U256::from(2); + let out_instructions = + OutInstructions::from([(SeraiEthereumAddress::Address(rand_address), amount_out)].as_slice()); + + let gas = test.router.execute_gas(Coin::Ether, U256::from(1), &out_instructions); + let fee = U256::from(gas); + + let () = test + .provider + .raw_request("anvil_setBalance".into(), (test.router.address(), amount_out + fee)) + .await + .unwrap(); + + let (tx, gas_used) = test.execute(Coin::Ether, fee, out_instructions, vec![true]).await; + const UNUSED_GAS: u64 = 2 * revm::interpreter::gas::CALL_STIPEND; + assert_eq!(gas_used + UNUSED_GAS, gas); + + assert_eq!(test.provider.get_balance(test.router.address()).await.unwrap(), U256::from(0)); + let minted_to_sender = u128::from(tx.tx().gas_limit) * tx.tx().gas_price; + let spent_by_sender = u128::from(gas_used) * tx.tx().gas_price; + assert_eq!( + test.provider.get_balance(tx.recover_signer().unwrap()).await.unwrap() - + U256::from(minted_to_sender - spent_by_sender), + U256::from(fee) + ); + assert_eq!(test.provider.get_balance(rand_address.into()).await.unwrap(), amount_out); +} + +#[tokio::test] +async fn test_erc20_address_out_instruction() { + let mut test = Test::new().await; + test.confirm_next_serai_key().await; + + let erc20 = Erc20::deploy(&test).await; + let coin = Coin::Erc20(erc20.address()); + + let mut rand_address = [0xff; 20]; + OsRng.fill_bytes(&mut rand_address); + let amount_out = U256::from(2); + let out_instructions = + OutInstructions::from([(SeraiEthereumAddress::Address(rand_address), amount_out)].as_slice()); + + let gas = test.router.execute_gas(coin, U256::from(1), &out_instructions); + let fee = U256::from(gas); + + // Mint to the Router the necessary amount of the ERC20 + erc20.mint(&test, test.router.address(), amount_out + fee).await; + + let (tx, gas_used) = test.execute(coin, fee, out_instructions, vec![true]).await; + // Uses traces due to the complexity of modeling Erc20::transfer + let unused_gas = test.gas_unused_by_calls(&tx).await; + assert_eq!(gas_used + unused_gas, gas); + + 
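+ // The Router should no longer hold any of the ERC20: the fee goes to the transaction's
+ // signer and the amount out goes to the recipient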
assert_eq!(erc20.balance_of(&test, test.router.address()).await, U256::from(0)); + assert_eq!(erc20.balance_of(&test, tx.recover_signer().unwrap()).await, U256::from(fee)); + assert_eq!(erc20.balance_of(&test, rand_address.into()).await, amount_out); +} + +#[tokio::test] +async fn test_eth_code_out_instruction() { + let mut test = Test::new().await; + test.confirm_next_serai_key().await; + let () = test + .provider + .raw_request("anvil_setBalance".into(), (test.router.address(), 1_000_000)) + .await + .unwrap(); + + let code = return_true_code(); + let amount_out = U256::from(2); + let out_instructions = OutInstructions::from( + [( + SeraiEthereumAddress::Contract(ContractDeployment::new(50_000, code.clone()).unwrap()), + amount_out, + )] + .as_slice(), + ); + + let gas = test.router.execute_gas(Coin::Ether, U256::from(1), &out_instructions); + let fee = U256::from(gas); + let (tx, gas_used) = test.execute(Coin::Ether, fee, out_instructions, vec![true]).await; + + // We use call-traces here to determine how much gas was allowed but unused due to the complexity + // of modeling the call to the Router itself and the following CREATE + let unused_gas = test.gas_unused_by_calls(&tx).await; + assert_eq!(gas_used + unused_gas, gas); + + assert_eq!( + test.provider.get_balance(test.router.address()).await.unwrap(), + U256::from(1_000_000) - amount_out - fee + ); + let minted_to_sender = u128::from(tx.tx().gas_limit) * tx.tx().gas_price; + let spent_by_sender = u128::from(gas_used) * tx.tx().gas_price; + assert_eq!( + test.provider.get_balance(tx.recover_signer().unwrap()).await.unwrap() - + U256::from(minted_to_sender - spent_by_sender), + U256::from(fee) + ); + let deployed = test.router.address().create(1); + assert_eq!(test.provider.get_balance(deployed).await.unwrap(), amount_out); + // The init code we use returns true, which will become the deployed contract's code + assert_eq!(test.provider.get_code_at(deployed).await.unwrap().to_vec(), true.abi_encode()); +} + +#[tokio::test] +async fn test_erc20_code_out_instruction() { + let mut test = Test::new().await; + test.confirm_next_serai_key().await; + + let erc20 = Erc20::deploy(&test).await; + let coin = Coin::Erc20(erc20.address()); + + let code = return_true_code(); + let amount_out = U256::from(2); + let out_instructions = OutInstructions::from( + [(SeraiEthereumAddress::Contract(ContractDeployment::new(50_000, code).unwrap()), amount_out)] + .as_slice(), + ); + + let gas = test.router.execute_gas(coin, U256::from(1), &out_instructions); + let fee = U256::from(gas); + + // Mint to the Router the necessary amount of the ERC20 + erc20.mint(&test, test.router.address(), amount_out + fee).await; + + let (tx, gas_used) = test.execute(coin, fee, out_instructions, vec![true]).await; + + let unused_gas = test.gas_unused_by_calls(&tx).await; + assert_eq!(gas_used + unused_gas, gas); + + assert_eq!(erc20.balance_of(&test, test.router.address()).await, U256::from(amount_out)); + assert_eq!(erc20.balance_of(&test, tx.recover_signer().unwrap()).await, U256::from(fee)); + let deployed = test.router.address().create(1); + assert_eq!(erc20.router_approval(&test, deployed).await, amount_out); + assert_eq!(test.provider.get_code_at(deployed).await.unwrap().to_vec(), true.abi_encode()); +} + +#[tokio::test] +async fn test_result_decoding() { + let mut test = Test::new().await; + test.confirm_next_serai_key().await; + + // Create three OutInstructions, where the last one errors + let out_instructions = OutInstructions::from( + [ + 
(SeraiEthereumAddress::Address([0; 20]), U256::from(0)), + (SeraiEthereumAddress::Address([0; 20]), U256::from(0)), + (SeraiEthereumAddress::Contract(ContractDeployment::new(0, vec![]).unwrap()), U256::from(0)), + ] + .as_slice(), + ); + + let gas = test.router.execute_gas(Coin::Ether, U256::from(0), &out_instructions); + + // We should decode these in the correct order (not `false, true, true`) + let (_tx, gas_used) = + test.execute(Coin::Ether, U256::from(0), out_instructions, vec![true, true, false]).await; + // We don't check strict equality as we don't know how much gas was used by the reverted call + // (even with the trace), solely that it used less than or equal to the limit + assert!(gas_used <= gas); +} + +#[tokio::test] +async fn test_reentrancy() { + let mut test = Test::new().await; + test.confirm_next_serai_key().await; + + const BYTECODE: &[u8] = { + const BYTECODE_HEX: &[u8] = include_bytes!(concat!( + env!("OUT_DIR"), + "/serai-processor-ethereum-router/tests/Reentrancy.bin" + )); + const BYTECODE: [u8; BYTECODE_HEX.len() / 2] = + match alloy_core::primitives::hex::const_decode_to_array::<{ BYTECODE_HEX.len() / 2 }>( + BYTECODE_HEX, + ) { + Ok(bytecode) => bytecode, + Err(_) => panic!("Reentrancy.bin did not contain valid hex"), + }; + &BYTECODE + }; + + let out_instructions = OutInstructions::from( + [( + // The Reentrancy contract, in its constructor, will re-enter and verify the proper error is + // returned + SeraiEthereumAddress::Contract(ContractDeployment::new(50_000, BYTECODE.to_vec()).unwrap()), + U256::from(0), + )] + .as_slice(), + ); + + let gas = test.router.execute_gas(Coin::Ether, U256::from(0), &out_instructions); + let (_tx, gas_used) = + test.execute(Coin::Ether, U256::from(0), out_instructions, vec![true]).await; + // Even though this doesn't have failed `OutInstruction`s, our logic is incomplete upon any + // failed internal calls for some reason. That's fine, as the gas yielded is still the worst-case + // (which this isn't a counter-example to) and is validated to be the worst-case, but is peculiar + assert!(gas_used <= gas); +} + +#[tokio::test] +async fn fuzz_test_out_instructions_gas() { + for _ in 0 .. 10 { + let mut test = Test::new().await; + test.confirm_next_serai_key().await; + + // Generate a random OutInstructions + let mut out_instructions = vec![]; + let mut prior_addresses = vec![]; + for _ in 0 .. 
(OsRng.next_u64() % 50) { + let amount_out = U256::from(OsRng.next_u64() % 2); + if (OsRng.next_u64() % 2) == 1 { + let mut code = return_true_code(); + + // Extend this with random data to make it somewhat random, despite the constant returned + // code (though the estimator will never run the initcode and realize that) + let ext = vec![0; usize::try_from(OsRng.next_u64() % 400).unwrap()]; + code.extend(&ext); + + out_instructions.push(( + SeraiEthereumAddress::Contract(ContractDeployment::new(100_000, ext).unwrap()), + amount_out, + )); + } else { + // Occasionally reuse addresses (cold/warm slots) + let address = if (!prior_addresses.is_empty()) && ((OsRng.next_u64() % 2) == 1) { + prior_addresses[usize::try_from( + OsRng.next_u64() % u64::try_from(prior_addresses.len()).unwrap(), + ) + .unwrap()] + } else { + let mut rand_address = [0; 20]; + OsRng.fill_bytes(&mut rand_address); + prior_addresses.push(rand_address); + rand_address + }; + out_instructions.push((SeraiEthereumAddress::Address(address), amount_out)); + } + } + let out_instructions_original = out_instructions.clone(); + let out_instructions = OutInstructions::from(out_instructions.as_slice()); + + // Randomly decide the coin + let coin = if (OsRng.next_u64() % 2) == 1 { + let () = test + .provider + .raw_request("anvil_setBalance".into(), (test.router.address(), 1_000_000_000)) + .await + .unwrap(); + Coin::Ether + } else { + let erc20 = Erc20::deploy(&test).await; + erc20.mint(&test, test.router.address(), U256::from(1_000_000_000)).await; + Coin::Erc20(erc20.address()) + }; + + let fee_per_gas = U256::from(1) + U256::from(OsRng.next_u64() % 10); + let gas = test.router.execute_gas(coin, fee_per_gas, &out_instructions); + let fee = U256::from(gas) * fee_per_gas; + // All of these should have succeeded + let (tx, gas_used) = + test.execute(coin, fee, out_instructions.clone(), vec![true; out_instructions.0.len()]).await; + let unused_gas = test.gas_unused_by_calls(&tx).await; + assert_eq!( + gas_used + unused_gas, + gas, + "{coin:?} {fee_per_gas:?} {out_instructions_original:?}" + ); + } +} diff --git a/processor/ethereum/src/key_gen.rs b/processor/ethereum/src/key_gen.rs new file mode 100644 index 00000000..581684ef --- /dev/null +++ b/processor/ethereum/src/key_gen.rs @@ -0,0 +1,25 @@ +use ciphersuite::{Ciphersuite, Secp256k1}; +use dkg::ThresholdKeys; + +use ethereum_schnorr::PublicKey; + +pub(crate) struct KeyGenParams; +impl key_gen::KeyGenParams for KeyGenParams { + const ID: &'static str = "Ethereum"; + + type ExternalNetworkCiphersuite = Secp256k1; + + fn tweak_keys(keys: &mut ThresholdKeys) { + while PublicKey::new(keys.group_key()).is_none() { + *keys = keys.offset(::F::ONE); + } + } + + fn encode_key(key: ::G) -> Vec { + PublicKey::new(key).unwrap().eth_repr().to_vec() + } + + fn decode_key(key: &[u8]) -> Option<::G> { + PublicKey::from_eth_repr(key.try_into().ok()?).map(|key| key.point()) + } +} diff --git a/processor/ethereum/src/main.rs b/processor/ethereum/src/main.rs new file mode 100644 index 00000000..1a7ff773 --- /dev/null +++ b/processor/ethereum/src/main.rs @@ -0,0 +1,97 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +#[global_allocator] +static ALLOCATOR: zalloc::ZeroizingAlloc = + zalloc::ZeroizingAlloc(std::alloc::System); + +use core::time::Duration; +use std::sync::Arc; + +use alloy_core::primitives::U256; +use alloy_simple_request_transport::SimpleRequest; +use alloy_rpc_client::ClientBuilder; +use alloy_provider::{Provider, 
RootProvider}; + +use serai_client::validator_sets::primitives::Session; + +use serai_env as env; +use serai_db::{Get, DbTxn, create_db}; + +use ::primitives::EncodableG; +use ::key_gen::KeyGenParams as KeyGenParamsTrait; + +mod primitives; +pub(crate) use crate::primitives::*; + +mod key_gen; +use crate::key_gen::KeyGenParams; +mod rpc; +use rpc::Rpc; +mod scheduler; +use scheduler::{SmartContract, Scheduler}; +mod publisher; +use publisher::TransactionPublisher; + +create_db! { + EthereumProcessor { + // The initial key for Serai on Ethereum + InitialSeraiKey: () -> EncodableG, + } +} + +struct SetInitialKey; +impl bin::Hooks for SetInitialKey { + fn on_message(txn: &mut impl DbTxn, msg: &messages::CoordinatorMessage) { + if let messages::CoordinatorMessage::Substrate( + messages::substrate::CoordinatorMessage::SetKeys { session, key_pair, .. }, + ) = msg + { + assert_eq!(*session, Session(0)); + let key = KeyGenParams::decode_key(key_pair.1.as_ref()) + .expect("invalid Ethereum key confirmed on Substrate"); + InitialSeraiKey::set(txn, &EncodableG(key)); + } + } +} + +#[tokio::main] +async fn main() { + let db = bin::init(); + + let provider = Arc::new(RootProvider::new( + ClientBuilder::default().transport(SimpleRequest::new(bin::url()), true), + )); + + let chain_id = { + let mut delay = Duration::from_secs(5); + loop { + match provider.get_chain_id().await { + Ok(chain_id) => break chain_id, + Err(e) => { + log::error!("failed to fetch the chain ID on boot: {e:?}"); + tokio::time::sleep(delay).await; + delay = (delay + Duration::from_secs(5)).max(Duration::from_secs(120)); + } + } + } + }; + + bin::main_loop::( + db.clone(), + Rpc { db: db.clone(), provider: provider.clone() }, + Scheduler::::new(SmartContract { + chain_id: U256::from_le_slice(&chain_id.to_le_bytes()), + }), + TransactionPublisher::new(db, provider, { + let relayer_hostname = env::var("ETHEREUM_RELAYER_HOSTNAME") + .expect("ethereum relayer hostname wasn't specified") + .to_string(); + let relayer_port = + env::var("ETHEREUM_RELAYER_PORT").expect("ethereum relayer port wasn't specified"); + relayer_hostname + ":" + &relayer_port + }), + ) + .await; +} diff --git a/processor/ethereum/src/primitives/block.rs b/processor/ethereum/src/primitives/block.rs new file mode 100644 index 00000000..9d4a8a2d --- /dev/null +++ b/processor/ethereum/src/primitives/block.rs @@ -0,0 +1,137 @@ +use std::collections::HashMap; + +use ciphersuite::{Ciphersuite, Secp256k1}; + +use serai_client::networks::ethereum::Address; + +use primitives::{ReceivedOutput, EventualityTracker}; + +use ethereum_router::{InInstruction as EthereumInInstruction, Executed}; + +use crate::{output::Output, transaction::Eventuality}; + +// We interpret 32-block Epochs as singular blocks. +// There's no reason for further accuracy when these will all finalize at the same time. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub(crate) struct Epoch { + // The hash of the block which ended the prior Epoch. + pub(crate) prior_end_hash: [u8; 32], + // The hash of the last block within this Epoch. 
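+ // This hash also serves as the Epoch's ID, with the prior Epoch's `end_hash` as its parent
+ // (per the `BlockHeader` impl below).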
+ pub(crate) end_hash: [u8; 32], +} + +impl primitives::BlockHeader for Epoch { + fn id(&self) -> [u8; 32] { + self.end_hash + } + fn parent(&self) -> [u8; 32] { + self.prior_end_hash + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) struct FullEpoch { + pub(crate) epoch: Epoch, + /// The unordered list of `InInstruction`s within this epoch + pub(crate) instructions: Vec, + pub(crate) executed: Vec, +} + +impl primitives::Block for FullEpoch { + type Header = Epoch; + + type Key = ::G; + type Address = Address; + type Output = Output; + type Eventuality = Eventuality; + + fn id(&self) -> [u8; 32] { + self.epoch.end_hash + } + + fn scan_for_outputs_unordered( + &self, + latest_active_key: Self::Key, + key: Self::Key, + ) -> Vec { + // Only return these outputs for the latest key + if latest_active_key != key { + return vec![]; + } + + // Associate all outputs with the latest active key + // We don't associate these with the current key within the SC as that'll cause outputs to be + // marked for forwarding if the SC is delayed to actually rotate + let mut outputs: Vec<_> = self + .instructions + .iter() + .cloned() + .map(|instruction| Output::Output { key, instruction }) + .collect(); + + /* + The scanner requires a change output be associated with every Eventuality that came from + fulfilling payments, unless said Eventuality descends from an Eventuality meeting that + requirement from the same fulfillment. This ensures we have a fully populated Eventualities + set by the time we process the block which has an Eventuality. + + Accordingly, for any block with an Eventuality completion, we claim there's a Change output + so that the block is flagged. Ethereum doesn't actually have Change outputs, yet the scanner + won't report them to Substrate, and the Smart Contract scheduler will drop any/all outputs + passed to it (handwaving their balances as present within the Smart Contract). + */ + if !self.executed.is_empty() { + outputs.push(Output::Eventuality { key, nonce: self.executed.first().unwrap().nonce() }); + } + + outputs + } + + #[allow(clippy::type_complexity)] + fn check_for_eventuality_resolutions( + &self, + eventualities: &mut EventualityTracker, + ) -> HashMap< + >::TransactionId, + Self::Eventuality, + > { + let mut res = HashMap::new(); + for executed in &self.executed { + let Some(mut expected) = + eventualities.active_eventualities.remove(executed.nonce().to_le_bytes().as_slice()) + else { + // TODO: Why is this a continue, not an assert? + continue; + }; + // If this is a Batch Eventuality, we didn't know how the OutInstructions would resolve at + // time of creation. Copy the results from the actual transaction into the expectation + if let (Executed::Batch { results, .. }, Executed::Batch { results: expected_results, .. }) = + (executed, &mut expected.0) + { + *expected_results = results.clone(); + } + assert_eq!( + executed, + &expected.0, + "Router emitted distinct event for nonce {}", + executed.nonce() + ); + + /* + The transaction ID is used to determine how internal outputs from this transaction should + be handled (if they were actually internal or if they were just to an internal address). + The Ethereum integration doesn't use internal addresses, and only uses internal outputs to + flag a block as having an Eventuality. Those internal outputs will always be scanned, and + while they may be dropped/kept by this ID, the scheduler will then always drop them. + Accordingly, we have free reign as to what to set the transaction ID to. 
+ + We set the ID to the nonce as it's the most helpful value and unique barring someone + finding the preimage for this as a hash. + */ + let mut tx_id = [0; 32]; + tx_id[.. 8].copy_from_slice(executed.nonce().to_le_bytes().as_slice()); + res.insert(tx_id, expected); + } + res + } +} diff --git a/processor/ethereum/src/primitives/machine.rs b/processor/ethereum/src/primitives/machine.rs new file mode 100644 index 00000000..e3252f30 --- /dev/null +++ b/processor/ethereum/src/primitives/machine.rs @@ -0,0 +1,146 @@ +use std::{io, collections::HashMap}; + +use rand_core::{RngCore, CryptoRng}; + +use ciphersuite::{Ciphersuite, Secp256k1}; +use frost::{ + dkg::{Participant, ThresholdKeys}, + FrostError, + algorithm::*, + sign::*, +}; + +use ethereum_schnorr::{PublicKey, Signature}; + +use crate::transaction::{Action, Transaction}; + +/// The HRAm to use for the Schnorr Solidity library. +/// +/// This will panic if the public key being signed for is not representable within the Schnorr +/// Solidity library. +#[derive(Clone, Default, Debug)] +pub struct EthereumHram; +impl Hram for EthereumHram { + #[allow(non_snake_case)] + fn hram( + R: &::G, + A: &::G, + m: &[u8], + ) -> ::F { + Signature::challenge(*R, &PublicKey::new(*A).unwrap(), m) + } +} + +/// A clonable machine to sign an action. +/// +/// This will panic if the public key being signed with is not representable within the Schnorr +/// Solidity library. +#[derive(Clone)] +pub(crate) struct ClonableTransctionMachine { + pub(crate) keys: ThresholdKeys, + pub(crate) action: Action, +} + +type LiteralAlgorithmMachine = AlgorithmMachine>; +type LiteralAlgorithmSignMachine = + AlgorithmSignMachine>; + +pub(crate) struct ActionSignMachine { + key: PublicKey, + action: Action, + machine: LiteralAlgorithmSignMachine, +} + +type LiteralAlgorithmSignatureMachine = + AlgorithmSignatureMachine>; + +pub(crate) struct ActionSignatureMachine { + key: PublicKey, + action: Action, + machine: LiteralAlgorithmSignatureMachine, +} + +impl PreprocessMachine for ClonableTransctionMachine { + type Preprocess = ::Preprocess; + type Signature = Transaction; + type SignMachine = ActionSignMachine; + + fn preprocess( + self, + rng: &mut R, + ) -> (Self::SignMachine, Self::Preprocess) { + let (machine, preprocess) = + AlgorithmMachine::new(IetfSchnorr::::ietf(), self.keys.clone()) + .preprocess(rng); + ( + ActionSignMachine { + key: PublicKey::new(self.keys.group_key()).expect("signing with non-representable key"), + action: self.action, + machine, + }, + preprocess, + ) + } +} + +impl SignMachine for ActionSignMachine { + type Params = ::Signature, + >>::Params; + type Keys = ::Signature, + >>::Keys; + type Preprocess = ::Signature, + >>::Preprocess; + type SignatureShare = ::Signature, + >>::SignatureShare; + type SignatureMachine = ActionSignatureMachine; + + fn cache(self) -> CachedPreprocess { + unimplemented!() + } + fn from_cache( + _params: Self::Params, + _keys: Self::Keys, + _cache: CachedPreprocess, + ) -> (Self, Self::Preprocess) { + unimplemented!() + } + + fn read_preprocess(&self, reader: &mut R) -> io::Result { + self.machine.read_preprocess(reader) + } + fn sign( + self, + commitments: HashMap, + msg: &[u8], + ) -> Result<(Self::SignatureMachine, Self::SignatureShare), FrostError> { + assert!(msg.is_empty()); + self.machine.sign(commitments, &self.action.message()).map(|(machine, shares)| { + (ActionSignatureMachine { key: self.key, action: self.action, machine }, shares) + }) + } +} + +impl SignatureMachine for ActionSignatureMachine { + type 
SignatureShare = ::Signature, + >>::SignatureShare; + + fn read_share(&self, reader: &mut R) -> io::Result { + self.machine.read_share(reader) + } + + fn complete( + self, + shares: HashMap, + ) -> Result { + self.machine.complete(shares).map(|signature| { + let s = signature.s; + let c = Signature::challenge(signature.R, &self.key, &self.action.message()); + Transaction(self.action, Signature::new(c, s).unwrap()) + }) + } +} diff --git a/processor/ethereum/src/primitives/mod.rs b/processor/ethereum/src/primitives/mod.rs new file mode 100644 index 00000000..39f1eb94 --- /dev/null +++ b/processor/ethereum/src/primitives/mod.rs @@ -0,0 +1,24 @@ +use alloy_core::primitives::{FixedBytes, Address}; + +use serai_client::primitives::Amount; + +pub(crate) mod output; +pub(crate) mod transaction; +pub(crate) mod machine; +pub(crate) mod block; + +pub(crate) const DAI: Address = Address(FixedBytes( + match const_hex::const_decode_to_array(b"0x6B175474E89094C44Da98b954EedeAC495271d0F") { + Ok(res) => res, + Err(_) => panic!("invalid non-test DAI hex address"), + }, +)); + +pub(crate) const TOKENS: [Address; 1] = [DAI]; + +// 8 decimals, so 1_000_000_00 would be 1 ETH. This is 0.0015 ETH (5 USD if Ether is ~3300 USD). +#[allow(clippy::inconsistent_digit_grouping)] +pub(crate) const ETHER_DUST: Amount = Amount(1_500_00); +// 5 DAI +#[allow(clippy::inconsistent_digit_grouping)] +pub(crate) const DAI_DUST: Amount = Amount(5_000_000_00); diff --git a/processor/ethereum/src/primitives/output.rs b/processor/ethereum/src/primitives/output.rs new file mode 100644 index 00000000..797b528d --- /dev/null +++ b/processor/ethereum/src/primitives/output.rs @@ -0,0 +1,182 @@ +use std::io; + +use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1}; + +use alloy_core::primitives::U256; + +use scale::{Encode, Decode}; +use borsh::{BorshSerialize, BorshDeserialize}; + +use serai_client::{ + primitives::{ExternalNetworkId, ExternalCoin, Amount, ExternalBalance}, + networks::ethereum::Address, +}; + +use primitives::{OutputType, ReceivedOutput}; +use ethereum_router::{Coin as EthereumCoin, InInstruction as EthereumInInstruction}; + +use crate::{DAI, ETHER_DUST}; + +fn coin_to_serai_coin(coin: &EthereumCoin) -> Option { + match coin { + EthereumCoin::Ether => Some(ExternalCoin::Ether), + EthereumCoin::Erc20(token) => { + if *token == DAI { + return Some(ExternalCoin::Dai); + } + None + } + } +} + +fn amount_to_serai_amount(coin: ExternalCoin, amount: U256) -> Amount { + assert_eq!(coin.network(), ExternalNetworkId::Ethereum); + assert_eq!(coin.decimals(), 8); + // Remove 10 decimals so we go from 18 decimals to 8 decimals + let divisor = U256::from(10_000_000_000u64); + // This is valid up to 184b, which is assumed for the coins allowed + Amount(u64::try_from(amount / divisor).unwrap()) +} + +#[derive( + Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize, +)] +pub(crate) struct OutputId(pub(crate) [u8; 40]); +impl Default for OutputId { + fn default() -> Self { + Self([0; 40]) + } +} +impl AsRef<[u8]> for OutputId { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} +impl AsMut<[u8]> for OutputId { + fn as_mut(&mut self) -> &mut [u8] { + self.0.as_mut() + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) enum Output { + Output { key: ::G, instruction: EthereumInInstruction }, + Eventuality { key: ::G, nonce: u64 }, +} +impl ReceivedOutput<::G, Address> for Output { + type Id = OutputId; + type TransactionId = [u8; 32]; + + fn kind(&self) -> OutputType { + match 
self { + // All outputs received are External + Output::Output { .. } => OutputType::External, + // Yet upon Eventuality completions, we report a Change output to ensure synchrony per the + // scanner's documented bounds + Output::Eventuality { .. } => OutputType::Change, + } + } + + fn id(&self) -> Self::Id { + match self { + Output::Output { key: _, instruction } => { + let mut id = [0; 40]; + id[.. 32].copy_from_slice(&instruction.id.block_hash); + id[32 ..].copy_from_slice(&instruction.id.index_within_block.to_le_bytes()); + OutputId(id) + } + // Yet upon Eventuality completions, we report a Change output to ensure synchrony per the + // scanner's documented bounds + Output::Eventuality { key: _, nonce } => { + let mut id = [0; 40]; + id[.. 8].copy_from_slice(&nonce.to_le_bytes()); + OutputId(id) + } + } + } + + fn transaction_id(&self) -> Self::TransactionId { + match self { + Output::Output { key: _, instruction } => instruction.transaction_hash, + Output::Eventuality { key: _, nonce } => { + let mut id = [0; 32]; + id[.. 8].copy_from_slice(&nonce.to_le_bytes()); + id + } + } + } + + fn key(&self) -> ::G { + match self { + Output::Output { key, .. } | Output::Eventuality { key, .. } => *key, + } + } + + fn presumed_origin(&self) -> Option
{ + match self { + Output::Output { key: _, instruction } => Some(Address::Address(*instruction.from.0)), + Output::Eventuality { .. } => None, + } + } + + fn balance(&self) -> ExternalBalance { + match self { + Output::Output { key: _, instruction } => { + let coin = coin_to_serai_coin(&instruction.coin).unwrap_or_else(|| { + panic!( + "mapping coin from an EthereumInInstruction with coin {}, which we don't handle.", + "this never should have been yielded" + ) + }); + ExternalBalance { coin, amount: amount_to_serai_amount(coin, instruction.amount) } + } + Output::Eventuality { .. } => { + ExternalBalance { coin: ExternalCoin::Ether, amount: ETHER_DUST } + } + } + } + fn data(&self) -> &[u8] { + match self { + Output::Output { key: _, instruction } => &instruction.data, + Output::Eventuality { .. } => &[], + } + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + Output::Output { key, instruction } => { + writer.write_all(&[0])?; + writer.write_all(key.to_bytes().as_ref())?; + instruction.serialize(writer) + } + Output::Eventuality { key, nonce } => { + writer.write_all(&[1])?; + writer.write_all(key.to_bytes().as_ref())?; + writer.write_all(&nonce.to_le_bytes()) + } + } + } + fn read(reader: &mut R) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + if kind[0] >= 2 { + Err(io::Error::other("unknown Output type"))?; + } + + Ok(match kind[0] { + 0 => { + let key = Secp256k1::read_G(reader)?; + let instruction = EthereumInInstruction::deserialize_reader(reader)?; + Self::Output { key, instruction } + } + 1 => { + let key = Secp256k1::read_G(reader)?; + let mut nonce = [0; 8]; + reader.read_exact(&mut nonce)?; + let nonce = u64::from_le_bytes(nonce); + Self::Eventuality { key, nonce } + } + _ => unreachable!(), + }) + } +} diff --git a/processor/ethereum/src/primitives/transaction.rs b/processor/ethereum/src/primitives/transaction.rs new file mode 100644 index 00000000..a698fdb4 --- /dev/null +++ b/processor/ethereum/src/primitives/transaction.rs @@ -0,0 +1,189 @@ +use std::io; + +use ciphersuite::Secp256k1; +use frost::dkg::ThresholdKeys; + +use alloy_core::primitives::U256; + +use serai_client::networks::ethereum::Address; + +use scheduler::SignableTransaction; + +use ethereum_primitives::keccak256; +use ethereum_schnorr::{PublicKey, Signature}; +use ethereum_router::{Coin, OutInstructions, Executed, Router}; + +use crate::{output::OutputId, machine::ClonableTransctionMachine}; + +#[derive(Clone, PartialEq, Debug)] +pub(crate) enum Action { + SetKey { chain_id: U256, nonce: u64, key: PublicKey }, + Batch { chain_id: U256, nonce: u64, coin: Coin, fee: U256, outs: Vec<(Address, U256)> }, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) struct Eventuality(pub(crate) Executed); + +impl Action { + pub(crate) fn nonce(&self) -> u64 { + match self { + Action::SetKey { nonce, .. } | Action::Batch { nonce, .. } => *nonce, + } + } + + pub(crate) fn message(&self) -> Vec { + match self { + Action::SetKey { chain_id, nonce, key } => { + Router::update_serai_key_message(*chain_id, *nonce, key) + } + Action::Batch { chain_id, nonce, coin, fee, outs } => Router::execute_message( + *chain_id, + *nonce, + *coin, + *fee, + OutInstructions::from(outs.as_ref()), + ), + } + } + + pub(crate) fn eventuality(&self) -> Eventuality { + Eventuality(match self { + Self::SetKey { chain_id: _, nonce, key } => { + Executed::NextSeraiKeySet { nonce: *nonce, key: key.eth_repr() } + } + Self::Batch { chain_id: _, nonce, .. 
} => { + Executed::Batch { nonce: *nonce, message_hash: keccak256(self.message()), results: vec![] } + } + }) + } +} + +#[derive(Clone, PartialEq, Debug)] +pub(crate) struct Transaction(pub(crate) Action, pub(crate) Signature); +impl scheduler::Transaction for Transaction { + fn read(reader: &mut impl io::Read) -> io::Result { + let action = Action::read(reader)?; + let signature = Signature::read(reader)?; + Ok(Transaction(action, signature)) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + self.0.write(writer)?; + self.1.write(writer)?; + Ok(()) + } +} + +impl SignableTransaction for Action { + type Transaction = Transaction; + type Ciphersuite = Secp256k1; + type PreprocessMachine = ClonableTransctionMachine; + + fn read(reader: &mut impl io::Read) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + if kind[0] >= 2 { + Err(io::Error::other("unrecognized Action type"))?; + } + + let mut chain_id = [0; 32]; + reader.read_exact(&mut chain_id)?; + let chain_id = U256::from_be_bytes(chain_id); + + let mut nonce = [0; 8]; + reader.read_exact(&mut nonce)?; + let nonce = u64::from_le_bytes(nonce); + + Ok(match kind[0] { + 0 => { + let mut key = [0; 32]; + reader.read_exact(&mut key)?; + let key = + PublicKey::from_eth_repr(key).ok_or_else(|| io::Error::other("invalid key in Action"))?; + + Action::SetKey { chain_id, nonce, key } + } + 1 => { + let coin = borsh::from_reader(reader)?; + + let mut fee = [0; 32]; + reader.read_exact(&mut fee)?; + let fee = U256::from_le_bytes(fee); + + let mut outs_len = [0; 4]; + reader.read_exact(&mut outs_len)?; + let outs_len = usize::try_from(u32::from_le_bytes(outs_len)).unwrap(); + + let mut outs = vec![]; + for _ in 0 .. outs_len { + let address = borsh::from_reader(reader)?; + + let mut amount = [0; 32]; + reader.read_exact(&mut amount)?; + let amount = U256::from_le_bytes(amount); + + outs.push((address, amount)); + } + Action::Batch { chain_id, nonce, coin, fee, outs } + } + _ => unreachable!(), + }) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + match self { + Self::SetKey { chain_id, nonce, key } => { + writer.write_all(&[0])?; + writer.write_all(&chain_id.to_be_bytes::<32>())?; + writer.write_all(&nonce.to_le_bytes())?; + writer.write_all(&key.eth_repr()) + } + Self::Batch { chain_id, nonce, coin, fee, outs } => { + writer.write_all(&[1])?; + writer.write_all(&chain_id.to_be_bytes::<32>())?; + writer.write_all(&nonce.to_le_bytes())?; + borsh::BorshSerialize::serialize(coin, writer)?; + writer.write_all(&fee.as_le_bytes())?; + writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?; + for (address, amount) in outs { + borsh::BorshSerialize::serialize(address, writer)?; + writer.write_all(&amount.as_le_bytes())?; + } + Ok(()) + } + } + } + + fn id(&self) -> [u8; 32] { + let mut res = [0; 32]; + res[.. 8].copy_from_slice(&self.nonce().to_le_bytes()); + res + } + + fn sign(self, keys: ThresholdKeys) -> Self::PreprocessMachine { + ClonableTransctionMachine { keys, action: self } + } +} + +impl primitives::Eventuality for Eventuality { + type OutputId = OutputId; + + fn id(&self) -> [u8; 32] { + let mut res = [0; 32]; + res[.. 
8].copy_from_slice(&self.0.nonce().to_le_bytes()); + res + } + + fn lookup(&self) -> Vec { + self.0.nonce().to_le_bytes().to_vec() + } + + fn singular_spent_output(&self) -> Option { + None + } + + fn read(reader: &mut impl io::Read) -> io::Result { + Ok(Self(borsh::from_reader(reader)?)) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + borsh::BorshSerialize::serialize(&self.0, writer) + } +} diff --git a/processor/ethereum/src/publisher.rs b/processor/ethereum/src/publisher.rs new file mode 100644 index 00000000..3d18a6ef --- /dev/null +++ b/processor/ethereum/src/publisher.rs @@ -0,0 +1,126 @@ +use core::future::Future; +use std::sync::Arc; + +use alloy_rlp::Encodable; + +use alloy_transport::{TransportErrorKind, RpcError}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::RootProvider; + +use tokio::{ + sync::{RwLockReadGuard, RwLock}, + io::{AsyncReadExt, AsyncWriteExt}, + net::TcpStream, +}; + +use serai_db::Db; + +use ethereum_schnorr::PublicKey; +use ethereum_router::{OutInstructions, Router}; + +use crate::{ + InitialSeraiKey, + transaction::{Action, Transaction}, +}; + +#[derive(Clone)] +pub(crate) struct TransactionPublisher { + db: D, + rpc: Arc>, + router: Arc>>, + relayer_url: String, +} + +impl TransactionPublisher { + pub(crate) fn new(db: D, rpc: Arc>, relayer_url: String) -> Self { + Self { db, rpc, router: Arc::new(RwLock::new(None)), relayer_url } + } + + // This will always return Ok(Some(_)) or Err(_), never Ok(None) + async fn router( + &self, + ) -> Result>, RpcError> { + let router = self.router.read().await; + + // If the router is None, find it on-chain + if router.is_none() { + drop(router); + let mut router = self.router.write().await; + // Check again if it's None in case a different task already did this + if router.is_none() { + let Some(router_actual) = Router::new( + self.rpc.clone(), + &PublicKey::new( + InitialSeraiKey::get(&self.db) + .expect("publishing a transaction yet never confirmed a key") + .0, + ) + .expect("initial key used by Serai wasn't representable on Ethereum"), + ) + .await? + else { + Err(TransportErrorKind::Custom( + "publishing transaction yet couldn't find router on chain. was our node reset?" + .to_string() + .into(), + ))? + }; + *router = Some(router_actual); + } + return Ok(router.downgrade()); + } + + Ok(router) + } +} + +impl signers::TransactionPublisher for TransactionPublisher { + type EphemeralError = RpcError; + + fn publish( + &self, + tx: Transaction, + ) -> impl Send + Future> { + async move { + let router = self.router().await?; + let router = router.as_ref().unwrap(); + + let nonce = tx.0.nonce(); + // Convert from an Action (an internal representation of a signable event) to a TxLegacy + let tx = match tx.0 { + Action::SetKey { chain_id: _, nonce: _, key } => router.update_serai_key(&key, &tx.1), + Action::Batch { chain_id: _, nonce: _, coin, fee, outs } => { + router.execute(coin, fee, OutInstructions::from(outs.as_ref()), &tx.1) + } + }; + + // Nonce + let mut msg = nonce.to_le_bytes().to_vec(); + // Transaction + tx.encode(&mut msg); + + let Ok(mut socket) = TcpStream::connect(&self.relayer_url).await else { + Err(TransportErrorKind::Custom( + "couldn't connect to the relayer server".to_string().into(), + ))? + }; + let Ok(()) = socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await else { + Err(TransportErrorKind::Custom( + "couldn't send the message's len to the relayer server".to_string().into(), + ))? 
+ }; + let Ok(()) = socket.write_all(&msg).await else { + Err(TransportErrorKind::Custom( + "couldn't write the message to the relayer server".to_string().into(), + ))? + }; + if socket.read_u8().await.ok() != Some(1) { + Err(TransportErrorKind::Custom( + "didn't get the ack from the relayer server".to_string().into(), + ))?; + } + + Ok(()) + } + } +} diff --git a/processor/ethereum/src/rpc.rs b/processor/ethereum/src/rpc.rs new file mode 100644 index 00000000..57c14f59 --- /dev/null +++ b/processor/ethereum/src/rpc.rs @@ -0,0 +1,232 @@ +use core::future::Future; +use std::{sync::Arc, collections::HashSet}; + +use alloy_core::primitives::B256; +use alloy_rpc_types_eth::{Header, BlockTransactionsKind, BlockNumberOrTag}; +use alloy_transport::{RpcError, TransportErrorKind}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +use serai_client::primitives::{ExternalNetworkId, ExternalCoin, Amount}; + +use tokio::task::JoinSet; + +use serai_db::Db; + +use scanner::ScannerFeed; + +use ethereum_schnorr::PublicKey; +use ethereum_router::{InInstruction as EthereumInInstruction, Executed, Router}; + +use crate::{ + TOKENS, ETHER_DUST, DAI_DUST, InitialSeraiKey, + block::{Epoch, FullEpoch}, +}; + +#[derive(Clone)] +pub(crate) struct Rpc { + pub(crate) db: D, + pub(crate) provider: Arc>, +} + +impl ScannerFeed for Rpc { + const NETWORK: ExternalNetworkId = ExternalNetworkId::Ethereum; + + // We only need one confirmation as Ethereum properly finalizes + const CONFIRMATIONS: u64 = 1; + // The window length should be roughly an hour + const WINDOW_LENGTH: u64 = 10; + + const TEN_MINUTES: u64 = 2; + + type Block = FullEpoch; + + type EphemeralError = RpcError; + + fn latest_finalized_block_number( + &self, + ) -> impl Send + Future> { + async move { + let actual_number = self + .provider + .get_block(BlockNumberOrTag::Finalized.into(), BlockTransactionsKind::Hashes) + .await? + .ok_or_else(|| { + TransportErrorKind::Custom("there was no finalized block".to_string().into()) + })? + .header + .number; + // Error if there hasn't been a full epoch yet + if actual_number < 32 { + Err(TransportErrorKind::Custom( + "there has not been a completed epoch yet".to_string().into(), + ))? + } + // The divison by 32 returns the amount of completed epochs + // Converting from amount of completed epochs to the latest completed epoch requires + // subtracting 1 + let latest_full_epoch = (actual_number / 32) - 1; + Ok(latest_full_epoch) + } + } + + fn time_of_block( + &self, + number: u64, + ) -> impl Send + Future> { + async move { + let header = self + .provider + .get_block(BlockNumberOrTag::Number(number).into(), BlockTransactionsKind::Hashes) + .await? + .ok_or_else(|| { + TransportErrorKind::Custom( + "asked for time of a block our node doesn't have".to_string().into(), + ) + })? + .header; + // This is monotonic ever since the merge + // https://github.com/ethereum/consensus-specs/blob/4afe39822c9ad9747e0f5635cca117c18441ec1b + // /specs/bellatrix/beacon-chain.md?plain=1#L393-L394 + Ok(header.timestamp) + } + } + + fn unchecked_block_header_by_number( + &self, + number: u64, + ) -> impl Send + + Future::Header, Self::EphemeralError>> + { + async move { + let start = number * 32; + let prior_end_hash = if start == 0 { + [0; 32] + } else { + self + .provider + .get_block((start - 1).into(), BlockTransactionsKind::Hashes) + .await? + .ok_or_else(|| { + TransportErrorKind::Custom( + format!("ethereum node didn't have requested block: {number:?}. 
was the node reset?") + .into(), + ) + })? + .header + .hash + .into() + }; + + let end_header = self + .provider + .get_block((start + 31).into(), BlockTransactionsKind::Hashes) + .await? + .ok_or_else(|| { + TransportErrorKind::Custom( + format!("ethereum node didn't have requested block: {number:?}. was the node reset?") + .into(), + ) + })? + .header; + + let end_hash = end_header.hash.into(); + + Ok(Epoch { prior_end_hash, end_hash }) + } + } + + fn unchecked_block_by_number( + &self, + number: u64, + ) -> impl Send + Future> { + async move { + let epoch = self.unchecked_block_header_by_number(number).await?; + + let Some(router) = Router::new( + self.provider.clone(), + &PublicKey::new( + InitialSeraiKey::get(&self.db).expect("fetching a block yet never confirmed a key").0, + ) + .expect("initial key used by Serai wasn't representable on Ethereum"), + ) + .await? + else { + Err(TransportErrorKind::Custom("router wasn't deployed on-chain yet".to_string().into()))? + }; + + async fn sync_block( + router: Router, + block: Header, + ) -> Result<(Vec, Vec), RpcError> { + let instructions = router + .in_instructions_unordered(block.number ..= block.number, &HashSet::from(TOKENS)) + .await?; + + let executed = router.executed(block.number ..= block.number).await?; + + Ok((instructions, executed)) + } + + // We use JoinSet here to minimize the latency of the variety of requests we make. For each + // JoinError that may occur, we unwrap it as no underlying tasks should panic + let mut join_set = JoinSet::new(); + let mut to_check = epoch.end_hash; + // TODO: This makes 32 sequential requests. We should run them in parallel using block + // nunbers + while to_check != epoch.prior_end_hash { + let to_check_block = self + .provider + .get_block(B256::from(to_check).into(), BlockTransactionsKind::Hashes) + .await? + .ok_or_else(|| { + TransportErrorKind::Custom( + format!( + "ethereum node didn't have requested block: {}. was the node reset?", + hex::encode(to_check) + ) + .into(), + ) + })? 
+ .header; + + // Update the next block to check + to_check = *to_check_block.parent_hash; + + // Spawn a task to sync this block + join_set.spawn(sync_block(router.clone(), to_check_block)); + } + + let mut instructions = vec![]; + let mut executed = vec![]; + while let Some(instructions_and_executed) = join_set.join_next().await { + let (mut these_instructions, mut these_executed) = instructions_and_executed.unwrap()?; + instructions.append(&mut these_instructions); + executed.append(&mut these_executed); + } + + Ok(FullEpoch { epoch, instructions, executed }) + } + } + + fn dust(coin: ExternalCoin) -> Amount { + assert_eq!(coin.network(), ExternalNetworkId::Ethereum); + match coin { + ExternalCoin::Ether => ETHER_DUST, + ExternalCoin::Dai => DAI_DUST, + _ => unreachable!(), + } + } + + fn cost_to_aggregate( + &self, + coin: ExternalCoin, + _reference_block: &Self::Block, + ) -> impl Send + Future> { + async move { + assert_eq!(coin.network(), ExternalNetworkId::Ethereum); + // There is no cost to aggregate as we receive to an account + Ok(Amount(0)) + } + } +} diff --git a/processor/ethereum/src/scheduler.rs b/processor/ethereum/src/scheduler.rs new file mode 100644 index 00000000..207792ec --- /dev/null +++ b/processor/ethereum/src/scheduler.rs @@ -0,0 +1,157 @@ +use std::collections::HashMap; + +use alloy_core::primitives::U256; + +use serai_client::{ + primitives::{ExternalNetworkId, ExternalCoin, ExternalBalance}, + networks::ethereum::Address, +}; + +use serai_db::Db; + +use primitives::Payment; +use scanner::{KeyFor, AddressFor, EventualityFor}; + +use ethereum_schnorr::PublicKey; +use ethereum_router::Coin as EthereumCoin; + +use crate::{DAI, transaction::Action, rpc::Rpc}; + +fn coin_to_ethereum_coin(coin: ExternalCoin) -> EthereumCoin { + assert_eq!(coin.network(), ExternalNetworkId::Ethereum); + match coin { + ExternalCoin::Ether => EthereumCoin::Ether, + ExternalCoin::Dai => EthereumCoin::Erc20(DAI), + _ => unreachable!(), + } +} + +fn balance_to_ethereum_amount(balance: ExternalBalance) -> U256 { + assert_eq!(balance.coin.network(), ExternalNetworkId::Ethereum); + assert_eq!(balance.coin.decimals(), 8); + // Restore 10 decimals so we go from 8 decimals to 18 decimals + // TODO: Document the expectation all integrated coins have 18 decimals + let factor = U256::from(10_000_000_000u64); + U256::from(balance.amount.0) * factor +} + +#[derive(Clone)] +pub(crate) struct SmartContract { + pub(crate) chain_id: U256, +} +impl smart_contract_scheduler::SmartContract> for SmartContract { + type SignableTransaction = Action; + + fn rotate( + &self, + nonce: u64, + _retiring_key: KeyFor>, + new_key: KeyFor>, + ) -> (Self::SignableTransaction, EventualityFor>) { + let action = Action::SetKey { + chain_id: self.chain_id, + nonce, + key: PublicKey::new(new_key).expect("rotating to an invald key"), + }; + (action.clone(), action.eventuality()) + } + + fn fulfill( + &self, + mut nonce: u64, + _key: KeyFor>, + payments: Vec>>>, + ) -> Vec<(Self::SignableTransaction, EventualityFor>)> { + // Sort by coin + let mut outs = HashMap::<_, _>::new(); + for payment in payments { + let coin = payment.balance().coin; + outs + .entry(coin) + .or_insert_with(|| Vec::with_capacity(1)) + .push((payment.address().clone(), balance_to_ethereum_amount(payment.balance()))); + } + + let mut res = vec![]; + for coin in [ExternalCoin::Ether, ExternalCoin::Dai] { + let Some(outs) = outs.remove(&coin) else { continue }; + assert!(!outs.is_empty()); + + let fee_per_gas = match coin { + // 10 gwei + 
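// A rough sense of scale for these hard-coded fee rates (a sketch, assuming alloy's
// Unit::GWEI is 10^9 wei): the Ether arm below prices gas at 10 * 10^9 wei, so a payment
// consuming 100_000 gas would be charged 10^15 wei, i.e. 0.001 ETH. The DAI arm expresses
// a corresponding per-gas rate denominated in the token's 18-decimal base units. This
// charge is deducted from each out's amount further down, while the batch's `fee` field
// carries the summed charge handed to the Router.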
ExternalCoin::Ether => { + U256::try_from(10u64).unwrap() * alloy_core::primitives::utils::Unit::GWEI.wei() + } + // 0.0003 DAI + ExternalCoin::Dai => { + U256::try_from(30u64).unwrap() * alloy_core::primitives::utils::Unit::TWEI.wei() + } + _ => unreachable!(), + }; + + // The gas required to perform any interaction with the Router. + const BASE_GAS: u32 = 0; // TODO + + // The gas required to handle an additional payment to an address, in the worst case. + const ADDRESS_PAYMENT_GAS: u32 = 0; // TODO + + // The gas required to handle an additional payment to an smart contract, in the worst case. + // This does not include the explicit gas budget defined within the address specification. + const CONTRACT_PAYMENT_GAS: u32 = 0; // TODO + + // The maximum amount of gas for a batch. + const BATCH_GAS_LIMIT: u32 = 10_000_000; + + // Split these outs into batches, respecting BATCH_GAS_LIMIT + let mut batches = vec![vec![]]; + let mut current_gas = BASE_GAS; + for out in outs { + let payment_gas = match &out.0 { + Address::Address(_) => ADDRESS_PAYMENT_GAS, + Address::Contract(deployment) => CONTRACT_PAYMENT_GAS + deployment.gas_limit(), + }; + if (current_gas + payment_gas) > BATCH_GAS_LIMIT { + assert!(!batches.last().unwrap().is_empty()); + batches.push(vec![]); + current_gas = BASE_GAS; + } + batches.last_mut().unwrap().push(out); + current_gas += payment_gas; + } + + // Push each batch onto the result + for mut outs in batches { + let mut total_gas = 0; + + let base_gas_per_payment = BASE_GAS.div_ceil(u32::try_from(outs.len()).unwrap()); + // Deduce the fee from each out + for out in &mut outs { + let payment_gas = base_gas_per_payment + + match &out.0 { + Address::Address(_) => ADDRESS_PAYMENT_GAS, + Address::Contract(deployment) => CONTRACT_PAYMENT_GAS + deployment.gas_limit(), + }; + total_gas += payment_gas; + + let payment_gas_cost = U256::try_from(payment_gas).unwrap() * fee_per_gas; + out.1 -= payment_gas_cost; + } + + res.push(Action::Batch { + chain_id: self.chain_id, + nonce, + coin: coin_to_ethereum_coin(coin), + fee: U256::try_from(total_gas).unwrap() * fee_per_gas, + outs, + }); + nonce += 1; + } + } + // Ensure we handled all payments we're supposed to + assert!(outs.is_empty()); + + res.into_iter().map(|action| (action.clone(), action.eventuality())).collect() + } +} + +pub(crate) type Scheduler = smart_contract_scheduler::Scheduler, SmartContract>; diff --git a/processor/ethereum/test-primitives/Cargo.toml b/processor/ethereum/test-primitives/Cargo.toml new file mode 100644 index 00000000..6d3b4c1d --- /dev/null +++ b/processor/ethereum/test-primitives/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "serai-ethereum-test-primitives" +version = "0.1.0" +description = "Test primitives for Ethereum" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum/test-primitives" +authors = ["Luke Parker "] +edition = "2021" +publish = false +rust-version = "1.81" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +k256 = { version = "0.13", default-features = false, features = ["std"] } + +alloy-core = { version = "0.8", default-features = false } +alloy-consensus = { version = "0.9", default-features = false, features = ["std"] } + +alloy-rpc-types-eth = { version = "0.9", default-features = false } +alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } +alloy-provider = { 
version = "0.9", default-features = false } + +ethereum-primitives = { package = "serai-processor-ethereum-primitives", path = "../primitives", default-features = false } diff --git a/processor/ethereum/test-primitives/LICENSE b/processor/ethereum/test-primitives/LICENSE new file mode 100644 index 00000000..41d5a261 --- /dev/null +++ b/processor/ethereum/test-primitives/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/ethereum/test-primitives/README.md b/processor/ethereum/test-primitives/README.md new file mode 100644 index 00000000..efb4d0a4 --- /dev/null +++ b/processor/ethereum/test-primitives/README.md @@ -0,0 +1,5 @@ +# Ethereum Router + +The [Router contract](./contracts/Router.sol) is extensively documented to ensure clarity and +understanding of the design decisions made. Please refer to it for understanding of why/what this +is. diff --git a/processor/ethereum/test-primitives/src/lib.rs b/processor/ethereum/test-primitives/src/lib.rs new file mode 100644 index 00000000..47cc983e --- /dev/null +++ b/processor/ethereum/test-primitives/src/lib.rs @@ -0,0 +1,117 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use k256::{elliptic_curve::sec1::ToEncodedPoint, ProjectivePoint}; + +use alloy_core::{ + primitives::{Address, U256, Bytes, PrimitiveSignature, TxKind}, + hex::FromHex, +}; +use alloy_consensus::{SignableTransaction, TxLegacy, Signed}; + +use alloy_rpc_types_eth::TransactionReceipt; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +use ethereum_primitives::{keccak256, deterministically_sign}; + +fn address(point: &ProjectivePoint) -> [u8; 20] { + let encoded_point = point.to_encoded_point(false); + // Last 20 bytes of the hash of the concatenated x and y coordinates + // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point + keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap() +} + +/// Fund an account. +pub async fn fund_account(provider: &RootProvider, address: Address, value: U256) { + let _: () = provider + .raw_request("anvil_setBalance".into(), [address.to_string(), value.to_string()]) + .await + .unwrap(); +} + +/// Publish an already-signed transaction. +pub async fn publish_tx( + provider: &RootProvider, + tx: Signed, +) -> TransactionReceipt { + // Fund the sender's address + fund_account( + provider, + tx.recover_signer().unwrap(), + (U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price)) + tx.tx().value, + ) + .await; + + let (tx, sig, _) = tx.into_parts(); + let mut bytes = vec![]; + tx.into_signed(sig).eip2718_encode(&mut bytes); + let pending_tx = provider.send_raw_transaction(&bytes).await.unwrap(); + pending_tx.get_receipt().await.unwrap() +} + +/// Deploy a contract. +/// +/// The contract deployment will be done by a random account. 
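// A minimal usage sketch (the artifact path and empty constructor arguments here are
// hypothetical): assuming `provider` is a `RootProvider<SimpleRequest>` pointed at a dev
// node such as anvil,
//
//   let schnorr = deploy_contract(&provider, "artifacts/Schnorr.bin", &[]).await;
//
// funds a throwaway deployer, publishes the creation transaction, and returns the deployed
// contract's address, which can then serve as the `to` of later `send` calls.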
+pub async fn deploy_contract( + provider: &RootProvider, + file_path: &str, + constructor_arguments: &[u8], +) -> Address { + let hex_bin_buf = std::fs::read_to_string(file_path).unwrap(); + let hex_bin = + if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf }; + let mut bin = Vec::::from(Bytes::from_hex(hex_bin).unwrap()); + bin.extend(constructor_arguments); + + let deployment_tx = TxLegacy { + chain_id: None, + nonce: 0, + // 100 gwei + gas_price: 100_000_000_000u128, + gas_limit: 1_000_000, + to: TxKind::Create, + value: U256::ZERO, + input: bin.into(), + }; + + let deployment_tx = deterministically_sign(deployment_tx); + + let receipt = publish_tx(provider, deployment_tx).await; + assert!(receipt.status()); + + receipt.contract_address.unwrap() +} + +/// Sign and send a transaction from the specified wallet. +/// +/// This assumes the wallet is funded. +pub async fn send( + provider: &RootProvider, + wallet: &k256::ecdsa::SigningKey, + mut tx: TxLegacy, +) -> TransactionReceipt { + let verifying_key = *wallet.verifying_key().as_affine(); + let address = Address::from(address(&verifying_key.into())); + + // https://github.com/alloy-rs/alloy/issues/539 + // let chain_id = provider.get_chain_id().await.unwrap(); + // tx.chain_id = Some(chain_id); + tx.chain_id = None; + tx.nonce = provider.get_transaction_count(address).await.unwrap(); + // 100 gwei + tx.gas_price = 100_000_000_000u128; + + let sig = wallet.sign_prehash_recoverable(tx.signature_hash().as_ref()).unwrap(); + assert_eq!(address, tx.clone().into_signed(sig.into()).recover_signer().unwrap()); + assert!( + provider.get_balance(address).await.unwrap() > + ((U256::from(tx.gas_price) * U256::from(tx.gas_limit)) + tx.value) + ); + + let mut bytes = vec![]; + tx.into_signed(PrimitiveSignature::from(sig)).eip2718_encode(&mut bytes); + let pending_tx = provider.send_raw_transaction(&bytes).await.unwrap(); + pending_tx.get_receipt().await.unwrap() +} diff --git a/processor/frost-attempt-manager/Cargo.toml b/processor/frost-attempt-manager/Cargo.toml new file mode 100644 index 00000000..2a397bac --- /dev/null +++ b/processor/frost-attempt-manager/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "serai-processor-frost-attempt-manager" +version = "0.1.0" +description = "A manager of multiple attempts of FROST signing protocols" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/frost-attempt-manager" +authors = ["Luke Parker "] +keywords = ["frost", "multisig", "threshold"] +edition = "2021" +publish = false +rust-version = "1.80" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.cargo-machete] +ignored = ["borsh", "scale"] + +[lints] +workspace = true + +[dependencies] +rand_core = { version = "0.6", default-features = false, features = ["std", "getrandom"] } + +frost = { package = "modular-frost", path = "../../crypto/frost", version = "^0.8.1", default-features = false } + +serai-validator-sets-primitives = { path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std"] } + +log = { version = "0.4", default-features = false, features = ["std"] } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } +serai-db = { path = "../../common/db" } + +messages = { package = "serai-processor-messages", path = 
"../messages" } diff --git a/processor/frost-attempt-manager/LICENSE b/processor/frost-attempt-manager/LICENSE new file mode 100644 index 00000000..41d5a261 --- /dev/null +++ b/processor/frost-attempt-manager/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/frost-attempt-manager/README.md b/processor/frost-attempt-manager/README.md new file mode 100644 index 00000000..08a61398 --- /dev/null +++ b/processor/frost-attempt-manager/README.md @@ -0,0 +1,6 @@ +# FROST Attempt Manager + +A library for helper structures to manage various attempts of a FROST signing +protocol. + +This library is interacted with via the `serai_processor_messages::sign` API. diff --git a/processor/frost-attempt-manager/src/individual.rs b/processor/frost-attempt-manager/src/individual.rs new file mode 100644 index 00000000..e918ff02 --- /dev/null +++ b/processor/frost-attempt-manager/src/individual.rs @@ -0,0 +1,284 @@ +use std::collections::HashMap; + +use rand_core::OsRng; + +use frost::{ + Participant, FrostError, + sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine}, +}; + +use serai_validator_sets_primitives::Session; + +use serai_db::{Get, DbTxn, Db, create_db}; +use messages::sign::{VariantSignId, SignId, ProcessorMessage}; + +create_db!( + FrostAttemptManager { + Attempted: (session: Session, id: VariantSignId) -> u32, + } +); + +/// An instance of a signing protocol with re-attempts handled internally. +#[allow(clippy::type_complexity)] +pub(crate) struct SigningProtocol { + db: D, + // The session this signing protocol is being conducted by. + session: Session, + // The `i` of our first, or starting, set of key shares we will be signing with. + // The key shares we sign with are expected to be continguous from this position. + start_i: Participant, + // The ID of this signing protocol. + id: VariantSignId, + // This accepts a vector of `root` machines in order to support signing with multiple key shares. + root: Vec, + preprocessed: HashMap, HashMap>)>, + // Here, we drop to a single machine as we only need one to complete the signature. + shared: HashMap< + u32, + ( + >::SignatureMachine, + HashMap>, + ), + >, +} + +impl SigningProtocol { + /// Create a new signing protocol. + pub(crate) fn new( + db: D, + session: Session, + start_i: Participant, + id: VariantSignId, + root: Vec, + ) -> Self { + log::info!("starting signing protocol {id:?}"); + + Self { + db, + session, + start_i, + id, + root, + preprocessed: HashMap::with_capacity(1), + shared: HashMap::with_capacity(1), + } + } + + /// Start a new attempt of the signing protocol. + /// + /// Returns the (serialized) preprocesses for the attempt. 
+ pub(crate) fn attempt(&mut self, attempt: u32) -> Vec { + /* + We'd get slashed as malicious if we: + 1) Preprocessed + 2) Rebooted + 3) On reboot, preprocessed again, sending new preprocesses which would be deduplicated by + the message-queue + 4) Got sent preprocesses + 5) Sent a share based on our new preprocesses, yet with everyone else expecting it to be + based on our old preprocesses + + We avoid this by saving to the DB we preprocessed before sending our preprocessed, and only + keeping our preprocesses for this instance of the processor. Accordingly, on reboot, we will + flag the prior preprocess and not send new preprocesses. This does require our own DB + transaction (to ensure we save to the DB we preprocessed before yielding the preprocess + messages). + + We also won't send the share we were supposed to, unfortunately, yet caching/reloading the + preprocess has enough safety issues it isn't worth the headache. + + Since we bind a signing attempt to the lifetime of the application, we're also safe against + nonce reuse (as the state machines enforce single-use and we never reuse a preprocess). + */ + { + let mut txn = self.db.txn(); + let prior_attempted = Attempted::get(&txn, self.session, self.id); + if Some(attempt) <= prior_attempted { + return vec![]; + } + Attempted::set(&mut txn, self.session, self.id, &attempt); + txn.commit(); + } + + log::debug!("attemting a new instance of signing protocol {:?}", self.id); + + let mut our_preprocesses = HashMap::with_capacity(self.root.len()); + let mut preprocessed = Vec::with_capacity(self.root.len()); + let mut preprocesses = Vec::with_capacity(self.root.len()); + for (i, machine) in self.root.iter().enumerate() { + let (machine, preprocess) = machine.clone().preprocess(&mut OsRng); + preprocessed.push(machine); + + let mut this_preprocess = Vec::with_capacity(64); + preprocess.write(&mut this_preprocess).unwrap(); + + our_preprocesses.insert( + Participant::new( + u16::from(self.start_i) + u16::try_from(i).expect("signing with 2**16 machines"), + ) + .expect("start_i + i exceeded the valid indexes for a Participant"), + this_preprocess.clone(), + ); + preprocesses.push(this_preprocess); + } + assert!(self.preprocessed.insert(attempt, (preprocessed, our_preprocesses)).is_none()); + + vec![ProcessorMessage::Preprocesses { + id: SignId { session: self.session, id: self.id, attempt }, + preprocesses, + }] + } + + /// Handle preprocesses for the signing protocol. + /// + /// Returns the (serialized) shares for the attempt. 
+ pub(crate) fn preprocesses( + &mut self, + attempt: u32, + serialized_preprocesses: HashMap>, + ) -> Vec { + log::debug!("handling preprocesses for signing protocol {:?}", self.id); + + let Some((machines, our_serialized_preprocesses)) = self.preprocessed.remove(&attempt) else { + return vec![]; + }; + + let mut msgs = Vec::with_capacity(1); + + let mut preprocesses = + HashMap::with_capacity(serialized_preprocesses.len() + our_serialized_preprocesses.len()); + for (i, serialized_preprocess) in + serialized_preprocesses.into_iter().chain(our_serialized_preprocesses) + { + let mut serialized_preprocess = serialized_preprocess.as_slice(); + let Ok(preprocess) = machines[0].read_preprocess(&mut serialized_preprocess) else { + msgs.push(ProcessorMessage::InvalidParticipant { session: self.session, participant: i }); + continue; + }; + if !serialized_preprocess.is_empty() { + msgs.push(ProcessorMessage::InvalidParticipant { session: self.session, participant: i }); + continue; + } + preprocesses.insert(i, preprocess); + } + // We throw out our preprocessed machines here, despite the fact they haven't been invalidated + // We could reuse them with a new set of valid preprocesses + // https://github.com/serai-dex/serai/issues/588 + if !msgs.is_empty() { + return msgs; + } + + let mut our_shares = HashMap::with_capacity(self.root.len()); + let mut shared = Vec::with_capacity(machines.len()); + let mut shares = Vec::with_capacity(machines.len()); + for (i, machine) in machines.into_iter().enumerate() { + let i = Participant::new( + u16::from(self.start_i) + u16::try_from(i).expect("signing with 2**16 machines"), + ) + .expect("start_i + i exceeded the valid indexes for a Participant"); + + let mut preprocesses = preprocesses.clone(); + assert!(preprocesses.remove(&i).is_some()); + + // TODO: Replace this with `()`, which requires making the message type part of the trait + let (machine, share) = match machine.sign(preprocesses, &[]) { + Ok((machine, share)) => (machine, share), + Err(e) => match e { + FrostError::InternalError(_) | + FrostError::InvalidParticipant(_, _) | + FrostError::InvalidSigningSet(_) | + FrostError::InvalidParticipantQuantity(_, _) | + FrostError::DuplicatedParticipant(_) | + FrostError::MissingParticipant(_) | + FrostError::InvalidShare(_) => { + panic!("FROST had an error which shouldn't be reachable: {e:?}"); + } + FrostError::InvalidPreprocess(i) => { + msgs + .push(ProcessorMessage::InvalidParticipant { session: self.session, participant: i }); + return msgs; + } + }, + }; + shared.push(machine); + + let mut this_share = Vec::with_capacity(32); + share.write(&mut this_share).unwrap(); + + our_shares.insert(i, this_share.clone()); + shares.push(this_share); + } + + assert!(self.shared.insert(attempt, (shared.swap_remove(0), our_shares)).is_none()); + log::debug!( + "successfully handled preprocesses for signing protocol {:?}, sending shares", + self.id, + ); + msgs.push(ProcessorMessage::Shares { + id: SignId { session: self.session, id: self.id, attempt }, + shares, + }); + msgs + } + + /// Process shares for the signing protocol. + /// + /// Returns the signature produced by the protocol. + pub(crate) fn shares( + &mut self, + attempt: u32, + serialized_shares: HashMap>, + ) -> Result> { + log::debug!("handling shares for signing protocol {:?}", self.id); + + let Some((machine, our_serialized_shares)) = self.shared.remove(&attempt) else { Err(vec![])? 
}; + + let mut msgs = Vec::with_capacity(1); + + let mut shares = HashMap::with_capacity(serialized_shares.len() + our_serialized_shares.len()); + for (i, serialized_share) in our_serialized_shares.into_iter().chain(serialized_shares) { + let mut serialized_share = serialized_share.as_slice(); + let Ok(share) = machine.read_share(&mut serialized_share) else { + msgs.push(ProcessorMessage::InvalidParticipant { session: self.session, participant: i }); + continue; + }; + if !serialized_share.is_empty() { + msgs.push(ProcessorMessage::InvalidParticipant { session: self.session, participant: i }); + continue; + } + shares.insert(i, share); + } + if !msgs.is_empty() { + Err(msgs)?; + } + + assert!(shares.remove(&self.start_i).is_some()); + + let signature = match machine.complete(shares) { + Ok(signature) => signature, + Err(e) => match e { + FrostError::InternalError(_) | + FrostError::InvalidParticipant(_, _) | + FrostError::InvalidSigningSet(_) | + FrostError::InvalidParticipantQuantity(_, _) | + FrostError::DuplicatedParticipant(_) | + FrostError::MissingParticipant(_) | + FrostError::InvalidPreprocess(_) => { + panic!("FROST had an error which shouldn't be reachable: {e:?}"); + } + FrostError::InvalidShare(i) => { + Err(vec![ProcessorMessage::InvalidParticipant { session: self.session, participant: i }])? + } + }, + }; + + log::info!("finished signing for protocol {:?}", self.id); + + Ok(signature) + } + + /// Cleanup the database entries for a specified signing protocol. + pub(crate) fn cleanup(txn: &mut impl DbTxn, session: Session, id: VariantSignId) { + Attempted::del(txn, session, id); + } +} diff --git a/processor/frost-attempt-manager/src/lib.rs b/processor/frost-attempt-manager/src/lib.rs new file mode 100644 index 00000000..670d8d9f --- /dev/null +++ b/processor/frost-attempt-manager/src/lib.rs @@ -0,0 +1,114 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use std::collections::HashMap; + +use frost::{Participant, sign::PreprocessMachine}; + +use serai_validator_sets_primitives::Session; + +use serai_db::{DbTxn, Db}; +use messages::sign::{VariantSignId, ProcessorMessage, CoordinatorMessage}; + +mod individual; +use individual::SigningProtocol; + +/// A response to handling a message from the coordinator. +pub enum Response { + /// Messages to send to the coordinator. + Messages(Vec), + /// A produced signature. + Signature { + /// The ID of the protocol this is for. + id: VariantSignId, + /// The signature. + signature: M::Signature, + }, +} + +/// A manager of attempts for a variety of signing protocols. +pub struct AttemptManager { + db: D, + session: Session, + start_i: Participant, + active: HashMap>, +} + +impl AttemptManager { + /// Create a new attempt manager. + /// + /// This will not restore any signing sessions from the database. Those must be re-registered. + pub fn new(db: D, session: Session, start_i: Participant) -> Self { + AttemptManager { db, session, start_i, active: HashMap::new() } + } + + /// Register a signing protocol to attempt. + /// + /// This ID must be unique to the session, across all attempt managers, protocols, etc. + pub fn register(&mut self, id: VariantSignId, machines: Vec) -> Vec { + let mut protocol = + SigningProtocol::new(self.db.clone(), self.session, self.start_i, id, machines); + let messages = protocol.attempt(0); + self.active.insert(id, protocol); + messages + } + + /// Retire a signing protocol. 
+ /// + /// This frees all memory used for it and means no further messages will be handled for it. + /// This does not stop the protocol from being re-registered and further worked on (with + /// undefined behavior) then. The higher-level context must never call `register` again with this + /// ID accordingly. + pub fn retire(&mut self, txn: &mut impl DbTxn, id: VariantSignId) { + if self.active.remove(&id).is_none() { + log::info!("retiring protocol {id:?}, which we didn't register/already retired"); + } else { + log::info!("retired signing protocol {id:?}"); + } + SigningProtocol::::cleanup(txn, self.session, id); + } + + /// Handle a message for a signing protocol. + /// + /// Handling a message multiple times is safe and will cause subsequent calls to return + /// `Response::Messages(vec![])`. Handling a message for a signing protocol which isn't being + /// worked on (potentially due to rebooting) will also return `Response::Messages(vec![])`. + pub fn handle(&mut self, msg: CoordinatorMessage) -> Response { + match msg { + CoordinatorMessage::Preprocesses { id, preprocesses } => { + let Some(protocol) = self.active.get_mut(&id.id) else { + log::trace!( + "handling preprocesses for signing protocol {:?}, which we're not actively running", + id.id, + ); + return Response::Messages(vec![]); + }; + Response::Messages(protocol.preprocesses(id.attempt, preprocesses)) + } + CoordinatorMessage::Shares { id, shares } => { + let Some(protocol) = self.active.get_mut(&id.id) else { + log::trace!( + "handling shares for signing protocol {:?}, which we're not actively running", + id.id, + ); + return Response::Messages(vec![]); + }; + match protocol.shares(id.attempt, shares) { + Ok(signature) => Response::Signature { id: id.id, signature }, + Err(messages) => Response::Messages(messages), + } + } + CoordinatorMessage::Reattempt { id } => { + let Some(protocol) = self.active.get_mut(&id.id) else { + log::trace!( + "reattempting signing protocol {:?}, which we're not actively running", + id.id, + ); + return Response::Messages(vec![]); + }; + Response::Messages(protocol.attempt(id.attempt)) + } + } + } +} diff --git a/processor/key-gen/Cargo.toml b/processor/key-gen/Cargo.toml new file mode 100644 index 00000000..d5051168 --- /dev/null +++ b/processor/key-gen/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "serai-processor-key-gen" +version = "0.1.0" +description = "Key generation for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/key-gen" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false +rust-version = "1.80" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.cargo-machete] +ignored = ["scale"] + +[lints] +workspace = true + +[dependencies] +# Macros +zeroize = { version = "1", default-features = false, features = ["std"] } + +# Libs +rand_core = { version = "0.6", default-features = false, features = ["std", "getrandom"] } +rand_chacha = { version = "0.3", default-features = false, features = ["std"] } + +# Cryptography +blake2 = { version = "0.10", default-features = false, features = ["std"] } +transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std"] } +ec-divisors = { package = "ec-divisors", path = "../../crypto/evrf/divisors", default-features = false } +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] } 
+dkg = { package = "dkg", path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-ristretto"] } + +# Substrate +serai-validator-sets-primitives = { path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std"] } + +# Encoders +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +# Application +log = { version = "0.4", default-features = false, features = ["std"] } +serai-db = { path = "../../common/db" } +messages = { package = "serai-processor-messages", path = "../messages" } diff --git a/processor/key-gen/LICENSE b/processor/key-gen/LICENSE new file mode 100644 index 00000000..41d5a261 --- /dev/null +++ b/processor/key-gen/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/key-gen/README.md b/processor/key-gen/README.md new file mode 100644 index 00000000..566d1035 --- /dev/null +++ b/processor/key-gen/README.md @@ -0,0 +1,8 @@ +# Key Generation + +This library implements the Distributed Key Generation (DKG) for the Serai +protocol. Two invocations of the eVRF-based DKG are performed, one for Ristretto +(to have a key to oraclize values onto the Serai blockchain with) and one for +the external network's curve. + +This library is interacted with via the `serai_processor_messages::key_gen` API. diff --git a/processor/key-gen/src/db.rs b/processor/key-gen/src/db.rs new file mode 100644 index 00000000..149fe1a2 --- /dev/null +++ b/processor/key-gen/src/db.rs @@ -0,0 +1,152 @@ +use core::marker::PhantomData; +use std::collections::HashMap; + +use zeroize::Zeroizing; + +use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; +use dkg::{Participant, ThresholdCore, ThresholdKeys, evrf::EvrfCurve}; + +use serai_validator_sets_primitives::Session; + +use borsh::{BorshSerialize, BorshDeserialize}; +use serai_db::{Get, DbTxn}; + +use crate::KeyGenParams; + +pub(crate) struct Params { + pub(crate) t: u16, + pub(crate) n: u16, + pub(crate) substrate_evrf_public_keys: + Vec<<::EmbeddedCurve as Ciphersuite>::G>, + pub(crate) network_evrf_public_keys: + Vec<<::EmbeddedCurve as Ciphersuite>::G>, +} + +#[derive(BorshSerialize, BorshDeserialize)] +struct RawParams { + t: u16, + substrate_evrf_public_keys: Vec<[u8; 32]>, + network_evrf_public_keys: Vec>, +} + +#[derive(BorshSerialize, BorshDeserialize)] +pub(crate) struct Participations { + pub(crate) substrate_participations: HashMap>, + pub(crate) network_participations: HashMap>, +} + +mod _db { + use serai_validator_sets_primitives::Session; + + use serai_db::{Get, DbTxn, create_db}; + + create_db!( + KeyGen { + Params: (session: &Session) -> super::RawParams, + Participations: (session: &Session) -> super::Participations, + KeyShares: (session: &Session) -> Vec, + } + ); +} + +pub(crate) struct KeyGenDb(PhantomData
<P>);
+impl<P: KeyGenParams> KeyGenDb<P> {
+  pub(crate) fn set_params(txn: &mut impl DbTxn, session: Session, params: Params<P>
) { + assert_eq!(params.substrate_evrf_public_keys.len(), params.network_evrf_public_keys.len()); + + _db::Params::set( + txn, + &session, + &RawParams { + t: params.t, + substrate_evrf_public_keys: params + .substrate_evrf_public_keys + .into_iter() + .map(|key| key.to_bytes()) + .collect(), + network_evrf_public_keys: params + .network_evrf_public_keys + .into_iter() + .map(|key| key.to_bytes().as_ref().to_vec()) + .collect(), + }, + ) + } + + pub(crate) fn params(getter: &impl Get, session: Session) -> Option> { + _db::Params::get(getter, &session).map(|params| Params { + t: params.t, + n: params + .network_evrf_public_keys + .len() + .try_into() + .expect("amount of keys exceeded the amount allowed during a DKG"), + substrate_evrf_public_keys: params + .substrate_evrf_public_keys + .into_iter() + .map(|key| { + <::EmbeddedCurve as Ciphersuite>::read_G(&mut key.as_slice()) + .unwrap() + }) + .collect(), + network_evrf_public_keys: params + .network_evrf_public_keys + .into_iter() + .map(|key| { + <::EmbeddedCurve as Ciphersuite>::read_G::< + &[u8], + >(&mut key.as_ref()) + .unwrap() + }) + .collect(), + }) + } + + pub(crate) fn set_participations( + txn: &mut impl DbTxn, + session: Session, + participations: &Participations, + ) { + _db::Participations::set(txn, &session, participations) + } + pub(crate) fn participations(getter: &impl Get, session: Session) -> Option { + _db::Participations::get(getter, &session) + } + + // Set the key shares for a session. + pub(crate) fn set_key_shares( + txn: &mut impl DbTxn, + session: Session, + substrate_keys: &[ThresholdKeys], + network_keys: &[ThresholdKeys], + ) { + assert_eq!(substrate_keys.len(), network_keys.len()); + + let mut keys = Zeroizing::new(vec![]); + for (substrate_keys, network_keys) in substrate_keys.iter().zip(network_keys) { + keys.extend(substrate_keys.serialize().as_slice()); + keys.extend(network_keys.serialize().as_slice()); + } + _db::KeyShares::set(txn, &session, &keys); + } + + #[allow(clippy::type_complexity)] + pub(crate) fn key_shares( + getter: &impl Get, + session: Session, + ) -> Option<(Vec>, Vec>)> + { + let keys = _db::KeyShares::get(getter, &session)?; + let mut keys: &[u8] = keys.as_ref(); + + let mut substrate_keys = vec![]; + let mut network_keys = vec![]; + while !keys.is_empty() { + substrate_keys.push(ThresholdKeys::new(ThresholdCore::read(&mut keys).unwrap())); + let mut these_network_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys).unwrap()); + P::tweak_keys(&mut these_network_keys); + network_keys.push(these_network_keys); + } + Some((substrate_keys, network_keys)) + } +} diff --git a/processor/key-gen/src/generators.rs b/processor/key-gen/src/generators.rs new file mode 100644 index 00000000..cff9c2f1 --- /dev/null +++ b/processor/key-gen/src/generators.rs @@ -0,0 +1,38 @@ +use core::any::{TypeId, Any}; +use std::{ + sync::{LazyLock, Mutex}, + collections::HashMap, +}; + +use dkg::evrf::*; + +use serai_validator_sets_primitives::MAX_KEY_SHARES_PER_SET; + +/// A cache of the generators used by the eVRF DKG. +/// +/// This performs a lookup of the Ciphersuite to its generators. Since the Ciphersuite is a +/// generic, this takes advantage of `Any`. This static is isolated in a module to ensure +/// correctness can be evaluated solely by reviewing these few lines of code. +/// +/// This is arguably over-engineered as of right now, as we only need generators for Ristretto +/// and N::Curve. 
By having this HashMap, we enable de-duplication of the Ristretto == N::Curve +/// case, and we automatically support the n-curve case (rather than hard-coding to the 2-curve +/// case). +static GENERATORS: LazyLock>> = + LazyLock::new(|| Mutex::new(HashMap::new())); + +pub(crate) fn generators() -> &'static EvrfGenerators { + GENERATORS + .lock() + .unwrap() + .entry(TypeId::of::()) + .or_insert_with(|| { + // If we haven't prior needed generators for this Ciphersuite, generate new ones + Box::leak(Box::new(EvrfGenerators::::new( + (MAX_KEY_SHARES_PER_SET * 2 / 3) + 1, + MAX_KEY_SHARES_PER_SET, + ))) + }) + .downcast_ref() + .unwrap() +} diff --git a/processor/key-gen/src/lib.rs b/processor/key-gen/src/lib.rs new file mode 100644 index 00000000..4db87b20 --- /dev/null +++ b/processor/key-gen/src/lib.rs @@ -0,0 +1,537 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use std::{io, collections::HashMap}; + +use zeroize::Zeroizing; + +use rand_core::{RngCore, SeedableRng, OsRng}; +use rand_chacha::ChaCha20Rng; + +use blake2::{Digest, Blake2s256}; +use transcript::{Transcript, RecommendedTranscript}; +use ciphersuite::{ + group::{Group, GroupEncoding}, + Ciphersuite, Ristretto, +}; +use dkg::{Participant, ThresholdKeys, evrf::*}; + +use serai_validator_sets_primitives::Session; +use messages::key_gen::*; + +use serai_db::{Get, DbTxn}; + +mod generators; +use generators::generators; + +mod db; +use db::{Params, Participations, KeyGenDb}; + +/// Parameters for a key generation. +pub trait KeyGenParams { + /// The ID for this instantiation. + const ID: &'static str; + + /// The curve used for the external network. + type ExternalNetworkCiphersuite: EvrfCurve< + EmbeddedCurve: Ciphersuite< + G: ec_divisors::DivisorCurve< + FieldElement = ::F, + >, + >, + >; + + /// Tweaks keys as necessary/beneficial. + /// + /// A default implementation which doesn't perform any tweaking is provided. + fn tweak_keys(keys: &mut ThresholdKeys) { + let _ = keys; + } + + /// Encode keys as optimal. + /// + /// A default implementation is provided which calls the traditional `to_bytes`. + fn encode_key(key: ::G) -> Vec { + key.to_bytes().as_ref().to_vec() + } + + /// Decode keys from their optimal encoding. + /// + /// A default implementation is provided which calls the traditional `from_bytes`. + fn decode_key(mut key: &[u8]) -> Option<::G> { + let res = ::read_G(&mut key).ok()?; + if !key.is_empty() { + None?; + } + Some(res) + } +} + +/* + On the Serai blockchain, users specify their public keys on the embedded curves. Substrate does + not have the libraries for the embedded curves and is unable to evaluate if the keys are valid + or not. + + We could add the libraries for the embedded curves to the blockchain, yet this would be a + non-trivial scope for what's effectively an embedded context. It'd also permanently bind our + consensus to these arbitrary curves. We would have the benefit of being able to also require PoKs + for the keys, ensuring no one uses someone else's key (creating oddities there). Since someone + who uses someone else's key can't actually participate, all it does in effect is give more key + shares to the holder of the private key, and make us unable to rely on eVRF keys as a secure way + to index validators (hence the usage of `Participant` throughout the messages here). + + We could remove invalid keys from the DKG, yet this would create a view of the DKG only the + processor (which does have the embedded curves) has. 
We'd need to reconcile it with the view of + the DKG which does include all keys (even the invalid keys). + + The easiest solution is to keep the views consistent by replacing invalid keys with valid keys + (which no one has the private key for). This keeps the view consistent. This does prevent those + who posted invalid keys from participating, and receiving their keys, which is the understood and + declared effect of them posting invalid keys. Since at least `t` people must honestly participate + for the DKG to complete, and since their honest participation means they had valid keys, we do + ensure at least `t` people participated and the DKG result can be reconstructed. + + We do lose fault tolerance, yet only by losing those faulty. Accordingly, this is accepted. + + Returns the coerced keys and faulty participants. +*/ +fn coerce_keys( + key_bytes: &[impl AsRef<[u8]>], +) -> (Vec<::G>, Vec) { + fn evrf_key(key: &[u8]) -> Option<::G> { + let mut repr = <::G as GroupEncoding>::Repr::default(); + if repr.as_ref().len() != key.len() { + None?; + } + repr.as_mut().copy_from_slice(key); + let point = Option::<::G>::from(<_>::from_bytes(&repr))?; + if bool::from(point.is_identity()) { + None?; + } + Some(point) + } + + let mut keys = Vec::with_capacity(key_bytes.len()); + let mut faulty = vec![]; + for (i, key) in key_bytes.iter().enumerate() { + let i = Participant::new( + 1 + u16::try_from(i).expect("performing a key gen with more than u16::MAX participants"), + ) + .unwrap(); + keys.push(match evrf_key::(key.as_ref()) { + Some(key) => key, + None => { + // Mark this participant faulty + faulty.push(i); + + // Generate a random key + let mut rng = ChaCha20Rng::from_seed(Blake2s256::digest(key).into()); + loop { + let mut repr = <::G as GroupEncoding>::Repr::default(); + rng.fill_bytes(repr.as_mut()); + if let Some(key) = + Option::<::G>::from(<_>::from_bytes(&repr)) + { + break key; + } + } + } + }); + } + + (keys, faulty) +} + +/// An instance of the Serai key generation protocol. +#[derive(Debug)] +pub struct KeyGen { + substrate_evrf_private_key: + Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>, + network_evrf_private_key: + Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>, +} + +impl KeyGen
<P>
{ + /// Create a new key generation instance. + #[allow(clippy::new_ret_no_self)] + pub fn new( + substrate_evrf_private_key: Zeroizing< + <::EmbeddedCurve as Ciphersuite>::F, + >, + network_evrf_private_key: Zeroizing< + <::EmbeddedCurve as Ciphersuite>::F, + >, + ) -> KeyGen
<P>
{ + KeyGen { substrate_evrf_private_key, network_evrf_private_key } + } + + /// Fetch the key shares for a specific session. + #[allow(clippy::type_complexity)] + pub fn key_shares( + getter: &impl Get, + session: Session, + ) -> Option<(Vec>, Vec>)> + { + // This is safe, despite not having a txn, since it's a static value + // It doesn't change over time/in relation to other operations + // It is solely set or unset + KeyGenDb::
<P>
::key_shares(getter, session) + } + + /// Handle a message from the coordinator. + pub fn handle(&mut self, txn: &mut impl DbTxn, msg: CoordinatorMessage) -> Vec { + const SUBSTRATE_KEY_CONTEXT: &[u8] = b"substrate"; + const NETWORK_KEY_CONTEXT: &[u8] = b"network"; + fn context(session: Session, key_context: &[u8]) -> [u8; 32] { + // TODO2: Also embed the chain ID/genesis block + let mut transcript = RecommendedTranscript::new(b"Serai eVRF Key Gen"); + transcript.append_message(b"network", P::ID.as_bytes()); + transcript.append_message(b"session", session.0.to_le_bytes()); + transcript.append_message(b"key", key_context); + (&(&transcript.challenge(b"context"))[.. 32]).try_into().unwrap() + } + + match msg { + CoordinatorMessage::GenerateKey { session, threshold, evrf_public_keys } => { + log::info!("generating new key, session: {session:?}"); + + // Unzip the vector of eVRF keys + let substrate_evrf_public_keys = + evrf_public_keys.iter().map(|(key, _)| *key).collect::>(); + let (substrate_evrf_public_keys, mut faulty) = + coerce_keys::(&substrate_evrf_public_keys); + + let network_evrf_public_keys = + evrf_public_keys.into_iter().map(|(_, key)| key).collect::>(); + let (network_evrf_public_keys, additional_faulty) = + coerce_keys::(&network_evrf_public_keys); + faulty.extend(additional_faulty); + + // Participate for both Substrate and the network + fn participate( + context: [u8; 32], + threshold: u16, + evrf_public_keys: &[::G], + evrf_private_key: &Zeroizing<::F>, + output: &mut impl io::Write, + ) { + let participation = EvrfDkg::::participate( + &mut OsRng, + generators(), + context, + threshold, + evrf_public_keys, + evrf_private_key, + ); + participation.unwrap().write(output).unwrap(); + } + + let mut participation = Vec::with_capacity(2048); + participate::( + context::
<P>
(session, SUBSTRATE_KEY_CONTEXT), + threshold, + &substrate_evrf_public_keys, + &self.substrate_evrf_private_key, + &mut participation, + ); + participate::( + context::
<P>
(session, NETWORK_KEY_CONTEXT), + threshold, + &network_evrf_public_keys, + &self.network_evrf_private_key, + &mut participation, + ); + + // Save the params + KeyGenDb::
<P>
::set_params( + txn, + session, + Params { + t: threshold, + n: substrate_evrf_public_keys + .len() + .try_into() + .expect("amount of keys exceeded the amount allowed during a DKG"), + substrate_evrf_public_keys, + network_evrf_public_keys, + }, + ); + + // Send back our Participation and all faulty parties + let mut res = Vec::with_capacity(faulty.len() + 1); + faulty.sort_unstable(); + for faulty in faulty { + res.push(ProcessorMessage::Blame { session, participant: faulty }); + } + res.push(ProcessorMessage::Participation { session, participation }); + + res + } + + CoordinatorMessage::Participation { session, participant, participation } => { + log::debug!("received participation from {:?} for {:?}", participant, session); + + let Params { t: threshold, n, substrate_evrf_public_keys, network_evrf_public_keys } = + KeyGenDb::
<P>
::params(txn, session).unwrap(); + + // Read these `Participation`s + // If they fail basic sanity checks, fail fast + let (substrate_participation, network_participation) = { + let network_participation_start_pos = { + let mut participation = participation.as_slice(); + let start_len = participation.len(); + + let blame = vec![ProcessorMessage::Blame { session, participant }]; + let Ok(substrate_participation) = + Participation::::read(&mut participation, n) + else { + return blame; + }; + let len_at_network_participation_start_pos = participation.len(); + let Ok(network_participation) = + Participation::::read(&mut participation, n) + else { + return blame; + }; + + // If they added random noise after their participations, they're faulty + // This prevents DoS by causing a slash upon such spam + if !participation.is_empty() { + return blame; + } + + // If we've already generated these keys, we don't actually need to save these + // participations and continue. We solely have to verify them, as to identify malicious + // participants and prevent DoSs, before returning + if Self::key_shares(txn, session).is_some() { + log::debug!("already finished generating a key for {:?}", session); + + match EvrfDkg::::verify( + &mut OsRng, + generators(), + context::
<P>
(session, SUBSTRATE_KEY_CONTEXT), + threshold, + &substrate_evrf_public_keys, + &HashMap::from([(participant, substrate_participation)]), + ) + .unwrap() + { + VerifyResult::Valid(_) | VerifyResult::NotEnoughParticipants => {} + VerifyResult::Invalid(faulty) => { + assert_eq!(faulty, vec![participant]); + return vec![ProcessorMessage::Blame { session, participant }]; + } + } + + match EvrfDkg::::verify( + &mut OsRng, + generators(), + context::
<P>
(session, NETWORK_KEY_CONTEXT), + threshold, + &network_evrf_public_keys, + &HashMap::from([(participant, network_participation)]), + ) + .unwrap() + { + VerifyResult::Valid(_) | VerifyResult::NotEnoughParticipants => return vec![], + VerifyResult::Invalid(faulty) => { + assert_eq!(faulty, vec![participant]); + return vec![ProcessorMessage::Blame { session, participant }]; + } + } + } + + // Return the position the network participation starts at + start_len - len_at_network_participation_start_pos + }; + + // Instead of re-serializing the `Participation`s we read, we just use the relevant + // sections of the existing byte buffer + ( + participation[.. network_participation_start_pos].to_vec(), + participation[network_participation_start_pos ..].to_vec(), + ) + }; + + // Since these are valid `Participation`s, save them + let (mut substrate_participations, mut network_participations) = + KeyGenDb::
<P>
::participations(txn, session).map_or_else( + || (HashMap::with_capacity(1), HashMap::with_capacity(1)), + |p| (p.substrate_participations, p.network_participations), + ); + assert!( + substrate_participations.insert(participant, substrate_participation).is_none() && + network_participations.insert(participant, network_participation).is_none(), + "received participation for someone multiple times" + ); + KeyGenDb::
<P>
::set_participations( + txn, + session, + &Participations { + substrate_participations: substrate_participations.clone(), + network_participations: network_participations.clone(), + }, + ); + + // This block is taken from the eVRF DKG itself to evaluate the amount participating + { + let mut participating_weight = 0; + // This uses the Substrate maps as the maps are kept in synchrony + let mut evrf_public_keys_mut = substrate_evrf_public_keys.clone(); + for i in substrate_participations.keys() { + let evrf_public_key = substrate_evrf_public_keys[usize::from(u16::from(*i)) - 1]; + + // Remove this key from the Vec to prevent double-counting + /* + Double-counting would be a risk if multiple participants shared an eVRF public key + and participated. This code does still allow such participants (in order to let + participants be weighted), and any one of them participating will count as all + participating. This is fine as any one such participant will be able to decrypt + the shares for themselves and all other participants, so this is still a key + generated by an amount of participants who could simply reconstruct the key. + */ + let start_len = evrf_public_keys_mut.len(); + evrf_public_keys_mut.retain(|key| *key != evrf_public_key); + let end_len = evrf_public_keys_mut.len(); + let count = start_len - end_len; + + participating_weight += count; + } + if participating_weight < usize::from(threshold) { + return vec![]; + } + } + + // If we now have the threshold participating, verify their `Participation`s + fn verify_dkg( + txn: &mut impl DbTxn, + session: Session, + true_if_substrate_false_if_network: bool, + threshold: u16, + evrf_public_keys: &[::G], + substrate_participations: &mut HashMap>, + network_participations: &mut HashMap>, + ) -> Result, Vec> { + // Parse the `Participation`s + let participations = (if true_if_substrate_false_if_network { + &*substrate_participations + } else { + &*network_participations + }) + .iter() + .map(|(key, participation)| { + ( + *key, + Participation::read( + &mut participation.as_slice(), + evrf_public_keys.len().try_into().unwrap(), + ) + .expect("prior read participation was invalid"), + ) + }) + .collect(); + + // Actually call verify on the DKG + match EvrfDkg::::verify( + &mut OsRng, + generators(), + context::
<P>
( + session, + if true_if_substrate_false_if_network { + SUBSTRATE_KEY_CONTEXT + } else { + NETWORK_KEY_CONTEXT + }, + ), + threshold, + evrf_public_keys, + &participations, + ) + .unwrap() + { + // If the DKG was valid, return it + VerifyResult::Valid(dkg) => Ok(dkg), + // This DKG had faulty participants, so create blame messages for them + VerifyResult::Invalid(faulty) => { + let mut blames = vec![]; + for participant in faulty { + // Remove from both maps for simplicity's sake + // There's no point in having one DKG complete yet not the other + assert!(substrate_participations.remove(&participant).is_some()); + assert!(network_participations.remove(&participant).is_some()); + blames.push(ProcessorMessage::Blame { session, participant }); + } + // Since we removed `Participation`s, write the updated versions to the database + KeyGenDb::
<P>
::set_participations( + txn, + session, + &Participations { + substrate_participations: substrate_participations.clone(), + network_participations: network_participations.clone(), + }, + ); + Err(blames)? + } + VerifyResult::NotEnoughParticipants => { + // This is the first DKG, and we checked we were at the threshold OR + // This is the second DKG, as the first had no invalid participants, so we're still + // at the threshold + panic!("not enough participants despite checking we were at the threshold") + } + } + } + + let substrate_dkg = match verify_dkg::( + txn, + session, + true, + threshold, + &substrate_evrf_public_keys, + &mut substrate_participations, + &mut network_participations, + ) { + Ok(dkg) => dkg, + // If we had any blames, immediately return them as necessary for the safety of + // `verify_dkg` (it assumes we don't call it again upon prior errors) + Err(blames) => return blames, + }; + + let network_dkg = match verify_dkg::( + txn, + session, + false, + threshold, + &network_evrf_public_keys, + &mut substrate_participations, + &mut network_participations, + ) { + Ok(dkg) => dkg, + Err(blames) => return blames, + }; + + // Get our keys from each DKG + // TODO: Some of these keys may be decrypted by us, yet not actually meant for us, if + // another validator set our eVRF public key as their eVRF public key. We either need to + // ensure the coordinator tracks amount of shares we're supposed to have by the eVRF public + // keys OR explicitly reduce to the keys we're supposed to have based on our `i` index. + let substrate_keys = substrate_dkg.keys(&self.substrate_evrf_private_key); + let mut network_keys = network_dkg.keys(&self.network_evrf_private_key); + // Tweak the keys for the network + for network_keys in &mut network_keys { + P::tweak_keys(network_keys); + } + KeyGenDb::
<P>
::set_key_shares(txn, session, &substrate_keys, &network_keys); + + log::info!("generated key, session: {session:?}"); + + // Since no one we verified was invalid, and we had the threshold, yield the new keys + vec![ProcessorMessage::GeneratedKeyPair { + session, + substrate_key: substrate_keys[0].group_key().to_bytes(), + network_key: P::encode_key(network_keys[0].group_key()), + }] + } + } + } +} diff --git a/processor/messages/Cargo.toml b/processor/messages/Cargo.toml index 0eba999d..b1387301 100644 --- a/processor/messages/Cargo.toml +++ b/processor/messages/Cargo.toml @@ -8,6 +8,7 @@ authors = ["Luke Parker "] keywords = [] edition = "2021" publish = false +rust-version = "1.80" [package.metadata.docs.rs] all-features = true @@ -17,6 +18,8 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [dependencies] +hex = { version = "0.4", default-features = false, features = ["std"] } + scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } @@ -26,3 +29,5 @@ serai-primitives = { path = "../../substrate/primitives", default-features = fal in-instructions-primitives = { package = "serai-in-instructions-primitives", path = "../../substrate/in-instructions/primitives", default-features = false, features = ["std", "borsh"] } coins-primitives = { package = "serai-coins-primitives", path = "../../substrate/coins/primitives", default-features = false, features = ["std", "borsh"] } validator-sets-primitives = { package = "serai-validator-sets-primitives", path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std", "borsh"] } + +serai-cosign = { path = "../../coordinator/cosign", default-features = false } diff --git a/processor/messages/src/lib.rs b/processor/messages/src/lib.rs index 22360a1a..7101fdc2 100644 --- a/processor/messages/src/lib.rs +++ b/processor/messages/src/lib.rs @@ -1,14 +1,17 @@ +use core::fmt; use std::collections::HashMap; use scale::{Encode, Decode}; use borsh::{BorshSerialize, BorshDeserialize}; -use dkg::{Participant, ThresholdParams}; +use dkg::Participant; use serai_primitives::BlockHash; -use in_instructions_primitives::{Batch, SignedBatch}; +use validator_sets_primitives::{Session, KeyPair, SlashReport}; use coins_primitives::OutInstructionWithBalance; -use validator_sets_primitives::{Session, KeyPair}; +use in_instructions_primitives::SignedBatch; + +use serai_cosign::{Cosign, SignedCosign}; #[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub struct SubstrateContext { @@ -19,203 +22,165 @@ pub struct SubstrateContext { pub mod key_gen { use super::*; - #[derive( - Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize, - )] - pub struct KeyGenId { - pub session: Session, - pub attempt: u32, - } - - #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] + #[derive(Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] pub enum CoordinatorMessage { - // Instructs the Processor to begin the key generation process. - // TODO: Should this be moved under Substrate? - GenerateKey { - id: KeyGenId, - params: ThresholdParams, - shares: u16, - }, - // Received commitments for the specified key generation protocol. - Commitments { - id: KeyGenId, - commitments: HashMap>, - }, - // Received shares for the specified key generation protocol. 
- Shares { - id: KeyGenId, - shares: Vec>>, - }, - /// Instruction to verify a blame accusation. - VerifyBlame { - id: KeyGenId, - accuser: Participant, - accused: Participant, - share: Vec, - blame: Option>, - }, + /// Instructs the Processor to begin the key generation process. + /// + /// This is sent by the Coordinator when it creates the Tributary. + GenerateKey { session: Session, threshold: u16, evrf_public_keys: Vec<([u8; 32], Vec)> }, + /// Received participations for the specified key generation protocol. + /// + /// This is sent by the Coordinator's Tributary scanner. + Participation { session: Session, participant: Participant, participation: Vec }, } - impl CoordinatorMessage { - pub fn required_block(&self) -> Option { - None + impl core::fmt::Debug for CoordinatorMessage { + fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + match self { + CoordinatorMessage::GenerateKey { session, threshold, evrf_public_keys } => fmt + .debug_struct("CoordinatorMessage::GenerateKey") + .field("session", &session) + .field("threshold", &threshold) + .field("evrf_public_keys.len()", &evrf_public_keys.len()) + .finish_non_exhaustive(), + CoordinatorMessage::Participation { session, participant, .. } => fmt + .debug_struct("CoordinatorMessage::Participation") + .field("session", &session) + .field("participant", &participant) + .finish_non_exhaustive(), + } } } - #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] + // This set of messages is sent entirely and solely by serai-processor-key-gen. + #[derive(Clone, BorshSerialize, BorshDeserialize)] pub enum ProcessorMessage { - // Created commitments for the specified key generation protocol. - Commitments { - id: KeyGenId, - commitments: Vec>, - }, - // Participant published invalid commitments. - InvalidCommitments { - id: KeyGenId, - faulty: Participant, - }, - // Created shares for the specified key generation protocol. - Shares { - id: KeyGenId, - shares: Vec>>, - }, - // Participant published an invalid share. - #[rustfmt::skip] - InvalidShare { - id: KeyGenId, - accuser: Participant, - faulty: Participant, - blame: Option>, - }, + // Participated in the specified key generation protocol. + Participation { session: Session, participation: Vec }, // Resulting keys from the specified key generation protocol. - GeneratedKeyPair { - id: KeyGenId, - substrate_key: [u8; 32], - network_key: Vec, - }, + GeneratedKeyPair { session: Session, substrate_key: [u8; 32], network_key: Vec }, // Blame this participant. - Blame { - id: KeyGenId, - participant: Participant, - }, + Blame { session: Session, participant: Participant }, + } + + impl core::fmt::Debug for ProcessorMessage { + fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + match self { + ProcessorMessage::Participation { session, .. } => fmt + .debug_struct("ProcessorMessage::Participation") + .field("session", &session) + .finish_non_exhaustive(), + ProcessorMessage::GeneratedKeyPair { session, .. 
} => fmt + .debug_struct("ProcessorMessage::GeneratedKeyPair") + .field("session", &session) + .finish_non_exhaustive(), + ProcessorMessage::Blame { session, participant } => fmt + .debug_struct("ProcessorMessage::Blame") + .field("session", &session) + .field("participant", &participant) + .finish_non_exhaustive(), + } + } } } pub mod sign { use super::*; - #[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)] + #[derive(Clone, Copy, PartialEq, Eq, Hash, Encode, Decode, BorshSerialize, BorshDeserialize)] + pub enum VariantSignId { + Cosign(u64), + Batch([u8; 32]), + SlashReport, + Transaction([u8; 32]), + } + impl fmt::Debug for VariantSignId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + match self { + Self::Cosign(cosign) => { + f.debug_struct("VariantSignId::Cosign").field("0", &cosign).finish() + } + Self::Batch(batch) => { + f.debug_struct("VariantSignId::Batch").field("0", &hex::encode(batch)).finish() + } + Self::SlashReport => f.debug_struct("VariantSignId::SlashReport").finish(), + Self::Transaction(tx) => { + f.debug_struct("VariantSignId::Transaction").field("0", &hex::encode(tx)).finish() + } + } + } + } + + #[derive( + Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize, + )] pub struct SignId { pub session: Session, - pub id: [u8; 32], + pub id: VariantSignId, pub attempt: u32, } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum CoordinatorMessage { - // Received preprocesses for the specified signing protocol. + /// Received preprocesses for the specified signing protocol. + /// + /// This is sent by the Coordinator's Tributary scanner. Preprocesses { id: SignId, preprocesses: HashMap> }, // Received shares for the specified signing protocol. + /// + /// This is sent by the Coordinator's Tributary scanner. Shares { id: SignId, shares: HashMap> }, // Re-attempt a signing protocol. + /// + /// This is sent by the Coordinator's Tributary re-attempt scheduling logic. Reattempt { id: SignId }, - // Completed a signing protocol already. - Completed { session: Session, id: [u8; 32], tx: Vec }, } impl CoordinatorMessage { - pub fn required_block(&self) -> Option { - None - } - - pub fn session(&self) -> Session { + pub fn sign_id(&self) -> &SignId { match self { CoordinatorMessage::Preprocesses { id, .. } | CoordinatorMessage::Shares { id, .. } | - CoordinatorMessage::Reattempt { id } => id.session, - CoordinatorMessage::Completed { session, .. } => *session, + CoordinatorMessage::Reattempt { id, .. } => id, } } } - #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] + // This set of messages is sent entirely and solely by serai-processor-frost-attempt-manager. + #[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] pub enum ProcessorMessage { // Participant sent an invalid message during the sign protocol. - InvalidParticipant { id: SignId, participant: Participant }, - // Created preprocess for the specified signing protocol. - Preprocess { id: SignId, preprocesses: Vec> }, - // Signed share for the specified signing protocol. - Share { id: SignId, shares: Vec> }, - // Completed a signing protocol already. - Completed { session: Session, id: [u8; 32], tx: Vec }, + InvalidParticipant { session: Session, participant: Participant }, + // Created preprocesses for the specified signing protocol. + Preprocesses { id: SignId, preprocesses: Vec> }, + // Signed shares for the specified signing protocol. 
+ Shares { id: SignId, shares: Vec> }, } } pub mod coordinator { use super::*; - pub fn cosign_block_msg(block_number: u64, block: [u8; 32]) -> Vec { - const DST: &[u8] = b"Cosign"; - let mut res = vec![u8::try_from(DST.len()).unwrap()]; - res.extend(DST); - res.extend(block_number.to_le_bytes()); - res.extend(block); - res - } - - #[derive( - Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize, - )] - pub enum SubstrateSignableId { - CosigningSubstrateBlock([u8; 32]), - Batch(u32), - SlashReport, - } - - #[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)] - pub struct SubstrateSignId { - pub session: Session, - pub id: SubstrateSignableId, - pub attempt: u32, - } - #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum CoordinatorMessage { - CosignSubstrateBlock { id: SubstrateSignId, block_number: u64 }, - SignSlashReport { id: SubstrateSignId, report: Vec<([u8; 32], u32)> }, - SubstratePreprocesses { id: SubstrateSignId, preprocesses: HashMap }, - SubstrateShares { id: SubstrateSignId, shares: HashMap }, - // Re-attempt a batch signing protocol. - BatchReattempt { id: SubstrateSignId }, + /// Cosign the specified Substrate block. + /// + /// This is sent by the Coordinator's Tributary scanner. + CosignSubstrateBlock { session: Session, cosign: Cosign }, + /// Sign the slash report for this session. + /// + /// This is sent by the Coordinator's Tributary scanner. + SignSlashReport { session: Session, slash_report: SlashReport }, } - impl CoordinatorMessage { - // The Coordinator will only send Batch messages once the Batch ID has been recognized - // The ID will only be recognized when the block is acknowledged by a super-majority of the - // network *and the local node* - // This synchrony obtained lets us ignore the synchrony requirement offered here - pub fn required_block(&self) -> Option { - None - } - } - - #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] - pub struct PlanMeta { - pub session: Session, - pub id: [u8; 32], - } - - #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] + // This set of messages is sent entirely and solely by serai-processor-bin's implementation of + // the signers::Coordinator trait. + // TODO: Move message creation into serai-processor-signers + #[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] pub enum ProcessorMessage { - SubstrateBlockAck { block: u64, plans: Vec }, - InvalidParticipant { id: SubstrateSignId, participant: Participant }, - CosignPreprocess { id: SubstrateSignId, preprocesses: Vec<[u8; 64]> }, - BatchPreprocess { id: SubstrateSignId, block: BlockHash, preprocesses: Vec<[u8; 64]> }, - SlashReportPreprocess { id: SubstrateSignId, preprocesses: Vec<[u8; 64]> }, - SubstrateShare { id: SubstrateSignId, shares: Vec<[u8; 32]> }, - // TODO: Make these signatures [u8; 64]? 
- CosignedBlock { block_number: u64, block: [u8; 32], signature: Vec }, - SignedSlashReport { session: Session, signature: Vec }, + CosignedBlock { cosign: SignedCosign }, + SignedBatch { batch: SignedBatch }, + SignedSlashReport { session: Session, slash_report: SlashReport, signature: [u8; 64] }, } } @@ -223,34 +188,51 @@ pub mod substrate { use super::*; #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] - pub enum CoordinatorMessage { - ConfirmKeyPair { - context: SubstrateContext, - session: Session, - key_pair: KeyPair, - }, - SubstrateBlock { - context: SubstrateContext, - block: u64, - burns: Vec, - batches: Vec, - }, + pub enum InInstructionResult { + Succeeded, + Failed, } - - impl CoordinatorMessage { - pub fn required_block(&self) -> Option { - let context = match self { - CoordinatorMessage::ConfirmKeyPair { context, .. } | - CoordinatorMessage::SubstrateBlock { context, .. } => context, - }; - Some(context.network_latest_finalized_block) - } + #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] + pub struct ExecutedBatch { + pub id: u32, + pub publisher: Session, + pub external_network_block_hash: [u8; 32], + pub in_instructions_hash: [u8; 32], + pub in_instruction_results: Vec, } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] + pub enum CoordinatorMessage { + /// Keys set on the Serai blockchain. + /// + /// This is sent by the Coordinator's Substrate canonical event stream. + SetKeys { serai_time: u64, session: Session, key_pair: KeyPair }, + /// Slashes reported on the Serai blockchain OR the process timed out. + /// + /// This is the final message for a session, + /// + /// This is sent by the Coordinator's Substrate canonical event stream. + SlashesReported { session: Session }, + /// A block from Serai with relevance to this processor. + /// + /// This is sent by the Coordinator's Substrate canonical event stream. 
+ Block { + serai_block_number: u64, + batch: Option, + burns: Vec, + }, + } + + #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] + pub struct PlanMeta { + pub session: Session, + pub transaction_plan_id: [u8; 32], + } + + #[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] pub enum ProcessorMessage { - Batch { batch: Batch }, - SignedBatch { batch: SignedBatch }, + // TODO: Have the processor send this + SubstrateBlockAck { block: [u8; 32], plans: Vec }, } } @@ -277,25 +259,7 @@ impl_from!(sign, CoordinatorMessage, Sign); impl_from!(coordinator, CoordinatorMessage, Coordinator); impl_from!(substrate, CoordinatorMessage, Substrate); -impl CoordinatorMessage { - pub fn required_block(&self) -> Option { - let required = match self { - CoordinatorMessage::KeyGen(msg) => msg.required_block(), - CoordinatorMessage::Sign(msg) => msg.required_block(), - CoordinatorMessage::Coordinator(msg) => msg.required_block(), - CoordinatorMessage::Substrate(msg) => msg.required_block(), - }; - - // 0 is used when Serai hasn't acknowledged *any* block for this network, which also means - // there's no need to wait for the block in question - if required == Some(BlockHash([0; 32])) { - return None; - } - required - } -} - -#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] +#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] pub enum ProcessorMessage { KeyGen(key_gen::ProcessorMessage), Sign(sign::ProcessorMessage), @@ -313,10 +277,10 @@ impl_from!(substrate, ProcessorMessage, Substrate); const COORDINATOR_UID: u8 = 0; const PROCESSOR_UID: u8 = 1; -const TYPE_KEY_GEN_UID: u8 = 2; -const TYPE_SIGN_UID: u8 = 3; -const TYPE_COORDINATOR_UID: u8 = 4; -const TYPE_SUBSTRATE_UID: u8 = 5; +const TYPE_KEY_GEN_UID: u8 = 0; +const TYPE_SIGN_UID: u8 = 1; +const TYPE_COORDINATOR_UID: u8 = 2; +const TYPE_SUBSTRATE_UID: u8 = 3; impl CoordinatorMessage { /// The intent for this message, which should be unique across the validator's entire system, @@ -328,46 +292,41 @@ impl CoordinatorMessage { pub fn intent(&self) -> Vec { match self { CoordinatorMessage::KeyGen(msg) => { - // Unique since key gen ID embeds the session and attempt let (sub, id) = match msg { - key_gen::CoordinatorMessage::GenerateKey { id, .. } => (0, id), - key_gen::CoordinatorMessage::Commitments { id, .. } => (1, id), - key_gen::CoordinatorMessage::Shares { id, .. } => (2, id), - key_gen::CoordinatorMessage::VerifyBlame { id, .. } => (3, id), + // Unique since we only have one attempt per session + key_gen::CoordinatorMessage::GenerateKey { session, .. } => { + (0, borsh::to_vec(session).unwrap()) + } + // Unique since one participation per participant per session + key_gen::CoordinatorMessage::Participation { session, participant, .. } => { + (1, borsh::to_vec(&(session, participant)).unwrap()) + } }; let mut res = vec![COORDINATOR_UID, TYPE_KEY_GEN_UID, sub]; - res.extend(&id.encode()); + res.extend(&id); res } CoordinatorMessage::Sign(msg) => { let (sub, id) = match msg { - // Unique since SignId includes a hash of the network, and specific transaction info - sign::CoordinatorMessage::Preprocesses { id, .. } => (0, id.encode()), - sign::CoordinatorMessage::Shares { id, .. 
} => (1, id.encode()), - sign::CoordinatorMessage::Reattempt { id } => (2, id.encode()), - // The coordinator should report all reported completions to the processor - // Accordingly, the intent is a combination of plan ID and actual TX - // While transaction alone may suffice, that doesn't cover cross-chain TX ID conflicts, - // which are possible - sign::CoordinatorMessage::Completed { id, tx, .. } => (3, (id, tx).encode()), + // Unique since SignId + sign::CoordinatorMessage::Preprocesses { id, .. } => (0, id), + sign::CoordinatorMessage::Shares { id, .. } => (1, id), + sign::CoordinatorMessage::Reattempt { id, .. } => (2, id), }; let mut res = vec![COORDINATOR_UID, TYPE_SIGN_UID, sub]; - res.extend(&id); + res.extend(id.encode()); res } CoordinatorMessage::Coordinator(msg) => { let (sub, id) = match msg { - // Unique since this ID contains the hash of the block being cosigned - coordinator::CoordinatorMessage::CosignSubstrateBlock { id, .. } => (0, id.encode()), - // Unique since there's only one of these per session/attempt, and ID is inclusive to - // both - coordinator::CoordinatorMessage::SignSlashReport { id, .. } => (1, id.encode()), - // Unique since this embeds the batch ID (including its network) and attempt - coordinator::CoordinatorMessage::SubstratePreprocesses { id, .. } => (2, id.encode()), - coordinator::CoordinatorMessage::SubstrateShares { id, .. } => (3, id.encode()), - coordinator::CoordinatorMessage::BatchReattempt { id, .. } => (4, id.encode()), + // We only cosign a block once, and Reattempt is a separate message + coordinator::CoordinatorMessage::CosignSubstrateBlock { cosign, .. } => { + (0, cosign.block_number.encode()) + } + // We only sign one slash report, and Reattempt is a separate message + coordinator::CoordinatorMessage::SignSlashReport { session, .. } => (1, session.encode()), }; let mut res = vec![COORDINATOR_UID, TYPE_COORDINATOR_UID, sub]; @@ -376,9 +335,11 @@ impl CoordinatorMessage { } CoordinatorMessage::Substrate(msg) => { let (sub, id) = match msg { - // Unique since there's only one key pair for a session - substrate::CoordinatorMessage::ConfirmKeyPair { session, .. } => (0, session.encode()), - substrate::CoordinatorMessage::SubstrateBlock { block, .. } => (1, block.encode()), + substrate::CoordinatorMessage::SetKeys { session, .. } => (0, session.encode()), + substrate::CoordinatorMessage::SlashesReported { session } => (1, session.encode()), + substrate::CoordinatorMessage::Block { serai_block_number, .. } => { + (2, serai_block_number.encode()) + } }; let mut res = vec![COORDINATOR_UID, TYPE_SUBSTRATE_UID, sub]; @@ -400,27 +361,32 @@ impl ProcessorMessage { match self { ProcessorMessage::KeyGen(msg) => { let (sub, id) = match msg { - // Unique since KeyGenId - key_gen::ProcessorMessage::Commitments { id, .. } => (0, id), - key_gen::ProcessorMessage::InvalidCommitments { id, .. } => (1, id), - key_gen::ProcessorMessage::Shares { id, .. } => (2, id), - key_gen::ProcessorMessage::InvalidShare { id, .. } => (3, id), - key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } => (4, id), - key_gen::ProcessorMessage::Blame { id, .. } => (5, id), + // Unique since we only have one participation per session (due to no re-attempts) + key_gen::ProcessorMessage::Participation { session, .. } => { + (0, borsh::to_vec(session).unwrap()) + } + key_gen::ProcessorMessage::GeneratedKeyPair { session, .. 
} => { + (1, borsh::to_vec(session).unwrap()) + } + // Unique since we only blame a participant once (as this is fatal) + key_gen::ProcessorMessage::Blame { session, participant } => { + (2, borsh::to_vec(&(session, participant)).unwrap()) + } }; let mut res = vec![PROCESSOR_UID, TYPE_KEY_GEN_UID, sub]; - res.extend(&id.encode()); + res.extend(&id); res } ProcessorMessage::Sign(msg) => { let (sub, id) = match msg { + // Unique since we'll only fatally slash a a participant once + sign::ProcessorMessage::InvalidParticipant { session, participant } => { + (0, (session, u16::from(*participant)).encode()) + } // Unique since SignId - sign::ProcessorMessage::InvalidParticipant { id, .. } => (0, id.encode()), - sign::ProcessorMessage::Preprocess { id, .. } => (1, id.encode()), - sign::ProcessorMessage::Share { id, .. } => (2, id.encode()), - // Unique since a processor will only sign a TX once - sign::ProcessorMessage::Completed { id, .. } => (3, id.to_vec()), + sign::ProcessorMessage::Preprocesses { id, .. } => (1, id.encode()), + sign::ProcessorMessage::Shares { id, .. } => (2, id.encode()), }; let mut res = vec![PROCESSOR_UID, TYPE_SIGN_UID, sub]; @@ -429,16 +395,11 @@ impl ProcessorMessage { } ProcessorMessage::Coordinator(msg) => { let (sub, id) = match msg { - coordinator::ProcessorMessage::SubstrateBlockAck { block, .. } => (0, block.encode()), - // Unique since SubstrateSignId - coordinator::ProcessorMessage::InvalidParticipant { id, .. } => (1, id.encode()), - coordinator::ProcessorMessage::CosignPreprocess { id, .. } => (2, id.encode()), - coordinator::ProcessorMessage::BatchPreprocess { id, .. } => (3, id.encode()), - coordinator::ProcessorMessage::SlashReportPreprocess { id, .. } => (4, id.encode()), - coordinator::ProcessorMessage::SubstrateShare { id, .. } => (5, id.encode()), - // Unique since only one instance of a signature matters - coordinator::ProcessorMessage::CosignedBlock { block, .. } => (6, block.encode()), - coordinator::ProcessorMessage::SignedSlashReport { .. } => (7, vec![]), + coordinator::ProcessorMessage::CosignedBlock { cosign } => { + (0, cosign.cosign.block_hash.encode()) + } + coordinator::ProcessorMessage::SignedBatch { batch, .. } => (1, batch.batch.id.encode()), + coordinator::ProcessorMessage::SignedSlashReport { session, .. } => (2, session.encode()), }; let mut res = vec![PROCESSOR_UID, TYPE_COORDINATOR_UID, sub]; @@ -447,11 +408,7 @@ impl ProcessorMessage { } ProcessorMessage::Substrate(msg) => { let (sub, id) = match msg { - // Unique since network and ID binding - substrate::ProcessorMessage::Batch { batch } => (0, (batch.network, batch.id).encode()), - substrate::ProcessorMessage::SignedBatch { batch, .. } => { - (1, (batch.batch.network, batch.batch.id).encode()) - } + substrate::ProcessorMessage::SubstrateBlockAck { block, .. 
} => (0, block.encode()), }; let mut res = vec![PROCESSOR_UID, TYPE_SUBSTRATE_UID, sub]; diff --git a/processor/monero/Cargo.toml b/processor/monero/Cargo.toml new file mode 100644 index 00000000..d596bb9e --- /dev/null +++ b/processor/monero/Cargo.toml @@ -0,0 +1,56 @@ +[package] +name = "serai-monero-processor" +version = "0.1.0" +description = "Serai Monero Processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/monero" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false +rust-version = "1.80" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +rand_core = { version = "0.6", default-features = false } +rand_chacha = { version = "0.3", default-features = false, features = ["std"] } +zeroize = { version = "1", default-features = false, features = ["std"] } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +dalek-ff-group = { path = "../../crypto/dalek-ff-group", default-features = false, features = ["std"] } +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "ed25519"] } +dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-ed25519"] } +frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } + +monero-wallet = { path = "../../networks/monero/wallet", default-features = false, features = ["std", "multisig"] } +monero-simple-request-rpc = { path = "../../networks/monero/rpc/simple-request", default-features = false } + +serai-client = { path = "../../substrate/client", default-features = false, features = ["monero"] } + +zalloc = { path = "../../common/zalloc" } +log = { version = "0.4", default-features = false, features = ["std"] } +tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } + +key-gen = { package = "serai-processor-key-gen", path = "../key-gen" } +view-keys = { package = "serai-processor-view-keys", path = "../view-keys" } + +primitives = { package = "serai-processor-primitives", path = "../primitives" } +scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } +scanner = { package = "serai-processor-scanner", path = "../scanner" } +utxo-scheduler = { package = "serai-processor-utxo-scheduler-primitives", path = "../scheduler/utxo/primitives" } +utxo-standard-scheduler = { package = "serai-processor-utxo-scheduler", path = "../scheduler/utxo/standard" } +signers = { package = "serai-processor-signers", path = "../signers" } + +bin = { package = "serai-processor-bin", path = "../bin" } + +[features] +parity-db = ["bin/parity-db"] +rocksdb = ["bin/rocksdb"] diff --git a/processor/monero/LICENSE b/processor/monero/LICENSE new file mode 100644 index 00000000..41d5a261 --- /dev/null +++ b/processor/monero/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/monero/README.md b/processor/monero/README.md new file mode 100644 index 00000000..564c83a0 --- /dev/null +++ b/processor/monero/README.md @@ -0,0 +1 @@ +# Serai Monero Processor diff --git a/processor/monero/src/key_gen.rs b/processor/monero/src/key_gen.rs new file mode 100644 index 00000000..6e30d7bf --- /dev/null +++ b/processor/monero/src/key_gen.rs @@ -0,0 +1,8 @@ +use ciphersuite::Ed25519; + +pub(crate) struct KeyGenParams; +impl key_gen::KeyGenParams for KeyGenParams { + const ID: &'static str = "Monero"; + + type ExternalNetworkCiphersuite = Ed25519; +} diff --git a/processor/monero/src/main.rs b/processor/monero/src/main.rs new file mode 100644 index 00000000..b5c67f12 --- /dev/null +++ b/processor/monero/src/main.rs @@ -0,0 +1,189 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +#[global_allocator] +static ALLOCATOR: zalloc::ZeroizingAlloc = + zalloc::ZeroizingAlloc(std::alloc::System); + +use monero_simple_request_rpc::SimpleRequestRpc; + +mod primitives; +pub(crate) use crate::primitives::*; + +mod key_gen; +use crate::key_gen::KeyGenParams; +mod rpc; +use rpc::Rpc; +mod scheduler; +use scheduler::{Planner, Scheduler}; + +#[tokio::main] +async fn main() { + let db = bin::init(); + let feed = Rpc { + rpc: loop { + match SimpleRequestRpc::new(bin::url()).await { + Ok(rpc) => break rpc, + Err(e) => { + log::error!("couldn't connect to the Monero node: {e:?}"); + tokio::time::sleep(core::time::Duration::from_secs(5)).await; + } + } + }, + }; + + bin::main_loop::<(), _, KeyGenParams, _>( + db, + feed.clone(), + Scheduler::new(Planner(feed.clone())), + feed, + ) + .await; +} + +/* +#[async_trait] +impl TransactionTrait for Transaction { + #[cfg(test)] + async fn fee(&self, _: &Monero) -> u64 { + match self { + Transaction::V1 { .. } => panic!("v1 TX in test-only function"), + Transaction::V2 { ref proofs, .. } => proofs.as_ref().unwrap().base.fee, + } + } +} + +impl Monero { + async fn median_fee(&self, block: &Block) -> Result { + let mut fees = vec![]; + for tx_hash in &block.transactions { + let tx = + self.rpc.get_transaction(*tx_hash).await.map_err(|_| NetworkError::ConnectionError)?; + // Only consider fees from RCT transactions, else the fee property read wouldn't be accurate + let fee = match &tx { + Transaction::V2 { proofs: Some(proofs), .. 
} => proofs.base.fee, + _ => continue, + }; + fees.push(fee / u64::try_from(tx.weight()).unwrap()); + } + fees.sort(); + let fee = fees.get(fees.len() / 2).copied().unwrap_or(0); + + // TODO: Set a sane minimum fee + const MINIMUM_FEE: u64 = 1_500_000; + Ok(FeeRate::new(fee.max(MINIMUM_FEE), 10000).unwrap()) + } + + #[cfg(test)] + fn test_view_pair() -> ViewPair { + ViewPair::new(*EdwardsPoint::generator(), Zeroizing::new(Scalar::ONE.0)).unwrap() + } + + #[cfg(test)] + fn test_scanner() -> Scanner { + Scanner::new(Self::test_view_pair()) + } + + #[cfg(test)] + fn test_address() -> Address { + Address::new(Self::test_view_pair().legacy_address(MoneroNetwork::Mainnet)).unwrap() + } +} + +#[async_trait] +impl Network for Monero { + #[cfg(test)] + async fn get_block_number(&self, id: &[u8; 32]) -> usize { + self.rpc.get_block(*id).await.unwrap().number().unwrap() + } + + #[cfg(test)] + async fn get_transaction_by_eventuality( + &self, + block: usize, + eventuality: &Eventuality, + ) -> Transaction { + let block = self.rpc.get_block_by_number(block).await.unwrap(); + for tx in &block.transactions { + let tx = self.rpc.get_transaction(*tx).await.unwrap(); + if eventuality.matches(&tx.clone().into()) { + return tx; + } + } + panic!("block didn't have a transaction for this eventuality") + } + + #[cfg(test)] + async fn mine_block(&self) { + // https://github.com/serai-dex/serai/issues/198 + sleep(std::time::Duration::from_millis(100)).await; + self.rpc.generate_blocks(&Self::test_address().into(), 1).await.unwrap(); + } + + #[cfg(test)] + async fn test_send(&self, address: Address) -> Block { + use zeroize::Zeroizing; + use rand_core::{RngCore, OsRng}; + use monero_wallet::rpc::FeePriority; + + let new_block = self.get_latest_block_number().await.unwrap() + 1; + for _ in 0 .. 80 { + self.mine_block().await; + } + + let new_block = self.rpc.get_block_by_number(new_block).await.unwrap(); + let mut outputs = Self::test_scanner() + .scan(self.rpc.get_scannable_block(new_block.clone()).await.unwrap()) + .unwrap() + .ignore_additional_timelock(); + let output = outputs.swap_remove(0); + + let amount = output.commitment().amount; + // The dust should always be sufficient for the fee + let fee = Monero::DUST; + + let rct_type = match new_block.header.hardfork_version { + 14 => RctType::ClsagBulletproof, + 15 | 16 => RctType::ClsagBulletproofPlus, + _ => panic!("Monero hard forked and the processor wasn't updated for it"), + }; + + let output = OutputWithDecoys::fingerprintable_deterministic_new( + &mut OsRng, + &self.rpc, + match rct_type { + RctType::ClsagBulletproof => 11, + RctType::ClsagBulletproofPlus => 16, + _ => panic!("selecting decoys for an unsupported RctType"), + }, + self.rpc.get_height().await.unwrap(), + output, + ) + .await + .unwrap(); + + let mut outgoing_view_key = Zeroizing::new([0; 32]); + OsRng.fill_bytes(outgoing_view_key.as_mut()); + let tx = MSignableTransaction::new( + rct_type, + outgoing_view_key, + vec![output], + vec![(address.into(), amount - fee)], + Change::fingerprintable(Some(Self::test_address().into())), + vec![], + self.rpc.get_fee_rate(FeePriority::Unimportant).await.unwrap(), + ) + .unwrap() + .sign(&mut OsRng, &Zeroizing::new(Scalar::ONE.0)) + .unwrap(); + + let block = self.get_latest_block_number().await.unwrap() + 1; + self.rpc.publish_transaction(&tx).await.unwrap(); + for _ in 0 .. 
10 { + self.mine_block().await; + } + self.get_block(block).await.unwrap() + } +} +*/ diff --git a/processor/monero/src/primitives/block.rs b/processor/monero/src/primitives/block.rs new file mode 100644 index 00000000..6afae429 --- /dev/null +++ b/processor/monero/src/primitives/block.rs @@ -0,0 +1,83 @@ +use std::collections::HashMap; + +use ciphersuite::{Ciphersuite, Ed25519}; + +use monero_wallet::{ + block::Block as MBlock, rpc::ScannableBlock as MScannableBlock, ScanError, GuaranteedScanner, +}; + +use serai_client::networks::monero::Address; + +use primitives::{ReceivedOutput, EventualityTracker}; +use crate::{ + EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS, FORWARDED_SUBADDRESS, view_pair, + output::Output, transaction::Eventuality, +}; + +#[derive(Clone, Debug)] +pub(crate) struct BlockHeader(pub(crate) MBlock); +impl primitives::BlockHeader for BlockHeader { + fn id(&self) -> [u8; 32] { + self.0.hash() + } + fn parent(&self) -> [u8; 32] { + self.0.header.previous + } +} + +#[derive(Clone, Debug)] +pub(crate) struct Block(pub(crate) MScannableBlock); + +impl primitives::Block for Block { + type Header = BlockHeader; + + type Key = ::G; + type Address = Address; + type Output = Output; + type Eventuality = Eventuality; + + fn id(&self) -> [u8; 32] { + self.0.block.hash() + } + + fn scan_for_outputs_unordered( + &self, + _latest_active_key: Self::Key, + key: Self::Key, + ) -> Vec { + let mut scanner = GuaranteedScanner::new(view_pair(key)); + scanner.register_subaddress(EXTERNAL_SUBADDRESS); + scanner.register_subaddress(BRANCH_SUBADDRESS); + scanner.register_subaddress(CHANGE_SUBADDRESS); + scanner.register_subaddress(FORWARDED_SUBADDRESS); + match scanner.scan(self.0.clone()) { + Ok(outputs) => outputs.not_additionally_locked().into_iter().map(Output).collect(), + Err(ScanError::UnsupportedProtocol(version)) => { + panic!("Monero unexpectedly hard-forked (version {version})") + } + Err(ScanError::InvalidScannableBlock(reason)) => { + panic!("fetched an invalid scannable block from the RPC: {reason}") + } + } + } + + #[allow(clippy::type_complexity)] + fn check_for_eventuality_resolutions( + &self, + eventualities: &mut EventualityTracker, + ) -> HashMap< + >::TransactionId, + Self::Eventuality, + > { + let mut res = HashMap::new(); + assert_eq!(self.0.block.transactions.len(), self.0.transactions.len()); + for (hash, tx) in self.0.block.transactions.iter().zip(&self.0.transactions) { + if let Some(eventuality) = eventualities.active_eventualities.get(&tx.prefix().extra) { + if eventuality.eventuality.matches(tx) { + res.insert(*hash, eventualities.active_eventualities.remove(&tx.prefix().extra).unwrap()); + } + } + } + res + } +} diff --git a/processor/monero/src/primitives/mod.rs b/processor/monero/src/primitives/mod.rs new file mode 100644 index 00000000..317cae28 --- /dev/null +++ b/processor/monero/src/primitives/mod.rs @@ -0,0 +1,37 @@ +use zeroize::Zeroizing; + +use ciphersuite::{Ciphersuite, Ed25519}; + +use monero_wallet::{address::SubaddressIndex, ViewPairError, GuaranteedViewPair}; + +use view_keys::view_key; + +pub(crate) mod output; +pub(crate) mod transaction; +pub(crate) mod block; + +pub(crate) const EXTERNAL_SUBADDRESS: SubaddressIndex = match SubaddressIndex::new(1, 0) { + Some(index) => index, + None => panic!("SubaddressIndex for EXTERNAL_SUBADDRESS was None"), +}; +pub(crate) const BRANCH_SUBADDRESS: SubaddressIndex = match SubaddressIndex::new(2, 0) { + Some(index) => index, + None => panic!("SubaddressIndex for BRANCH_SUBADDRESS was None"), +}; 
+pub(crate) const CHANGE_SUBADDRESS: SubaddressIndex = match SubaddressIndex::new(2, 1) { + Some(index) => index, + None => panic!("SubaddressIndex for CHANGE_SUBADDRESS was None"), +}; +pub(crate) const FORWARDED_SUBADDRESS: SubaddressIndex = match SubaddressIndex::new(2, 2) { + Some(index) => index, + None => panic!("SubaddressIndex for FORWARDED_SUBADDRESS was None"), +}; + +pub(crate) fn view_pair(key: ::G) -> GuaranteedViewPair { + match GuaranteedViewPair::new(key.0, Zeroizing::new(*view_key::(0))) { + Ok(view_pair) => view_pair, + Err(ViewPairError::TorsionedSpendKey) => { + unreachable!("dalek_ff_group::EdwardsPoint had torsion") + } + } +} diff --git a/processor/monero/src/primitives/output.rs b/processor/monero/src/primitives/output.rs new file mode 100644 index 00000000..b2b87a5c --- /dev/null +++ b/processor/monero/src/primitives/output.rs @@ -0,0 +1,94 @@ +use std::io; + +use ciphersuite::{group::Group, Ciphersuite, Ed25519}; + +use monero_wallet::WalletOutput; + +use scale::{Encode, Decode}; +use borsh::{BorshSerialize, BorshDeserialize}; + +use serai_client::{ + primitives::{ExternalCoin, Amount, ExternalBalance}, + networks::monero::Address, +}; + +use primitives::{OutputType, ReceivedOutput}; + +use crate::{EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS, FORWARDED_SUBADDRESS}; + +#[rustfmt::skip] +#[derive( + Clone, Copy, PartialEq, Eq, Default, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize, +)] +pub(crate) struct OutputId(pub(crate) [u8; 32]); +impl AsRef<[u8]> for OutputId { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} +impl AsMut<[u8]> for OutputId { + fn as_mut(&mut self) -> &mut [u8] { + self.0.as_mut() + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) struct Output(pub(crate) WalletOutput); +impl ReceivedOutput<::G, Address> for Output { + type Id = OutputId; + type TransactionId = [u8; 32]; + + fn kind(&self) -> OutputType { + let subaddress = self.0.subaddress().unwrap(); + if subaddress == EXTERNAL_SUBADDRESS { + return OutputType::External; + } + if subaddress == BRANCH_SUBADDRESS { + return OutputType::Branch; + } + if subaddress == CHANGE_SUBADDRESS { + return OutputType::Change; + } + if subaddress == FORWARDED_SUBADDRESS { + return OutputType::Forwarded; + } + unreachable!("scanned output to unknown subaddress"); + } + + fn id(&self) -> Self::Id { + OutputId(self.0.key().compress().to_bytes()) + } + + fn transaction_id(&self) -> Self::TransactionId { + self.0.transaction() + } + + fn key(&self) -> ::G { + // The spend key will be a key we generated, so it'll be in the prime-order subgroup + // The output's key is the spend key + (key_offset * G), so it's in the prime-order subgroup if + // the spend key is + dalek_ff_group::EdwardsPoint( + self.0.key() - (*::G::generator() * self.0.key_offset()), + ) + } + + fn presumed_origin(&self) -> Option
<Address>
{ + None + } + + fn balance(&self) -> ExternalBalance { + ExternalBalance { coin: ExternalCoin::Monero, amount: Amount(self.0.commitment().amount) } + } + + fn data(&self) -> &[u8] { + self.0.arbitrary_data().first().map_or(&[], Vec::as_slice) + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + self.0.write(writer) + } + + fn read(reader: &mut R) -> io::Result { + WalletOutput::read(reader).map(Self) + } +} diff --git a/processor/monero/src/primitives/transaction.rs b/processor/monero/src/primitives/transaction.rs new file mode 100644 index 00000000..eeeef81d --- /dev/null +++ b/processor/monero/src/primitives/transaction.rs @@ -0,0 +1,137 @@ +use std::io; + +use rand_core::{RngCore, CryptoRng}; + +use ciphersuite::Ed25519; +use frost::{dkg::ThresholdKeys, sign::PreprocessMachine}; + +use monero_wallet::{ + transaction::Transaction as MTransaction, + send::{ + SignableTransaction as MSignableTransaction, TransactionMachine, Eventuality as MEventuality, + }, +}; + +use crate::output::OutputId; + +#[derive(Clone, Debug)] +pub(crate) struct Transaction(pub(crate) MTransaction); + +impl From for Transaction { + fn from(tx: MTransaction) -> Self { + Self(tx) + } +} + +impl scheduler::Transaction for Transaction { + fn read(reader: &mut impl io::Read) -> io::Result { + MTransaction::read(reader).map(Self) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + self.0.write(writer) + } +} + +#[derive(Clone, Debug)] +pub(crate) struct SignableTransaction { + pub(crate) id: [u8; 32], + pub(crate) signable: MSignableTransaction, +} + +#[derive(Clone)] +pub(crate) struct ClonableTransctionMachine(MSignableTransaction, ThresholdKeys); +impl PreprocessMachine for ClonableTransctionMachine { + type Preprocess = ::Preprocess; + type Signature = ::Signature; + type SignMachine = ::SignMachine; + + fn preprocess( + self, + rng: &mut R, + ) -> (Self::SignMachine, Self::Preprocess) { + self.0.multisig(self.1).expect("incorrect keys used for SignableTransaction").preprocess(rng) + } +} + +impl scheduler::SignableTransaction for SignableTransaction { + type Transaction = Transaction; + type Ciphersuite = Ed25519; + type PreprocessMachine = ClonableTransctionMachine; + + fn read(reader: &mut impl io::Read) -> io::Result { + let mut id = [0; 32]; + reader.read_exact(&mut id)?; + + let signable = MSignableTransaction::read(reader)?; + Ok(SignableTransaction { id, signable }) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + writer.write_all(&self.id)?; + self.signable.write(writer) + } + + fn id(&self) -> [u8; 32] { + self.id + } + + fn sign(self, keys: ThresholdKeys) -> Self::PreprocessMachine { + ClonableTransctionMachine(self.signable, keys) + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) struct Eventuality { + pub(crate) id: [u8; 32], + pub(crate) singular_spent_output: Option, + pub(crate) eventuality: MEventuality, +} + +impl primitives::Eventuality for Eventuality { + type OutputId = OutputId; + + fn id(&self) -> [u8; 32] { + self.id + } + + // We define the lookup as our ID since the resolving transaction only has a singular possible ID + fn lookup(&self) -> Vec { + self.eventuality.extra() + } + + fn singular_spent_output(&self) -> Option { + self.singular_spent_output + } + + fn read(reader: &mut impl io::Read) -> io::Result { + let mut id = [0; 32]; + reader.read_exact(&mut id)?; + + let singular_spent_output = { + let mut singular_spent_output_opt = [0xff]; + reader.read_exact(&mut singular_spent_output_opt)?; + 
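+      // The byte read above is an Option discriminant: 0 encodes None, 1 encodes Some (with the
+      // 32-byte OutputId following), matching the encoding `write` produces below.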
assert!(singular_spent_output_opt[0] <= 1); + (singular_spent_output_opt[0] == 1) + .then(|| -> io::Result<_> { + let mut singular_spent_output = [0; 32]; + reader.read_exact(&mut singular_spent_output)?; + Ok(OutputId(singular_spent_output)) + }) + .transpose()? + }; + + let eventuality = MEventuality::read(reader)?; + Ok(Self { id, singular_spent_output, eventuality }) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + writer.write_all(&self.id)?; + + if let Some(singular_spent_output) = self.singular_spent_output { + writer.write_all(&[1])?; + writer.write_all(singular_spent_output.as_ref())?; + } else { + writer.write_all(&[0])?; + } + + self.eventuality.write(writer) + } +} diff --git a/processor/monero/src/rpc.rs b/processor/monero/src/rpc.rs new file mode 100644 index 00000000..5ca74d02 --- /dev/null +++ b/processor/monero/src/rpc.rs @@ -0,0 +1,139 @@ +use core::future::Future; + +use monero_wallet::rpc::{RpcError, Rpc as RpcTrait}; +use monero_simple_request_rpc::SimpleRequestRpc; + +use serai_client::primitives::{ExternalNetworkId, ExternalCoin, Amount}; + +use scanner::ScannerFeed; +use signers::TransactionPublisher; + +use crate::{ + transaction::Transaction, + block::{BlockHeader, Block}, +}; + +#[derive(Clone)] +pub(crate) struct Rpc { + pub(crate) rpc: SimpleRequestRpc, +} + +impl ScannerFeed for Rpc { + const NETWORK: ExternalNetworkId = ExternalNetworkId::Monero; + // Outputs aren't spendable until 10 blocks later due to the 10-block lock + // Since we assumed scanned outputs are spendable, that sets a minimum confirmation depth of 10 + // A 10-block reorganization hasn't been observed in years and shouldn't occur + const CONFIRMATIONS: u64 = 10; + // The window length should be roughly an hour + const WINDOW_LENGTH: u64 = 30; + + const TEN_MINUTES: u64 = 5; + + type Block = Block; + + type EphemeralError = RpcError; + + fn latest_finalized_block_number( + &self, + ) -> impl Send + Future> { + async move { + Ok( + self + .rpc + .get_height() + .await? + .checked_sub(1) + .expect("connected to an invalid Monero RPC") + .try_into() + .unwrap(), + ) + } + } + + fn time_of_block( + &self, + number: u64, + ) -> impl Send + Future> { + async move { + // Constant from Monero + const BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW: u64 = 60; + + // If Monero doesn't have enough blocks to build a window, it doesn't define a network time + if (number + 1) < BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW { + return Ok(0); + } + + // Fetch all the timestamps within the window + let block_for_time_of = self.rpc.get_block_by_number(number.try_into().unwrap()).await?; + let mut timestamps = vec![block_for_time_of.header.timestamp]; + let mut parent = block_for_time_of.header.previous; + for _ in 1 .. 
BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW { + let parent_block = self.rpc.get_block(parent).await?; + timestamps.push(parent_block.header.timestamp); + parent = parent_block.header.previous; + } + timestamps.sort(); + + // Because there are two timestamps equidistance from the ends, Monero's epee picks the + // in-between value, calculated by the following formula (from the "get_mid" function) + let n = timestamps.len() / 2; + let a = timestamps[n - 1]; + let b = timestamps[n]; + #[rustfmt::skip] // Enables Ctrl+F'ing for everything after the `= ` + let res = (a/2) + (b/2) + ((a - 2*(a/2)) + (b - 2*(b/2)))/2; + + // Monero does check that the new block's time is greater than the median, causing the median + // to be monotonic + Ok(res) + } + } + + fn unchecked_block_header_by_number( + &self, + number: u64, + ) -> impl Send + + Future::Header, Self::EphemeralError>> + { + async move { Ok(BlockHeader(self.rpc.get_block_by_number(number.try_into().unwrap()).await?)) } + } + + #[rustfmt::skip] // It wants to improperly format the `async move` to a single line + fn unchecked_block_by_number( + &self, + number: u64, + ) -> impl Send + Future> { + async move { + Ok(Block(self.rpc.get_scannable_block_by_number(number.try_into().unwrap()).await?)) + } + } + + fn dust(coin: ExternalCoin) -> Amount { + assert_eq!(coin, ExternalCoin::Monero); + + // 0.01 XMR + Amount(10_000_000_000) + } + + fn cost_to_aggregate( + &self, + coin: ExternalCoin, + _reference_block: &Self::Block, + ) -> impl Send + Future> { + async move { + assert_eq!(coin, ExternalCoin::Bitcoin); + // TODO + Ok(Amount(0)) + } + } +} + +impl TransactionPublisher for Rpc { + type EphemeralError = RpcError; + + fn publish( + &self, + tx: Transaction, + ) -> impl Send + Future> { + async move { self.rpc.publish_transaction(&tx.0).await } + } +} diff --git a/processor/monero/src/scheduler.rs b/processor/monero/src/scheduler.rs new file mode 100644 index 00000000..9043f888 --- /dev/null +++ b/processor/monero/src/scheduler.rs @@ -0,0 +1,269 @@ +use core::future::Future; + +use zeroize::Zeroizing; +use rand_core::SeedableRng; +use rand_chacha::ChaCha20Rng; + +use ciphersuite::{Ciphersuite, Ed25519}; + +use monero_wallet::rpc::{FeeRate, RpcError}; + +use serai_client::{ + primitives::{ExternalCoin, Amount}, + networks::monero::Address, +}; + +use primitives::{OutputType, ReceivedOutput, Payment}; +use scanner::{KeyFor, AddressFor, OutputFor, BlockFor}; +use utxo_scheduler::{PlannedTransaction, TransactionPlanner}; + +use monero_wallet::{ + ringct::RctType, + address::{Network, AddressType, MoneroAddress}, + OutputWithDecoys, + send::{ + Change, SendError, SignableTransaction as MSignableTransaction, Eventuality as MEventuality, + }, +}; + +use crate::{ + EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS, FORWARDED_SUBADDRESS, view_pair, + transaction::{SignableTransaction, Eventuality}, + rpc::Rpc, +}; + +fn address_from_serai_key(key: ::G, kind: OutputType) -> Address { + view_pair(key) + .address( + Network::Mainnet, + Some(match kind { + OutputType::External => EXTERNAL_SUBADDRESS, + OutputType::Branch => BRANCH_SUBADDRESS, + OutputType::Change => CHANGE_SUBADDRESS, + OutputType::Forwarded => FORWARDED_SUBADDRESS, + }), + None, + ) + .try_into() + .expect("created address which wasn't representable") +} + +async fn signable_transaction( + rpc: &Rpc, + reference_block: &BlockFor, + inputs: Vec>, + payments: Vec>>, + change: Option>, +) -> Result, RpcError> { + assert!(inputs.len() < >::MAX_INPUTS); + assert!( + (payments.len() + 
usize::from(u8::from(change.is_some()))) < + >::MAX_OUTPUTS + ); + + // TODO: Set a sane minimum fee + const MINIMUM_FEE: u64 = 1_500_000; + // TODO: Set a fee rate based on the reference block + let fee_rate = FeeRate::new(MINIMUM_FEE, 10000).unwrap(); + + // Determine the RCT proofs to make based off the hard fork + let rct_type = match reference_block.0.block.header.hardfork_version { + 14 => RctType::ClsagBulletproof, + 15 | 16 => RctType::ClsagBulletproofPlus, + _ => panic!("Monero hard forked and the processor wasn't updated for it"), + }; + + // We need a unique ID to distinguish this transaction from another transaction with an identical + // set of payments (as our Eventualities only match over the payments). The output's ID is + // guaranteed to be unique, making it satisfactory + let id = inputs.first().unwrap().id().0; + + let mut inputs_actual = Vec::with_capacity(inputs.len()); + for input in inputs { + inputs_actual.push( + OutputWithDecoys::fingerprintable_deterministic_new( + // We need a deterministic RNG here with *some* seed + // The unique ID means we don't pick some static seed + // It is a public value, yet that's fine as this is assumed fully transparent + // It is a reused value (with later code), but that's not an issue. Just an oddity + &mut ChaCha20Rng::from_seed(id), + &rpc.rpc, + match rct_type { + RctType::ClsagBulletproof => 11, + RctType::ClsagBulletproofPlus => 16, + _ => panic!("selecting decoys for an unsupported RctType"), + }, + reference_block.0.block.number().unwrap() + 1, + input.0.clone(), + ) + .await?, + ); + } + let inputs = inputs_actual; + + let mut payments = payments + .into_iter() + .map(|payment| { + (MoneroAddress::from(*payment.address()), { + let balance = payment.balance(); + assert_eq!(balance.coin, ExternalCoin::Monero); + balance.amount.0 + }) + }) + .collect::>(); + if (payments.len() + usize::from(u8::from(change.is_some()))) == 1 { + // Monero requires at least two outputs, so add a dummy payment + payments.push(( + MoneroAddress::new( + Network::Mainnet, + AddressType::Legacy, + ::generator().0, + ::generator().0, + ), + 0, + )); + } + + let change = if let Some(change) = change { + Change::guaranteed(view_pair(change), Some(CHANGE_SUBADDRESS)) + } else { + Change::fingerprintable(None) + }; + + Ok( + MSignableTransaction::new( + rct_type, + Zeroizing::new(id), + inputs, + payments, + change, + vec![], + fee_rate, + ) + .map(|signable| (SignableTransaction { id, signable: signable.clone() }, signable)), + ) +} + +#[derive(Clone)] +pub(crate) struct Planner(pub(crate) Rpc); +impl TransactionPlanner for Planner { + type EphemeralError = RpcError; + + type SignableTransaction = SignableTransaction; + + // wallet2 will not create a transaction larger than 100 KB, and Monero won't relay a transaction + // larger than 150 KB. This fits within the 100 KB mark to fit in and not poke the bear. 
+ // Technically, it can be ~124, yet a small bit of buffer is appreciated + // TODO: Test creating a TX this big + const MAX_INPUTS: usize = 120; + const MAX_OUTPUTS: usize = 16; + + fn branch_address(key: KeyFor) -> AddressFor { + address_from_serai_key(key, OutputType::Branch) + } + fn change_address(key: KeyFor) -> AddressFor { + address_from_serai_key(key, OutputType::Change) + } + fn forwarding_address(key: KeyFor) -> AddressFor { + address_from_serai_key(key, OutputType::Forwarded) + } + + fn calculate_fee( + &self, + reference_block: &BlockFor, + inputs: Vec>, + payments: Vec>>, + change: Option>, + ) -> impl Send + Future> { + async move { + Ok(match signable_transaction(&self.0, reference_block, inputs, payments, change).await? { + Ok(tx) => Amount(tx.1.necessary_fee()), + Err(SendError::NotEnoughFunds { necessary_fee, .. }) => { + Amount(necessary_fee.expect("outputs value exceeded inputs value")) + } + Err(SendError::UnsupportedRctType) => { + panic!("tried to use an RctType monero-wallet doesn't support") + } + Err(SendError::NoInputs | SendError::NoOutputs | SendError::TooManyOutputs) => { + panic!("malformed plan passed to calculate_fee") + } + Err(SendError::InvalidDecoyQuantity) => panic!("selected the wrong amount of decoys"), + Err(SendError::NoChange) => { + panic!("didn't add a dummy payment to satisfy the 2-output minimum") + } + Err(SendError::MultiplePaymentIds) => { + panic!("included multiple payment IDs despite not supporting addresses with payment IDs") + } + Err(SendError::TooMuchArbitraryData) => { + panic!("included too much arbitrary data despite not including any") + } + Err(SendError::TooLargeTransaction) => { + panic!("too large transaction despite MAX_INPUTS/MAX_OUTPUTS") + } + Err( + SendError::WrongPrivateKey | + SendError::MaliciousSerialization | + SendError::ClsagError(_) | + SendError::FrostError(_), + ) => unreachable!("signing/serialization error when not signing/serializing"), + }) + } + } + + fn plan( + &self, + reference_block: &BlockFor, + inputs: Vec>, + payments: Vec>>, + change: Option>, + ) -> impl Send + + Future, RpcError>> + { + let singular_spent_output = (inputs.len() == 1).then(|| inputs[0].id()); + + async move { + Ok(match signable_transaction(&self.0, reference_block, inputs, payments, change).await? { + Ok(tx) => { + let id = tx.0.id; + PlannedTransaction { + signable: tx.0, + eventuality: Eventuality { + id, + singular_spent_output, + eventuality: MEventuality::from(tx.1), + }, + auxilliary: (), + } + } + Err(SendError::NotEnoughFunds { .. 
}) => panic!("failed to successfully amortize the fee"), + Err(SendError::UnsupportedRctType) => { + panic!("tried to use an RctType monero-wallet doesn't support") + } + Err(SendError::NoInputs | SendError::NoOutputs | SendError::TooManyOutputs) => { + panic!("malformed plan passed to calculate_fee") + } + Err(SendError::InvalidDecoyQuantity) => panic!("selected the wrong amount of decoys"), + Err(SendError::NoChange) => { + panic!("didn't add a dummy payment to satisfy the 2-output minimum") + } + Err(SendError::MultiplePaymentIds) => { + panic!("included multiple payment IDs despite not supporting addresses with payment IDs") + } + Err(SendError::TooMuchArbitraryData) => { + panic!("included too much arbitrary data despite not including any") + } + Err(SendError::TooLargeTransaction) => { + panic!("too large transaction despite MAX_INPUTS/MAX_OUTPUTS") + } + Err( + SendError::WrongPrivateKey | + SendError::MaliciousSerialization | + SendError::ClsagError(_) | + SendError::FrostError(_), + ) => unreachable!("signing/serialization error when not signing/serializing"), + }) + } + } +} + +pub(crate) type Scheduler = utxo_standard_scheduler::Scheduler; diff --git a/processor/primitives/Cargo.toml b/processor/primitives/Cargo.toml new file mode 100644 index 00000000..a950a61b --- /dev/null +++ b/processor/primitives/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "serai-processor-primitives" +version = "0.1.0" +description = "Primitives for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/primitives" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false +rust-version = "1.80" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +group = { version = "0.13", default-features = false } + +serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std", "borsh"] } +serai-coins-primitives = { path = "../../substrate/coins/primitives", default-features = false, features = ["std", "borsh"] } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +log = { version = "0.4", default-features = false, features = ["std"] } +tokio = { version = "1", default-features = false, features = ["macros", "sync", "time"] } + +serai-task = { path = "../../common/task", default-features = false } diff --git a/processor/primitives/LICENSE b/processor/primitives/LICENSE new file mode 100644 index 00000000..41d5a261 --- /dev/null +++ b/processor/primitives/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
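As a standalone sketch (not part of any file in this diff) of the "get_mid" formula referenced in `Rpc::time_of_block` earlier in this diff: for unsigned integers, `a - 2*(a/2)` is simply `a % 2`, so the expression is an overflow-free midpoint of the two middle timestamps.

fn get_mid(a: u64, b: u64) -> u64 {
  // Summing the halves first means `a + b` is never materialized, so u64 timestamps can't overflow
  (a / 2) + (b / 2) + ((a % 2) + (b % 2)) / 2
}

fn main() {
  assert_eq!(get_mid(3, 5), 4);
  assert_eq!(get_mid(3, 4), 3); // rounds down when the true midpoint is fractional
  assert_eq!(get_mid(u64::MAX, u64::MAX), u64::MAX); // no overflow at the extremes
}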
diff --git a/processor/primitives/README.md b/processor/primitives/README.md new file mode 100644 index 00000000..d616993c --- /dev/null +++ b/processor/primitives/README.md @@ -0,0 +1,3 @@ +# Primitives + +Primitive types/traits/structs used by the Processor. diff --git a/processor/primitives/src/block.rs b/processor/primitives/src/block.rs new file mode 100644 index 00000000..a3dec40b --- /dev/null +++ b/processor/primitives/src/block.rs @@ -0,0 +1,66 @@ +use core::fmt::Debug; +use std::collections::HashMap; + +use group::{Group, GroupEncoding}; + +use crate::{Address, ReceivedOutput, Eventuality, EventualityTracker}; + +/// A block header from an external network. +pub trait BlockHeader: Send + Sync + Sized + Clone + Debug { + /// The ID of this block. + /// + /// This is fixed to 32-bytes and is expected to be cryptographically binding with 128-bit + /// security. This is not required to be the ID used natively by the external network. + fn id(&self) -> [u8; 32]; + /// The ID of the parent block. + fn parent(&self) -> [u8; 32]; +} + +/// A block from an external network. +/// +/// A block is defined as a consensus event associated with a set of transactions. It is not +/// necessary to literally define it as whatever the external network defines as a block. For +/// external networks which finalize block(s), this block type should be a representation of all +/// transactions within a period finalization (whether block or epoch). +pub trait Block: Send + Sync + Sized + Clone + Debug { + /// The type used for this block's header. + type Header: BlockHeader; + + /// The type used to represent keys on this external network. + type Key: Group + GroupEncoding; + /// The type used to represent addresses on this external network. + type Address: Address; + /// The type used to represent received outputs on this external network. + type Output: ReceivedOutput; + /// The type used to represent an Eventuality for a transaction on this external network. + type Eventuality: Eventuality< + OutputId = >::Id, + >; + + /// The ID of this block. + fn id(&self) -> [u8; 32]; + + /// Scan all outputs within this block to find the outputs spendable by this key. + /// + /// No assumption on the order of the returned outputs is made. + fn scan_for_outputs_unordered( + &self, + latest_active_key: Self::Key, + key: Self::Key, + ) -> Vec; + + /// Check if this block resolved any Eventualities. + /// + /// This MUST mutate `eventualities` to no longer contain the resolved Eventualities. + /// + /// Returns tbe resolved Eventualities, indexed by the ID of the transactions which resolved + /// them. + #[allow(clippy::type_complexity)] + fn check_for_eventuality_resolutions( + &self, + eventualities: &mut EventualityTracker, + ) -> HashMap< + >::TransactionId, + Self::Eventuality, + >; +} diff --git a/processor/primitives/src/eventuality.rs b/processor/primitives/src/eventuality.rs new file mode 100644 index 00000000..f68ceeae --- /dev/null +++ b/processor/primitives/src/eventuality.rs @@ -0,0 +1,59 @@ +use std::{io, collections::HashMap}; + +use crate::Id; + +/// A description of a transaction which will eventually happen. +pub trait Eventuality: Sized + Send + Sync { + /// The type used to identify a received output. + type OutputId: Id; + + /// The ID of the SignableTransaction this Eventuality is for. + /// + /// This is an internal ID arbitrarily definable so long as it's unique. + fn id(&self) -> [u8; 32]; + + /// A unique byte sequence which can be used to identify potentially resolving transactions. 
+ /// + /// Both a transaction and an Eventuality are expected to be able to yield lookup sequences. + /// Lookup sequences MUST be unique to the Eventuality and identical to any transaction's which + /// satisfies this Eventuality. Transactions which don't satisfy this Eventuality MAY also have + /// an identical lookup sequence. + /// + /// This is used to find the Eventuality a transaction MAY resolve so we don't have to check all + /// transactions against all Eventualities. Once the potential resolved Eventuality is + /// identified, the full check is performed. + fn lookup(&self) -> Vec; + + /// The output the resolution of this Eventuality was supposed to spend. + /// + /// If the resolution of this Eventuality has multiple inputs, there is no singular spent output + /// so this MUST return None. + fn singular_spent_output(&self) -> Option; + + /// Read an Eventuality. + fn read(reader: &mut impl io::Read) -> io::Result; + /// Write an Eventuality. + fn write(&self, writer: &mut impl io::Write) -> io::Result<()>; +} + +/// A tracker of unresolved Eventualities. +#[derive(Debug)] +pub struct EventualityTracker { + /// The active Eventualities. + /// + /// These are keyed by their lookups. + pub active_eventualities: HashMap, E>, +} + +impl Default for EventualityTracker { + fn default() -> Self { + EventualityTracker { active_eventualities: HashMap::new() } + } +} + +impl EventualityTracker { + /// Insert an Eventuality into the tracker. + pub fn insert(&mut self, eventuality: E) { + self.active_eventualities.insert(eventuality.lookup(), eventuality); + } +} diff --git a/processor/primitives/src/lib.rs b/processor/primitives/src/lib.rs new file mode 100644 index 00000000..371bdafb --- /dev/null +++ b/processor/primitives/src/lib.rs @@ -0,0 +1,89 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::{hash::Hash, fmt::Debug}; + +use group::GroupEncoding; + +use scale::{Encode, Decode}; +use borsh::{BorshSerialize, BorshDeserialize}; + +/// A module for task-related structs and functionality. +pub use serai_task as task; + +mod output; +pub use output::*; + +mod eventuality; +pub use eventuality::*; + +mod block; +pub use block::*; + +mod payment; +pub use payment::*; + +/// An ID for an output/transaction/block/etc. +/// +/// IDs don't need to implement `Copy`, enabling `[u8; 33]`, `[u8; 64]` to be used. IDs are still +/// bound to being of a constant-size, where `Default::default()` returns an instance of such size +/// (making `Vec` invalid as an `Id`). +pub trait Id: + Send + + Sync + + Clone + + Default + + PartialEq + + Eq + + Hash + + AsRef<[u8]> + + AsMut<[u8]> + + Debug + + Encode + + Decode + + BorshSerialize + + BorshDeserialize +{ +} +impl< + I: Send + + Sync + + Clone + + Default + + PartialEq + + Eq + + Hash + + AsRef<[u8]> + + AsMut<[u8]> + + Debug + + Encode + + Decode + + BorshSerialize + + BorshDeserialize, + > Id for I +{ +} + +/// A wrapper for a group element which implements the scale/borsh traits. 
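+///
+/// Borsh (de)serialization round-trips through `GroupEncoding::to_bytes`/`from_bytes`;
+/// deserializing bytes which aren't a valid point encoding fails with an "invalid point" error.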
+#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct EncodableG(pub G); +impl Encode for EncodableG { + fn using_encoded R>(&self, f: F) -> R { + f(self.0.to_bytes().as_ref()) + } +} +impl BorshSerialize for EncodableG { + fn serialize(&self, writer: &mut W) -> borsh::io::Result<()> { + writer.write_all(self.0.to_bytes().as_ref()) + } +} +impl BorshDeserialize for EncodableG { + fn deserialize_reader(reader: &mut R) -> borsh::io::Result { + let mut repr = G::Repr::default(); + reader.read_exact(repr.as_mut())?; + Ok(Self( + Option::::from(G::from_bytes(&repr)).ok_or(borsh::io::Error::other("invalid point"))?, + )) + } +} diff --git a/processor/primitives/src/output.rs b/processor/primitives/src/output.rs new file mode 100644 index 00000000..e45b7344 --- /dev/null +++ b/processor/primitives/src/output.rs @@ -0,0 +1,144 @@ +use core::fmt::Debug; +use std::io; + +use group::GroupEncoding; + +use borsh::{BorshSerialize, BorshDeserialize}; + +use serai_primitives::{ExternalAddress, ExternalBalance}; + +use crate::Id; + +/// An address on the external network. +pub trait Address: + Send + + Sync + + Clone + + Into + + TryFrom + + BorshSerialize + + BorshDeserialize +{ +} +// This casts a wide net, yet it only implements `Address` for things `Into` so +// it should only implement this for addresses +impl< + A: Send + + Sync + + Clone + + Into + + TryFrom + + BorshSerialize + + BorshDeserialize, + > Address for A +{ +} + +/// The type of the output. +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum OutputType { + /// An output received to the address external payments use. + /// + /// This is reported to Substrate in a `Batch`. + External, + + /// A branch output. + /// + /// Given a known output set, and a known series of outbound transactions, we should be able to + /// form a completely deterministic schedule S. The issue is when S has TXs which spend prior TXs + /// in S (which is needed for our logarithmic scheduling). In order to have the descendant TX, + /// say S[1], build off S[0], we need to observe when S[0] is included on-chain. + /// + /// We cannot. + /// + /// Monero (and other privacy coins) do not expose their UTXO graphs. Even if we know how to + /// create S[0], and the actual payment info behind it, we cannot observe it on the blockchain + /// unless we participated in creating it. Locking the entire schedule, when we cannot sign for + /// the entire schedule at once, to a single signing set isn't feasible. + /// + /// While any member of the active signing set can provide data enabling other signers to + /// participate, it's several KB of data which we then have to code communication for. + /// The other option is to simply not observe S[0]. Instead, observe a TX with an identical + /// output to the one in S[0] we intended to use for S[1]. It's either from S[0], or Eve, a + /// malicious actor, has sent us a forged TX which is... equally as usable? So who cares? + /// + /// The only issue is if we have multiple outputs on-chain with identical amounts and purposes. + /// Accordingly, when the scheduler makes a plan for when a specific output is available, it + /// shouldn't set that plan. It should *push* that plan to a queue of plans to perform when + /// instances of that output occur. + Branch, + + /// A change output. + /// + /// This should be added to the available UTXO pool with no further action taken. It does not + /// need to be reported (though we do still need synchrony on the block it's in). 
There's no + /// explicit expectation for the usage of this output at time of recipience. + Change, + + /// A forwarded output from the prior multisig. + /// + /// This is distinguished for technical reasons around detecting when a multisig should be + /// retired. + Forwarded, +} + +impl OutputType { + /// Write the OutputType. + pub fn write(&self, writer: &mut W) -> io::Result<()> { + writer.write_all(&[match self { + OutputType::External => 0, + OutputType::Branch => 1, + OutputType::Change => 2, + OutputType::Forwarded => 3, + }]) + } + + /// Read an OutputType. + pub fn read(reader: &mut R) -> io::Result { + let mut byte = [0; 1]; + reader.read_exact(&mut byte)?; + Ok(match byte[0] { + 0 => OutputType::External, + 1 => OutputType::Branch, + 2 => OutputType::Change, + 3 => OutputType::Forwarded, + _ => Err(io::Error::other("invalid OutputType"))?, + }) + } +} + +/// A received output. +pub trait ReceivedOutput: + Send + Sync + Sized + Clone + PartialEq + Eq + Debug +{ + /// The type used to identify this output. + type Id: 'static + Id; + /// The type used to identify the transaction which created this output. + type TransactionId: 'static + Id; + + /// The type of this output. + fn kind(&self) -> OutputType; + + /// The ID of this output. + fn id(&self) -> Self::Id; + /// The ID of the transaction which created this output. + fn transaction_id(&self) -> Self::TransactionId; + /// The key this output was received by. + fn key(&self) -> K; + + /// The presumed origin for this output. + /// + /// This is used as the address to refund coins to if we can't handle the output as desired + /// (unless overridden). + fn presumed_origin(&self) -> Option; + + /// The balance associated with this output. + fn balance(&self) -> ExternalBalance; + /// The arbitrary data (presumably an InInstruction) associated with this output. + fn data(&self) -> &[u8]; + + /// Write this output. + fn write(&self, writer: &mut W) -> io::Result<()>; + /// Read an output. + fn read(reader: &mut R) -> io::Result; +} diff --git a/processor/primitives/src/payment.rs b/processor/primitives/src/payment.rs new file mode 100644 index 00000000..b892b2b4 --- /dev/null +++ b/processor/primitives/src/payment.rs @@ -0,0 +1,56 @@ +use std::io; + +use scale::{Encode, Decode, IoReader}; +use borsh::{BorshSerialize, BorshDeserialize}; + +use serai_primitives::ExternalBalance; +use serai_coins_primitives::OutInstructionWithBalance; + +use crate::Address; + +/// A payment to fulfill. +#[derive(Clone, BorshSerialize, BorshDeserialize)] +pub struct Payment { + address: A, + balance: ExternalBalance, +} + +impl TryFrom for Payment { + type Error = (); + fn try_from(out_instruction_with_balance: OutInstructionWithBalance) -> Result { + Ok(Payment { + address: out_instruction_with_balance.instruction.address.try_into().map_err(|_| ())?, + balance: out_instruction_with_balance.balance, + }) + } +} + +impl Payment { + /// Create a new Payment. + pub fn new(address: A, balance: ExternalBalance) -> Self { + Payment { address, balance } + } + + /// The address to pay. + pub fn address(&self) -> &A { + &self.address + } + /// The balance to transfer. + pub fn balance(&self) -> ExternalBalance { + self.balance + } + + /// Read a Payment. + pub fn read(reader: &mut impl io::Read) -> io::Result { + let address = A::deserialize_reader(reader)?; + let reader = &mut IoReader(reader); + let balance = ExternalBalance::decode(reader).map_err(io::Error::other)?; + Ok(Self { address, balance }) + } + /// Write the Payment. 
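+  ///
+  /// The address is serialized with borsh and the balance is appended with SCALE, mirroring
+  /// `read`.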
+ pub fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + self.address.serialize(writer)?; + self.balance.encode_to(writer); + Ok(()) + } +} diff --git a/processor/scanner/Cargo.toml b/processor/scanner/Cargo.toml new file mode 100644 index 00000000..1fc70e0f --- /dev/null +++ b/processor/scanner/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "serai-processor-scanner" +version = "0.1.0" +description = "Scanner of abstract blockchains for Serai" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/scanner" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false +rust-version = "1.80" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +# Encoders +hex = { version = "0.4", default-features = false, features = ["std"] } +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +# Cryptography +blake2 = { version = "0.10", default-features = false, features = ["std"] } +group = { version = "0.13", default-features = false } + +# Application +log = { version = "0.4", default-features = false, features = ["std"] } +tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } + +serai-db = { path = "../../common/db" } + +messages = { package = "serai-processor-messages", path = "../messages" } + +serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] } +serai-validator-sets-primitives = { path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std", "borsh"] } +serai-in-instructions-primitives = { path = "../../substrate/in-instructions/primitives", default-features = false, features = ["std", "borsh"] } +serai-coins-primitives = { path = "../../substrate/coins/primitives", default-features = false, features = ["std", "borsh"] } + +primitives = { package = "serai-processor-primitives", path = "../primitives" } +scheduler-primitives = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } diff --git a/processor/scanner/LICENSE b/processor/scanner/LICENSE new file mode 100644 index 00000000..41d5a261 --- /dev/null +++ b/processor/scanner/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/scanner/README.md b/processor/scanner/README.md new file mode 100644 index 00000000..f6c6ccc6 --- /dev/null +++ b/processor/scanner/README.md @@ -0,0 +1,12 @@ +# Scanner + +A scanner of arbitrary blockchains for Serai. 
+ +This scanner has two distinct roles: + +1) Scanning blocks for received outputs contained within them +2) Scanning blocks for the completion of eventualities + +While these can be optimized into a single structure, they are written as two +distinct structures (with the associated overhead) for clarity and simplicity +reasons. diff --git a/processor/scanner/src/batch/db.rs b/processor/scanner/src/batch/db.rs new file mode 100644 index 00000000..015b661b --- /dev/null +++ b/processor/scanner/src/batch/db.rs @@ -0,0 +1,125 @@ +use core::marker::PhantomData; +use std::io::{Read, Write}; + +use group::GroupEncoding; + +use scale::{Encode, Decode, IoReader}; +use borsh::{BorshSerialize, BorshDeserialize}; +use serai_db::{Get, DbTxn, create_db}; + +use serai_primitives::ExternalBalance; +use serai_validator_sets_primitives::Session; + +use primitives::EncodableG; +use crate::{ScannerFeed, KeyFor, AddressFor}; + +#[derive(BorshSerialize, BorshDeserialize)] +pub(crate) struct BatchInfo { + pub(crate) block_number: u64, + pub(crate) session_to_sign_batch: Session, + pub(crate) external_key_for_session_to_sign_batch: K, + pub(crate) in_instructions_hash: [u8; 32], +} + +create_db!( + ScannerBatch { + // The next block to create batches for + NextBlockToBatch: () -> u64, + + // The next Batch ID to use + NextBatchId: () -> u32, + + // The information needed to verify a batch + InfoForBatch: (batch: u32) -> BatchInfo>, + + // The return addresses for the InInstructions within a Batch + SerializedReturnAddresses: (batch: u32) -> Vec, + } +); + +pub(crate) struct ReturnInformation { + pub(crate) address: AddressFor, + pub(crate) balance: ExternalBalance, +} + +pub(crate) struct BatchDb(PhantomData); +impl BatchDb { + pub(crate) fn set_next_block_to_batch(txn: &mut impl DbTxn, next_block_to_batch: u64) { + NextBlockToBatch::set(txn, &next_block_to_batch); + } + pub(crate) fn next_block_to_batch(getter: &impl Get) -> Option { + NextBlockToBatch::get(getter) + } + + pub(crate) fn acquire_batch_id(txn: &mut impl DbTxn) -> u32 { + let id = NextBatchId::get(txn).unwrap_or(0); + NextBatchId::set(txn, &(id + 1)); + id + } + + pub(crate) fn save_batch_info( + txn: &mut impl DbTxn, + id: u32, + block_number: u64, + session_to_sign_batch: Session, + external_key_for_session_to_sign_batch: KeyFor, + in_instructions_hash: [u8; 32], + ) { + InfoForBatch::set( + txn, + id, + &BatchInfo { + block_number, + session_to_sign_batch, + external_key_for_session_to_sign_batch: EncodableG(external_key_for_session_to_sign_batch), + in_instructions_hash, + }, + ); + } + + pub(crate) fn take_info_for_batch( + txn: &mut impl DbTxn, + id: u32, + ) -> Option>>> { + InfoForBatch::take(txn, id) + } + + pub(crate) fn save_return_information( + txn: &mut impl DbTxn, + id: u32, + return_information: &Vec>>, + ) { + let mut buf = Vec::with_capacity(return_information.len() * (32 + 1 + 8)); + for return_information in return_information { + if let Some(ReturnInformation { address, balance }) = return_information { + buf.write_all(&[1]).unwrap(); + address.serialize(&mut buf).unwrap(); + balance.encode_to(&mut buf); + } else { + buf.write_all(&[0]).unwrap(); + } + } + SerializedReturnAddresses::set(txn, id, &buf); + } + pub(crate) fn take_return_information( + txn: &mut impl DbTxn, + id: u32, + ) -> Option>>> { + let buf = SerializedReturnAddresses::take(txn, id)?; + let mut buf = buf.as_slice(); + + let mut res = Vec::with_capacity(buf.len() / (32 + 1 + 8)); + while !buf.is_empty() { + let mut opt = [0xff]; + buf.read_exact(&mut 
opt).unwrap(); + assert!((opt[0] == 0) || (opt[0] == 1)); + + res.push((opt[0] == 1).then(|| { + let address = AddressFor::::deserialize_reader(&mut buf).unwrap(); + let balance = ExternalBalance::decode(&mut IoReader(&mut buf)).unwrap(); + ReturnInformation { address, balance } + })); + } + Some(res) + } +} diff --git a/processor/scanner/src/batch/mod.rs b/processor/scanner/src/batch/mod.rs new file mode 100644 index 00000000..b583b6ed --- /dev/null +++ b/processor/scanner/src/batch/mod.rs @@ -0,0 +1,196 @@ +use core::{marker::PhantomData, future::Future}; + +use blake2::{digest::typenum::U32, Digest, Blake2b}; + +use scale::Encode; +use serai_db::{DbTxn, Db}; + +use serai_primitives::BlockHash; +use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch}; + +use primitives::{ + EncodableG, + task::{DoesNotError, ContinuallyRan}, +}; +use crate::{ + db::{Returnable, ScannerGlobalDb, InInstructionData, ScanToBatchDb, BatchData, BatchToReportDb}, + index, + scan::next_to_scan_for_outputs_block, + ScannerFeed, KeyFor, +}; + +mod db; +pub(crate) use db::{BatchInfo, ReturnInformation}; +use db::BatchDb; + +pub(crate) fn take_info_for_batch( + txn: &mut impl DbTxn, + id: u32, +) -> Option>>> { + BatchDb::::take_info_for_batch(txn, id) +} + +pub(crate) fn take_return_information( + txn: &mut impl DbTxn, + id: u32, +) -> Option>>> { + BatchDb::::take_return_information(txn, id) +} + +/* + This task produces Batches for notable blocks, with all InInstructions, in an ordered fashion. + + We only produce batches once both tasks, scanning for received outputs and checking for resolved + Eventualities, have processed the block. This ensures we know if this block is notable, and have + the InInstructions for it. +*/ +#[allow(non_snake_case)] +pub(crate) struct BatchTask { + db: D, + _S: PhantomData, +} + +impl BatchTask { + pub(crate) fn new(mut db: D, start_block: u64) -> Self { + if BatchDb::::next_block_to_batch(&db).is_none() { + // Initialize the DB + let mut txn = db.txn(); + BatchDb::::set_next_block_to_batch(&mut txn, start_block); + txn.commit(); + } + + Self { db, _S: PhantomData } + } +} + +impl ContinuallyRan for BatchTask { + type Error = DoesNotError; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let highest_batchable = { + // Fetch the next to scan block + let next_to_scan = next_to_scan_for_outputs_block::(&self.db) + .expect("BatchTask run before writing the start block"); + // If we haven't done any work, return + if next_to_scan == 0 { + return Ok(false); + } + // The last scanned block is the block prior to this + #[allow(clippy::let_and_return)] + let last_scanned = next_to_scan - 1; + // The last scanned block is the highest batchable block as we only scan blocks within a + // window where it's safe to immediately report the block + // See `eventuality.rs` for more info + last_scanned + }; + + let next_block_to_batch = BatchDb::::next_block_to_batch(&self.db) + .expect("BatchTask run before writing the start block"); + + for block_number in next_block_to_batch ..= highest_batchable { + let mut txn = self.db.txn(); + + // Receive the InInstructions for this block + // We always do this as we can't trivially tell if we should recv InInstructions before we + // do + let InInstructionData { + session_to_sign_batch, + external_key_for_session_to_sign_batch, + returnable_in_instructions: in_instructions, + } = ScanToBatchDb::::recv_in_instructions(&mut txn, block_number); + + let notable = ScannerGlobalDb::::is_block_notable(&txn, block_number); + if 
!notable { + assert!(in_instructions.is_empty(), "block wasn't notable yet had InInstructions"); + } + // If this block is notable, create the Batch(s) for it + if notable { + let network = S::NETWORK; + let external_network_block_hash = BlockHash(index::block_id(&txn, block_number)); + let mut batch_id = BatchDb::::acquire_batch_id(&mut txn); + + // start with empty batch + let mut batches = vec![Batch { + network, + id: batch_id, + external_network_block_hash, + instructions: vec![], + }]; + // We also track the return information for the InInstructions within a Batch in case + // they error + let mut return_information = vec![vec![]]; + + for Returnable { return_address, in_instruction } in in_instructions { + let balance = in_instruction.balance; + + let batch = batches.last_mut().unwrap(); + batch.instructions.push(in_instruction); + + // check if batch is over-size + if batch.encode().len() > MAX_BATCH_SIZE { + // pop the last instruction so it's back in size + let in_instruction = batch.instructions.pop().unwrap(); + + // bump the id for the new batch + batch_id = BatchDb::::acquire_batch_id(&mut txn); + + // make a new batch with this instruction included + batches.push(Batch { + network, + id: batch_id, + external_network_block_hash, + instructions: vec![in_instruction], + }); + // Since we're allocating a new batch, allocate a new set of return addresses for it + return_information.push(vec![]); + } + + // For the set of return addresses for the InInstructions for the batch we just pushed + // onto, push this InInstruction's return addresses + return_information + .last_mut() + .unwrap() + .push(return_address.map(|address| ReturnInformation { address, balance })); + } + + // Now that we've finalized the Batches, save the information for each to the database + assert_eq!(batches.len(), return_information.len()); + for (batch, return_information) in batches.iter().zip(&return_information) { + assert_eq!(batch.instructions.len(), return_information.len()); + BatchDb::::save_batch_info( + &mut txn, + batch.id, + block_number, + session_to_sign_batch, + external_key_for_session_to_sign_batch, + Blake2b::::digest(batch.instructions.encode()).into(), + ); + BatchDb::::save_return_information(&mut txn, batch.id, return_information); + } + + for batch in batches { + BatchToReportDb::::send_batch( + &mut txn, + &BatchData { + session_to_sign_batch, + external_key_for_session_to_sign_batch: EncodableG( + external_key_for_session_to_sign_batch, + ), + batch, + }, + ); + } + } + + // Update the next block to batch + BatchDb::::set_next_block_to_batch(&mut txn, block_number + 1); + + txn.commit(); + } + + // Run dependents if were able to batch any blocks + Ok(next_block_to_batch <= highest_batchable) + } + } +} diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs new file mode 100644 index 00000000..e7fd464b --- /dev/null +++ b/processor/scanner/src/db.rs @@ -0,0 +1,650 @@ +use core::marker::PhantomData; +use std::io::{self, Read, Write}; + +use group::GroupEncoding; + +use scale::{Encode, Decode, IoReader}; +use borsh::{BorshSerialize, BorshDeserialize}; +use serai_db::{Get, DbTxn, create_db, db_channel}; + +use serai_coins_primitives::OutInstructionWithBalance; +use serai_validator_sets_primitives::Session; +use serai_in_instructions_primitives::{InInstructionWithBalance, Batch}; + +use primitives::{EncodableG, ReceivedOutput}; + +use crate::{ + lifetime::{LifetimeStage, Lifetime}, + ScannerFeed, KeyFor, AddressFor, OutputFor, Return, + 
scan::next_to_scan_for_outputs_block, +}; + +// The DB macro doesn't support `BorshSerialize + BorshDeserialize` as a bound, hence this. +trait Borshy: BorshSerialize + BorshDeserialize {} +impl Borshy for T {} + +#[derive(BorshSerialize, BorshDeserialize)] +struct SeraiKeyDbEntry { + activation_block_number: u64, + session: Session, + key: K, +} + +#[derive(Clone)] +pub(crate) struct SeraiKey { + pub(crate) session: Session, + pub(crate) key: K, + pub(crate) stage: LifetimeStage, + pub(crate) activation_block_number: u64, + pub(crate) block_at_which_reporting_starts: u64, + pub(crate) block_at_which_forwarding_starts: Option, +} + +pub(crate) struct OutputWithInInstruction { + pub(crate) output: OutputFor, + pub(crate) return_address: Option>, + pub(crate) in_instruction: InInstructionWithBalance, +} + +impl OutputWithInInstruction { + pub(crate) fn read(reader: &mut impl io::Read) -> io::Result { + let output = OutputFor::::read(reader)?; + let return_address = { + let mut opt = [0xff]; + reader.read_exact(&mut opt)?; + assert!((opt[0] == 0) || (opt[0] == 1)); + (opt[0] == 1).then(|| AddressFor::::deserialize_reader(reader)).transpose()? + }; + let in_instruction = + InInstructionWithBalance::decode(&mut IoReader(reader)).map_err(io::Error::other)?; + Ok(Self { output, return_address, in_instruction }) + } + pub(crate) fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + self.output.write(writer)?; + if let Some(return_address) = &self.return_address { + writer.write_all(&[1])?; + return_address.serialize(writer)?; + } else { + writer.write_all(&[0])?; + } + self.in_instruction.encode_to(writer); + Ok(()) + } +} + +create_db!( + ScannerGlobal { + StartBlock: () -> u64, + + QueuedKey: (key: K) -> (), + + ActiveKeys: () -> Vec>, + RetireAt: (key: K) -> u64, + + // Highest acknowledged block + HighestAcknowledgedBlock: () -> u64, + + // If a block was notable + /* + A block is notable if one of three conditions are met: + + 1) We activated a key within this block (or explicitly forward to an activated key). + 2) We retired a key within this block. + 3) We received outputs within this block. + + The first two conditions, and the reasoning for them, is extensively documented in + `spec/processor/Multisig Rotation.md`. The third is obvious (as any block we receive outputs + in needs synchrony so that we can spend the received outputs). + + We save if a block is notable here by either the scan for received outputs task or the + check for eventuality completion task. Once a block has been processed by both, the reporting + task will report any notable blocks. Finally, the task which sets the block safe to scan to + makes its decision based on the notable blocks and the acknowledged blocks. 
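+    As an example, a block in which a new key activates is notable even if no outputs were
+    received within it; the Batch task still produces a (potentially empty) Batch for it so the
+    block can be acknowledged.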
+ */ + // This collapses from `bool` to `()`, using if the value was set for true and false otherwise + NotableBlock: (number: u64) -> (), + + SerializedForwardedOutput: (id: &[u8]) -> Vec, + } +); + +pub(crate) struct ScannerGlobalDb(PhantomData); +impl ScannerGlobalDb { + pub(crate) fn start_block(getter: &impl Get) -> Option { + StartBlock::get(getter) + } + pub(crate) fn set_start_block(txn: &mut impl DbTxn, block: u64) { + StartBlock::set(txn, &block) + } + + fn tidy_keys(txn: &mut impl DbTxn) { + let mut keys: Vec>>> = + ActiveKeys::get(txn).expect("retiring key yet no active keys"); + let Some(key) = keys.first() else { return }; + + // Get the block we're scanning for next + let block_number = next_to_scan_for_outputs_block::(txn).expect( + "tidying keys despite never setting the next to scan for block (done on initialization)", + ); + // If this key is scheduled for retiry... + if let Some(retire_at) = RetireAt::get(txn, key.key) { + // And is retired by/at this block... + if retire_at <= block_number { + // Remove it from the list of keys + let key = keys.remove(0); + ActiveKeys::set(txn, &keys); + // Also clean up the retiry block + RetireAt::del(txn, key.key); + } + } + } + + /// Queue a key. + /// + /// Keys may be queued whenever, so long as they're scheduled to activate `WINDOW_LENGTH` blocks + /// after the next block acknowledged after they've been set. There is no requirement that any + /// prior keys have had their processing completed (meaning what should be a length-2 vector may + /// be a length-n vector). + /// + /// A new key MUST NOT be queued to activate a block preceding the finishing of the key prior to + /// its prior. There MUST only be two keys active at one time. + /// + /// `activation_block_number` is inclusive, so the key will be scanned for starting at the + /// specified block. 
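+  ///
+  /// As an example, if the next block acknowledged after this key is set is block `n`, the key
+  /// must be queued with an activation block number of at least `n + WINDOW_LENGTH`.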
+ pub(crate) fn queue_key(txn: &mut impl DbTxn, activation_block_number: u64, key: KeyFor) { + // Set the block which has a key activate as notable + NotableBlock::set(txn, activation_block_number, &()); + + // Check this key has never been queued before + // This should only happen if a malicious supermajority collude, and breaks indexing by the key + assert!(QueuedKey::get(txn, EncodableG(key)).is_none(), "key being queued was prior queued"); + QueuedKey::set(txn, EncodableG(key), &()); + + // Fetch the existing keys + let mut keys: Vec>>> = + ActiveKeys::get(txn).unwrap_or(vec![]); + + // If this new key retires a key, mark the block at which forwarding explicitly occurs notable + // This lets us obtain synchrony over the transactions we'll make to accomplish this + let this_keys_session = if let Some(key_retired_by_this) = keys.last() { + NotableBlock::set( + txn, + Lifetime::calculate::( + // The 'current block number' used for this calculation + activation_block_number, + // The activation block of the key we're getting the lifetime of + key_retired_by_this.activation_block_number, + // The activation block of the key which will retire this key + Some(activation_block_number), + ) + .block_at_which_forwarding_starts + .expect( + "didn't calculate the block forwarding starts at despite passing the next key's info", + ), + &(), + ); + Session(key_retired_by_this.session.0 + 1) + } else { + Session(0) + }; + + // Push and save the next key + keys.push(SeraiKeyDbEntry { + activation_block_number, + session: this_keys_session, + key: EncodableG(key), + }); + ActiveKeys::set(txn, &keys); + + // Now tidy the keys, ensuring this has a maximum length of 2 + Self::tidy_keys(txn); + } + /// Retire a key. + /// + /// The key retired must be the oldest key. There must be another key actively tracked. + pub(crate) fn retire_key(txn: &mut impl DbTxn, at_block: u64, key: KeyFor) { + // Set the block which has a key retire as notable + NotableBlock::set(txn, at_block, &()); + + let keys: Vec>>> = + ActiveKeys::get(txn).expect("retiring key yet no active keys"); + + assert!(keys.len() > 1, "retiring our only key"); + assert_eq!(keys[0].key.0, key, "not retiring the oldest key"); + + RetireAt::set(txn, EncodableG(key), &at_block); + } + /// Fetch the active keys, as of the next-to-scan-for-outputs Block. + /// + /// This means the scan task should scan for all keys returned by this. + pub(crate) fn active_keys_as_of_next_to_scan_for_outputs_block( + getter: &impl Get, + ) -> Option>>> { + // We don't take this as an argument as we don't keep all historical keys in memory + // If we've scanned block 1,000,000, we can't answer the active keys as of block 0 + let block_number = next_to_scan_for_outputs_block::(getter)?; + + let raw_keys: Vec>>> = ActiveKeys::get(getter)?; + let mut keys = Vec::with_capacity(2); + for i in 0 .. 
raw_keys.len() { + // Ensure this key isn't retired + if let Some(retire_at) = RetireAt::get(getter, raw_keys[i].key) { + if retire_at <= block_number { + continue; + } + } + // Ensure this key isn't yet to activate + if block_number < raw_keys[i].activation_block_number { + continue; + } + let Lifetime { stage, block_at_which_reporting_starts, block_at_which_forwarding_starts } = + Lifetime::calculate::( + block_number, + raw_keys[i].activation_block_number, + raw_keys.get(i + 1).map(|key| key.activation_block_number), + ); + keys.push(SeraiKey { + session: raw_keys[i].session, + key: raw_keys[i].key.0, + stage, + activation_block_number: raw_keys[i].activation_block_number, + block_at_which_reporting_starts, + block_at_which_forwarding_starts, + }); + } + assert!(keys.len() <= 2, "more than two keys active"); + Some(keys) + } + + pub(crate) fn set_highest_acknowledged_block( + txn: &mut impl DbTxn, + highest_acknowledged_block: u64, + ) { + HighestAcknowledgedBlock::set(txn, &highest_acknowledged_block); + } + pub(crate) fn highest_acknowledged_block(getter: &impl Get) -> Option { + HighestAcknowledgedBlock::get(getter) + } + + /* + This is so verbosely named as the DB itself already flags upon external outputs. Specifically, + if any block yields External outputs to accumulate, we flag it as notable. + + There is the slight edge case where some External outputs are queued for accumulation later. We + consider those outputs received as of the block they're queued to (maintaining the policy any + blocks in which we receive outputs is notable). + */ + pub(crate) fn flag_notable_due_to_non_external_output(txn: &mut impl DbTxn, block_number: u64) { + NotableBlock::set(txn, block_number, &()); + } + + pub(crate) fn is_block_notable(getter: &impl Get, number: u64) -> bool { + NotableBlock::get(getter, number).is_some() + } + + pub(crate) fn return_address_and_in_instruction_for_forwarded_output( + getter: &impl Get, + output: & as ReceivedOutput, AddressFor>>::Id, + ) -> Option<(Option>, InInstructionWithBalance)> { + let buf = SerializedForwardedOutput::get(getter, output.as_ref())?; + let mut buf = buf.as_slice(); + + let mut opt = [0xff]; + buf.read_exact(&mut opt).unwrap(); + assert!((opt[0] == 0) || (opt[0] == 1)); + + let address = (opt[0] == 1).then(|| AddressFor::::deserialize_reader(&mut buf).unwrap()); + Some((address, InInstructionWithBalance::decode(&mut IoReader(buf)).unwrap())) + } +} + +/// The data produced by scanning a block. +/// +/// This is the sender's version which includes the forwarded outputs with their InInstructions, +/// which need to be saved to the database for later retrieval. +pub(crate) struct SenderScanData { + /// The block number. + pub(crate) block_number: u64, + /// The received outputs which should be accumulated into the scheduler. + pub(crate) received_external_outputs: Vec>, + /// The outputs which need to be forwarded. + pub(crate) forwards: Vec>, + /// The outputs which need to be returned. + pub(crate) returns: Vec>, +} + +/// The data produced by scanning a block. +/// +/// This is the receiver's version which doesn't include the forwarded outputs' InInstructions, as +/// the Eventuality task doesn't need it to process this block. +pub(crate) struct ReceiverScanData { + /// The block number. + pub(crate) block_number: u64, + /// The received outputs which should be accumulated into the scheduler. + pub(crate) received_external_outputs: Vec>, + /// The outputs which need to be forwarded. 
+ pub(crate) forwards: Vec>, + /// The outputs which need to be returned. + pub(crate) returns: Vec>, +} + +db_channel! { + ScannerScanEventuality { + ScannedBlock: () -> Vec, + } +} + +pub(crate) struct ScanToEventualityDb(PhantomData); +impl ScanToEventualityDb { + pub(crate) fn send_scan_data(txn: &mut impl DbTxn, block_number: u64, data: &SenderScanData) { + // If we received an External output to accumulate, or have an External output to forward + // (meaning we received an External output), or have an External output to return (again + // meaning we received an External output), set this block as notable due to receiving outputs + // The non-External output case is covered with `flag_notable_due_to_non_external_output` + if !(data.received_external_outputs.is_empty() && + data.forwards.is_empty() && + data.returns.is_empty()) + { + NotableBlock::set(txn, block_number, &()); + } + + // Save all the forwarded outputs' data + for forward in &data.forwards { + let mut buf = vec![]; + if let Some(address) = &forward.return_address { + buf.write_all(&[1]).unwrap(); + address.serialize(&mut buf).unwrap(); + } else { + buf.write_all(&[0]).unwrap(); + } + forward.in_instruction.encode_to(&mut buf); + + SerializedForwardedOutput::set(txn, forward.output.id().as_ref(), &buf); + } + + let mut buf = vec![]; + buf.write_all(&data.block_number.to_le_bytes()).unwrap(); + buf + .write_all(&u32::try_from(data.received_external_outputs.len()).unwrap().to_le_bytes()) + .unwrap(); + for output in &data.received_external_outputs { + output.write(&mut buf).unwrap(); + } + buf.write_all(&u32::try_from(data.forwards.len()).unwrap().to_le_bytes()).unwrap(); + for output_with_in_instruction in &data.forwards { + // Only write the output, as we saved the InInstruction above as needed + output_with_in_instruction.output.write(&mut buf).unwrap(); + } + buf.write_all(&u32::try_from(data.returns.len()).unwrap().to_le_bytes()).unwrap(); + for output in &data.returns { + output.write(&mut buf).unwrap(); + } + ScannedBlock::send(txn, &buf); + } + pub(crate) fn recv_scan_data( + txn: &mut impl DbTxn, + expected_block_number: u64, + ) -> ReceiverScanData { + let data = + ScannedBlock::try_recv(txn).expect("receiving data for a scanned block not yet sent"); + let mut data = data.as_slice(); + + let block_number = { + let mut block_number = [0; 8]; + data.read_exact(&mut block_number).unwrap(); + u64::from_le_bytes(block_number) + }; + assert_eq!( + block_number, expected_block_number, + "received data for a scanned block distinct than expected" + ); + + let received_external_outputs = { + let mut len = [0; 4]; + data.read_exact(&mut len).unwrap(); + let len = usize::try_from(u32::from_le_bytes(len)).unwrap(); + + let mut received_external_outputs = Vec::with_capacity(len); + for _ in 0 .. len { + received_external_outputs.push(OutputFor::::read(&mut data).unwrap()); + } + received_external_outputs + }; + + let forwards = { + let mut len = [0; 4]; + data.read_exact(&mut len).unwrap(); + let len = usize::try_from(u32::from_le_bytes(len)).unwrap(); + + let mut forwards = Vec::with_capacity(len); + for _ in 0 .. len { + forwards.push(OutputFor::::read(&mut data).unwrap()); + } + forwards + }; + + let returns = { + let mut len = [0; 4]; + data.read_exact(&mut len).unwrap(); + let len = usize::try_from(u32::from_le_bytes(len)).unwrap(); + + let mut returns = Vec::with_capacity(len); + for _ in 0 .. 
len { + returns.push(Return::::read(&mut data).unwrap()); + } + returns + }; + + ReceiverScanData { block_number, received_external_outputs, forwards, returns } + } +} + +pub(crate) struct Returnable { + pub(crate) return_address: Option>, + pub(crate) in_instruction: InInstructionWithBalance, +} + +impl Returnable { + fn read(reader: &mut impl io::Read) -> io::Result { + let mut opt = [0xff]; + reader.read_exact(&mut opt).unwrap(); + assert!((opt[0] == 0) || (opt[0] == 1)); + + let return_address = + (opt[0] == 1).then(|| AddressFor::::deserialize_reader(reader)).transpose()?; + + let in_instruction = + InInstructionWithBalance::decode(&mut IoReader(reader)).map_err(io::Error::other)?; + Ok(Returnable { return_address, in_instruction }) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + if let Some(return_address) = &self.return_address { + writer.write_all(&[1])?; + return_address.serialize(writer)?; + } else { + writer.write_all(&[0])?; + } + self.in_instruction.encode_to(writer); + Ok(()) + } +} + +#[derive(BorshSerialize, BorshDeserialize)] +struct BlockBoundInInstructions { + block_number: u64, + returnable_in_instructions: Vec, +} + +db_channel! { + ScannerScanBatch { + InInstructions: () -> BlockBoundInInstructions, + } +} + +pub(crate) struct InInstructionData { + pub(crate) session_to_sign_batch: Session, + pub(crate) external_key_for_session_to_sign_batch: KeyFor, + pub(crate) returnable_in_instructions: Vec>, +} + +pub(crate) struct ScanToBatchDb(PhantomData); +impl ScanToBatchDb { + pub(crate) fn send_in_instructions( + txn: &mut impl DbTxn, + block_number: u64, + data: &InInstructionData, + ) { + let mut buf = data.session_to_sign_batch.encode(); + buf.extend(data.external_key_for_session_to_sign_batch.to_bytes().as_ref()); + for returnable_in_instruction in &data.returnable_in_instructions { + returnable_in_instruction.write(&mut buf).unwrap(); + } + InInstructions::send( + txn, + &BlockBoundInInstructions { block_number, returnable_in_instructions: buf }, + ); + } + + pub(crate) fn recv_in_instructions( + txn: &mut impl DbTxn, + block_number: u64, + ) -> InInstructionData { + let data = InInstructions::try_recv(txn) + .expect("receiving InInstructions for a scanned block not yet sent"); + assert_eq!( + block_number, data.block_number, + "received InInstructions for a scanned block distinct than expected" + ); + let mut buf = data.returnable_in_instructions.as_slice(); + + let session_to_sign_batch = Session::decode(&mut buf).unwrap(); + let external_key_for_session_to_sign_batch = { + let mut external_key_for_session_to_sign_batch = + as GroupEncoding>::Repr::default(); + let key_len = external_key_for_session_to_sign_batch.as_ref().len(); + external_key_for_session_to_sign_batch.as_mut().copy_from_slice(&buf[.. key_len]); + buf = &buf[key_len ..]; + KeyFor::::from_bytes(&external_key_for_session_to_sign_batch).unwrap() + }; + + let mut returnable_in_instructions = vec![]; + while !buf.is_empty() { + returnable_in_instructions.push(Returnable::read(&mut buf).unwrap()); + } + InInstructionData { + session_to_sign_batch, + external_key_for_session_to_sign_batch, + returnable_in_instructions, + } + } +} + +#[derive(BorshSerialize, BorshDeserialize)] +pub(crate) struct BatchData { + pub(crate) session_to_sign_batch: Session, + pub(crate) external_key_for_session_to_sign_batch: K, + pub(crate) batch: Batch, +} + +db_channel! 
{ + ScannerBatchReport { + BatchToReport: () -> BatchData, + } +} + +pub(crate) struct BatchToReportDb(PhantomData); +impl BatchToReportDb { + pub(crate) fn send_batch(txn: &mut impl DbTxn, batch_data: &BatchData>>) { + BatchToReport::send(txn, batch_data); + } + + pub(crate) fn try_recv_batch(txn: &mut impl DbTxn) -> Option>>> { + BatchToReport::try_recv(txn) + } +} + +db_channel! { + ScannerSubstrateEventuality { + Burns: (acknowledged_block: u64) -> Vec, + } +} + +pub(crate) struct SubstrateToEventualityDb; +impl SubstrateToEventualityDb { + pub(crate) fn send_burns( + txn: &mut impl DbTxn, + acknowledged_block: u64, + burns: Vec, + ) { + // Drop burns less than the dust + let burns = burns + .into_iter() + .filter(|burn| burn.balance.amount.0 >= S::dust(burn.balance.coin).0) + .collect::>(); + if !burns.is_empty() { + Burns::send(txn, acknowledged_block, &burns); + } + } + + pub(crate) fn try_recv_burns( + txn: &mut impl DbTxn, + acknowledged_block: u64, + ) -> Option> { + Burns::try_recv(txn, acknowledged_block) + } +} + +mod _public_db { + use serai_in_instructions_primitives::Batch; + + use serai_db::{Get, DbTxn, create_db, db_channel}; + + db_channel! { + ScannerPublic { + BatchesToSign: (key: &[u8]) -> Batch, + AcknowledgedBatches: (key: &[u8]) -> u32, + CompletedEventualities: (key: &[u8]) -> [u8; 32], + } + } +} + +/// The batches to sign and publish. +/// +/// This is used for publishing Batches onto Serai. +pub struct BatchesToSign(PhantomData); +impl BatchesToSign { + pub(crate) fn send(txn: &mut impl DbTxn, key: &K, batch: &Batch) { + _public_db::BatchesToSign::send(txn, key.to_bytes().as_ref(), batch); + } + + /// Receive a batch to sign and publish. + pub fn try_recv(txn: &mut impl DbTxn, key: &K) -> Option { + _public_db::BatchesToSign::try_recv(txn, key.to_bytes().as_ref()) + } +} + +/// The batches which were acknowledged on-chain. +pub struct AcknowledgedBatches(PhantomData); +impl AcknowledgedBatches { + pub(crate) fn send(txn: &mut impl DbTxn, key: &K, batch: u32) { + _public_db::AcknowledgedBatches::send(txn, key.to_bytes().as_ref(), &batch); + } + + /// Receive the ID of a batch which was acknowledged. + pub fn try_recv(txn: &mut impl DbTxn, key: &K) -> Option { + _public_db::AcknowledgedBatches::try_recv(txn, key.to_bytes().as_ref()) + } +} + +/// The IDs of completed Eventualities found on-chain, within a finalized block. +pub struct CompletedEventualities(PhantomData); +impl CompletedEventualities { + pub(crate) fn send(txn: &mut impl DbTxn, key: &K, id: [u8; 32]) { + _public_db::CompletedEventualities::send(txn, key.to_bytes().as_ref(), &id); + } + + /// Receive the ID of a completed Eventuality. 
+ pub fn try_recv(txn: &mut impl DbTxn, key: &K) -> Option<[u8; 32]> { + _public_db::CompletedEventualities::try_recv(txn, key.to_bytes().as_ref()) + } +} diff --git a/processor/scanner/src/eventuality/db.rs b/processor/scanner/src/eventuality/db.rs new file mode 100644 index 00000000..3e5088d1 --- /dev/null +++ b/processor/scanner/src/eventuality/db.rs @@ -0,0 +1,83 @@ +use core::marker::PhantomData; + +use scale::Encode; +use serai_db::{Get, DbTxn, create_db}; + +use primitives::{EncodableG, ReceivedOutput, Eventuality, EventualityTracker}; + +use crate::{ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor}; + +create_db!( + ScannerEventuality { + // The next block to check for resolving eventualities + NextToCheckForEventualitiesBlock: () -> u64, + // The latest block this task has handled which was notable + LatestHandledNotableBlock: () -> u64, + + SerializedEventualities: (key: K) -> Vec, + + AccumulatedOutput: (id: &[u8]) -> (), + } +); + +pub(crate) struct EventualityDb(PhantomData); +impl EventualityDb { + pub(crate) fn set_next_to_check_for_eventualities_block( + txn: &mut impl DbTxn, + next_to_check_for_eventualities_block: u64, + ) { + NextToCheckForEventualitiesBlock::set(txn, &next_to_check_for_eventualities_block); + } + pub(crate) fn next_to_check_for_eventualities_block(getter: &impl Get) -> Option { + NextToCheckForEventualitiesBlock::get(getter) + } + + pub(crate) fn set_latest_handled_notable_block( + txn: &mut impl DbTxn, + latest_handled_notable_block: u64, + ) { + LatestHandledNotableBlock::set(txn, &latest_handled_notable_block); + } + pub(crate) fn latest_handled_notable_block(getter: &impl Get) -> Option { + LatestHandledNotableBlock::get(getter) + } + + pub(crate) fn set_eventualities( + txn: &mut impl DbTxn, + key: KeyFor, + eventualities: &EventualityTracker>, + ) { + let mut serialized = Vec::with_capacity(eventualities.active_eventualities.len() * 128); + for eventuality in eventualities.active_eventualities.values() { + eventuality.write(&mut serialized).unwrap(); + } + SerializedEventualities::set(txn, EncodableG(key), &serialized); + } + pub(crate) fn eventualities( + getter: &impl Get, + key: KeyFor, + ) -> EventualityTracker> { + let serialized = SerializedEventualities::get(getter, EncodableG(key)).unwrap_or(vec![]); + let mut serialized = serialized.as_slice(); + + let mut res = EventualityTracker::default(); + while !serialized.is_empty() { + let eventuality = EventualityFor::::read(&mut serialized).unwrap(); + res.insert(eventuality); + } + res + } + + pub(crate) fn prior_accumulated_output( + getter: &impl Get, + id: & as ReceivedOutput, AddressFor>>::Id, + ) -> bool { + AccumulatedOutput::get(getter, id.as_ref()).is_some() + } + pub(crate) fn accumulated_output( + txn: &mut impl DbTxn, + id: & as ReceivedOutput, AddressFor>>::Id, + ) { + AccumulatedOutput::set(txn, id.as_ref(), &()); + } +} diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs new file mode 100644 index 00000000..8a416903 --- /dev/null +++ b/processor/scanner/src/eventuality/mod.rs @@ -0,0 +1,531 @@ +use core::future::Future; +use std::collections::{HashSet, HashMap}; + +use group::GroupEncoding; + +use serai_db::{Get, DbTxn, Db}; + +use primitives::{task::ContinuallyRan, OutputType, ReceivedOutput, Eventuality, Block, Payment}; + +use crate::{ + lifetime::LifetimeStage, + db::{ + SeraiKey, OutputWithInInstruction, ReceiverScanData, ScannerGlobalDb, SubstrateToEventualityDb, + ScanToEventualityDb, + }, + BlockExt, ScannerFeed, 
KeyFor, AddressFor, OutputFor, EventualityFor, SchedulerUpdate, Scheduler, + CompletedEventualities, sort_outputs, + scan::{next_to_scan_for_outputs_block, queue_output_until_block}, +}; + +mod db; +use db::EventualityDb; + +/// The latest scannable block, which is determined by this task. +/// +/// This task decides when a key retires, which impacts the scan task. Accordingly, the scanner is +/// only allowed to scan `S::WINDOW_LENGTH - 1` blocks ahead so we can safely schedule keys to +/// retire `S::WINDOW_LENGTH` blocks out. +pub(crate) fn latest_scannable_block(getter: &impl Get) -> Option { + assert!(S::WINDOW_LENGTH > 0); + EventualityDb::::next_to_check_for_eventualities_block(getter) + .map(|b| b + S::WINDOW_LENGTH - 1) +} + +/// Intake a set of Eventualities into the DB. +/// +/// The HashMap is keyed by the key these Eventualities are for. +fn intake_eventualities( + txn: &mut impl DbTxn, + to_intake: HashMap, Vec>>, +) { + for (key, new_eventualities) in to_intake { + let key = { + let mut key_repr = as GroupEncoding>::Repr::default(); + assert_eq!(key.len(), key_repr.as_ref().len()); + key_repr.as_mut().copy_from_slice(&key); + KeyFor::::from_bytes(&key_repr).unwrap() + }; + + let mut eventualities = EventualityDb::::eventualities(txn, key); + for new_eventuality in new_eventualities { + eventualities.insert(new_eventuality); + } + EventualityDb::::set_eventualities(txn, key, &eventualities); + } +} + +/* + When we scan a block, we receive outputs. When this block is acknowledged, we accumulate those + outputs into some scheduler, potentially causing certain transactions to begin their signing + protocol. + + Despite only scanning blocks with `CONFIRMATIONS`, we cannot assume that these transactions (in + their signed form) will only appear after `CONFIRMATIONS`. For `CONFIRMATIONS = 10`, the scanned + block's number being `1`, the blockchain will have blocks with numbers `0 ..= 10`. While this + implies the earliest the transaction will appear is when the block number is `11`, which is + `1 + CONFIRMATIONS` (the number of the scanned block, plus the confirmations), this isn't + guaranteed. + + A reorganization could occur which causes all unconfirmed blocks to be replaced, with the new + blockchain having the signed transaction present immediately. + + This means that in order to detect Eventuality completions, we can only check block `b+1` once + we've acknowledged block `b`, accumulated its outputs, triggered any transactions, and prepared + for their Eventualities. This is important as both the completion of Eventualities, and the scan + process, may cause a block to be considered notable (where notable blocks must be perfectly + ordered). + + We do not want to fully serialize the scan flow solely because the Eventuality flow must be. If + the time to scan, acknowledge, and intake a block ever exceeded the block time, we'd form a + backlog. + + The solution is to form a window of blocks we can scan/acknowledge/intake, safely, such that we + only form a backlog if the latency for a block exceeds the duration of the entire window (the + amount of blocks in the window * the block time). + + By considering the block an Eventuality resolves not as the block it does, yet the block a window + later, we enable the following flow: + + - The scanner scans within its window, submitting blocks for acknowledgement. + - We have the blocks acknowledged (the consensus protocol handling this in parallel). + - The scanner checks for Eventualities completed following acknowledged blocks. 
+ - If all Eventualities for a retiring multisig have been cleared, the notable block is one window + later. + - The start of the window shifts to the last block we've checked for Eventualities. This means + the end of the window is the block we just set as notable, and yes, once that's scanned we can + successfully publish a batch for it in a canonical fashion. + + This forms a backlog only if the latency of scanning, acknowledgement, and intake (including + checking Eventualities) exceeds the window duration (the desired property). +*/ +pub(crate) struct EventualityTask> { + db: D, + feed: S, + scheduler: Sch, +} + +impl> EventualityTask { + pub(crate) fn new(mut db: D, feed: S, scheduler: Sch, start_block: u64) -> Self { + if EventualityDb::::next_to_check_for_eventualities_block(&db).is_none() { + // Initialize the DB + let mut txn = db.txn(); + EventualityDb::::set_next_to_check_for_eventualities_block(&mut txn, start_block); + txn.commit(); + } + + Self { db, feed, scheduler } + } + + #[allow(clippy::type_complexity)] + fn keys_and_keys_with_stages( + &self, + block_number: u64, + ) -> (Vec>>, Vec<(KeyFor, LifetimeStage)>) { + /* + This is proper as the keys for the next-to-scan block (at most `WINDOW_LENGTH` ahead) will be + the keys to use here, with only minor edge cases. + + This may include a key which has yet to activate by our perception. We can simply drop + those. + + This may not include a key which has retired by the next-to-scan block. This task is the + one which decides when to retire a key, and when it marks a key to be retired, it is done + with it. Accordingly, it's not an issue if such a key was dropped. + + This also may include a key we've retired which has yet to officially retire. That's fine as + we'll do nothing with it, and the Scheduler traits document this behavior. + */ + let mut keys = ScannerGlobalDb::::active_keys_as_of_next_to_scan_for_outputs_block(&self.db) + .expect("scanning for a blockchain without any keys set"); + // Since the next-to-scan block is ahead of us, drop keys which have yet to actually activate + keys.retain(|key| block_number <= key.activation_block_number); + let keys_with_stages = keys.iter().map(|key| (key.key, key.stage)).collect::>(); + + (keys, keys_with_stages) + } + + // Returns a boolean of if we intaked any Burns. 
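// (Editorial note: to "intake" Burns here means draining the `SubstrateToEventualityDb` Burns
// channel keyed by the latest handled notable block, converting each Burn into a `Payment`, and
// passing them to the scheduler's `fulfill`, whose returned Eventualities are then intaken too.)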
+ async fn intake_burns(&mut self) -> Result { + let mut intaked_any = false; + + // If we've handled an notable block, we may have Burns being queued with it as the reference + if let Some(latest_handled_notable_block) = + EventualityDb::::latest_handled_notable_block(&self.db) + { + // We always intake Burns per this block as it's the block we have consensus on + // We would have a consensus failure if some thought the change should be the old key and + // others the new key + let (_keys, keys_with_stages) = self.keys_and_keys_with_stages(latest_handled_notable_block); + + let block = self.feed.block_by_number(&self.db, latest_handled_notable_block).await?; + + let mut txn = self.db.txn(); + // Drain the entire channel + while let Some(burns) = + SubstrateToEventualityDb::try_recv_burns(&mut txn, latest_handled_notable_block) + { + intaked_any = true; + + let new_eventualities = self + .scheduler + .fulfill( + &mut txn, + &block, + &keys_with_stages, + burns + .into_iter() + .filter_map(|burn| Payment::>::try_from(burn).ok()) + .collect(), + ) + .await + .map_err(|e| format!("failed to queue fulfilling payments: {e:?}"))?; + intake_eventualities::(&mut txn, new_eventualities); + } + txn.commit(); + } + + Ok(intaked_any) + } +} + +impl> ContinuallyRan for EventualityTask { + type Error = String; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + // Fetch the highest acknowledged block + let Some(highest_acknowledged) = ScannerGlobalDb::::highest_acknowledged_block(&self.db) + else { + // If we've never acknowledged a block, return + return Ok(false); + }; + + // A boolean of if we've made any progress to return at the end of the function + let mut made_progress = false; + + // Start by intaking any Burns we have sitting around + // It's important we run this regardless of if we have a new block to handle + made_progress |= self.intake_burns().await?; + + /* + Eventualities increase upon one of two cases: + + 1) We're fulfilling Burns + 2) We acknowledged a block + + We can't know the processor has intaked all Burns it should have when we process block `b`. + We solve this by executing a consensus protocol whenever a resolution for an Eventuality + created to fulfill Burns occurs. Accordingly, we force ourselves to obtain synchrony on + such blocks (and all preceding Burns). + + This means we can only iterate up to the block currently pending acknowledgement. + + We only know blocks will need acknowledgement *for sure* if they were scanned. The only + other causes are key activation and retirement (both scheduled outside the scan window). + This makes the exclusive upper bound the *next block to scan*. + */ + let exclusive_upper_bound = { + // Fetch the next to scan block + let next_to_scan = next_to_scan_for_outputs_block::(&self.db) + .expect("EventualityTask run before writing the start block"); + // If we haven't done any work, return + if next_to_scan == 0 { + return Ok(false); + } + next_to_scan + }; + + // Fetch the next block to check + let next_to_check = EventualityDb::::next_to_check_for_eventualities_block(&self.db) + .expect("EventualityTask run before writing the start block"); + + // Check all blocks + for b in next_to_check .. exclusive_upper_bound { + let is_block_notable = ScannerGlobalDb::::is_block_notable(&self.db, b); + if is_block_notable { + /* + If this block is notable *and* not acknowledged, break. + + This is so if Burns queued prior to this block's acknowledgement caused any + Eventualities (which may resolve this block), we have them. 
If it wasn't for that, it'd + be so if this block's acknowledgement caused any Eventualities, we have them, though + those would only potentially resolve in the next block (letting us scan this block + without delay). + */ + if b > highest_acknowledged { + break; + } + + // Since this block is notable, ensure we've intaked all the Burns preceding it + // We can know with certainty that the channel is fully populated at this time since + // we've acknowledged a newer block (so we've handled the state up to this point and any + // new state will be for the newer block) + #[allow(unused_assignments)] + { + made_progress |= self.intake_burns().await?; + } + } + + // Since we're handling this block, we are making progress + made_progress = true; + + let block = self.feed.block_by_number(&self.db, b).await?; + + log::debug!("checking eventuality completions in block: {} ({b})", hex::encode(block.id())); + + let (keys, keys_with_stages) = self.keys_and_keys_with_stages(b); + let latest_active_key = { + let mut keys_with_stages = keys_with_stages.clone(); + loop { + // Use the most recent key + let (key, stage) = keys_with_stages.pop().unwrap(); + // Unless this key is active, but not yet reporting + if stage == LifetimeStage::ActiveYetNotReporting { + continue; + } + break key; + } + }; + + let mut txn = self.db.txn(); + + // Fetch the data from the scanner + let scan_data = ScanToEventualityDb::recv_scan_data(&mut txn, b); + assert_eq!(scan_data.block_number, b); + let ReceiverScanData { block_number: _, received_external_outputs, forwards, returns } = + scan_data; + let mut outputs = received_external_outputs; + + for key in &keys { + // If this is the key's activation block, activate it + if key.activation_block_number == b { + Sch::activate_key(&mut txn, key.key); + } + + let completed_eventualities = { + let mut eventualities = EventualityDb::::eventualities(&txn, key.key); + let completed_eventualities = + block.check_for_eventuality_resolutions(&mut eventualities); + EventualityDb::::set_eventualities(&mut txn, key.key, &eventualities); + completed_eventualities + }; + + for (tx, eventuality) in &completed_eventualities { + log::info!( + "eventuality {} resolved by {}", + hex::encode(eventuality.id()), + hex::encode(tx.as_ref()) + ); + CompletedEventualities::send(&mut txn, &key.key, eventuality.id()); + } + + // Fetch all non-External outputs + let mut non_external_outputs = block.scan_for_outputs(latest_active_key, key.key); + non_external_outputs.retain(|output| output.kind() != OutputType::External); + // Drop any outputs less than the dust limit + non_external_outputs.retain(|output| { + let balance = output.balance(); + balance.amount.0 >= S::dust(balance.coin).0 + }); + + /* + Now that we have all non-External outputs, we filter them to be only the outputs which + are from transactions which resolve our own Eventualities *if* the multisig is retiring. + This implements step 6 of `spec/processor/Multisig Rotation.md`. + + We may receive a Change output. The only issue with accumulating this would be if it + extends the multisig's lifetime (by increasing the amount of outputs yet to be + forwarded). 
By checking it's one we made, either: + 1) It's a legitimate Change output to be forwarded + 2) It's a Change output created by a user burning coins (specifying the Change address), + which can only be created while the multisig is actively handling `Burn`s (therefore + ensuring this multisig cannot be kept alive ad-infinitum) + + The commentary on Change outputs also applies to Branch/Forwarded. They'll presumably + get ignored if not usable however. + */ + if key.stage == LifetimeStage::Finishing { + non_external_outputs + .retain(|output| completed_eventualities.contains_key(&output.transaction_id())); + } + + // Finally, for non-External outputs we didn't make, we check they're worth more than the + // cost to aggregate them to avoid some profitable spam attacks by malicious miners + { + // Fetch and cache the costs to aggregate as this call may be expensive + let coins = non_external_outputs + .iter() + .map(|output| output.balance().coin) + .collect::>(); + let mut costs_to_aggregate = HashMap::new(); + for coin in coins { + costs_to_aggregate.insert( + coin, + self.feed.cost_to_aggregate(coin, &block).await.map_err(|e| { + format!("EventualityTask couldn't fetch cost to aggregate {coin:?} at {b}: {e:?}") + })?, + ); + } + + // Only retain out outputs/outputs sufficiently worthwhile + non_external_outputs.retain(|output| { + completed_eventualities.contains_key(&output.transaction_id()) || { + let balance = output.balance(); + balance.amount.0 >= (2 * costs_to_aggregate[&balance.coin].0) + } + }); + } + + // Now, we iterate over all Forwarded outputs and queue their InInstructions + for output in + non_external_outputs.iter().filter(|output| output.kind() == OutputType::Forwarded) + { + let Some(eventuality) = completed_eventualities.get(&output.transaction_id()) else { + // Output sent to the forwarding address yet not one we made + continue; + }; + let Some(forwarded) = eventuality.singular_spent_output() else { + // This was a TX made by us, yet someone burned to the forwarding address as it + // doesn't follow the structure of forwarding transactions + continue; + }; + + let Some((return_address, mut in_instruction)) = + ScannerGlobalDb::::return_address_and_in_instruction_for_forwarded_output( + &txn, &forwarded, + ) + else { + // This was a TX made by us, coincidentally with the necessary structure, yet wasn't + // forwarding an output + continue; + }; + + // We use the original amount, minus twice the cost to aggregate + // If the fees we paid to forward this now (less than the cost to aggregate now, yet not + // necessarily the cost to aggregate historically) caused this amount to be less, reduce + // it accordingly + in_instruction.balance.amount.0 = + in_instruction.balance.amount.0.min(output.balance().amount.0); + + queue_output_until_block::( + &mut txn, + b + S::WINDOW_LENGTH, + &OutputWithInInstruction { output: output.clone(), return_address, in_instruction }, + ); + } + + // Accumulate all of these outputs + outputs.extend(non_external_outputs); + } + + // Update the scheduler + { + let mut scheduler_update = SchedulerUpdate { outputs, forwards, returns }; + scheduler_update.outputs.sort_by(sort_outputs); + scheduler_update.forwards.sort_by(sort_outputs); + scheduler_update.returns.sort_by(|a, b| sort_outputs(&a.output, &b.output)); + + let empty = { + let a: core::slice::Iter<'_, OutputFor> = scheduler_update.outputs.iter(); + let b: core::slice::Iter<'_, OutputFor> = scheduler_update.forwards.iter(); + let c = + 
scheduler_update.returns.iter().map(|output_to_return| &output_to_return.output); + let mut all_outputs = a.chain(b).chain(c).peekable(); + + // If we received any output, sanity check this block is notable + let empty = all_outputs.peek().is_none(); + if !empty { + assert!(is_block_notable, "accumulating output(s) in non-notable block"); + } + + // Sanity check we've never accumulated these outputs before + for output in all_outputs { + assert!( + !EventualityDb::::prior_accumulated_output(&txn, &output.id()), + "prior accumulated an output with this ID" + ); + EventualityDb::::accumulated_output(&mut txn, &output.id()); + } + + empty + }; + + if !empty { + // Accumulate the outputs + /* + This uses the `keys_with_stages` for the current block, yet this block is notable. + Accordingly, all future intaked Burns will use at least this block when determining + what LifetimeStage a key is. That makes the LifetimeStage monotonically incremented. + If this block wasn't notable, we'd potentially intake Burns with the LifetimeStage + determined off an earlier block than this (enabling an earlier LifetimeStage to be + used after a later one was already used). + */ + let new_eventualities = self + .scheduler + .update(&mut txn, &block, &keys_with_stages, scheduler_update) + .await + .map_err(|e| format!("failed to update scheduler: {e:?}"))?; + // Intake the new Eventualities + for key in new_eventualities.keys() { + keys + .iter() + .find(|serai_key| serai_key.key.to_bytes().as_ref() == key.as_slice()) + .expect("intaking Eventuality for key which isn't active"); + } + intake_eventualities::(&mut txn, new_eventualities); + } + } + + for key in &keys { + // If this is the block at which forwarding starts for this key, flush it + // We do this after we issue the above update for any efficiencies gained by doing so + if key.block_at_which_forwarding_starts == Some(b) { + assert!( + key.key != keys.last().unwrap().key, + "key which was forwarding was the last key (which has no key after it to forward to)" + ); + let new_eventualities = self + .scheduler + .flush_key(&mut txn, &block, key.key, keys.last().unwrap().key) + .await + .map_err(|e| format!("failed to flush key from scheduler: {e:?}"))?; + intake_eventualities::(&mut txn, new_eventualities); + } + + // Now that we've intaked any Eventualities caused, check if we're retiring any keys + if key.stage == LifetimeStage::Finishing { + let eventualities = EventualityDb::::eventualities(&txn, key.key); + if eventualities.active_eventualities.is_empty() { + log::info!( + "key {} has finished and is being retired", + hex::encode(key.key.to_bytes().as_ref()) + ); + + // Retire this key `WINDOW_LENGTH` blocks in the future to ensure the scan task never + // has a malleable view of the keys. 
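// (Illustrative numbers, not from this PR: with `S::WINDOW_LENGTH = 10` and this block being
// b = 100, the key is marked to retire as of block 110. Because `latest_scannable_block` only
// allows scanning up to `next_to_check + WINDOW_LENGTH - 1`, the scan task cannot have reached
// block 110 before this retirement is committed.)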
+ ScannerGlobalDb::<S>::retire_key(&mut txn, b + S::WINDOW_LENGTH, key.key); + + // We tell the scheduler to retire it now as we're done with it, and this fn doesn't + // require it be called with a canonical order + Sch::retire_key(&mut txn, key.key); + } + } + } + + // Update the next-to-check block + EventualityDb::<S>::set_next_to_check_for_eventualities_block(&mut txn, b + 1); + + // If this block was notable, update the latest-handled notable block + if is_block_notable { + EventualityDb::<S>::set_latest_handled_notable_block(&mut txn, b); + } + + txn.commit(); + } + + // Run dependents if we successfully checked any blocks + Ok(made_progress) + } + } +} diff --git a/processor/scanner/src/index/db.rs b/processor/scanner/src/index/db.rs new file mode 100644 index 00000000..9254f9bc --- /dev/null +++ b/processor/scanner/src/index/db.rs @@ -0,0 +1,28 @@ +use serai_db::{Get, DbTxn, create_db}; + +create_db!( + ScannerIndex { + // A lookup of a block's number to its ID + BlockId: (number: u64) -> [u8; 32], + + // The latest finalized block to appear on the blockchain + LatestFinalizedBlock: () -> u64, + } +); + +pub(crate) struct IndexDb; +impl IndexDb { + pub(crate) fn set_block(txn: &mut impl DbTxn, number: u64, id: [u8; 32]) { + BlockId::set(txn, number, &id); + } + pub(crate) fn block_id(getter: &impl Get, number: u64) -> Option<[u8; 32]> { + BlockId::get(getter, number) + } + + pub(crate) fn set_latest_finalized_block(txn: &mut impl DbTxn, latest_finalized_block: u64) { + LatestFinalizedBlock::set(txn, &latest_finalized_block); + } + pub(crate) fn latest_finalized_block(getter: &impl Get) -> Option<u64> { + LatestFinalizedBlock::get(getter) + } +} diff --git a/processor/scanner/src/index/mod.rs b/processor/scanner/src/index/mod.rs new file mode 100644 index 00000000..50032bae --- /dev/null +++ b/processor/scanner/src/index/mod.rs @@ -0,0 +1,117 @@ +use core::future::Future; + +use serai_db::{Get, DbTxn, Db}; +use primitives::{task::ContinuallyRan, BlockHeader}; + +use crate::ScannerFeed; + +mod db; +use db::IndexDb; + +/// Panics if an unindexed block's ID is requested. +pub(crate) fn block_id(getter: &impl Get, block_number: u64) -> [u8; 32] { + IndexDb::block_id(getter, block_number) + .unwrap_or_else(|| panic!("requested block ID for unindexed block {block_number}")) +} + +/* + This processor should build its own index of the blockchain, yet only for finalized blocks which + are safe to process. For Proof of Work blockchains, which only have probabilistic finality, these + are the set of sufficiently confirmed blocks. For blockchains with finality, these are the + finalized blocks. + + This task finds the finalized blocks, verifies they're contiguous, and saves their IDs.
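  (Editorial note: contiguity here means each newly fetched header's parent hash must match the
  ID previously indexed for the prior block number; any mismatch is treated as a reorganization of
  finalized blocks and panics rather than silently re-indexing.)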
+*/ +pub(crate) struct IndexTask { + db: D, + feed: S, +} + +impl IndexTask { + pub(crate) async fn new(mut db: D, feed: S, start_block: u64) -> Self { + if IndexDb::block_id(&db, start_block).is_none() { + // Fetch the block for its ID + let block = { + let mut delay = Self::DELAY_BETWEEN_ITERATIONS; + loop { + match feed.unchecked_block_header_by_number(start_block).await { + Ok(block) => break block, + Err(e) => { + log::warn!("IndexTask couldn't fetch start block {start_block}: {e:?}"); + tokio::time::sleep(core::time::Duration::from_secs(delay)).await; + delay += Self::DELAY_BETWEEN_ITERATIONS; + delay = delay.min(Self::MAX_DELAY_BETWEEN_ITERATIONS); + } + }; + } + }; + + // Initialize the DB + let mut txn = db.txn(); + IndexDb::set_block(&mut txn, start_block, block.id()); + IndexDb::set_latest_finalized_block(&mut txn, start_block); + txn.commit(); + } + + Self { db, feed } + } +} + +impl ContinuallyRan for IndexTask { + type Error = String; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + // Fetch the latest finalized block + let our_latest_finalized = IndexDb::latest_finalized_block(&self.db) + .expect("IndexTask run before writing the start block"); + let latest_finalized = match self.feed.latest_finalized_block_number().await { + Ok(latest_finalized) => latest_finalized, + Err(e) => Err(format!("couldn't fetch the latest finalized block number: {e:?}"))?, + }; + + if latest_finalized < our_latest_finalized { + // Explicitly log this as an error as returned ephemeral errors are logged with debug + // This doesn't panic as the node should sync along our indexed chain, and if it doesn't, + // we'll panic at that point in time + log::error!( + "node is out of sync, latest finalized {} is behind our indexed {}", + latest_finalized, + our_latest_finalized + ); + Err("node is out of sync".to_string())?; + } + + // Index the hashes of all blocks until the latest finalized block + for b in (our_latest_finalized + 1) ..= latest_finalized { + let block = match self.feed.unchecked_block_header_by_number(b).await { + Ok(block) => block, + Err(e) => Err(format!("couldn't fetch block {b}: {e:?}"))?, + }; + + // Check this descends from our indexed chain + { + let expected_parent = + IndexDb::block_id(&self.db, b - 1).expect("didn't have the ID of the prior block"); + if block.parent() != expected_parent { + panic!( + "current finalized block (#{b}, {}) doesn't build off finalized block (#{}, {})", + hex::encode(block.parent()), + b - 1, + hex::encode(expected_parent) + ); + } + } + + // Update the latest finalized block + let mut txn = self.db.txn(); + IndexDb::set_block(&mut txn, b, block.id()); + IndexDb::set_latest_finalized_block(&mut txn, b); + txn.commit(); + } + + // Have dependents run if we updated the latest finalized block + Ok(our_latest_finalized != latest_finalized) + } + } +} diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs new file mode 100644 index 00000000..d3e24183 --- /dev/null +++ b/processor/scanner/src/lib.rs @@ -0,0 +1,507 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::{marker::PhantomData, future::Future, fmt::Debug}; +use std::{io, collections::HashMap}; + +use group::GroupEncoding; + +use borsh::{BorshSerialize, BorshDeserialize}; +use serai_db::{Get, DbTxn, Db}; + +use serai_primitives::{ExternalNetworkId, ExternalCoin, Amount}; +use serai_coins_primitives::OutInstructionWithBalance; + +use messages::substrate::ExecutedBatch; +use 
primitives::{task::*, Address, ReceivedOutput, Block, Payment}; + +// Logic for deciding where in its lifetime a multisig is. +mod lifetime; +pub use lifetime::LifetimeStage; + +// Database schema definition and associated functions. +mod db; +use db::ScannerGlobalDb; +pub use db::{BatchesToSign, AcknowledgedBatches, CompletedEventualities}; +// Task to index the blockchain, ensuring we don't reorganize finalized blocks. +mod index; +// Scans blocks for received coins. +mod scan; +/// Task which creates Batches for Substrate. +mod batch; +/// Task which reports Batches for signing. +mod report; +/// Task which handles events from Substrate once we can. +mod substrate; +/// Check blocks for transactions expected to eventually occur. +mod eventuality; + +pub(crate) fn sort_outputs>( + a: &O, + b: &O, +) -> core::cmp::Ordering { + use core::cmp::{Ordering, Ord}; + let res = a.id().as_ref().cmp(b.id().as_ref()); + assert!(res != Ordering::Equal, "two outputs within a collection had the same ID"); + res +} + +/// Extension traits around Block. +pub(crate) trait BlockExt: Block { + fn scan_for_outputs(&self, latest_active_key: Self::Key, key: Self::Key) -> Vec; +} +impl BlockExt for B { + fn scan_for_outputs(&self, latest_active_key: Self::Key, key: Self::Key) -> Vec { + let mut outputs = self.scan_for_outputs_unordered(latest_active_key, key); + outputs.sort_by(sort_outputs); + outputs + } +} + +/// A feed usable to scan a blockchain. +/// +/// This defines the primitive types used, along with various getters necessary for indexing. +pub trait ScannerFeed: 'static + Send + Sync + Clone { + /// The ID of the network being scanned for. + const NETWORK: ExternalNetworkId; + + /// The amount of confirmations a block must have to be considered finalized. + /// + /// This value must be at least `1`. + // This is distinct from `WINDOW_LENGTH` as it's only used for determining the lifetime of the + // key. The key switches to various stages of its lifetime depending on when user transactions + // will hit the Serai network (relative to the time they're made) and when outputs created by + // Serai become available again. If we set a long WINDOW_LENGTH, say two hours, that doesn't mean + // we expect user transactions made within a few minutes of a new key being declared to only + // appear in finalized blocks two hours later. + const CONFIRMATIONS: u64; + + /// The amount of blocks to process in parallel. + /// + /// This must be at least `1`. This value MUST be at least the worst-case latency to publish a + /// Batch for a block divided by the expected block time. Setting this value too low will risk a + /// backlog forming. Setting this value too high will only delay key rotation and forwarded + /// outputs. + // The latency to publish a Batch for a block is the latency of a provided transaction + // (1 minute), the latency of a signing protocol (1 minute), the latency of Serai to finalize a + // block (1 minute), and the latency to cosign such a block (5 minutes for the cosign distance + // plus 1 minute). Accordingly, this should be at least ~30 minutes, ideally 60 minutes. + const WINDOW_LENGTH: u64; + + /// The amount of blocks which will occur in 10 minutes (approximate). + /// + /// This value must be at least `1`. + const TEN_MINUTES: u64; + + /// The representation of a block for this blockchain. + /// + /// A block is defined as a consensus event associated with a set of transactions. It is not + /// necessary to literally define it as whatever the external network defines as a block. 
For + /// external networks which finalize block(s), this block type should be a representation of all + /// transactions within a finalization event. + type Block: Block; + + /// An error encountered when fetching data from the blockchain. + /// + /// This MUST be an ephemeral error. Retrying fetching data from the blockchain MUST eventually + /// resolve without manual intervention/changing the arguments. + type EphemeralError: Debug; + + /// Fetch the number of the latest finalized block. + /// + /// The block number is its zero-indexed position within a linear view of the external network's + /// consensus. The genesis block accordingly has block number 0. + fn latest_finalized_block_number( + &self, + ) -> impl Send + Future>; + + /// Fetch the timestamp of a block (represented in seconds since the epoch). + /// + /// This must be monotonically incrementing. Two blocks may share a timestamp. + fn time_of_block( + &self, + number: u64, + ) -> impl Send + Future>; + + /// Fetch a block header by its number. + /// + /// This does not check the returned BlockHeader is the header for the block we indexed. + fn unchecked_block_header_by_number( + &self, + number: u64, + ) -> impl Send + Future::Header, Self::EphemeralError>>; + + /// Fetch a block by its number. + /// + /// This does not check the returned Block is the block we indexed. + fn unchecked_block_by_number( + &self, + number: u64, + ) -> impl Send + Future>; + + /// Fetch a block by its number. + /// + /// Panics if the block requested wasn't indexed. + fn block_by_number( + &self, + getter: &(impl Send + Sync + Get), + number: u64, + ) -> impl Send + Future> { + async move { + let block = match self.unchecked_block_by_number(number).await { + Ok(block) => block, + Err(e) => Err(format!("couldn't fetch block {number}: {e:?}"))?, + }; + + // Check the ID of this block is the expected ID + { + let expected = crate::index::block_id(getter, number); + if block.id() != expected { + panic!( + "finalized chain reorganized from {} to {} at {}", + hex::encode(expected), + hex::encode(block.id()), + number, + ); + } + } + + Ok(block) + } + } + + /// The dust threshold for the specified coin. + /// + /// This MUST be constant. Serai MUST NOT create internal outputs worth less than this. This + /// SHOULD be a value worth handling at a human level. + fn dust(coin: ExternalCoin) -> Amount; + + /// The cost to aggregate an input as of the specified block. + /// + /// This is defined as the transaction fee for a 2-input, 1-output transaction. + fn cost_to_aggregate( + &self, + coin: ExternalCoin, + reference_block: &Self::Block, + ) -> impl Send + Future>; +} + +/// The key type for this ScannerFeed. +pub type KeyFor = <::Block as Block>::Key; +/// The address type for this ScannerFeed. +pub type AddressFor = <::Block as Block>::Address; +/// The output type for this ScannerFeed. +pub type OutputFor = <::Block as Block>::Output; +/// The eventuality type for this ScannerFeed. +pub type EventualityFor = <::Block as Block>::Eventuality; +/// The block type for this ScannerFeed. +pub type BlockFor = ::Block; + +/// A return to occur. 
+pub struct Return { + address: AddressFor, + output: OutputFor, +} + +impl Return { + pub(crate) fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + self.address.serialize(writer)?; + self.output.write(writer) + } + + pub(crate) fn read(reader: &mut impl io::Read) -> io::Result { + let address = AddressFor::::deserialize_reader(reader)?; + let output = OutputFor::::read(reader)?; + Ok(Return { address, output }) + } + + /// The address to return the output to. + pub fn address(&self) -> &AddressFor { + &self.address + } + + /// The output to return. + pub fn output(&self) -> &OutputFor { + &self.output + } +} + +/// An update for the scheduler. +pub struct SchedulerUpdate { + outputs: Vec>, + forwards: Vec>, + returns: Vec>, +} + +impl SchedulerUpdate { + /// The outputs to accumulate. + /// + /// These MUST be accumulated. + pub fn outputs(&self) -> &[OutputFor] { + &self.outputs + } + + /// The outputs to forward to the latest multisig. + /// + /// These MUST be forwarded in a 1-input 1-output transaction or dropped (if the fees are too + /// high to make the forwarding transaction). + pub fn forwards(&self) -> &[OutputFor] { + &self.forwards + } + + /// The outputs to return. + /// + /// These SHOULD be returned as specified (potentially in batch). They MAY be dropped if the fees + /// are too high to make the return transaction. + pub fn returns(&self) -> &[Return] { + &self.returns + } +} + +/// Eventualities, keyed by the encoding of the key the Eventualities are for. +pub type KeyScopedEventualities = HashMap, Vec>>; + +/// The object responsible for accumulating outputs and planning new transactions. +// TODO: Move this to Scheduler primitives +pub trait Scheduler: 'static + Send { + /// An error encountered when handling updates/payments. + /// + /// This MUST be an ephemeral error. Retrying handling updates/payments MUST eventually + /// resolve without manual intervention/changing the arguments. + type EphemeralError: Debug; + + /// The type for a signable transaction. + type SignableTransaction: scheduler_primitives::SignableTransaction; + + /// Activate a key. + /// + /// This SHOULD setup any necessary database structures. This SHOULD NOT cause the new key to + /// be used as the primary key. The multisig rotation time clearly establishes its steps. + fn activate_key(txn: &mut impl DbTxn, key: KeyFor); + + /// Flush all outputs within a retiring key to the new key. + /// + /// When a key is activated, the existing multisig should retain its outputs and utility for a + /// certain time period. With `flush_key`, all outputs should be directed towards fulfilling some + /// obligation or the `new_key`. Every output held by the retiring key MUST be connected to an + /// Eventuality. If a key no longer has active Eventualities, it MUST be able to be retired + /// without losing any coins. + /// + /// If the retiring key has any unfulfilled payments associated with it, those MUST be made + /// the responsibility of the new key. + fn flush_key( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + retiring_key: KeyFor, + new_key: KeyFor, + ) -> impl Send + Future, Self::EphemeralError>>; + + /// Retire a key as it'll no longer be used. + /// + /// Any key retired MUST NOT still have outputs associated with it. This SHOULD be a NOP other + /// than any assertions and database cleanup. This MUST NOT be expected to be called in a fashion + /// ordered to any other calls. 
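  /// (Editorial note: within this crate, the Eventuality task is what calls this, once a key in
  /// the `Finishing` stage no longer has any active Eventualities.)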
+ fn retire_key(txn: &mut impl DbTxn, key: KeyFor); + + /// Accumulate outputs into the scheduler, yielding the Eventualities now to be scanned for. + /// + /// `active_keys` is the list of active keys, potentially including a key for which we've already + /// called `retire_key` on. If so, its stage will be `Finishing` and no further operations will + /// be expected for it. Nonetheless, it may be present. + /// + /// The `Vec` used as the key in the returned HashMap should be the encoded key the + /// Eventualities are for. + fn update( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + active_keys: &[(KeyFor, LifetimeStage)], + update: SchedulerUpdate, + ) -> impl Send + Future, Self::EphemeralError>>; + + /// Fulfill a series of payments, yielding the Eventualities now to be scanned for. + /// + /// Any Eventualities returned by this function must include an output-to-Serai (such as a Branch + /// or Change), unless they descend from a transaction returned by this function which satisfies + /// that requirement. This ensures when we scan outputs from transactions we made, we report the + /// block up to Substrate, and obtain synchrony on all prior blocks (allowing us to identify our + /// own transactions, which we may be prior unaware of due to a lagging view of Substrate). + /// + /// `active_keys` is the list of active keys, potentially including a key for which we've already + /// called `retire_key` on. If so, its stage will be `Finishing` and no further operations will + /// be expected for it. Nonetheless, it may be present. + /// + /// The `Vec` used as the key in the returned HashMap should be the encoded key the + /// Eventualities are for. + /* + We need an output-to-Serai so we can detect a block with an Eventuality completion with regards + to Burns, forcing us to ensure we have accumulated all the Burns we should by the time we + handle that block. We explicitly don't require children have this requirement as by detecting + the first resolution, we ensure we'll accumulate the Burns (therefore becoming aware of the + childrens' Eventualities, enabling recognizing their resolutions). + + This carve out enables the following: + + ------------------ Fulfillment TX ---------------------- + | Primary Output | ---------------> | New Primary Output | + ------------------ | ---------------------- + | + | ------------------------------ + |------> | Branching Output for Burns | + ------------------------------ + + Without wasting pointless Change outputs on every transaction (as there's a single parent which + has an output-to-Serai, the new primary output). + */ + fn fulfill( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + active_keys: &[(KeyFor, LifetimeStage)], + payments: Vec>>, + ) -> impl Send + Future, Self::EphemeralError>>; +} + +/// A representation of a scanner. +#[allow(non_snake_case)] +pub struct Scanner { + substrate_handle: TaskHandle, + _S: PhantomData, +} +impl Scanner { + /// Create a new scanner. + /// + /// This will begin its execution, spawning several asynchronous tasks. + /// + /// This will return None if the Scanner was never initialized. 
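  /// (Editorial note: on a fresh deployment, use `Scanner::initialize` instead; it records the
  /// start block and queues the initial key before delegating to this constructor.)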
+ pub async fn new(db: impl Db, feed: S, scheduler: impl Scheduler) -> Option { + let start_block = ScannerGlobalDb::::start_block(&db)?; + + let index_task = index::IndexTask::new(db.clone(), feed.clone(), start_block).await; + let scan_task = scan::ScanTask::new(db.clone(), feed.clone(), start_block); + let batch_task = batch::BatchTask::<_, S>::new(db.clone(), start_block); + let report_task = report::ReportTask::<_, S>::new(db.clone()); + let substrate_task = substrate::SubstrateTask::<_, S>::new(db.clone()); + let eventuality_task = + eventuality::EventualityTask::<_, _, _>::new(db, feed, scheduler, start_block); + + let (index_task_def, _index_handle) = Task::new(); + let (scan_task_def, scan_handle) = Task::new(); + let (batch_task_def, batch_handle) = Task::new(); + let (report_task_def, report_handle) = Task::new(); + let (substrate_task_def, substrate_handle) = Task::new(); + let (eventuality_task_def, eventuality_handle) = Task::new(); + + // Upon indexing a new block, scan it + tokio::spawn(index_task.continually_run(index_task_def, vec![scan_handle.clone()])); + // Upon scanning a block, creates the batches for it + tokio::spawn(scan_task.continually_run(scan_task_def, vec![batch_handle])); + // Upon creating batches for a block, we run the report task + tokio::spawn(batch_task.continually_run(batch_task_def, vec![report_handle])); + // Upon reporting the batches for signing, we do nothing (as the burden is on a tributary which + // won't immediately yield a result) + tokio::spawn(report_task.continually_run(report_task_def, vec![])); + // Upon handling an event from Substrate, we run the Eventuality task (as it's what's affected) + tokio::spawn(substrate_task.continually_run(substrate_task_def, vec![eventuality_handle])); + // Upon handling the Eventualities in a block, we run the scan task as we've advanced the + // window its allowed to scan + tokio::spawn(eventuality_task.continually_run(eventuality_task_def, vec![scan_handle])); + + Some(Self { substrate_handle, _S: PhantomData }) + } + + /// Initialize the scanner. + /// + /// This will begin its execution, spawning several asynchronous tasks. + /// + /// This passes through to `Scanner::new` if prior called. + pub async fn initialize( + mut db: impl Db, + feed: S, + scheduler: impl Scheduler, + start_block: u64, + start_key: KeyFor, + ) -> Self { + if ScannerGlobalDb::::start_block(&db).is_none() { + let mut txn = db.txn(); + ScannerGlobalDb::::set_start_block(&mut txn, start_block); + ScannerGlobalDb::::queue_key(&mut txn, start_block, start_key); + txn.commit(); + } + + Self::new(db, feed, scheduler).await.unwrap() + } + + /// Acknowledge a Batch having been published on Serai. + /// + /// This means the specified Batch was ordered on Serai in relation to Burn events, and all + /// validators have achieved synchrony on it. + /// + /// `burns` is a list of Burns to queue with the acknowledgement of this Batch for efficiency's + /// sake. Any Burns passed here MUST NOT be passed into any other call of `acknowledge_batch` nor + /// `queue_burns`. Doing so will cause them to be executed multiple times. + /// + /// The calls to this function must be ordered with regards to `queue_burns`. 
+ pub fn acknowledge_batch( + &mut self, + mut txn: impl DbTxn, + batch: ExecutedBatch, + burns: Vec, + key_to_activate: Option>, + ) { + log::info!("acknowledging batch {}", batch.id); + + // Queue acknowledging this block via the Substrate task + substrate::queue_acknowledge_batch::(&mut txn, batch, burns, key_to_activate); + // Commit this txn so this data is flushed + txn.commit(); + // Then run the Substrate task + self.substrate_handle.run_now(); + } + + /// Queue Burns. + /// + /// The scanner only updates the scheduler with new outputs upon acknowledging a block. The + /// ability to fulfill Burns, and therefore their order, is dependent on the current output + /// state. This immediately sets a bound that this function is ordered with regards to + /// `acknowledge_batch`. + /// + /// The Burns specified here MUST NOT also be passed to `acknowledge_batch`. + /* + The fact Burns can be queued during any Substrate block is problematic. The scanner is allowed + to scan anything within the window set by the Eventuality task. The Eventuality task is allowed + to handle all blocks until it reaches a block needing acknowledgement. + + This means we may queue Burns when the latest acknowledged block is 1, yet we've already + scanned 101. Such Burns may complete back in block 2, and we simply wouldn't have noticed due + to not having yet generated the Eventualities. + + We solve this by mandating all transactions made as the result of an Eventuality include a + output-to-Serai worth at least `DUST`. If that occurs, the scanner will force a consensus + protocol on block 2. Accordingly, we won't scan all the way to block 101 (missing the + resolution of the Eventuality) as we'll obtain synchrony on block 2 and all Burns queued prior + to it. + + Another option would be to re-check historical blocks, yet this would potentially redo an + unbounded amount of work. It would also not allow us to safely detect if received outputs were + in fact the result of Eventualities or not. + + Another option would be to schedule Burns after the next-acknowledged block, yet this would add + latency and likely practically require we add regularly scheduled notable blocks (which may be + unnecessary). + */ + pub fn queue_burns(&mut self, mut txn: impl DbTxn, burns: Vec) { + if burns.is_empty() { + return; + } + + // Queue queueing these burns via the Substrate task + substrate::queue_queue_burns::(&mut txn, burns); + // Commit this txn so this data is flushed + txn.commit(); + // Then run the Substrate task + self.substrate_handle.run_now(); + } +} diff --git a/processor/scanner/src/lifetime.rs b/processor/scanner/src/lifetime.rs new file mode 100644 index 00000000..e07f5f42 --- /dev/null +++ b/processor/scanner/src/lifetime.rs @@ -0,0 +1,134 @@ +use crate::ScannerFeed; + +/// An enum representing the stage of a multisig within its lifetime. +/// +/// This corresponds to `spec/processor/Multisig Rotation.md`, which details steps 1-8 of the +/// rotation process. Steps 7-8 regard a multisig which isn't retiring yet retired, and +/// accordingly, no longer exists, so they are not modelled here (as this only models active +/// multisigs. Inactive multisigs aren't represented in the first place). +#[derive(Clone, Copy, PartialEq, Debug)] +pub enum LifetimeStage { + /// A new multisig, once active, shouldn't actually start receiving coins until several blocks + /// later. If any UI is premature in sending to this multisig, we delay to report the outputs to + /// prevent some DoS concerns. 
+ /// + /// This represents steps 1-3 for a new multisig. + ActiveYetNotReporting, + /// Active with all outputs being reported on-chain. + /// + /// This represents step 4 onwards for a new multisig. + Active, + /// Retiring with all outputs being reported on-chain. + /// + /// This represents step 4 for a retiring multisig. + UsingNewForChange, + /// Retiring with outputs being forwarded, reported on-chain once forwarded. + /// + /// This represents step 5 for a retiring multisig. + Forwarding, + /// Retiring with only existing obligations being handled. + /// + /// This represents step 6 for a retiring multisig. + /// + /// Steps 7 and 8 are represented by the retiring multisig no longer existing, and these states + /// are only for multisigs which actively exist. + Finishing, +} + +/// The lifetime of the multisig, including various block numbers. +pub(crate) struct Lifetime { + pub(crate) stage: LifetimeStage, + pub(crate) block_at_which_reporting_starts: u64, + // This is only Some if the next key's activation block number is passed to calculate, and the + // stage is at least `LifetimeStage::Active.` + pub(crate) block_at_which_forwarding_starts: Option<u64>, +} + +impl Lifetime { + /// Get the lifetime of this multisig. + /// + /// Panics if the multisig being calculated for isn't actually active and a variety of other + /// insane cases. + pub(crate) fn calculate<S: ScannerFeed>( + block_number: u64, + activation_block_number: u64, + next_keys_activation_block_number: Option<u64>, + ) -> Self { + assert!( + activation_block_number <= block_number, + "calculating lifetime stage for an inactive multisig" + ); + // This is exclusive, not inclusive, since we want a CONFIRMATIONS + 10 minutes window and the + // activation block itself is the first block within this window + let active_yet_not_reporting_end_block = + activation_block_number + S::CONFIRMATIONS + S::TEN_MINUTES; + // The exclusive end block is the inclusive start block + let block_at_which_reporting_starts = active_yet_not_reporting_end_block; + if block_number < active_yet_not_reporting_end_block { + return Lifetime { + stage: LifetimeStage::ActiveYetNotReporting, + block_at_which_reporting_starts, + block_at_which_forwarding_starts: None, + }; + } + + let Some(next_keys_activation_block_number) = next_keys_activation_block_number else { + // If there is no next multisig, this is the active multisig + return Lifetime { + stage: LifetimeStage::Active, + block_at_which_reporting_starts, + block_at_which_forwarding_starts: None, + }; + }; + + assert!( + next_keys_activation_block_number > active_yet_not_reporting_end_block, + "next set of keys activated before this multisig activated" + ); + + let new_active_yet_not_reporting_end_block = + next_keys_activation_block_number + S::CONFIRMATIONS + S::TEN_MINUTES; + let new_active_and_used_for_change_end_block = + new_active_yet_not_reporting_end_block + S::CONFIRMATIONS; + // The exclusive end block is the inclusive start block + let block_at_which_forwarding_starts = Some(new_active_and_used_for_change_end_block); + + // If the new multisig is still having its activation block finalized on-chain, this multisig + // is still active (step 3) + if block_number < new_active_yet_not_reporting_end_block { + return Lifetime { + stage: LifetimeStage::Active, + block_at_which_reporting_starts, + block_at_which_forwarding_starts, + }; + } + + // Step 4 details a further CONFIRMATIONS + if block_number < new_active_and_used_for_change_end_block { + return Lifetime { + stage: LifetimeStage::UsingNewForChange,
block_at_which_reporting_starts, + block_at_which_forwarding_starts, + }; + } + + // Step 5 details a further 6 hours + // 6 hours = 6 * 60 minutes = 6 * 6 * 10 minutes + let new_active_and_forwarded_to_end_block = + new_active_and_used_for_change_end_block + (6 * 6 * S::TEN_MINUTES); + if block_number < new_active_and_forwarded_to_end_block { + return Lifetime { + stage: LifetimeStage::Forwarding, + block_at_which_reporting_starts, + block_at_which_forwarding_starts, + }; + } + + // Step 6 + Lifetime { + stage: LifetimeStage::Finishing, + block_at_which_reporting_starts, + block_at_which_forwarding_starts, + } + } +} diff --git a/processor/scanner/src/report/db.rs b/processor/scanner/src/report/db.rs new file mode 100644 index 00000000..a97a6b39 --- /dev/null +++ b/processor/scanner/src/report/db.rs @@ -0,0 +1,26 @@ +use serai_db::{Get, DbTxn, create_db}; + +use serai_validator_sets_primitives::Session; + +create_db!( + ScannerBatch { + // The last session to sign a Batch and their first Batch signed + LastSessionToSignBatchAndFirstBatch: () -> (Session, u32), + } +); + +pub(crate) struct BatchDb; +impl BatchDb { + pub(crate) fn set_last_session_to_sign_batch_and_first_batch( + txn: &mut impl DbTxn, + session: Session, + id: u32, + ) { + LastSessionToSignBatchAndFirstBatch::set(txn, &(session, id)); + } + pub(crate) fn last_session_to_sign_batch_and_first_batch( + getter: &impl Get, + ) -> Option<(Session, u32)> { + LastSessionToSignBatchAndFirstBatch::get(getter) + } +} diff --git a/processor/scanner/src/report/mod.rs b/processor/scanner/src/report/mod.rs new file mode 100644 index 00000000..9055fcd0 --- /dev/null +++ b/processor/scanner/src/report/mod.rs @@ -0,0 +1,107 @@ +use core::{marker::PhantomData, future::Future}; + +use serai_db::{DbTxn, Db}; + +use serai_validator_sets_primitives::Session; + +use primitives::task::{DoesNotError, ContinuallyRan}; +use crate::{ + db::{BatchData, BatchToReportDb, BatchesToSign}, + substrate, ScannerFeed, +}; + +mod db; +use db::BatchDb; + +// This task begins reporting Batches for signing once the pre-requisities are met. +#[allow(non_snake_case)] +pub(crate) struct ReportTask { + db: D, + _S: PhantomData, +} + +impl ReportTask { + pub(crate) fn new(db: D) -> Self { + Self { db, _S: PhantomData } + } +} + +impl ContinuallyRan for ReportTask { + type Error = DoesNotError; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut made_progress = false; + loop { + let mut txn = self.db.txn(); + let Some(BatchData { + session_to_sign_batch, + external_key_for_session_to_sign_batch, + batch, + }) = BatchToReportDb::::try_recv_batch(&mut txn) + else { + break; + }; + + /* + If this is the handover Batch, the first Batch signed by a session which retires the + prior validator set, then this should only be signed after the prior validator set's + actions are fully validated. + + The new session will only be responsible for signing this Batch if the prior key has + retired, successfully completed all its on-external-network actions. + + We check here the prior session has successfully completed all its on-Serai-network + actions by ensuring we've validated all Batches expected from it. Only then do we sign + the Batch confirming the handover. 
+ + We also wait for the Batch confirming the handover to be accepted on-chain, ensuring we + don't verify the prior session's Batches, sign the handover Batch and the following + Batch, have the prior session publish a malicious Batch where our handover Batch should + be, before our following Batch becomes our handover Batch. + */ + if session_to_sign_batch != Session(0) { + // We may have Session(1)'s first Batch be Batch 0 if Session(0) never publishes a + // Batch. This is fine as we'll hit the distinct Session check and then set the correct + // values into this DB entry. All other sessions must complete the handover process, + // which requires having published at least one Batch + let (last_session, first_batch) = + BatchDb::last_session_to_sign_batch_and_first_batch(&txn).unwrap_or((Session(0), 0)); + // Because this boolean was expanded, we lose short-circuiting. That's fine + let handover_batch = last_session != session_to_sign_batch; + let batch_after_handover_batch = + (last_session == session_to_sign_batch) && ((first_batch + 1) == batch.id); + if handover_batch || batch_after_handover_batch { + let verified_prior_batch = substrate::last_acknowledged_batch::(&txn) + // Since `batch.id = 0` in the Session(0)-never-published-a-Batch case, we don't + // check `last_acknowledged_batch >= (batch.id - 1)` but instead this + .map(|last_acknowledged_batch| (last_acknowledged_batch + 1) >= batch.id) + // We've never verified any Batches + .unwrap_or(false); + if !verified_prior_batch { + // Drop the txn to restore the Batch to report to the DB + drop(txn); + break; + } + } + + // If this is the handover Batch, update the last session to sign a Batch + if handover_batch { + BatchDb::set_last_session_to_sign_batch_and_first_batch( + &mut txn, + session_to_sign_batch, + batch.id, + ); + } + } + + BatchesToSign::send(&mut txn, &external_key_for_session_to_sign_batch.0, &batch); + txn.commit(); + + made_progress = true; + } + + Ok(made_progress) + } + } +} diff --git a/processor/scanner/src/scan/db.rs b/processor/scanner/src/scan/db.rs new file mode 100644 index 00000000..44023bc8 --- /dev/null +++ b/processor/scanner/src/scan/db.rs @@ -0,0 +1,68 @@ +use core::marker::PhantomData; + +use serai_db::{Get, DbTxn, create_db}; + +use primitives::ReceivedOutput; + +use crate::{db::OutputWithInInstruction, ScannerFeed, KeyFor, AddressFor, OutputFor}; + +create_db!( + ScannerScan { + // The next block to scan for received outputs + NextToScanForOutputsBlock: () -> u64, + + SerializedQueuedOutputs: (block_number: u64) -> Vec, + + ReportedInInstructionForOutput: (id: &[u8]) -> (), + } +); + +pub(crate) struct ScanDb(PhantomData); +impl ScanDb { + pub(crate) fn set_next_to_scan_for_outputs_block( + txn: &mut impl DbTxn, + next_to_scan_for_outputs_block: u64, + ) { + NextToScanForOutputsBlock::set(txn, &next_to_scan_for_outputs_block); + } + pub(crate) fn next_to_scan_for_outputs_block(getter: &impl Get) -> Option { + NextToScanForOutputsBlock::get(getter) + } + + pub(crate) fn take_queued_outputs( + txn: &mut impl DbTxn, + block_number: u64, + ) -> Vec> { + let serialized = SerializedQueuedOutputs::get(txn, block_number).unwrap_or(vec![]); + let mut serialized = serialized.as_slice(); + + let mut res = Vec::with_capacity(serialized.len() / 128); + while !serialized.is_empty() { + res.push(OutputWithInInstruction::::read(&mut serialized).unwrap()); + } + res + } + pub(crate) fn queue_output_until_block( + txn: &mut impl DbTxn, + queue_for_block: u64, + output: &OutputWithInInstruction, + ) { + let mut 
outputs = + SerializedQueuedOutputs::get(txn, queue_for_block).unwrap_or(Vec::with_capacity(128)); + output.write(&mut outputs).unwrap(); + SerializedQueuedOutputs::set(txn, queue_for_block, &outputs); + } + + pub(crate) fn prior_reported_in_instruction_for_output( + getter: &impl Get, + id: & as ReceivedOutput, AddressFor>>::Id, + ) -> bool { + ReportedInInstructionForOutput::get(getter, id.as_ref()).is_some() + } + pub(crate) fn reported_in_instruction_for_output( + txn: &mut impl DbTxn, + id: & as ReceivedOutput, AddressFor>>::Id, + ) { + ReportedInInstructionForOutput::set(txn, id.as_ref(), &()); + } +} diff --git a/processor/scanner/src/scan/mod.rs b/processor/scanner/src/scan/mod.rs new file mode 100644 index 00000000..24426c62 --- /dev/null +++ b/processor/scanner/src/scan/mod.rs @@ -0,0 +1,371 @@ +use core::future::Future; +use std::collections::HashMap; + +use scale::Decode; +use serai_db::{Get, DbTxn, Db}; + +use serai_in_instructions_primitives::{ + Shorthand, RefundableInInstruction, InInstruction, InInstructionWithBalance, +}; + +use primitives::{task::ContinuallyRan, OutputType, ReceivedOutput, Block}; + +use crate::{ + lifetime::LifetimeStage, + db::{ + OutputWithInInstruction, Returnable, SenderScanData, ScannerGlobalDb, InInstructionData, + ScanToBatchDb, ScanToEventualityDb, + }, + BlockExt, ScannerFeed, AddressFor, OutputFor, Return, sort_outputs, + eventuality::latest_scannable_block, +}; + +mod db; +use db::ScanDb; + +pub(crate) fn next_to_scan_for_outputs_block(getter: &impl Get) -> Option { + ScanDb::::next_to_scan_for_outputs_block(getter) +} + +pub(crate) fn queue_output_until_block( + txn: &mut impl DbTxn, + queue_for_block: u64, + output: &OutputWithInInstruction, +) { + // This isn't a perfect assertion as by the time this txn commits, we may have already started + // scanning this block. That doesn't change it should never trip as we queue outside the window + // we'll scan + assert!( + queue_for_block >= + next_to_scan_for_outputs_block::(txn) + .expect("queueing an output despite no next-to-scan-for-outputs block"), + "queueing an output for a block already scanned" + ); + ScanDb::::queue_output_until_block(txn, queue_for_block, output) +} + +// Construct an InInstruction from an external output. +// +// Also returns the address to return the coins to upon error. 
+fn in_instruction_from_output( + output: &OutputFor, +) -> (Option>, Option) { + assert_eq!(output.kind(), OutputType::External); + + let presumed_origin = output.presumed_origin(); + + let mut data = output.data(); + let shorthand = match Shorthand::decode(&mut data) { + Ok(shorthand) => shorthand, + Err(e) => { + log::info!("data in output {} wasn't valid shorthand: {e:?}", hex::encode(output.id())); + return (presumed_origin, None); + } + }; + let instruction = match RefundableInInstruction::try_from(shorthand) { + Ok(instruction) => instruction, + Err(e) => { + log::info!( + "shorthand in output {} wasn't convertible to a RefundableInInstruction: {e:?}", + hex::encode(output.id()) + ); + return (presumed_origin, None); + } + }; + + ( + instruction.origin.and_then(|addr| AddressFor::::try_from(addr).ok()).or(presumed_origin), + Some(instruction.instruction), + ) +} + +pub(crate) struct ScanTask { + db: D, + feed: S, +} + +impl ScanTask { + pub(crate) fn new(mut db: D, feed: S, start_block: u64) -> Self { + if ScanDb::::next_to_scan_for_outputs_block(&db).is_none() { + // Initialize the DB + let mut txn = db.txn(); + ScanDb::::set_next_to_scan_for_outputs_block(&mut txn, start_block); + txn.commit(); + } + + Self { db, feed } + } +} + +impl ContinuallyRan for ScanTask { + type Error = String; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + // Fetch the safe to scan block + let latest_scannable = + latest_scannable_block::(&self.db).expect("ScanTask run before writing the start block"); + // Fetch the next block to scan + let next_to_scan = ScanDb::::next_to_scan_for_outputs_block(&self.db) + .expect("ScanTask run before writing the start block"); + + for b in next_to_scan ..= latest_scannable { + let block = self.feed.block_by_number(&self.db, b).await?; + + log::info!("scanning block: {} ({b})", hex::encode(block.id())); + + let mut txn = self.db.txn(); + + assert_eq!(ScanDb::::next_to_scan_for_outputs_block(&txn).unwrap(), b); + + let keys = ScannerGlobalDb::::active_keys_as_of_next_to_scan_for_outputs_block(&txn) + .expect("scanning for a blockchain without any keys set"); + + let latest_active_key = { + let mut keys = keys.clone(); + loop { + // Use the most recent key + let key = keys.pop().unwrap(); + // Unless this key is active, but not yet reporting + if key.stage == LifetimeStage::ActiveYetNotReporting { + continue; + } + break key.key; + } + }; + + // The scan data for this block + let mut scan_data = SenderScanData { + block_number: b, + received_external_outputs: vec![], + forwards: vec![], + returns: vec![], + }; + // The InInstructions for this block + let mut in_instructions = vec![]; + + // The outputs queued for this block + let queued_outputs = { + let mut queued_outputs = ScanDb::::take_queued_outputs(&mut txn, b); + // Sort the queued outputs in case they weren't queued in a deterministic fashion + queued_outputs.sort_by(|a, b| sort_outputs(&a.output, &b.output)); + queued_outputs + }; + for queued_output in queued_outputs { + in_instructions.push(( + queued_output.output.id(), + Returnable { + return_address: queued_output.return_address, + in_instruction: queued_output.in_instruction, + }, + )); + scan_data.received_external_outputs.push(queued_output.output); + } + + // We subtract the cost to aggregate from some outputs we scan + // This cost is fetched with an asynchronous function which may be non-trivial + // We cache the result of this function here to avoid calling it multiple times + let mut costs_to_aggregate = 
HashMap::with_capacity(1); + + // Scan for each key + for key in &keys { + for output in block.scan_for_outputs(latest_active_key, key.key) { + assert_eq!(output.key(), key.key); + + /* + The scan task runs ahead of time, obtaining ordering on the external network's blocks + with relation to events on the Serai network. This is done via publishing a Batch + which contains the InInstructions from External outputs. Accordingly, the scan + process only has to yield External outputs. + + It'd appear to make sense to scan for all outputs, and after scanning for all + outputs, yield all outputs. The issue is we can't identify outputs we created here. + We can only identify the outputs we receive and their *declared intention*. + + We only want to handle Change/Branch/Forwarded outputs we made ourselves. For + Forwarded, the reasoning is obvious (retiring multisigs should only downsize, yet + accepting new outputs solely because they claim to be Forwarded would increase the + size of the multisig). For Change/Branch, it's because such outputs which aren't ours + are pointless. They wouldn't hurt to accumulate though. + + The issue is they would hurt to accumulate. We want to filter outputs which are less + than their cost to aggregate, a variable itself variable to the current blockchain. + We can filter such outputs here, yet if we drop a Change output, we create an + insolvency. We'd need to track the loss and offset it later. That means we can't + filter such outputs, as we expect any Change output we make. + + The issue is the Change outputs we don't make. Someone can create an output declaring + to be Change, yet not actually Change. If we don't filter it, it'd be queued for + accumulation, yet it may cost more to accumulate than it's worth. + + The solution is to let the Eventuality task, which does know if we made an output or + not (or rather, if a transaction is identical to a transaction which should exist + regarding effects) decide to keep/yield the outputs which we should only keep if we + made them (as Serai itself should not make worthless outputs, so we can assume + they're worthwhile, and even if they're not economically, they are technically). + + The alternative, we drop outputs here with a generic filter rule and then report back + the insolvency created, still doesn't work as we'd only be creating an insolvency if + the output was actually made by us (and not simply someone else sending in). We can + have the Eventuality task report the insolvency, yet that requires the scanner be + responsible for such filter logic. It's more flexible, and has a cleaner API, + to do so at a higher level. 
+ */ + if output.kind() != OutputType::External { + // While we don't report these outputs, we still need consensus on this block and + // accordingly still need to set it as notable + let balance = output.balance(); + // We ensure it's over the dust limit to prevent people sending 1 satoshi from + // causing an invocation of a consensus/signing protocol + if balance.amount.0 >= S::dust(balance.coin).0 { + ScannerGlobalDb::::flag_notable_due_to_non_external_output(&mut txn, b); + } + continue; + } + + // Check this isn't dust + let balance_to_use = { + let mut balance = output.balance(); + + // First, subtract 2 * the cost to aggregate, as detailed in + // `spec/processor/UTXO Management.md` + + // We cache this, so if it isn't yet cached, insert it into the cache + if let std::collections::hash_map::Entry::Vacant(e) = + costs_to_aggregate.entry(balance.coin) + { + e.insert(self.feed.cost_to_aggregate(balance.coin, &block).await.map_err(|e| { + format!( + "ScanTask couldn't fetch cost to aggregate {:?} at {b}: {e:?}", + balance.coin + ) + })?); + } + let cost_to_aggregate = costs_to_aggregate[&balance.coin]; + balance.amount.0 -= 2 * cost_to_aggregate.0; + + // Now, check it's still past the dust threshold + if balance.amount.0 < S::dust(balance.coin).0 { + continue; + } + + balance + }; + + // Fetch the InInstruction/return addr for this output + let output_with_in_instruction = match in_instruction_from_output::(&output) { + (return_address, Some(instruction)) => OutputWithInInstruction { + output, + return_address, + in_instruction: InInstructionWithBalance { instruction, balance: balance_to_use }, + }, + (Some(address), None) => { + // Since there was no instruction here, return this since we parsed a return + // address + if key.stage != LifetimeStage::Finishing { + scan_data.returns.push(Return { address, output }); + } + continue; + } + // Since we didn't receive an instruction nor can we return this, queue this for + // accumulation and move on + (None, None) => { + if key.stage != LifetimeStage::Finishing { + scan_data.received_external_outputs.push(output); + } + continue; + } + }; + + // Drop External outputs if they're to a multisig which won't report them + // This means we should report any External output we save to disk here + #[allow(clippy::match_same_arms)] + match key.stage { + // This multisig isn't yet reporting its External outputs to avoid a DoS + // Queue the output to be reported when this multisig starts reporting + LifetimeStage::ActiveYetNotReporting => { + ScanDb::::queue_output_until_block( + &mut txn, + key.block_at_which_reporting_starts, + &output_with_in_instruction, + ); + continue; + } + // We should report External outputs in these cases + LifetimeStage::Active | LifetimeStage::UsingNewForChange => {} + // We should report External outputs only once forwarded, where they'll appear as + // OutputType::Forwarded. 
We save them now for when they appear + LifetimeStage::Forwarding => { + // When the forwarded output appears, we can see which Plan it's associated with + // and from there recover this output + scan_data.forwards.push(output_with_in_instruction); + continue; + } + // We should drop these as we should not be handling new External outputs at this + // time + LifetimeStage::Finishing => { + continue; + } + } + // Ensures we didn't miss a `continue` above + assert!(matches!(key.stage, LifetimeStage::Active | LifetimeStage::UsingNewForChange)); + + in_instructions.push(( + output_with_in_instruction.output.id(), + Returnable { + return_address: output_with_in_instruction.return_address, + in_instruction: output_with_in_instruction.in_instruction, + }, + )); + scan_data.received_external_outputs.push(output_with_in_instruction.output); + } + } + + // Sort the InInstructions by the output ID + in_instructions.sort_by(|(output_id_a, _), (output_id_b, _)| { + use core::cmp::{Ordering, Ord}; + let res = output_id_a.as_ref().cmp(output_id_b.as_ref()); + assert!(res != Ordering::Equal, "two outputs within a collection had the same ID"); + res + }); + // Check we haven't prior reported an InInstruction for this output + // This is a sanity check which is intended to prevent multiple instances of sriXYZ + // on-chain due to a single output + for (id, _) in &in_instructions { + assert!( + !ScanDb::::prior_reported_in_instruction_for_output(&txn, id), + "prior reported an InInstruction for an output with this ID" + ); + ScanDb::::reported_in_instruction_for_output(&mut txn, id); + } + // Reformat the InInstructions to just the InInstructions + let in_instructions = in_instructions + .into_iter() + .map(|(_id, in_instruction)| in_instruction) + .collect::>(); + // Send the InInstructions to the report task + // We need to also specify which key is responsible for signing the Batch for these, which + // will always be the oldest key (as the new key signing the Batch signifies handover + // acceptance) + ScanToBatchDb::::send_in_instructions( + &mut txn, + b, + &InInstructionData { + session_to_sign_batch: keys[0].session, + external_key_for_session_to_sign_batch: keys[0].key, + returnable_in_instructions: in_instructions, + }, + ); + + // Send the scan data to the eventuality task + ScanToEventualityDb::::send_scan_data(&mut txn, b, &scan_data); + // Update the next to scan block + ScanDb::::set_next_to_scan_for_outputs_block(&mut txn, b + 1); + txn.commit(); + } + + // Run dependents if we successfully scanned any blocks + Ok(next_to_scan <= latest_scannable) + } + } +} diff --git a/processor/scanner/src/substrate/db.rs b/processor/scanner/src/substrate/db.rs new file mode 100644 index 00000000..1e0181b8 --- /dev/null +++ b/processor/scanner/src/substrate/db.rs @@ -0,0 +1,98 @@ +use core::marker::PhantomData; + +use group::GroupEncoding; + +use borsh::{BorshSerialize, BorshDeserialize}; +use serai_db::{Get, DbTxn, create_db, db_channel}; + +use serai_coins_primitives::OutInstructionWithBalance; + +use messages::substrate::ExecutedBatch; + +use crate::{ScannerFeed, KeyFor}; + +#[derive(BorshSerialize, BorshDeserialize)] +struct AcknowledgeBatchEncodable { + batch: ExecutedBatch, + burns: Vec, + key_to_activate: Option>, +} + +#[derive(BorshSerialize, BorshDeserialize)] +enum ActionEncodable { + AcknowledgeBatch(AcknowledgeBatchEncodable), + QueueBurns(Vec), +} + +pub(crate) struct AcknowledgeBatch { + pub(crate) batch: ExecutedBatch, + pub(crate) burns: Vec, + pub(crate) key_to_activate: Option>, +} + 
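For reference, a minimal numeric sketch of the dust filter the scan task above applies to External outputs: twice the cost to aggregate is subtracted, and the output is only kept if the remainder still clears the dust threshold. The helper name and the figures here are hypothetical, not values from the codebase.

```rust
// Sketch of the scan task's dust filter, assuming made-up amounts.
// Returns the reduced amount to use if the output is worth accumulating, or None if it should be
// dropped as dust.
fn keep_external_output(amount: u64, cost_to_aggregate: u64, dust: u64) -> Option<u64> {
  // As per `spec/processor/UTXO Management.md`, budget two aggregation inputs per output
  let amount_after_aggregation_costs = amount.checked_sub(2 * cost_to_aggregate)?;
  // Only keep the output if what's left still clears the dust threshold
  (amount_after_aggregation_costs >= dust).then_some(amount_after_aggregation_costs)
}

fn main() {
  // Hypothetical values: a 10_000-unit output, 500 units to aggregate, 5_000-unit dust threshold
  assert_eq!(keep_external_output(10_000, 500, 5_000), Some(9_000));
  // A 5_500-unit output no longer clears dust once aggregation costs are subtracted
  assert_eq!(keep_external_output(5_500, 500, 5_000), None);
}
```

This mirrors the `2 * cost_to_aggregate` subtraction and `S::dust(coin)` comparison in the scan task, where the reduced amount is also what's recorded as the balance of the resulting InInstruction.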
+pub(crate) enum Action { + AcknowledgeBatch(AcknowledgeBatch), + QueueBurns(Vec), +} + +create_db!( + ScannerSubstrate { + LastAcknowledgedBatch: () -> u32, + } +); + +db_channel!( + ScannerSubstrate { + Actions: () -> ActionEncodable, + } +); + +pub(crate) struct SubstrateDb(PhantomData); +impl SubstrateDb { + pub(crate) fn last_acknowledged_batch(getter: &impl Get) -> Option { + LastAcknowledgedBatch::get(getter) + } + + pub(crate) fn set_last_acknowledged_batch(txn: &mut impl DbTxn, id: u32) { + LastAcknowledgedBatch::set(txn, &id) + } + + pub(crate) fn queue_acknowledge_batch( + txn: &mut impl DbTxn, + batch: ExecutedBatch, + burns: Vec, + key_to_activate: Option>, + ) { + Actions::send( + txn, + &ActionEncodable::AcknowledgeBatch(AcknowledgeBatchEncodable { + batch, + burns, + key_to_activate: key_to_activate.map(|key| key.to_bytes().as_ref().to_vec()), + }), + ); + } + pub(crate) fn queue_queue_burns(txn: &mut impl DbTxn, burns: Vec) { + Actions::send(txn, &ActionEncodable::QueueBurns(burns)); + } + + pub(crate) fn next_action(txn: &mut impl DbTxn) -> Option> { + let action_encodable = Actions::try_recv(txn)?; + Some(match action_encodable { + ActionEncodable::AcknowledgeBatch(AcknowledgeBatchEncodable { + batch, + burns, + key_to_activate, + }) => Action::AcknowledgeBatch(AcknowledgeBatch { + batch, + burns, + key_to_activate: key_to_activate.map(|key| { + let mut repr = as GroupEncoding>::Repr::default(); + repr.as_mut().copy_from_slice(&key); + KeyFor::::from_bytes(&repr).unwrap() + }), + }), + ActionEncodable::QueueBurns(burns) => Action::QueueBurns(burns), + }) + } +} diff --git a/processor/scanner/src/substrate/mod.rs b/processor/scanner/src/substrate/mod.rs new file mode 100644 index 00000000..4963f66b --- /dev/null +++ b/processor/scanner/src/substrate/mod.rs @@ -0,0 +1,180 @@ +use core::{marker::PhantomData, future::Future}; + +use serai_db::{Get, DbTxn, Db}; + +use serai_coins_primitives::{OutInstruction, OutInstructionWithBalance}; + +use messages::substrate::ExecutedBatch; +use primitives::task::{DoesNotError, ContinuallyRan}; +use crate::{ + db::{ScannerGlobalDb, SubstrateToEventualityDb, AcknowledgedBatches}, + index, batch, ScannerFeed, KeyFor, +}; + +mod db; +use db::*; + +pub(crate) fn last_acknowledged_batch(getter: &impl Get) -> Option { + SubstrateDb::::last_acknowledged_batch(getter) +} +pub(crate) fn queue_acknowledge_batch( + txn: &mut impl DbTxn, + batch: ExecutedBatch, + burns: Vec, + key_to_activate: Option>, +) { + SubstrateDb::::queue_acknowledge_batch(txn, batch, burns, key_to_activate) +} +pub(crate) fn queue_queue_burns( + txn: &mut impl DbTxn, + burns: Vec, +) { + SubstrateDb::::queue_queue_burns(txn, burns) +} + +/* + When Serai acknowledges a Batch, we can only handle it once we've scanned the chain and generated + the same Batch ourselves. This takes the `acknowledge_batch`, `queue_burns` arguments and sits on + them until we're able to process them. 
+*/ +#[allow(non_snake_case)] +pub(crate) struct SubstrateTask { + db: D, + _S: PhantomData, +} + +impl SubstrateTask { + pub(crate) fn new(db: D) -> Self { + Self { db, _S: PhantomData } + } +} + +impl ContinuallyRan for SubstrateTask { + type Error = DoesNotError; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut made_progress = false; + loop { + // Fetch the next action to handle + let mut txn = self.db.txn(); + let Some(action) = SubstrateDb::::next_action(&mut txn) else { + drop(txn); + return Ok(made_progress); + }; + + match action { + Action::AcknowledgeBatch(AcknowledgeBatch { batch, mut burns, key_to_activate }) => { + // Check if we have the information for this batch + let Some(batch::BatchInfo { + block_number, + session_to_sign_batch, + external_key_for_session_to_sign_batch, + in_instructions_hash, + }) = batch::take_info_for_batch::(&mut txn, batch.id) + else { + // If we don't, drop this txn (restoring the action to the database) + drop(txn); + return Ok(made_progress); + }; + assert_eq!( + batch.publisher, session_to_sign_batch, + "batch acknowledged on-chain was acknowledged by an unexpected publisher" + ); + assert_eq!( + batch.external_network_block_hash, + index::block_id(&txn, block_number), + "batch acknowledged on-chain was for a distinct block" + ); + assert_eq!( + batch.in_instructions_hash, in_instructions_hash, + "batch acknowledged on-chain had distinct InInstructions" + ); + + SubstrateDb::::set_last_acknowledged_batch(&mut txn, batch.id); + AcknowledgedBatches::send( + &mut txn, + &external_key_for_session_to_sign_batch.0, + batch.id, + ); + + // Mark we made progress and handle this + made_progress = true; + + assert!( + ScannerGlobalDb::::is_block_notable(&txn, block_number), + "acknowledging a block which wasn't notable" + ); + if let Some(prior_highest_acknowledged_block) = + ScannerGlobalDb::::highest_acknowledged_block(&txn) + { + // If a single block produced multiple Batches, the block number won't increment + assert!( + block_number >= prior_highest_acknowledged_block, + "acknowledging blocks out-of-order" + ); + for b in (prior_highest_acknowledged_block + 1) .. 
block_number { + assert!( + !ScannerGlobalDb::::is_block_notable(&txn, b), + "skipped acknowledging a block which was notable" + ); + } + } + + ScannerGlobalDb::::set_highest_acknowledged_block(&mut txn, block_number); + if let Some(key_to_activate) = key_to_activate { + ScannerGlobalDb::::queue_key( + &mut txn, + block_number + S::WINDOW_LENGTH, + key_to_activate, + ); + } + + // Return the balances for any InInstructions which failed to execute + { + let return_information = batch::take_return_information::(&mut txn, batch.id) + .expect("didn't save the return information for Batch we published"); + assert_eq!( + batch.in_instruction_results.len(), + return_information.len(), + "amount of InInstruction succeededs differed from amount of return information saved" + ); + + // We map these into standard Burns + for (result, return_information) in + batch.in_instruction_results.into_iter().zip(return_information) + { + if result == messages::substrate::InInstructionResult::Succeeded { + continue; + } + + if let Some(batch::ReturnInformation { address, balance }) = return_information { + burns.push(OutInstructionWithBalance { + instruction: OutInstruction { address: address.into() }, + balance, + }); + } + } + } + + // We send these Burns as stemming from this block we just acknowledged + // This causes them to be acted on after we accumulate the outputs from this block + SubstrateToEventualityDb::send_burns::(&mut txn, block_number, burns); + } + + Action::QueueBurns(burns) => { + // We can instantly handle this so long as we've handled all prior actions + made_progress = true; + + let queue_as_of = ScannerGlobalDb::::highest_acknowledged_block(&txn) + .expect("queueing Burns yet never acknowledged a block"); + + SubstrateToEventualityDb::send_burns::(&mut txn, queue_as_of, burns); + } + } + + txn.commit(); + } + } + } +} diff --git a/processor/scheduler/primitives/Cargo.toml b/processor/scheduler/primitives/Cargo.toml new file mode 100644 index 00000000..7540dc84 --- /dev/null +++ b/processor/scheduler/primitives/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "serai-processor-scheduler-primitives" +version = "0.1.0" +description = "Primitives for schedulers for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/scheduler/primitives" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false +rust-version = "1.80" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.cargo-machete] +ignored = ["scale", "borsh"] + +[lints] +workspace = true + +[dependencies] +ciphersuite = { path = "../../../crypto/ciphersuite", default-features = false, features = ["std"] } +frost = { package = "modular-frost", path = "../../../crypto/frost", default-features = false } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +serai-db = { path = "../../../common/db" } diff --git a/processor/scheduler/primitives/LICENSE b/processor/scheduler/primitives/LICENSE new file mode 100644 index 00000000..e091b149 --- /dev/null +++ b/processor/scheduler/primitives/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software 
Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/scheduler/primitives/README.md b/processor/scheduler/primitives/README.md new file mode 100644 index 00000000..6e81249d --- /dev/null +++ b/processor/scheduler/primitives/README.md @@ -0,0 +1,3 @@ +# Scheduler Primitives + +Primitives for schedulers. diff --git a/processor/scheduler/primitives/src/lib.rs b/processor/scheduler/primitives/src/lib.rs new file mode 100644 index 00000000..3c214d15 --- /dev/null +++ b/processor/scheduler/primitives/src/lib.rs @@ -0,0 +1,77 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::marker::PhantomData; +use std::io; + +use ciphersuite::{group::GroupEncoding, Ciphersuite}; +use frost::{dkg::ThresholdKeys, sign::PreprocessMachine}; + +use serai_db::DbTxn; + +/// A transaction. +pub trait Transaction: Sized + Send { + /// Read a `Transaction`. + fn read(reader: &mut impl io::Read) -> io::Result; + /// Write a `Transaction`. + fn write(&self, writer: &mut impl io::Write) -> io::Result<()>; +} + +/// A signable transaction. +pub trait SignableTransaction: 'static + Sized + Send + Sync + Clone { + /// The underlying transaction type. + type Transaction: Transaction; + /// The ciphersuite used to sign this transaction. + type Ciphersuite: Ciphersuite; + /// The preprocess machine for the signing protocol for this transaction. + type PreprocessMachine: Clone + PreprocessMachine>; + + /// Read a `SignableTransaction`. + fn read(reader: &mut impl io::Read) -> io::Result; + /// Write a `SignableTransaction`. + fn write(&self, writer: &mut impl io::Write) -> io::Result<()>; + + /// The ID for this transaction. + /// + /// This is an internal ID arbitrarily definable so long as it's unique. + /// + /// This same ID MUST be returned by the Eventuality for this transaction. + fn id(&self) -> [u8; 32]; + + /// Sign this transaction. + fn sign(self, keys: ThresholdKeys) -> Self::PreprocessMachine; +} + +/// The transaction type for a SignableTransaction. +pub type TransactionFor = ::Transaction; + +mod db { + use serai_db::{Get, DbTxn, create_db, db_channel}; + + db_channel! { + SchedulerPrimitives { + TransactionsToSign: (key: &[u8]) -> Vec, + } + } +} + +/// The transactions to sign, as scheduled by a Scheduler. +pub struct TransactionsToSign(PhantomData); +impl TransactionsToSign { + /// Send a transaction to sign. + pub fn send(txn: &mut impl DbTxn, key: &impl GroupEncoding, tx: &T) { + let mut buf = Vec::with_capacity(128); + tx.write(&mut buf).unwrap(); + db::TransactionsToSign::send(txn, key.to_bytes().as_ref(), &buf); + } + + /// Try to receive a transaction to sign. 
+ pub fn try_recv(txn: &mut impl DbTxn, key: &impl GroupEncoding) -> Option { + let tx = db::TransactionsToSign::try_recv(txn, key.to_bytes().as_ref())?; + let mut tx = tx.as_slice(); + let res = T::read(&mut tx).unwrap(); + assert!(tx.is_empty()); + Some(res) + } +} diff --git a/processor/scheduler/smart-contract/Cargo.toml b/processor/scheduler/smart-contract/Cargo.toml new file mode 100644 index 00000000..0a2a0ff2 --- /dev/null +++ b/processor/scheduler/smart-contract/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "serai-processor-smart-contract-scheduler" +version = "0.1.0" +description = "Scheduler for a smart contract representing the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/scheduler/smart-contract" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false +rust-version = "1.80" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.cargo-machete] +ignored = ["scale", "borsh"] + +[lints] +workspace = true + +[dependencies] +group = { version = "0.13", default-features = false } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +serai-db = { path = "../../../common/db" } + +primitives = { package = "serai-processor-primitives", path = "../../primitives" } +scanner = { package = "serai-processor-scanner", path = "../../scanner" } +scheduler-primitives = { package = "serai-processor-scheduler-primitives", path = "../primitives" } diff --git a/processor/scheduler/smart-contract/LICENSE b/processor/scheduler/smart-contract/LICENSE new file mode 100644 index 00000000..e091b149 --- /dev/null +++ b/processor/scheduler/smart-contract/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/scheduler/smart-contract/README.md b/processor/scheduler/smart-contract/README.md new file mode 100644 index 00000000..0be94d20 --- /dev/null +++ b/processor/scheduler/smart-contract/README.md @@ -0,0 +1,3 @@ +# Smart Contract Scheduler + +A scheduler for a smart contract representing the Serai processor. 
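As a side note on the `TransactionsToSign` channel above: a minimal sketch of the serialization contract it relies on, using a hypothetical stand-in type rather than the real `SignableTransaction` bound. `write` must produce a self-delimiting encoding, since `try_recv` reads exactly one transaction per message and asserts the buffer is fully consumed.

```rust
// Sketch only: `StandInTx` and its fixed 32-byte encoding are assumptions for illustration.
use std::io;

struct StandInTx {
  id: [u8; 32],
}

impl StandInTx {
  fn read(reader: &mut impl io::Read) -> io::Result<Self> {
    let mut id = [0; 32];
    reader.read_exact(&mut id)?;
    Ok(StandInTx { id })
  }
  fn write(&self, writer: &mut impl io::Write) -> io::Result<()> {
    writer.write_all(&self.id)
  }
}

fn main() {
  let tx = StandInTx { id: [1; 32] };

  // What the send side does: serialize the transaction into a buffer for the DB channel
  let mut buf = Vec::with_capacity(128);
  tx.write(&mut buf).unwrap();

  // What the receive side does: read one transaction and assert nothing is left over
  let mut slice = buf.as_slice();
  let read = StandInTx::read(&mut slice).unwrap();
  assert!(slice.is_empty());
  assert_eq!(read.id, tx.id);
}
```

The real channel additionally keys each message by the group encoding of the signing key, so each key's transactions are received independently.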
diff --git a/processor/scheduler/smart-contract/src/lib.rs b/processor/scheduler/smart-contract/src/lib.rs new file mode 100644 index 00000000..0c9c690b --- /dev/null +++ b/processor/scheduler/smart-contract/src/lib.rs @@ -0,0 +1,150 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::{marker::PhantomData, future::Future}; +use std::collections::HashMap; + +use group::GroupEncoding; + +use serai_db::{Get, DbTxn, create_db}; + +use primitives::{ReceivedOutput, Payment}; +use scanner::{ + LifetimeStage, ScannerFeed, KeyFor, AddressFor, EventualityFor, BlockFor, SchedulerUpdate, + KeyScopedEventualities, Scheduler as SchedulerTrait, +}; +use scheduler_primitives::*; + +create_db! { + SmartContractScheduler { + NextNonce: () -> u64, + } +} + +/// A smart contract. +pub trait SmartContract: 'static + Send { + /// The type representing a signable transaction. + type SignableTransaction: SignableTransaction; + + /// Rotate from the retiring key to the new key. + fn rotate( + &self, + nonce: u64, + retiring_key: KeyFor, + new_key: KeyFor, + ) -> (Self::SignableTransaction, EventualityFor); + + /// Fulfill the set of payments, dropping any not worth handling. + fn fulfill( + &self, + starting_nonce: u64, + key: KeyFor, + payments: Vec>>, + ) -> Vec<(Self::SignableTransaction, EventualityFor)>; +} + +/// A scheduler for a smart contract representing the Serai processor. +#[allow(non_snake_case)] +#[derive(Clone)] +pub struct Scheduler> { + smart_contract: SC, + _S: PhantomData, +} + +impl> Scheduler { + /// Create a new scheduler. + pub fn new(smart_contract: SC) -> Self { + Self { smart_contract, _S: PhantomData } + } + + fn fulfill_payments( + &self, + txn: &mut impl DbTxn, + active_keys: &[(KeyFor, LifetimeStage)], + payments: Vec>>, + ) -> KeyScopedEventualities { + let key = match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting | + LifetimeStage::Active | + LifetimeStage::UsingNewForChange => active_keys[0].0, + LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0, + }; + + let mut nonce = NextNonce::get(txn).unwrap_or(0); + let mut eventualities = Vec::with_capacity(1); + for (signable, eventuality) in self.smart_contract.fulfill(nonce, key, payments) { + TransactionsToSign::::send(txn, &key, &signable); + nonce += 1; + eventualities.push(eventuality); + } + NextNonce::set(txn, &nonce); + HashMap::from([(key.to_bytes().as_ref().to_vec(), eventualities)]) + } +} + +impl> SchedulerTrait for Scheduler { + type EphemeralError = (); + type SignableTransaction = SC::SignableTransaction; + + fn activate_key(_txn: &mut impl DbTxn, _key: KeyFor) {} + + fn flush_key( + &self, + txn: &mut impl DbTxn, + _block: &BlockFor, + retiring_key: KeyFor, + new_key: KeyFor, + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + let nonce = NextNonce::get(txn).unwrap_or(0); + let (signable, eventuality) = self.smart_contract.rotate(nonce, retiring_key, new_key); + NextNonce::set(txn, &(nonce + 1)); + TransactionsToSign::::send(txn, &retiring_key, &signable); + Ok(HashMap::from([(retiring_key.to_bytes().as_ref().to_vec(), vec![eventuality])])) + } + } + + fn retire_key(_txn: &mut impl DbTxn, _key: KeyFor) {} + + fn update( + &self, + txn: &mut impl DbTxn, + _block: &BlockFor, + active_keys: &[(KeyFor, LifetimeStage)], + update: SchedulerUpdate, + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + // We ignore the outputs as we don't need to know our current state as it never suffers + 
// partial availability + + // We shouldn't have any forwards though + assert!(update.forwards().is_empty()); + + // Create the transactions for the returns + Ok( + self.fulfill_payments( + txn, + active_keys, + update + .returns() + .iter() + .map(|to_return| { + Payment::new(to_return.address().clone(), to_return.output().balance()) + }) + .collect::>(), + ), + ) + } + } + + fn fulfill( + &self, + txn: &mut impl DbTxn, + _block: &BlockFor, + active_keys: &[(KeyFor, LifetimeStage)], + payments: Vec>>, + ) -> impl Send + Future, Self::EphemeralError>> { + async move { Ok(self.fulfill_payments(txn, active_keys, payments)) } + } +} diff --git a/processor/scheduler/utxo/primitives/Cargo.toml b/processor/scheduler/utxo/primitives/Cargo.toml new file mode 100644 index 00000000..d1f9c8cf --- /dev/null +++ b/processor/scheduler/utxo/primitives/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "serai-processor-utxo-scheduler-primitives" +version = "0.1.0" +description = "Primitives for UTXO schedulers for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/scheduler/utxo/primitives" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false +rust-version = "1.80" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +serai-primitives = { path = "../../../../substrate/primitives", default-features = false, features = ["std"] } + +primitives = { package = "serai-processor-primitives", path = "../../../primitives" } +scanner = { package = "serai-processor-scanner", path = "../../../scanner" } +scheduler-primitives = { package = "serai-processor-scheduler-primitives", path = "../../primitives" } diff --git a/processor/scheduler/utxo/primitives/LICENSE b/processor/scheduler/utxo/primitives/LICENSE new file mode 100644 index 00000000..e091b149 --- /dev/null +++ b/processor/scheduler/utxo/primitives/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/scheduler/utxo/primitives/README.md b/processor/scheduler/utxo/primitives/README.md new file mode 100644 index 00000000..81bc954a --- /dev/null +++ b/processor/scheduler/utxo/primitives/README.md @@ -0,0 +1,3 @@ +# UTXO Scheduler Primitives + +Primitives for UTXO schedulers. 
diff --git a/processor/scheduler/utxo/primitives/src/lib.rs b/processor/scheduler/utxo/primitives/src/lib.rs new file mode 100644 index 00000000..a793c906 --- /dev/null +++ b/processor/scheduler/utxo/primitives/src/lib.rs @@ -0,0 +1,280 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::{fmt::Debug, future::Future}; + +use serai_primitives::Amount; + +use primitives::{ReceivedOutput, Payment}; +use scanner::{ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, BlockFor}; +use scheduler_primitives::*; + +mod tree; +pub use tree::*; + +/// A planned transaction. +pub struct PlannedTransaction { + /// The signable transaction. + pub signable: ST, + /// The Eventuality to watch for. + pub eventuality: EventualityFor, + /// The auxilliary data for this transaction. + pub auxilliary: A, +} + +/// A planned transaction which was created via amortizing the fee. +pub struct AmortizePlannedTransaction { + /// The amounts the included payments were worth. + /// + /// If the payments passed as an argument are sorted from highest to lowest valued, these `n` + /// amounts will be for the first `n` payments. + pub effected_payments: Vec, + /// Whether or not the planned transaction had a change output. + pub has_change: bool, + /// The signable transaction. + pub signable: ST, + /// The Eventuality to watch for. + pub eventuality: EventualityFor, + /// The auxilliary data for this transaction. + pub auxilliary: A, +} + +/// An object able to plan a transaction. +pub trait TransactionPlanner: 'static + Send + Sync { + /// An error encountered when handling planning transactions. + /// + /// This MUST be an ephemeral error. Retrying planning transactions MUST eventually resolve + /// resolve manual intervention/changing the arguments. + type EphemeralError: Debug; + + /// The type representing a signable transaction. + type SignableTransaction: SignableTransaction; + + /// The maximum amount of inputs allowed in a transaction. + const MAX_INPUTS: usize; + /// The maximum amount of outputs allowed in a transaction, including the change output. + const MAX_OUTPUTS: usize; + + /// The branch address for this key of Serai's. + fn branch_address(key: KeyFor) -> AddressFor; + /// The change address for this key of Serai's. + fn change_address(key: KeyFor) -> AddressFor; + /// The forwarding address for this key of Serai's. + fn forwarding_address(key: KeyFor) -> AddressFor; + + /// Calculate the for a tansaction with this structure. + /// + /// The fee rate, inputs, and payments, will all be for the same coin. The returned fee is + /// denominated in this coin. + fn calculate_fee( + &self, + reference_block: &BlockFor, + inputs: Vec>, + payments: Vec>>, + change: Option>, + ) -> impl Send + Future>; + + /// Plan a transaction. + /// + /// This must only require the same fee as would be returned by `calculate_fee`. The caller is + /// trusted to maintain `sum(inputs) - sum(payments) >= if change.is_some() { DUST } else { 0 }`. + /// + /// `change` will always be an address belonging to the Serai network. If it is `Some`, a change + /// output must be created. + fn plan( + &self, + reference_block: &BlockFor, + inputs: Vec>, + payments: Vec>>, + change: Option>, + ) -> impl Send + + Future< + Output = Result, Self::EphemeralError>, + >; + + /// Obtain a PlannedTransaction via amortizing the fee over the payments. 
+ /// + /// `operating_costs` is accrued to if Serai faces the burden of a fee or drops inputs not worth + /// accumulating. `operating_costs` will be amortized along with this transaction's fee as + /// possible, if there is a change output. Please see `spec/processor/UTXO Management.md` for + /// more information. + /// + /// Returns `None` if the fee exceeded the inputs, or `Some` otherwise. + // TODO: Enum for Change of None, Some, Mandatory + #[allow(clippy::type_complexity)] + fn plan_transaction_with_fee_amortization( + &self, + operating_costs: &mut u64, + reference_block: &BlockFor, + inputs: Vec>, + mut payments: Vec>>, + mut change: Option>, + ) -> impl Send + + Future< + Output = Result< + Option>, + Self::EphemeralError, + >, + > { + async move { + // If there's no change output, we can't recoup any operating costs we would amortize + // We also don't have any losses if the inputs are written off/the change output is reduced + let mut operating_costs_if_no_change = 0; + let operating_costs_in_effect = + if change.is_none() { &mut operating_costs_if_no_change } else { operating_costs }; + + // Sanity checks + { + assert!(!inputs.is_empty()); + assert!((!payments.is_empty()) || change.is_some()); + let coin = inputs.first().unwrap().balance().coin; + for input in &inputs { + assert_eq!(coin, input.balance().coin); + } + for payment in &payments { + assert_eq!(coin, payment.balance().coin); + } + assert!( + (inputs.iter().map(|input| input.balance().amount.0).sum::() + + *operating_costs_in_effect) >= + payments.iter().map(|payment| payment.balance().amount.0).sum::(), + "attempted to fulfill payments without a sufficient input set" + ); + } + + let coin = inputs.first().unwrap().balance().coin; + + // Amortization + { + // Sort payments from high amount to low amount + payments.sort_by(|a, b| a.balance().amount.0.cmp(&b.balance().amount.0).reverse()); + + let mut fee = + self.calculate_fee(reference_block, inputs.clone(), payments.clone(), change).await?.0; + let mut amortized = 0; + while !payments.is_empty() { + // We need to pay the fee, and any accrued operating costs, minus what we've already + // amortized + let adjusted_fee = (*operating_costs_in_effect + fee).saturating_sub(amortized); + + /* + Ideally, we wouldn't use a ceil div yet would be accurate about it. Any remainder could + be amortized over the largest outputs, which wouldn't be relevant here as we only work + with the smallest output. The issue is the theoretical edge case where all outputs have + the same value and are of the minimum value. In that case, none would be able to have + the remainder amortized as it'd cause them to need to be dropped. Using a ceil div + avoids this. + */ + let per_payment_fee = adjusted_fee.div_ceil(u64::try_from(payments.len()).unwrap()); + // Pop the last payment if it can't pay the fee, remaining about the dust limit as it does + if payments.last().unwrap().balance().amount.0 <= (per_payment_fee + S::dust(coin).0) { + amortized += payments.pop().unwrap().balance().amount.0; + // Recalculate the fee and try again + fee = self + .calculate_fee(reference_block, inputs.clone(), payments.clone(), change) + .await? 
+ .0; + continue; + } + // Break since all of these payments shouldn't be dropped + break; + } + + // If we couldn't amortize the fee over the payments, check if we even have enough to pay it + if payments.is_empty() { + // If we don't have a change output, we simply return here + // We no longer have anything to do here, nor any expectations + if change.is_none() { + return Ok(None); + } + + let inputs = inputs.iter().map(|input| input.balance().amount.0).sum::(); + // Checks not just if we can pay for it, yet that the would-be change output is at least + // dust + if inputs < (fee + S::dust(coin).0) { + // Write off these inputs + *operating_costs_in_effect += inputs; + // Yet also claw back the payments we dropped, as we only lost the change + // The dropped payments will be worth less than the inputs + operating_costs we started + // with, so this shouldn't use `saturating_sub` + *operating_costs_in_effect -= amortized; + return Ok(None); + } + } else { + // Since we have payments which can pay the fee we ended up with, amortize it + let adjusted_fee = (*operating_costs_in_effect + fee).saturating_sub(amortized); + let per_payment_base_fee = adjusted_fee / u64::try_from(payments.len()).unwrap(); + let payments_paying_one_atomic_unit_more = + usize::try_from(adjusted_fee % u64::try_from(payments.len()).unwrap()).unwrap(); + + for (i, payment) in payments.iter_mut().enumerate() { + let per_payment_fee = + per_payment_base_fee + u64::from(u8::from(i < payments_paying_one_atomic_unit_more)); + payment.balance().amount.0 -= per_payment_fee; + amortized += per_payment_fee; + } + assert!(amortized >= (*operating_costs_in_effect + fee)); + + // If the change is less than the dust, drop it + let would_be_change = inputs.iter().map(|input| input.balance().amount.0).sum::() - + payments.iter().map(|payment| payment.balance().amount.0).sum::() - + fee; + if would_be_change < S::dust(coin).0 { + change = None; + *operating_costs_in_effect += would_be_change; + } + } + + // Update the amount of operating costs + *operating_costs_in_effect = (*operating_costs_in_effect + fee).saturating_sub(amortized); + } + + // Because we amortized, or accrued as operating costs, the fee, make the transaction + let effected_payments = payments.iter().map(|payment| payment.balance().amount).collect(); + let has_change = change.is_some(); + + let PlannedTransaction { signable, eventuality, auxilliary } = + self.plan(reference_block, inputs, payments, change).await?; + Ok(Some(AmortizePlannedTransaction { + effected_payments, + has_change, + signable, + eventuality, + auxilliary, + })) + } + } + + /// Create a tree to fulfill a set of payments. + /// + /// Returns a `TreeTransaction` whose children (and arbitrary children of children) fulfill all + /// these payments. This tree root will be able to be made with a change output. 
+ fn tree(payments: &[Payment>]) -> TreeTransaction> { + // This variable is for the current layer of the tree being built + let mut tree = Vec::with_capacity(payments.len().div_ceil(Self::MAX_OUTPUTS)); + + // Push the branches for the leaves (the payments out) + for payments in payments.chunks(Self::MAX_OUTPUTS) { + let value = payments.iter().map(|payment| payment.balance().amount.0).sum::(); + tree.push(TreeTransaction::>::Leaves { payments: payments.to_vec(), value }); + } + + // While we haven't calculated a tree root, or the tree root doesn't support a change output, + // keep working + while (tree.len() != 1) || (tree[0].children() == Self::MAX_OUTPUTS) { + let mut branch_layer = vec![]; + for children in tree.chunks(Self::MAX_OUTPUTS) { + branch_layer.push(TreeTransaction::>::Branch { + children: children.to_vec(), + value: children.iter().map(TreeTransaction::value).sum(), + }); + } + tree = branch_layer; + } + assert_eq!(tree.len(), 1); + let tree_root = tree.remove(0); + assert!((tree_root.children() + 1) <= Self::MAX_OUTPUTS); + tree_root + } +} diff --git a/processor/scheduler/utxo/primitives/src/tree.rs b/processor/scheduler/utxo/primitives/src/tree.rs new file mode 100644 index 00000000..565706a3 --- /dev/null +++ b/processor/scheduler/utxo/primitives/src/tree.rs @@ -0,0 +1,145 @@ +use borsh::{BorshSerialize, BorshDeserialize}; + +use serai_primitives::{ExternalCoin, Amount, ExternalBalance}; + +use primitives::{Address, Payment}; +use scanner::ScannerFeed; + +/// A transaction within a tree to fulfill payments. +#[derive(Clone, BorshSerialize, BorshDeserialize)] +pub enum TreeTransaction { + /// A transaction for the leaves (payments) of the tree. + Leaves { + /// The payments within this transaction. + payments: Vec>, + /// The sum value of the payments. + value: u64, + }, + /// A transaction for the branches of the tree. + Branch { + /// The child transactions. + children: Vec, + /// The sum value of the child transactions. + value: u64, + }, +} +impl TreeTransaction { + /// How many children this transaction has. + /// + /// A child is defined as any dependent, whether payment or transaction. + pub fn children(&self) -> usize { + match self { + Self::Leaves { payments, .. } => payments.len(), + Self::Branch { children, .. } => children.len(), + } + } + + /// The value this transaction wants to spend. + pub fn value(&self) -> u64 { + match self { + Self::Leaves { value, .. } | Self::Branch { value, .. } => *value, + } + } + + /// The payments to make to enable this transaction's children. + /// + /// A child is defined as any dependent, whether payment or transaction. + /// + /// The input value given to this transaction MUST be less than or equal to the desired value. + /// The difference will be amortized over all dependents. + /// + /// Returns None if no payments should be made. Returns Some containing a non-empty Vec if any + /// payments should be made. + pub fn payments( + &self, + coin: ExternalCoin, + branch_address: &A, + input_value: u64, + ) -> Option>> { + // Fetch the amounts for the payments we'll make + let mut amounts: Vec<_> = match self { + Self::Leaves { payments, .. } => payments + .iter() + .map(|payment| { + assert_eq!(payment.balance().coin, coin); + Some(payment.balance().amount.0) + }) + .collect(), + Self::Branch { children, .. 
} => children.iter().map(|child| Some(child.value())).collect(), + }; + + // We need to reduce them so their sum is our input value + assert!(input_value <= self.value()); + let amount_to_amortize = self.value() - input_value; + + // If any payments won't survive the reduction, set them to None + let mut amortized = 0; + 'outer: while amounts.iter().any(Option::is_some) && (amortized < amount_to_amortize) { + let adjusted_fee = amount_to_amortize - amortized; + let amounts_len = + u64::try_from(amounts.iter().filter(|amount| amount.is_some()).count()).unwrap(); + let per_payment_fee_check = adjusted_fee.div_ceil(amounts_len); + + // Check each amount to see if it's not viable + let mut i = 0; + while i < amounts.len() { + if let Some(amount) = amounts[i] { + if amount.saturating_sub(per_payment_fee_check) < S::dust(coin).0 { + amounts[i] = None; + amortized += amount; + // If this amount wasn't viable, re-run with the new fee/amortization amounts + continue 'outer; + } + } + i += 1; + } + + // Now that we have the payments which will survive, reduce them + for (i, amount) in amounts.iter_mut().enumerate() { + if let Some(amount) = amount { + *amount -= adjusted_fee / amounts_len; + if i < usize::try_from(adjusted_fee % amounts_len).unwrap() { + *amount -= 1; + } + } + } + break; + } + + // Now that we have the reduced amounts, create the payments + let payments: Vec<_> = match self { + Self::Leaves { payments, .. } => { + payments + .iter() + .zip(amounts) + .filter_map(|(payment, amount)| { + amount.map(|amount| { + // The existing payment, with the new amount + Payment::new( + payment.address().clone(), + ExternalBalance { coin, amount: Amount(amount) }, + ) + }) + }) + .collect() + } + Self::Branch { .. } => { + amounts + .into_iter() + .filter_map(|amount| { + amount.map(|amount| { + // A branch output with the new amount + Payment::new(branch_address.clone(), ExternalBalance { coin, amount: Amount(amount) }) + }) + }) + .collect() + } + }; + + // Use None for vec![] so we never actually use vec![] + if payments.is_empty() { + None?; + } + Some(payments) + } +} diff --git a/processor/scheduler/utxo/standard/Cargo.toml b/processor/scheduler/utxo/standard/Cargo.toml new file mode 100644 index 00000000..e3d574ac --- /dev/null +++ b/processor/scheduler/utxo/standard/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "serai-processor-utxo-scheduler" +version = "0.1.0" +description = "Scheduler for UTXO networks for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/scheduler/utxo/standard" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false +rust-version = "1.80" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.cargo-machete] +ignored = ["scale", "borsh"] + +[lints] +workspace = true + +[dependencies] +group = { version = "0.13", default-features = false } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +serai-primitives = { path = "../../../../substrate/primitives", default-features = false, features = ["std"] } + +serai-db = { path = "../../../../common/db" } + +primitives = { package = "serai-processor-primitives", path = "../../../primitives" } +scanner = { package = "serai-processor-scanner", path = "../../../scanner" } +scheduler-primitives = { package = 
"serai-processor-scheduler-primitives", path = "../../primitives" } +utxo-scheduler-primitives = { package = "serai-processor-utxo-scheduler-primitives", path = "../primitives" } diff --git a/processor/scheduler/utxo/standard/LICENSE b/processor/scheduler/utxo/standard/LICENSE new file mode 100644 index 00000000..e091b149 --- /dev/null +++ b/processor/scheduler/utxo/standard/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/scheduler/utxo/standard/README.md b/processor/scheduler/utxo/standard/README.md new file mode 100644 index 00000000..8e5360f0 --- /dev/null +++ b/processor/scheduler/utxo/standard/README.md @@ -0,0 +1,17 @@ +# UTXO Scheduler + +A scheduler of transactions for networks premised on the UTXO model. + +### Design + +The scheduler is designed to achieve fulfillment of all expected payments with +an `O(1)` delay (regardless of prior scheduler state), `O(log n)` time, and +`O(log(n) + n)` computational complexity. + +For the time/computational complexity, we use a tree to fulfill payments. +This quickly gives us the ability to make as many outputs as necessary +(regardless of per-transaction output limits) and only has the latency of +including a chain of `O(log n)` transactions on-chain. The only computational +overhead is in creating the transactions which are branches in the tree. +Since we split off the root of the tree from a master output, the delay to start +fulfillment is the delay for the master output to re-appear on-chain. diff --git a/processor/scheduler/utxo/standard/src/db.rs b/processor/scheduler/utxo/standard/src/db.rs new file mode 100644 index 00000000..128c5df6 --- /dev/null +++ b/processor/scheduler/utxo/standard/src/db.rs @@ -0,0 +1,113 @@ +use core::marker::PhantomData; + +use group::GroupEncoding; + +use serai_primitives::{ExternalCoin, Amount, ExternalBalance}; + +use borsh::BorshDeserialize; +use serai_db::{Get, DbTxn, create_db, db_channel}; + +use primitives::{Payment, ReceivedOutput}; +use utxo_scheduler_primitives::TreeTransaction; +use scanner::{ScannerFeed, KeyFor, AddressFor, OutputFor}; + +create_db! { + UtxoScheduler { + OperatingCosts: (coin: ExternalCoin) -> Amount, + SerializedOutputs: (key: &[u8], coin: ExternalCoin) -> Vec, + SerializedQueuedPayments: (key: &[u8], coin: ExternalCoin) -> Vec, + } +} + +db_channel! 
{ + UtxoScheduler { + PendingBranch: (key: &[u8], balance: ExternalBalance) -> Vec, + } +} + +pub(crate) struct Db(PhantomData); +impl Db { + pub(crate) fn operating_costs(getter: &impl Get, coin: ExternalCoin) -> Amount { + OperatingCosts::get(getter, coin).unwrap_or(Amount(0)) + } + pub(crate) fn set_operating_costs(txn: &mut impl DbTxn, coin: ExternalCoin, amount: Amount) { + OperatingCosts::set(txn, coin, &amount) + } + + pub(crate) fn outputs( + getter: &impl Get, + key: KeyFor, + coin: ExternalCoin, + ) -> Option>> { + let buf = SerializedOutputs::get(getter, key.to_bytes().as_ref(), coin)?; + let mut buf = buf.as_slice(); + + let mut res = Vec::with_capacity(buf.len() / 128); + while !buf.is_empty() { + res.push(OutputFor::::read(&mut buf).unwrap()); + } + Some(res) + } + pub(crate) fn set_outputs( + txn: &mut impl DbTxn, + key: KeyFor, + coin: ExternalCoin, + outputs: &[OutputFor], + ) { + let mut buf = Vec::with_capacity(outputs.len() * 128); + for output in outputs { + output.write(&mut buf).unwrap(); + } + SerializedOutputs::set(txn, key.to_bytes().as_ref(), coin, &buf); + } + pub(crate) fn del_outputs(txn: &mut impl DbTxn, key: KeyFor, coin: ExternalCoin) { + SerializedOutputs::del(txn, key.to_bytes().as_ref(), coin); + } + + pub(crate) fn queued_payments( + getter: &impl Get, + key: KeyFor, + coin: ExternalCoin, + ) -> Option>>> { + let buf = SerializedQueuedPayments::get(getter, key.to_bytes().as_ref(), coin)?; + let mut buf = buf.as_slice(); + + let mut res = Vec::with_capacity(buf.len() / 128); + while !buf.is_empty() { + res.push(Payment::read(&mut buf).unwrap()); + } + Some(res) + } + pub(crate) fn set_queued_payments( + txn: &mut impl DbTxn, + key: KeyFor, + coin: ExternalCoin, + queued: &[Payment>], + ) { + let mut buf = Vec::with_capacity(queued.len() * 128); + for queued in queued { + queued.write(&mut buf).unwrap(); + } + SerializedQueuedPayments::set(txn, key.to_bytes().as_ref(), coin, &buf); + } + pub(crate) fn del_queued_payments(txn: &mut impl DbTxn, key: KeyFor, coin: ExternalCoin) { + SerializedQueuedPayments::del(txn, key.to_bytes().as_ref(), coin); + } + + pub(crate) fn queue_pending_branch( + txn: &mut impl DbTxn, + key: KeyFor, + balance: ExternalBalance, + child: &TreeTransaction>, + ) { + PendingBranch::send(txn, key.to_bytes().as_ref(), balance, &borsh::to_vec(child).unwrap()) + } + pub(crate) fn take_pending_branch( + txn: &mut impl DbTxn, + key: KeyFor, + balance: ExternalBalance, + ) -> Option>> { + PendingBranch::try_recv(txn, key.to_bytes().as_ref(), balance) + .map(|bytes| TreeTransaction::>::deserialize(&mut bytes.as_slice()).unwrap()) + } +} diff --git a/processor/scheduler/utxo/standard/src/lib.rs b/processor/scheduler/utxo/standard/src/lib.rs new file mode 100644 index 00000000..cc2e2d35 --- /dev/null +++ b/processor/scheduler/utxo/standard/src/lib.rs @@ -0,0 +1,566 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::{marker::PhantomData, future::Future}; +use std::collections::HashMap; + +use group::GroupEncoding; + +use serai_primitives::{ExternalCoin, Amount, ExternalBalance}; + +use serai_db::DbTxn; + +use primitives::{ReceivedOutput, Payment}; +use scanner::{ + LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, BlockFor, + SchedulerUpdate, KeyScopedEventualities, Scheduler as SchedulerTrait, +}; +use scheduler_primitives::*; +use utxo_scheduler_primitives::*; + +mod db; +use db::Db; + +/// A scheduler of transactions for networks premised on 
the UTXO model. +#[allow(non_snake_case)] +#[derive(Clone)] +pub struct Scheduler> { + planner: P, + _S: PhantomData, +} + +impl> Scheduler { + /// Create a new scheduler. + pub fn new(planner: P) -> Self { + Self { planner, _S: PhantomData } + } + + async fn aggregate_inputs( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + key_for_change: KeyFor, + key: KeyFor, + coin: ExternalCoin, + ) -> Result>, >::EphemeralError> { + let mut eventualities = vec![]; + + let mut operating_costs = Db::::operating_costs(txn, coin).0; + let mut outputs = Db::::outputs(txn, key, coin).unwrap(); + outputs.sort_by_key(|output| output.balance().amount.0); + while outputs.len() > P::MAX_INPUTS { + let to_aggregate = outputs.drain(.. P::MAX_INPUTS).collect::>(); + + let Some(planned) = self + .planner + .plan_transaction_with_fee_amortization( + &mut operating_costs, + block, + to_aggregate, + vec![], + Some(key_for_change), + ) + .await? + else { + continue; + }; + + TransactionsToSign::::send(txn, &key, &planned.signable); + eventualities.push(planned.eventuality); + } + + Db::::set_outputs(txn, key, coin, &outputs); + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + Ok(eventualities) + } + + fn fulfillable_payments( + txn: &mut impl DbTxn, + operating_costs: &mut u64, + key: KeyFor, + coin: ExternalCoin, + value_of_outputs: u64, + ) -> Vec>> { + // Fetch all payments for this key + let mut payments = Db::::queued_payments(txn, key, coin).unwrap(); + if payments.is_empty() { + return vec![]; + } + + loop { + // inputs must be >= (payments - operating costs) + // Accordingly, (inputs + operating costs) must be >= payments + let value_fulfillable = value_of_outputs + *operating_costs; + + // Drop to just the payments we can currently fulfill + { + let mut can_handle = 0; + let mut value_used = 0; + for payment in &payments { + value_used += payment.balance().amount.0; + if value_fulfillable < value_used { + break; + } + can_handle += 1; + } + + let remaining_payments = payments.drain(can_handle ..).collect::>(); + // Restore the rest to the database + Db::::set_queued_payments(txn, key, coin, &remaining_payments); + } + + // If these payments are worth less than the operating costs, immediately drop them + let payments_value = payments.iter().map(|payment| payment.balance().amount.0).sum::(); + if payments_value <= *operating_costs { + *operating_costs -= payments_value; + Db::::set_operating_costs(txn, coin, Amount(*operating_costs)); + + // Reset payments to the queued payments + payments = Db::::queued_payments(txn, key, coin).unwrap(); + // If there's no more payments, stop looking for which payments we should fulfill + if payments.is_empty() { + return vec![]; + } + // Find which of these we should handle + continue; + } + + return payments; + } + } + + fn queue_branches( + txn: &mut impl DbTxn, + key: KeyFor, + coin: ExternalCoin, + effected_payments: Vec, + tx: TreeTransaction>, + ) { + match tx { + TreeTransaction::Leaves { .. } => {} + TreeTransaction::Branch { mut children, .. } => { + children.sort_by_key(TreeTransaction::value); + children.reverse(); + + /* + This may only be a subset of payments but it'll be the originally-highest-valued + payments. `zip` will truncate to the first children which will be the highest-valued + children thanks to our sort. 
+ */ + for (amount, child) in effected_payments.into_iter().zip(children) { + Db::::queue_pending_branch(txn, key, ExternalBalance { coin, amount }, &child); + } + } + } + } + + async fn handle_branch( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + eventualities: &mut Vec>, + output: OutputFor, + tx: TreeTransaction>, + ) -> Result>::EphemeralError> { + let key = output.key(); + let coin = output.balance().coin; + let Some(payments) = tx.payments::(coin, &P::branch_address(key), output.balance().amount.0) + else { + // If this output has become too small to satisfy this branch, drop it + return Ok(false); + }; + + let Some(planned) = self + .planner + .plan_transaction_with_fee_amortization( + // Uses 0 as there's no operating costs to incur/amortize here + &mut 0, + block, + vec![output], + payments, + None, + ) + .await? + else { + // This Branch isn't viable, so drop it (and its children) + return Ok(false); + }; + + TransactionsToSign::::send(txn, &key, &planned.signable); + eventualities.push(planned.eventuality); + + Self::queue_branches(txn, key, coin, planned.effected_payments, tx); + + Ok(true) + } + + async fn step( + &self, + txn: &mut impl DbTxn, + active_keys: &[(KeyFor, LifetimeStage)], + block: &BlockFor, + key: KeyFor, + ) -> Result>, >::EphemeralError> { + let mut eventualities = vec![]; + + let key_for_change = match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting => { + panic!("expected to fulfill payments despite not reporting for the oldest key") + } + LifetimeStage::Active => active_keys[0].0, + LifetimeStage::UsingNewForChange | LifetimeStage::Forwarding | LifetimeStage::Finishing => { + active_keys[1].0 + } + }; + let branch_address = P::branch_address(key); + + 'coin: for coin in S::NETWORK.coins() { + // Perform any input aggregation we should + eventualities + .append(&mut self.aggregate_inputs(txn, block, key_for_change, key, coin).await?); + + // Fetch the operating costs/outputs + let mut operating_costs = Db::::operating_costs(txn, coin).0; + let outputs = Db::::outputs(txn, key, coin).unwrap(); + if outputs.is_empty() { + continue; + } + + // Fetch the fulfillable payments + let payments = Self::fulfillable_payments( + txn, + &mut operating_costs, + key, + coin, + outputs.iter().map(|output| output.balance().amount.0).sum(), + ); + if payments.is_empty() { + continue; + } + + // Create a tree to fulfill the payments + let mut tree = vec![P::tree(&payments)]; + + // Create the transaction for the root of the tree + // Try creating this transaction twice, once with a change output and once with increased + // operating costs to ensure a change output (as necessary to meet the requirements of the + // scanner API) + let mut planned_outer = None; + for i in 0 .. 2 { + let Some(planned) = self + .planner + .plan_transaction_with_fee_amortization( + &mut operating_costs, + block, + outputs.clone(), + tree[0] + .payments::(coin, &branch_address, tree[0].value()) + .expect("payments were dropped despite providing an input of the needed value"), + Some(key_for_change), + ) + .await? 
+ else { + // This should trip on the first iteration or not at all + assert_eq!(i, 0); + // This doesn't have inputs even worth aggregating so drop the entire tree + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + continue 'coin; + }; + + // If this doesn't have a change output, increase operating costs and try again + if !planned.has_change { + /* + Since we'll create a change output if it's worth at least dust, amortizing dust from + the payments should solve this. If the new transaction can't afford those operating + costs, then the payments should be amortized out, causing there to be a change or no + transaction at all. + */ + operating_costs += S::dust(coin).0; + continue; + } + + // Since this had a change output, move forward with it + planned_outer = Some(planned); + break; + } + let Some(planned) = planned_outer else { + panic!("couldn't create a tree root with a change output") + }; + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + TransactionsToSign::::send(txn, &key, &planned.signable); + eventualities.push(planned.eventuality); + + // Now save the next layer of the tree to the database + // We'll execute it when it appears + Self::queue_branches(txn, key, coin, planned.effected_payments, tree.remove(0)); + } + + Ok(eventualities) + } + + async fn flush_outputs( + &self, + txn: &mut impl DbTxn, + eventualities: &mut KeyScopedEventualities, + block: &BlockFor, + from: KeyFor, + to: KeyFor, + coin: ExternalCoin, + ) -> Result<(), >::EphemeralError> { + let from_bytes = from.to_bytes().as_ref().to_vec(); + // Ensure our inputs are aggregated + eventualities + .entry(from_bytes.clone()) + .or_insert(vec![]) + .append(&mut self.aggregate_inputs(txn, block, to, from, coin).await?); + + // Now that our inputs are aggregated, transfer all of them to the new key + let mut operating_costs = Db::::operating_costs(txn, coin).0; + let outputs = Db::::outputs(txn, from, coin).unwrap(); + if outputs.is_empty() { + return Ok(()); + } + let planned = self + .planner + .plan_transaction_with_fee_amortization( + &mut operating_costs, + block, + outputs, + vec![], + Some(to), + ) + .await?; + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + let Some(planned) = planned else { return Ok(()) }; + + TransactionsToSign::::send(txn, &from, &planned.signable); + eventualities.get_mut(&from_bytes).unwrap().push(planned.eventuality); + + Ok(()) + } +} + +impl> SchedulerTrait for Scheduler { + type EphemeralError = P::EphemeralError; + type SignableTransaction = P::SignableTransaction; + + fn activate_key(txn: &mut impl DbTxn, key: KeyFor) { + for coin in S::NETWORK.coins() { + assert!(Db::::outputs(txn, key, coin).is_none()); + Db::::set_outputs(txn, key, coin, &[]); + assert!(Db::::queued_payments(txn, key, coin).is_none()); + Db::::set_queued_payments(txn, key, coin, &[]); + } + } + + fn flush_key( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + retiring_key: KeyFor, + new_key: KeyFor, + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + let mut eventualities = HashMap::new(); + for coin in S::NETWORK.coins() { + // Move the payments to the new key + { + let still_queued = Db::::queued_payments(txn, retiring_key, coin).unwrap(); + let mut new_queued = Db::::queued_payments(txn, new_key, coin).unwrap(); + + let mut queued = still_queued; + queued.append(&mut new_queued); + + Db::::set_queued_payments(txn, retiring_key, coin, &[]); + Db::::set_queued_payments(txn, new_key, coin, &queued); + } + + // Move the outputs to the new key + 
self.flush_outputs(txn, &mut eventualities, block, retiring_key, new_key, coin).await?; + } + Ok(eventualities) + } + } + + fn retire_key(txn: &mut impl DbTxn, key: KeyFor) { + for coin in S::NETWORK.coins() { + assert!(Db::::outputs(txn, key, coin).unwrap().is_empty()); + Db::::del_outputs(txn, key, coin); + assert!(Db::::queued_payments(txn, key, coin).unwrap().is_empty()); + Db::::del_queued_payments(txn, key, coin); + } + } + + fn update( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + active_keys: &[(KeyFor, LifetimeStage)], + update: SchedulerUpdate, + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + let mut eventualities = HashMap::new(); + + // Accumulate the new outputs + { + let mut outputs_by_key = HashMap::new(); + for output in update.outputs() { + // If this aligns for a branch, handle it + if let Some(branch) = Db::::take_pending_branch(txn, output.key(), output.balance()) { + if self + .handle_branch( + txn, + block, + eventualities.entry(output.key().to_bytes().as_ref().to_vec()).or_insert(vec![]), + output.clone(), + branch, + ) + .await? + { + // If we could use it for a branch, we do and move on + // Else, we let it be accumulated by the standard accumulation code + continue; + } + } + + let coin = output.balance().coin; + outputs_by_key + // Index by key and coin + .entry((output.key().to_bytes().as_ref().to_vec(), coin)) + // If we haven't accumulated here prior, read the outputs from the database + .or_insert_with(|| (output.key(), Db::::outputs(txn, output.key(), coin).unwrap())) + .1 + .push(output.clone()); + } + // Write the outputs back to the database + for ((_key_vec, coin), (key, outputs)) in outputs_by_key { + Db::::set_outputs(txn, key, coin, &outputs); + } + } + + // Fulfill the payments we prior couldn't + for (key, _stage) in active_keys { + eventualities + .entry(key.to_bytes().as_ref().to_vec()) + .or_insert(vec![]) + .append(&mut self.step(txn, active_keys, block, *key).await?); + } + + // If this key has been flushed, forward all outputs + match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting | + LifetimeStage::Active | + LifetimeStage::UsingNewForChange => {} + LifetimeStage::Forwarding | LifetimeStage::Finishing => { + for coin in S::NETWORK.coins() { + self + .flush_outputs( + txn, + &mut eventualities, + block, + active_keys[0].0, + active_keys[1].0, + coin, + ) + .await?; + } + } + } + + // Create the transactions for the forwards/returns + { + let mut planned_txs = vec![]; + for forward in update.forwards() { + let key = forward.key(); + + assert_eq!(active_keys.len(), 2); + assert_eq!(active_keys[0].1, LifetimeStage::Forwarding); + assert_eq!(active_keys[1].1, LifetimeStage::Active); + let forward_to_key = active_keys[1].0; + + let Some(plan) = self + .planner + .plan_transaction_with_fee_amortization( + // This uses 0 for the operating costs as we don't incur any here + // If the output can't pay for itself to be forwarded, we simply drop it + &mut 0, + block, + vec![forward.clone()], + vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance())], + None, + ) + .await? 
+ else { + continue; + }; + planned_txs.push((key, plan)); + } + for to_return in update.returns() { + let key = to_return.output().key(); + let out_instruction = + Payment::new(to_return.address().clone(), to_return.output().balance()); + let Some(plan) = self + .planner + .plan_transaction_with_fee_amortization( + // This uses 0 for the operating costs as we don't incur any here + // If the output can't pay for itself to be returned, we simply drop it + &mut 0, + block, + vec![to_return.output().clone()], + vec![out_instruction], + None, + ) + .await? + else { + continue; + }; + planned_txs.push((key, plan)); + } + + for (key, planned_tx) in planned_txs { + // Send the transactions off for signing + TransactionsToSign::::send(txn, &key, &planned_tx.signable); + + // Insert the Eventualities into the result + eventualities.get_mut(key.to_bytes().as_ref()).unwrap().push(planned_tx.eventuality); + } + + Ok(eventualities) + } + } + } + + fn fulfill( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + active_keys: &[(KeyFor, LifetimeStage)], + payments: Vec>>, + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + // Find the key to filfill these payments with + let fulfillment_key = match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting => { + panic!("expected to fulfill payments despite not reporting for the oldest key") + } + LifetimeStage::Active | LifetimeStage::UsingNewForChange => active_keys[0].0, + LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0, + }; + + // Queue the payments for this key + for coin in S::NETWORK.coins() { + let mut queued_payments = Db::::queued_payments(txn, fulfillment_key, coin).unwrap(); + queued_payments + .extend(payments.iter().filter(|payment| payment.balance().coin == coin).cloned()); + Db::::set_queued_payments(txn, fulfillment_key, coin, &queued_payments); + } + + // Handle the queued payments + Ok(HashMap::from([( + fulfillment_key.to_bytes().as_ref().to_vec(), + self.step(txn, active_keys, block, fulfillment_key).await?, + )])) + } + } +} diff --git a/processor/scheduler/utxo/transaction-chaining/Cargo.toml b/processor/scheduler/utxo/transaction-chaining/Cargo.toml new file mode 100644 index 00000000..f8a676f8 --- /dev/null +++ b/processor/scheduler/utxo/transaction-chaining/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "serai-processor-transaction-chaining-scheduler" +version = "0.1.0" +description = "Scheduler for UTXO networks with transaction chaining for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/scheduler/utxo/transaction-chaining" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false +rust-version = "1.80" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.cargo-machete] +ignored = ["scale", "borsh"] + +[lints] +workspace = true + +[dependencies] +group = { version = "0.13", default-features = false } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +serai-primitives = { path = "../../../../substrate/primitives", default-features = false, features = ["std"] } + +serai-db = { path = "../../../../common/db" } + +primitives = { package = "serai-processor-primitives", path = "../../../primitives" } +scanner = { package = "serai-processor-scanner", path = "../../../scanner" } 
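The tree fulfillment strategy used above (see `P::tree` and the scheduler READMEs) chunks payments into leaf transactions of at most `MAX_OUTPUTS` payments, then stacks branch layers until a single root remains with room for a change output. A minimal standalone sketch of that construction, with payments reduced to plain `u64` amounts and an assumed fan-out of 4 standing in for the planner's real `MAX_OUTPUTS` and the full `Payment`/`TreeTransaction` types:

```rust
#[derive(Clone, Debug)]
enum Node {
  // A leaf transaction paying out a chunk of payments
  Leaves { payments: Vec<u64>, value: u64 },
  // A branch transaction funding up to MAX_OUTPUTS children
  Branch { children: Vec<Node>, value: u64 },
}

impl Node {
  fn value(&self) -> u64 {
    match self {
      Node::Leaves { value, .. } | Node::Branch { value, .. } => *value,
    }
  }
  fn children(&self) -> usize {
    match self {
      Node::Leaves { payments, .. } => payments.len(),
      Node::Branch { children, .. } => children.len(),
    }
  }
}

// Hypothetical fan-out; the real schedulers use the planner's MAX_OUTPUTS
const MAX_OUTPUTS: usize = 4;

fn tree(payments: &[u64]) -> Node {
  assert!(!payments.is_empty(), "building a tree over no payments");

  // Leaf layer: one transaction per chunk of up to MAX_OUTPUTS payments
  let mut layer: Vec<Node> = payments
    .chunks(MAX_OUTPUTS)
    .map(|chunk| Node::Leaves { payments: chunk.to_vec(), value: chunk.iter().sum() })
    .collect();

  // Collapse layers until a single root remains which still has room for one more
  // (change) output
  while (layer.len() != 1) || (layer[0].children() == MAX_OUTPUTS) {
    layer = layer
      .chunks(MAX_OUTPUTS)
      .map(|chunk| Node::Branch {
        value: chunk.iter().map(Node::value).sum(),
        children: chunk.to_vec(),
      })
      .collect();
  }
  layer.remove(0)
}

fn main() {
  // 10 payments with a fan-out of 4 yields one root branch over three leaf transactions
  let root = tree(&[100; 10]);
  assert_eq!(root.value(), 1000);
  assert!(root.children() + 1 <= MAX_OUTPUTS);
  println!("{root:?}");
}
```

With this shape, fulfilling `n` payments only requires a chain of `O(log n)` transactions on-chain, matching the complexity claimed in the scheduler READMEs.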
+scheduler-primitives = { package = "serai-processor-scheduler-primitives", path = "../../primitives" } +utxo-scheduler-primitives = { package = "serai-processor-utxo-scheduler-primitives", path = "../primitives" } diff --git a/processor/scheduler/utxo/transaction-chaining/LICENSE b/processor/scheduler/utxo/transaction-chaining/LICENSE new file mode 100644 index 00000000..e091b149 --- /dev/null +++ b/processor/scheduler/utxo/transaction-chaining/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/scheduler/utxo/transaction-chaining/README.md b/processor/scheduler/utxo/transaction-chaining/README.md new file mode 100644 index 00000000..a129b669 --- /dev/null +++ b/processor/scheduler/utxo/transaction-chaining/README.md @@ -0,0 +1,19 @@ +# Transaction Chaining Scheduler + +A scheduler of transactions for networks premised on the UTXO model which +support transaction chaining. Transaction chaining refers to the ability to +obtain an identifier for an output within a transaction not yet signed usable +to build and sign a transaction spending it. + +### Design + +The scheduler is designed to achieve fulfillment of all expected payments with +an `O(1)` delay (regardless of prior scheduler state), `O(log n)` time, and +`O(log(n) + n)` computational complexity. + +Due to the ability to chain transactions, we can immediately plan/sign dependent +transactions. For the time/computational complexity, we use a tree to fulfill +payments. This quickly gives us the ability to make as many outputs as necessary +(regardless of per-transaction output limits) and only has the latency of +including a chain of `O(log n)` transactions on-chain. The only computational +overhead is in creating the transactions which are branches in the tree. diff --git a/processor/scheduler/utxo/transaction-chaining/src/db.rs b/processor/scheduler/utxo/transaction-chaining/src/db.rs new file mode 100644 index 00000000..68558e6f --- /dev/null +++ b/processor/scheduler/utxo/transaction-chaining/src/db.rs @@ -0,0 +1,104 @@ +use core::marker::PhantomData; + +use group::GroupEncoding; + +use serai_primitives::{ExternalCoin, Amount}; + +use serai_db::{Get, DbTxn, create_db}; + +use primitives::{Payment, ReceivedOutput}; +use scanner::{ScannerFeed, KeyFor, AddressFor, OutputFor}; + +create_db! 
{ + TransactionChainingScheduler { + OperatingCosts: (coin: ExternalCoin) -> Amount, + SerializedOutputs: (key: &[u8], coin: ExternalCoin) -> Vec, + AlreadyAccumulatedOutput: (id: &[u8]) -> (), + // We should be immediately able to schedule the fulfillment of payments, yet this may not be + // possible if we're in the middle of a multisig rotation (as our output set will be split) + SerializedQueuedPayments: (key: &[u8], coin: ExternalCoin) -> Vec, + } +} + +pub(crate) struct Db(PhantomData); +impl Db { + pub(crate) fn operating_costs(getter: &impl Get, coin: ExternalCoin) -> Amount { + OperatingCosts::get(getter, coin).unwrap_or(Amount(0)) + } + pub(crate) fn set_operating_costs(txn: &mut impl DbTxn, coin: ExternalCoin, amount: Amount) { + OperatingCosts::set(txn, coin, &amount) + } + + pub(crate) fn outputs( + getter: &impl Get, + key: KeyFor, + coin: ExternalCoin, + ) -> Option>> { + let buf = SerializedOutputs::get(getter, key.to_bytes().as_ref(), coin)?; + let mut buf = buf.as_slice(); + + let mut res = Vec::with_capacity(buf.len() / 128); + while !buf.is_empty() { + res.push(OutputFor::::read(&mut buf).unwrap()); + } + Some(res) + } + pub(crate) fn set_outputs( + txn: &mut impl DbTxn, + key: KeyFor, + coin: ExternalCoin, + outputs: &[OutputFor], + ) { + let mut buf = Vec::with_capacity(outputs.len() * 128); + for output in outputs { + output.write(&mut buf).unwrap(); + } + SerializedOutputs::set(txn, key.to_bytes().as_ref(), coin, &buf); + } + pub(crate) fn del_outputs(txn: &mut impl DbTxn, key: KeyFor, coin: ExternalCoin) { + SerializedOutputs::del(txn, key.to_bytes().as_ref(), coin); + } + + pub(crate) fn set_already_accumulated_output( + txn: &mut impl DbTxn, + output: & as ReceivedOutput, AddressFor>>::Id, + ) { + AlreadyAccumulatedOutput::set(txn, output.as_ref(), &()); + } + pub(crate) fn take_if_already_accumulated_output( + txn: &mut impl DbTxn, + output: & as ReceivedOutput, AddressFor>>::Id, + ) -> bool { + AlreadyAccumulatedOutput::take(txn, output.as_ref()).is_some() + } + + pub(crate) fn queued_payments( + getter: &impl Get, + key: KeyFor, + coin: ExternalCoin, + ) -> Option>>> { + let buf = SerializedQueuedPayments::get(getter, key.to_bytes().as_ref(), coin)?; + let mut buf = buf.as_slice(); + + let mut res = Vec::with_capacity(buf.len() / 128); + while !buf.is_empty() { + res.push(Payment::read(&mut buf).unwrap()); + } + Some(res) + } + pub(crate) fn set_queued_payments( + txn: &mut impl DbTxn, + key: KeyFor, + coin: ExternalCoin, + queued: &[Payment>], + ) { + let mut buf = Vec::with_capacity(queued.len() * 128); + for queued in queued { + queued.write(&mut buf).unwrap(); + } + SerializedQueuedPayments::set(txn, key.to_bytes().as_ref(), coin, &buf); + } + pub(crate) fn del_queued_payments(txn: &mut impl DbTxn, key: KeyFor, coin: ExternalCoin) { + SerializedQueuedPayments::del(txn, key.to_bytes().as_ref(), coin); + } +} diff --git a/processor/scheduler/utxo/transaction-chaining/src/lib.rs b/processor/scheduler/utxo/transaction-chaining/src/lib.rs new file mode 100644 index 00000000..5f7275ce --- /dev/null +++ b/processor/scheduler/utxo/transaction-chaining/src/lib.rs @@ -0,0 +1,584 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::{marker::PhantomData, future::Future}; +use std::collections::HashMap; + +use group::GroupEncoding; + +use serai_primitives::{ExternalCoin, Amount}; + +use serai_db::DbTxn; + +use primitives::{OutputType, ReceivedOutput, Payment}; +use scanner::{ + LifetimeStage, 
ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, BlockFor, + SchedulerUpdate, KeyScopedEventualities, Scheduler as SchedulerTrait, +}; +use scheduler_primitives::*; +use utxo_scheduler_primitives::*; + +mod db; +use db::Db; + +/// The outputs which will be effected by a PlannedTransaction and received by Serai. +pub struct EffectedReceivedOutputs(pub Vec>); + +/// A scheduler of transactions for networks premised on the UTXO model which support +/// transaction chaining. +#[allow(non_snake_case)] +#[derive(Clone)] +pub struct Scheduler>> { + planner: P, + _S: PhantomData, +} + +impl>> Scheduler { + /// Create a new scheduler. + pub fn new(planner: P) -> Self { + Self { planner, _S: PhantomData } + } + + fn accumulate_outputs(txn: &mut impl DbTxn, outputs: Vec>, from_scanner: bool) { + let mut outputs_by_key = HashMap::new(); + for output in outputs { + if !from_scanner { + // Since this isn't being reported by the scanner, flag it so when the scanner does report + // it, we don't accumulate it again + Db::::set_already_accumulated_output(txn, &output.id()); + } else if Db::::take_if_already_accumulated_output(txn, &output.id()) { + continue; + } + + let coin = output.balance().coin; + outputs_by_key + // Index by key and coin + .entry((output.key().to_bytes().as_ref().to_vec(), coin)) + // If we haven't accumulated here prior, read the outputs from the database + .or_insert_with(|| (output.key(), Db::::outputs(txn, output.key(), coin).unwrap())) + .1 + .push(output); + } + // Write the outputs back to the database + for ((_key_vec, coin), (key, outputs)) in outputs_by_key { + Db::::set_outputs(txn, key, coin, &outputs); + } + } + + async fn aggregate_inputs( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + key_for_change: KeyFor, + key: KeyFor, + coin: ExternalCoin, + ) -> Result>, >::EphemeralError> { + let mut eventualities = vec![]; + + let mut operating_costs = Db::::operating_costs(txn, coin).0; + let mut outputs = Db::::outputs(txn, key, coin).unwrap(); + while outputs.len() > P::MAX_INPUTS { + let to_aggregate = outputs.drain(.. P::MAX_INPUTS).collect::>(); + Db::::set_outputs(txn, key, coin, &outputs); + + let Some(planned) = self + .planner + .plan_transaction_with_fee_amortization( + &mut operating_costs, + block, + to_aggregate, + vec![], + Some(key_for_change), + ) + .await? 
+ else { + continue; + }; + + TransactionsToSign::::send(txn, &key, &planned.signable); + eventualities.push(planned.eventuality); + Self::accumulate_outputs(txn, planned.auxilliary.0, false); + + // Reload the outputs for the next loop iteration + outputs = Db::::outputs(txn, key, coin).unwrap(); + } + + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + Ok(eventualities) + } + + fn fulfillable_payments( + txn: &mut impl DbTxn, + operating_costs: &mut u64, + key: KeyFor, + coin: ExternalCoin, + value_of_outputs: u64, + ) -> Vec>> { + // Fetch all payments for this key + let mut payments = Db::::queued_payments(txn, key, coin).unwrap(); + if payments.is_empty() { + return vec![]; + } + + loop { + // inputs must be >= (payments - operating costs) + // Accordingly, (inputs + operating costs) must be >= payments + let value_fulfillable = value_of_outputs + *operating_costs; + + // Drop to just the payments we can currently fulfill + { + let mut can_handle = 0; + let mut value_used = 0; + for payment in &payments { + value_used += payment.balance().amount.0; + if value_fulfillable < value_used { + break; + } + can_handle += 1; + } + + let remaining_payments = payments.drain(can_handle ..).collect::>(); + // Restore the rest to the database + Db::::set_queued_payments(txn, key, coin, &remaining_payments); + } + + // If these payments are worth less than the operating costs, immediately drop them + let payments_value = payments.iter().map(|payment| payment.balance().amount.0).sum::(); + if payments_value <= *operating_costs { + *operating_costs -= payments_value; + Db::::set_operating_costs(txn, coin, Amount(*operating_costs)); + + // Reset payments to the queued payments + payments = Db::::queued_payments(txn, key, coin).unwrap(); + // If there's no more payments, stop looking for which payments we should fulfill + if payments.is_empty() { + return vec![]; + } + // Find which of these we should handle + continue; + } + + return payments; + } + } + + async fn step( + &self, + txn: &mut impl DbTxn, + active_keys: &[(KeyFor, LifetimeStage)], + block: &BlockFor, + key: KeyFor, + ) -> Result>, >::EphemeralError> { + let mut eventualities = vec![]; + + let key_for_change = match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting => { + panic!("expected to fulfill payments despite not reporting for the oldest key") + } + LifetimeStage::Active => active_keys[0].0, + LifetimeStage::UsingNewForChange | LifetimeStage::Forwarding | LifetimeStage::Finishing => { + active_keys[1].0 + } + }; + let branch_address = P::branch_address(key); + + 'coin: for coin in S::NETWORK.coins() { + // Perform any input aggregation we should + eventualities + .append(&mut self.aggregate_inputs(txn, block, key_for_change, key, coin).await?); + + // Fetch the operating costs/outputs + let mut operating_costs = Db::::operating_costs(txn, coin).0; + let outputs = Db::::outputs(txn, key, coin).unwrap(); + if outputs.is_empty() { + continue; + } + + // Fetch the fulfillable payments + let payments = Self::fulfillable_payments( + txn, + &mut operating_costs, + key, + coin, + outputs.iter().map(|output| output.balance().amount.0).sum(), + ); + if payments.is_empty() { + continue; + } + + // If this is our only key, we should be able to fulfill all payments + // Else, we'd be insolvent + if active_keys.len() == 1 { + assert!(Db::::queued_payments(txn, key, coin).unwrap().is_empty()); + } + + // Create a tree to fulfill the payments + let mut tree = vec![P::tree(&payments)]; + + // Create the transaction for the 
root of the tree + let mut branch_outputs = { + // Try creating this transaction twice, once with a change output and once with increased + // operating costs to ensure a change output (as necessary to meet the requirements of the + // scanner API) + let mut planned_outer = None; + for i in 0 .. 2 { + let Some(planned) = self + .planner + .plan_transaction_with_fee_amortization( + &mut operating_costs, + block, + outputs.clone(), + tree[0] + .payments::(coin, &branch_address, tree[0].value()) + .expect("payments were dropped despite providing an input of the needed value"), + Some(key_for_change), + ) + .await? + else { + // This should trip on the first iteration or not at all + assert_eq!(i, 0); + // This doesn't have inputs even worth aggregating so drop the entire tree + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + continue 'coin; + }; + + // If this doesn't have a change output, increase operating costs and try again + if !planned.has_change { + /* + Since we'll create a change output if it's worth at least dust, amortizing dust from + the payments should solve this. If the new transaction can't afford those operating + costs, then the payments should be amortized out, causing there to be a change or no + transaction at all. + */ + operating_costs += S::dust(coin).0; + continue; + } + + // Since this had a change output, move forward with it + planned_outer = Some(planned); + break; + } + let Some(mut planned) = planned_outer else { + panic!("couldn't create a tree root with a change output") + }; + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + TransactionsToSign::::send(txn, &key, &planned.signable); + eventualities.push(planned.eventuality); + + // We accumulate the change output, but not the branches as we'll consume them momentarily + Self::accumulate_outputs( + txn, + planned + .auxilliary + .0 + .iter() + .filter(|output| output.kind() == OutputType::Change) + .cloned() + .collect(), + false, + ); + planned.auxilliary.0.retain(|output| output.kind() == OutputType::Branch); + planned.auxilliary.0 + }; + + // Now execute each layer of the tree + tree = match tree.remove(0) { + TreeTransaction::Leaves { .. } => vec![], + TreeTransaction::Branch { children, .. } => children, + }; + while !tree.is_empty() { + // Sort the branch outputs by their value (high to low) + branch_outputs.sort_by_key(|a| a.balance().amount.0); + branch_outputs.reverse(); + // Sort the transactions we should create by their value so they share an order with the + // branch outputs + tree.sort_by_key(TreeTransaction::value); + tree.reverse(); + + // If we dropped any Branch outputs, drop the associated children + tree.truncate(branch_outputs.len()); + assert_eq!(branch_outputs.len(), tree.len()); + + let branch_outputs_for_this_layer = branch_outputs; + let this_layer = tree; + branch_outputs = vec![]; + tree = vec![]; + + for (branch_output, tx) in branch_outputs_for_this_layer.into_iter().zip(this_layer) { + assert_eq!(branch_output.kind(), OutputType::Branch); + + let Some(payments) = + tx.payments::(coin, &branch_address, branch_output.balance().amount.0) + else { + // If this output has become too small to satisfy this branch, drop it + continue; + }; + + let branch_output_id = branch_output.id(); + let Some(mut planned) = self + .planner + .plan_transaction_with_fee_amortization( + // Uses 0 as there's no operating costs to incur/amortize here + &mut 0, + block, + vec![branch_output], + payments, + None, + ) + .await? 
+ else { + // This Branch isn't viable, so drop it (and its children) + continue; + }; + // Since we've made a TX spending this output, don't accumulate it later + Db::::set_already_accumulated_output(txn, &branch_output_id); + TransactionsToSign::::send(txn, &key, &planned.signable); + eventualities.push(planned.eventuality); + + match tx { + TreeTransaction::Leaves { .. } => {} + // If this was a branch, handle its children + TreeTransaction::Branch { mut children, .. } => { + branch_outputs.append(&mut planned.auxilliary.0); + tree.append(&mut children); + } + } + } + } + } + + Ok(eventualities) + } + + async fn flush_outputs( + &self, + txn: &mut impl DbTxn, + eventualities: &mut KeyScopedEventualities, + block: &BlockFor, + from: KeyFor, + to: KeyFor, + coin: ExternalCoin, + ) -> Result<(), >::EphemeralError> { + let from_bytes = from.to_bytes().as_ref().to_vec(); + // Ensure our inputs are aggregated + eventualities + .entry(from_bytes.clone()) + .or_insert(vec![]) + .append(&mut self.aggregate_inputs(txn, block, to, from, coin).await?); + + // Now that our inputs are aggregated, transfer all of them to the new key + let mut operating_costs = Db::::operating_costs(txn, coin).0; + let outputs = Db::::outputs(txn, from, coin).unwrap(); + if outputs.is_empty() { + return Ok(()); + } + let planned = self + .planner + .plan_transaction_with_fee_amortization( + &mut operating_costs, + block, + outputs, + vec![], + Some(to), + ) + .await?; + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + let Some(planned) = planned else { return Ok(()) }; + + TransactionsToSign::::send(txn, &from, &planned.signable); + eventualities.get_mut(&from_bytes).unwrap().push(planned.eventuality); + Self::accumulate_outputs(txn, planned.auxilliary.0, false); + + Ok(()) + } +} + +impl>> SchedulerTrait + for Scheduler +{ + type EphemeralError = P::EphemeralError; + type SignableTransaction = P::SignableTransaction; + + fn activate_key(txn: &mut impl DbTxn, key: KeyFor) { + for coin in S::NETWORK.coins() { + assert!(Db::::outputs(txn, key, coin).is_none()); + Db::::set_outputs(txn, key, coin, &[]); + assert!(Db::::queued_payments(txn, key, coin).is_none()); + Db::::set_queued_payments(txn, key, coin, &[]); + } + } + + fn flush_key( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + retiring_key: KeyFor, + new_key: KeyFor, + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + let mut eventualities = HashMap::new(); + for coin in S::NETWORK.coins() { + // Move the payments to the new key + { + let still_queued = Db::::queued_payments(txn, retiring_key, coin).unwrap(); + let mut new_queued = Db::::queued_payments(txn, new_key, coin).unwrap(); + + let mut queued = still_queued; + queued.append(&mut new_queued); + + Db::::set_queued_payments(txn, retiring_key, coin, &[]); + Db::::set_queued_payments(txn, new_key, coin, &queued); + } + + // Move the outputs to the new key + self.flush_outputs(txn, &mut eventualities, block, retiring_key, new_key, coin).await?; + } + Ok(eventualities) + } + } + + fn retire_key(txn: &mut impl DbTxn, key: KeyFor) { + for coin in S::NETWORK.coins() { + assert!(Db::::outputs(txn, key, coin).unwrap().is_empty()); + Db::::del_outputs(txn, key, coin); + assert!(Db::::queued_payments(txn, key, coin).unwrap().is_empty()); + Db::::del_queued_payments(txn, key, coin); + } + } + + fn update( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + active_keys: &[(KeyFor, LifetimeStage)], + update: SchedulerUpdate, + ) -> impl Send + Future, Self::EphemeralError>> { + 
async move { + Self::accumulate_outputs(txn, update.outputs().to_vec(), true); + + // Fulfill the payments we prior couldn't + let mut eventualities = HashMap::new(); + for (key, _stage) in active_keys { + assert!(eventualities + .insert(key.to_bytes().as_ref().to_vec(), self.step(txn, active_keys, block, *key).await?) + .is_none()); + } + + // If this key has been flushed, forward all outputs + match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting | + LifetimeStage::Active | + LifetimeStage::UsingNewForChange => {} + LifetimeStage::Forwarding | LifetimeStage::Finishing => { + for coin in S::NETWORK.coins() { + self + .flush_outputs( + txn, + &mut eventualities, + block, + active_keys[0].0, + active_keys[1].0, + coin, + ) + .await?; + } + } + } + + // Create the transactions for the forwards/returns + { + let mut planned_txs = vec![]; + for forward in update.forwards() { + let key = forward.key(); + + assert_eq!(active_keys.len(), 2); + assert_eq!(active_keys[0].1, LifetimeStage::Forwarding); + assert_eq!(active_keys[1].1, LifetimeStage::Active); + let forward_to_key = active_keys[1].0; + + let Some(plan) = self + .planner + .plan_transaction_with_fee_amortization( + // This uses 0 for the operating costs as we don't incur any here + // If the output can't pay for itself to be forwarded, we simply drop it + &mut 0, + block, + vec![forward.clone()], + vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance())], + None, + ) + .await? + else { + continue; + }; + planned_txs.push((key, plan)); + } + for to_return in update.returns() { + let key = to_return.output().key(); + let out_instruction = + Payment::new(to_return.address().clone(), to_return.output().balance()); + let Some(plan) = self + .planner + .plan_transaction_with_fee_amortization( + // This uses 0 for the operating costs as we don't incur any here + // If the output can't pay for itself to be returned, we simply drop it + &mut 0, + block, + vec![to_return.output().clone()], + vec![out_instruction], + None, + ) + .await? 
+ else { + continue; + }; + planned_txs.push((key, plan)); + } + + for (key, planned_tx) in planned_txs { + // Send the transactions off for signing + TransactionsToSign::::send(txn, &key, &planned_tx.signable); + + // Insert the Eventualities into the result + eventualities.get_mut(key.to_bytes().as_ref()).unwrap().push(planned_tx.eventuality); + } + + Ok(eventualities) + } + } + } + + fn fulfill( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + active_keys: &[(KeyFor, LifetimeStage)], + payments: Vec>>, + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + // Find the key to filfill these payments with + let fulfillment_key = match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting => { + panic!("expected to fulfill payments despite not reporting for the oldest key") + } + LifetimeStage::Active | LifetimeStage::UsingNewForChange => active_keys[0].0, + LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0, + }; + + // Queue the payments for this key + for coin in S::NETWORK.coins() { + let mut queued_payments = Db::::queued_payments(txn, fulfillment_key, coin).unwrap(); + queued_payments + .extend(payments.iter().filter(|payment| payment.balance().coin == coin).cloned()); + Db::::set_queued_payments(txn, fulfillment_key, coin, &queued_payments); + } + + // Handle the queued payments + Ok(HashMap::from([( + fulfillment_key.to_bytes().as_ref().to_vec(), + self.step(txn, active_keys, block, fulfillment_key).await?, + )])) + } + } +} diff --git a/processor/signers/Cargo.toml b/processor/signers/Cargo.toml new file mode 100644 index 00000000..ecf588d4 --- /dev/null +++ b/processor/signers/Cargo.toml @@ -0,0 +1,49 @@ +[package] +name = "serai-processor-signers" +version = "0.1.0" +description = "Signers for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/signers" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false +rust-version = "1.80" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.cargo-machete] +ignored = ["borsh"] + +[lints] +workspace = true + +[dependencies] +rand_core = { version = "0.6", default-features = false } +zeroize = { version = "1", default-features = false, features = ["std"] } + +blake2 = { version = "0.10", default-features = false, features = ["std"] } +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std"] } +frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } +frost-schnorrkel = { path = "../../crypto/schnorrkel", default-features = false } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] } +serai-validator-sets-primitives = { path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std"] } +serai-in-instructions-primitives = { path = "../../substrate/in-instructions/primitives", default-features = false, features = ["std"] } + +serai-db = { path = "../../common/db" } +log = { version = "0.4", default-features = false, features = ["std"] } +tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } + +serai-cosign = { path = 
"../../coordinator/cosign" } +messages = { package = "serai-processor-messages", path = "../messages" } +primitives = { package = "serai-processor-primitives", path = "../primitives" } +scanner = { package = "serai-processor-scanner", path = "../scanner" } +scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } + +frost-attempt-manager = { package = "serai-processor-frost-attempt-manager", path = "../frost-attempt-manager" } diff --git a/processor/signers/LICENSE b/processor/signers/LICENSE new file mode 100644 index 00000000..e091b149 --- /dev/null +++ b/processor/signers/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/signers/README.md b/processor/signers/README.md new file mode 100644 index 00000000..b6eddd56 --- /dev/null +++ b/processor/signers/README.md @@ -0,0 +1,6 @@ +# Processor Signers + +Implementations of the tree signers used by a processor (the transaction signer, +the Substrate signer, and the cosigner). + +This library is interacted with via the `serai_processor_messages::sign` API. diff --git a/processor/signers/src/batch/db.rs b/processor/signers/src/batch/db.rs new file mode 100644 index 00000000..8d9bc605 --- /dev/null +++ b/processor/signers/src/batch/db.rs @@ -0,0 +1,14 @@ +use serai_validator_sets_primitives::Session; +use serai_in_instructions_primitives::{Batch, SignedBatch}; + +use serai_db::{Get, DbTxn, create_db}; + +create_db! { + SignersBatch { + ActiveSigningProtocols: (session: Session) -> Vec<[u8; 32]>, + BatchHash: (id: u32) -> [u8; 32], + Batches: (hash: [u8; 32]) -> Batch, + SignedBatches: (id: u32) -> SignedBatch, + LastAcknowledgedBatch: () -> u32, + } +} diff --git a/processor/signers/src/batch/mod.rs b/processor/signers/src/batch/mod.rs new file mode 100644 index 00000000..f38666a8 --- /dev/null +++ b/processor/signers/src/batch/mod.rs @@ -0,0 +1,215 @@ +use core::future::Future; +use std::collections::HashSet; + +use blake2::{digest::typenum::U32, Digest, Blake2b}; +use ciphersuite::{group::GroupEncoding, Ristretto}; +use frost::dkg::ThresholdKeys; + +use scale::Encode; + +use serai_validator_sets_primitives::Session; +use serai_in_instructions_primitives::{SignedBatch, batch_message}; + +use serai_db::{Get, DbTxn, Db}; + +use messages::sign::VariantSignId; + +use primitives::task::{DoesNotError, ContinuallyRan}; +use scanner::{BatchesToSign, AcknowledgedBatches}; + +use frost_attempt_manager::*; + +use crate::{ + db::{CoordinatorToBatchSignerMessages, BatchSignerToCoordinatorMessages}, + WrappedSchnorrkelMachine, +}; + +mod db; +use db::*; + +pub(crate) fn last_acknowledged_batch(getter: &impl Get) -> Option { + LastAcknowledgedBatch::get(getter) +} + +pub(crate) fn signed_batch(getter: &impl Get, id: u32) -> Option { + SignedBatches::get(getter, id) +} + +// Fetches batches to sign and signs them. 
+pub(crate) struct BatchSignerTask { + db: D, + + session: Session, + external_key: E, + keys: Vec>, + + active_signing_protocols: HashSet<[u8; 32]>, + attempt_manager: AttemptManager, +} + +impl BatchSignerTask { + pub(crate) fn new( + db: D, + session: Session, + external_key: E, + keys: Vec>, + ) -> Self { + let mut active_signing_protocols = HashSet::new(); + let mut attempt_manager = AttemptManager::new( + db.clone(), + session, + keys.first().expect("creating a batch signer with 0 keys").params().i(), + ); + + // Re-register all active signing protocols + for id in ActiveSigningProtocols::get(&db, session).unwrap_or(vec![]) { + active_signing_protocols.insert(id); + + let batch = Batches::get(&db, id).unwrap(); + + let mut machines = Vec::with_capacity(keys.len()); + for keys in &keys { + // TODO: Fetch the context for this from a constant instead of re-defining it + machines.push(WrappedSchnorrkelMachine::new( + keys.clone(), + b"substrate", + batch_message(&batch), + )); + } + attempt_manager.register(VariantSignId::Batch(id), machines); + } + + Self { db, session, external_key, keys, active_signing_protocols, attempt_manager } + } +} + +impl ContinuallyRan for BatchSignerTask { + type Error = DoesNotError; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut iterated = false; + + // Check for new batches to sign + loop { + let mut txn = self.db.txn(); + let Some(batch) = BatchesToSign::try_recv(&mut txn, &self.external_key) else { + break; + }; + iterated = true; + + // Save this to the database as a transaction to sign + let batch_hash = <[u8; 32]>::from(Blake2b::::digest(batch.encode())); + self.active_signing_protocols.insert(batch_hash); + ActiveSigningProtocols::set( + &mut txn, + self.session, + &self.active_signing_protocols.iter().copied().collect(), + ); + BatchHash::set(&mut txn, batch.id, &batch_hash); + Batches::set(&mut txn, batch_hash, &batch); + + let mut machines = Vec::with_capacity(self.keys.len()); + for keys in &self.keys { + // TODO: Also fetch the constant here + machines.push(WrappedSchnorrkelMachine::new( + keys.clone(), + b"substrate", + batch_message(&batch), + )); + } + for msg in self.attempt_manager.register(VariantSignId::Batch(batch_hash), machines) { + BatchSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + + txn.commit(); + } + + // Check for acknowledged Batches (meaning we should no longer sign for these Batches) + loop { + let mut txn = self.db.txn(); + let batch_hash = { + let Some(batch_id) = AcknowledgedBatches::try_recv(&mut txn, &self.external_key) else { + break; + }; + + /* + We may have yet to register this signing protocol. + + While `BatchesToSign` is populated before `AcknowledgedBatches`, we could theoretically + have `BatchesToSign` populated with a new batch _while iterating over + `AcknowledgedBatches`_, and then have `AcknowledgedBatched` populated. In that edge + case, we will see the acknowledgement notification before we see the transaction. + + In such a case, we break (dropping the txn, re-queueing the acknowledgement + notification). On the task's next iteration, we'll process the Batch from + `BatchesToSign` and be able to make progress. 
+ */ + let Some(batch_hash) = BatchHash::take(&mut txn, batch_id) else { + drop(txn); + break; + }; + batch_hash + }; + let batch = + Batches::take(&mut txn, batch_hash).expect("BatchHash populated but not Batches"); + + iterated = true; + + // Update the last acknowledged Batch + { + let last_acknowledged = LastAcknowledgedBatch::get(&txn); + if Some(batch.id) > last_acknowledged { + LastAcknowledgedBatch::set(&mut txn, &batch.id); + } + } + + // Remove this as an active signing protocol + assert!(self.active_signing_protocols.remove(&batch_hash)); + ActiveSigningProtocols::set( + &mut txn, + self.session, + &self.active_signing_protocols.iter().copied().collect(), + ); + + // Clean up SignedBatches + SignedBatches::del(&mut txn, batch.id); + + // We retire with a txn so we either successfully flag this Batch as acknowledged, and + // won't re-register it (making this retire safe), or we don't flag it, meaning we will + // re-register it, yet that's safe as we have yet to retire it + self.attempt_manager.retire(&mut txn, VariantSignId::Batch(batch_hash)); + + txn.commit(); + } + + // Handle any messages sent to us + loop { + let mut txn = self.db.txn(); + let Some(msg) = CoordinatorToBatchSignerMessages::try_recv(&mut txn, self.session) else { + break; + }; + iterated = true; + + match self.attempt_manager.handle(msg) { + Response::Messages(msgs) => { + for msg in msgs { + BatchSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + } + Response::Signature { id, signature } => { + let VariantSignId::Batch(id) = id else { panic!("BatchSignerTask signed a non-Batch") }; + let batch = + Batches::get(&txn, id).expect("signed a Batch we didn't save to the database"); + let signed_batch = SignedBatch { batch, signature: signature.into() }; + SignedBatches::set(&mut txn, signed_batch.batch.id, &signed_batch); + } + } + + txn.commit(); + } + + Ok(iterated) + } + } +} diff --git a/processor/signers/src/coordinator/db.rs b/processor/signers/src/coordinator/db.rs new file mode 100644 index 00000000..c8235ede --- /dev/null +++ b/processor/signers/src/coordinator/db.rs @@ -0,0 +1,7 @@ +use serai_db::{Get, DbTxn, create_db}; + +create_db! { + SignersCoordinator { + LastPublishedBatch: () -> u32, + } +} diff --git a/processor/signers/src/coordinator/mod.rs b/processor/signers/src/coordinator/mod.rs new file mode 100644 index 00000000..0fd10822 --- /dev/null +++ b/processor/signers/src/coordinator/mod.rs @@ -0,0 +1,157 @@ +use core::future::Future; + +use serai_primitives::Signature; + +use serai_db::{DbTxn, Db}; + +use primitives::task::ContinuallyRan; + +use crate::{db::*, Coordinator}; + +mod db; + +// Fetches messages to send the coordinator and sends them. 
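Within the coordinator task below, signed batches are published strictly in order, resuming after whichever is newer of the last batch acknowledged on-chain and the last batch this task already published. A minimal sketch of that cursor logic, with plain `Option<u32>` values standing in for the `LastAcknowledgedBatch`/`LastPublishedBatch` database entries:

```rust
/// Pick the next Batch id to publish given the last acknowledged and last published ids.
fn next_batch_to_publish(last_acknowledged: Option<u32>, last_published: Option<u32>) -> u32 {
  // `Option` orders `None` below any `Some`, so `max` picks the newer of the two cursors
  last_acknowledged.max(last_published).map(|id| id + 1).unwrap_or(0)
}

fn main() {
  // Nothing acknowledged or published yet: start from Batch 0
  assert_eq!(next_batch_to_publish(None, None), 0);
  // Batch 4 was already acknowledged even though we only published up to 2: resume at 5
  assert_eq!(next_batch_to_publish(Some(4), Some(2)), 5);
}
```

The task then walks forward one id at a time, publishing each `SignedBatch` it holds until it hits a gap (a batch it didn't sign for or hasn't finished signing yet).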
+pub(crate) struct CoordinatorTask { + db: D, + coordinator: C, +} + +impl CoordinatorTask { + pub(crate) fn new(db: D, coordinator: C) -> Self { + Self { db, coordinator } + } +} + +impl ContinuallyRan for CoordinatorTask { + type Error = String; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut iterated = false; + + for session in RegisteredKeys::get(&self.db).unwrap_or(vec![]) { + // Publish the messages generated by this key's signers + loop { + let mut txn = self.db.txn(); + let Some(msg) = CosignerToCoordinatorMessages::try_recv(&mut txn, session) else { + break; + }; + iterated = true; + + self + .coordinator + .send(msg) + .await + .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?; + + txn.commit(); + } + + loop { + let mut txn = self.db.txn(); + let Some(msg) = BatchSignerToCoordinatorMessages::try_recv(&mut txn, session) else { + break; + }; + iterated = true; + + self + .coordinator + .send(msg) + .await + .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?; + + txn.commit(); + } + + loop { + let mut txn = self.db.txn(); + let Some(msg) = SlashReportSignerToCoordinatorMessages::try_recv(&mut txn, session) + else { + break; + }; + iterated = true; + + self + .coordinator + .send(msg) + .await + .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?; + + txn.commit(); + } + + loop { + let mut txn = self.db.txn(); + let Some(msg) = TransactionSignerToCoordinatorMessages::try_recv(&mut txn, session) + else { + break; + }; + iterated = true; + + self + .coordinator + .send(msg) + .await + .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?; + + txn.commit(); + } + + // Publish the cosigns from this session + { + let mut txn = self.db.txn(); + while let Some(signed_cosign) = Cosign::try_recv(&mut txn, session) { + iterated = true; + self + .coordinator + .publish_cosign(signed_cosign) + .await + .map_err(|e| format!("couldn't publish Cosign: {e:?}"))?; + } + txn.commit(); + } + + // If this session signed its slash report, publish its signature + { + let mut txn = self.db.txn(); + if let Some((slash_report, signature)) = SignedSlashReport::try_recv(&mut txn, session) { + iterated = true; + + self + .coordinator + .publish_slash_report_signature(session, slash_report, Signature(signature)) + .await + .map_err(|e| { + format!("couldn't send slash report signature to the coordinator: {e:?}") + })?; + + txn.commit(); + } + } + } + + // Publish the signed Batches + { + let mut txn = self.db.txn(); + // The last acknowledged Batch may exceed the last Batch we published if we didn't sign for + // the prior Batch(es) (and accordingly didn't publish them) + let last_batch = + crate::batch::last_acknowledged_batch(&txn).max(db::LastPublishedBatch::get(&txn)); + let mut next_batch = last_batch.map(|id| id + 1).unwrap_or(0); + while let Some(batch) = crate::batch::signed_batch(&txn, next_batch) { + iterated = true; + db::LastPublishedBatch::set(&mut txn, &batch.batch.id); + self + .coordinator + .publish_signed_batch(batch) + .await + .map_err(|e| format!("couldn't publish Batch: {e:?}"))?; + next_batch += 1; + } + txn.commit(); + } + + Ok(iterated) + } + } +} diff --git a/processor/signers/src/cosign/db.rs b/processor/signers/src/cosign/db.rs new file mode 100644 index 00000000..01a42446 --- /dev/null +++ b/processor/signers/src/cosign/db.rs @@ -0,0 +1,9 @@ +use serai_validator_sets_primitives::Session; + +use serai_db::{Get, DbTxn, create_db}; + +create_db! 
{ + SignersCosigner { + LatestCosigned: (session: Session) -> u64, + } +} diff --git a/processor/signers/src/cosign/mod.rs b/processor/signers/src/cosign/mod.rs new file mode 100644 index 00000000..ddf6c490 --- /dev/null +++ b/processor/signers/src/cosign/mod.rs @@ -0,0 +1,143 @@ +use core::future::Future; + +use ciphersuite::Ristretto; +use frost::dkg::ThresholdKeys; + +use scale::Encode; +use serai_primitives::Signature; +use serai_validator_sets_primitives::Session; + +use serai_db::{DbTxn, Db}; + +use serai_cosign::{COSIGN_CONTEXT, Cosign as CosignStruct, SignedCosign}; +use messages::sign::VariantSignId; + +use primitives::task::{DoesNotError, ContinuallyRan}; + +use frost_attempt_manager::*; + +use crate::{ + db::{ToCosign, Cosign, CoordinatorToCosignerMessages, CosignerToCoordinatorMessages}, + WrappedSchnorrkelMachine, +}; + +mod db; +use db::LatestCosigned; + +/// Fetches the latest cosign information and works on it. +/// +/// Only the latest cosign attempt is kept. We don't work on historical attempts as later cosigns +/// supersede them. +#[allow(non_snake_case)] +pub(crate) struct CosignerTask { + db: D, + + session: Session, + keys: Vec>, + + current_cosign: Option, + attempt_manager: AttemptManager, +} + +impl CosignerTask { + pub(crate) fn new(db: D, session: Session, keys: Vec>) -> Self { + let attempt_manager = AttemptManager::new( + db.clone(), + session, + keys.first().expect("creating a cosigner with 0 keys").params().i(), + ); + + Self { db, session, keys, current_cosign: None, attempt_manager } + } +} + +impl ContinuallyRan for CosignerTask { + type Error = DoesNotError; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut iterated = false; + + // Check the cosign to work on + { + let mut txn = self.db.txn(); + if let Some(cosign) = ToCosign::get(&txn, self.session) { + // If this wasn't already signed for... 
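+          // (`LatestCosigned` holds the block number of the most recent cosign this session has
+          // produced a signature for, so a request at or below it is stale and skipped)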
+ if LatestCosigned::get(&txn, self.session) < Some(cosign.block_number) { + // If this isn't the cosign we're currently working on, meaning it's fresh + if self.current_cosign.as_ref() != Some(&cosign) { + // Retire the current cosign + if let Some(current_cosign) = &self.current_cosign { + assert!(current_cosign.block_number < cosign.block_number); + self + .attempt_manager + .retire(&mut txn, VariantSignId::Cosign(current_cosign.block_number)); + } + + // Set the cosign being worked on + self.current_cosign = Some(cosign.clone()); + + let mut machines = Vec::with_capacity(self.keys.len()); + { + let message = cosign.signature_message(); + for keys in &self.keys { + machines.push(WrappedSchnorrkelMachine::new( + keys.clone(), + COSIGN_CONTEXT, + message.clone(), + )); + } + } + for msg in + self.attempt_manager.register(VariantSignId::Cosign(cosign.block_number), machines) + { + CosignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + + txn.commit(); + } + } + } + } + + // Handle any messages sent to us + loop { + let mut txn = self.db.txn(); + let Some(msg) = CoordinatorToCosignerMessages::try_recv(&mut txn, self.session) else { + break; + }; + iterated = true; + + match self.attempt_manager.handle(msg) { + Response::Messages(msgs) => { + for msg in msgs { + CosignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + } + Response::Signature { id, signature } => { + let VariantSignId::Cosign(block_number) = id else { + panic!("CosignerTask signed a non-Cosign") + }; + assert_eq!( + Some(block_number), + self.current_cosign.as_ref().map(|cosign| cosign.block_number) + ); + + let cosign = self.current_cosign.take().unwrap(); + LatestCosigned::set(&mut txn, self.session, &cosign.block_number); + let cosign = SignedCosign { + cosign, + signature: Signature::from(signature).encode().try_into().unwrap(), + }; + // Send the cosign + Cosign::send(&mut txn, self.session, &cosign); + } + } + + txn.commit(); + } + + Ok(iterated) + } + } +} diff --git a/processor/signers/src/db.rs b/processor/signers/src/db.rs new file mode 100644 index 00000000..23862236 --- /dev/null +++ b/processor/signers/src/db.rs @@ -0,0 +1,52 @@ +use serai_validator_sets_primitives::{Session, SlashReport as SlashReportStruct}; + +use serai_db::{Get, DbTxn, create_db, db_channel}; + +use serai_cosign::{Cosign as CosignStruct, SignedCosign}; + +use messages::sign::{ProcessorMessage, CoordinatorMessage}; + +create_db! { + SignersGlobal { + RegisteredKeys: () -> Vec, + SerializedKeys: (session: Session) -> Vec, + LatestRetiredSession: () -> Session, + ToCleanup: () -> Vec<(Session, Vec)>, + + ToCosign: (session: Session) -> CosignStruct, + } +} + +db_channel! { + SignersGlobal { + Cosign: (session: Session) -> SignedCosign, + + SlashReport: (session: Session) -> SlashReportStruct, + SignedSlashReport: (session: Session) -> (SlashReportStruct, [u8; 64]), + + /* + TODO: Most of these are pointless? We drop all active signing sessions on reboot. It's + accordingly not valuable to use a DB-backed channel to communicate messages for signing + sessions (Preprocess/Shares). + + Transactions, Batches, Slash Reports, and Cosigns all have their own mechanisms/DB entries + and don't use the following channels. The only questions are: + + 1) If it's safe to drop Reattempt? Or if we need tweaks to enable that + 2) If we reboot with a pending Reattempt, we'll participate on reboot. If we drop that + Reattempt, we won't. 
Accordingly, we have degraded performance in that edge case in + exchange for less disk IO in the majority of cases. Is that work it? + */ + CoordinatorToCosignerMessages: (session: Session) -> CoordinatorMessage, + CosignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, + + CoordinatorToBatchSignerMessages: (session: Session) -> CoordinatorMessage, + BatchSignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, + + CoordinatorToSlashReportSignerMessages: (session: Session) -> CoordinatorMessage, + SlashReportSignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, + + CoordinatorToTransactionSignerMessages: (session: Session) -> CoordinatorMessage, + TransactionSignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, + } +} diff --git a/processor/signers/src/lib.rs b/processor/signers/src/lib.rs new file mode 100644 index 00000000..2f5a4a04 --- /dev/null +++ b/processor/signers/src/lib.rs @@ -0,0 +1,447 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::{future::Future, fmt::Debug, marker::PhantomData}; +use std::collections::HashMap; + +use zeroize::Zeroizing; + +use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; +use frost::dkg::{ThresholdCore, ThresholdKeys}; + +use serai_primitives::Signature; +use serai_validator_sets_primitives::{Session, SlashReport}; +use serai_in_instructions_primitives::SignedBatch; + +use serai_db::{DbTxn, Db}; + +use serai_cosign::{Cosign, SignedCosign}; + +use messages::sign::{VariantSignId, ProcessorMessage, CoordinatorMessage}; + +use primitives::task::{Task, TaskHandle, ContinuallyRan}; +use scheduler::{Transaction, SignableTransaction, TransactionFor}; +use scanner::{ScannerFeed, Scheduler}; + +mod wrapped_schnorrkel; +pub(crate) use wrapped_schnorrkel::WrappedSchnorrkelMachine; + +pub(crate) mod db; + +mod coordinator; +use coordinator::CoordinatorTask; + +mod cosign; +use cosign::CosignerTask; + +mod batch; +use batch::BatchSignerTask; + +mod slash_report; +use slash_report::SlashReportSignerTask; + +mod transaction; +use transaction::TransactionSignerTask; + +/// A connection to the Coordinator which messages can be published with. +pub trait Coordinator: 'static + Send + Sync { + /// An error encountered when interacting with a coordinator. + /// + /// This MUST be an ephemeral error. Retrying an interaction MUST eventually resolve without + /// manual intervention/changing the arguments. + type EphemeralError: Debug; + + /// Send a `messages::sign::ProcessorMessage`. + fn send( + &mut self, + message: ProcessorMessage, + ) -> impl Send + Future>; + + /// Publish a cosign. + fn publish_cosign( + &mut self, + signed_cosign: SignedCosign, + ) -> impl Send + Future>; + + /// Publish a `SignedBatch`. + fn publish_signed_batch( + &mut self, + batch: SignedBatch, + ) -> impl Send + Future>; + + /// Publish a slash report's signature. + fn publish_slash_report_signature( + &mut self, + session: Session, + slash_report: SlashReport, + signature: Signature, + ) -> impl Send + Future>; +} + +/// An object capable of publishing a transaction. +pub trait TransactionPublisher: 'static + Send + Sync + Clone { + /// An error encountered when publishing a transaction. + /// + /// This MUST be an ephemeral error. Retrying publication MUST eventually resolve without manual + /// intervention/changing the arguments. + type EphemeralError: Debug; + + /// Publish a transaction. 
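For a sense of the integration surface the `Coordinator` trait above defines, a hypothetical in-memory stub (suitable only for tests) is sketched below. It reuses the types this file already imports; the `Output` of each future is reconstructed as `Result<(), Self::EphemeralError>` since the signatures render here with their generics elided, and `StubCoordinator` itself is not part of this patch.

// Records everything "sent" so a test can assert on it; every operation trivially succeeds.
#[derive(Default)]
struct StubCoordinator {
  sent: Vec<ProcessorMessage>,
  cosigns: Vec<SignedCosign>,
  batches: Vec<SignedBatch>,
}

impl Coordinator for StubCoordinator {
  type EphemeralError = std::convert::Infallible;

  fn send(
    &mut self,
    message: ProcessorMessage,
  ) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
    // The message is recorded immediately; the returned future only reports success
    self.sent.push(message);
    async move { Ok(()) }
  }

  fn publish_cosign(
    &mut self,
    signed_cosign: SignedCosign,
  ) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
    self.cosigns.push(signed_cosign);
    async move { Ok(()) }
  }

  fn publish_signed_batch(
    &mut self,
    batch: SignedBatch,
  ) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
    self.batches.push(batch);
    async move { Ok(()) }
  }

  fn publish_slash_report_signature(
    &mut self,
    _session: Session,
    _slash_report: SlashReport,
    _signature: Signature,
  ) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
    async move { Ok(()) }
  }
}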
+ /// + /// This will be called multiple times, with the same transaction, until the transaction is + /// confirmed on-chain. + /// + /// The transaction already being present in the mempool/on-chain MUST NOT be considered an + /// error. + fn publish(&self, tx: T) -> impl Send + Future>; +} + +struct Tasks { + cosigner: TaskHandle, + batch: TaskHandle, + slash_report: TaskHandle, + transaction: TaskHandle, +} + +/// The signers used by a processor. +#[allow(non_snake_case)] +pub struct Signers< + D: Db, + S: ScannerFeed, + Sch: Scheduler, + P: TransactionPublisher>>, +> { + db: D, + publisher: P, + coordinator_handle: TaskHandle, + tasks: HashMap, + _Sch: PhantomData, + _S: PhantomData, +} + +type CiphersuiteFor = + <>::SignableTransaction as SignableTransaction>::Ciphersuite; +type SignableTransactionFor = >::SignableTransaction; + +/* + This is completely outside of consensus, so the worst that can happen is: + + 1) Leakage of a private key, hence the usage of frost-attempt-manager which has an API to ensure + that doesn't happen + 2) The database isn't perfectly cleaned up (leaving some bytes on disk wasted) + 3) The state isn't perfectly cleaned up (leaving some bytes in RAM wasted) + + The last two are notably possible via a series of race conditions. For example, if an Eventuality + completion comes in *before* we registered a key, the signer will hold the signing protocol in + memory until the session is retired entirely. +*/ +impl< + D: Db, + S: ScannerFeed, + Sch: Scheduler, + P: TransactionPublisher>>, + > Signers +{ + fn tasks( + db: D, + publisher: P, + coordinator_handle: TaskHandle, + session: Session, + substrate_keys: Vec>, + external_keys: Vec>>, + ) -> Tasks { + let (cosign_task, cosign_handle) = Task::new(); + tokio::spawn( + CosignerTask::new(db.clone(), session, substrate_keys.clone()) + .continually_run(cosign_task, vec![coordinator_handle.clone()]), + ); + + let (batch_task, batch_handle) = Task::new(); + tokio::spawn( + BatchSignerTask::new( + db.clone(), + session, + external_keys[0].group_key(), + substrate_keys.clone(), + ) + .continually_run(batch_task, vec![coordinator_handle.clone()]), + ); + + let (slash_report_task, slash_report_handle) = Task::new(); + tokio::spawn( + SlashReportSignerTask::<_, S>::new(db.clone(), session, substrate_keys) + .continually_run(slash_report_task, vec![coordinator_handle.clone()]), + ); + + let (transaction_task, transaction_handle) = Task::new(); + tokio::spawn( + TransactionSignerTask::<_, SignableTransactionFor, _>::new( + db, + publisher, + session, + external_keys, + ) + .continually_run(transaction_task, vec![coordinator_handle]), + ); + + Tasks { + cosigner: cosign_handle, + batch: batch_handle, + slash_report: slash_report_handle, + transaction: transaction_handle, + } + } + /// Initialize the signers. + /// + /// This will spawn tasks for any historically registered keys. + pub fn new(mut db: D, coordinator: impl Coordinator, publisher: P) -> Self { + /* + On boot, perform any database cleanup which was queued. + + We don't do this cleanup at time of dropping the task as we'd need to wait an unbounded + amount of time for the task to stop (requiring an async task), then we'd have to drain the + channels (which would be on a distinct DB transaction and risk not occurring if we rebooted + while waiting for the task to stop). This is the easiest way to handle this. 
+ */ + { + let mut txn = db.txn(); + for (session, external_key_bytes) in db::ToCleanup::get(&txn).unwrap_or(vec![]) { + let mut external_key_bytes = external_key_bytes.as_slice(); + let external_key = CiphersuiteFor::::read_G(&mut external_key_bytes).unwrap(); + assert!(external_key_bytes.is_empty()); + + // Drain the Batches to sign + // This will be fully populated by the scanner before retiry occurs, making this perfect + // in not leaving any pending blobs behind + while scanner::BatchesToSign::try_recv(&mut txn, &external_key).is_some() {} + // Drain the acknowledged batches to no longer sign + while scanner::AcknowledgedBatches::try_recv(&mut txn, &external_key).is_some() {} + + // Drain the transactions to sign + // This will be fully populated by the scheduler before retiry + while scheduler::TransactionsToSign::>::try_recv( + &mut txn, + &external_key, + ) + .is_some() + {} + + // Drain the completed Eventualities + while scanner::CompletedEventualities::try_recv(&mut txn, &external_key).is_some() {} + + // Delete the cosign this session should be working on + db::ToCosign::del(&mut txn, session); + // Drain our DB channels + while db::Cosign::try_recv(&mut txn, session).is_some() {} + while db::SlashReport::try_recv(&mut txn, session).is_some() {} + while db::CoordinatorToCosignerMessages::try_recv(&mut txn, session).is_some() {} + while db::CosignerToCoordinatorMessages::try_recv(&mut txn, session).is_some() {} + while db::CoordinatorToBatchSignerMessages::try_recv(&mut txn, session).is_some() {} + while db::BatchSignerToCoordinatorMessages::try_recv(&mut txn, session).is_some() {} + while db::CoordinatorToSlashReportSignerMessages::try_recv(&mut txn, session).is_some() {} + while db::SlashReportSignerToCoordinatorMessages::try_recv(&mut txn, session).is_some() {} + while db::CoordinatorToTransactionSignerMessages::try_recv(&mut txn, session).is_some() {} + while db::TransactionSignerToCoordinatorMessages::try_recv(&mut txn, session).is_some() {} + } + db::ToCleanup::del(&mut txn); + txn.commit(); + } + + let mut tasks = HashMap::new(); + + let (coordinator_task, coordinator_handle) = Task::new(); + tokio::spawn( + CoordinatorTask::new(db.clone(), coordinator).continually_run(coordinator_task, vec![]), + ); + + for session in db::RegisteredKeys::get(&db).unwrap_or(vec![]) { + let buf = db::SerializedKeys::get(&db, session).unwrap(); + let mut buf = buf.as_slice(); + + let mut substrate_keys = vec![]; + let mut external_keys = vec![]; + while !buf.is_empty() { + substrate_keys + .push(ThresholdKeys::from(ThresholdCore::::read(&mut buf).unwrap())); + external_keys.push(ThresholdKeys::from( + ThresholdCore::>::read(&mut buf).unwrap(), + )); + } + + tasks.insert( + session, + Self::tasks( + db.clone(), + publisher.clone(), + coordinator_handle.clone(), + session, + substrate_keys, + external_keys, + ), + ); + } + + Self { db, publisher, coordinator_handle, tasks, _Sch: PhantomData, _S: PhantomData } + } + + /// Register a set of keys to sign with. + /// + /// If this session (or a session after it) has already been retired, this is a NOP. 
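+  ///
+  /// The key shares are persisted as alternating Substrate/external encodings in a single
+  /// buffer, matching the order `new` reads them back in when respawning signer tasks on boot.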
+ pub fn register_keys( + &mut self, + txn: &mut impl DbTxn, + session: Session, + substrate_keys: Vec>, + external_keys: Vec>>, + ) { + // Don't register already retired keys + if Some(session.0) <= db::LatestRetiredSession::get(txn).map(|session| session.0) { + return; + } + + { + let mut sessions = db::RegisteredKeys::get(txn).unwrap_or_else(|| Vec::with_capacity(1)); + sessions.push(session); + db::RegisteredKeys::set(txn, &sessions); + } + + { + let mut buf = Zeroizing::new(Vec::with_capacity(2 * substrate_keys.len() * 128)); + for (substrate_keys, external_keys) in substrate_keys.iter().zip(&external_keys) { + buf.extend(&*substrate_keys.serialize()); + buf.extend(&*external_keys.serialize()); + } + db::SerializedKeys::set(txn, session, &buf); + } + + // Spawn the tasks + self.tasks.insert( + session, + Self::tasks( + self.db.clone(), + self.publisher.clone(), + self.coordinator_handle.clone(), + session, + substrate_keys, + external_keys, + ), + ); + } + + /// Retire the signers for a session. + /// + /// This MUST be called in order, for every session (even if we didn't register keys for this + /// session). This MUST only be called after slash report publication, or after that process + /// times out (not once the key is done with regards to the external network). + pub fn retire_session( + &mut self, + txn: &mut impl DbTxn, + session: Session, + external_key: &impl GroupEncoding, + ) { + // Update the latest retired session + { + let next_to_retire = + db::LatestRetiredSession::get(txn).map_or(Session(0), |session| Session(session.0 + 1)); + assert_eq!(session, next_to_retire); + db::LatestRetiredSession::set(txn, &session); + } + + // Update RegisteredKeys/SerializedKeys + if let Some(registered) = db::RegisteredKeys::get(txn) { + db::RegisteredKeys::set( + txn, + ®istered.into_iter().filter(|session_i| *session_i != session).collect(), + ); + } + db::SerializedKeys::del(txn, session); + + // Queue the session for clean up + let mut to_cleanup = db::ToCleanup::get(txn).unwrap_or(vec![]); + to_cleanup.push((session, external_key.to_bytes().as_ref().to_vec())); + db::ToCleanup::set(txn, &to_cleanup); + + // Drop the task handles, which will cause the tasks to close + self.tasks.remove(&session); + } + + /// Queue handling a message. + /// + /// This is a cheap call and able to be done inline from a higher-level loop. + pub fn queue_message(&mut self, txn: &mut impl DbTxn, message: &CoordinatorMessage) { + let sign_id = message.sign_id(); + + // Don't queue messages for already retired keys + if Some(sign_id.session.0) <= db::LatestRetiredSession::get(txn).map(|session| session.0) { + return; + } + + let tasks = self.tasks.get(&sign_id.session); + match sign_id.id { + VariantSignId::Cosign(_) => { + db::CoordinatorToCosignerMessages::send(txn, sign_id.session, message); + if let Some(tasks) = tasks { + tasks.cosigner.run_now(); + } + } + VariantSignId::Batch(_) => { + db::CoordinatorToBatchSignerMessages::send(txn, sign_id.session, message); + if let Some(tasks) = tasks { + tasks.batch.run_now(); + } + } + VariantSignId::SlashReport => { + db::CoordinatorToSlashReportSignerMessages::send(txn, sign_id.session, message); + if let Some(tasks) = tasks { + tasks.slash_report.run_now(); + } + } + VariantSignId::Transaction(_) => { + db::CoordinatorToTransactionSignerMessages::send(txn, sign_id.session, message); + if let Some(tasks) = tasks { + tasks.transaction.run_now(); + } + } + } + } + + /// Cosign a block. 
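+  ///
+  /// Only the most recent cosign requested for a session is retained (`ToCosign` is keyed solely
+  /// by session), so a newer request overwrites any cosign the session's signer has yet to work
+  /// on.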
+ /// + /// This is a cheap call and able to be done inline from a higher-level loop. + pub fn cosign_block(&mut self, mut txn: impl DbTxn, session: Session, cosign: &Cosign) { + // Don't cosign blocks with already retired keys + if Some(session.0) <= db::LatestRetiredSession::get(&txn).map(|session| session.0) { + return; + } + + db::ToCosign::set(&mut txn, session, cosign); + txn.commit(); + + if let Some(tasks) = self.tasks.get(&session) { + tasks.cosigner.run_now(); + } + } + + /// Sign a slash report. + /// + /// This is a cheap call and able to be done inline from a higher-level loop. + pub fn sign_slash_report( + &mut self, + mut txn: impl DbTxn, + session: Session, + slash_report: &SlashReport, + ) { + // Don't sign slash reports with already retired keys + if Some(session.0) <= db::LatestRetiredSession::get(&txn).map(|session| session.0) { + return; + } + + db::SlashReport::send(&mut txn, session, slash_report); + txn.commit(); + + if let Some(tasks) = self.tasks.get(&session) { + tasks.slash_report.run_now(); + } + } +} diff --git a/processor/signers/src/slash_report.rs b/processor/signers/src/slash_report.rs new file mode 100644 index 00000000..14437a74 --- /dev/null +++ b/processor/signers/src/slash_report.rs @@ -0,0 +1,123 @@ +use core::{marker::PhantomData, future::Future}; + +use ciphersuite::Ristretto; +use frost::dkg::ThresholdKeys; + +use serai_primitives::Signature; +use serai_validator_sets_primitives::Session; + +use serai_db::{DbTxn, Db}; + +use messages::sign::VariantSignId; + +use primitives::task::{DoesNotError, ContinuallyRan}; +use scanner::ScannerFeed; + +use frost_attempt_manager::*; + +use crate::{ + db::{ + SlashReport, SignedSlashReport, CoordinatorToSlashReportSignerMessages, + SlashReportSignerToCoordinatorMessages, + }, + WrappedSchnorrkelMachine, +}; + +// Fetches slash reports to sign and signs them. 
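+//
+// The slash report is only peeked at first (that read transaction is dropped), so it remains in
+// the `SlashReport` channel until a signature is produced; only then is it drained, in the same
+// transaction which records the `SignedSlashReport`, keeping the two atomic.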
+#[allow(non_snake_case)] +pub(crate) struct SlashReportSignerTask { + db: D, + _S: PhantomData, + + session: Session, + keys: Vec>, + + has_slash_report: bool, + attempt_manager: AttemptManager, +} + +impl SlashReportSignerTask { + pub(crate) fn new(db: D, session: Session, keys: Vec>) -> Self { + let attempt_manager = AttemptManager::new( + db.clone(), + session, + keys.first().expect("creating a slash report signer with 0 keys").params().i(), + ); + + Self { db, _S: PhantomData, session, keys, has_slash_report: false, attempt_manager } + } +} + +impl ContinuallyRan for SlashReportSignerTask { + type Error = DoesNotError; + + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut iterated = false; + + // Check for the slash report to sign + if !self.has_slash_report { + let mut txn = self.db.txn(); + let Some(slash_report) = SlashReport::try_recv(&mut txn, self.session) else { + return Ok(false); + }; + // We only commit this upon successfully signing this slash report + drop(txn); + iterated = true; + + self.has_slash_report = true; + + let mut machines = Vec::with_capacity(self.keys.len()); + { + let message = slash_report.report_slashes_message(); + for keys in &self.keys { + // TODO: Fetch this constant from somewhere instead of inlining it + machines.push(WrappedSchnorrkelMachine::new( + keys.clone(), + b"substrate", + message.clone(), + )); + } + } + let mut txn = self.db.txn(); + for msg in self.attempt_manager.register(VariantSignId::SlashReport, machines) { + SlashReportSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + txn.commit(); + } + + // Handle any messages sent to us + loop { + let mut txn = self.db.txn(); + let Some(msg) = CoordinatorToSlashReportSignerMessages::try_recv(&mut txn, self.session) + else { + break; + }; + iterated = true; + + match self.attempt_manager.handle(msg) { + Response::Messages(msgs) => { + for msg in msgs { + SlashReportSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + } + Response::Signature { id, signature } => { + assert_eq!(id, VariantSignId::SlashReport); + // Drain the channel + let slash_report = SlashReport::try_recv(&mut txn, self.session).unwrap(); + // Send the signature + SignedSlashReport::send( + &mut txn, + self.session, + &(slash_report, Signature::from(signature).0), + ); + } + } + + txn.commit(); + } + + Ok(iterated) + } + } +} diff --git a/processor/signers/src/transaction/db.rs b/processor/signers/src/transaction/db.rs new file mode 100644 index 00000000..a91881e7 --- /dev/null +++ b/processor/signers/src/transaction/db.rs @@ -0,0 +1,11 @@ +use serai_validator_sets_primitives::Session; + +use serai_db::{Get, DbTxn, create_db}; + +create_db! 
{ + SignersTransaction { + ActiveSigningProtocols: (session: Session) -> Vec<[u8; 32]>, + SerializedSignableTransactions: (id: [u8; 32]) -> Vec, + SerializedTransactions: (id: [u8; 32]) -> Vec, + } +} diff --git a/processor/signers/src/transaction/mod.rs b/processor/signers/src/transaction/mod.rs new file mode 100644 index 00000000..b62e7303 --- /dev/null +++ b/processor/signers/src/transaction/mod.rs @@ -0,0 +1,236 @@ +use core::future::Future; +use std::{ + collections::HashSet, + time::{Duration, Instant}, +}; + +use frost::dkg::ThresholdKeys; + +use serai_validator_sets_primitives::Session; + +use serai_db::{DbTxn, Db}; + +use messages::sign::VariantSignId; + +use primitives::task::ContinuallyRan; +use scheduler::{Transaction, SignableTransaction, TransactionFor, TransactionsToSign}; +use scanner::CompletedEventualities; + +use frost_attempt_manager::*; + +use crate::{ + db::{CoordinatorToTransactionSignerMessages, TransactionSignerToCoordinatorMessages}, + TransactionPublisher, +}; + +mod db; +use db::*; + +// Fetches transactions to sign and signs them. +pub(crate) struct TransactionSignerTask< + D: Db, + ST: SignableTransaction, + P: TransactionPublisher>, +> { + db: D, + publisher: P, + + session: Session, + keys: Vec>, + + active_signing_protocols: HashSet<[u8; 32]>, + attempt_manager: AttemptManager::PreprocessMachine>, + + last_publication: Instant, +} + +impl>> + TransactionSignerTask +{ + pub(crate) fn new( + db: D, + publisher: P, + session: Session, + keys: Vec>, + ) -> Self { + let mut active_signing_protocols = HashSet::new(); + let mut attempt_manager = AttemptManager::new( + db.clone(), + session, + keys.first().expect("creating a transaction signer with 0 keys").params().i(), + ); + + // Re-register all active signing protocols + for tx in ActiveSigningProtocols::get(&db, session).unwrap_or(vec![]) { + active_signing_protocols.insert(tx); + + let signable_transaction_buf = SerializedSignableTransactions::get(&db, tx).unwrap(); + let mut signable_transaction_buf = signable_transaction_buf.as_slice(); + let signable_transaction = ST::read(&mut signable_transaction_buf).unwrap(); + assert!(signable_transaction_buf.is_empty()); + assert_eq!(signable_transaction.id(), tx); + + let mut machines = Vec::with_capacity(keys.len()); + for keys in &keys { + machines.push(signable_transaction.clone().sign(keys.clone())); + } + attempt_manager.register(VariantSignId::Transaction(tx), machines); + } + + Self { + db, + publisher, + session, + keys, + active_signing_protocols, + attempt_manager, + last_publication: Instant::now(), + } + } +} + +impl>> ContinuallyRan + for TransactionSignerTask +{ + type Error = P::EphemeralError; + + fn run_iteration(&mut self) -> impl Send + Future> { + async { + let mut iterated = false; + + // Check for new transactions to sign + loop { + let mut txn = self.db.txn(); + let Some(tx) = TransactionsToSign::::try_recv(&mut txn, &self.keys[0].group_key()) + else { + break; + }; + iterated = true; + + // Save this to the database as a transaction to sign + self.active_signing_protocols.insert(tx.id()); + ActiveSigningProtocols::set( + &mut txn, + self.session, + &self.active_signing_protocols.iter().copied().collect(), + ); + { + let mut buf = Vec::with_capacity(256); + tx.write(&mut buf).unwrap(); + SerializedSignableTransactions::set(&mut txn, tx.id(), &buf); + } + + let mut machines = Vec::with_capacity(self.keys.len()); + for keys in &self.keys { + machines.push(tx.clone().sign(keys.clone())); + } + for msg in 
self.attempt_manager.register(VariantSignId::Transaction(tx.id()), machines) { + TransactionSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + + txn.commit(); + } + + // Check for completed Eventualities (meaning we should no longer sign for these transactions) + loop { + let mut txn = self.db.txn(); + let Some(id) = CompletedEventualities::try_recv(&mut txn, &self.keys[0].group_key()) else { + break; + }; + + /* + We may have yet to register this signing protocol. + + While `TransactionsToSign` is populated before `CompletedEventualities`, we could + theoretically have `TransactionsToSign` populated with a new transaction _while iterating + over `CompletedEventualities`_, and then have `CompletedEventualities` populated. In that + edge case, we will see the completion notification before we see the transaction. + + In such a case, we break (dropping the txn, re-queueing the completion notification). On + the task's next iteration, we'll process the transaction from `TransactionsToSign` and be + able to make progress. + */ + if !self.active_signing_protocols.remove(&id) { + break; + } + iterated = true; + + // Since it was, remove this as an active signing protocol + ActiveSigningProtocols::set( + &mut txn, + self.session, + &self.active_signing_protocols.iter().copied().collect(), + ); + // Clean up the database + SerializedSignableTransactions::del(&mut txn, id); + SerializedTransactions::del(&mut txn, id); + + // We retire with a txn so we either successfully flag this Eventuality as completed, and + // won't re-register it (making this retire safe), or we don't flag it, meaning we will + // re-register it, yet that's safe as we have yet to retire it + self.attempt_manager.retire(&mut txn, VariantSignId::Transaction(id)); + + txn.commit(); + } + + // Handle any messages sent to us + loop { + let mut txn = self.db.txn(); + let Some(msg) = CoordinatorToTransactionSignerMessages::try_recv(&mut txn, self.session) + else { + break; + }; + iterated = true; + + match self.attempt_manager.handle(msg) { + Response::Messages(msgs) => { + for msg in msgs { + TransactionSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + } + Response::Signature { id, signature: signed_tx } => { + let signed_tx: TransactionFor = signed_tx.into(); + + // Save this transaction to the database + { + let mut buf = Vec::with_capacity(256); + signed_tx.write(&mut buf).unwrap(); + SerializedTransactions::set( + &mut txn, + match id { + VariantSignId::Transaction(id) => id, + _ => panic!("TransactionSignerTask signed a non-transaction"), + }, + &buf, + ); + } + + match self.publisher.publish(signed_tx).await { + Ok(()) => {} + Err(e) => log::warn!("couldn't broadcast transaction: {e:?}"), + } + } + } + + txn.commit(); + } + + // If it's been five minutes since the last publication, republish the transactions for all + // active signing protocols + if Instant::now().duration_since(self.last_publication) > Duration::from_secs(5 * 60) { + for tx in &self.active_signing_protocols { + let Some(tx_buf) = SerializedTransactions::get(&self.db, *tx) else { continue }; + let mut tx_buf = tx_buf.as_slice(); + let tx = TransactionFor::::read(&mut tx_buf).unwrap(); + assert!(tx_buf.is_empty()); + + self.publisher.publish(tx).await?; + } + + self.last_publication = Instant::now(); + } + + Ok(iterated) + } + } +} diff --git a/processor/signers/src/wrapped_schnorrkel.rs b/processor/signers/src/wrapped_schnorrkel.rs new file mode 100644 index 00000000..a84b8d43 --- /dev/null +++ 
b/processor/signers/src/wrapped_schnorrkel.rs @@ -0,0 +1,86 @@ +use std::{ + collections::HashMap, + io::{self, Read}, +}; + +use rand_core::{RngCore, CryptoRng}; + +use ciphersuite::Ristretto; +use frost::{ + dkg::{Participant, ThresholdKeys}, + FrostError, + algorithm::Algorithm, + sign::*, +}; +use frost_schnorrkel::Schnorrkel; + +// This wraps a Schnorrkel sign machine into one with a preset message. +#[derive(Clone)] +pub(crate) struct WrappedSchnorrkelMachine(ThresholdKeys, &'static [u8], Vec); +impl WrappedSchnorrkelMachine { + pub(crate) fn new(keys: ThresholdKeys, context: &'static [u8], msg: Vec) -> Self { + Self(keys, context, msg) + } +} + +pub(crate) struct WrappedSchnorrkelSignMachine( + as PreprocessMachine>::SignMachine, + Vec, +); + +type Signature = as PreprocessMachine>::Signature; +impl PreprocessMachine for WrappedSchnorrkelMachine { + type Preprocess = as PreprocessMachine>::Preprocess; + type Signature = Signature; + type SignMachine = WrappedSchnorrkelSignMachine; + + fn preprocess( + self, + rng: &mut R, + ) -> (Self::SignMachine, Preprocess>::Addendum>) + { + let WrappedSchnorrkelMachine(keys, context, msg) = self; + let (machine, preprocess) = + AlgorithmMachine::new(Schnorrkel::new(context), keys).preprocess(rng); + (WrappedSchnorrkelSignMachine(machine, msg), preprocess) + } +} + +impl SignMachine for WrappedSchnorrkelSignMachine { + type Params = as SignMachine>::Params; + type Keys = as SignMachine>::Keys; + type Preprocess = + as SignMachine>::Preprocess; + type SignatureShare = + as SignMachine>::SignatureShare; + type SignatureMachine = + as SignMachine>::SignatureMachine; + + fn cache(self) -> CachedPreprocess { + unimplemented!() + } + + fn from_cache( + _algorithm: Schnorrkel, + _keys: ThresholdKeys, + _cache: CachedPreprocess, + ) -> (Self, Self::Preprocess) { + unimplemented!() + } + + fn read_preprocess(&self, reader: &mut R) -> io::Result { + self.0.read_preprocess(reader) + } + + fn sign( + self, + preprocesses: HashMap< + Participant, + Preprocess>::Addendum>, + >, + msg: &[u8], + ) -> Result<(Self::SignatureMachine, SignatureShare), FrostError> { + assert!(msg.is_empty()); + self.0.sign(preprocesses, &self.1) + } +} diff --git a/processor/src/additional_key.rs b/processor/src/additional_key.rs deleted file mode 100644 index f875950d..00000000 --- a/processor/src/additional_key.rs +++ /dev/null @@ -1,14 +0,0 @@ -use ciphersuite::Ciphersuite; - -use crate::networks::Network; - -// Generate a static additional key for a given chain in a globally consistent manner -// Doesn't consider the current group key to increase the simplicity of verifying Serai's status -// Takes an index, k, to support protocols which use multiple secondary keys -// Presumably a view key -pub fn additional_key(k: u64) -> ::F { - ::hash_to_F( - b"Serai DEX Additional Key", - &[N::ID.as_bytes(), &k.to_le_bytes()].concat(), - ) -} diff --git a/processor/src/batch_signer.rs b/processor/src/batch_signer.rs deleted file mode 100644 index 64cbb8a0..00000000 --- a/processor/src/batch_signer.rs +++ /dev/null @@ -1,421 +0,0 @@ -use core::{marker::PhantomData, fmt}; -use std::collections::HashMap; - -use rand_core::OsRng; - -use frost::{ - curve::Ristretto, - ThresholdKeys, FrostError, - algorithm::Algorithm, - sign::{ - Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, - AlgorithmSignMachine, AlgorithmSignatureMachine, - }, -}; -use frost_schnorrkel::Schnorrkel; - -use log::{info, debug, warn}; - -use serai_client::{ - primitives::{ExternalNetworkId, 
BlockHash}, - in_instructions::primitives::{Batch, SignedBatch, batch_message}, - validator_sets::primitives::Session, -}; - -use messages::coordinator::*; -use crate::{Get, DbTxn, Db, create_db}; - -create_db!( - BatchSignerDb { - CompletedDb: (id: u32) -> (), - AttemptDb: (id: u32, attempt: u32) -> (), - BatchDb: (block: BlockHash) -> SignedBatch - } -); - -type Preprocess = as PreprocessMachine>::Preprocess; -type SignatureShare = as SignMachine< - >::Signature, ->>::SignatureShare; - -pub struct BatchSigner { - db: PhantomData, - - network: ExternalNetworkId, - session: Session, - keys: Vec>, - - signable: HashMap, - attempt: HashMap, - #[allow(clippy::type_complexity)] - preprocessing: HashMap>, Vec)>, - #[allow(clippy::type_complexity)] - signing: HashMap, Vec)>, -} - -impl fmt::Debug for BatchSigner { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt - .debug_struct("BatchSigner") - .field("signable", &self.signable) - .field("attempt", &self.attempt) - .finish_non_exhaustive() - } -} - -impl BatchSigner { - pub fn new( - network: ExternalNetworkId, - session: Session, - keys: Vec>, - ) -> BatchSigner { - assert!(!keys.is_empty()); - BatchSigner { - db: PhantomData, - - network, - session, - keys, - - signable: HashMap::new(), - attempt: HashMap::new(), - preprocessing: HashMap::new(), - signing: HashMap::new(), - } - } - - fn verify_id(&self, id: &SubstrateSignId) -> Result<(Session, u32, u32), ()> { - let SubstrateSignId { session, id, attempt } = id; - let SubstrateSignableId::Batch(id) = id else { panic!("BatchSigner handed non-Batch") }; - - assert_eq!(session, &self.session); - - // Check the attempt lines up - match self.attempt.get(id) { - // If we don't have an attempt logged, it's because the coordinator is faulty OR because we - // rebooted OR we detected the signed batch on chain - // The latter is the expected flow for batches not actively being participated in - None => { - warn!("not attempting batch {id} #{attempt}"); - Err(())?; - } - Some(our_attempt) => { - if attempt != our_attempt { - warn!("sent signing data for batch {id} #{attempt} yet we have attempt #{our_attempt}"); - Err(())?; - } - } - } - - Ok((*session, *id, *attempt)) - } - - #[must_use] - fn attempt( - &mut self, - txn: &mut D::Transaction<'_>, - id: u32, - attempt: u32, - ) -> Option { - // See above commentary for why this doesn't emit SignedBatch - if CompletedDb::get(txn, id).is_some() { - return None; - } - - // Check if we're already working on this attempt - if let Some(curr_attempt) = self.attempt.get(&id) { - if curr_attempt >= &attempt { - warn!("told to attempt {id} #{attempt} yet we're already working on {curr_attempt}"); - return None; - } - } - - // Start this attempt - let block = if let Some(batch) = self.signable.get(&id) { - batch.block - } else { - warn!("told to attempt signing a batch we aren't currently signing for"); - return None; - }; - - // Delete any existing machines - self.preprocessing.remove(&id); - self.signing.remove(&id); - - // Update the attempt number - self.attempt.insert(id, attempt); - - info!("signing batch {id} #{attempt}"); - - // If we reboot mid-sign, the current design has us abort all signs and wait for latter - // attempts/new signing protocols - // This is distinct from the DKG which will continue DKG sessions, even on reboot - // This is because signing is tolerant of failures of up to 1/3rd of the group - // The DKG requires 100% participation - // While we could apply similar tricks as the DKG (a seeded RNG) to achieve support for 
- // reboots, it's not worth the complexity when messing up here leaks our secret share - // - // Despite this, on reboot, we'll get told of active signing items, and may be in this - // branch again for something we've already attempted - // - // Only run if this hasn't already been attempted - // TODO: This isn't complete as this txn may not be committed with the expected timing - if AttemptDb::get(txn, id, attempt).is_some() { - warn!( - "already attempted batch {id}, attempt #{attempt}. this is an error if we didn't reboot" - ); - return None; - } - AttemptDb::set(txn, id, attempt, &()); - - let mut machines = vec![]; - let mut preprocesses = vec![]; - let mut serialized_preprocesses = vec![]; - for keys in &self.keys { - // b"substrate" is a literal from sp-core - let machine = AlgorithmMachine::new(Schnorrkel::new(b"substrate"), keys.clone()); - - let (machine, preprocess) = machine.preprocess(&mut OsRng); - machines.push(machine); - serialized_preprocesses.push(preprocess.serialize().try_into().unwrap()); - preprocesses.push(preprocess); - } - self.preprocessing.insert(id, (machines, preprocesses)); - - let id = SubstrateSignId { session: self.session, id: SubstrateSignableId::Batch(id), attempt }; - - // Broadcast our preprocesses - Some(ProcessorMessage::BatchPreprocess { id, block, preprocesses: serialized_preprocesses }) - } - - #[must_use] - pub fn sign(&mut self, txn: &mut D::Transaction<'_>, batch: Batch) -> Option { - debug_assert_eq!(self.network, batch.network); - let id = batch.id; - if CompletedDb::get(txn, id).is_some() { - debug!("Sign batch order for ID we've already completed signing"); - // See batch_signed for commentary on why this simply returns - return None; - } - - self.signable.insert(id, batch); - self.attempt(txn, id, 0) - } - - #[must_use] - pub fn handle( - &mut self, - txn: &mut D::Transaction<'_>, - msg: CoordinatorMessage, - ) -> Option { - match msg { - CoordinatorMessage::CosignSubstrateBlock { .. } => { - panic!("BatchSigner passed CosignSubstrateBlock") - } - - CoordinatorMessage::SignSlashReport { .. } => { - panic!("Cosigner passed SignSlashReport") - } - - CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => { - let (session, id, attempt) = self.verify_id(&id).ok()?; - - let substrate_sign_id = - SubstrateSignId { session, id: SubstrateSignableId::Batch(id), attempt }; - - let (machines, our_preprocesses) = match self.preprocessing.remove(&id) { - // Either rebooted or RPC error, or some invariant - None => { - warn!("not preprocessing for {id}. 
this is an error if we didn't reboot"); - return None; - } - Some(preprocess) => preprocess, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = preprocesses.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice(); - let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else { - return Some( - (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) - .into(), - ); - }; - if !preprocess_ref.is_empty() { - return Some( - (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) - .into(), - ); - } - parsed.insert(l, res); - } - let preprocesses = parsed; - - // Only keep a single machine as we only need one to get the signature - let mut signature_machine = None; - let mut shares = vec![]; - let mut serialized_shares = vec![]; - for (m, machine) in machines.into_iter().enumerate() { - let mut preprocesses = preprocesses.clone(); - for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() { - if i != m { - assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none()); - } - } - - let (machine, share) = match machine - .sign(preprocesses, &batch_message(&self.signable[&id])) - { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some( - (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) - .into(), - ) - } - }, - }; - if m == 0 { - signature_machine = Some(machine); - } - - let mut share_bytes = [0; 32]; - share_bytes.copy_from_slice(&share.serialize()); - serialized_shares.push(share_bytes); - - shares.push(share); - } - self.signing.insert(id, (signature_machine.unwrap(), shares)); - - // Broadcast our shares - Some( - (ProcessorMessage::SubstrateShare { id: substrate_sign_id, shares: serialized_shares }) - .into(), - ) - } - - CoordinatorMessage::SubstrateShares { id, shares } => { - let (session, id, attempt) = self.verify_id(&id).ok()?; - - let substrate_sign_id = - SubstrateSignId { session, id: SubstrateSignableId::Batch(id), attempt }; - - let (machine, our_shares) = match self.signing.remove(&id) { - // Rebooted, RPC error, or some invariant - None => { - // If preprocessing has this ID, it means we were never sent the preprocess by the - // coordinator - if self.preprocessing.contains_key(&id) { - panic!("never preprocessed yet signing?"); - } - - warn!("not preprocessing for {id}. 
this is an error if we didn't reboot"); - return None; - } - Some(signing) => signing, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = shares.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut share_ref = shares.get(&l).unwrap().as_slice(); - let Ok(res) = machine.read_share(&mut share_ref) else { - return Some( - (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) - .into(), - ); - }; - if !share_ref.is_empty() { - return Some( - (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) - .into(), - ); - } - parsed.insert(l, res); - } - let mut shares = parsed; - - for (i, our_share) in our_shares.into_iter().enumerate().skip(1) { - assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); - } - - let sig = match machine.complete(shares) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some( - (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) - .into(), - ) - } - }, - }; - - info!("signed batch {id} with attempt #{attempt}"); - - let batch = - SignedBatch { batch: self.signable.remove(&id).unwrap(), signature: sig.into() }; - - // Save the batch in case it's needed for recovery - BatchDb::set(txn, batch.batch.block, &batch); - CompletedDb::set(txn, id, &()); - - // Stop trying to sign for this batch - assert!(self.attempt.remove(&id).is_some()); - assert!(self.preprocessing.remove(&id).is_none()); - assert!(self.signing.remove(&id).is_none()); - - Some((messages::substrate::ProcessorMessage::SignedBatch { batch }).into()) - } - - CoordinatorMessage::BatchReattempt { id } => { - let SubstrateSignableId::Batch(batch_id) = id.id else { - panic!("BatchReattempt passed non-Batch ID") - }; - self.attempt(txn, batch_id, id.attempt).map(Into::into) - } - } - } - - pub fn batch_signed(&mut self, txn: &mut D::Transaction<'_>, id: u32) { - // Stop trying to sign for this batch - CompletedDb::set(txn, id, &()); - - self.signable.remove(&id); - self.attempt.remove(&id); - self.preprocessing.remove(&id); - self.signing.remove(&id); - - // This doesn't emit SignedBatch because it doesn't have access to the SignedBatch - // This function is expected to only be called once Substrate acknowledges this block, - // which means its batch must have been signed - // While a successive batch's signing would also cause this block to be acknowledged, Substrate - // guarantees a batch's ordered inclusion - - // This also doesn't return any messages since all mutation from the Batch being signed happens - // on the substrate::CoordinatorMessage::SubstrateBlock message (which SignedBatch is meant to - // end up triggering) - } -} diff --git a/processor/src/coordinator.rs b/processor/src/coordinator.rs deleted file mode 100644 index 26786e30..00000000 --- a/processor/src/coordinator.rs +++ /dev/null @@ -1,43 +0,0 @@ -use messages::{ProcessorMessage, CoordinatorMessage}; - -use message_queue::{Service, Metadata, client::MessageQueue}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Message { - pub id: u64, - pub msg: CoordinatorMessage, -} - -#[async_trait::async_trait] -pub trait Coordinator { - async fn send(&mut self, msg: impl Send + Into); - async 
fn recv(&mut self) -> Message; - async fn ack(&mut self, msg: Message); -} - -#[async_trait::async_trait] -impl Coordinator for MessageQueue { - async fn send(&mut self, msg: impl Send + Into) { - let msg: ProcessorMessage = msg.into(); - let metadata = Metadata { from: self.service, to: Service::Coordinator, intent: msg.intent() }; - let msg = borsh::to_vec(&msg).unwrap(); - - self.queue(metadata, msg).await; - } - - async fn recv(&mut self) -> Message { - let msg = self.next(Service::Coordinator).await; - - let id = msg.id; - - // Deserialize it into a CoordinatorMessage - let msg: CoordinatorMessage = - borsh::from_slice(&msg.msg).expect("message wasn't a borsh-encoded CoordinatorMessage"); - - return Message { id, msg }; - } - - async fn ack(&mut self, msg: Message) { - MessageQueue::ack(self, Service::Coordinator, msg.id).await - } -} diff --git a/processor/src/cosigner.rs b/processor/src/cosigner.rs deleted file mode 100644 index a9fb6ccc..00000000 --- a/processor/src/cosigner.rs +++ /dev/null @@ -1,296 +0,0 @@ -use core::fmt; -use std::collections::HashMap; - -use rand_core::OsRng; - -use frost::{ - curve::Ristretto, - ThresholdKeys, FrostError, - algorithm::Algorithm, - sign::{ - Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, - AlgorithmSignMachine, AlgorithmSignatureMachine, - }, -}; -use frost_schnorrkel::Schnorrkel; - -use log::{info, warn}; - -use serai_client::validator_sets::primitives::Session; - -use messages::coordinator::*; -use crate::{Get, DbTxn, create_db}; - -create_db! { - CosignerDb { - Completed: (id: [u8; 32]) -> (), - Attempt: (id: [u8; 32], attempt: u32) -> (), - } -} - -type Preprocess = as PreprocessMachine>::Preprocess; -type SignatureShare = as SignMachine< - >::Signature, ->>::SignatureShare; - -pub struct Cosigner { - session: Session, - keys: Vec>, - - block_number: u64, - id: [u8; 32], - attempt: u32, - #[allow(clippy::type_complexity)] - preprocessing: Option<(Vec>, Vec)>, - #[allow(clippy::type_complexity)] - signing: Option<(AlgorithmSignatureMachine, Vec)>, -} - -impl fmt::Debug for Cosigner { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt - .debug_struct("Cosigner") - .field("session", &self.session) - .field("block_number", &self.block_number) - .field("id", &self.id) - .field("attempt", &self.attempt) - .field("preprocessing", &self.preprocessing.is_some()) - .field("signing", &self.signing.is_some()) - .finish_non_exhaustive() - } -} - -impl Cosigner { - pub fn new( - txn: &mut impl DbTxn, - session: Session, - keys: Vec>, - block_number: u64, - id: [u8; 32], - attempt: u32, - ) -> Option<(Cosigner, ProcessorMessage)> { - assert!(!keys.is_empty()); - - if Completed::get(txn, id).is_some() { - return None; - } - - if Attempt::get(txn, id, attempt).is_some() { - warn!( - "already attempted cosigning {}, attempt #{}. 
this is an error if we didn't reboot", - hex::encode(id), - attempt, - ); - return None; - } - Attempt::set(txn, id, attempt, &()); - - info!("cosigning block {} with attempt #{}", hex::encode(id), attempt); - - let mut machines = vec![]; - let mut preprocesses = vec![]; - let mut serialized_preprocesses = vec![]; - for keys in &keys { - // b"substrate" is a literal from sp-core - let machine = AlgorithmMachine::new(Schnorrkel::new(b"substrate"), keys.clone()); - - let (machine, preprocess) = machine.preprocess(&mut OsRng); - machines.push(machine); - serialized_preprocesses.push(preprocess.serialize().try_into().unwrap()); - preprocesses.push(preprocess); - } - let preprocessing = Some((machines, preprocesses)); - - let substrate_sign_id = - SubstrateSignId { session, id: SubstrateSignableId::CosigningSubstrateBlock(id), attempt }; - - Some(( - Cosigner { session, keys, block_number, id, attempt, preprocessing, signing: None }, - ProcessorMessage::CosignPreprocess { - id: substrate_sign_id, - preprocesses: serialized_preprocesses, - }, - )) - } - - #[must_use] - pub fn handle( - &mut self, - txn: &mut impl DbTxn, - msg: CoordinatorMessage, - ) -> Option { - match msg { - CoordinatorMessage::CosignSubstrateBlock { .. } => { - panic!("Cosigner passed CosignSubstrateBlock") - } - - CoordinatorMessage::SignSlashReport { .. } => { - panic!("Cosigner passed SignSlashReport") - } - - CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => { - assert_eq!(id.session, self.session); - let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else { - panic!("cosigner passed Batch") - }; - if block != self.id { - panic!("given preprocesses for a distinct block than cosigner is signing") - } - if id.attempt != self.attempt { - panic!("given preprocesses for a distinct attempt than cosigner is signing") - } - - let (machines, our_preprocesses) = match self.preprocessing.take() { - // Either rebooted or RPC error, or some invariant - None => { - warn!( - "not preprocessing for {}. 
this is an error if we didn't reboot", - hex::encode(block), - ); - return None; - } - Some(preprocess) => preprocess, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = preprocesses.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice(); - let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - }; - if !preprocess_ref.is_empty() { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - } - parsed.insert(l, res); - } - let preprocesses = parsed; - - // Only keep a single machine as we only need one to get the signature - let mut signature_machine = None; - let mut shares = vec![]; - let mut serialized_shares = vec![]; - for (m, machine) in machines.into_iter().enumerate() { - let mut preprocesses = preprocesses.clone(); - for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() { - if i != m { - assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none()); - } - } - - let (machine, share) = - match machine.sign(preprocesses, &cosign_block_msg(self.block_number, self.id)) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) - } - }, - }; - if m == 0 { - signature_machine = Some(machine); - } - - let mut share_bytes = [0; 32]; - share_bytes.copy_from_slice(&share.serialize()); - serialized_shares.push(share_bytes); - - shares.push(share); - } - self.signing = Some((signature_machine.unwrap(), shares)); - - // Broadcast our shares - Some(ProcessorMessage::SubstrateShare { id, shares: serialized_shares }) - } - - CoordinatorMessage::SubstrateShares { id, shares } => { - assert_eq!(id.session, self.session); - let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else { - panic!("cosigner passed Batch") - }; - if block != self.id { - panic!("given preprocesses for a distinct block than cosigner is signing") - } - if id.attempt != self.attempt { - panic!("given preprocesses for a distinct attempt than cosigner is signing") - } - - let (machine, our_shares) = match self.signing.take() { - // Rebooted, RPC error, or some invariant - None => { - // If preprocessing has this ID, it means we were never sent the preprocess by the - // coordinator - if self.preprocessing.is_some() { - panic!("never preprocessed yet signing?"); - } - - warn!( - "not preprocessing for {}. 
this is an error if we didn't reboot", - hex::encode(block) - ); - return None; - } - Some(signing) => signing, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = shares.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut share_ref = shares.get(&l).unwrap().as_slice(); - let Ok(res) = machine.read_share(&mut share_ref) else { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - }; - if !share_ref.is_empty() { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - } - parsed.insert(l, res); - } - let mut shares = parsed; - - for (i, our_share) in our_shares.into_iter().enumerate().skip(1) { - assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); - } - - let sig = match machine.complete(shares) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) - } - }, - }; - - info!("cosigned {} with attempt #{}", hex::encode(block), id.attempt); - - Completed::set(txn, block, &()); - - Some(ProcessorMessage::CosignedBlock { - block_number: self.block_number, - block, - signature: sig.to_bytes().to_vec(), - }) - } - CoordinatorMessage::BatchReattempt { .. } => panic!("BatchReattempt passed to Cosigner"), - } - } -} diff --git a/processor/src/db.rs b/processor/src/db.rs deleted file mode 100644 index ffd7c43a..00000000 --- a/processor/src/db.rs +++ /dev/null @@ -1,43 +0,0 @@ -use std::io::Read; - -use scale::{Encode, Decode}; -use serai_client::validator_sets::primitives::{Session, KeyPair}; - -pub use serai_db::*; - -use crate::networks::{Block, Network}; - -create_db!( - MainDb { - HandledMessageDb: (id: u64) -> (), - PendingActivationsDb: () -> Vec - } -); - -impl PendingActivationsDb { - pub fn pending_activation( - getter: &impl Get, - ) -> Option<(>::Id, Session, KeyPair)> { - if let Some(bytes) = Self::get(getter) { - if !bytes.is_empty() { - let mut slice = bytes.as_slice(); - let (session, key_pair) = <(Session, KeyPair)>::decode(&mut slice).unwrap(); - let mut block_before_queue_block = >::Id::default(); - slice.read_exact(block_before_queue_block.as_mut()).unwrap(); - assert!(slice.is_empty()); - return Some((block_before_queue_block, session, key_pair)); - } - } - None - } - pub fn set_pending_activation( - txn: &mut impl DbTxn, - block_before_queue_block: &>::Id, - session: Session, - key_pair: KeyPair, - ) { - let mut buf = (session, key_pair).encode(); - buf.extend(block_before_queue_block.as_ref()); - Self::set(txn, &buf); - } -} diff --git a/processor/src/key_gen.rs b/processor/src/key_gen.rs deleted file mode 100644 index 297db194..00000000 --- a/processor/src/key_gen.rs +++ /dev/null @@ -1,605 +0,0 @@ -use std::collections::HashMap; - -use zeroize::Zeroizing; - -use rand_core::SeedableRng; -use rand_chacha::ChaCha20Rng; - -use transcript::{Transcript, RecommendedTranscript}; -use ciphersuite::group::GroupEncoding; -use frost::{ - curve::{Ciphersuite, Ristretto}, - dkg::{ - DkgError, Participant, ThresholdParams, ThresholdCore, ThresholdKeys, encryption::*, pedpop::*, - }, -}; - -use log::info; - -use serai_client::validator_sets::primitives::{Session, KeyPair}; -use messages::key_gen::*; - -use 
crate::{Get, DbTxn, Db, create_db, networks::Network}; - -#[derive(Debug)] -pub struct KeyConfirmed { - pub substrate_keys: Vec>, - pub network_keys: Vec>, -} - -create_db!( - KeyGenDb { - ParamsDb: (session: &Session, attempt: u32) -> (ThresholdParams, u16), - // Not scoped to the set since that'd have latter attempts overwrite former - // A former attempt may become the finalized attempt, even if it doesn't in a timely manner - // Overwriting its commitments would be accordingly poor - CommitmentsDb: (key: &KeyGenId) -> HashMap>, - GeneratedKeysDb: (session: &Session, substrate_key: &[u8; 32], network_key: &[u8]) -> Vec, - // These do assume a key is only used once across sets, which holds true so long as a single - // participant is honest in their execution of the protocol - KeysDb: (network_key: &[u8]) -> Vec, - SessionDb: (network_key: &[u8]) -> Session, - NetworkKeyDb: (session: Session) -> Vec, - } -); - -impl GeneratedKeysDb { - #[allow(clippy::type_complexity)] - fn read_keys( - getter: &impl Get, - key: &[u8], - ) -> Option<(Vec, (Vec>, Vec>))> { - let keys_vec = getter.get(key)?; - let mut keys_ref: &[u8] = keys_vec.as_ref(); - - let mut substrate_keys = vec![]; - let mut network_keys = vec![]; - while !keys_ref.is_empty() { - substrate_keys.push(ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap())); - let mut these_network_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap()); - N::tweak_keys(&mut these_network_keys); - network_keys.push(these_network_keys); - } - Some((keys_vec, (substrate_keys, network_keys))) - } - - fn save_keys( - txn: &mut impl DbTxn, - id: &KeyGenId, - substrate_keys: &[ThresholdCore], - network_keys: &[ThresholdKeys], - ) { - let mut keys = Zeroizing::new(vec![]); - for (substrate_keys, network_keys) in substrate_keys.iter().zip(network_keys) { - keys.extend(substrate_keys.serialize().as_slice()); - keys.extend(network_keys.serialize().as_slice()); - } - txn.put( - Self::key( - &id.session, - &substrate_keys[0].group_key().to_bytes(), - network_keys[0].group_key().to_bytes().as_ref(), - ), - keys, - ); - } -} - -impl KeysDb { - fn confirm_keys( - txn: &mut impl DbTxn, - session: Session, - key_pair: &KeyPair, - ) -> (Vec>, Vec>) { - let (keys_vec, keys) = GeneratedKeysDb::read_keys::( - txn, - &GeneratedKeysDb::key(&session, &key_pair.0 .0, key_pair.1.as_ref()), - ) - .unwrap(); - assert_eq!(key_pair.0 .0, keys.0[0].group_key().to_bytes()); - assert_eq!( - { - let network_key: &[u8] = key_pair.1.as_ref(); - network_key - }, - keys.1[0].group_key().to_bytes().as_ref(), - ); - txn.put(Self::key(key_pair.1.as_ref()), keys_vec); - NetworkKeyDb::set(txn, session, &key_pair.1.clone().into_inner()); - SessionDb::set(txn, key_pair.1.as_ref(), &session); - keys - } - - #[allow(clippy::type_complexity)] - fn keys( - getter: &impl Get, - network_key: &::G, - ) -> Option<(Session, (Vec>, Vec>))> { - let res = - GeneratedKeysDb::read_keys::(getter, &Self::key(network_key.to_bytes().as_ref()))?.1; - assert_eq!(&res.1[0].group_key(), network_key); - Some((SessionDb::get(getter, network_key.to_bytes().as_ref()).unwrap(), res)) - } - - pub fn substrate_keys_by_session( - getter: &impl Get, - session: Session, - ) -> Option>> { - let network_key = NetworkKeyDb::get(getter, session)?; - Some(GeneratedKeysDb::read_keys::(getter, &Self::key(&network_key))?.1 .0) - } -} - -type SecretShareMachines = - Vec<(SecretShareMachine, SecretShareMachine<::Curve>)>; -type KeyMachines = Vec<(KeyMachine, KeyMachine<::Curve>)>; - -#[derive(Debug)] -pub struct 
KeyGen { - db: D, - entropy: Zeroizing<[u8; 32]>, - - active_commit: HashMap, Vec>)>, - #[allow(clippy::type_complexity)] - active_share: HashMap, Vec>>)>, -} - -impl KeyGen { - #[allow(clippy::new_ret_no_self)] - pub fn new(db: D, entropy: Zeroizing<[u8; 32]>) -> KeyGen { - KeyGen { db, entropy, active_commit: HashMap::new(), active_share: HashMap::new() } - } - - pub fn in_set(&self, session: &Session) -> bool { - // We determine if we're in set using if we have the parameters for a session's key generation - // The usage of 0 for the attempt is valid so long as we aren't malicious and accordingly - // aren't fatally slashed - // TODO: Revisit once we do DKG removals for being offline - ParamsDb::get(&self.db, session, 0).is_some() - } - - #[allow(clippy::type_complexity)] - pub fn keys( - &self, - key: &::G, - ) -> Option<(Session, (Vec>, Vec>))> { - // This is safe, despite not having a txn, since it's a static value - // It doesn't change over time/in relation to other operations - KeysDb::keys::(&self.db, key) - } - - pub fn substrate_keys_by_session( - &self, - session: Session, - ) -> Option>> { - KeysDb::substrate_keys_by_session::(&self.db, session) - } - - pub fn handle( - &mut self, - txn: &mut D::Transaction<'_>, - msg: CoordinatorMessage, - ) -> ProcessorMessage { - const SUBSTRATE_KEY_CONTEXT: &str = "substrate"; - const NETWORK_KEY_CONTEXT: &str = "network"; - let context = |id: &KeyGenId, key| { - // TODO2: Also embed the chain ID/genesis block - format!( - "Serai Key Gen. Session: {:?}, Network: {:?}, Attempt: {}, Key: {}", - id.session, - N::NETWORK, - id.attempt, - key, - ) - }; - - let rng = |label, id: KeyGenId| { - let mut transcript = RecommendedTranscript::new(label); - transcript.append_message(b"entropy", &self.entropy); - transcript.append_message(b"context", context(&id, "rng")); - ChaCha20Rng::from_seed(transcript.rng_seed(b"rng")) - }; - let coefficients_rng = |id| rng(b"Key Gen Coefficients", id); - let secret_shares_rng = |id| rng(b"Key Gen Secret Shares", id); - let share_rng = |id| rng(b"Key Gen Share", id); - - let key_gen_machines = |id, params: ThresholdParams, shares| { - let mut rng = coefficients_rng(id); - let mut machines = vec![]; - let mut commitments = vec![]; - for s in 0 .. 
shares { - let params = ThresholdParams::new( - params.t(), - params.n(), - Participant::new(u16::from(params.i()) + s).unwrap(), - ) - .unwrap(); - let substrate = KeyGenMachine::new(params, context(&id, SUBSTRATE_KEY_CONTEXT)) - .generate_coefficients(&mut rng); - let network = KeyGenMachine::new(params, context(&id, NETWORK_KEY_CONTEXT)) - .generate_coefficients(&mut rng); - machines.push((substrate.0, network.0)); - let mut serialized = vec![]; - substrate.1.write(&mut serialized).unwrap(); - network.1.write(&mut serialized).unwrap(); - commitments.push(serialized); - } - (machines, commitments) - }; - - let secret_share_machines = |id, - params: ThresholdParams, - machines: SecretShareMachines, - commitments: HashMap>| - -> Result<_, ProcessorMessage> { - let mut rng = secret_shares_rng(id); - - #[allow(clippy::type_complexity)] - fn handle_machine( - rng: &mut ChaCha20Rng, - id: KeyGenId, - machine: SecretShareMachine, - commitments: HashMap>>, - ) -> Result< - (KeyMachine, HashMap>>), - ProcessorMessage, - > { - match machine.generate_secret_shares(rng, commitments) { - Ok(res) => Ok(res), - Err(e) => match e { - DkgError::ZeroParameter(_, _) | - DkgError::InvalidThreshold(_, _) | - DkgError::InvalidParticipant(_, _) | - DkgError::InvalidSigningSet | - DkgError::InvalidShare { .. } => unreachable!("{e:?}"), - DkgError::InvalidParticipantQuantity(_, _) | - DkgError::DuplicatedParticipant(_) | - DkgError::MissingParticipant(_) => { - panic!("coordinator sent invalid DKG commitments: {e:?}") - } - DkgError::InvalidCommitments(i) => { - Err(ProcessorMessage::InvalidCommitments { id, faulty: i })? - } - }, - } - } - - let mut substrate_commitments = HashMap::new(); - let mut network_commitments = HashMap::new(); - for i in 1 ..= params.n() { - let i = Participant::new(i).unwrap(); - let mut commitments = commitments[&i].as_slice(); - substrate_commitments.insert( - i, - EncryptionKeyMessage::>::read(&mut commitments, params) - .map_err(|_| ProcessorMessage::InvalidCommitments { id, faulty: i })?, - ); - network_commitments.insert( - i, - EncryptionKeyMessage::>::read(&mut commitments, params) - .map_err(|_| ProcessorMessage::InvalidCommitments { id, faulty: i })?, - ); - if !commitments.is_empty() { - // Malicious Participant included extra bytes in their commitments - // (a potential DoS attack) - Err(ProcessorMessage::InvalidCommitments { id, faulty: i })?; - } - } - - let mut key_machines = vec![]; - let mut shares = vec![]; - for (m, (substrate_machine, network_machine)) in machines.into_iter().enumerate() { - let actual_i = Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap(); - - let mut substrate_commitments = substrate_commitments.clone(); - substrate_commitments.remove(&actual_i); - let (substrate_machine, mut substrate_shares) = - handle_machine::(&mut rng, id, substrate_machine, substrate_commitments)?; - - let mut network_commitments = network_commitments.clone(); - network_commitments.remove(&actual_i); - let (network_machine, network_shares) = - handle_machine(&mut rng, id, network_machine, network_commitments.clone())?; - - key_machines.push((substrate_machine, network_machine)); - - let mut these_shares: HashMap<_, _> = - substrate_shares.drain().map(|(i, share)| (i, share.serialize())).collect(); - for (i, share) in &mut these_shares { - share.extend(network_shares[i].serialize()); - } - shares.push(these_shares); - } - Ok((key_machines, shares)) - }; - - match msg { - CoordinatorMessage::GenerateKey { id, params, shares } => { - info!("Generating 
new key. ID: {id:?} Params: {params:?} Shares: {shares}"); - - // Remove old attempts - if self.active_commit.remove(&id.session).is_none() && - self.active_share.remove(&id.session).is_none() - { - // If we haven't handled this session before, save the params - ParamsDb::set(txn, &id.session, id.attempt, &(params, shares)); - } - - let (machines, commitments) = key_gen_machines(id, params, shares); - self.active_commit.insert(id.session, (machines, commitments.clone())); - - ProcessorMessage::Commitments { id, commitments } - } - - CoordinatorMessage::Commitments { id, mut commitments } => { - info!("Received commitments for {:?}", id); - - if self.active_share.contains_key(&id.session) { - // We should've been told of a new attempt before receiving commitments again - // The coordinator is either missing messages or repeating itself - // Either way, it's faulty - panic!("commitments when already handled commitments"); - } - - let (params, share_quantity) = ParamsDb::get(txn, &id.session, id.attempt).unwrap(); - - // Unwrap the machines, rebuilding them if we didn't have them in our cache - // We won't if the processor rebooted - // This *may* be inconsistent if we receive a KeyGen for attempt x, then commitments for - // attempt y - // The coordinator is trusted to be proper in this regard - let (prior, our_commitments) = self - .active_commit - .remove(&id.session) - .unwrap_or_else(|| key_gen_machines(id, params, share_quantity)); - - for (i, our_commitments) in our_commitments.into_iter().enumerate() { - assert!(commitments - .insert( - Participant::new(u16::from(params.i()) + u16::try_from(i).unwrap()).unwrap(), - our_commitments, - ) - .is_none()); - } - - CommitmentsDb::set(txn, &id, &commitments); - - match secret_share_machines(id, params, prior, commitments) { - Ok((machines, shares)) => { - self.active_share.insert(id.session, (machines, shares.clone())); - ProcessorMessage::Shares { id, shares } - } - Err(e) => e, - } - } - - CoordinatorMessage::Shares { id, shares } => { - info!("Received shares for {:?}", id); - - let (params, share_quantity) = ParamsDb::get(txn, &id.session, id.attempt).unwrap(); - - // Same commentary on inconsistency as above exists - let (machines, our_shares) = self.active_share.remove(&id.session).unwrap_or_else(|| { - let prior = key_gen_machines(id, params, share_quantity).0; - let (machines, shares) = - secret_share_machines(id, params, prior, CommitmentsDb::get(txn, &id).unwrap()) - .expect("got Shares for a key gen which faulted"); - (machines, shares) - }); - - let mut rng = share_rng(id); - - fn handle_machine( - rng: &mut ChaCha20Rng, - id: KeyGenId, - // These are the params of our first share, not this machine's shares - params: ThresholdParams, - m: usize, - machine: KeyMachine, - shares_ref: &mut HashMap, - ) -> Result, ProcessorMessage> { - let params = ThresholdParams::new( - params.t(), - params.n(), - Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap(), - ) - .unwrap(); - - // Parse the shares - let mut shares = HashMap::new(); - for i in 1 ..= params.n() { - let i = Participant::new(i).unwrap(); - let Some(share) = shares_ref.get_mut(&i) else { continue }; - shares.insert( - i, - EncryptedMessage::>::read(share, params).map_err(|_| { - ProcessorMessage::InvalidShare { id, accuser: params.i(), faulty: i, blame: None } - })?, - ); - } - - Ok( - (match machine.calculate_share(rng, shares) { - Ok(res) => res, - Err(e) => match e { - DkgError::ZeroParameter(_, _) | - DkgError::InvalidThreshold(_, _) | - 
DkgError::InvalidParticipant(_, _) | - DkgError::InvalidSigningSet | - DkgError::InvalidCommitments(_) => unreachable!("{e:?}"), - DkgError::InvalidParticipantQuantity(_, _) | - DkgError::DuplicatedParticipant(_) | - DkgError::MissingParticipant(_) => { - panic!("coordinator sent invalid DKG shares: {e:?}") - } - DkgError::InvalidShare { participant, blame } => { - Err(ProcessorMessage::InvalidShare { - id, - accuser: params.i(), - faulty: participant, - blame: Some(blame.map(|blame| blame.serialize())).flatten(), - })? - } - }, - }) - .complete(), - ) - } - - let mut substrate_keys = vec![]; - let mut network_keys = vec![]; - for (m, machines) in machines.into_iter().enumerate() { - let mut shares_ref: HashMap = - shares[m].iter().map(|(i, shares)| (*i, shares.as_ref())).collect(); - for (i, our_shares) in our_shares.iter().enumerate() { - if m != i { - assert!(shares_ref - .insert( - Participant::new(u16::from(params.i()) + u16::try_from(i).unwrap()).unwrap(), - our_shares - [&Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap()] - .as_ref(), - ) - .is_none()); - } - } - - let these_substrate_keys = - match handle_machine(&mut rng, id, params, m, machines.0, &mut shares_ref) { - Ok(keys) => keys, - Err(msg) => return msg, - }; - let these_network_keys = - match handle_machine(&mut rng, id, params, m, machines.1, &mut shares_ref) { - Ok(keys) => keys, - Err(msg) => return msg, - }; - - for i in 1 ..= params.n() { - let i = Participant::new(i).unwrap(); - let Some(shares) = shares_ref.get(&i) else { continue }; - if !shares.is_empty() { - return ProcessorMessage::InvalidShare { - id, - accuser: these_substrate_keys.params().i(), - faulty: i, - blame: None, - }; - } - } - - let mut these_network_keys = ThresholdKeys::new(these_network_keys); - N::tweak_keys(&mut these_network_keys); - - substrate_keys.push(these_substrate_keys); - network_keys.push(these_network_keys); - } - - let mut generated_substrate_key = None; - let mut generated_network_key = None; - for keys in substrate_keys.iter().zip(&network_keys) { - if generated_substrate_key.is_none() { - generated_substrate_key = Some(keys.0.group_key()); - generated_network_key = Some(keys.1.group_key()); - } else { - assert_eq!(generated_substrate_key, Some(keys.0.group_key())); - assert_eq!(generated_network_key, Some(keys.1.group_key())); - } - } - - GeneratedKeysDb::save_keys::(txn, &id, &substrate_keys, &network_keys); - - ProcessorMessage::GeneratedKeyPair { - id, - substrate_key: generated_substrate_key.unwrap().to_bytes(), - // TODO: This can be made more efficient since tweaked keys may be a subset of keys - network_key: generated_network_key.unwrap().to_bytes().as_ref().to_vec(), - } - } - - CoordinatorMessage::VerifyBlame { id, accuser, accused, share, blame } => { - let params = ParamsDb::get(txn, &id.session, id.attempt).unwrap().0; - - let mut share_ref = share.as_slice(); - let Ok(substrate_share) = EncryptedMessage::< - Ristretto, - SecretShare<::F>, - >::read(&mut share_ref, params) else { - return ProcessorMessage::Blame { id, participant: accused }; - }; - let Ok(network_share) = EncryptedMessage::< - N::Curve, - SecretShare<::F>, - >::read(&mut share_ref, params) else { - return ProcessorMessage::Blame { id, participant: accused }; - }; - if !share_ref.is_empty() { - return ProcessorMessage::Blame { id, participant: accused }; - } - - let mut substrate_commitment_msgs = HashMap::new(); - let mut network_commitment_msgs = HashMap::new(); - let commitments = CommitmentsDb::get(txn, &id).unwrap(); - 
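// The per-participant blobs in CommitmentsDb concatenate one Ristretto commitment message and
// one network-curve commitment message; throughout this file, any bytes left over after parsing
// such a blob are treated as a fault by that participant. A minimal sketch of that
// parse-then-require-exhaustion pattern, with `read_item` standing in for the
// `EncryptionKeyMessage::read` calls (assumed to consume exactly one message from the slice):
fn read_all_items<T, E>(
  mut bytes: &[u8],
  count: usize,
  mut read_item: impl FnMut(&mut &[u8]) -> Result<T, E>,
  trailing_bytes_error: E,
) -> Result<Vec<T>, E> {
  let mut items = Vec::with_capacity(count);
  for _ in 0 .. count {
    items.push(read_item(&mut bytes)?);
  }
  // A malicious participant could pad their blob to bloat the DB/transcript, so any trailing
  // bytes are rejected rather than silently ignored
  if !bytes.is_empty() {
    return Err(trailing_bytes_error);
  }
  Ok(items)
}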
for (i, commitments) in commitments { - let mut commitments = commitments.as_slice(); - substrate_commitment_msgs - .insert(i, EncryptionKeyMessage::<_, _>::read(&mut commitments, params).unwrap()); - network_commitment_msgs - .insert(i, EncryptionKeyMessage::<_, _>::read(&mut commitments, params).unwrap()); - } - - // There is a mild DoS here where someone with a valid blame bloats it to the maximum size - // Given the ambiguity, and limited potential to DoS (this being called means *someone* is - // getting fatally slashed) voids the need to ensure blame is minimal - let substrate_blame = - blame.clone().and_then(|blame| EncryptionKeyProof::read(&mut blame.as_slice()).ok()); - let network_blame = - blame.clone().and_then(|blame| EncryptionKeyProof::read(&mut blame.as_slice()).ok()); - - let substrate_blame = AdditionalBlameMachine::new( - &mut rand_core::OsRng, - context(&id, SUBSTRATE_KEY_CONTEXT), - params.n(), - substrate_commitment_msgs, - ) - .unwrap() - .blame(accuser, accused, substrate_share, substrate_blame); - let network_blame = AdditionalBlameMachine::new( - &mut rand_core::OsRng, - context(&id, NETWORK_KEY_CONTEXT), - params.n(), - network_commitment_msgs, - ) - .unwrap() - .blame(accuser, accused, network_share, network_blame); - - // If the accused was blamed for either, mark them as at fault - if (substrate_blame == accused) || (network_blame == accused) { - return ProcessorMessage::Blame { id, participant: accused }; - } - - ProcessorMessage::Blame { id, participant: accuser } - } - } - } - - // This should only be called if we're participating, hence taking our instance - #[allow(clippy::unused_self)] - pub fn confirm( - &mut self, - txn: &mut D::Transaction<'_>, - session: Session, - key_pair: &KeyPair, - ) -> KeyConfirmed { - info!( - "Confirmed key pair {} {} for {:?}", - hex::encode(key_pair.0), - hex::encode(&key_pair.1), - session, - ); - - let (substrate_keys, network_keys) = KeysDb::confirm_keys::(txn, session, key_pair); - - KeyConfirmed { substrate_keys, network_keys } - } -} diff --git a/processor/src/lib.rs b/processor/src/lib.rs deleted file mode 100644 index 19f67508..00000000 --- a/processor/src/lib.rs +++ /dev/null @@ -1,15 +0,0 @@ -#![allow(dead_code)] - -mod plan; -pub use plan::*; - -mod db; -pub(crate) use db::*; - -mod key_gen; - -pub mod networks; -pub(crate) mod multisigs; - -mod additional_key; -pub use additional_key::additional_key; diff --git a/processor/src/main.rs b/processor/src/main.rs deleted file mode 100644 index 98b09a06..00000000 --- a/processor/src/main.rs +++ /dev/null @@ -1,768 +0,0 @@ -use std::{time::Duration, collections::HashMap}; - -use zeroize::{Zeroize, Zeroizing}; - -use transcript::{Transcript, RecommendedTranscript}; -use ciphersuite::{group::GroupEncoding, Ciphersuite}; - -use log::{info, warn}; -use tokio::time::sleep; - -use serai_client::{ - primitives::{BlockHash, ExternalNetworkId}, - validator_sets::primitives::{Session, KeyPair}, -}; - -use messages::{ - coordinator::{ - SubstrateSignableId, PlanMeta, CoordinatorMessage as CoordinatorCoordinatorMessage, - }, - CoordinatorMessage, -}; - -use serai_env as env; - -use message_queue::{Service, client::MessageQueue}; - -mod plan; -pub use plan::*; - -mod networks; -use networks::{Block, Network}; -#[cfg(feature = "bitcoin")] -use networks::Bitcoin; -#[cfg(feature = "ethereum")] -use networks::Ethereum; -#[cfg(feature = "monero")] -use networks::Monero; - -mod additional_key; -pub use additional_key::additional_key; - -mod db; -pub use db::*; - -mod coordinator; -pub 
use coordinator::*; - -mod key_gen; -use key_gen::{SessionDb, KeyConfirmed, KeyGen}; - -mod signer; -use signer::Signer; - -mod cosigner; -use cosigner::Cosigner; - -mod batch_signer; -use batch_signer::BatchSigner; - -mod slash_report_signer; -use slash_report_signer::SlashReportSigner; - -mod multisigs; -use multisigs::{MultisigEvent, MultisigManager}; - -#[cfg(test)] -mod tests; - -#[global_allocator] -static ALLOCATOR: zalloc::ZeroizingAlloc = - zalloc::ZeroizingAlloc(std::alloc::System); - -// Items which are mutably borrowed by Tributary. -// Any exceptions to this have to be carefully monitored in order to ensure consistency isn't -// violated. -struct TributaryMutable { - // The following are actually mutably borrowed by Substrate as well. - // - Substrate triggers key gens, and determines which to use. - // - SubstrateBlock events cause scheduling which causes signing. - // - // This is still considered Tributary-mutable as most mutation (preprocesses/shares) happens by - // the Tributary. - // - // Creation of tasks is by Substrate, yet this is safe since the mutable borrow is transferred to - // Tributary. - // - // Tributary stops mutating a key gen attempt before Substrate is made aware of it, ensuring - // Tributary drops its mutable borrow before Substrate acquires it. Tributary will maintain a - // mutable borrow on the *key gen task*, yet the finalization code can successfully run for any - // attempt. - // - // The only other note is how the scanner may cause a signer task to be dropped, effectively - // invalidating the Tributary's mutable borrow. The signer is coded to allow for attempted usage - // of a dropped task. - key_gen: KeyGen, - signers: HashMap>, - - // This is also mutably borrowed by the Scanner. - // The Scanner starts new sign tasks. - // The Tributary mutates already-created signed tasks, potentially completing them. - // Substrate may mark tasks as completed, invalidating any existing mutable borrows. - // The safety of this follows as written above. - - // There should only be one BatchSigner at a time (see #277) - batch_signer: Option>, - - // Solely mutated by the tributary. - cosigner: Option, - slash_report_signer: Option, -} - -// Items which are mutably borrowed by Substrate. -// Any exceptions to this have to be carefully monitored in order to ensure consistency isn't -// violated. - -/* - The MultisigManager contains the Scanner and Schedulers. - - The scanner is expected to autonomously operate, scanning blocks as they appear. When a block is - sufficiently confirmed, the scanner causes the Substrate signer to sign a batch. It itself only - mutates its list of finalized blocks, to protect against re-orgs, and its in-memory state though. - - Disk mutations to the scan-state only happens once the relevant `Batch` is included on Substrate. - It can't be mutated as soon as the `Batch` is signed as we need to know the order of `Batch`s - relevant to `Burn`s. - - Schedulers take in new outputs, confirmed in `Batch`s, and outbound payments, triggered by - `Burn`s. - - Substrate also decides when to move to a new multisig, hence why this entire object is - Substate-mutable. - - Since MultisigManager should always be verifiable, and the Tributary is temporal, MultisigManager - being entirely SubstrateMutable shows proper data pipe-lining. 
-*/ - -type SubstrateMutable = MultisigManager; - -async fn handle_coordinator_msg( - txn: &mut D::Transaction<'_>, - network: &N, - coordinator: &mut Co, - tributary_mutable: &mut TributaryMutable, - substrate_mutable: &mut SubstrateMutable, - msg: &Message, -) { - // If this message expects a higher block number than we have, halt until synced - async fn wait( - txn: &D::Transaction<'_>, - substrate_mutable: &SubstrateMutable, - block_hash: &BlockHash, - ) { - let mut needed_hash = >::Id::default(); - needed_hash.as_mut().copy_from_slice(&block_hash.0); - - loop { - // Ensure our scanner has scanned this block, which means our daemon has this block at - // a sufficient depth - if substrate_mutable.block_number(txn, &needed_hash).await.is_none() { - warn!( - "node is desynced. we haven't scanned {} which should happen after {} confirms", - hex::encode(&needed_hash), - N::CONFIRMATIONS, - ); - sleep(Duration::from_secs(10)).await; - continue; - }; - break; - } - - // TODO2: Sanity check we got an AckBlock (or this is the AckBlock) for the block in question - - /* - let synced = |context: &SubstrateContext, key| -> Result<(), ()> { - // Check that we've synced this block and can actually operate on it ourselves - let latest = scanner.latest_scanned(key); - if usize::try_from(context.network_latest_finalized_block).unwrap() < latest { - log::warn!( - "external network node disconnected/desynced from rest of the network. \ - our block: {latest:?}, network's acknowledged: {}", - context.network_latest_finalized_block, - ); - Err(())?; - } - Ok(()) - }; - */ - } - - if let Some(required) = msg.msg.required_block() { - // wait only reads from, it doesn't mutate, substrate_mutable - wait(txn, substrate_mutable, &required).await; - } - - async fn activate_key( - network: &N, - substrate_mutable: &mut SubstrateMutable, - tributary_mutable: &mut TributaryMutable, - txn: &mut D::Transaction<'_>, - session: Session, - key_pair: KeyPair, - activation_number: usize, - ) { - info!("activating {session:?}'s keys at {activation_number}"); - - let network_key = ::Curve::read_G::<&[u8]>(&mut key_pair.1.as_ref()) - .expect("Substrate finalized invalid point as a network's key"); - - if tributary_mutable.key_gen.in_set(&session) { - // See TributaryMutable's struct definition for why this block is safe - let KeyConfirmed { substrate_keys, network_keys } = - tributary_mutable.key_gen.confirm(txn, session, &key_pair); - if session.0 == 0 { - tributary_mutable.batch_signer = - Some(BatchSigner::new(N::NETWORK, session, substrate_keys)); - } - tributary_mutable - .signers - .insert(session, Signer::new(network.clone(), session, network_keys)); - } - - substrate_mutable.add_key(txn, activation_number, network_key).await; - } - - match msg.msg.clone() { - CoordinatorMessage::KeyGen(msg) => { - coordinator.send(tributary_mutable.key_gen.handle(txn, msg)).await; - } - - CoordinatorMessage::Sign(msg) => { - if let Some(msg) = tributary_mutable - .signers - .get_mut(&msg.session()) - .expect("coordinator told us to sign with a signer we don't have") - .handle(txn, msg) - .await - { - coordinator.send(msg).await; - } - } - - CoordinatorMessage::Coordinator(msg) => match msg { - CoordinatorCoordinatorMessage::CosignSubstrateBlock { id, block_number } => { - let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else { - panic!("CosignSubstrateBlock id didn't have a CosigningSubstrateBlock") - }; - let Some(keys) = tributary_mutable.key_gen.substrate_keys_by_session(id.session) else { - panic!("didn't have key 
shares for the key we were told to cosign with"); - }; - if let Some((cosigner, msg)) = - Cosigner::new(txn, id.session, keys, block_number, block, id.attempt) - { - tributary_mutable.cosigner = Some(cosigner); - coordinator.send(msg).await; - } else { - log::warn!("Cosigner::new returned None"); - } - } - CoordinatorCoordinatorMessage::SignSlashReport { id, report } => { - assert_eq!(id.id, SubstrateSignableId::SlashReport); - let Some(keys) = tributary_mutable.key_gen.substrate_keys_by_session(id.session) else { - panic!("didn't have key shares for the key we were told to perform a slash report with"); - }; - if let Some((slash_report_signer, msg)) = - SlashReportSigner::new(txn, N::NETWORK, id.session, keys, report, id.attempt) - { - tributary_mutable.slash_report_signer = Some(slash_report_signer); - coordinator.send(msg).await; - } else { - log::warn!("SlashReportSigner::new returned None"); - } - } - _ => { - let (is_cosign, is_batch, is_slash_report) = match msg { - CoordinatorCoordinatorMessage::CosignSubstrateBlock { .. } | - CoordinatorCoordinatorMessage::SignSlashReport { .. } => (false, false, false), - CoordinatorCoordinatorMessage::SubstratePreprocesses { ref id, .. } | - CoordinatorCoordinatorMessage::SubstrateShares { ref id, .. } => ( - matches!(&id.id, SubstrateSignableId::CosigningSubstrateBlock(_)), - matches!(&id.id, SubstrateSignableId::Batch(_)), - matches!(&id.id, SubstrateSignableId::SlashReport), - ), - CoordinatorCoordinatorMessage::BatchReattempt { .. } => (false, true, false), - }; - - if is_cosign { - if let Some(cosigner) = tributary_mutable.cosigner.as_mut() { - if let Some(msg) = cosigner.handle(txn, msg) { - coordinator.send(msg).await; - } - } else { - log::warn!( - "received message for cosigner yet didn't have a cosigner. {}", - "this is an error if we didn't reboot", - ); - } - } else if is_batch { - if let Some(msg) = tributary_mutable - .batch_signer - .as_mut() - .expect( - "coordinator told us to sign a batch when we don't currently have a Substrate signer", - ) - .handle(txn, msg) - { - coordinator.send(msg).await; - } - } else if is_slash_report { - if let Some(slash_report_signer) = tributary_mutable.slash_report_signer.as_mut() { - if let Some(msg) = slash_report_signer.handle(txn, msg) { - coordinator.send(msg).await; - } - } else { - log::warn!( - "received message for slash report signer yet didn't have {}", - "a slash report signer. this is an error if we didn't reboot", - ); - } - } - } - }, - - CoordinatorMessage::Substrate(msg) => { - match msg { - messages::substrate::CoordinatorMessage::ConfirmKeyPair { context, session, key_pair } => { - // This is the first key pair for this network so no block has been finalized yet - // TODO: Write documentation for this in docs/ - // TODO: Use an Option instead of a magic? 
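// A self-contained restatement of the block-depth arithmetic used just below, assuming blocks
// are numbered from 0 and a block counts as its own first confirmation: the newest block with
// at least `confirmations` confirms is `latest + 1 - confirmations` (e.g. latest = 10,
// confirmations = 10 gives block 1, which has 10 confirms).
fn newest_block_with_confirmations(latest: usize, confirmations: usize) -> usize {
  (latest + 1).saturating_sub(confirmations)
}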
- if context.network_latest_finalized_block.0 == [0; 32] { - assert!(tributary_mutable.signers.is_empty()); - assert!(tributary_mutable.batch_signer.is_none()); - assert!(tributary_mutable.cosigner.is_none()); - // We can't check this as existing is no longer pub - // assert!(substrate_mutable.existing.as_ref().is_none()); - - // Wait until a network's block's time exceeds Serai's time - // These time calls are extremely expensive for what they do, yet they only run when - // confirming the first key pair, before any network activity has occurred, so they - // should be fine - - // If the latest block number is 10, then the block indexed by 1 has 10 confirms - // 10 + 1 - 10 = 1 - let mut block_i; - while { - block_i = (network.get_latest_block_number_with_retries().await + 1) - .saturating_sub(N::CONFIRMATIONS); - network.get_block_with_retries(block_i).await.time(network).await < context.serai_time - } { - info!( - "serai confirmed the first key pair for a set. {} {}", - "we're waiting for a network's finalized block's time to exceed unix time ", - context.serai_time, - ); - sleep(Duration::from_secs(5)).await; - } - - // Find the first block to do so - let mut earliest = block_i; - // earliest > 0 prevents a panic if Serai creates keys before the genesis block - // which... should be impossible - // Yet a prevented panic is a prevented panic - while (earliest > 0) && - (network.get_block_with_retries(earliest - 1).await.time(network).await >= - context.serai_time) - { - earliest -= 1; - } - - // Use this as the activation block - let activation_number = earliest; - - activate_key( - network, - substrate_mutable, - tributary_mutable, - txn, - session, - key_pair, - activation_number, - ) - .await; - } else { - let mut block_before_queue_block = >::Id::default(); - block_before_queue_block - .as_mut() - .copy_from_slice(&context.network_latest_finalized_block.0); - // We can't set these keys for activation until we know their queue block, which we - // won't until the next Batch is confirmed - // Set this variable so when we get the next Batch event, we can handle it - PendingActivationsDb::set_pending_activation::( - txn, - &block_before_queue_block, - session, - key_pair, - ); - } - } - - messages::substrate::CoordinatorMessage::SubstrateBlock { - context, - block: substrate_block, - burns, - batches, - } => { - if let Some((block, session, key_pair)) = - PendingActivationsDb::pending_activation::(txn) - { - // Only run if this is a Batch belonging to a distinct block - if context.network_latest_finalized_block.as_ref() != block.as_ref() { - let mut queue_block = >::Id::default(); - queue_block.as_mut().copy_from_slice(context.network_latest_finalized_block.as_ref()); - - let activation_number = substrate_mutable - .block_number(txn, &queue_block) - .await - .expect("KeyConfirmed from context we haven't synced") + - N::CONFIRMATIONS; - - activate_key( - network, - substrate_mutable, - tributary_mutable, - txn, - session, - key_pair, - activation_number, - ) - .await; - //clear pending activation - txn.del(PendingActivationsDb::key()); - } - } - - // Since this block was acknowledged, we no longer have to sign the batches within it - if let Some(batch_signer) = tributary_mutable.batch_signer.as_mut() { - for batch_id in batches { - batch_signer.batch_signed(txn, batch_id); - } - } - - let (acquired_lock, to_sign) = - substrate_mutable.substrate_block(txn, network, context, burns).await; - - // Send SubstrateBlockAck, with relevant plan IDs, before we trigger the signing of these - // 
plans - if !tributary_mutable.signers.is_empty() { - coordinator - .send(messages::coordinator::ProcessorMessage::SubstrateBlockAck { - block: substrate_block, - plans: to_sign - .iter() - .filter_map(|signable| { - SessionDb::get(txn, signable.0.to_bytes().as_ref()) - .map(|session| PlanMeta { session, id: signable.1 }) - }) - .collect(), - }) - .await; - } - - // See commentary in TributaryMutable for why this is safe - let signers = &mut tributary_mutable.signers; - for (key, id, tx, eventuality) in to_sign { - if let Some(session) = SessionDb::get(txn, key.to_bytes().as_ref()) { - let signer = signers.get_mut(&session).unwrap(); - if let Some(msg) = signer.sign_transaction(txn, id, tx, &eventuality).await { - coordinator.send(msg).await; - } - } - } - - // This is not premature, even if this block had multiple `Batch`s created, as the first - // `Batch` alone will trigger all Plans/Eventualities/Signs - if acquired_lock { - substrate_mutable.release_scanner_lock().await; - } - } - } - } - } -} - -async fn boot( - raw_db: &mut D, - network: &N, - coordinator: &mut Co, -) -> (D, TributaryMutable, SubstrateMutable) { - let mut entropy_transcript = { - let entropy = Zeroizing::new(env::var("ENTROPY").expect("entropy wasn't specified")); - if entropy.len() != 64 { - panic!("entropy isn't the right length"); - } - let mut bytes = - Zeroizing::new(hex::decode(entropy).map_err(|_| ()).expect("entropy wasn't hex-formatted")); - if bytes.len() != 32 { - bytes.zeroize(); - panic!("entropy wasn't 32 bytes"); - } - let mut entropy = Zeroizing::new([0; 32]); - let entropy_mut: &mut [u8] = entropy.as_mut(); - entropy_mut.copy_from_slice(bytes.as_ref()); - - let mut transcript = RecommendedTranscript::new(b"Serai Processor Entropy"); - transcript.append_message(b"entropy", entropy); - transcript - }; - - // TODO: Save a hash of the entropy to the DB and make sure the entropy didn't change - - let mut entropy = |label| { - let mut challenge = entropy_transcript.challenge(label); - let mut res = Zeroizing::new([0; 32]); - let res_mut: &mut [u8] = res.as_mut(); - res_mut.copy_from_slice(&challenge[.. 32]); - challenge.zeroize(); - res - }; - - // We don't need to re-issue GenerateKey orders because the coordinator is expected to - // schedule/notify us of new attempts - // TODO: Is this above comment still true? Not at all due to the planned lack of DKG timeouts? 
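// The `entropy` closure above derives an independent 32-byte secret per subsystem from the
// single ENTROPY environment variable by challenging the entropy transcript with a distinct
// label. A freestanding sketch of that pattern, using the transcript/zeroize imports already at
// the top of this file (the label passed in is purely illustrative, not one the processor
// actually uses):
fn derive_labeled_secret(entropy: Zeroizing<[u8; 32]>, label: &'static [u8]) -> Zeroizing<[u8; 32]> {
  let mut transcript = RecommendedTranscript::new(b"Serai Processor Entropy");
  transcript.append_message(b"entropy", entropy);
  let mut challenge = transcript.challenge(label);
  let mut secret = Zeroizing::new([0; 32]);
  let secret_mut: &mut [u8] = secret.as_mut();
  secret_mut.copy_from_slice(&challenge[.. 32]);
  challenge.zeroize();
  secret
}
// Usage, under those assumptions: derive_labeled_secret(entropy_bytes, b"example_entropy")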
- let key_gen = KeyGen::::new(raw_db.clone(), entropy(b"key-gen_entropy")); - - let (multisig_manager, current_keys, actively_signing) = - MultisigManager::new(raw_db, network).await; - - let mut batch_signer = None; - let mut signers = HashMap::new(); - - for (i, key) in current_keys.iter().enumerate() { - let Some((session, (substrate_keys, network_keys))) = key_gen.keys(key) else { continue }; - let network_key = network_keys[0].group_key(); - - // If this is the oldest key, load the BatchSigner for it as the active BatchSigner - // The new key only takes responsibility once the old key is fully deprecated - // - // We don't have to load any state for this since the Scanner will re-fire any events - // necessary, only no longer scanning old blocks once Substrate acks them - if i == 0 { - batch_signer = Some(BatchSigner::new(N::NETWORK, session, substrate_keys)); - } - - // The Scanner re-fires events as needed for batch_signer yet not signer - // This is due to the transactions which we start signing from due to a block not being - // guaranteed to be signed before we stop scanning the block on reboot - // We could simplify the Signer flow by delaying when it acks a block, yet that'd: - // 1) Increase the startup time - // 2) Cause re-emission of Batch events, which we'd need to check the safety of - // (TODO: Do anyways?) - // 3) Violate the attempt counter (TODO: Is this already being violated?) - let mut signer = Signer::new(network.clone(), session, network_keys); - - // Sign any TXs being actively signed - for (plan, tx, eventuality) in &actively_signing { - if plan.key == network_key { - let mut txn = raw_db.txn(); - if let Some(msg) = - signer.sign_transaction(&mut txn, plan.id(), tx.clone(), eventuality).await - { - coordinator.send(msg).await; - } - // This should only have re-writes of existing data - drop(txn); - } - } - - signers.insert(session, signer); - } - - // Spawn a task to rebroadcast signed TXs yet to be mined into a finalized block - // This hedges against being dropped due to full mempools, temporarily too low of a fee... - tokio::spawn(Signer::::rebroadcast_task(raw_db.clone(), network.clone())); - - ( - raw_db.clone(), - TributaryMutable { key_gen, batch_signer, cosigner: None, slash_report_signer: None, signers }, - multisig_manager, - ) -} - -#[allow(clippy::await_holding_lock)] // Needed for txn, unfortunately can't be down-scoped -async fn run(mut raw_db: D, network: N, mut coordinator: Co) { - // We currently expect a contextless bidirectional mapping between these two values - // (which is that any value of A can be interpreted as B and vice versa) - // While we can write a contextual mapping, we have yet to do so - // This check ensures no network which doesn't have a bidirectional mapping is defined - assert_eq!(>::Id::default().as_ref().len(), BlockHash([0u8; 32]).0.len()); - - let (main_db, mut tributary_mutable, mut substrate_mutable) = - boot(&mut raw_db, &network, &mut coordinator).await; - - // We can't load this from the DB as we can't guarantee atomic increments with the ack function - // TODO: Load with a slight tolerance - let mut last_coordinator_msg = None; - - loop { - let mut txn = raw_db.txn(); - - log::trace!("new db txn in run"); - - let mut outer_msg = None; - - tokio::select! 
{ - // This blocks the entire processor until it finishes handling this message - // KeyGen specifically may take a notable amount of processing time - // While that shouldn't be an issue in practice, as after processing an attempt it'll handle - // the other messages in the queue, it may be beneficial to parallelize these - // They could potentially be parallelized by type (KeyGen, Sign, Substrate) without issue - msg = coordinator.recv() => { - if let Some(last_coordinator_msg) = last_coordinator_msg { - assert_eq!(msg.id, last_coordinator_msg + 1); - } - last_coordinator_msg = Some(msg.id); - - // Only handle this if we haven't already - if HandledMessageDb::get(&main_db, msg.id).is_none() { - HandledMessageDb::set(&mut txn, msg.id, &()); - - // This is isolated to better think about how its ordered, or rather, about how the other - // cases aren't ordered - // - // While the coordinator messages are ordered, they're not deterministically ordered - // Tributary-caused messages are deterministically ordered, and Substrate-caused messages - // are deterministically-ordered, yet they're both shoved into a singular queue - // The order at which they're shoved in together isn't deterministic - // - // This is safe so long as Tributary and Substrate messages don't both expect mutable - // references over the same data - handle_coordinator_msg( - &mut txn, - &network, - &mut coordinator, - &mut tributary_mutable, - &mut substrate_mutable, - &msg, - ).await; - } - - outer_msg = Some(msg); - }, - - scanner_event = substrate_mutable.next_scanner_event() => { - let msg = substrate_mutable.scanner_event_to_multisig_event( - &mut txn, - &network, - scanner_event - ).await; - - match msg { - MultisigEvent::Batches(retired_key_new_key, batches) => { - // Start signing this batch - for batch in batches { - info!("created batch {} ({} instructions)", batch.id, batch.instructions.len()); - - // The coordinator expects BatchPreprocess to immediately follow Batch - coordinator.send( - messages::substrate::ProcessorMessage::Batch { batch: batch.clone() } - ).await; - - if let Some(batch_signer) = tributary_mutable.batch_signer.as_mut() { - if let Some(msg) = batch_signer.sign(&mut txn, batch) { - coordinator.send(msg).await; - } - } - } - - if let Some((retired_key, new_key)) = retired_key_new_key { - // Safe to mutate since all signing operations are done and no more will be added - if let Some(retired_session) = SessionDb::get(&txn, retired_key.to_bytes().as_ref()) { - tributary_mutable.signers.remove(&retired_session); - } - tributary_mutable.batch_signer.take(); - let keys = tributary_mutable.key_gen.keys(&new_key); - if let Some((session, (substrate_keys, _))) = keys { - tributary_mutable.batch_signer = - Some(BatchSigner::new(N::NETWORK, session, substrate_keys)); - } - } - }, - MultisigEvent::Completed(key, id, tx) => { - if let Some(session) = SessionDb::get(&txn, &key) { - let signer = tributary_mutable.signers.get_mut(&session).unwrap(); - if let Some(msg) = signer.completed(&mut txn, id, &tx) { - coordinator.send(msg).await; - } - } - } - } - }, - } - - txn.commit(); - if let Some(msg) = outer_msg { - coordinator.ack(msg).await; - } - } -} - -#[tokio::main] -async fn main() { - // Override the panic handler with one which will panic if any tokio task panics - { - let existing = std::panic::take_hook(); - std::panic::set_hook(Box::new(move |panic| { - existing(panic); - const MSG: &str = "exiting the process due to a task panicking"; - println!("{MSG}"); - log::error!("{MSG}"); - 
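      // By default a panic in a spawned tokio task only terminates that task; exiting here
      // ensures the whole processor stops rather than continuing with partial state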
std::process::exit(1); - })); - } - - if std::env::var("RUST_LOG").is_err() { - std::env::set_var("RUST_LOG", serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_string())); - } - env_logger::init(); - - #[allow(unused_variables, unreachable_code)] - let db = { - #[cfg(all(feature = "parity-db", feature = "rocksdb"))] - panic!("built with parity-db and rocksdb"); - #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))] - let db = - serai_db::new_parity_db(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); - #[cfg(feature = "rocksdb")] - let db = - serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); - db - }; - - // Network configuration - let url = { - let login = env::var("NETWORK_RPC_LOGIN").expect("network RPC login wasn't specified"); - let hostname = env::var("NETWORK_RPC_HOSTNAME").expect("network RPC hostname wasn't specified"); - let port = env::var("NETWORK_RPC_PORT").expect("network port domain wasn't specified"); - "http://".to_string() + &login + "@" + &hostname + ":" + &port - }; - let network_id = match env::var("NETWORK").expect("network wasn't specified").as_str() { - "bitcoin" => ExternalNetworkId::Bitcoin, - "ethereum" => ExternalNetworkId::Ethereum, - "monero" => ExternalNetworkId::Monero, - _ => panic!("unrecognized network"), - }; - - let coordinator = MessageQueue::from_env(Service::Processor(network_id)); - - // This allow is necessary since each configuration deletes the other networks from the following - // match arms. So we match all cases but since all cases already there according to the compiler - // we put this to allow clippy to get pass this. - #[allow(unreachable_patterns)] - match network_id { - #[cfg(feature = "bitcoin")] - ExternalNetworkId::Bitcoin => run(db, Bitcoin::new(url).await, coordinator).await, - #[cfg(feature = "ethereum")] - ExternalNetworkId::Ethereum => { - let relayer_hostname = env::var("ETHEREUM_RELAYER_HOSTNAME") - .expect("ethereum relayer hostname wasn't specified") - .to_string(); - let relayer_port = - env::var("ETHEREUM_RELAYER_PORT").expect("ethereum relayer port wasn't specified"); - let relayer_url = relayer_hostname + ":" + &relayer_port; - run(db.clone(), Ethereum::new(db, url, relayer_url).await, coordinator).await - } - #[cfg(feature = "monero")] - ExternalNetworkId::Monero => run(db, Monero::new(url).await, coordinator).await, - _ => panic!("spawning a processor for an unsupported network"), - } -} diff --git a/processor/src/multisigs/db.rs b/processor/src/multisigs/db.rs deleted file mode 100644 index f5e05f68..00000000 --- a/processor/src/multisigs/db.rs +++ /dev/null @@ -1,264 +0,0 @@ -use std::io; - -use ciphersuite::Ciphersuite; -pub use serai_db::*; - -use scale::{Encode, Decode}; -#[rustfmt::skip] -use serai_client::{ - in_instructions::primitives::InInstructionWithBalance, - primitives::ExternalBalance -}; - -use crate::{ - Get, Plan, - networks::{Output, Transaction, Network}, -}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum PlanFromScanning { - Refund(N::Output, N::Address), - Forward(N::Output), -} - -impl PlanFromScanning { - fn read(reader: &mut R) -> io::Result { - let mut kind = [0xff]; - reader.read_exact(&mut kind)?; - match kind[0] { - 0 => { - let output = N::Output::read(reader)?; - - let mut address_vec_len = [0; 4]; - reader.read_exact(&mut address_vec_len)?; - let mut address_vec = - vec![0; usize::try_from(u32::from_le_bytes(address_vec_len)).unwrap()]; - reader.read_exact(&mut address_vec)?; - let address = - 
N::Address::try_from(address_vec).map_err(|_| "invalid address saved to disk").unwrap(); - - Ok(PlanFromScanning::Refund(output, address)) - } - 1 => { - let output = N::Output::read(reader)?; - Ok(PlanFromScanning::Forward(output)) - } - _ => panic!("reading unrecognized PlanFromScanning"), - } - } - fn write(&self, writer: &mut W) -> io::Result<()> { - match self { - PlanFromScanning::Refund(output, address) => { - writer.write_all(&[0])?; - output.write(writer)?; - - let address_vec: Vec = - address.clone().try_into().map_err(|_| "invalid address being refunded to").unwrap(); - writer.write_all(&u32::try_from(address_vec.len()).unwrap().to_le_bytes())?; - writer.write_all(&address_vec) - } - PlanFromScanning::Forward(output) => { - writer.write_all(&[1])?; - output.write(writer) - } - } - } -} - -create_db!( - MultisigsDb { - NextBatchDb: () -> u32, - PlanDb: (id: &[u8]) -> Vec, - PlansFromScanningDb: (block_number: u64) -> Vec, - OperatingCostsDb: () -> u64, - ResolvedDb: (tx: &[u8]) -> [u8; 32], - SigningDb: (key: &[u8]) -> Vec, - ForwardedOutputDb: (balance: ExternalBalance) -> Vec, - DelayedOutputDb: () -> Vec - } -); - -impl PlanDb { - pub fn save_active_plan( - txn: &mut impl DbTxn, - key: &[u8], - block_number: usize, - plan: &Plan, - operating_costs_at_time: u64, - ) { - let id = plan.id(); - - { - let mut signing = SigningDb::get(txn, key).unwrap_or_default(); - - // If we've already noted we're signing this, return - assert_eq!(signing.len() % 32, 0); - for i in 0 .. (signing.len() / 32) { - if signing[(i * 32) .. ((i + 1) * 32)] == id { - return; - } - } - - signing.extend(&id); - SigningDb::set(txn, key, &signing); - } - - { - let mut buf = block_number.to_le_bytes().to_vec(); - plan.write(&mut buf).unwrap(); - buf.extend(&operating_costs_at_time.to_le_bytes()); - Self::set(txn, &id, &buf); - } - } - - pub fn active_plans(getter: &impl Get, key: &[u8]) -> Vec<(u64, Plan, u64)> { - let signing = SigningDb::get(getter, key).unwrap_or_default(); - let mut res = vec![]; - - assert_eq!(signing.len() % 32, 0); - for i in 0 .. (signing.len() / 32) { - let id = &signing[(i * 32) .. ((i + 1) * 32)]; - let buf = Self::get(getter, id).unwrap(); - - let block_number = u64::from_le_bytes(buf[.. 8].try_into().unwrap()); - let plan = Plan::::read::<&[u8]>(&mut &buf[8 ..]).unwrap(); - assert_eq!(id, &plan.id()); - let operating_costs = u64::from_le_bytes(buf[(buf.len() - 8) ..].try_into().unwrap()); - res.push((block_number, plan, operating_costs)); - } - res - } - - pub fn plan_by_key_with_self_change( - getter: &impl Get, - key: ::G, - id: [u8; 32], - ) -> bool { - let plan = Plan::::read::<&[u8]>(&mut &Self::get(getter, &id).unwrap()[8 ..]).unwrap(); - assert_eq!(plan.id(), id); - if let Some(change) = N::change_address(plan.key) { - (key == plan.key) && (Some(change) == plan.change) - } else { - false - } - } -} - -impl OperatingCostsDb { - pub fn take_operating_costs(txn: &mut impl DbTxn) -> u64 { - let existing = Self::get(txn).unwrap_or_default(); - txn.del(Self::key()); - existing - } - pub fn set_operating_costs(txn: &mut impl DbTxn, amount: u64) { - if amount != 0 { - Self::set(txn, &amount); - } - } -} - -impl ResolvedDb { - pub fn resolve_plan( - txn: &mut impl DbTxn, - key: &[u8], - plan: [u8; 32], - resolution: &>::Id, - ) { - let mut signing = SigningDb::get(txn, key).unwrap_or_default(); - assert_eq!(signing.len() % 32, 0); - - let mut found = false; - for i in 0 .. (signing.len() / 32) { - let start = i * 32; - let end = i + 32; - if signing[start .. 
end] == plan { - found = true; - signing = [&signing[.. start], &signing[end ..]].concat(); - break; - } - } - - if !found { - log::warn!("told to finish signing {} yet wasn't actively signing it", hex::encode(plan)); - } - SigningDb::set(txn, key, &signing); - Self::set(txn, resolution.as_ref(), &plan); - } -} - -impl PlansFromScanningDb { - pub fn set_plans_from_scanning( - txn: &mut impl DbTxn, - block_number: usize, - plans: Vec>, - ) { - let mut buf = vec![]; - for plan in plans { - plan.write(&mut buf).unwrap(); - } - Self::set(txn, block_number.try_into().unwrap(), &buf); - } - - pub fn take_plans_from_scanning( - txn: &mut impl DbTxn, - block_number: usize, - ) -> Option>> { - let block_number = u64::try_from(block_number).unwrap(); - let res = Self::get(txn, block_number).map(|plans| { - let mut plans_ref = plans.as_slice(); - let mut res = vec![]; - while !plans_ref.is_empty() { - res.push(PlanFromScanning::::read(&mut plans_ref).unwrap()); - } - res - }); - if res.is_some() { - txn.del(Self::key(block_number)); - } - res - } -} - -impl ForwardedOutputDb { - pub fn save_forwarded_output(txn: &mut impl DbTxn, instruction: &InInstructionWithBalance) { - let mut existing = Self::get(txn, instruction.balance).unwrap_or_default(); - existing.extend(instruction.encode()); - Self::set(txn, instruction.balance, &existing); - } - - pub fn take_forwarded_output( - txn: &mut impl DbTxn, - balance: ExternalBalance, - ) -> Option { - let outputs = Self::get(txn, balance)?; - let mut outputs_ref = outputs.as_slice(); - let res = InInstructionWithBalance::decode(&mut outputs_ref).unwrap(); - assert!(outputs_ref.len() < outputs.len()); - if outputs_ref.is_empty() { - txn.del(Self::key(balance)); - } else { - Self::set(txn, balance, &outputs); - } - Some(res) - } -} - -impl DelayedOutputDb { - pub fn save_delayed_output(txn: &mut impl DbTxn, instruction: &InInstructionWithBalance) { - let mut existing = Self::get(txn).unwrap_or_default(); - existing.extend(instruction.encode()); - Self::set(txn, &existing); - } - - pub fn take_delayed_outputs(txn: &mut impl DbTxn) -> Vec { - let Some(outputs) = Self::get(txn) else { return vec![] }; - txn.del(Self::key()); - - let mut outputs_ref = outputs.as_slice(); - let mut res = vec![]; - while !outputs_ref.is_empty() { - res.push(InInstructionWithBalance::decode(&mut outputs_ref).unwrap()); - } - res - } -} diff --git a/processor/src/multisigs/mod.rs b/processor/src/multisigs/mod.rs deleted file mode 100644 index 12f01715..00000000 --- a/processor/src/multisigs/mod.rs +++ /dev/null @@ -1,1068 +0,0 @@ -use core::time::Duration; -use std::collections::HashSet; - -use ciphersuite::{group::GroupEncoding, Ciphersuite}; - -use scale::{Encode, Decode}; -use messages::SubstrateContext; - -use serai_client::{ - primitives::{MAX_DATA_LEN, ExternalAddress, BlockHash, Data}, - in_instructions::primitives::{ - InInstructionWithBalance, Batch, RefundableInInstruction, Shorthand, MAX_BATCH_SIZE, - }, - coins::primitives::{OutInstruction, OutInstructionWithBalance}, -}; - -use log::{info, error}; - -use tokio::time::sleep; - -#[cfg(not(test))] -mod scanner; -#[cfg(test)] -pub mod scanner; - -use scanner::{ScannerEvent, ScannerHandle, Scanner}; - -mod db; -use db::*; - -pub(crate) mod scheduler; -use scheduler::Scheduler; - -use crate::{ - Get, Db, Payment, Plan, - networks::{OutputType, Output, SignableTransaction, Eventuality, Block, PreparedSend, Network}, -}; - -// InInstructionWithBalance from an external output -fn instruction_from_output( - output: &N::Output, -) -> 
(Option, Option) { - assert_eq!(output.kind(), OutputType::External); - - let presumed_origin = output.presumed_origin().map(|address| { - ExternalAddress::new( - address - .try_into() - .map_err(|_| ()) - .expect("presumed origin couldn't be converted to a Vec"), - ) - .expect("presumed origin exceeded address limits") - }); - - let mut data = output.data(); - let max_data_len = usize::try_from(MAX_DATA_LEN).unwrap(); - if data.len() > max_data_len { - error!( - "data in output {} exceeded MAX_DATA_LEN ({MAX_DATA_LEN}): {}. skipping", - hex::encode(output.id()), - data.len(), - ); - return (presumed_origin, None); - } - - let shorthand = match Shorthand::decode(&mut data) { - Ok(shorthand) => shorthand, - Err(e) => { - info!("data in output {} wasn't valid shorthand: {e:?}", hex::encode(output.id())); - return (presumed_origin, None); - } - }; - let instruction = match RefundableInInstruction::try_from(shorthand) { - Ok(instruction) => instruction, - Err(e) => { - info!( - "shorthand in output {} wasn't convertible to a RefundableInInstruction: {e:?}", - hex::encode(output.id()) - ); - return (presumed_origin, None); - } - }; - - let mut balance = output.balance(); - // Deduct twice the cost to aggregate to prevent economic attacks by malicious miners against - // other users - balance.amount.0 -= 2 * N::COST_TO_AGGREGATE; - - ( - instruction.origin.or(presumed_origin), - Some(InInstructionWithBalance { instruction: instruction.instruction, balance }), - ) -} - -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -enum RotationStep { - // Use the existing multisig for all actions (steps 1-3) - UseExisting, - // Use the new multisig as change (step 4) - NewAsChange, - // The existing multisig is expected to solely forward transactions at this point (step 5) - ForwardFromExisting, - // The existing multisig is expected to finish its own transactions and do nothing more - // (step 6) - ClosingExisting, -} - -// This explicitly shouldn't take the database as we prepare Plans we won't execute for fee -// estimates -async fn prepare_send( - network: &N, - block_number: usize, - plan: Plan, - operating_costs: u64, -) -> PreparedSend { - loop { - match network.prepare_send(block_number, plan.clone(), operating_costs).await { - Ok(prepared) => { - return prepared; - } - Err(e) => { - error!("couldn't prepare a send for plan {}: {e}", hex::encode(plan.id())); - // The processor is either trying to create an invalid TX (fatal) or the node went - // offline - // The former requires a patch, the latter is a connection issue - // If the latter, this is an appropriate sleep. 
If the former, we should panic, yet - // this won't flood the console ad infinitum - sleep(Duration::from_secs(60)).await; - } - } - } -} - -pub struct MultisigViewer { - activation_block: usize, - key: ::G, - scheduler: N::Scheduler, -} - -#[allow(clippy::type_complexity)] -#[derive(Clone, Debug)] -pub enum MultisigEvent { - // Batches to publish - Batches(Option<(::G, ::G)>, Vec), - // Eventuality completion found on-chain - Completed(Vec, [u8; 32], ::Completion), -} - -pub struct MultisigManager { - scanner: ScannerHandle, - existing: Option>, - new: Option>, -} - -impl MultisigManager { - pub async fn new( - raw_db: &D, - network: &N, - ) -> ( - Self, - Vec<::G>, - Vec<(Plan, N::SignableTransaction, N::Eventuality)>, - ) { - // The scanner has no long-standing orders to re-issue - let (mut scanner, current_keys) = Scanner::new(network.clone(), raw_db.clone()); - - let mut schedulers = vec![]; - - assert!(current_keys.len() <= 2); - let mut actively_signing = vec![]; - for (_, key) in &current_keys { - schedulers.push(N::Scheduler::from_db(raw_db, *key, N::NETWORK).unwrap()); - - // Load any TXs being actively signed - let key = key.to_bytes(); - for (block_number, plan, operating_costs) in PlanDb::active_plans::(raw_db, key.as_ref()) { - let block_number = block_number.try_into().unwrap(); - - let id = plan.id(); - info!("reloading plan {}: {:?}", hex::encode(id), plan); - - let key_bytes = plan.key.to_bytes(); - - let Some((tx, eventuality)) = - prepare_send(network, block_number, plan.clone(), operating_costs).await.tx - else { - panic!("previously created transaction is no longer being created") - }; - - scanner - .register_eventuality(key_bytes.as_ref(), block_number, id, eventuality.clone()) - .await; - actively_signing.push((plan, tx, eventuality)); - } - } - - ( - MultisigManager { - scanner, - existing: current_keys.first().copied().map(|(activation_block, key)| MultisigViewer { - activation_block, - key, - scheduler: schedulers.remove(0), - }), - new: current_keys.get(1).copied().map(|(activation_block, key)| MultisigViewer { - activation_block, - key, - scheduler: schedulers.remove(0), - }), - }, - current_keys.into_iter().map(|(_, key)| key).collect(), - actively_signing, - ) - } - - /// Returns the block number for a block hash, if it's known and all keys have scanned the block. - // This is guaranteed to atomically increment so long as no new keys are added to the scanner - // which activate at a block before the currently highest scanned block. This is prevented by - // the processor waiting for `Batch` inclusion before scanning too far ahead, and activation only - // happening after the "too far ahead" window. 
- pub async fn block_number( - &self, - getter: &G, - hash: &>::Id, - ) -> Option { - let latest = ScannerHandle::::block_number(getter, hash)?; - - // While the scanner has cemented this block, that doesn't mean it's been scanned for all - // keys - // ram_scanned will return the lowest scanned block number out of all keys - if latest > self.scanner.ram_scanned().await { - return None; - } - Some(latest) - } - - pub async fn add_key( - &mut self, - txn: &mut D::Transaction<'_>, - activation_block: usize, - external_key: ::G, - ) { - self.scanner.register_key(txn, activation_block, external_key).await; - let viewer = Some(MultisigViewer { - activation_block, - key: external_key, - scheduler: N::Scheduler::new::(txn, external_key, N::NETWORK), - }); - - if self.existing.is_none() { - self.existing = viewer; - return; - } - self.new = viewer; - } - - fn current_rotation_step(&self, block_number: usize) -> RotationStep { - let Some(new) = self.new.as_ref() else { return RotationStep::UseExisting }; - - // Period numbering here has no meaning other than these are the time values useful here, and - // the order they're calculated in. They have no reference/shared marker with anything else - - // ESTIMATED_BLOCK_TIME_IN_SECONDS is fine to use here. While inaccurate, it shouldn't be - // drastically off, and even if it is, it's a hiccup to latency handling only possible when - // rotating. The error rate wouldn't be acceptable if it was allowed to accumulate over time, - // yet rotation occurs on Serai's clock, disconnecting any errors here from any prior. - - // N::CONFIRMATIONS + 10 minutes - let period_1_start = new.activation_block + - N::CONFIRMATIONS + - (10usize * 60).div_ceil(N::ESTIMATED_BLOCK_TIME_IN_SECONDS); - - // N::CONFIRMATIONS - let period_2_start = period_1_start + N::CONFIRMATIONS; - - // 6 hours after period 2 - // Also ensure 6 hours is greater than the amount of CONFIRMATIONS, for sanity purposes - let period_3_start = - period_2_start + ((6 * 60 * 60) / N::ESTIMATED_BLOCK_TIME_IN_SECONDS).max(N::CONFIRMATIONS); - - if block_number < period_1_start { - RotationStep::UseExisting - } else if block_number < period_2_start { - RotationStep::NewAsChange - } else if block_number < period_3_start { - RotationStep::ForwardFromExisting - } else { - RotationStep::ClosingExisting - } - } - - // Convert new Burns to Payments. - // - // Also moves payments from the old Scheduler to the new multisig if the step calls for it. 
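As an aside before the Burns-to-Payments conversion below, here is a self-contained sketch of the rotation schedule computed by `current_rotation_step` above. The `CONFIRMATIONS = 6` and `ESTIMATED_BLOCK_TIME_IN_SECONDS = 600` values are illustrative Bitcoin-like assumptions, not constants taken from this codebase.

```rust
// A worked example (not part of the removed code) of the rotation-step boundaries above.
fn rotation_boundaries(activation_block: usize) -> (usize, usize, usize) {
  const CONFIRMATIONS: usize = 6; // assumption for illustration
  const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 600; // assumption for illustration

  // UseExisting until CONFIRMATIONS plus ~10 minutes after activation
  let period_1_start =
    activation_block + CONFIRMATIONS + (10usize * 60).div_ceil(ESTIMATED_BLOCK_TIME_IN_SECONDS);
  // NewAsChange for a further CONFIRMATIONS blocks
  let period_2_start = period_1_start + CONFIRMATIONS;
  // ForwardFromExisting for ~6 hours (floored at CONFIRMATIONS), then ClosingExisting
  let period_3_start =
    period_2_start + ((6 * 60 * 60) / ESTIMATED_BLOCK_TIME_IN_SECONDS).max(CONFIRMATIONS);

  (period_1_start, period_2_start, period_3_start)
}

fn main() {
  // A key activated at block 1000 is used as-is until 1007, is the change target until 1013,
  // only forwards until 1049, and then closes.
  assert_eq!(rotation_boundaries(1000), (1007, 1013, 1049));
}
```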
- fn burns_to_payments( - &mut self, - txn: &mut D::Transaction<'_>, - step: RotationStep, - burns: Vec, - ) -> (Vec>, Vec>) { - let mut payments = vec![]; - for out in burns { - let OutInstructionWithBalance { instruction: OutInstruction { address, data }, balance } = - out; - assert_eq!(balance.coin.network(), N::NETWORK); - - if let Ok(address) = N::Address::try_from(address.consume()) { - payments.push(Payment { address, data: data.map(Data::consume), balance }); - } - } - - let payments = payments; - match step { - RotationStep::UseExisting | RotationStep::NewAsChange => (payments, vec![]), - RotationStep::ForwardFromExisting | RotationStep::ClosingExisting => { - // Consume any payments the prior scheduler was unable to complete - // This should only actually matter once - let mut new_payments = self.existing.as_mut().unwrap().scheduler.consume_payments::(txn); - // Add the new payments - new_payments.extend(payments); - (vec![], new_payments) - } - } - } - - fn split_outputs_by_key(&self, outputs: Vec) -> (Vec, Vec) { - let mut existing_outputs = Vec::with_capacity(outputs.len()); - let mut new_outputs = vec![]; - - let existing_key = self.existing.as_ref().unwrap().key; - let new_key = self.new.as_ref().map(|new| new.key); - for output in outputs { - if output.key() == existing_key { - existing_outputs.push(output); - } else { - assert_eq!(Some(output.key()), new_key); - new_outputs.push(output); - } - } - - (existing_outputs, new_outputs) - } - - fn refund_plan( - scheduler: &mut N::Scheduler, - txn: &mut D::Transaction<'_>, - output: N::Output, - refund_to: N::Address, - ) -> Plan { - log::info!("creating refund plan for {}", hex::encode(output.id())); - assert_eq!(output.kind(), OutputType::External); - scheduler.refund_plan::(txn, output, refund_to) - } - - // Returns the plan for forwarding if one is needed. - // Returns None if one is not needed to forward this output. - fn forward_plan(&mut self, txn: &mut D::Transaction<'_>, output: &N::Output) -> Option> { - log::info!("creating forwarding plan for {}", hex::encode(output.id())); - let res = self.existing.as_mut().unwrap().scheduler.forward_plan::( - txn, - output.clone(), - self.new.as_ref().expect("forwarding plan yet no new multisig").key, - ); - if res.is_none() { - log::info!("no forwarding plan was necessary for {}", hex::encode(output.id())); - } - res - } - - // Filter newly received outputs due to the step being RotationStep::ClosingExisting. - // - // Returns the Plans for the `Branch`s which should be created off outputs which passed the - // filter. - fn filter_outputs_due_to_closing( - &mut self, - txn: &mut D::Transaction<'_>, - existing_outputs: &mut Vec, - ) -> Vec> { - /* - The document says to only handle outputs we created. We don't know what outputs we - created. We do have an ordered view of equivalent outputs however, and can assume the - first (and likely only) ones are the ones we created. - - Accordingly, only handling outputs we created should be definable as only handling - outputs from the resolution of Eventualities. - - This isn't feasible. It requires knowing what Eventualities were completed in this block, - when we handle this block, which we don't know without fully serialized scanning + Batch - publication. - - Take the following scenario: - 1) A network uses 10 confirmations. Block x is scanned, meaning x+9a exists. - 2) 67% of nodes process x, create, sign, and publish a TX, creating an Eventuality. - 3) A reorganization to a shorter chain occurs, including the published TX in x+1b. 
- 4) The 33% of nodes which are latent will be allowed to scan x+1b as soon as x+10b - exists. They won't wait for Serai to include the Batch for x until they try to scan - x+10b. - 5) These latent nodes will handle x+1b, post-create an Eventuality, post-learn x+1b - contained resolutions, changing how x+1b should've been interpreted. - - We either have to: - A) Fully serialize scanning (removing the ability to utilize throughput to allow higher - latency, at least while the step is `ClosingExisting`). - B) Create Eventualities immediately, which we can't do as then both the external - network's clock AND Serai's clock can trigger Eventualities, removing ordering. - We'd need to shift entirely to the external network's clock, only handling Burns - outside the parallelization window (which would be extremely latent). - C) Use a different mechanism to determine if we created an output. - D) Re-define which outputs are still to be handled after the 6 hour period expires, such - that the multisig's lifetime cannot be further extended yet it does fulfill its - responsibility. - - External outputs to the existing multisig will be: - - Scanned before the rotation and unused (as used External outputs become Change) - - Forwarded immediately upon scanning - - Not scanned before the cut off time (and accordingly dropped) - - For the first case, since they're scanned before the rotation and unused, they'll be - forwarded with all other available outputs (since they'll be available when scanned). - - Change outputs will be: - - Scanned before the rotation and forwarded with all other available outputs - - Forwarded immediately upon scanning - - Not scanned before the cut off time, requiring an extension exclusive to these outputs - - The important thing to note about honest Change outputs to the existing multisig is that - they'll only be created within `CONFIRMATIONS+1` blocks of the activation block. Also - important to note is that there's another explicit window of `CONFIRMATIONS` before the - 6 hour window. - - Eventualities are not guaranteed to be known before we scan the block containing their - resolution. They are guaranteed to be known within `CONFIRMATIONS-1` blocks however, due - to the limitation on how far we'll scan ahead. - - This means we will know of all Eventualities related to Change outputs we need to forward - before the 6 hour period begins (as forwarding outputs will not create any Change outputs - to the existing multisig). - - This means a definition of complete can be defined as: - 1) Handled all Branch outputs - 2) Forwarded all External outputs received before the end of 6 hour window - 3) Forwarded the results of all Eventualities with Change, which will have been created - before the 6 hour window - - How can we track and ensure this without needing to check if an output is from the - resolution of an Eventuality? - - 1) We only create Branch outputs before the 6 hour window starts. These are guaranteed - to appear within `CONFIRMATIONS` blocks. They will exist with arbitrary depth however, - meaning that upon completion they will spawn several more Eventualities. The further - created Eventualities re-risk being present after the 6 hour period ends. - - We can: - 1) Build a queue for Branch outputs, delaying their handling until relevant - Eventualities are guaranteed to be present. 
- - This solution would theoretically work for all outputs and allow collapsing this - problem to simply: - - > Accordingly, only handling outputs we created should be definable as only - handling outputs from the resolution of Eventualities. - - 2) Create all Eventualities under a Branch at time of Branch creation. - This idea fails as Plans are tightly bound to outputs. - - 3) Don't track Branch outputs by Eventualities, yet by the amount of Branch outputs - remaining. Any Branch output received, of a useful amount, is assumed to be our - own and handled. All other Branch outputs, even if they're the completion of some - Eventuality, are dropped. - - This avoids needing any additional queue, avoiding additional pipelining/latency. - - 2) External outputs are self-evident. We simply stop handling them at the cut-off point, - and only start checking after `CONFIRMATIONS` blocks if all Eventualities are - complete. - - 3) Since all Change Eventualities will be known prior to the 6 hour window's beginning, - we can safely check if a received Change output is the resolution of an Eventuality. - We only need to forward it if so. Forwarding it simply requires only checking if - Eventualities are complete after `CONFIRMATIONS` blocks, same as for straggling - External outputs. - */ - - let mut plans = vec![]; - existing_outputs.retain(|output| { - match output.kind() { - OutputType::External | OutputType::Forwarded => false, - OutputType::Branch => { - let scheduler = &mut self.existing.as_mut().unwrap().scheduler; - // There *would* be a race condition here due to the fact we only mark a `Branch` output - // as needed when we process the block (and handle scheduling), yet actual `Branch` - // outputs may appear as soon as the next block (and we scan the next block before we - // process the prior block) - // - // Unlike Eventuality checking, which happens on scanning and is therefore asynchronous, - // all scheduling (and this check against the scheduler) happens on processing, which is - // synchronous - // - // While we could move Eventuality checking into the block processing, removing its - // asynchonicity, we could only check data the Scanner deems important. The Scanner won't - // deem important Eventuality resolutions which don't create an output to Serai unless - // it knows of the Eventuality. Accordingly, at best we could have a split role (the - // Scanner noting completion of Eventualities which don't have relevant outputs, the - // processing noting completion of ones which do) - // - // This is unnecessary, due to the current flow around Eventuality resolutions and the - // current bounds naturally found being sufficiently amenable, yet notable for the future - if scheduler.can_use_branch(output.balance()) { - // We could simply call can_use_branch, yet it'd have an edge case where if we receive - // two outputs for 100, and we could use one such output, we'd handle both. - // - // Individually schedule each output once confirming they're usable in order to avoid - // this. - let mut plan = scheduler.schedule::( - txn, - vec![output.clone()], - vec![], - self.new.as_ref().unwrap().key, - false, - ); - assert_eq!(plan.len(), 1); - let plan = plan.remove(0); - plans.push(plan); - } - false - } - OutputType::Change => { - // If the TX containing this output resolved an Eventuality... - if let Some(plan) = ResolvedDb::get(txn, output.tx_id().as_ref()) { - // And the Eventuality had change... 
- // We need this check as Eventualities have a race condition and can't be relied - // on, as extensively detailed above. Eventualities explicitly with change do have - // a safe timing window however - if PlanDb::plan_by_key_with_self_change::( - txn, - // Pass the key so the DB checks the Plan's key is this multisig's, preventing a - // potential issue where the new multisig creates a Plan with change *and a - // payment to the existing multisig's change address* - self.existing.as_ref().unwrap().key, - plan, - ) { - // Then this is an honest change output we need to forward - // (or it's a payment to the change address in the same transaction as an honest - // change output, which is fine to let slip in) - return true; - } - } - false - } - } - }); - plans - } - - // Returns the Plans caused from a block being acknowledged. - // - // Will rotate keys if the block acknowledged is the retirement block. - async fn plans_from_block( - &mut self, - txn: &mut D::Transaction<'_>, - block_number: usize, - block_id: >::Id, - step: &mut RotationStep, - burns: Vec, - ) -> (bool, Vec>, HashSet<[u8; 32]>) { - let (mut existing_payments, mut new_payments) = self.burns_to_payments(txn, *step, burns); - - let mut plans = vec![]; - let mut plans_from_scanning = HashSet::new(); - - // We now have to acknowledge the acknowledged block, if it's new - // It won't be if this block's `InInstruction`s were split into multiple `Batch`s - let (acquired_lock, (mut existing_outputs, new_outputs)) = { - let (acquired_lock, mut outputs) = if ScannerHandle::::db_scanned(txn) - .expect("published a Batch despite never scanning a block") < - block_number - { - // Load plans crated when we scanned the block - let scanning_plans = - PlansFromScanningDb::take_plans_from_scanning::(txn, block_number).unwrap(); - // Expand into actual plans - plans = scanning_plans - .into_iter() - .map(|plan| match plan { - PlanFromScanning::Refund(output, refund_to) => { - let existing = self.existing.as_mut().unwrap(); - if output.key() == existing.key { - Self::refund_plan(&mut existing.scheduler, txn, output, refund_to) - } else { - let new = self - .new - .as_mut() - .expect("new multisig didn't expect yet output wasn't for existing multisig"); - assert_eq!(output.key(), new.key, "output wasn't for existing nor new multisig"); - Self::refund_plan(&mut new.scheduler, txn, output, refund_to) - } - } - PlanFromScanning::Forward(output) => self - .forward_plan(txn, &output) - .expect("supposed to forward an output yet no forwarding plan"), - }) - .collect(); - - for plan in &plans { - plans_from_scanning.insert(plan.id()); - } - - let (is_retirement_block, outputs) = self.scanner.ack_block(txn, block_id.clone()).await; - if is_retirement_block { - let existing = self.existing.take().unwrap(); - assert!(existing.scheduler.empty()); - self.existing = self.new.take(); - *step = RotationStep::UseExisting; - assert!(existing_payments.is_empty()); - existing_payments = new_payments; - new_payments = vec![]; - } - (true, outputs) - } else { - (false, vec![]) - }; - - // Remove all outputs already present in plans - let mut output_set = HashSet::new(); - for plan in &plans { - for input in &plan.inputs { - output_set.insert(input.id().as_ref().to_vec()); - } - } - outputs.retain(|output| !output_set.remove(output.id().as_ref())); - assert_eq!(output_set.len(), 0); - - (acquired_lock, self.split_outputs_by_key(outputs)) - }; - - // If we're closing the existing multisig, filter its outputs down - if *step == RotationStep::ClosingExisting { - 
plans.extend(self.filter_outputs_due_to_closing(txn, &mut existing_outputs)); - } - - // Now that we've done all our filtering, schedule the existing multisig's outputs - plans.extend({ - let existing = self.existing.as_mut().unwrap(); - let existing_key = existing.key; - self.existing.as_mut().unwrap().scheduler.schedule::( - txn, - existing_outputs, - existing_payments, - match *step { - RotationStep::UseExisting => existing_key, - RotationStep::NewAsChange | - RotationStep::ForwardFromExisting | - RotationStep::ClosingExisting => self.new.as_ref().unwrap().key, - }, - match *step { - RotationStep::UseExisting | RotationStep::NewAsChange => false, - RotationStep::ForwardFromExisting | RotationStep::ClosingExisting => true, - }, - ) - }); - - for plan in &plans { - // This first equality should 'never meaningfully' be false - // All created plans so far are by the existing multisig EXCEPT: - // A) If we created a refund plan from the new multisig (yet that wouldn't have change) - // B) The existing Scheduler returned a Plan for the new key (yet that happens with the SC - // scheduler, yet that doesn't have change) - // Despite being 'unnecessary' now, it's better to explicitly ensure and be robust - if plan.key == self.existing.as_ref().unwrap().key { - if let Some(change) = N::change_address(plan.key) { - if plan.change == Some(change) { - // Assert these (self-change) are only created during the expected step - match *step { - RotationStep::UseExisting => {} - RotationStep::NewAsChange | - RotationStep::ForwardFromExisting | - RotationStep::ClosingExisting => panic!("change was set to self despite rotating"), - } - } - } - } - } - - // Schedule the new multisig's outputs too - if let Some(new) = self.new.as_mut() { - plans.extend(new.scheduler.schedule::(txn, new_outputs, new_payments, new.key, false)); - } - - (acquired_lock, plans, plans_from_scanning) - } - - /// Handle a SubstrateBlock event, building the relevant Plans. 
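For reference before the `substrate_block` handler below: the scheduling calls above pick the change key and the force-spend flag purely from the rotation step. The following standalone restatement of that mapping is illustrative only (the enum is redeclared locally so the snippet compiles on its own):

```rust
// Illustrative only: how change is routed and when the existing multisig is forced to spend,
// per rotation step, mirroring the `schedule` arguments above.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum RotationStep {
  UseExisting,
  NewAsChange,
  ForwardFromExisting,
  ClosingExisting,
}

/// Returns (send change to the new key?, force the existing multisig to spend everything?).
fn change_routing(step: RotationStep) -> (bool, bool) {
  let change_to_new = !matches!(step, RotationStep::UseExisting);
  let force_spend =
    matches!(step, RotationStep::ForwardFromExisting | RotationStep::ClosingExisting);
  (change_to_new, force_spend)
}

fn main() {
  assert_eq!(change_routing(RotationStep::UseExisting), (false, false));
  assert_eq!(change_routing(RotationStep::NewAsChange), (true, false));
  assert_eq!(change_routing(RotationStep::ForwardFromExisting), (true, true));
  assert_eq!(change_routing(RotationStep::ClosingExisting), (true, true));
}
```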
- pub async fn substrate_block( - &mut self, - txn: &mut D::Transaction<'_>, - network: &N, - context: SubstrateContext, - burns: Vec, - ) -> (bool, Vec<(::G, [u8; 32], N::SignableTransaction, N::Eventuality)>) - { - let mut block_id = >::Id::default(); - block_id.as_mut().copy_from_slice(context.network_latest_finalized_block.as_ref()); - let block_number = ScannerHandle::::block_number(txn, &block_id) - .expect("SubstrateBlock with context we haven't synced"); - - // Determine what step of rotation we're currently in - let mut step = self.current_rotation_step(block_number); - - // Get the Plans from this block - let (acquired_lock, plans, plans_from_scanning) = - self.plans_from_block(txn, block_number, block_id, &mut step, burns).await; - - let res = { - let mut res = Vec::with_capacity(plans.len()); - - for plan in plans { - let id = plan.id(); - info!("preparing plan {}: {:?}", hex::encode(id), plan); - - let key = plan.key; - let key_bytes = key.to_bytes(); - - let (tx, post_fee_branches) = { - let running_operating_costs = OperatingCostsDb::take_operating_costs(txn); - - PlanDb::save_active_plan::( - txn, - key_bytes.as_ref(), - block_number, - &plan, - running_operating_costs, - ); - - // If this Plan is from the scanner handler below, don't take the opportunity to amortize - // operating costs - // It operates with limited context, and on a different clock, making it unable to react - // to operating costs - // Despite this, in order to properly save forwarded outputs' instructions, it needs to - // know the actual value forwarded outputs will be created with - // Including operating costs prevents that - let from_scanning = plans_from_scanning.contains(&plan.id()); - let to_use_operating_costs = if from_scanning { 0 } else { running_operating_costs }; - - let PreparedSend { tx, post_fee_branches, mut operating_costs } = - prepare_send(network, block_number, plan, to_use_operating_costs).await; - - // Restore running_operating_costs to operating_costs - if from_scanning { - // If we're forwarding (or refunding) this output, operating_costs should still be 0 - // Either this TX wasn't created, causing no operating costs, or it was yet it'd be - // amortized - assert_eq!(operating_costs, 0); - - operating_costs += running_operating_costs; - } - - OperatingCostsDb::set_operating_costs(txn, operating_costs); - - (tx, post_fee_branches) - }; - - for branch in post_fee_branches { - let existing = self.existing.as_mut().unwrap(); - let to_use = if key == existing.key { - existing - } else { - let new = self - .new - .as_mut() - .expect("plan wasn't for existing multisig yet there wasn't a new multisig"); - assert_eq!(key, new.key); - new - }; - - to_use.scheduler.created_output::(txn, branch.expected, branch.actual); - } - - if let Some((tx, eventuality)) = tx { - // The main function we return to will send an event to the coordinator which must be - // fired before these registered Eventualities have their Completions fired - // Safety is derived from a mutable lock on the Scanner being preserved, preventing - // scanning (and detection of Eventuality resolutions) before it's released - // It's only released by the main function after it does what it will - self - .scanner - .register_eventuality(key_bytes.as_ref(), block_number, id, eventuality.clone()) - .await; - - res.push((key, id, tx, eventuality)); - } - - // TODO: If the TX is None, restore its inputs to the scheduler for efficiency's sake - // If this TODO is removed, also reduce the operating costs - } - res - }; - 
(acquired_lock, res) - } - - pub async fn release_scanner_lock(&mut self) { - self.scanner.release_lock().await; - } - - pub async fn scanner_event_to_multisig_event( - &self, - txn: &mut D::Transaction<'_>, - network: &N, - msg: ScannerEvent, - ) -> MultisigEvent { - let (block_number, event) = match msg { - ScannerEvent::Block { is_retirement_block, block, mut outputs } => { - // Since the Scanner is asynchronous, the following is a concern for race conditions - // We safely know the step of a block since keys are declared, and the Scanner is safe - // with respect to the declaration of keys - // Accordingly, the following calls regarding new keys and step should be safe - let block_number = ScannerHandle::::block_number(txn, &block) - .expect("didn't have the block number for a block we just scanned"); - let step = self.current_rotation_step(block_number); - - // Instructions created from this block - let mut instructions = vec![]; - - // If any of these outputs were forwarded, create their instruction now - for output in &outputs { - if output.kind() != OutputType::Forwarded { - continue; - } - - if let Some(instruction) = ForwardedOutputDb::take_forwarded_output(txn, output.balance()) - { - instructions.push(instruction); - } - } - - // If the remaining outputs aren't externally received funds, don't handle them as - // instructions - outputs.retain(|output| output.kind() == OutputType::External); - - // These plans are of limited context. They're only allowed the outputs newly received - // within this block and are intended to handle forwarding transactions/refunds - let mut plans = vec![]; - - // If the old multisig is explicitly only supposed to forward, create all such plans now - if step == RotationStep::ForwardFromExisting { - let mut i = 0; - while i < outputs.len() { - let output = &outputs[i]; - let plans = &mut plans; - let txn = &mut *txn; - - #[allow(clippy::redundant_closure_call)] - let should_retain = (|| async move { - // If this output doesn't belong to the existing multisig, it shouldn't be forwarded - if output.key() != self.existing.as_ref().unwrap().key { - return true; - } - - let plans_at_start = plans.len(); - let (refund_to, instruction) = instruction_from_output::(output); - if let Some(mut instruction) = instruction { - let Some(shimmed_plan) = N::Scheduler::shim_forward_plan( - output.clone(), - self.new.as_ref().expect("forwarding from existing yet no new multisig").key, - ) else { - // If this network doesn't need forwarding, report the output now - return true; - }; - plans.push(PlanFromScanning::::Forward(output.clone())); - - // Set the instruction for this output to be returned - // We need to set it under the amount it's forwarded with, so prepare its forwarding - // TX to determine the fees involved - let PreparedSend { tx, post_fee_branches: _, operating_costs } = - prepare_send(network, block_number, shimmed_plan, 0).await; - // operating_costs should not increase in a forwarding TX - assert_eq!(operating_costs, 0); - - // If this actually forwarded any coins, save the output as forwarded - // If this didn't create a TX, we don't bother saving the output as forwarded - // The fact we already created and pushed a plan still using this output will cause - // it to not be retained here, and later the plan will be dropped as this did here, - // letting it die out - if let Some(tx) = &tx { - instruction.balance.amount.0 -= tx.0.fee(); - - /* - Sending a Plan, with arbitrary data proxying the InInstruction, would require - adding a flow for networks 
which drop their data to still embed arbitrary data. - It'd also have edge cases causing failures (we'd need to manually provide the - origin if it was implied, which may exceed the encoding limit). - - Instead, we save the InInstruction as we scan this output. Then, when the - output is successfully forwarded, we simply read it from the local database. - This also saves the costs of embedding arbitrary data. - - Since we can't rely on the Eventuality system to detect if it's a forwarded - transaction, due to the asynchronicity of the Eventuality system, we instead - interpret a Forwarded output which has an amount associated with an - InInstruction which was forwarded as having been forwarded. - */ - ForwardedOutputDb::save_forwarded_output(txn, &instruction); - } - } else if let Some(refund_to) = refund_to { - if let Ok(refund_to) = refund_to.consume().try_into() { - // Build a dedicated Plan refunding this - plans.push(PlanFromScanning::Refund(output.clone(), refund_to)); - } - } - - // Only keep if we didn't make a Plan consuming it - plans_at_start == plans.len() - })() - .await; - if should_retain { - i += 1; - continue; - } - outputs.remove(i); - } - } - - for output in outputs { - // If this is an External transaction to the existing multisig, and we're either solely - // forwarding or closing the existing multisig, drop it - // In the forwarding case, we'll report it once it hits the new multisig - if (match step { - RotationStep::UseExisting | RotationStep::NewAsChange => false, - RotationStep::ForwardFromExisting | RotationStep::ClosingExisting => true, - }) && (output.key() == self.existing.as_ref().unwrap().key) - { - continue; - } - - let (refund_to, instruction) = instruction_from_output::(&output); - let Some(instruction) = instruction else { - if let Some(refund_to) = refund_to { - if let Ok(refund_to) = refund_to.consume().try_into() { - plans.push(PlanFromScanning::Refund(output.clone(), refund_to)); - } - } - continue; - }; - - // Delay External outputs received to new multisig earlier than expected - if Some(output.key()) == self.new.as_ref().map(|new| new.key) { - match step { - RotationStep::UseExisting => { - DelayedOutputDb::save_delayed_output(txn, &instruction); - continue; - } - RotationStep::NewAsChange | - RotationStep::ForwardFromExisting | - RotationStep::ClosingExisting => {} - } - } - - instructions.push(instruction); - } - - // Save the plans created while scanning - // TODO: Should we combine all of these plans to reduce the fees incurred from their - // execution? They're refunds and forwards. Neither should need isolated Plan/Eventualities. 
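The flow described in the comment above (save the InInstruction at scan time with its amount reduced by the forwarding fee, then read it back once a Forwarded output of that amount arrives) can be modeled with a small standalone sketch. The names here are illustrative stand-ins rather than the removed `ForwardedOutputDb` API, and keying by a single `u64` amount is a simplification of keying by the full `ExternalBalance`:

```rust
// Minimal sketch: pending instructions are stored under the net amount the forwarded
// output will arrive with, then taken back out when such an output is scanned.
use std::collections::{HashMap, VecDeque};

#[derive(Clone, Debug, PartialEq)]
struct PendingInstruction {
  amount: u64,
  payload: Vec<u8>,
}

#[derive(Default)]
struct ForwardedStore {
  pending: HashMap<u64, VecDeque<PendingInstruction>>,
}

impl ForwardedStore {
  fn save_forwarded(&mut self, mut instruction: PendingInstruction, forwarding_fee: u64) {
    // The forwarded output is created with the fee already deducted
    instruction.amount -= forwarding_fee;
    self.pending.entry(instruction.amount).or_default().push_back(instruction);
  }

  fn take_forwarded(&mut self, received_amount: u64) -> Option<PendingInstruction> {
    let queue = self.pending.get_mut(&received_amount)?;
    let res = queue.pop_front();
    if queue.is_empty() {
      self.pending.remove(&received_amount);
    }
    res
  }
}

fn main() {
  let mut store = ForwardedStore::default();
  store.save_forwarded(PendingInstruction { amount: 1_000, payload: vec![1] }, 50);
  // The Forwarded output later appears with the net amount of 950
  assert_eq!(store.take_forwarded(950).map(|i| i.payload), Some(vec![1]));
}
```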
- PlansFromScanningDb::set_plans_from_scanning(txn, block_number, plans); - - // If any outputs were delayed, append them into this block - match step { - RotationStep::UseExisting => {} - RotationStep::NewAsChange | - RotationStep::ForwardFromExisting | - RotationStep::ClosingExisting => { - instructions.extend(DelayedOutputDb::take_delayed_outputs(txn)); - } - } - - let mut block_hash = [0; 32]; - block_hash.copy_from_slice(block.as_ref()); - let mut batch_id = NextBatchDb::get(txn).unwrap_or_default(); - - // start with empty batch - let mut batches = vec![Batch { - network: N::NETWORK, - id: batch_id, - block: BlockHash(block_hash), - instructions: vec![], - }]; - - for instruction in instructions { - let batch = batches.last_mut().unwrap(); - batch.instructions.push(instruction); - - // check if batch is over-size - if batch.encode().len() > MAX_BATCH_SIZE { - // pop the last instruction so it's back in size - let instruction = batch.instructions.pop().unwrap(); - - // bump the id for the new batch - batch_id += 1; - - // make a new batch with this instruction included - batches.push(Batch { - network: N::NETWORK, - id: batch_id, - block: BlockHash(block_hash), - instructions: vec![instruction], - }); - } - } - - // Save the next batch ID - NextBatchDb::set(txn, &(batch_id + 1)); - - ( - block_number, - MultisigEvent::Batches( - if is_retirement_block { - Some((self.existing.as_ref().unwrap().key, self.new.as_ref().unwrap().key)) - } else { - None - }, - batches, - ), - ) - } - - // This must be emitted before ScannerEvent::Block for all completions of known Eventualities - // within the block. Unknown Eventualities may have their Completed events emitted after - // ScannerEvent::Block however. - ScannerEvent::Completed(key, block_number, id, tx_id, completion) => { - ResolvedDb::resolve_plan::(txn, &key, id, &tx_id); - (block_number, MultisigEvent::Completed(key, id, completion)) - } - }; - - // If we either received a Block event (which will be the trigger when we have no - // Plans/Eventualities leading into ClosingExisting), or we received the last Completed for - // this multisig, set its retirement block - let existing = self.existing.as_ref().unwrap(); - - // This multisig is closing - let closing = self.current_rotation_step(block_number) == RotationStep::ClosingExisting; - // There's nothing left in its Scheduler. This call is safe as: - // 1) When ClosingExisting, all outputs should've been already forwarded, preventing - // new UTXOs from accumulating. - // 2) No new payments should be issued. - // 3) While there may be plans, they'll be dropped to create Eventualities. - // If this Eventuality is resolved, the Plan has already been dropped. - // 4) If this Eventuality will trigger a Plan, it'll still be in the plans HashMap. - let scheduler_is_empty = closing && existing.scheduler.empty(); - // Nothing is still being signed - let no_active_plans = scheduler_is_empty && - PlanDb::active_plans::(txn, existing.key.to_bytes().as_ref()).is_empty(); - - self - .scanner - .multisig_completed - // The above explicitly included their predecessor to ensure short-circuiting, yet their - // names aren't defined as an aggregate check. 
Still including all three here ensures all are - // used in the final value - .send(closing && scheduler_is_empty && no_active_plans) - .unwrap(); - - event - } - - pub async fn next_scanner_event(&mut self) -> ScannerEvent { - self.scanner.events.recv().await.unwrap() - } -} diff --git a/processor/src/multisigs/scanner.rs b/processor/src/multisigs/scanner.rs deleted file mode 100644 index 1b25e108..00000000 --- a/processor/src/multisigs/scanner.rs +++ /dev/null @@ -1,739 +0,0 @@ -use core::marker::PhantomData; -use std::{ - sync::Arc, - io::Read, - time::Duration, - collections::{VecDeque, HashSet, HashMap}, -}; - -use ciphersuite::group::GroupEncoding; -use frost::curve::Ciphersuite; - -use log::{info, debug, warn}; -use tokio::{ - sync::{RwLockReadGuard, RwLockWriteGuard, RwLock, mpsc}, - time::sleep, -}; - -use crate::{ - Get, DbTxn, Db, - networks::{Output, Transaction, Eventuality, EventualitiesTracker, Block, Network}, -}; - -#[derive(Clone, Debug)] -pub enum ScannerEvent { - // Block scanned - Block { - is_retirement_block: bool, - block: >::Id, - outputs: Vec, - }, - // Eventuality completion found on-chain - // TODO: Move this from a tuple - Completed( - Vec, - usize, - [u8; 32], - >::Id, - ::Completion, - ), -} - -pub type ScannerEventChannel = mpsc::UnboundedReceiver>; - -#[derive(Clone, Debug)] -struct ScannerDb(PhantomData, PhantomData); -impl ScannerDb { - fn scanner_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec { - D::key(b"SCANNER", dst, key) - } - - fn block_key(number: usize) -> Vec { - Self::scanner_key(b"block_id", u64::try_from(number).unwrap().to_le_bytes()) - } - fn block_number_key(id: &>::Id) -> Vec { - Self::scanner_key(b"block_number", id) - } - fn save_block(txn: &mut D::Transaction<'_>, number: usize, id: &>::Id) { - txn.put(Self::block_number_key(id), u64::try_from(number).unwrap().to_le_bytes()); - txn.put(Self::block_key(number), id); - } - fn block(getter: &G, number: usize) -> Option<>::Id> { - getter.get(Self::block_key(number)).map(|id| { - let mut res = >::Id::default(); - res.as_mut().copy_from_slice(&id); - res - }) - } - fn block_number(getter: &G, id: &>::Id) -> Option { - getter - .get(Self::block_number_key(id)) - .map(|number| u64::from_le_bytes(number.try_into().unwrap()).try_into().unwrap()) - } - - fn keys_key() -> Vec { - Self::scanner_key(b"keys", b"") - } - fn register_key( - txn: &mut D::Transaction<'_>, - activation_number: usize, - key: ::G, - ) { - let mut keys = txn.get(Self::keys_key()).unwrap_or(vec![]); - - let key_bytes = key.to_bytes(); - - let key_len = key_bytes.as_ref().len(); - assert_eq!(keys.len() % (8 + key_len), 0); - - // Sanity check this key isn't already present - let mut i = 0; - while i < keys.len() { - if &keys[(i + 8) .. 
((i + 8) + key_len)] == key_bytes.as_ref() { - panic!("adding {} as a key yet it was already present", hex::encode(key_bytes)); - } - i += 8 + key_len; - } - - keys.extend(u64::try_from(activation_number).unwrap().to_le_bytes()); - keys.extend(key_bytes.as_ref()); - txn.put(Self::keys_key(), keys); - } - fn keys(getter: &G) -> Vec<(usize, ::G)> { - let bytes_vec = getter.get(Self::keys_key()).unwrap_or(vec![]); - let mut bytes: &[u8] = bytes_vec.as_ref(); - - // Assumes keys will be 32 bytes when calculating the capacity - // If keys are larger, this may allocate more memory than needed - // If keys are smaller, this may require additional allocations - // Either are fine - let mut res = Vec::with_capacity(bytes.len() / (8 + 32)); - while !bytes.is_empty() { - let mut activation_number = [0; 8]; - bytes.read_exact(&mut activation_number).unwrap(); - let activation_number = u64::from_le_bytes(activation_number).try_into().unwrap(); - - res.push((activation_number, N::Curve::read_G(&mut bytes).unwrap())); - } - res - } - fn retire_key(txn: &mut D::Transaction<'_>) { - let keys = Self::keys(txn); - assert_eq!(keys.len(), 2); - txn.del(Self::keys_key()); - Self::register_key(txn, keys[1].0, keys[1].1); - } - - fn seen_key(id: &>::Id) -> Vec { - Self::scanner_key(b"seen", id) - } - fn seen(getter: &G, id: &>::Id) -> bool { - getter.get(Self::seen_key(id)).is_some() - } - - fn outputs_key(block: &>::Id) -> Vec { - Self::scanner_key(b"outputs", block.as_ref()) - } - fn save_outputs( - txn: &mut D::Transaction<'_>, - block: &>::Id, - outputs: &[N::Output], - ) { - let mut bytes = Vec::with_capacity(outputs.len() * 64); - for output in outputs { - output.write(&mut bytes).unwrap(); - } - txn.put(Self::outputs_key(block), bytes); - } - fn outputs( - txn: &D::Transaction<'_>, - block: &>::Id, - ) -> Option> { - let bytes_vec = txn.get(Self::outputs_key(block))?; - let mut bytes: &[u8] = bytes_vec.as_ref(); - - let mut res = vec![]; - while !bytes.is_empty() { - res.push(N::Output::read(&mut bytes).unwrap()); - } - Some(res) - } - - fn scanned_block_key() -> Vec { - Self::scanner_key(b"scanned_block", []) - } - - fn save_scanned_block(txn: &mut D::Transaction<'_>, block: usize) -> Vec { - let id = Self::block(txn, block); // It may be None for the first key rotated to - let outputs = - if let Some(id) = id.as_ref() { Self::outputs(txn, id).unwrap_or(vec![]) } else { vec![] }; - - // Mark all the outputs from this block as seen - for output in &outputs { - txn.put(Self::seen_key(&output.id()), b""); - } - - txn.put(Self::scanned_block_key(), u64::try_from(block).unwrap().to_le_bytes()); - - // Return this block's outputs so they can be pruned from the RAM cache - outputs - } - fn latest_scanned_block(getter: &G) -> Option { - getter - .get(Self::scanned_block_key()) - .map(|bytes| u64::from_le_bytes(bytes.try_into().unwrap()).try_into().unwrap()) - } - - fn retirement_block_key(key: &::G) -> Vec { - Self::scanner_key(b"retirement_block", key.to_bytes()) - } - fn save_retirement_block( - txn: &mut D::Transaction<'_>, - key: &::G, - block: usize, - ) { - txn.put(Self::retirement_block_key(key), u64::try_from(block).unwrap().to_le_bytes()); - } - fn retirement_block(getter: &G, key: &::G) -> Option { - getter - .get(Self::retirement_block_key(key)) - .map(|bytes| usize::try_from(u64::from_le_bytes(bytes.try_into().unwrap())).unwrap()) - } -} - -/// The Scanner emits events relating to the blockchain, notably received outputs. 
-/// -/// It WILL NOT fail to emit an event, even if it reboots at selected moments. -/// -/// It MAY fire the same event multiple times. -#[derive(Debug)] -pub struct Scanner { - _db: PhantomData, - - keys: Vec<(usize, ::G)>, - - eventualities: HashMap, EventualitiesTracker>, - - ram_scanned: Option, - ram_outputs: HashSet>, - - need_ack: VecDeque, - - events: mpsc::UnboundedSender>, -} - -#[derive(Clone, Debug)] -struct ScannerHold { - scanner: Arc>>>, -} -impl ScannerHold { - async fn read(&self) -> RwLockReadGuard<'_, Option>> { - loop { - let lock = self.scanner.read().await; - if lock.is_none() { - drop(lock); - tokio::task::yield_now().await; - continue; - } - return lock; - } - } - async fn write(&self) -> RwLockWriteGuard<'_, Option>> { - loop { - let lock = self.scanner.write().await; - if lock.is_none() { - drop(lock); - tokio::task::yield_now().await; - continue; - } - return lock; - } - } - // This is safe to not check if something else already acquired the Scanner as the only caller is - // sequential. - async fn long_term_acquire(&self) -> Scanner { - self.scanner.write().await.take().unwrap() - } - async fn restore(&self, scanner: Scanner) { - let _ = self.scanner.write().await.insert(scanner); - } -} - -#[derive(Debug)] -pub struct ScannerHandle { - scanner: ScannerHold, - held_scanner: Option>, - pub events: ScannerEventChannel, - pub multisig_completed: mpsc::UnboundedSender, -} - -impl ScannerHandle { - pub async fn ram_scanned(&self) -> usize { - self.scanner.read().await.as_ref().unwrap().ram_scanned.unwrap_or(0) - } - - /// Register a key to scan for. - pub async fn register_key( - &mut self, - txn: &mut D::Transaction<'_>, - activation_number: usize, - key: ::G, - ) { - info!("Registering key {} in scanner at {activation_number}", hex::encode(key.to_bytes())); - - let mut scanner_lock = self.scanner.write().await; - let scanner = scanner_lock.as_mut().unwrap(); - assert!( - activation_number > scanner.ram_scanned.unwrap_or(0), - "activation block of new keys was already scanned", - ); - - if scanner.keys.is_empty() { - assert!(scanner.ram_scanned.is_none()); - scanner.ram_scanned = Some(activation_number); - assert!(ScannerDb::::save_scanned_block(txn, activation_number).is_empty()); - } - - ScannerDb::::register_key(txn, activation_number, key); - scanner.keys.push((activation_number, key)); - #[cfg(not(test))] // TODO: A test violates this. Improve the test with a better flow - assert!(scanner.keys.len() <= 2); - - scanner.eventualities.insert(key.to_bytes().as_ref().to_vec(), EventualitiesTracker::new()); - } - - pub fn db_scanned(getter: &G) -> Option { - ScannerDb::::latest_scanned_block(getter) - } - - // This perform a database read which isn't safe with regards to if the value is set or not - // It may be set, when it isn't expected to be set, or not set, when it is expected to be set - // Since the value is static, if it's set, it's correctly set - pub fn block_number(getter: &G, id: &>::Id) -> Option { - ScannerDb::::block_number(getter, id) - } - - /// Acknowledge having handled a block. - /// - /// Creates a lock over the Scanner, preventing its independent scanning operations until - /// released. - /// - /// This must only be called on blocks which have been scanned in-memory. 
- pub async fn ack_block( - &mut self, - txn: &mut D::Transaction<'_>, - id: >::Id, - ) -> (bool, Vec) { - debug!("block {} acknowledged", hex::encode(&id)); - - let mut scanner = self.scanner.long_term_acquire().await; - - // Get the number for this block - let number = ScannerDb::::block_number(txn, &id) - .expect("main loop trying to operate on data we haven't scanned"); - log::trace!("block {} was {number}", hex::encode(&id)); - - let outputs = ScannerDb::::save_scanned_block(txn, number); - // This has a race condition if we try to ack a block we scanned on a prior boot, and we have - // yet to scan it on this boot - assert!(number <= scanner.ram_scanned.unwrap()); - for output in &outputs { - assert!(scanner.ram_outputs.remove(output.id().as_ref())); - } - - assert_eq!(scanner.need_ack.pop_front().unwrap(), number); - - self.held_scanner = Some(scanner); - - // Load the key from the DB, as it will have already been removed from RAM if retired - let key = ScannerDb::::keys(txn)[0].1; - let is_retirement_block = ScannerDb::::retirement_block(txn, &key) == Some(number); - if is_retirement_block { - ScannerDb::::retire_key(txn); - } - (is_retirement_block, outputs) - } - - pub async fn register_eventuality( - &mut self, - key: &[u8], - block_number: usize, - id: [u8; 32], - eventuality: N::Eventuality, - ) { - let mut lock; - // We won't use held_scanner if we're re-registering on boot - (if let Some(scanner) = self.held_scanner.as_mut() { - scanner - } else { - lock = Some(self.scanner.write().await); - lock.as_mut().unwrap().as_mut().unwrap() - }) - .eventualities - .get_mut(key) - .unwrap() - .register(block_number, id, eventuality) - } - - pub async fn release_lock(&mut self) { - self.scanner.restore(self.held_scanner.take().unwrap()).await - } -} - -impl Scanner { - #[allow(clippy::type_complexity, clippy::new_ret_no_self)] - pub fn new( - network: N, - db: D, - ) -> (ScannerHandle, Vec<(usize, ::G)>) { - let (events_send, events_recv) = mpsc::unbounded_channel(); - let (multisig_completed_send, multisig_completed_recv) = mpsc::unbounded_channel(); - - let keys = ScannerDb::::keys(&db); - let mut eventualities = HashMap::new(); - for key in &keys { - eventualities.insert(key.1.to_bytes().as_ref().to_vec(), EventualitiesTracker::new()); - } - - let ram_scanned = ScannerDb::::latest_scanned_block(&db); - - let scanner = ScannerHold { - scanner: Arc::new(RwLock::new(Some(Scanner { - _db: PhantomData, - - keys: keys.clone(), - - eventualities, - - ram_scanned, - ram_outputs: HashSet::new(), - - need_ack: VecDeque::new(), - - events: events_send, - }))), - }; - tokio::spawn(Scanner::run(db, network, scanner.clone(), multisig_completed_recv)); - - ( - ScannerHandle { - scanner, - held_scanner: None, - events: events_recv, - multisig_completed: multisig_completed_send, - }, - keys, - ) - } - - fn emit(&mut self, event: ScannerEvent) -> bool { - if self.events.send(event).is_err() { - info!("Scanner handler was dropped. 
Shutting down?"); - return false; - } - true - } - - // An async function, to be spawned on a task, to discover and report outputs - async fn run( - mut db: D, - network: N, - scanner_hold: ScannerHold, - mut multisig_completed: mpsc::UnboundedReceiver, - ) { - loop { - let (ram_scanned, latest_block_to_scan) = { - // Sleep 5 seconds to prevent hammering the node/scanner lock - sleep(Duration::from_secs(5)).await; - - let ram_scanned = { - let scanner_lock = scanner_hold.read().await; - let scanner = scanner_lock.as_ref().unwrap(); - - // If we're not scanning for keys yet, wait until we are - if scanner.keys.is_empty() { - continue; - } - - let ram_scanned = scanner.ram_scanned.unwrap(); - // If a Batch has taken too long to be published, start waiting until it is before - // continuing scanning - // Solves a race condition around multisig rotation, documented in the relevant doc - // and demonstrated with mini - if let Some(needing_ack) = scanner.need_ack.front() { - let next = ram_scanned + 1; - let limit = needing_ack + N::CONFIRMATIONS; - assert!(next <= limit); - if next == limit { - continue; - } - }; - - ram_scanned - }; - - ( - ram_scanned, - loop { - break match network.get_latest_block_number().await { - // Only scan confirmed blocks, which we consider effectively finalized - // CONFIRMATIONS - 1 as whatever's in the latest block already has 1 confirm - Ok(latest) => latest.saturating_sub(N::CONFIRMATIONS.saturating_sub(1)), - Err(_) => { - warn!("couldn't get latest block number"); - sleep(Duration::from_secs(60)).await; - continue; - } - }; - }, - ) - }; - - for block_being_scanned in (ram_scanned + 1) ..= latest_block_to_scan { - // Redo the checks for if we're too far ahead - { - let needing_ack = { - let scanner_lock = scanner_hold.read().await; - let scanner = scanner_lock.as_ref().unwrap(); - scanner.need_ack.front().copied() - }; - - if let Some(needing_ack) = needing_ack { - let limit = needing_ack + N::CONFIRMATIONS; - assert!(block_being_scanned <= limit); - if block_being_scanned == limit { - break; - } - } - } - - let Ok(block) = network.get_block(block_being_scanned).await else { - warn!("couldn't get block {block_being_scanned}"); - break; - }; - let block_id = block.id(); - - info!("scanning block: {} ({block_being_scanned})", hex::encode(&block_id)); - - // These DB calls are safe, despite not having a txn, since they're static values - // There's no issue if they're written in advance of expected (such as on reboot) - // They're also only expected here - if let Some(id) = ScannerDb::::block(&db, block_being_scanned) { - if id != block_id { - panic!("reorg'd from finalized {} to {}", hex::encode(id), hex::encode(block_id)); - } - } else { - // TODO: Move this to an unwrap - if let Some(id) = ScannerDb::::block(&db, block_being_scanned.saturating_sub(1)) { - if id != block.parent() { - panic!( - "block {} doesn't build off expected parent {}", - hex::encode(block_id), - hex::encode(id), - ); - } - } - - let mut txn = db.txn(); - ScannerDb::::save_block(&mut txn, block_being_scanned, &block_id); - txn.commit(); - } - - // Scan new blocks - // TODO: This lock acquisition may be long-lived... 
- let mut scanner_lock = scanner_hold.write().await; - let scanner = scanner_lock.as_mut().unwrap(); - - let mut has_activation = false; - let mut outputs = vec![]; - let mut completion_block_numbers = vec![]; - for (activation_number, key) in scanner.keys.clone() { - if activation_number > block_being_scanned { - continue; - } - - if activation_number == block_being_scanned { - has_activation = true; - } - - let key_vec = key.to_bytes().as_ref().to_vec(); - - // TODO: These lines are the ones which will cause a really long-lived lock acquisition - for output in network.get_outputs(&block, key).await { - assert_eq!(output.key(), key); - if output.balance().amount.0 >= N::DUST { - outputs.push(output); - } - } - - for (id, (block_number, tx, completion)) in network - .get_eventuality_completions(scanner.eventualities.get_mut(&key_vec).unwrap(), &block) - .await - { - info!( - "eventuality {} resolved by {}, as found on chain", - hex::encode(id), - hex::encode(tx.as_ref()) - ); - - completion_block_numbers.push(block_number); - // This must be before the emission of ScannerEvent::Block, per commentary in mod.rs - if !scanner.emit(ScannerEvent::Completed( - key_vec.clone(), - block_number, - id, - tx, - completion, - )) { - return; - } - } - } - - // Panic if we've already seen these outputs - for output in &outputs { - let id = output.id(); - info!( - "block {} had output {} worth {:?}", - hex::encode(&block_id), - hex::encode(&id), - output.balance(), - ); - - // On Bitcoin, the output ID should be unique for a given chain - // On Monero, it's trivial to make an output sharing an ID with another - // We should only scan outputs with valid IDs however, which will be unique - - /* - The safety of this code must satisfy the following conditions: - 1) seen is not set for the first occurrence - 2) seen is set for any future occurrence - - seen is only written to after this code completes. Accordingly, it cannot be set - before the first occurrence UNLESS it's set, yet the last scanned block isn't. - They are both written in the same database transaction, preventing this. - - As for future occurrences, the RAM entry ensures they're handled properly even if - the database has yet to be set. - - On reboot, which will clear the RAM, if seen wasn't set, neither was latest scanned - block. Accordingly, this will scan from some prior block, re-populating the RAM. - - If seen was set, then this will be successfully read. - - There's also no concern ram_outputs was pruned, yet seen wasn't set, as pruning - from ram_outputs will acquire a write lock (preventing this code from acquiring - its own write lock and running), and during its holding of the write lock, it - commits the transaction setting seen and the latest scanned block. - - This last case isn't true. Committing seen/latest_scanned_block happens after - relinquishing the write lock. - - TODO2: Only update ram_outputs after committing the TXN in question. - */ - let seen = ScannerDb::::seen(&db, &id); - let id = id.as_ref().to_vec(); - if seen || scanner.ram_outputs.contains(&id) { - panic!("scanned an output multiple times"); - } - scanner.ram_outputs.insert(id); - } - - // We could remove this, if instead of doing the first block which passed - // requirements + CONFIRMATIONS, we simply emitted an event for every block where - // `number % CONFIRMATIONS == 0` (once at the final stage for the existing multisig) - // There's no need at this point, yet the latter may be more suitable for modeling... 
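A simplified, illustrative model of the duplicate-output guard argued for in the comment above: an output ID is rejected if it's already persisted as seen or still sits in the in-memory set, and IDs move from RAM to the persisted set once their block is acknowledged. This is a sketch of the invariant, not the removed `ScannerDb`/`ram_outputs` code:

```rust
// Minimal sketch of the seen/ram_outputs double-scan check described above.
use std::collections::HashSet;

#[derive(Default)]
struct OutputDedup {
  seen_in_db: HashSet<Vec<u8>>,  // stand-in for the persisted `seen` keys
  ram_outputs: HashSet<Vec<u8>>, // cleared on reboot, pruned when a block is acknowledged
}

impl OutputDedup {
  /// Returns an error if this output ID was already scanned.
  fn observe(&mut self, id: &[u8]) -> Result<(), &'static str> {
    let id = id.to_vec();
    if self.seen_in_db.contains(&id) || self.ram_outputs.contains(&id) {
      return Err("scanned an output multiple times");
    }
    self.ram_outputs.insert(id);
    Ok(())
  }

  /// Called when the block containing these outputs is acknowledged and committed.
  fn ack(&mut self, ids: impl IntoIterator<Item = Vec<u8>>) {
    for id in ids {
      self.ram_outputs.remove(&id);
      self.seen_in_db.insert(id);
    }
  }
}

fn main() {
  let mut dedup = OutputDedup::default();
  assert!(dedup.observe(b"output-1").is_ok());
  // Re-observing the same ID, before or after acknowledgement, is rejected
  assert!(dedup.observe(b"output-1").is_err());
  dedup.ack([b"output-1".to_vec()]);
  assert!(dedup.observe(b"output-1").is_err());
}
```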
- async fn check_multisig_completed( - db: &mut D, - multisig_completed: &mut mpsc::UnboundedReceiver, - block_number: usize, - ) -> bool { - match multisig_completed.recv().await { - None => { - info!("Scanner handler was dropped. Shutting down?"); - false - } - Some(completed) => { - // Set the retirement block as block_number + CONFIRMATIONS - if completed { - let mut txn = db.txn(); - // The retiring key is the earliest one still around - let retiring_key = ScannerDb::::keys(&txn)[0].1; - // This value is static w.r.t. the key - ScannerDb::::save_retirement_block( - &mut txn, - &retiring_key, - block_number + N::CONFIRMATIONS, - ); - txn.commit(); - } - true - } - } - } - - drop(scanner_lock); - // Now that we've dropped the Scanner lock, we need to handle the multisig_completed - // channel before we decide if this block should be fired or not - // (holding the Scanner risks a deadlock) - for block_number in completion_block_numbers { - if !check_multisig_completed::(&mut db, &mut multisig_completed, block_number).await - { - return; - }; - } - - // Reacquire the scanner - let mut scanner_lock = scanner_hold.write().await; - let scanner = scanner_lock.as_mut().unwrap(); - - // Only emit an event if any of the following is true: - // - This is an activation block - // - This is a retirement block - // - There's outputs - // as only those blocks are meaningful and warrant obtaining synchrony over - let is_retirement_block = - ScannerDb::::retirement_block(&db, &scanner.keys[0].1) == Some(block_being_scanned); - let sent_block = if has_activation || is_retirement_block || (!outputs.is_empty()) { - // Save the outputs to disk - let mut txn = db.txn(); - ScannerDb::::save_outputs(&mut txn, &block_id, &outputs); - txn.commit(); - - // Send all outputs - if !scanner.emit(ScannerEvent::Block { is_retirement_block, block: block_id, outputs }) { - return; - } - - // Since we're creating a Batch, mark it as needing ack - scanner.need_ack.push_back(block_being_scanned); - true - } else { - false - }; - - // Remove it from memory - if is_retirement_block { - let retired = scanner.keys.remove(0).1; - scanner.eventualities.remove(retired.to_bytes().as_ref()); - } - - // Update ram_scanned - scanner.ram_scanned = Some(block_being_scanned); - - drop(scanner_lock); - // If we sent a Block event, once again check multisig_completed - if sent_block && - (!check_multisig_completed::( - &mut db, - &mut multisig_completed, - block_being_scanned, - ) - .await) - { - return; - } - } - } - } -} diff --git a/processor/src/multisigs/scheduler/mod.rs b/processor/src/multisigs/scheduler/mod.rs deleted file mode 100644 index b34e2f3e..00000000 --- a/processor/src/multisigs/scheduler/mod.rs +++ /dev/null @@ -1,96 +0,0 @@ -use core::fmt::Debug; -use std::io; - -use ciphersuite::Ciphersuite; - -use serai_client::primitives::{ExternalBalance, ExternalNetworkId}; - -use crate::{networks::Network, Db, Payment, Plan}; - -pub(crate) mod utxo; -pub(crate) mod smart_contract; - -pub trait SchedulerAddendum: Send + Clone + PartialEq + Debug { - fn read(reader: &mut R) -> io::Result; - fn write(&self, writer: &mut W) -> io::Result<()>; -} - -impl SchedulerAddendum for () { - fn read(_: &mut R) -> io::Result { - Ok(()) - } - fn write(&self, _: &mut W) -> io::Result<()> { - Ok(()) - } -} - -pub trait Scheduler: Sized + Clone + PartialEq + Debug { - type Addendum: SchedulerAddendum; - - /// Check if this Scheduler is empty. - fn empty(&self) -> bool; - - /// Create a new Scheduler. 
- fn new( - txn: &mut D::Transaction<'_>, - key: ::G, - network: ExternalNetworkId, - ) -> Self; - - /// Load a Scheduler from the DB. - fn from_db( - db: &D, - key: ::G, - network: ExternalNetworkId, - ) -> io::Result; - - /// Check if a branch is usable. - fn can_use_branch(&self, balance: ExternalBalance) -> bool; - - /// Schedule a series of outputs/payments. - fn schedule( - &mut self, - txn: &mut D::Transaction<'_>, - utxos: Vec, - payments: Vec>, - // TODO: Tighten this to multisig_for_any_change - key_for_any_change: ::G, - force_spend: bool, - ) -> Vec>; - - /// Consume all payments still pending within this Scheduler, without scheduling them. - fn consume_payments(&mut self, txn: &mut D::Transaction<'_>) -> Vec>; - - /// Note a branch output as having been created, with the amount it was actually created with, - /// or not having been created due to being too small. - fn created_output( - &mut self, - txn: &mut D::Transaction<'_>, - expected: u64, - actual: Option, - ); - - /// Refund a specific output. - fn refund_plan( - &mut self, - txn: &mut D::Transaction<'_>, - output: N::Output, - refund_to: N::Address, - ) -> Plan; - - /// Shim the forwarding Plan as necessary to obtain a fee estimate. - /// - /// If this Scheduler is for a Network which requires forwarding, this must return Some with a - /// plan with identical fee behavior. If forwarding isn't necessary, returns None. - fn shim_forward_plan(output: N::Output, to: ::G) -> Option>; - - /// Forward a specific output to the new multisig. - /// - /// Returns None if no forwarding is necessary. Must return Some if forwarding is necessary. - fn forward_plan( - &mut self, - txn: &mut D::Transaction<'_>, - output: N::Output, - to: ::G, - ) -> Option>; -} diff --git a/processor/src/multisigs/scheduler/smart_contract.rs b/processor/src/multisigs/scheduler/smart_contract.rs deleted file mode 100644 index a6bbbdaf..00000000 --- a/processor/src/multisigs/scheduler/smart_contract.rs +++ /dev/null @@ -1,208 +0,0 @@ -use std::{io, collections::HashSet}; - -use ciphersuite::{group::GroupEncoding, Ciphersuite}; - -use serai_client::primitives::{ExternalBalance, ExternalCoin, ExternalNetworkId}; - -use crate::{ - Get, DbTxn, Db, Payment, Plan, create_db, - networks::{Output, Network}, - multisigs::scheduler::{SchedulerAddendum, Scheduler as SchedulerTrait}, -}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Scheduler { - key: ::G, - coins: HashSet, - rotated: bool, -} - -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub enum Addendum { - Nonce(u64), - RotateTo { nonce: u64, new_key: ::G }, -} - -impl SchedulerAddendum for Addendum { - fn read(reader: &mut R) -> io::Result { - let mut kind = [0xff]; - reader.read_exact(&mut kind)?; - match kind[0] { - 0 => { - let mut nonce = [0; 8]; - reader.read_exact(&mut nonce)?; - Ok(Addendum::Nonce(u64::from_le_bytes(nonce))) - } - 1 => { - let mut nonce = [0; 8]; - reader.read_exact(&mut nonce)?; - let nonce = u64::from_le_bytes(nonce); - - let new_key = N::Curve::read_G(reader)?; - Ok(Addendum::RotateTo { nonce, new_key }) - } - _ => Err(io::Error::other("reading unknown Addendum type"))?, - } - } - fn write(&self, writer: &mut W) -> io::Result<()> { - match self { - Addendum::Nonce(nonce) => { - writer.write_all(&[0])?; - writer.write_all(&nonce.to_le_bytes()) - } - Addendum::RotateTo { nonce, new_key } => { - writer.write_all(&[1])?; - writer.write_all(&nonce.to_le_bytes())?; - writer.write_all(new_key.to_bytes().as_ref()) - } - } - } -} - -create_db! 
{ - SchedulerDb { - LastNonce: () -> u64, - RotatedTo: (key: &[u8]) -> Vec, - } -} - -impl> SchedulerTrait for Scheduler { - type Addendum = Addendum; - - /// Check if this Scheduler is empty. - fn empty(&self) -> bool { - self.rotated - } - - /// Create a new Scheduler. - fn new( - _txn: &mut D::Transaction<'_>, - key: ::G, - network: ExternalNetworkId, - ) -> Self { - assert!(N::branch_address(key).is_none()); - assert!(N::change_address(key).is_none()); - assert!(N::forward_address(key).is_none()); - - Scheduler { key, coins: network.coins().iter().copied().collect(), rotated: false } - } - - /// Load a Scheduler from the DB. - fn from_db( - db: &D, - key: ::G, - network: ExternalNetworkId, - ) -> io::Result { - Ok(Scheduler { - key, - coins: network.coins().iter().copied().collect(), - rotated: RotatedTo::get(db, key.to_bytes().as_ref()).is_some(), - }) - } - - fn can_use_branch(&self, _balance: ExternalBalance) -> bool { - false - } - - fn schedule( - &mut self, - txn: &mut D::Transaction<'_>, - utxos: Vec, - payments: Vec>, - key_for_any_change: ::G, - force_spend: bool, - ) -> Vec> { - for utxo in utxos { - assert!(self.coins.contains(&utxo.balance().coin)); - } - - let mut nonce = LastNonce::get(txn).unwrap_or(1); - let mut plans = vec![]; - for chunk in payments.as_slice().chunks(N::MAX_OUTPUTS) { - // Once we rotate, all further payments should be scheduled via the new multisig - assert!(!self.rotated); - plans.push(Plan { - key: self.key, - inputs: vec![], - payments: chunk.to_vec(), - change: None, - scheduler_addendum: Addendum::Nonce(nonce), - }); - nonce += 1; - } - - // If we're supposed to rotate to the new key, create an empty Plan which will signify the key - // update - if force_spend && (!self.rotated) { - plans.push(Plan { - key: self.key, - inputs: vec![], - payments: vec![], - change: None, - scheduler_addendum: Addendum::RotateTo { nonce, new_key: key_for_any_change }, - }); - nonce += 1; - self.rotated = true; - RotatedTo::set( - txn, - self.key.to_bytes().as_ref(), - &key_for_any_change.to_bytes().as_ref().to_vec(), - ); - } - - LastNonce::set(txn, &nonce); - - plans - } - - fn consume_payments(&mut self, _txn: &mut D::Transaction<'_>) -> Vec> { - vec![] - } - - fn created_output( - &mut self, - _txn: &mut D::Transaction<'_>, - _expected: u64, - _actual: Option, - ) { - panic!("Smart Contract Scheduler created a Branch output") - } - - /// Refund a specific output. - fn refund_plan( - &mut self, - txn: &mut D::Transaction<'_>, - output: N::Output, - refund_to: N::Address, - ) -> Plan { - let current_key = RotatedTo::get(txn, self.key.to_bytes().as_ref()) - .and_then(|key_bytes| ::read_G(&mut key_bytes.as_slice()).ok()) - .unwrap_or(self.key); - - let nonce = LastNonce::get(txn).map_or(1, |nonce| nonce + 1); - LastNonce::set(txn, &(nonce + 1)); - Plan { - key: current_key, - inputs: vec![], - payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }], - change: None, - scheduler_addendum: Addendum::Nonce(nonce), - } - } - - fn shim_forward_plan(_output: N::Output, _to: ::G) -> Option> { - None - } - - /// Forward a specific output to the new multisig. - /// - /// Returns None if no forwarding is necessary. 
- fn forward_plan( - &mut self, - _txn: &mut D::Transaction<'_>, - _output: N::Output, - _to: ::G, - ) -> Option> { - None - } -} diff --git a/processor/src/multisigs/scheduler/utxo.rs b/processor/src/multisigs/scheduler/utxo.rs deleted file mode 100644 index 3758e54c..00000000 --- a/processor/src/multisigs/scheduler/utxo.rs +++ /dev/null @@ -1,631 +0,0 @@ -use std::{ - io::{self, Read}, - collections::{VecDeque, HashMap}, -}; - -use ciphersuite::{group::GroupEncoding, Ciphersuite}; - -use serai_client::primitives::{ExternalNetworkId, ExternalCoin, Amount, ExternalBalance}; - -use crate::{ - DbTxn, Db, Payment, Plan, - networks::{OutputType, Output, Network, UtxoNetwork}, - multisigs::scheduler::Scheduler as SchedulerTrait, -}; - -/// Deterministic output/payment manager. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Scheduler { - key: ::G, - coin: ExternalCoin, - - // Serai, when it has more outputs expected than it can handle in a single transaction, will - // schedule the outputs to be handled later. Immediately, it just creates additional outputs - // which will eventually handle those outputs - // - // These maps map output amounts, which we'll receive in the future, to the payments they should - // be used on - // - // When those output amounts appear, their payments should be scheduled - // The Vec is for all payments that should be done per output instance - // The VecDeque allows multiple sets of payments with the same sum amount to properly co-exist - // - // queued_plans are for outputs which we will create, yet when created, will have their amount - // reduced by the fee it cost to be created. The Scheduler will then be told how what amount the - // output actually has, and it'll be moved into plans - queued_plans: HashMap>>>, - plans: HashMap>>>, - - // UTXOs available - utxos: Vec, - - // Payments awaiting scheduling due to the output availability problem - payments: VecDeque>, -} - -fn scheduler_key(key: &G) -> Vec { - D::key(b"SCHEDULER", b"scheduler", key.to_bytes()) -} - -impl> Scheduler { - pub fn empty(&self) -> bool { - self.queued_plans.is_empty() && - self.plans.is_empty() && - self.utxos.is_empty() && - self.payments.is_empty() - } - - fn read( - key: ::G, - coin: ExternalCoin, - reader: &mut R, - ) -> io::Result { - let mut read_plans = || -> io::Result<_> { - let mut all_plans = HashMap::new(); - let mut all_plans_len = [0; 4]; - reader.read_exact(&mut all_plans_len)?; - for _ in 0 .. u32::from_le_bytes(all_plans_len) { - let mut amount = [0; 8]; - reader.read_exact(&mut amount)?; - let amount = u64::from_le_bytes(amount); - - let mut plans = VecDeque::new(); - let mut plans_len = [0; 4]; - reader.read_exact(&mut plans_len)?; - for _ in 0 .. u32::from_le_bytes(plans_len) { - let mut payments = vec![]; - let mut payments_len = [0; 4]; - reader.read_exact(&mut payments_len)?; - - for _ in 0 .. u32::from_le_bytes(payments_len) { - payments.push(Payment::read(reader)?); - } - plans.push_back(payments); - } - all_plans.insert(amount, plans); - } - Ok(all_plans) - }; - let queued_plans = read_plans()?; - let plans = read_plans()?; - - let mut utxos = vec![]; - let mut utxos_len = [0; 4]; - reader.read_exact(&mut utxos_len)?; - for _ in 0 .. u32::from_le_bytes(utxos_len) { - utxos.push(N::Output::read(reader)?); - } - - let mut payments = VecDeque::new(); - let mut payments_len = [0; 4]; - reader.read_exact(&mut payments_len)?; - for _ in 0 .. 
u32::from_le_bytes(payments_len) { - payments.push_back(Payment::read(reader)?); - } - - Ok(Scheduler { key, coin, queued_plans, plans, utxos, payments }) - } - - // TODO2: Get rid of this - // We reserialize the entire scheduler on any mutation to save it to the DB which is horrible - // We should have an incremental solution - fn serialize(&self) -> Vec { - let mut res = Vec::with_capacity(4096); - - let mut write_plans = |plans: &HashMap>>>| { - res.extend(u32::try_from(plans.len()).unwrap().to_le_bytes()); - for (amount, list_of_plans) in plans { - res.extend(amount.to_le_bytes()); - res.extend(u32::try_from(list_of_plans.len()).unwrap().to_le_bytes()); - for plan in list_of_plans { - res.extend(u32::try_from(plan.len()).unwrap().to_le_bytes()); - for payment in plan { - payment.write(&mut res).unwrap(); - } - } - } - }; - write_plans(&self.queued_plans); - write_plans(&self.plans); - - res.extend(u32::try_from(self.utxos.len()).unwrap().to_le_bytes()); - for utxo in &self.utxos { - utxo.write(&mut res).unwrap(); - } - - res.extend(u32::try_from(self.payments.len()).unwrap().to_le_bytes()); - for payment in &self.payments { - payment.write(&mut res).unwrap(); - } - - debug_assert_eq!(&Self::read(self.key, self.coin, &mut res.as_slice()).unwrap(), self); - res - } - - pub fn new( - txn: &mut D::Transaction<'_>, - key: ::G, - network: ExternalNetworkId, - ) -> Self { - assert!(N::branch_address(key).is_some()); - assert!(N::change_address(key).is_some()); - assert!(N::forward_address(key).is_some()); - - let coin = { - let coins = network.coins(); - assert_eq!(coins.len(), 1); - coins[0] - }; - - let res = Scheduler { - key, - coin, - queued_plans: HashMap::new(), - plans: HashMap::new(), - utxos: vec![], - payments: VecDeque::new(), - }; - // Save it to disk so from_db won't panic if we don't mutate it before rebooting - txn.put(scheduler_key::(&res.key), res.serialize()); - res - } - - pub fn from_db( - db: &D, - key: ::G, - network: ExternalNetworkId, - ) -> io::Result { - let coin = { - let coins = network.coins(); - assert_eq!(coins.len(), 1); - coins[0] - }; - - let scheduler = db.get(scheduler_key::(&key)).unwrap_or_else(|| { - panic!("loading scheduler from DB without scheduler for {}", hex::encode(key.to_bytes())) - }); - let mut reader_slice = scheduler.as_slice(); - let reader = &mut reader_slice; - - Self::read(key, coin, reader) - } - - pub fn can_use_branch(&self, balance: ExternalBalance) -> bool { - assert_eq!(balance.coin, self.coin); - self.plans.contains_key(&balance.amount.0) - } - - fn execute( - &mut self, - inputs: Vec, - mut payments: Vec>, - key_for_any_change: ::G, - ) -> Plan { - let mut change = false; - let mut max = N::MAX_OUTPUTS; - - let payment_amounts = |payments: &Vec>| { - payments.iter().map(|payment| payment.balance.amount.0).sum::() - }; - - // Requires a change output - if inputs.iter().map(|output| output.balance().amount.0).sum::() != - payment_amounts(&payments) - { - change = true; - max -= 1; - } - - let mut add_plan = |payments| { - let amount = payment_amounts(&payments); - self.queued_plans.entry(amount).or_insert(VecDeque::new()).push_back(payments); - amount - }; - - let branch_address = N::branch_address(self.key).unwrap(); - - // If we have more payments than we can handle in a single TX, create plans for them - // TODO2: This isn't perfect. 
For 258 outputs, and a MAX_OUTPUTS of 16, this will create: - // 15 branches of 16 leaves - // 1 branch of: - // - 1 branch of 16 leaves - // - 2 leaves - // If this was perfect, the heaviest branch would have 1 branch of 3 leaves and 15 leaves - while payments.len() > max { - // The resulting TX will have the remaining payments and a new branch payment - let to_remove = (payments.len() + 1) - N::MAX_OUTPUTS; - // Don't remove more than possible - let to_remove = to_remove.min(N::MAX_OUTPUTS); - - // Create the plan - let removed = payments.drain((payments.len() - to_remove) ..).collect::>(); - assert_eq!(removed.len(), to_remove); - let amount = add_plan(removed); - - // Create the payment for the plan - // Push it to the front so it's not moved into a branch until all lower-depth items are - payments.insert( - 0, - Payment { - address: branch_address.clone(), - data: None, - balance: ExternalBalance { coin: self.coin, amount: Amount(amount) }, - }, - ); - } - - Plan { - key: self.key, - inputs, - payments, - change: Some(N::change_address(key_for_any_change).unwrap()).filter(|_| change), - scheduler_addendum: (), - } - } - - fn add_outputs( - &mut self, - mut utxos: Vec, - key_for_any_change: ::G, - ) -> Vec> { - log::info!("adding {} outputs", utxos.len()); - - let mut txs = vec![]; - - for utxo in utxos.drain(..) { - if utxo.kind() == OutputType::Branch { - let amount = utxo.balance().amount.0; - if let Some(plans) = self.plans.get_mut(&amount) { - // Execute the first set of payments possible with an output of this amount - let payments = plans.pop_front().unwrap(); - // They won't be equal if we dropped payments due to being dust - assert!(amount >= payments.iter().map(|payment| payment.balance.amount.0).sum::()); - - // If we've grabbed the last plan for this output amount, remove it from the map - if plans.is_empty() { - self.plans.remove(&amount); - } - - // Create a TX for these payments - txs.push(self.execute(vec![utxo], payments, key_for_any_change)); - continue; - } - } - - self.utxos.push(utxo); - } - - log::info!("{} planned TXs have had their required inputs confirmed", txs.len()); - txs - } - - // Schedule a series of outputs/payments. - pub fn schedule( - &mut self, - txn: &mut D::Transaction<'_>, - utxos: Vec, - mut payments: Vec>, - key_for_any_change: ::G, - force_spend: bool, - ) -> Vec> { - for utxo in &utxos { - assert_eq!(utxo.balance().coin, self.coin); - } - for payment in &payments { - assert_eq!(payment.balance.coin, self.coin); - } - - // Drop payments to our own branch address - /* - created_output will be called any time we send to a branch address. If it's called, and it - wasn't expecting to be called, that's almost certainly an error. The only way to guarantee - this however is to only have us send to a branch address when creating a branch, hence the - dropping of pointless payments. - - This is not comprehensive as a payment may still be made to another active multisig's branch - address, depending on timing. This is safe as the issue only occurs when a multisig sends to - its *own* branch address, since created_output is called on the signer's Scheduler. 
- */ - { - let branch_address = N::branch_address(self.key).unwrap(); - payments = - payments.drain(..).filter(|payment| payment.address != branch_address).collect::>(); - } - - let mut plans = self.add_outputs(utxos, key_for_any_change); - - log::info!("scheduling {} new payments", payments.len()); - - // Add all new payments to the list of pending payments - self.payments.extend(payments); - let payments_at_start = self.payments.len(); - log::info!("{} payments are now scheduled", payments_at_start); - - // If we don't have UTXOs available, don't try to continue - if self.utxos.is_empty() { - log::info!("no utxos currently available"); - return plans; - } - - // Sort UTXOs so the highest valued ones are first - self.utxos.sort_by(|a, b| a.balance().amount.0.cmp(&b.balance().amount.0).reverse()); - - // We always want to aggregate our UTXOs into a single UTXO in the name of simplicity - // We may have more UTXOs than will fit into a TX though - // We use the most valuable UTXOs to handle our current payments, and we return aggregation TXs - // for the rest of the inputs - // Since we do multiple aggregation TXs at once, this will execute in logarithmic time - let utxos = self.utxos.drain(..).collect::>(); - let mut utxo_chunks = - utxos.chunks(N::MAX_INPUTS).map(<[::Output]>::to_vec).collect::>(); - - // Use the first chunk for any scheduled payments, since it has the most value - let utxos = utxo_chunks.remove(0); - - // If the last chunk exists and only has one output, don't try aggregating it - // Set it to be restored to UTXO set - let mut to_restore = None; - if let Some(mut chunk) = utxo_chunks.pop() { - if chunk.len() == 1 { - to_restore = Some(chunk.pop().unwrap()); - } else { - utxo_chunks.push(chunk); - } - } - - for chunk in utxo_chunks.drain(..) { - log::debug!("aggregating a chunk of {} inputs", chunk.len()); - plans.push(Plan { - key: self.key, - inputs: chunk, - payments: vec![], - change: Some(N::change_address(key_for_any_change).unwrap()), - scheduler_addendum: (), - }) - } - - // We want to use all possible UTXOs for all possible payments - let mut balance = utxos.iter().map(|output| output.balance().amount.0).sum::(); - - // If we can't fulfill the next payment, we have encountered an instance of the UTXO - // availability problem - // This shows up in networks like Monero, where because we spent outputs, our change has yet to - // re-appear. 
Since it has yet to re-appear, we only operate with a balance which is a subset - // of our total balance - // Despite this, we may be ordered to fulfill a payment which is our total balance - // The solution is to wait for the temporarily unavailable change outputs to re-appear, - // granting us access to our full balance - let mut executing = vec![]; - while !self.payments.is_empty() { - let amount = self.payments[0].balance.amount.0; - if balance.checked_sub(amount).is_some() { - balance -= amount; - executing.push(self.payments.pop_front().unwrap()); - } else { - // Doesn't check if other payments would fit into the current batch as doing so may never - // let enough inputs become simultaneously availabile to enable handling of payments[0] - break; - } - } - - // Now that we have the list of payments we can successfully handle right now, create the TX - // for them - if !executing.is_empty() { - plans.push(self.execute(utxos, executing, key_for_any_change)); - } else { - // If we don't have any payments to execute, save these UTXOs for later - self.utxos.extend(utxos); - } - - // If we're instructed to force a spend, do so - // This is used when an old multisig is retiring and we want to always transfer outputs to the - // new one, regardless if we currently have payments - if force_spend && (!self.utxos.is_empty()) { - assert!(self.utxos.len() <= N::MAX_INPUTS); - plans.push(Plan { - key: self.key, - inputs: self.utxos.drain(..).collect::>(), - payments: vec![], - change: Some(N::change_address(key_for_any_change).unwrap()), - scheduler_addendum: (), - }); - } - - // If there's a UTXO to restore, restore it - // This is done now as if there is a to_restore output, and it was inserted into self.utxos - // earlier, self.utxos.len() may become `N::MAX_INPUTS + 1` - // The prior block requires the len to be `<= N::MAX_INPUTS` - if let Some(to_restore) = to_restore { - self.utxos.push(to_restore); - } - - txn.put(scheduler_key::(&self.key), self.serialize()); - - log::info!( - "created {} plans containing {} payments to sign, with {} payments pending scheduling", - plans.len(), - payments_at_start - self.payments.len(), - self.payments.len(), - ); - plans - } - - pub fn consume_payments(&mut self, txn: &mut D::Transaction<'_>) -> Vec> { - let res: Vec<_> = self.payments.drain(..).collect(); - if !res.is_empty() { - txn.put(scheduler_key::(&self.key), self.serialize()); - } - res - } - - // Note a branch output as having been created, with the amount it was actually created with, - // or not having been created due to being too small - pub fn created_output( - &mut self, - txn: &mut D::Transaction<'_>, - expected: u64, - actual: Option, - ) { - log::debug!("output expected to have {} had {:?} after fees", expected, actual); - - // Get the payments this output is expected to handle - let queued = self.queued_plans.get_mut(&expected).unwrap(); - let mut payments = queued.pop_front().unwrap(); - assert_eq!(expected, payments.iter().map(|payment| payment.balance.amount.0).sum::()); - // If this was the last set of payments at this amount, remove it - if queued.is_empty() { - self.queued_plans.remove(&expected); - } - - // If we didn't actually create this output, return, dropping the child payments - let Some(actual) = actual else { return }; - - // Amortize the fee amongst all payments underneath this branch - { - let mut to_amortize = actual - expected; - // If the payments are worth less than this fee we need to amortize, return, dropping them - if payments.iter().map(|payment| 
payment.balance.amount.0).sum::() < to_amortize { - return; - } - while to_amortize != 0 { - let payments_len = u64::try_from(payments.len()).unwrap(); - let per_payment = to_amortize / payments_len; - let mut overage = to_amortize % payments_len; - - for payment in &mut payments { - let to_subtract = per_payment + overage; - // Only subtract the overage once - overage = 0; - - let subtractable = payment.balance.amount.0.min(to_subtract); - to_amortize -= subtractable; - payment.balance.amount.0 -= subtractable; - } - } - } - - // Drop payments now below the dust threshold - let payments = payments - .into_iter() - .filter(|payment| payment.balance.amount.0 >= N::DUST) - .collect::>(); - // Sanity check this was done properly - assert!(actual >= payments.iter().map(|payment| payment.balance.amount.0).sum::()); - - // If there's no payments left, return - if payments.is_empty() { - return; - } - - self.plans.entry(actual).or_insert(VecDeque::new()).push_back(payments); - - // TODO2: This shows how ridiculous the serialize function is - txn.put(scheduler_key::(&self.key), self.serialize()); - } -} - -impl> SchedulerTrait for Scheduler { - type Addendum = (); - - /// Check if this Scheduler is empty. - fn empty(&self) -> bool { - Scheduler::empty(self) - } - - /// Create a new Scheduler. - fn new( - txn: &mut D::Transaction<'_>, - key: ::G, - network: ExternalNetworkId, - ) -> Self { - Scheduler::new::(txn, key, network) - } - - /// Load a Scheduler from the DB. - fn from_db( - db: &D, - key: ::G, - network: ExternalNetworkId, - ) -> io::Result { - Scheduler::from_db::(db, key, network) - } - - /// Check if a branch is usable. - fn can_use_branch(&self, balance: ExternalBalance) -> bool { - Scheduler::can_use_branch(self, balance) - } - - /// Schedule a series of outputs/payments. - fn schedule( - &mut self, - txn: &mut D::Transaction<'_>, - utxos: Vec, - payments: Vec>, - key_for_any_change: ::G, - force_spend: bool, - ) -> Vec> { - Scheduler::schedule::(self, txn, utxos, payments, key_for_any_change, force_spend) - } - - /// Consume all payments still pending within this Scheduler, without scheduling them. - fn consume_payments(&mut self, txn: &mut D::Transaction<'_>) -> Vec> { - Scheduler::consume_payments::(self, txn) - } - - /// Note a branch output as having been created, with the amount it was actually created with, - /// or not having been created due to being too small. - // TODO: Move this to ExternalBalance. 
- fn created_output( - &mut self, - txn: &mut D::Transaction<'_>, - expected: u64, - actual: Option, - ) { - Scheduler::created_output::(self, txn, expected, actual) - } - - fn refund_plan( - &mut self, - _: &mut D::Transaction<'_>, - output: N::Output, - refund_to: N::Address, - ) -> Plan { - let output_id = output.id().as_ref().to_vec(); - let res = Plan { - key: output.key(), - // Uses a payment as this will still be successfully sent due to fee amortization, - // and because change is currently always a Serai key - payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }], - inputs: vec![output], - change: None, - scheduler_addendum: (), - }; - log::info!("refund plan for {} has ID {}", hex::encode(output_id), hex::encode(res.id())); - res - } - - fn shim_forward_plan(output: N::Output, to: ::G) -> Option> { - Some(Plan { - key: output.key(), - payments: vec![Payment { - address: N::forward_address(to).unwrap(), - data: None, - balance: output.balance(), - }], - inputs: vec![output], - change: None, - scheduler_addendum: (), - }) - } - - fn forward_plan( - &mut self, - _: &mut D::Transaction<'_>, - output: N::Output, - to: ::G, - ) -> Option> { - assert_eq!(self.key, output.key()); - // Call shim as shim returns the actual - Self::shim_forward_plan(output, to) - } -} diff --git a/processor/src/networks/bitcoin.rs b/processor/src/networks/bitcoin.rs deleted file mode 100644 index 5702f5ed..00000000 --- a/processor/src/networks/bitcoin.rs +++ /dev/null @@ -1,942 +0,0 @@ -use std::{sync::OnceLock, time::Duration, io, collections::HashMap}; - -use async_trait::async_trait; - -use scale::{Encode, Decode}; - -use ciphersuite::group::ff::PrimeField; -use k256::{ProjectivePoint, Scalar}; -use frost::{ - curve::{Curve, Secp256k1}, - ThresholdKeys, -}; - -use tokio::time::sleep; - -use bitcoin_serai::{ - bitcoin::{ - hashes::Hash as HashTrait, - key::{Parity, XOnlyPublicKey}, - consensus::{Encodable, Decodable}, - script::Instruction, - Transaction, Block, ScriptBuf, - opcodes::all::{OP_SHA256, OP_EQUALVERIFY}, - }, - wallet::{ - tweak_keys, p2tr_script_buf, ReceivedOutput, Scanner, TransactionError, - SignableTransaction as BSignableTransaction, TransactionMachine, - }, - rpc::{RpcError, Rpc}, -}; - -#[cfg(test)] -use bitcoin_serai::bitcoin::{ - secp256k1::{SECP256K1, SecretKey, Message}, - PrivateKey, PublicKey, - sighash::{EcdsaSighashType, SighashCache}, - script::PushBytesBuf, - absolute::LockTime, - Amount as BAmount, Sequence, Script, Witness, OutPoint, - transaction::Version, - blockdata::transaction::{TxIn, TxOut}, -}; - -use serai_client::{ - primitives::{MAX_DATA_LEN, ExternalCoin, ExternalNetworkId, Amount, ExternalBalance}, - networks::bitcoin::Address, -}; - -use crate::{ - networks::{ - NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait, - Transaction as TransactionTrait, SignableTransaction as SignableTransactionTrait, - Eventuality as EventualityTrait, EventualitiesTracker, Network, UtxoNetwork, - }, - Payment, - multisigs::scheduler::utxo::Scheduler, -}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct OutputId(pub [u8; 36]); -impl Default for OutputId { - fn default() -> Self { - Self([0; 36]) - } -} -impl AsRef<[u8]> for OutputId { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} -impl AsMut<[u8]> for OutputId { - fn as_mut(&mut self) -> &mut [u8] { - self.0.as_mut() - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Output { - kind: OutputType, - presumed_origin: Option
, - output: ReceivedOutput, - data: Vec, -} - -impl OutputTrait for Output { - type Id = OutputId; - - fn kind(&self) -> OutputType { - self.kind - } - - fn id(&self) -> Self::Id { - let mut res = OutputId::default(); - self.output.outpoint().consensus_encode(&mut res.as_mut()).unwrap(); - debug_assert_eq!( - { - let mut outpoint = vec![]; - self.output.outpoint().consensus_encode(&mut outpoint).unwrap(); - outpoint - }, - res.as_ref().to_vec() - ); - res - } - - fn tx_id(&self) -> [u8; 32] { - let mut hash = *self.output.outpoint().txid.as_raw_hash().as_byte_array(); - hash.reverse(); - hash - } - - fn key(&self) -> ProjectivePoint { - let script = &self.output.output().script_pubkey; - assert!(script.is_p2tr()); - let Instruction::PushBytes(key) = script.instructions_minimal().last().unwrap().unwrap() else { - panic!("last item in v1 Taproot script wasn't bytes") - }; - let key = XOnlyPublicKey::from_slice(key.as_ref()) - .expect("last item in v1 Taproot script wasn't x-only public key"); - Secp256k1::read_G(&mut key.public_key(Parity::Even).serialize().as_slice()).unwrap() - - (ProjectivePoint::GENERATOR * self.output.offset()) - } - - fn presumed_origin(&self) -> Option
{ - self.presumed_origin.clone() - } - - fn balance(&self) -> ExternalBalance { - ExternalBalance { coin: ExternalCoin::Bitcoin, amount: Amount(self.output.value()) } - } - - fn data(&self) -> &[u8] { - &self.data - } - - fn write(&self, writer: &mut W) -> io::Result<()> { - self.kind.write(writer)?; - let presumed_origin: Option> = self.presumed_origin.clone().map(Into::into); - writer.write_all(&presumed_origin.encode())?; - self.output.write(writer)?; - writer.write_all(&u16::try_from(self.data.len()).unwrap().to_le_bytes())?; - writer.write_all(&self.data) - } - - fn read(mut reader: &mut R) -> io::Result { - Ok(Output { - kind: OutputType::read(reader)?, - presumed_origin: { - let mut io_reader = scale::IoReader(reader); - let res = Option::>::decode(&mut io_reader) - .unwrap() - .map(|address| Address::try_from(address).unwrap()); - reader = io_reader.0; - res - }, - output: ReceivedOutput::read(reader)?, - data: { - let mut data_len = [0; 2]; - reader.read_exact(&mut data_len)?; - - let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))]; - reader.read_exact(&mut data)?; - data - }, - }) - } -} - -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct Fee(u64); - -#[async_trait] -impl TransactionTrait for Transaction { - type Id = [u8; 32]; - fn id(&self) -> Self::Id { - let mut hash = *self.compute_txid().as_raw_hash().as_byte_array(); - hash.reverse(); - hash - } - - #[cfg(test)] - async fn fee(&self, network: &Bitcoin) -> u64 { - let mut value = 0; - for input in &self.input { - let output = input.previous_output; - let mut hash = *output.txid.as_raw_hash().as_byte_array(); - hash.reverse(); - value += network.rpc.get_transaction(&hash).await.unwrap().output - [usize::try_from(output.vout).unwrap()] - .value - .to_sat(); - } - for output in &self.output { - value -= output.value.to_sat(); - } - value - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Eventuality([u8; 32]); - -#[derive(Clone, PartialEq, Eq, Default, Debug)] -pub struct EmptyClaim; -impl AsRef<[u8]> for EmptyClaim { - fn as_ref(&self) -> &[u8] { - &[] - } -} -impl AsMut<[u8]> for EmptyClaim { - fn as_mut(&mut self) -> &mut [u8] { - &mut [] - } -} - -impl EventualityTrait for Eventuality { - type Claim = EmptyClaim; - type Completion = Transaction; - - fn lookup(&self) -> Vec { - self.0.to_vec() - } - - fn read(reader: &mut R) -> io::Result { - let mut id = [0; 32]; - reader - .read_exact(&mut id) - .map_err(|_| io::Error::other("couldn't decode ID in eventuality"))?; - Ok(Eventuality(id)) - } - fn serialize(&self) -> Vec { - self.0.to_vec() - } - - fn claim(_: &Transaction) -> EmptyClaim { - EmptyClaim - } - fn serialize_completion(completion: &Transaction) -> Vec { - let mut buf = vec![]; - completion.consensus_encode(&mut buf).unwrap(); - buf - } - fn read_completion(reader: &mut R) -> io::Result { - Transaction::consensus_decode(&mut io::BufReader::with_capacity(0, reader)) - .map_err(|e| io::Error::other(format!("{e}"))) - } -} - -#[derive(Clone, Debug)] -pub struct SignableTransaction { - actual: BSignableTransaction, -} -impl PartialEq for SignableTransaction { - fn eq(&self, other: &SignableTransaction) -> bool { - self.actual == other.actual - } -} -impl Eq for SignableTransaction {} -impl SignableTransactionTrait for SignableTransaction { - fn fee(&self) -> u64 { - self.actual.fee() - } -} - -#[async_trait] -impl BlockTrait for Block { - type Id = [u8; 32]; - fn id(&self) -> Self::Id { - let mut hash = *self.block_hash().as_raw_hash().as_byte_array(); - hash.reverse(); - hash - } - - 
fn parent(&self) -> Self::Id { - let mut hash = *self.header.prev_blockhash.as_raw_hash().as_byte_array(); - hash.reverse(); - hash - } - - async fn time(&self, rpc: &Bitcoin) -> u64 { - // Use the network median time defined in BIP-0113 since the in-block time isn't guaranteed to - // be monotonic - let mut timestamps = vec![u64::from(self.header.time)]; - let mut parent = self.parent(); - // BIP-0113 uses a median of the prior 11 blocks - while timestamps.len() < 11 { - let mut parent_block; - while { - parent_block = rpc.rpc.get_block(&parent).await; - parent_block.is_err() - } { - log::error!("couldn't get parent block when trying to get block time: {parent_block:?}"); - sleep(Duration::from_secs(5)).await; - } - let parent_block = parent_block.unwrap(); - timestamps.push(u64::from(parent_block.header.time)); - parent = parent_block.parent(); - - if parent == [0; 32] { - break; - } - } - timestamps.sort(); - timestamps[timestamps.len() / 2] - } -} - -const KEY_DST: &[u8] = b"Serai Bitcoin Output Offset"; -static BRANCH_OFFSET: OnceLock = OnceLock::new(); -static CHANGE_OFFSET: OnceLock = OnceLock::new(); -static FORWARD_OFFSET: OnceLock = OnceLock::new(); - -// Always construct the full scanner in order to ensure there's no collisions -fn scanner( - key: ProjectivePoint, -) -> (Scanner, HashMap, HashMap, OutputType>) { - let mut scanner = Scanner::new(key).unwrap(); - let mut offsets = HashMap::from([(OutputType::External, Scalar::ZERO)]); - - let zero = Scalar::ZERO.to_repr(); - let zero_ref: &[u8] = zero.as_ref(); - let mut kinds = HashMap::from([(zero_ref.to_vec(), OutputType::External)]); - - let mut register = |kind, offset| { - let offset = scanner.register_offset(offset).expect("offset collision"); - offsets.insert(kind, offset); - - let offset = offset.to_repr(); - let offset_ref: &[u8] = offset.as_ref(); - kinds.insert(offset_ref.to_vec(), kind); - }; - - register( - OutputType::Branch, - *BRANCH_OFFSET.get_or_init(|| Secp256k1::hash_to_F(KEY_DST, b"branch")), - ); - register( - OutputType::Change, - *CHANGE_OFFSET.get_or_init(|| Secp256k1::hash_to_F(KEY_DST, b"change")), - ); - register( - OutputType::Forwarded, - *FORWARD_OFFSET.get_or_init(|| Secp256k1::hash_to_F(KEY_DST, b"forward")), - ); - - (scanner, offsets, kinds) -} - -#[derive(Clone, Debug)] -pub struct Bitcoin { - pub(crate) rpc: Rpc, -} -// Shim required for testing/debugging purposes due to generic arguments also necessitating trait -// bounds -impl PartialEq for Bitcoin { - fn eq(&self, _: &Self) -> bool { - true - } -} -impl Eq for Bitcoin {} - -impl Bitcoin { - pub async fn new(url: String) -> Bitcoin { - let mut res = Rpc::new(url.clone()).await; - while let Err(e) = res { - log::error!("couldn't connect to Bitcoin node: {e:?}"); - sleep(Duration::from_secs(5)).await; - res = Rpc::new(url.clone()).await; - } - Bitcoin { rpc: res.unwrap() } - } - - #[cfg(test)] - pub async fn fresh_chain(&self) { - if self.rpc.get_latest_block_number().await.unwrap() > 0 { - self - .rpc - .rpc_call( - "invalidateblock", - serde_json::json!([hex::encode(self.rpc.get_block_hash(1).await.unwrap())]), - ) - .await - .unwrap() - } - } - - // This function panics on a node which doesn't follow the Bitcoin protocol, which is deemed fine - async fn median_fee(&self, block: &Block) -> Result { - let mut fees = vec![]; - if block.txdata.len() > 1 { - for tx in &block.txdata[1 ..] 
{ - let mut in_value = 0; - for input in &tx.input { - let mut input_tx = input.previous_output.txid.to_raw_hash().to_byte_array(); - input_tx.reverse(); - in_value += self - .rpc - .get_transaction(&input_tx) - .await - .map_err(|_| NetworkError::ConnectionError)? - .output[usize::try_from(input.previous_output.vout).unwrap()] - .value - .to_sat(); - } - let out = tx.output.iter().map(|output| output.value.to_sat()).sum::(); - fees.push((in_value - out) / u64::try_from(tx.vsize()).unwrap()); - } - } - fees.sort(); - let fee = fees.get(fees.len() / 2).copied().unwrap_or(0); - - // The DUST constant documentation notes a relay rule practically enforcing a - // 1000 sat/kilo-vbyte minimum fee. - Ok(Fee(fee.max(1))) - } - - async fn make_signable_transaction( - &self, - block_number: usize, - inputs: &[Output], - payments: &[Payment], - change: &Option
, - calculating_fee: bool, - ) -> Result, NetworkError> { - for payment in payments { - assert_eq!(payment.balance.coin, ExternalCoin::Bitcoin); - } - - // TODO2: Use an fee representative of several blocks, cached inside Self - let block_for_fee = self.get_block(block_number).await?; - let fee = self.median_fee(&block_for_fee).await?; - - let payments = payments - .iter() - .map(|payment| { - ( - payment.address.clone().into(), - // If we're solely estimating the fee, don't specify the actual amount - // This won't affect the fee calculation yet will ensure we don't hit a not enough funds - // error - if calculating_fee { Self::DUST } else { payment.balance.amount.0 }, - ) - }) - .collect::>(); - - match BSignableTransaction::new( - inputs.iter().map(|input| input.output.clone()).collect(), - &payments, - change.clone().map(Into::into), - None, - fee.0, - ) { - Ok(signable) => Ok(Some(signable)), - Err(TransactionError::NoInputs) => { - panic!("trying to create a bitcoin transaction without inputs") - } - // No outputs left and the change isn't worth enough/not even enough funds to pay the fee - Err(TransactionError::NoOutputs | TransactionError::NotEnoughFunds { .. }) => Ok(None), - // amortize_fee removes payments which fall below the dust threshold - Err(TransactionError::DustPayment) => panic!("dust payment despite removing dust"), - Err(TransactionError::TooMuchData) => { - panic!("too much data despite not specifying data") - } - Err(TransactionError::TooLowFee) => { - panic!("created a transaction whose fee is below the minimum") - } - Err(TransactionError::TooLargeTransaction) => { - panic!("created a too large transaction despite limiting inputs/outputs") - } - } - } - - // Expected script has to start with SHA256 PUSH MSG_HASH OP_EQUALVERIFY .. - fn segwit_data_pattern(script: &ScriptBuf) -> Option { - let mut ins = script.instructions(); - - // first item should be SHA256 code - if ins.next()?.ok()?.opcode()? != OP_SHA256 { - return Some(false); - } - - // next should be a data push - ins.next()?.ok()?.push_bytes()?; - - // next should be a equality check - if ins.next()?.ok()?.opcode()? != OP_EQUALVERIFY { - return Some(false); - } - - Some(true) - } - - fn extract_serai_data(tx: &Transaction) -> Vec { - // check outputs - let mut data = (|| { - for output in &tx.output { - if output.script_pubkey.is_op_return() { - match output.script_pubkey.instructions_minimal().last() { - Some(Ok(Instruction::PushBytes(data))) => return data.as_bytes().to_vec(), - _ => continue, - } - } - } - vec![] - })(); - - // check inputs - if data.is_empty() { - for input in &tx.input { - let witness = input.witness.to_vec(); - // expected witness at least has to have 2 items, msg and the redeem script. 
- if witness.len() >= 2 { - let redeem_script = ScriptBuf::from_bytes(witness.last().unwrap().clone()); - if Self::segwit_data_pattern(&redeem_script) == Some(true) { - data.clone_from(&witness[witness.len() - 2]); // len() - 1 is the redeem_script - break; - } - } - } - } - - data.truncate(MAX_DATA_LEN.try_into().unwrap()); - data - } - - #[cfg(test)] - pub fn sign_btc_input_for_p2pkh( - tx: &Transaction, - input_index: usize, - private_key: &PrivateKey, - ) -> ScriptBuf { - use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress}; - - let public_key = PublicKey::from_private_key(SECP256K1, private_key); - let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest); - - let mut der = SECP256K1 - .sign_ecdsa_low_r( - &Message::from_digest_slice( - SighashCache::new(tx) - .legacy_signature_hash( - input_index, - &main_addr.script_pubkey(), - EcdsaSighashType::All.to_u32(), - ) - .unwrap() - .to_raw_hash() - .as_ref(), - ) - .unwrap(), - &private_key.inner, - ) - .serialize_der() - .to_vec(); - der.push(1); - - ScriptBuf::builder() - .push_slice(PushBytesBuf::try_from(der).unwrap()) - .push_key(&public_key) - .into_script() - } -} - -// Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT) -// A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes -// While our inputs are entirely SegWit, such fine tuning is not necessary and could create -// issues in the future (if the size decreases or we misevaluate it) -// It also offers a minimal amount of benefit when we are able to logarithmically accumulate -// inputs -// For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and -// 64-byte outputs (40-byte script, 8-byte amount, whatever overhead), they together take up 192 -// bytes -// 100,000 / 192 = 520 -// 520 * 192 leaves 160 bytes of overhead for the transaction structure itself -const MAX_INPUTS: usize = 520; -const MAX_OUTPUTS: usize = 520; - -fn address_from_key(key: ProjectivePoint) -> Address { - Address::new( - p2tr_script_buf(key).expect("creating address from key which isn't properly tweaked"), - ) - .expect("couldn't create Serai-representable address for P2TR script") -} - -#[async_trait] -impl Network for Bitcoin { - type Curve = Secp256k1; - - type Transaction = Transaction; - type Block = Block; - - type Output = Output; - type SignableTransaction = SignableTransaction; - type Eventuality = Eventuality; - type TransactionMachine = TransactionMachine; - - type Scheduler = Scheduler; - - type Address = Address; - - const NETWORK: ExternalNetworkId = ExternalNetworkId::Bitcoin; - const ID: &'static str = "Bitcoin"; - const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 600; - const CONFIRMATIONS: usize = 6; - - /* - A Taproot input is: - - 36 bytes for the OutPoint - - 0 bytes for the script (+1 byte for the length) - - 4 bytes for the sequence - Per https://developer.bitcoin.org/reference/transactions.html#raw-transaction-format - - There's also: - - 1 byte for the witness length - - 1 byte for the signature length - - 64 bytes for the signature - which have the SegWit discount. - - (4 * (36 + 1 + 4)) + (1 + 1 + 64) = 164 + 66 = 230 weight units - 230 ceil div 4 = 57 vbytes - - Bitcoin defines multiple minimum feerate constants *per kilo-vbyte*. Currently, these are: - - 1000 sat/kilo-vbyte for a transaction to be relayed - - Each output's value must exceed the fee of the TX spending it at 3000 sat/kilo-vbyte - The DUST constant needs to be determined by the latter. 
- Since these are solely relay rules, and may be raised, we require all outputs be spendable - under a 5000 sat/kilo-vbyte fee rate. - - 5000 sat/kilo-vbyte = 5 sat/vbyte - 5 * 57 = 285 sats/spent-output - - Even if an output took 100 bytes (it should be just ~29-43), taking 400 weight units, adding - 100 vbytes, tripling the transaction size, then the sats/tx would be < 1000. - - Increase by an order of magnitude, in order to ensure this is actually worth our time, and we - get 10,000 satoshis. - */ - const DUST: u64 = 10_000; - - // 2 inputs should be 2 * 230 = 460 weight units - // The output should be ~36 bytes, or 144 weight units - // The overhead should be ~20 bytes at most, or 80 weight units - // 684 weight units, 171 vbytes, round up to 200 - // 200 vbytes at 1 sat/weight (our current minimum fee, 4 sat/vbyte) = 800 sat fee for the - // aggregation TX - const COST_TO_AGGREGATE: u64 = 800; - - const MAX_OUTPUTS: usize = MAX_OUTPUTS; - - fn tweak_keys(keys: &mut ThresholdKeys) { - *keys = tweak_keys(keys); - // Also create a scanner to assert these keys, and all expected paths, are usable - scanner(keys.group_key()); - } - - #[cfg(test)] - async fn external_address(&self, key: ProjectivePoint) -> Address { - address_from_key(key) - } - - fn branch_address(key: ProjectivePoint) -> Option
{
-    let (_, offsets, _) = scanner(key);
-    Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Branch])))
-  }
-
-  fn change_address(key: ProjectivePoint) -> Option<Address> {
-    let (_, offsets, _) = scanner(key);
-    Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Change])))
-  }
-
-  fn forward_address(key: ProjectivePoint) -> Option<Address>
{ - let (_, offsets, _) = scanner(key); - Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Forwarded]))) - } - - async fn get_latest_block_number(&self) -> Result { - self.rpc.get_latest_block_number().await.map_err(|_| NetworkError::ConnectionError) - } - - async fn get_block(&self, number: usize) -> Result { - let block_hash = - self.rpc.get_block_hash(number).await.map_err(|_| NetworkError::ConnectionError)?; - self.rpc.get_block(&block_hash).await.map_err(|_| NetworkError::ConnectionError) - } - - async fn get_outputs(&self, block: &Self::Block, key: ProjectivePoint) -> Vec { - let (scanner, _, kinds) = scanner(key); - - let mut outputs = vec![]; - // Skip the coinbase transaction which is burdened by maturity - for tx in &block.txdata[1 ..] { - for output in scanner.scan_transaction(tx) { - let offset_repr = output.offset().to_repr(); - let offset_repr_ref: &[u8] = offset_repr.as_ref(); - let kind = kinds[offset_repr_ref]; - - let output = Output { kind, presumed_origin: None, output, data: vec![] }; - assert_eq!(output.tx_id(), tx.id()); - outputs.push(output); - } - - if outputs.is_empty() { - continue; - } - - // populate the outputs with the origin and data - let presumed_origin = { - // This may identify the P2WSH output *embedding the InInstruction* as the origin, which - // would be a bit trickier to spend that a traditional output... - // There's no risk of the InInstruction going missing as it'd already be on-chain though - // We *could* parse out the script *without the InInstruction prefix* and declare that the - // origin - // TODO - let spent_output = { - let input = &tx.input[0]; - let mut spent_tx = input.previous_output.txid.as_raw_hash().to_byte_array(); - spent_tx.reverse(); - let mut tx; - while { - tx = self.rpc.get_transaction(&spent_tx).await; - tx.is_err() - } { - log::error!("couldn't get transaction from bitcoin node: {tx:?}"); - sleep(Duration::from_secs(5)).await; - } - tx.unwrap().output.swap_remove(usize::try_from(input.previous_output.vout).unwrap()) - }; - Address::new(spent_output.script_pubkey) - }; - let data = Self::extract_serai_data(tx); - for output in &mut outputs { - if output.kind == OutputType::External { - output.data.clone_from(&data); - } - output.presumed_origin.clone_from(&presumed_origin); - } - } - - outputs - } - - async fn get_eventuality_completions( - &self, - eventualities: &mut EventualitiesTracker, - block: &Self::Block, - ) -> HashMap<[u8; 32], (usize, [u8; 32], Transaction)> { - let mut res = HashMap::new(); - if eventualities.map.is_empty() { - return res; - } - - fn check_block( - eventualities: &mut EventualitiesTracker, - block: &Block, - res: &mut HashMap<[u8; 32], (usize, [u8; 32], Transaction)>, - ) { - for tx in &block.txdata[1 ..] { - if let Some((plan, _)) = eventualities.map.remove(tx.id().as_slice()) { - res.insert(plan, (eventualities.block_number, tx.id(), tx.clone())); - } - } - - eventualities.block_number += 1; - } - - let this_block_hash = block.id(); - let this_block_num = (async { - loop { - match self.rpc.get_block_number(&this_block_hash).await { - Ok(number) => return number, - Err(e) => { - log::error!("couldn't get the block number for {}: {}", hex::encode(this_block_hash), e) - } - } - sleep(Duration::from_secs(60)).await; - } - }) - .await; - - for block_num in (eventualities.block_number + 1) .. 
this_block_num { - let block = { - let mut block; - while { - block = self.get_block(block_num).await; - block.is_err() - } { - log::error!("couldn't get block {}: {}", block_num, block.err().unwrap()); - sleep(Duration::from_secs(60)).await; - } - block.unwrap() - }; - - check_block(eventualities, &block, &mut res); - } - - // Also check the current block - check_block(eventualities, block, &mut res); - assert_eq!(eventualities.block_number, this_block_num); - - res - } - - async fn needed_fee( - &self, - block_number: usize, - inputs: &[Output], - payments: &[Payment], - change: &Option
, - ) -> Result, NetworkError> { - Ok( - self - .make_signable_transaction(block_number, inputs, payments, change, true) - .await? - .map(|signable| signable.needed_fee()), - ) - } - - async fn signable_transaction( - &self, - block_number: usize, - _plan_id: &[u8; 32], - _key: ProjectivePoint, - inputs: &[Output], - payments: &[Payment], - change: &Option
, - (): &(), - ) -> Result, NetworkError> { - Ok(self.make_signable_transaction(block_number, inputs, payments, change, false).await?.map( - |signable| { - let eventuality = Eventuality(signable.txid()); - (SignableTransaction { actual: signable }, eventuality) - }, - )) - } - - async fn attempt_sign( - &self, - keys: ThresholdKeys, - transaction: Self::SignableTransaction, - ) -> Result { - Ok(transaction.actual.clone().multisig(&keys).expect("used the wrong keys")) - } - - async fn publish_completion(&self, tx: &Transaction) -> Result<(), NetworkError> { - match self.rpc.send_raw_transaction(tx).await { - Ok(_) => (), - Err(RpcError::ConnectionError) => Err(NetworkError::ConnectionError)?, - // TODO: Distinguish already in pool vs double spend (other signing attempt succeeded) vs - // invalid transaction - Err(e) => panic!("failed to publish TX {}: {e}", tx.compute_txid()), - } - Ok(()) - } - - async fn confirm_completion( - &self, - eventuality: &Self::Eventuality, - _: &EmptyClaim, - ) -> Result, NetworkError> { - Ok(Some( - self.rpc.get_transaction(&eventuality.0).await.map_err(|_| NetworkError::ConnectionError)?, - )) - } - - #[cfg(test)] - async fn get_block_number(&self, id: &[u8; 32]) -> usize { - self.rpc.get_block_number(id).await.unwrap() - } - - #[cfg(test)] - async fn check_eventuality_by_claim( - &self, - eventuality: &Self::Eventuality, - _: &EmptyClaim, - ) -> bool { - self.rpc.get_transaction(&eventuality.0).await.is_ok() - } - - #[cfg(test)] - async fn get_transaction_by_eventuality(&self, _: usize, id: &Eventuality) -> Transaction { - self.rpc.get_transaction(&id.0).await.unwrap() - } - - #[cfg(test)] - async fn mine_block(&self) { - use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress}; - - self - .rpc - .rpc_call::>( - "generatetoaddress", - serde_json::json!([1, BAddress::p2sh(Script::new(), BNetwork::Regtest).unwrap()]), - ) - .await - .unwrap(); - } - - #[cfg(test)] - async fn test_send(&self, address: Address) -> Block { - use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress}; - - let secret_key = SecretKey::new(&mut rand_core::OsRng); - let private_key = PrivateKey::new(secret_key, BNetwork::Regtest); - let public_key = PublicKey::from_private_key(SECP256K1, &private_key); - let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest); - - let new_block = self.get_latest_block_number().await.unwrap() + 1; - self - .rpc - .rpc_call::>("generatetoaddress", serde_json::json!([100, main_addr])) - .await - .unwrap(); - - let tx = self.get_block(new_block).await.unwrap().txdata.swap_remove(0); - let mut tx = Transaction { - version: Version(2), - lock_time: LockTime::ZERO, - input: vec![TxIn { - previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 }, - script_sig: Script::new().into(), - sequence: Sequence(u32::MAX), - witness: Witness::default(), - }], - output: vec![TxOut { - value: tx.output[0].value - BAmount::from_sat(10000), - script_pubkey: address.clone().into(), - }], - }; - tx.input[0].script_sig = Self::sign_btc_input_for_p2pkh(&tx, 0, &private_key); - - let block = self.get_latest_block_number().await.unwrap() + 1; - self.rpc.send_raw_transaction(&tx).await.unwrap(); - for _ in 0 .. 
Self::CONFIRMATIONS { - self.mine_block().await; - } - self.get_block(block).await.unwrap() - } -} - -impl UtxoNetwork for Bitcoin { - const MAX_INPUTS: usize = MAX_INPUTS; -} diff --git a/processor/src/networks/ethereum.rs b/processor/src/networks/ethereum.rs deleted file mode 100644 index f4788849..00000000 --- a/processor/src/networks/ethereum.rs +++ /dev/null @@ -1,940 +0,0 @@ -use core::{fmt, time::Duration}; -use std::{ - sync::Arc, - collections::{HashSet, HashMap}, - io, -}; - -use async_trait::async_trait; - -use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1}; -use frost::ThresholdKeys; - -use ethereum_serai::{ - alloy::{ - primitives::U256, - rpc_types::{BlockTransactionsKind, BlockNumberOrTag, Transaction}, - simple_request_transport::SimpleRequest, - rpc_client::ClientBuilder, - provider::{Provider, RootProvider}, - }, - crypto::{PublicKey, Signature}, - erc20::Erc20, - deployer::Deployer, - router::{Router, Coin as EthereumCoin, InInstruction as EthereumInInstruction}, - machine::*, -}; -#[cfg(test)] -use ethereum_serai::alloy::primitives::B256; - -use tokio::{ - time::sleep, - sync::{RwLock, RwLockReadGuard}, -}; -#[cfg(not(test))] -use tokio::{ - io::{AsyncReadExt, AsyncWriteExt}, - net::TcpStream, -}; - -use serai_client::{ - primitives::{ExternalCoin, Amount, ExternalBalance, ExternalNetworkId}, - validator_sets::primitives::Session, -}; - -use crate::{ - Db, Payment, - networks::{ - OutputType, Output, Transaction as TransactionTrait, SignableTransaction, Block, - Eventuality as EventualityTrait, EventualitiesTracker, NetworkError, Network, - }, - key_gen::NetworkKeyDb, - multisigs::scheduler::{ - Scheduler as SchedulerTrait, - smart_contract::{Addendum, Scheduler}, - }, -}; - -#[cfg(not(test))] -const DAI: [u8; 20] = - match const_hex::const_decode_to_array(b"0x6B175474E89094C44Da98b954EedeAC495271d0F") { - Ok(res) => res, - Err(_) => panic!("invalid non-test DAI hex address"), - }; -#[cfg(test)] // TODO -const DAI: [u8; 20] = - match const_hex::const_decode_to_array(b"0000000000000000000000000000000000000000") { - Ok(res) => res, - Err(_) => panic!("invalid test DAI hex address"), - }; - -fn coin_to_serai_coin(coin: &EthereumCoin) -> Option { - match coin { - EthereumCoin::Ether => Some(ExternalCoin::Ether), - EthereumCoin::Erc20(token) => { - if *token == DAI { - return Some(ExternalCoin::Dai); - } - None - } - } -} - -fn amount_to_serai_amount(coin: ExternalCoin, amount: U256) -> Amount { - assert_eq!(coin.network(), ExternalNetworkId::Ethereum); - assert_eq!(coin.decimals(), 8); - // Remove 10 decimals so we go from 18 decimals to 8 decimals - let divisor = U256::from(10_000_000_000u64); - // This is valid up to 184b, which is assumed for the coins allowed - Amount(u64::try_from(amount / divisor).unwrap()) -} - -fn balance_to_ethereum_amount(balance: ExternalBalance) -> U256 { - assert_eq!(balance.coin.network(), ExternalNetworkId::Ethereum); - assert_eq!(balance.coin.decimals(), 8); - // Restore 10 decimals so we go from 8 decimals to 18 decimals - let factor = U256::from(10_000_000_000u64); - U256::from(balance.amount.0) * factor -} - -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct Address(pub [u8; 20]); -impl TryFrom> for Address { - type Error = (); - fn try_from(bytes: Vec) -> Result { - if bytes.len() != 20 { - Err(())?; - } - let mut res = [0; 20]; - res.copy_from_slice(&bytes); - Ok(Address(res)) - } -} -impl TryInto> for Address { - type Error = (); - fn try_into(self) -> Result, ()> { - Ok(self.0.to_vec()) - } -} - -impl fmt::Display 
for Address { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - ethereum_serai::alloy::primitives::Address::from(self.0).fmt(f) - } -} - -impl SignableTransaction for RouterCommand { - fn fee(&self) -> u64 { - // Return a fee of 0 as we'll handle amortization on our end - 0 - } -} - -#[async_trait] -impl TransactionTrait> for Transaction { - type Id = [u8; 32]; - fn id(&self) -> Self::Id { - self.hash.0 - } - - #[cfg(test)] - async fn fee(&self, _network: &Ethereum) -> u64 { - // Return a fee of 0 as we'll handle amortization on our end - 0 - } -} - -// We use 32-block Epochs to represent blocks. -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct Epoch { - // The hash of the block which ended the prior Epoch. - prior_end_hash: [u8; 32], - // The first block number within this Epoch. - start: u64, - // The hash of the last block within this Epoch. - end_hash: [u8; 32], - // The monotonic time for this Epoch. - time: u64, -} - -impl Epoch { - fn end(&self) -> u64 { - self.start + 31 - } -} - -#[async_trait] -impl Block> for Epoch { - type Id = [u8; 32]; - fn id(&self) -> [u8; 32] { - self.end_hash - } - fn parent(&self) -> [u8; 32] { - self.prior_end_hash - } - async fn time(&self, _: &Ethereum) -> u64 { - self.time - } -} - -impl Output> for EthereumInInstruction { - type Id = [u8; 32]; - - fn kind(&self) -> OutputType { - OutputType::External - } - - fn id(&self) -> Self::Id { - let mut id = [0; 40]; - id[.. 32].copy_from_slice(&self.id.0); - id[32 ..].copy_from_slice(&self.id.1.to_le_bytes()); - *ethereum_serai::alloy::primitives::keccak256(id) - } - fn tx_id(&self) -> [u8; 32] { - self.id.0 - } - fn key(&self) -> ::G { - self.key_at_end_of_block - } - - fn presumed_origin(&self) -> Option
{ - Some(Address(self.from)) - } - - fn balance(&self) -> ExternalBalance { - let coin = coin_to_serai_coin(&self.coin).unwrap_or_else(|| { - panic!( - "requesting coin for an EthereumInInstruction with a coin {}", - "we don't handle. this never should have been yielded" - ) - }); - ExternalBalance { coin, amount: amount_to_serai_amount(coin, self.amount) } - } - fn data(&self) -> &[u8] { - &self.data - } - - fn write(&self, writer: &mut W) -> io::Result<()> { - EthereumInInstruction::write(self, writer) - } - fn read(reader: &mut R) -> io::Result { - EthereumInInstruction::read(reader) - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Claim { - signature: [u8; 64], -} -impl AsRef<[u8]> for Claim { - fn as_ref(&self) -> &[u8] { - &self.signature - } -} -impl AsMut<[u8]> for Claim { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.signature - } -} -impl Default for Claim { - fn default() -> Self { - Self { signature: [0; 64] } - } -} -impl From<&Signature> for Claim { - fn from(sig: &Signature) -> Self { - Self { signature: sig.to_bytes() } - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Eventuality(PublicKey, RouterCommand); -impl EventualityTrait for Eventuality { - type Claim = Claim; - type Completion = SignedRouterCommand; - - fn lookup(&self) -> Vec { - match self.1 { - RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => { - nonce.as_le_bytes().to_vec() - } - } - } - - fn read(reader: &mut R) -> io::Result { - let point = Secp256k1::read_G(reader)?; - let command = RouterCommand::read(reader)?; - Ok(Eventuality( - PublicKey::new(point).ok_or(io::Error::other("unusable key within Eventuality"))?, - command, - )) - } - fn serialize(&self) -> Vec { - let mut res = vec![]; - res.extend(self.0.point().to_bytes().as_slice()); - self.1.write(&mut res).unwrap(); - res - } - - fn claim(completion: &Self::Completion) -> Self::Claim { - Claim::from(completion.signature()) - } - fn serialize_completion(completion: &Self::Completion) -> Vec { - let mut res = vec![]; - completion.write(&mut res).unwrap(); - res - } - fn read_completion(reader: &mut R) -> io::Result { - SignedRouterCommand::read(reader) - } -} - -#[derive(Clone)] -pub struct Ethereum { - // This DB is solely used to access the first key generated, as needed to determine the Router's - // address. Accordingly, all methods present are consistent to a Serai chain with a finalized - // first key (regardless of local state), and this is safe. 
- db: D, - #[cfg_attr(test, allow(unused))] - relayer_url: String, - provider: Arc>, - deployer: Deployer, - router: Arc>>, -} -impl PartialEq for Ethereum { - fn eq(&self, _other: &Ethereum) -> bool { - true - } -} -impl fmt::Debug for Ethereum { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt - .debug_struct("Ethereum") - .field("deployer", &self.deployer) - .field("router", &self.router) - .finish_non_exhaustive() - } -} -impl Ethereum { - pub async fn new(db: D, daemon_url: String, relayer_url: String) -> Self { - let provider = Arc::new(RootProvider::new( - ClientBuilder::default().transport(SimpleRequest::new(daemon_url), true), - )); - - let mut deployer = Deployer::new(provider.clone()).await; - while !matches!(deployer, Ok(Some(_))) { - log::error!("Deployer wasn't deployed yet or networking error"); - sleep(Duration::from_secs(5)).await; - deployer = Deployer::new(provider.clone()).await; - } - let deployer = deployer.unwrap().unwrap(); - - dbg!(&relayer_url); - dbg!(relayer_url.len()); - Ethereum { db, relayer_url, provider, deployer, router: Arc::new(RwLock::new(None)) } - } - - // Obtain a reference to the Router, sleeping until it's deployed if it hasn't already been. - // This is guaranteed to return Some. - pub async fn router(&self) -> RwLockReadGuard<'_, Option> { - // If we've already instantiated the Router, return a read reference - { - let router = self.router.read().await; - if router.is_some() { - return router; - } - } - - // Instantiate it - let mut router = self.router.write().await; - // If another attempt beat us to it, return - if router.is_some() { - drop(router); - return self.router.read().await; - } - - // Get the first key from the DB - let first_key = - NetworkKeyDb::get(&self.db, Session(0)).expect("getting outputs before confirming a key"); - let key = Secp256k1::read_G(&mut first_key.as_slice()).unwrap(); - let public_key = PublicKey::new(key).unwrap(); - - // Find the router - let mut found = self.deployer.find_router(self.provider.clone(), &public_key).await; - while !matches!(found, Ok(Some(_))) { - log::error!("Router wasn't deployed yet or networking error"); - sleep(Duration::from_secs(5)).await; - found = self.deployer.find_router(self.provider.clone(), &public_key).await; - } - - // Set it - *router = Some(found.unwrap().unwrap()); - - // Downgrade to a read lock - // Explicitly doesn't use `downgrade` so that another pending write txn can realize it's no - // longer necessary - drop(router); - self.router.read().await - } -} - -#[async_trait] -impl Network for Ethereum { - type Curve = Secp256k1; - - type Transaction = Transaction; - type Block = Epoch; - - type Output = EthereumInInstruction; - type SignableTransaction = RouterCommand; - type Eventuality = Eventuality; - type TransactionMachine = RouterCommandMachine; - - type Scheduler = Scheduler; - - type Address = Address; - - const NETWORK: ExternalNetworkId = ExternalNetworkId::Ethereum; - const ID: &'static str = "Ethereum"; - const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 32 * 12; - const CONFIRMATIONS: usize = 1; - - const DUST: u64 = 0; // TODO - - const COST_TO_AGGREGATE: u64 = 0; - - // TODO: usize::max, with a merkle tree in the router - const MAX_OUTPUTS: usize = 256; - - fn tweak_keys(keys: &mut ThresholdKeys) { - while PublicKey::new(keys.group_key()).is_none() { - *keys = keys.offset(::F::ONE); - } - } - - #[cfg(test)] - async fn external_address(&self, _key: ::G) -> Address { - Address(self.router().await.as_ref().unwrap().address()) - } - - fn 
branch_address(_key: ::G) -> Option
{ - None - } - - fn change_address(_key: ::G) -> Option
{ - None - } - - fn forward_address(_key: ::G) -> Option
{ - None - } - - async fn get_latest_block_number(&self) -> Result { - let actual_number = self - .provider - .get_block(BlockNumberOrTag::Finalized.into(), BlockTransactionsKind::Hashes) - .await - .map_err(|_| NetworkError::ConnectionError)? - .ok_or(NetworkError::ConnectionError)? - .header - .number; - // Error if there hasn't been a full epoch yet - if actual_number < 32 { - Err(NetworkError::ConnectionError)? - } - // If this is 33, the division will return 1, yet 1 is the epoch in progress - let latest_full_epoch = (actual_number / 32).saturating_sub(1); - Ok(latest_full_epoch.try_into().unwrap()) - } - - async fn get_block(&self, number: usize) -> Result { - let latest_finalized = self.get_latest_block_number().await?; - if number > latest_finalized { - Err(NetworkError::ConnectionError)? - } - - let start = number * 32; - let prior_end_hash = if start == 0 { - [0; 32] - } else { - self - .provider - .get_block(u64::try_from(start - 1).unwrap().into(), BlockTransactionsKind::Hashes) - .await - .ok() - .flatten() - .ok_or(NetworkError::ConnectionError)? - .header - .hash - .into() - }; - - let end_header = self - .provider - .get_block(u64::try_from(start + 31).unwrap().into(), BlockTransactionsKind::Hashes) - .await - .ok() - .flatten() - .ok_or(NetworkError::ConnectionError)? - .header; - - let end_hash = end_header.hash.into(); - let time = end_header.timestamp; - - Ok(Epoch { prior_end_hash, start: start.try_into().unwrap(), end_hash, time }) - } - - async fn get_outputs( - &self, - block: &Self::Block, - _: ::G, - ) -> Vec { - let router = self.router().await; - let router = router.as_ref().unwrap(); - // Grab the key at the end of the epoch - let key_at_end_of_block = loop { - match router.key_at_end_of_block(block.start + 31).await { - Ok(Some(key)) => break key, - Ok(None) => return vec![], - Err(e) => { - log::error!("couldn't connect to router for the key at the end of the block: {e:?}"); - sleep(Duration::from_secs(5)).await; - continue; - } - } - }; - - let mut all_events = vec![]; - let mut top_level_txids = HashSet::new(); - for erc20_addr in [DAI] { - let erc20 = Erc20::new(self.provider.clone(), erc20_addr); - - for block in block.start .. (block.start + 32) { - let transfers = loop { - match erc20.top_level_transfers(block, router.address()).await { - Ok(transfers) => break transfers, - Err(e) => { - log::error!("couldn't connect to Ethereum node for the top-level transfers: {e:?}"); - sleep(Duration::from_secs(5)).await; - continue; - } - } - }; - - for transfer in transfers { - top_level_txids.insert(transfer.id); - all_events.push(EthereumInInstruction { - id: (transfer.id, 0), - from: transfer.from, - coin: EthereumCoin::Erc20(erc20_addr), - amount: transfer.amount, - data: transfer.data, - key_at_end_of_block, - }); - } - } - } - - for block in block.start .. 
(block.start + 32) { - let mut events = router.in_instructions(block, &HashSet::from([DAI])).await; - while let Err(e) = events { - log::error!("couldn't connect to Ethereum node for the Router's events: {e:?}"); - sleep(Duration::from_secs(5)).await; - events = router.in_instructions(block, &HashSet::from([DAI])).await; - } - let mut events = events.unwrap(); - for event in &mut events { - // A transaction should either be a top-level transfer or a Router InInstruction - if top_level_txids.contains(&event.id.0) { - panic!("top-level transfer had {} and router had {:?}", hex::encode(event.id.0), event); - } - // Overwrite the key at end of block to key at end of epoch - event.key_at_end_of_block = key_at_end_of_block; - } - all_events.extend(events); - } - - for event in &all_events { - assert!( - coin_to_serai_coin(&event.coin).is_some(), - "router yielded events for unrecognized coins" - ); - } - all_events - } - - async fn get_eventuality_completions( - &self, - eventualities: &mut EventualitiesTracker, - block: &Self::Block, - ) -> HashMap< - [u8; 32], - ( - usize, - >::Id, - ::Completion, - ), - > { - let mut res = HashMap::new(); - if eventualities.map.is_empty() { - return res; - } - - let router = self.router().await; - let router = router.as_ref().unwrap(); - - let past_scanned_epoch = loop { - match self.get_block(eventualities.block_number).await { - Ok(block) => break block, - Err(e) => log::error!("couldn't get the last scanned block in the tracker: {}", e), - } - sleep(Duration::from_secs(10)).await; - }; - assert_eq!( - past_scanned_epoch.start / 32, - u64::try_from(eventualities.block_number).unwrap(), - "assumption of tracker block number's relation to epoch start is incorrect" - ); - - // Iterate from after the epoch number in the tracker to the end of this epoch - for block_num in (past_scanned_epoch.end() + 1) ..= block.end() { - let executed = loop { - match router.executed_commands(block_num).await { - Ok(executed) => break executed, - Err(e) => log::error!("couldn't get the executed commands in block {block_num}: {e}"), - } - sleep(Duration::from_secs(10)).await; - }; - - for executed in executed { - let lookup = executed.nonce.to_le_bytes().to_vec(); - if let Some((plan_id, eventuality)) = eventualities.map.get(&lookup) { - if let Some(command) = - SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &executed.signature) - { - res.insert(*plan_id, (block_num.try_into().unwrap(), executed.tx_id, command)); - eventualities.map.remove(&lookup); - } - } - } - } - eventualities.block_number = (block.start / 32).try_into().unwrap(); - - res - } - - async fn needed_fee( - &self, - _block_number: usize, - inputs: &[Self::Output], - _payments: &[Payment], - _change: &Option, - ) -> Result, NetworkError> { - assert_eq!(inputs.len(), 0); - // Claim no fee is needed so we can perform amortization ourselves - Ok(Some(0)) - } - - async fn signable_transaction( - &self, - _block_number: usize, - _plan_id: &[u8; 32], - key: ::G, - inputs: &[Self::Output], - payments: &[Payment], - change: &Option, - scheduler_addendum: &>::Addendum, - ) -> Result, NetworkError> { - assert_eq!(inputs.len(), 0); - assert!(change.is_none()); - let chain_id = self.provider.get_chain_id().await.map_err(|_| NetworkError::ConnectionError)?; - - // TODO: Perform fee amortization (in scheduler? - // TODO: Make this function internal and have needed_fee properly return None as expected? 
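For reference, the epoch arithmetic relied on by get_latest_block_number, get_block, and the tracker assertion above: epoch n covers Ethereum blocks 32*n through 32*n + 31, and an epoch is only reported once a later finalized block exists. A standalone sketch on concrete numbers (the function names here are illustrative, not from the codebase):

// Epoch n covers Ethereum blocks [32 * n, 32 * n + 31].
fn epoch_bounds(epoch: u64) -> (u64, u64) {
  (epoch * 32, (epoch * 32) + 31)
}

// Mirrors get_latest_block_number: with the finalized head at `finalized`, epoch
// finalized / 32 is treated as still in progress, so the latest full epoch is one
// less (and nothing is reported until at least 32 blocks exist).
fn latest_full_epoch(finalized: u64) -> Option<u64> {
  if finalized < 32 {
    return None;
  }
  Some((finalized / 32).saturating_sub(1))
}

fn main() {
  // A finalized head of 97 sits inside epoch 3 (blocks 96 ..= 127), so epoch 2
  // (blocks 64 ..= 95) is the latest fully-finalized epoch.
  assert_eq!(latest_full_epoch(97), Some(2));
  assert_eq!(epoch_bounds(2), (64, 95));
}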
- // TODO: signable_transaction is written as cannot return None if needed_fee returns Some - // TODO: Why can this return None at all if it isn't allowed to return None? - - let command = match scheduler_addendum { - Addendum::Nonce(nonce) => RouterCommand::Execute { - chain_id: U256::try_from(chain_id).unwrap(), - nonce: U256::try_from(*nonce).unwrap(), - outs: payments - .iter() - .filter_map(|payment| { - Some(OutInstruction { - target: if let Some(data) = payment.data.as_ref() { - // This introspects the Call serialization format, expecting the first 20 bytes to - // be the address - // This avoids wasting the 20-bytes allocated within address - let full_data = [payment.address.0.as_slice(), data].concat(); - let mut reader = full_data.as_slice(); - - let mut calls = vec![]; - while !reader.is_empty() { - calls.push(Call::read(&mut reader).ok()?) - } - // The above must have executed at least once since reader contains the address - assert_eq!(calls[0].to, payment.address.0); - - OutInstructionTarget::Calls(calls) - } else { - OutInstructionTarget::Direct(payment.address.0) - }, - value: { - assert_eq!(payment.balance.coin, ExternalCoin::Ether); // TODO - balance_to_ethereum_amount(payment.balance) - }, - }) - }) - .collect(), - }, - Addendum::RotateTo { nonce, new_key } => { - assert!(payments.is_empty()); - RouterCommand::UpdateSeraiKey { - chain_id: U256::try_from(chain_id).unwrap(), - nonce: U256::try_from(*nonce).unwrap(), - key: PublicKey::new(*new_key).expect("new key wasn't a valid ETH public key"), - } - } - }; - Ok(Some(( - command.clone(), - Eventuality(PublicKey::new(key).expect("key wasn't a valid ETH public key"), command), - ))) - } - - async fn attempt_sign( - &self, - keys: ThresholdKeys, - transaction: Self::SignableTransaction, - ) -> Result { - Ok( - RouterCommandMachine::new(keys, transaction) - .expect("keys weren't usable to sign router commands"), - ) - } - - async fn publish_completion( - &self, - completion: &::Completion, - ) -> Result<(), NetworkError> { - // Publish this to the dedicated TX server for a solver to actually publish - #[cfg(not(test))] - { - let mut msg = vec![]; - match completion.command() { - RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => { - msg.extend(&u32::try_from(nonce).unwrap().to_le_bytes()); - } - } - completion.write(&mut msg).unwrap(); - - let Ok(mut socket) = TcpStream::connect(&self.relayer_url).await else { - log::warn!("couldn't connect to the relayer server"); - Err(NetworkError::ConnectionError)? - }; - let Ok(()) = socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await else { - log::warn!("couldn't send the message's len to the relayer server"); - Err(NetworkError::ConnectionError)? - }; - let Ok(()) = socket.write_all(&msg).await else { - log::warn!("couldn't write the message to the relayer server"); - Err(NetworkError::ConnectionError)? - }; - if socket.read_u8().await.ok() != Some(1) { - log::warn!("didn't get the ack from the relayer server"); - Err(NetworkError::ConnectionError)?; - } - - Ok(()) - } - - // Publish this using a dummy account we fund with magic RPC commands - #[cfg(test)] - { - let router = self.router().await; - let router = router.as_ref().unwrap(); - - let mut tx = match completion.command() { - RouterCommand::UpdateSeraiKey { key, .. } => { - router.update_serai_key(key, completion.signature()) - } - RouterCommand::Execute { outs, .. 
} => router.execute( - &outs.iter().cloned().map(Into::into).collect::>(), - completion.signature(), - ), - }; - tx.gas_limit = 1_000_000u64; - tx.gas_price = 1_000_000_000u64.into(); - let tx = ethereum_serai::crypto::deterministically_sign(&tx); - - if self.provider.get_transaction_by_hash(*tx.hash()).await.unwrap().is_none() { - self - .provider - .raw_request::<_, ()>( - "anvil_setBalance".into(), - [ - tx.recover_signer().unwrap().to_string(), - (U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price)).to_string(), - ], - ) - .await - .unwrap(); - - let (tx, sig, _) = tx.into_parts(); - let mut bytes = vec![]; - tx.encode_with_signature_fields(&sig, &mut bytes); - let pending_tx = self.provider.send_raw_transaction(&bytes).await.unwrap(); - self.mine_block().await; - assert!(pending_tx.get_receipt().await.unwrap().status()); - } - - Ok(()) - } - } - - async fn confirm_completion( - &self, - eventuality: &Self::Eventuality, - claim: &::Claim, - ) -> Result::Completion>, NetworkError> { - Ok(SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &claim.signature)) - } - - #[cfg(test)] - async fn get_block_number(&self, id: &>::Id) -> usize { - self - .provider - .get_block(B256::from(*id).into(), BlockTransactionsKind::Hashes) - .await - .unwrap() - .unwrap() - .header - .number - .try_into() - .unwrap() - } - - #[cfg(test)] - async fn check_eventuality_by_claim( - &self, - eventuality: &Self::Eventuality, - claim: &::Claim, - ) -> bool { - SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &claim.signature).is_some() - } - - #[cfg(test)] - async fn get_transaction_by_eventuality( - &self, - block: usize, - eventuality: &Self::Eventuality, - ) -> Self::Transaction { - // We mine 96 blocks to ensure the 32 blocks relevant are finalized - // Back-check the prior two epochs in response to this - // TODO: Review why this is sub(3) and not sub(2) - for block in block.saturating_sub(3) ..= block { - match eventuality.1 { - RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. 
} => { - let router = self.router().await; - let router = router.as_ref().unwrap(); - - let block = u64::try_from(block).unwrap(); - let filter = router - .key_updated_filter() - .from_block(block * 32) - .to_block(((block + 1) * 32) - 1) - .topic1(nonce); - let logs = self.provider.get_logs(&filter).await.unwrap(); - if let Some(log) = logs.first() { - return self - .provider - .get_transaction_by_hash(log.clone().transaction_hash.unwrap()) - .await - .unwrap() - .unwrap(); - }; - - let filter = router - .executed_filter() - .from_block(block * 32) - .to_block(((block + 1) * 32) - 1) - .topic1(nonce); - let logs = self.provider.get_logs(&filter).await.unwrap(); - if logs.is_empty() { - continue; - } - return self - .provider - .get_transaction_by_hash(logs[0].transaction_hash.unwrap()) - .await - .unwrap() - .unwrap(); - } - } - } - panic!("couldn't find completion in any three of checked blocks"); - } - - #[cfg(test)] - async fn mine_block(&self) { - self.provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); - } - - #[cfg(test)] - async fn test_send(&self, send_to: Self::Address) -> Self::Block { - use rand_core::OsRng; - use ciphersuite::group::ff::Field; - use ethereum_serai::alloy::sol_types::SolCall; - - let key = ::F::random(&mut OsRng); - let address = ethereum_serai::crypto::address(&(Secp256k1::generator() * key)); - - // Set a 1.1 ETH balance - self - .provider - .raw_request::<_, ()>( - "anvil_setBalance".into(), - [Address(address).to_string(), "1100000000000000000".into()], - ) - .await - .unwrap(); - - let value = U256::from_str_radix("1000000000000000000", 10).unwrap(); - let tx = ethereum_serai::alloy::consensus::TxLegacy { - chain_id: None, - nonce: 0, - gas_price: 1_000_000_000u128, - gas_limit: 200_000, - to: ethereum_serai::alloy::primitives::TxKind::Call(send_to.0.into()), - // 1 ETH - value, - input: ethereum_serai::router::abi::inInstructionCall::new(( - [0; 20].into(), - value, - vec![].into(), - )) - .abi_encode() - .into(), - }; - - use ethereum_serai::alloy::{ - primitives::{Parity, Signature}, - consensus::SignableTransaction, - }; - let sig = k256::ecdsa::SigningKey::from(k256::elliptic_curve::NonZeroScalar::new(key).unwrap()) - .sign_prehash_recoverable(tx.signature_hash().as_ref()) - .unwrap(); - - let mut bytes = vec![]; - let parity = Parity::NonEip155(Parity::from(sig.1).y_parity()); - tx.encode_with_signature_fields(&Signature::from(sig).with_parity(parity), &mut bytes); - let pending_tx = self.provider.send_raw_transaction(&bytes).await.ok().unwrap(); - - // Mine an epoch containing this TX - self.mine_block().await; - assert!(pending_tx.get_receipt().await.unwrap().status()); - // Yield the freshly mined block - self.get_block(self.get_latest_block_number().await.unwrap()).await.unwrap() - } -} diff --git a/processor/src/networks/mod.rs b/processor/src/networks/mod.rs deleted file mode 100644 index 9cfeff89..00000000 --- a/processor/src/networks/mod.rs +++ /dev/null @@ -1,655 +0,0 @@ -use core::{fmt::Debug, time::Duration}; -use std::{io, collections::HashMap}; - -use async_trait::async_trait; -use thiserror::Error; - -use frost::{ - curve::{Ciphersuite, Curve}, - ThresholdKeys, - sign::PreprocessMachine, -}; - -use serai_client::primitives::{ExternalBalance, ExternalNetworkId}; - -use log::error; - -use tokio::time::sleep; - -#[cfg(feature = "bitcoin")] -pub mod bitcoin; -#[cfg(feature = "bitcoin")] -pub use self::bitcoin::Bitcoin; - -#[cfg(feature = "ethereum")] -pub mod ethereum; -#[cfg(feature = "ethereum")] -pub use 
ethereum::Ethereum; - -#[cfg(feature = "monero")] -pub mod monero; -#[cfg(feature = "monero")] -pub use monero::Monero; - -use crate::{Payment, Plan, multisigs::scheduler::Scheduler}; - -#[derive(Clone, Copy, Error, Debug)] -pub enum NetworkError { - #[error("failed to connect to network daemon")] - ConnectionError, -} - -pub trait Id: - Send + Sync + Clone + Default + PartialEq + AsRef<[u8]> + AsMut<[u8]> + Debug -{ -} -impl + AsMut<[u8]> + Debug> Id for I {} - -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] -pub enum OutputType { - // Needs to be processed/sent up to Substrate - External, - - // Given a known output set, and a known series of outbound transactions, we should be able to - // form a completely deterministic schedule S. The issue is when S has TXs which spend prior TXs - // in S (which is needed for our logarithmic scheduling). In order to have the descendant TX, say - // S[1], build off S[0], we need to observe when S[0] is included on-chain. - // - // We cannot. - // - // Monero (and other privacy coins) do not expose their UTXO graphs. Even if we know how to - // create S[0], and the actual payment info behind it, we cannot observe it on the blockchain - // unless we participated in creating it. Locking the entire schedule, when we cannot sign for - // the entire schedule at once, to a single signing set isn't feasible. - // - // While any member of the active signing set can provide data enabling other signers to - // participate, it's several KB of data which we then have to code communication for. - // The other option is to simply not observe S[0]. Instead, observe a TX with an identical output - // to the one in S[0] we intended to use for S[1]. It's either from S[0], or Eve, a malicious - // actor, has sent us a forged TX which is... equally as usable? so who cares? - // - // The only issue is if we have multiple outputs on-chain with identical amounts and purposes. - // Accordingly, when the scheduler makes a plan for when a specific output is available, it - // shouldn't write that plan. It should *push* that plan to a queue of plans to perform when - // instances of that output occur. 
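The "push that plan to a queue" approach described above can be pictured with a minimal sketch. Everything here (PendingPlan, keying deferred plans by expected amount) is hypothetical and only illustrates deferring a plan until an output matching its expected value appears, regardless of whether that output came from our own S[0] or from a third party:

use std::collections::HashMap;

// Hypothetical stand-ins for the scheduler's real types.
type Amount = u64;
#[derive(Clone)]
struct PendingPlan;

struct PlanQueue {
  // Plans deferred until an output of this amount is observed for the branch address.
  by_expected_amount: HashMap<Amount, Vec<PendingPlan>>,
}

impl PlanQueue {
  // Called when the scheduler builds S[1] off an output S[0] is expected to create.
  fn defer(&mut self, expected_amount: Amount, plan: PendingPlan) {
    self.by_expected_amount.entry(expected_amount).or_default().push(plan);
  }

  // Called when scanning observes a Branch output with this amount; any matching
  // output is equally usable, so the deferred plans become executable.
  fn on_branch_output(&mut self, amount: Amount) -> Vec<PendingPlan> {
    self.by_expected_amount.remove(&amount).unwrap_or_default()
  }
}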
- Branch, - - // Should be added to the available UTXO pool with no further action - Change, - - // Forwarded output from the prior multisig - Forwarded, -} - -impl OutputType { - fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(&[match self { - OutputType::External => 0, - OutputType::Branch => 1, - OutputType::Change => 2, - OutputType::Forwarded => 3, - }]) - } - - fn read(reader: &mut R) -> io::Result { - let mut byte = [0; 1]; - reader.read_exact(&mut byte)?; - Ok(match byte[0] { - 0 => OutputType::External, - 1 => OutputType::Branch, - 2 => OutputType::Change, - 3 => OutputType::Forwarded, - _ => Err(io::Error::other("invalid OutputType"))?, - }) - } -} - -pub trait Output: Send + Sync + Sized + Clone + PartialEq + Eq + Debug { - type Id: 'static + Id; - - fn kind(&self) -> OutputType; - - fn id(&self) -> Self::Id; - fn tx_id(&self) -> >::Id; // TODO: Review use of - fn key(&self) -> ::G; - - fn presumed_origin(&self) -> Option; - - fn balance(&self) -> ExternalBalance; - fn data(&self) -> &[u8]; - - fn write(&self, writer: &mut W) -> io::Result<()>; - fn read(reader: &mut R) -> io::Result; -} - -#[async_trait] -pub trait Transaction: Send + Sync + Sized + Clone + PartialEq + Debug { - type Id: 'static + Id; - fn id(&self) -> Self::Id; - // TODO: Move to ExternalBalance - #[cfg(test)] - async fn fee(&self, network: &N) -> u64; -} - -pub trait SignableTransaction: Send + Sync + Clone + Debug { - // TODO: Move to ExternalBalance - fn fee(&self) -> u64; -} - -pub trait Eventuality: Send + Sync + Clone + PartialEq + Debug { - type Claim: Send + Sync + Clone + PartialEq + Default + AsRef<[u8]> + AsMut<[u8]> + Debug; - type Completion: Send + Sync + Clone + PartialEq + Debug; - - fn lookup(&self) -> Vec; - - fn read(reader: &mut R) -> io::Result; - fn serialize(&self) -> Vec; - - fn claim(completion: &Self::Completion) -> Self::Claim; - - // TODO: Make a dedicated Completion trait - fn serialize_completion(completion: &Self::Completion) -> Vec; - fn read_completion(reader: &mut R) -> io::Result; -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct EventualitiesTracker { - // Lookup property (input, nonce, TX extra...) -> (plan ID, eventuality) - map: HashMap, ([u8; 32], E)>, - // Block number we've scanned these eventualities too - block_number: usize, -} - -impl EventualitiesTracker { - pub fn new() -> Self { - EventualitiesTracker { map: HashMap::new(), block_number: usize::MAX } - } - - pub fn register(&mut self, block_number: usize, id: [u8; 32], eventuality: E) { - log::info!("registering eventuality for {}", hex::encode(id)); - - let lookup = eventuality.lookup(); - if self.map.contains_key(&lookup) { - panic!("registering an eventuality multiple times or lookup collision"); - } - self.map.insert(lookup, (id, eventuality)); - // If our self tracker already went past this block number, set it back - self.block_number = self.block_number.min(block_number); - } - - pub fn drop(&mut self, id: [u8; 32]) { - // O(n) due to the lack of a reverse lookup - let mut found_key = None; - for (key, value) in &self.map { - if value.0 == id { - found_key = Some(key.clone()); - break; - } - } - - if let Some(key) = found_key { - self.map.remove(&key); - } - } -} - -impl Default for EventualitiesTracker { - fn default() -> Self { - Self::new() - } -} - -#[async_trait] -pub trait Block: Send + Sync + Sized + Clone + Debug { - // This is currently bounded to being 32 bytes. 
- type Id: 'static + Id; - fn id(&self) -> Self::Id; - fn parent(&self) -> Self::Id; - /// The monotonic network time at this block. - /// - /// This call is presumed to be expensive and should only be called sparingly. - async fn time(&self, rpc: &N) -> u64; -} - -// The post-fee value of an expected branch. -pub struct PostFeeBranch { - pub expected: u64, - pub actual: Option, -} - -// Return the PostFeeBranches needed when dropping a transaction -fn drop_branches( - key: ::G, - payments: &[Payment], -) -> Vec { - let mut branch_outputs = vec![]; - for payment in payments { - if Some(&payment.address) == N::branch_address(key).as_ref() { - branch_outputs.push(PostFeeBranch { expected: payment.balance.amount.0, actual: None }); - } - } - branch_outputs -} - -pub struct PreparedSend { - /// None for the transaction if the SignableTransaction was dropped due to lack of value. - pub tx: Option<(N::SignableTransaction, N::Eventuality)>, - pub post_fee_branches: Vec, - /// The updated operating costs after preparing this transaction. - pub operating_costs: u64, -} - -#[async_trait] -pub trait Network: 'static + Send + Sync + Clone + PartialEq + Debug { - /// The elliptic curve used for this network. - type Curve: Curve; - - /// The type representing the transaction for this network. - type Transaction: Transaction; // TODO: Review use of - /// The type representing the block for this network. - type Block: Block; - - /// The type containing all information on a scanned output. - // This is almost certainly distinct from the network's native output type. - type Output: Output; - /// The type containing all information on a planned transaction, waiting to be signed. - type SignableTransaction: SignableTransaction; - /// The type containing all information to check if a plan was completed. - /// - /// This must be binding to both the outputs expected and the plan ID. - type Eventuality: Eventuality; - /// The FROST machine to sign a transaction. - type TransactionMachine: PreprocessMachine< - Signature = ::Completion, - >; - - /// The scheduler for this network. - type Scheduler: Scheduler; - - /// The type representing an address. - // This should NOT be a String, yet a tailored type representing an efficient binary encoding, - // as detailed in the integration documentation. - type Address: Send - + Sync - + Clone - + PartialEq - + Eq - + Debug - + ToString - + TryInto> - + TryFrom>; - - /// Network ID for this network. - const NETWORK: ExternalNetworkId; - /// String ID for this network. - const ID: &'static str; - /// The estimated amount of time a block will take. - const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize; - /// The amount of confirmations required to consider a block 'final'. - const CONFIRMATIONS: usize; - /// The maximum amount of outputs which will fit in a TX. - /// This should be equal to MAX_INPUTS unless one is specifically limited. - /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size. - const MAX_OUTPUTS: usize; - - /// Minimum output value which will be handled. - /// - /// For any received output, there's the cost to spend the output. This value MUST exceed the - /// cost to spend said output, and should by a notable margin (not just 2x, yet an order of - /// magnitude). - // TODO: Dust needs to be diversified per ExternalCoin - const DUST: u64; - - /// The cost to perform input aggregation with a 2-input 1-output TX. - const COST_TO_AGGREGATE: u64; - - /// Tweak keys for this network. 
- fn tweak_keys(key: &mut ThresholdKeys); - - /// Address for the given group key to receive external coins to. - #[cfg(test)] - async fn external_address(&self, key: ::G) -> Self::Address; - /// Address for the given group key to use for scheduled branches. - fn branch_address(key: ::G) -> Option; - /// Address for the given group key to use for change. - fn change_address(key: ::G) -> Option; - /// Address for forwarded outputs from prior multisigs. - /// - /// forward_address must only return None if explicit forwarding isn't necessary. - fn forward_address(key: ::G) -> Option; - - /// Get the latest block's number. - async fn get_latest_block_number(&self) -> Result; - /// Get a block by its number. - async fn get_block(&self, number: usize) -> Result; - - /// Get the latest block's number, retrying until success. - async fn get_latest_block_number_with_retries(&self) -> usize { - loop { - match self.get_latest_block_number().await { - Ok(number) => { - return number; - } - Err(e) => { - error!( - "couldn't get the latest block number in the with retry get_latest_block_number: {e:?}", - ); - sleep(Duration::from_secs(10)).await; - } - } - } - } - - /// Get a block, retrying until success. - async fn get_block_with_retries(&self, block_number: usize) -> Self::Block { - loop { - match self.get_block(block_number).await { - Ok(block) => { - return block; - } - Err(e) => { - error!("couldn't get block {block_number} in the with retry get_block: {:?}", e); - sleep(Duration::from_secs(10)).await; - } - } - } - } - - /// Get the outputs within a block for a specific key. - async fn get_outputs( - &self, - block: &Self::Block, - key: ::G, - ) -> Vec; - - /// Get the registered eventualities completed within this block, and any prior blocks which - /// registered eventualities may have been completed in. - /// - /// This may panic if not fed a block greater than the tracker's block number. - /// - /// Plan ID -> (block number, TX ID, completion) - // TODO: get_eventuality_completions_internal + provided get_eventuality_completions for common - // code - // TODO: Consider having this return the Transaction + the Completion? - // Or Transaction with extract_completion? - async fn get_eventuality_completions( - &self, - eventualities: &mut EventualitiesTracker, - block: &Self::Block, - ) -> HashMap< - [u8; 32], - ( - usize, - >::Id, - ::Completion, - ), - >; - - /// Returns the needed fee to fulfill this Plan at this fee rate. - /// - /// Returns None if this Plan isn't fulfillable (such as when the fee exceeds the input value). - async fn needed_fee( - &self, - block_number: usize, - inputs: &[Self::Output], - payments: &[Payment], - change: &Option, - ) -> Result, NetworkError>; - - /// Create a SignableTransaction for the given Plan. - /// - /// The expected flow is: - /// 1) Call needed_fee - /// 2) If the Plan is fulfillable, amortize the fee - /// 3) Call signable_transaction *which MUST NOT return None if the above was done properly* - /// - /// This takes a destructured Plan as some of these arguments are malleated from the original - /// Plan. - // TODO: Explicit AmortizedPlan? - #[allow(clippy::too_many_arguments)] - async fn signable_transaction( - &self, - block_number: usize, - plan_id: &[u8; 32], - key: ::G, - inputs: &[Self::Output], - payments: &[Payment], - change: &Option, - scheduler_addendum: &>::Addendum, - ) -> Result, NetworkError>; - - /// Prepare a SignableTransaction for a transaction. 
- /// - /// This must not persist anything as we will prepare Plans we never intend to execute. - async fn prepare_send( - &self, - block_number: usize, - plan: Plan, - operating_costs: u64, - ) -> Result, NetworkError> { - // Sanity check this has at least one output planned - assert!((!plan.payments.is_empty()) || plan.change.is_some()); - - let plan_id = plan.id(); - let Plan { key, inputs, mut payments, change, scheduler_addendum } = plan; - let theoretical_change_amount = if change.is_some() { - inputs.iter().map(|input| input.balance().amount.0).sum::() - - payments.iter().map(|payment| payment.balance.amount.0).sum::() - } else { - 0 - }; - - let Some(tx_fee) = self.needed_fee(block_number, &inputs, &payments, &change).await? else { - // This Plan is not fulfillable - // TODO: Have Plan explicitly distinguish payments and branches in two separate Vecs? - return Ok(PreparedSend { - tx: None, - // Have all of its branches dropped - post_fee_branches: drop_branches(key, &payments), - // This plan expects a change output valued at sum(inputs) - sum(outputs) - // Since we can no longer create this change output, it becomes an operating cost - // TODO: Look at input restoration to reduce this operating cost - operating_costs: operating_costs + - if change.is_some() { theoretical_change_amount } else { 0 }, - }); - }; - - // Amortize the fee over the plan's payments - let (post_fee_branches, mut operating_costs) = (|| { - // If we're creating a change output, letting us recoup coins, amortize the operating costs - // as well - let total_fee = tx_fee + if change.is_some() { operating_costs } else { 0 }; - - let original_outputs = payments.iter().map(|payment| payment.balance.amount.0).sum::(); - // If this isn't enough for the total fee, drop and move on - if original_outputs < total_fee { - let mut remaining_operating_costs = operating_costs; - if change.is_some() { - // Operating costs increase by the TX fee - remaining_operating_costs += tx_fee; - // Yet decrease by the payments we managed to drop - remaining_operating_costs = remaining_operating_costs.saturating_sub(original_outputs); - } - return (drop_branches(key, &payments), remaining_operating_costs); - } - - let initial_payment_amounts = - payments.iter().map(|payment| payment.balance.amount.0).collect::>(); - - // Amortize the transaction fee across outputs - let mut remaining_fee = total_fee; - // Run as many times as needed until we can successfully subtract this fee - while remaining_fee != 0 { - // This shouldn't be a / by 0 as these payments have enough value to cover the fee - let this_iter_fee = remaining_fee / u64::try_from(payments.len()).unwrap(); - let mut overage = remaining_fee % u64::try_from(payments.len()).unwrap(); - for payment in &mut payments { - let this_payment_fee = this_iter_fee + overage; - // Only subtract the overage once - overage = 0; - - let subtractable = payment.balance.amount.0.min(this_payment_fee); - remaining_fee -= subtractable; - payment.balance.amount.0 -= subtractable; - } - } - - // If any payment is now below the dust threshold, set its value to 0 so it'll be dropped - for payment in &mut payments { - if payment.balance.amount.0 < Self::DUST { - payment.balance.amount.0 = 0; - } - } - - // Note the branch outputs' new values - let mut branch_outputs = vec![]; - for (initial_amount, payment) in initial_payment_amounts.into_iter().zip(&payments) { - if Some(&payment.address) == Self::branch_address(key).as_ref() { - branch_outputs.push(PostFeeBranch { - expected: initial_amount, - actual: 
if payment.balance.amount.0 == 0 { - None - } else { - Some(payment.balance.amount.0) - }, - }); - } - } - - // Drop payments now worth 0 - payments = payments - .drain(..) - .filter(|payment| { - if payment.balance.amount.0 != 0 { - true - } else { - log::debug!("dropping dust payment from plan {}", hex::encode(plan_id)); - false - } - }) - .collect(); - - // Sanity check the fee was successfully amortized - let new_outputs = payments.iter().map(|payment| payment.balance.amount.0).sum::(); - assert!((new_outputs + total_fee) <= original_outputs); - - ( - branch_outputs, - if change.is_none() { - // If the change is None, this had no effect on the operating costs - operating_costs - } else { - // Since the change is some, and we successfully amortized, the operating costs were - // recouped - 0 - }, - ) - })(); - - let Some(tx) = self - .signable_transaction( - block_number, - &plan_id, - key, - &inputs, - &payments, - &change, - &scheduler_addendum, - ) - .await? - else { - panic!( - "{}. {}: {}, {}: {:?}, {}: {:?}, {}: {:?}, {}: {}, {}: {:?}", - "signable_transaction returned None for a TX we prior successfully calculated the fee for", - "id", - hex::encode(plan_id), - "inputs", - inputs, - "post-amortization payments", - payments, - "change", - change, - "successfully amoritized fee", - tx_fee, - "scheduler's addendum", - scheduler_addendum, - ) - }; - - if change.is_some() { - let on_chain_expected_change = - inputs.iter().map(|input| input.balance().amount.0).sum::() - - payments.iter().map(|payment| payment.balance.amount.0).sum::() - - tx_fee; - // If the change value is less than the dust threshold, it becomes an operating cost - // This may be slightly inaccurate as dropping payments may reduce the fee, raising the - // change above dust - // That's fine since it'd have to be in a very precarious state AND then it's over-eager in - // tabulating costs - if on_chain_expected_change < Self::DUST { - operating_costs += theoretical_change_amount; - } - } - - Ok(PreparedSend { tx: Some(tx), post_fee_branches, operating_costs }) - } - - /// Attempt to sign a SignableTransaction. - async fn attempt_sign( - &self, - keys: ThresholdKeys, - transaction: Self::SignableTransaction, - ) -> Result; - - /// Publish a completion. - async fn publish_completion( - &self, - completion: &::Completion, - ) -> Result<(), NetworkError>; - - /// Confirm a plan was completed by the specified transaction, per our bounds. - /// - /// Returns Err if there was an error with the confirmation methodology. - /// Returns Ok(None) if this is not a valid completion. - /// Returns Ok(Some(_)) with the completion if it's valid. - async fn confirm_completion( - &self, - eventuality: &Self::Eventuality, - claim: &::Claim, - ) -> Result::Completion>, NetworkError>; - - /// Get a block's number by its ID. - #[cfg(test)] - async fn get_block_number(&self, id: &>::Id) -> usize; - - /// Check an Eventuality is fulfilled by a claim. - #[cfg(test)] - async fn check_eventuality_by_claim( - &self, - eventuality: &Self::Eventuality, - claim: &::Claim, - ) -> bool; - - /// Get a transaction by the Eventuality it completes. - #[cfg(test)] - async fn get_transaction_by_eventuality( - &self, - block: usize, - eventuality: &Self::Eventuality, - ) -> Self::Transaction; - - #[cfg(test)] - async fn mine_block(&self); - - /// Sends to the specified address. - /// Additionally mines enough blocks so that the TX is past the confirmation depth. 
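The fee amortization in prepare_send above splits the remaining fee evenly across payments, hands the division remainder to the first payment only, caps each deduction at the payment's value, and repeats until the whole fee is covered. A self-contained rerun of that loop on concrete numbers (illustrative only; it assumes, as prepare_send already verified, that the payments can cover the fee):

// Mirrors the amortization loop: even split, one-time overage, capped per payment.
fn amortize(mut payments: Vec<u64>, total_fee: u64) -> Vec<u64> {
  // Assumes the caller checked payments.iter().sum::<u64>() >= total_fee.
  let mut remaining_fee = total_fee;
  while remaining_fee != 0 {
    let this_iter_fee = remaining_fee / u64::try_from(payments.len()).unwrap();
    let mut overage = remaining_fee % u64::try_from(payments.len()).unwrap();
    for payment in &mut payments {
      let this_payment_fee = this_iter_fee + overage;
      // Only add the overage to the first payment
      overage = 0;
      let subtractable = (*payment).min(this_payment_fee);
      remaining_fee -= subtractable;
      *payment -= subtractable;
    }
  }
  payments
}

fn main() {
  // Amortizing a fee of 10 over payments of 6, 3, and 2: the first pass charges
  // 4 (3 plus the 1-unit overage), 3, and 2 (capped), leaving 1 unit of fee,
  // which the second pass takes from the first payment.
  assert_eq!(amortize(vec![6, 3, 2], 10), vec![1, 0, 0]);
}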
- #[cfg(test)] - async fn test_send(&self, key: Self::Address) -> Self::Block; -} - -pub trait UtxoNetwork: Network { - /// The maximum amount of inputs which will fit in a TX. - /// This should be equal to MAX_OUTPUTS unless one is specifically limited. - /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size. - const MAX_INPUTS: usize; -} diff --git a/processor/src/networks/monero.rs b/processor/src/networks/monero.rs deleted file mode 100644 index 4e70c002..00000000 --- a/processor/src/networks/monero.rs +++ /dev/null @@ -1,807 +0,0 @@ -use std::{time::Duration, collections::HashMap, io}; - -use async_trait::async_trait; - -use zeroize::Zeroizing; - -use rand_core::SeedableRng; -use rand_chacha::ChaCha20Rng; - -use transcript::{Transcript, RecommendedTranscript}; - -use ciphersuite::group::{ff::Field, Group}; -use dalek_ff_group::{Scalar, EdwardsPoint}; -use frost::{curve::Ed25519, ThresholdKeys}; - -use monero_simple_request_rpc::SimpleRequestRpc; -use monero_wallet::{ - ringct::RctType, - transaction::Transaction, - block::Block, - rpc::{FeeRate, RpcError, Rpc}, - address::{Network as MoneroNetwork, SubaddressIndex}, - ViewPair, GuaranteedViewPair, WalletOutput, OutputWithDecoys, GuaranteedScanner, - send::{ - SendError, Change, SignableTransaction as MSignableTransaction, Eventuality, TransactionMachine, - }, -}; -#[cfg(test)] -use monero_wallet::Scanner; - -use tokio::time::sleep; - -pub use serai_client::{ - primitives::{MAX_DATA_LEN, ExternalCoin, ExternalNetworkId, Amount, ExternalBalance}, - networks::monero::Address, -}; - -use crate::{ - Payment, additional_key, - networks::{ - NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait, - Transaction as TransactionTrait, SignableTransaction as SignableTransactionTrait, - Eventuality as EventualityTrait, EventualitiesTracker, Network, UtxoNetwork, - }, - multisigs::scheduler::utxo::Scheduler, -}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Output(WalletOutput); - -const EXTERNAL_SUBADDRESS: Option = SubaddressIndex::new(0, 0); -const BRANCH_SUBADDRESS: Option = SubaddressIndex::new(1, 0); -const CHANGE_SUBADDRESS: Option = SubaddressIndex::new(2, 0); -const FORWARD_SUBADDRESS: Option = SubaddressIndex::new(3, 0); - -impl OutputTrait for Output { - // While we could use (tx, o), using the key ensures we won't be susceptible to the burning bug. - // While we already are immune, thanks to using featured address, this doesn't hurt and is - // technically more efficient. - type Id = [u8; 32]; - - fn kind(&self) -> OutputType { - match self.0.subaddress() { - EXTERNAL_SUBADDRESS => OutputType::External, - BRANCH_SUBADDRESS => OutputType::Branch, - CHANGE_SUBADDRESS => OutputType::Change, - FORWARD_SUBADDRESS => OutputType::Forwarded, - _ => panic!("unrecognized address was scanned for"), - } - } - - fn id(&self) -> Self::Id { - self.0.key().compress().to_bytes() - } - - fn tx_id(&self) -> [u8; 32] { - self.0.transaction() - } - - fn key(&self) -> EdwardsPoint { - EdwardsPoint(self.0.key() - (EdwardsPoint::generator().0 * self.0.key_offset())) - } - - fn presumed_origin(&self) -> Option
{ - None - } - - fn balance(&self) -> ExternalBalance { - ExternalBalance { coin: ExternalCoin::Monero, amount: Amount(self.0.commitment().amount) } - } - - fn data(&self) -> &[u8] { - let Some(data) = self.0.arbitrary_data().first() else { return &[] }; - // If the data is too large, prune it - // This should cause decoding the instruction to fail, and trigger a refund as appropriate - if data.len() > usize::try_from(MAX_DATA_LEN).unwrap() { - return &[]; - } - data - } - - fn write(&self, writer: &mut W) -> io::Result<()> { - self.0.write(writer)?; - Ok(()) - } - - fn read(reader: &mut R) -> io::Result { - Ok(Output(WalletOutput::read(reader)?)) - } -} - -// TODO: Consider ([u8; 32], TransactionPruned) -#[async_trait] -impl TransactionTrait for Transaction { - type Id = [u8; 32]; - fn id(&self) -> Self::Id { - self.hash() - } - - #[cfg(test)] - async fn fee(&self, _: &Monero) -> u64 { - match self { - Transaction::V1 { .. } => panic!("v1 TX in test-only function"), - Transaction::V2 { ref proofs, .. } => proofs.as_ref().unwrap().base.fee, - } - } -} - -impl EventualityTrait for Eventuality { - type Claim = [u8; 32]; - type Completion = Transaction; - - // Use the TX extra to look up potential matches - // While anyone can forge this, a transaction with distinct outputs won't actually match - // Extra includess the one time keys which are derived from the plan ID, so a collision here is a - // hash collision - fn lookup(&self) -> Vec { - self.extra() - } - - fn read(reader: &mut R) -> io::Result { - Eventuality::read(reader) - } - fn serialize(&self) -> Vec { - self.serialize() - } - - fn claim(tx: &Transaction) -> [u8; 32] { - tx.id() - } - fn serialize_completion(completion: &Transaction) -> Vec { - completion.serialize() - } - fn read_completion(reader: &mut R) -> io::Result { - Transaction::read(reader) - } -} - -#[derive(Clone, Debug)] -pub struct SignableTransaction(MSignableTransaction); -impl SignableTransactionTrait for SignableTransaction { - fn fee(&self) -> u64 { - self.0.necessary_fee() - } -} - -#[async_trait] -impl BlockTrait for Block { - type Id = [u8; 32]; - fn id(&self) -> Self::Id { - self.hash() - } - - fn parent(&self) -> Self::Id { - self.header.previous - } - - async fn time(&self, rpc: &Monero) -> u64 { - // Constant from Monero - const BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW: usize = 60; - - // If Monero doesn't have enough blocks to build a window, it doesn't define a network time - if (self.number().unwrap() + 1) < BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW { - // Use the block number as the time - return u64::try_from(self.number().unwrap()).unwrap(); - } - - let mut timestamps = vec![self.header.timestamp]; - let mut parent = self.parent(); - while timestamps.len() < BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW { - let mut parent_block; - while { - parent_block = rpc.rpc.get_block(parent).await; - parent_block.is_err() - } { - log::error!("couldn't get parent block when trying to get block time: {parent_block:?}"); - sleep(Duration::from_secs(5)).await; - } - let parent_block = parent_block.unwrap(); - timestamps.push(parent_block.header.timestamp); - parent = parent_block.parent(); - - if parent_block.number().unwrap() == 0 { - break; - } - } - timestamps.sort(); - - // Because 60 has two medians, Monero's epee picks the in-between value, calculated by the - // following formula (from the "get_mid" function) - let n = timestamps.len() / 2; - let a = timestamps[n - 1]; - let b = timestamps[n]; - #[rustfmt::skip] // Enables Ctrl+F'ing for everything after the `= ` - let res = (a/2) + 
(b/2) + ((a - 2*(a/2)) + (b - 2*(b/2)))/2; - // Technically, res may be 1 if all prior blocks had a timestamp by 0, which would break - // monotonicity with our above definition of height as time - // Monero also solely requires the block's time not be less than the median, it doesn't ensure - // it advances the median forward - // Ensure monotonicity despite both these issues by adding the block number to the median time - res + u64::try_from(self.number().unwrap()).unwrap() - } -} - -#[derive(Clone, Debug)] -pub struct Monero { - rpc: SimpleRequestRpc, -} -// Shim required for testing/debugging purposes due to generic arguments also necessitating trait -// bounds -impl PartialEq for Monero { - fn eq(&self, _: &Self) -> bool { - true - } -} -impl Eq for Monero {} - -#[allow(clippy::needless_pass_by_value)] // Needed to satisfy API expectations -fn map_rpc_err(err: RpcError) -> NetworkError { - if let RpcError::InvalidNode(reason) = &err { - log::error!("Monero RpcError::InvalidNode({reason})"); - } else { - log::debug!("Monero RpcError {err:?}"); - } - NetworkError::ConnectionError -} - -enum MakeSignableTransactionResult { - Fee(u64), - SignableTransaction(MSignableTransaction), -} - -impl Monero { - pub async fn new(url: String) -> Monero { - let mut res = SimpleRequestRpc::new(url.clone()).await; - while let Err(e) = res { - log::error!("couldn't connect to Monero node: {e:?}"); - tokio::time::sleep(Duration::from_secs(5)).await; - res = SimpleRequestRpc::new(url.clone()).await; - } - Monero { rpc: res.unwrap() } - } - - fn view_pair(spend: EdwardsPoint) -> GuaranteedViewPair { - GuaranteedViewPair::new(spend.0, Zeroizing::new(additional_key::(0).0)).unwrap() - } - - fn address_internal(spend: EdwardsPoint, subaddress: Option) -> Address { - Address::new(Self::view_pair(spend).address(MoneroNetwork::Mainnet, subaddress, None)).unwrap() - } - - fn scanner(spend: EdwardsPoint) -> GuaranteedScanner { - let mut scanner = GuaranteedScanner::new(Self::view_pair(spend)); - debug_assert!(EXTERNAL_SUBADDRESS.is_none()); - scanner.register_subaddress(BRANCH_SUBADDRESS.unwrap()); - scanner.register_subaddress(CHANGE_SUBADDRESS.unwrap()); - scanner.register_subaddress(FORWARD_SUBADDRESS.unwrap()); - scanner - } - - async fn median_fee(&self, block: &Block) -> Result { - let mut fees = vec![]; - for tx_hash in &block.transactions { - let tx = - self.rpc.get_transaction(*tx_hash).await.map_err(|_| NetworkError::ConnectionError)?; - // Only consider fees from RCT transactions, else the fee property read wouldn't be accurate - let fee = match &tx { - Transaction::V2 { proofs: Some(proofs), .. } => proofs.base.fee, - _ => continue, - }; - fees.push(fee / u64::try_from(tx.weight()).unwrap()); - } - fees.sort(); - let fee = fees.get(fees.len() / 2).copied().unwrap_or(0); - - // TODO: Set a sane minimum fee - const MINIMUM_FEE: u64 = 1_500_000; - Ok(FeeRate::new(fee.max(MINIMUM_FEE), 10000).unwrap()) - } - - async fn make_signable_transaction( - &self, - block_number: usize, - plan_id: &[u8; 32], - inputs: &[Output], - payments: &[Payment], - change: &Option
, - calculating_fee: bool, - ) -> Result, NetworkError> { - for payment in payments { - assert_eq!(payment.balance.coin, ExternalCoin::Monero); - } - - // TODO2: Use an fee representative of several blocks, cached inside Self - let block_for_fee = self.get_block(block_number).await?; - let fee_rate = self.median_fee(&block_for_fee).await?; - - // Determine the RCT proofs to make based off the hard fork - // TODO: Make a fn for this block which is duplicated with tests - let rct_type = match block_for_fee.header.hardfork_version { - 14 => RctType::ClsagBulletproof, - 15 | 16 => RctType::ClsagBulletproofPlus, - _ => panic!("Monero hard forked and the processor wasn't updated for it"), - }; - - let mut transcript = - RecommendedTranscript::new(b"Serai Processor Monero Transaction Transcript"); - transcript.append_message(b"plan", plan_id); - - // All signers need to select the same decoys - // All signers use the same height and a seeded RNG to make sure they do so. - let mut inputs_actual = Vec::with_capacity(inputs.len()); - for input in inputs { - inputs_actual.push( - OutputWithDecoys::fingerprintable_deterministic_new( - &mut ChaCha20Rng::from_seed(transcript.rng_seed(b"decoys")), - &self.rpc, - // TODO: Have Decoys take RctType - match rct_type { - RctType::ClsagBulletproof => 11, - RctType::ClsagBulletproofPlus => 16, - _ => panic!("selecting decoys for an unsupported RctType"), - }, - block_number + 1, - input.0.clone(), - ) - .await - .map_err(map_rpc_err)?, - ); - } - - // Monero requires at least two outputs - // If we only have one output planned, add a dummy payment - let mut payments = payments.to_vec(); - let outputs = payments.len() + usize::from(u8::from(change.is_some())); - if outputs == 0 { - return Ok(None); - } else if outputs == 1 { - payments.push(Payment { - address: Address::new( - ViewPair::new(EdwardsPoint::generator().0, Zeroizing::new(Scalar::ONE.0)) - .unwrap() - .legacy_address(MoneroNetwork::Mainnet), - ) - .unwrap(), - balance: ExternalBalance { coin: ExternalCoin::Monero, amount: Amount(0) }, - data: None, - }); - } - - let payments = payments - .into_iter() - .map(|payment| (payment.address.into(), payment.balance.amount.0)) - .collect::>(); - - match MSignableTransaction::new( - rct_type, - // Use the plan ID as the outgoing view key - Zeroizing::new(*plan_id), - inputs_actual, - payments, - Change::fingerprintable(change.as_ref().map(|change| change.clone().into())), - vec![], - fee_rate, - ) { - Ok(signable) => Ok(Some({ - if calculating_fee { - MakeSignableTransactionResult::Fee(signable.necessary_fee()) - } else { - MakeSignableTransactionResult::SignableTransaction(signable) - } - })), - Err(e) => match e { - SendError::UnsupportedRctType => { - panic!("trying to use an RctType unsupported by monero-wallet") - } - SendError::NoInputs | - SendError::InvalidDecoyQuantity | - SendError::NoOutputs | - SendError::TooManyOutputs | - SendError::NoChange | - SendError::TooMuchArbitraryData | - SendError::TooLargeTransaction | - SendError::WrongPrivateKey => { - panic!("created an invalid Monero transaction: {e}"); - } - SendError::MultiplePaymentIds => { - panic!("multiple payment IDs despite not supporting integrated addresses"); - } - SendError::NotEnoughFunds { inputs, outputs, necessary_fee } => { - log::debug!( - "Monero NotEnoughFunds. 
inputs: {:?}, outputs: {:?}, necessary_fee: {necessary_fee:?}", - inputs, - outputs - ); - match necessary_fee { - Some(necessary_fee) => { - // If we're solely calculating the fee, return the fee this TX will cost - if calculating_fee { - Ok(Some(MakeSignableTransactionResult::Fee(necessary_fee))) - } else { - // If we're actually trying to make the TX, return None - Ok(None) - } - } - // We didn't have enough funds to even cover the outputs - None => { - // Ensure we're not misinterpreting this - assert!(outputs > inputs); - Ok(None) - } - } - } - SendError::MaliciousSerialization | SendError::ClsagError(_) | SendError::FrostError(_) => { - panic!("supposedly unreachable (at this time) Monero error: {e}"); - } - }, - } - } - - #[cfg(test)] - fn test_view_pair() -> ViewPair { - ViewPair::new(*EdwardsPoint::generator(), Zeroizing::new(Scalar::ONE.0)).unwrap() - } - - #[cfg(test)] - fn test_scanner() -> Scanner { - Scanner::new(Self::test_view_pair()) - } - - #[cfg(test)] - fn test_address() -> Address { - Address::new(Self::test_view_pair().legacy_address(MoneroNetwork::Mainnet)).unwrap() - } -} - -#[async_trait] -impl Network for Monero { - type Curve = Ed25519; - - type Transaction = Transaction; - type Block = Block; - - type Output = Output; - type SignableTransaction = SignableTransaction; - type Eventuality = Eventuality; - type TransactionMachine = TransactionMachine; - - type Scheduler = Scheduler; - - type Address = Address; - - const NETWORK: ExternalNetworkId = ExternalNetworkId::Monero; - const ID: &'static str = "Monero"; - const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 120; - const CONFIRMATIONS: usize = 10; - - const MAX_OUTPUTS: usize = 16; - - // 0.01 XMR - const DUST: u64 = 10000000000; - - // TODO - const COST_TO_AGGREGATE: u64 = 0; - - // Monero doesn't require/benefit from tweaking - fn tweak_keys(_: &mut ThresholdKeys) {} - - #[cfg(test)] - async fn external_address(&self, key: EdwardsPoint) -> Address { - Self::address_internal(key, EXTERNAL_SUBADDRESS) - } - - fn branch_address(key: EdwardsPoint) -> Option
{ - Some(Self::address_internal(key, BRANCH_SUBADDRESS)) - } - - fn change_address(key: EdwardsPoint) -> Option
{ - Some(Self::address_internal(key, CHANGE_SUBADDRESS)) - } - - fn forward_address(key: EdwardsPoint) -> Option
{ - Some(Self::address_internal(key, FORWARD_SUBADDRESS)) - } - - async fn get_latest_block_number(&self) -> Result { - // Monero defines height as chain length, so subtract 1 for block number - Ok(self.rpc.get_height().await.map_err(map_rpc_err)? - 1) - } - - async fn get_block(&self, number: usize) -> Result { - Ok( - self - .rpc - .get_block(self.rpc.get_block_hash(number).await.map_err(map_rpc_err)?) - .await - .map_err(map_rpc_err)?, - ) - } - - async fn get_outputs(&self, block: &Block, key: EdwardsPoint) -> Vec { - let outputs = loop { - match self - .rpc - .get_scannable_block(block.clone()) - .await - .map_err(|e| format!("{e:?}")) - .and_then(|block| Self::scanner(key).scan(block).map_err(|e| format!("{e:?}"))) - { - Ok(outputs) => break outputs, - Err(e) => { - log::error!("couldn't scan block {}: {e:?}", hex::encode(block.id())); - sleep(Duration::from_secs(60)).await; - continue; - } - } - }; - - // Miner transactions are required to explicitly state their timelock, so this does exclude - // those (which have an extended timelock we don't want to deal with) - let raw_outputs = outputs.not_additionally_locked(); - let mut outputs = Vec::with_capacity(raw_outputs.len()); - for output in raw_outputs { - // This should be pointless as we shouldn't be able to scan for any other subaddress - // This just helps ensures nothing invalid makes it through - assert!([EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS, FORWARD_SUBADDRESS] - .contains(&output.subaddress())); - - outputs.push(Output(output)); - } - - outputs - } - - async fn get_eventuality_completions( - &self, - eventualities: &mut EventualitiesTracker, - block: &Block, - ) -> HashMap<[u8; 32], (usize, [u8; 32], Transaction)> { - let mut res = HashMap::new(); - if eventualities.map.is_empty() { - return res; - } - - async fn check_block( - network: &Monero, - eventualities: &mut EventualitiesTracker, - block: &Block, - res: &mut HashMap<[u8; 32], (usize, [u8; 32], Transaction)>, - ) { - for hash in &block.transactions { - let tx = { - let mut tx; - while { - tx = network.rpc.get_transaction(*hash).await; - tx.is_err() - } { - log::error!("couldn't get transaction {}: {}", hex::encode(hash), tx.err().unwrap()); - sleep(Duration::from_secs(60)).await; - } - tx.unwrap() - }; - - if let Some((_, eventuality)) = eventualities.map.get(&tx.prefix().extra) { - if eventuality.matches(&tx.clone().into()) { - res.insert( - eventualities.map.remove(&tx.prefix().extra).unwrap().0, - (block.number().unwrap(), tx.id(), tx), - ); - } - } - } - - eventualities.block_number += 1; - assert_eq!(eventualities.block_number, block.number().unwrap()); - } - - for block_num in (eventualities.block_number + 1) .. block.number().unwrap() { - let block = { - let mut block; - while { - block = self.get_block(block_num).await; - block.is_err() - } { - log::error!("couldn't get block {}: {}", block_num, block.err().unwrap()); - sleep(Duration::from_secs(60)).await; - } - block.unwrap() - }; - - check_block(self, eventualities, &block, &mut res).await; - } - - // Also check the current block - check_block(self, eventualities, block, &mut res).await; - assert_eq!(eventualities.block_number, block.number().unwrap()); - - res - } - - async fn needed_fee( - &self, - block_number: usize, - inputs: &[Output], - payments: &[Payment], - change: &Option
, - ) -> Result, NetworkError> { - let res = self - .make_signable_transaction(block_number, &[0; 32], inputs, payments, change, true) - .await?; - let Some(res) = res else { return Ok(None) }; - let MakeSignableTransactionResult::Fee(fee) = res else { - panic!("told make_signable_transaction calculating_fee and got transaction") - }; - Ok(Some(fee)) - } - - async fn signable_transaction( - &self, - block_number: usize, - plan_id: &[u8; 32], - _key: EdwardsPoint, - inputs: &[Output], - payments: &[Payment], - change: &Option
, - (): &(), - ) -> Result, NetworkError> { - let res = self - .make_signable_transaction(block_number, plan_id, inputs, payments, change, false) - .await?; - let Some(res) = res else { return Ok(None) }; - let MakeSignableTransactionResult::SignableTransaction(signable) = res else { - panic!("told make_signable_transaction not calculating_fee and got fee") - }; - - let signable = SignableTransaction(signable); - let eventuality = signable.0.clone().into(); - Ok(Some((signable, eventuality))) - } - - async fn attempt_sign( - &self, - keys: ThresholdKeys, - transaction: SignableTransaction, - ) -> Result { - match transaction.0.clone().multisig(&keys) { - Ok(machine) => Ok(machine), - Err(e) => panic!("failed to create a multisig machine for TX: {e}"), - } - } - - async fn publish_completion(&self, tx: &Transaction) -> Result<(), NetworkError> { - match self.rpc.publish_transaction(tx).await { - Ok(()) => Ok(()), - Err(RpcError::ConnectionError(e)) => { - log::debug!("Monero ConnectionError: {e}"); - Err(NetworkError::ConnectionError)? - } - // TODO: Distinguish already in pool vs double spend (other signing attempt succeeded) vs - // invalid transaction - Err(e) => panic!("failed to publish TX {}: {e}", hex::encode(tx.hash())), - } - } - - async fn confirm_completion( - &self, - eventuality: &Eventuality, - id: &[u8; 32], - ) -> Result, NetworkError> { - let tx = self.rpc.get_transaction(*id).await.map_err(map_rpc_err)?; - if eventuality.matches(&tx.clone().into()) { - Ok(Some(tx)) - } else { - Ok(None) - } - } - - #[cfg(test)] - async fn get_block_number(&self, id: &[u8; 32]) -> usize { - self.rpc.get_block(*id).await.unwrap().number().unwrap() - } - - #[cfg(test)] - async fn check_eventuality_by_claim( - &self, - eventuality: &Self::Eventuality, - claim: &[u8; 32], - ) -> bool { - return eventuality.matches(&self.rpc.get_pruned_transaction(*claim).await.unwrap()); - } - - #[cfg(test)] - async fn get_transaction_by_eventuality( - &self, - block: usize, - eventuality: &Eventuality, - ) -> Transaction { - let block = self.rpc.get_block_by_number(block).await.unwrap(); - for tx in &block.transactions { - let tx = self.rpc.get_transaction(*tx).await.unwrap(); - if eventuality.matches(&tx.clone().into()) { - return tx; - } - } - panic!("block didn't have a transaction for this eventuality") - } - - #[cfg(test)] - async fn mine_block(&self) { - // https://github.com/serai-dex/serai/issues/198 - sleep(std::time::Duration::from_millis(100)).await; - self.rpc.generate_blocks(&Self::test_address().into(), 1).await.unwrap(); - } - - #[cfg(test)] - async fn test_send(&self, address: Address) -> Block { - use zeroize::Zeroizing; - use rand_core::{RngCore, OsRng}; - use monero_wallet::rpc::FeePriority; - - let new_block = self.get_latest_block_number().await.unwrap() + 1; - for _ in 0 .. 
80 { - self.mine_block().await; - } - - let new_block = self.rpc.get_block_by_number(new_block).await.unwrap(); - let mut outputs = Self::test_scanner() - .scan(self.rpc.get_scannable_block(new_block.clone()).await.unwrap()) - .unwrap() - .ignore_additional_timelock(); - let output = outputs.swap_remove(0); - - let amount = output.commitment().amount; - // The dust should always be sufficient for the fee - let fee = Monero::DUST; - - let rct_type = match new_block.header.hardfork_version { - 14 => RctType::ClsagBulletproof, - 15 | 16 => RctType::ClsagBulletproofPlus, - _ => panic!("Monero hard forked and the processor wasn't updated for it"), - }; - - let output = OutputWithDecoys::fingerprintable_deterministic_new( - &mut OsRng, - &self.rpc, - match rct_type { - RctType::ClsagBulletproof => 11, - RctType::ClsagBulletproofPlus => 16, - _ => panic!("selecting decoys for an unsupported RctType"), - }, - self.rpc.get_height().await.unwrap(), - output, - ) - .await - .unwrap(); - - let mut outgoing_view_key = Zeroizing::new([0; 32]); - OsRng.fill_bytes(outgoing_view_key.as_mut()); - let tx = MSignableTransaction::new( - rct_type, - outgoing_view_key, - vec![output], - vec![(address.into(), amount - fee)], - Change::fingerprintable(Some(Self::test_address().into())), - vec![], - self.rpc.get_fee_rate(FeePriority::Unimportant).await.unwrap(), - ) - .unwrap() - .sign(&mut OsRng, &Zeroizing::new(Scalar::ONE.0)) - .unwrap(); - - let block = self.get_latest_block_number().await.unwrap() + 1; - self.rpc.publish_transaction(&tx).await.unwrap(); - for _ in 0 .. 10 { - self.mine_block().await; - } - self.get_block(block).await.unwrap() - } -} - -impl UtxoNetwork for Monero { - // wallet2 will not create a transaction larger than 100kb, and Monero won't relay a transaction - // larger than 150kb. 
This fits within the 100kb mark - // Technically, it can be ~124, yet a small bit of buffer is appreciated - // TODO: Test creating a TX this big - const MAX_INPUTS: usize = 120; -} diff --git a/processor/src/plan.rs b/processor/src/plan.rs deleted file mode 100644 index caadef5a..00000000 --- a/processor/src/plan.rs +++ /dev/null @@ -1,212 +0,0 @@ -use std::io; - -use scale::{Encode, Decode}; - -use transcript::{Transcript, RecommendedTranscript}; -use ciphersuite::group::GroupEncoding; -use frost::curve::Ciphersuite; - -use serai_client::primitives::ExternalBalance; - -use crate::{ - networks::{Output, Network}, - multisigs::scheduler::{SchedulerAddendum, Scheduler}, -}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Payment { - pub address: N::Address, - pub data: Option>, - pub balance: ExternalBalance, -} - -impl Payment { - pub fn transcript(&self, transcript: &mut T) { - transcript.domain_separate(b"payment"); - transcript.append_message(b"address", self.address.to_string().as_bytes()); - if let Some(data) = self.data.as_ref() { - transcript.append_message(b"data", data); - } - transcript.append_message(b"coin", self.balance.coin.encode()); - transcript.append_message(b"amount", self.balance.amount.0.to_le_bytes()); - } - - pub fn write(&self, writer: &mut W) -> io::Result<()> { - // TODO: Don't allow creating Payments with an Address which can't be serialized - let address: Vec = self - .address - .clone() - .try_into() - .map_err(|_| io::Error::other("address couldn't be serialized"))?; - writer.write_all(&u32::try_from(address.len()).unwrap().to_le_bytes())?; - writer.write_all(&address)?; - - writer.write_all(&[u8::from(self.data.is_some())])?; - if let Some(data) = &self.data { - writer.write_all(&u32::try_from(data.len()).unwrap().to_le_bytes())?; - writer.write_all(data)?; - } - - writer.write_all(&self.balance.encode()) - } - - pub fn read(reader: &mut R) -> io::Result { - let mut buf = [0; 4]; - reader.read_exact(&mut buf)?; - let mut address = vec![0; usize::try_from(u32::from_le_bytes(buf)).unwrap()]; - reader.read_exact(&mut address)?; - let address = N::Address::try_from(address).map_err(|_| io::Error::other("invalid address"))?; - - let mut buf = [0; 1]; - reader.read_exact(&mut buf)?; - let data = if buf[0] == 1 { - let mut buf = [0; 4]; - reader.read_exact(&mut buf)?; - let mut data = vec![0; usize::try_from(u32::from_le_bytes(buf)).unwrap()]; - reader.read_exact(&mut data)?; - Some(data) - } else { - None - }; - - let balance = ExternalBalance::decode(&mut scale::IoReader(reader)) - .map_err(|_| io::Error::other("invalid balance"))?; - - Ok(Payment { address, data, balance }) - } -} - -#[derive(Clone, PartialEq)] -pub struct Plan { - pub key: ::G, - pub inputs: Vec, - /// The payments this Plan is intended to create. - /// - /// This should only contain payments leaving Serai. While it is acceptable for users to enter - /// Serai's address(es) as the payment address, as that'll be handled by anything which expects - /// certain properties, Serai as a system MUST NOT use payments for internal transfers. Doing - /// so will cause a reduction in their value by the TX fee/operating costs, creating an - /// incomplete transfer. - pub payments: Vec>, - /// The change this Plan should use. - /// - /// This MUST contain a Serai address. Operating costs may be deducted from the payments in this - /// Plan on the premise that the change address is Serai's, and accordingly, Serai will recoup - /// the operating costs. - // - // TODO: Consider moving to ::G? 
- pub change: Option, - /// The scheduler's additional data. - pub scheduler_addendum: >::Addendum, -} -impl core::fmt::Debug for Plan { - fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - fmt - .debug_struct("Plan") - .field("key", &hex::encode(self.key.to_bytes())) - .field("inputs", &self.inputs) - .field("payments", &self.payments) - .field("change", &self.change.as_ref().map(ToString::to_string)) - .field("scheduler_addendum", &self.scheduler_addendum) - .finish() - } -} - -impl Plan { - pub fn transcript(&self) -> RecommendedTranscript { - let mut transcript = RecommendedTranscript::new(b"Serai Processor Plan ID"); - transcript.domain_separate(b"meta"); - transcript.append_message(b"network", N::ID); - transcript.append_message(b"key", self.key.to_bytes()); - - transcript.domain_separate(b"inputs"); - for input in &self.inputs { - transcript.append_message(b"input", input.id()); - } - - transcript.domain_separate(b"payments"); - for payment in &self.payments { - payment.transcript(&mut transcript); - } - - if let Some(change) = &self.change { - transcript.append_message(b"change", change.to_string()); - } - - let mut addendum_bytes = vec![]; - self.scheduler_addendum.write(&mut addendum_bytes).unwrap(); - transcript.append_message(b"scheduler_addendum", addendum_bytes); - - transcript - } - - pub fn id(&self) -> [u8; 32] { - let challenge = self.transcript().challenge(b"id"); - let mut res = [0; 32]; - res.copy_from_slice(&challenge[.. 32]); - res - } - - pub fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(self.key.to_bytes().as_ref())?; - - writer.write_all(&u32::try_from(self.inputs.len()).unwrap().to_le_bytes())?; - for input in &self.inputs { - input.write(writer)?; - } - - writer.write_all(&u32::try_from(self.payments.len()).unwrap().to_le_bytes())?; - for payment in &self.payments { - payment.write(writer)?; - } - - // TODO: Have Plan construction fail if change cannot be serialized - let change = if let Some(change) = &self.change { - change.clone().try_into().map_err(|_| { - io::Error::other(format!( - "an address we said to use as change couldn't be converted to a Vec: {}", - change.to_string(), - )) - })? - } else { - vec![] - }; - assert!(serai_client::primitives::MAX_ADDRESS_LEN <= u8::MAX.into()); - writer.write_all(&[u8::try_from(change.len()).unwrap()])?; - writer.write_all(&change)?; - self.scheduler_addendum.write(writer) - } - - pub fn read(reader: &mut R) -> io::Result { - let key = N::Curve::read_G(reader)?; - - let mut inputs = vec![]; - let mut buf = [0; 4]; - reader.read_exact(&mut buf)?; - for _ in 0 .. u32::from_le_bytes(buf) { - inputs.push(N::Output::read(reader)?); - } - - let mut payments = vec![]; - reader.read_exact(&mut buf)?; - for _ in 0 .. u32::from_le_bytes(buf) { - payments.push(Payment::::read(reader)?); - } - - let mut len = [0; 1]; - reader.read_exact(&mut len)?; - let mut change = vec![0; usize::from(len[0])]; - reader.read_exact(&mut change)?; - let change = - if change.is_empty() { - None - } else { - Some(N::Address::try_from(change).map_err(|_| { - io::Error::other("couldn't deserialize an Address serialized into a Plan") - })?) 
- }; - - let scheduler_addendum = >::Addendum::read(reader)?; - Ok(Plan { key, inputs, payments, change, scheduler_addendum }) - } -} diff --git a/processor/src/signer.rs b/processor/src/signer.rs deleted file mode 100644 index cab0bceb..00000000 --- a/processor/src/signer.rs +++ /dev/null @@ -1,654 +0,0 @@ -use core::{marker::PhantomData, fmt}; -use std::collections::HashMap; - -use rand_core::OsRng; -use frost::{ - ThresholdKeys, FrostError, - sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine}, -}; - -use log::{info, debug, warn, error}; - -use serai_client::validator_sets::primitives::Session; -use messages::sign::*; - -pub use serai_db::*; - -use crate::{ - Get, DbTxn, Db, - networks::{Eventuality, Network}, -}; - -create_db!( - SignerDb { - CompletionsDb: (id: [u8; 32]) -> Vec, - EventualityDb: (id: [u8; 32]) -> Vec, - AttemptDb: (id: &SignId) -> (), - CompletionDb: (claim: &[u8]) -> Vec, - ActiveSignsDb: () -> Vec<[u8; 32]>, - CompletedOnChainDb: (id: &[u8; 32]) -> (), - } -); - -impl ActiveSignsDb { - fn add_active_sign(txn: &mut impl DbTxn, id: &[u8; 32]) { - if CompletedOnChainDb::get(txn, id).is_some() { - return; - } - let mut active = ActiveSignsDb::get(txn).unwrap_or_default(); - active.push(*id); - ActiveSignsDb::set(txn, &active); - } -} - -impl CompletedOnChainDb { - fn complete_on_chain(txn: &mut impl DbTxn, id: &[u8; 32]) { - CompletedOnChainDb::set(txn, id, &()); - ActiveSignsDb::set( - txn, - &ActiveSignsDb::get(txn) - .unwrap_or_default() - .into_iter() - .filter(|active| active != id) - .collect::>(), - ); - } -} -impl CompletionsDb { - fn completions( - getter: &impl Get, - id: [u8; 32], - ) -> Vec<::Claim> { - let Some(completions) = Self::get(getter, id) else { return vec![] }; - - // If this was set yet is empty, it's because it's the encoding of a claim with a length of 0 - if completions.is_empty() { - let default = ::Claim::default(); - assert_eq!(default.as_ref().len(), 0); - return vec![default]; - } - - let mut completions_ref = completions.as_slice(); - let mut res = vec![]; - while !completions_ref.is_empty() { - let mut id = ::Claim::default(); - let id_len = id.as_ref().len(); - id.as_mut().copy_from_slice(&completions_ref[.. id_len]); - completions_ref = &completions_ref[id_len ..]; - res.push(id); - } - res - } - - fn complete( - txn: &mut impl DbTxn, - id: [u8; 32], - completion: &::Completion, - ) { - // Completions can be completed by multiple signatures - // Save every solution in order to be robust - CompletionDb::save_completion::(txn, completion); - - let claim = N::Eventuality::claim(completion); - let claim: &[u8] = claim.as_ref(); - - // If claim has a 0-byte encoding, the set key, even if empty, is the claim - if claim.is_empty() { - Self::set(txn, id, &vec![]); - return; - } - - let mut existing = Self::get(txn, id).unwrap_or_default(); - assert_eq!(existing.len() % claim.len(), 0); - - // Don't add this completion if it's already present - let mut i = 0; - while i < existing.len() { - if &existing[i .. 
(i + claim.len())] == claim { - return; - } - i += claim.len(); - } - - existing.extend(claim); - Self::set(txn, id, &existing); - } -} - -impl EventualityDb { - fn save_eventuality( - txn: &mut impl DbTxn, - id: [u8; 32], - eventuality: &N::Eventuality, - ) { - txn.put(Self::key(id), eventuality.serialize()); - } - - fn eventuality(getter: &impl Get, id: [u8; 32]) -> Option { - Some(N::Eventuality::read(&mut getter.get(Self::key(id))?.as_slice()).unwrap()) - } -} - -impl CompletionDb { - fn save_completion( - txn: &mut impl DbTxn, - completion: &::Completion, - ) { - let claim = N::Eventuality::claim(completion); - let claim: &[u8] = claim.as_ref(); - Self::set(txn, claim, &N::Eventuality::serialize_completion(completion)); - } - - fn completion( - getter: &impl Get, - claim: &::Claim, - ) -> Option<::Completion> { - Self::get(getter, claim.as_ref()) - .map(|completion| N::Eventuality::read_completion::<&[u8]>(&mut completion.as_ref()).unwrap()) - } -} - -type PreprocessFor = <::TransactionMachine as PreprocessMachine>::Preprocess; -type SignMachineFor = <::TransactionMachine as PreprocessMachine>::SignMachine; -type SignatureShareFor = as SignMachine< - <::Eventuality as Eventuality>::Completion, ->>::SignatureShare; -type SignatureMachineFor = as SignMachine< - <::Eventuality as Eventuality>::Completion, ->>::SignatureMachine; - -pub struct Signer { - db: PhantomData, - - network: N, - - session: Session, - keys: Vec>, - - signable: HashMap<[u8; 32], N::SignableTransaction>, - attempt: HashMap<[u8; 32], u32>, - #[allow(clippy::type_complexity)] - preprocessing: HashMap<[u8; 32], (Vec>, Vec>)>, - #[allow(clippy::type_complexity)] - signing: HashMap<[u8; 32], (SignatureMachineFor, Vec>)>, -} - -impl fmt::Debug for Signer { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt - .debug_struct("Signer") - .field("network", &self.network) - .field("signable", &self.signable) - .field("attempt", &self.attempt) - .finish_non_exhaustive() - } -} - -impl Signer { - /// Rebroadcast already signed TXs which haven't had their completions mined into a sufficiently - /// confirmed block. - pub async fn rebroadcast_task(db: D, network: N) { - log::info!("rebroadcasting transactions for plans whose completions yet to be confirmed..."); - loop { - for active in ActiveSignsDb::get(&db).unwrap_or_default() { - for claim in CompletionsDb::completions::(&db, active) { - log::info!("rebroadcasting completion with claim {}", hex::encode(claim.as_ref())); - // TODO: Don't drop the error entirely. Check for invariants - let _ = - network.publish_completion(&CompletionDb::completion::(&db, &claim).unwrap()).await; - } - } - // Only run every five minutes so we aren't frequently loading tens to hundreds of KB from - // the DB - tokio::time::sleep(core::time::Duration::from_secs(5 * 60)).await; - } - } - pub fn new(network: N, session: Session, keys: Vec>) -> Signer { - assert!(!keys.is_empty()); - Signer { - db: PhantomData, - - network, - - session, - keys, - - signable: HashMap::new(), - attempt: HashMap::new(), - preprocessing: HashMap::new(), - signing: HashMap::new(), - } - } - - fn verify_id(&self, id: &SignId) -> Result<(), ()> { - // Check the attempt lines up - match self.attempt.get(&id.id) { - // If we don't have an attempt logged, it's because the coordinator is faulty OR because we - // rebooted OR we detected the signed transaction on chain, so there's notable network - // latency/a malicious validator - None => { - warn!( - "not attempting {} #{}. 
this is an error if we didn't reboot", - hex::encode(id.id), - id.attempt - ); - Err(())?; - } - Some(attempt) => { - if attempt != &id.attempt { - warn!( - "sent signing data for {} #{} yet we have attempt #{}", - hex::encode(id.id), - id.attempt, - attempt - ); - Err(())?; - } - } - } - - Ok(()) - } - - #[must_use] - fn already_completed(txn: &mut D::Transaction<'_>, id: [u8; 32]) -> bool { - if !CompletionsDb::completions::(txn, id).is_empty() { - debug!( - "SignTransaction/Reattempt order for {}, which we've already completed signing", - hex::encode(id) - ); - - true - } else { - false - } - } - - #[must_use] - fn complete( - &mut self, - id: [u8; 32], - claim: &::Claim, - ) -> ProcessorMessage { - // Assert we're actively signing for this TX - assert!(self.signable.remove(&id).is_some(), "completed a TX we weren't signing for"); - assert!(self.attempt.remove(&id).is_some(), "attempt had an ID signable didn't have"); - // If we weren't selected to participate, we'll have a preprocess - self.preprocessing.remove(&id); - // If we were selected, the signature will only go through if we contributed a share - // Despite this, we then need to get everyone's shares, and we may get a completion before - // we get everyone's shares - // This would be if the coordinator fails and we find the eventuality completion on-chain - self.signing.remove(&id); - - // Emit the event for it - ProcessorMessage::Completed { session: self.session, id, tx: claim.as_ref().to_vec() } - } - - #[must_use] - pub fn completed( - &mut self, - txn: &mut D::Transaction<'_>, - id: [u8; 32], - completion: &::Completion, - ) -> Option { - let first_completion = !Self::already_completed(txn, id); - - // Save this completion to the DB - CompletedOnChainDb::complete_on_chain(txn, &id); - CompletionsDb::complete::(txn, id, completion); - - if first_completion { - Some(self.complete(id, &N::Eventuality::claim(completion))) - } else { - None - } - } - - /// Returns Some if the first completion. 
- // Doesn't use any loops/retries since we'll eventually get this from the Scanner anyways - #[must_use] - async fn claimed_eventuality_completion( - &mut self, - txn: &mut D::Transaction<'_>, - id: [u8; 32], - claim: &::Claim, - ) -> Option { - if let Some(eventuality) = EventualityDb::eventuality::(txn, id) { - match self.network.confirm_completion(&eventuality, claim).await { - Ok(Some(completion)) => { - info!( - "signer eventuality for {} resolved in {}", - hex::encode(id), - hex::encode(claim.as_ref()) - ); - - let first_completion = !Self::already_completed(txn, id); - - // Save this completion to the DB - CompletionsDb::complete::(txn, id, &completion); - - if first_completion { - return Some(self.complete(id, claim)); - } - } - Ok(None) => { - warn!( - "a validator claimed {} completed {} when it did not", - hex::encode(claim.as_ref()), - hex::encode(id), - ); - } - Err(_) => { - // Transaction hasn't hit our mempool/was dropped for a different signature - // The latter can happen given certain latency conditions/a single malicious signer - // In the case of a single malicious signer, they can drag multiple honest validators down - // with them, so we unfortunately can't slash on this case - warn!( - "a validator claimed {} completed {} yet we couldn't check that claim", - hex::encode(claim.as_ref()), - hex::encode(id), - ); - } - } - } else { - warn!( - "informed of completion {} for eventuality {}, when we didn't have that eventuality", - hex::encode(claim.as_ref()), - hex::encode(id), - ); - } - None - } - - #[must_use] - async fn attempt( - &mut self, - txn: &mut D::Transaction<'_>, - id: [u8; 32], - attempt: u32, - ) -> Option { - if Self::already_completed(txn, id) { - return None; - } - - // Check if we're already working on this attempt - if let Some(curr_attempt) = self.attempt.get(&id) { - if curr_attempt >= &attempt { - warn!( - "told to attempt {} #{} yet we're already working on {}", - hex::encode(id), - attempt, - curr_attempt - ); - return None; - } - } - - // Start this attempt - // Clone the TX so we don't have an immutable borrow preventing the below mutable actions - // (also because we do need an owned tx anyways) - let Some(tx) = self.signable.get(&id).cloned() else { - warn!("told to attempt a TX we aren't currently signing for"); - return None; - }; - - // Delete any existing machines - self.preprocessing.remove(&id); - self.signing.remove(&id); - - // Update the attempt number - self.attempt.insert(id, attempt); - - let id = SignId { session: self.session, id, attempt }; - - info!("signing for {} #{}", hex::encode(id.id), id.attempt); - - // If we reboot mid-sign, the current design has us abort all signs and wait for latter - // attempts/new signing protocols - // This is distinct from the DKG which will continue DKG sessions, even on reboot - // This is because signing is tolerant of failures of up to 1/3rd of the group - // The DKG requires 100% participation - // While we could apply similar tricks as the DKG (a seeded RNG) to achieve support for - // reboots, it's not worth the complexity when messing up here leaks our secret share - // - // Despite this, on reboot, we'll get told of active signing items, and may be in this - // branch again for something we've already attempted - // - // Only run if this hasn't already been attempted - // TODO: This isn't complete as this txn may not be committed with the expected timing - if AttemptDb::get(txn, &id).is_some() { - warn!( - "already attempted {} #{}. 
this is an error if we didn't reboot", - hex::encode(id.id), - id.attempt - ); - return None; - } - AttemptDb::set(txn, &id, &()); - - // Attempt to create the TX - let mut machines = vec![]; - let mut preprocesses = vec![]; - let mut serialized_preprocesses = vec![]; - for keys in &self.keys { - let machine = match self.network.attempt_sign(keys.clone(), tx.clone()).await { - Err(e) => { - error!("failed to attempt {}, #{}: {:?}", hex::encode(id.id), id.attempt, e); - return None; - } - Ok(machine) => machine, - }; - - let (machine, preprocess) = machine.preprocess(&mut OsRng); - machines.push(machine); - serialized_preprocesses.push(preprocess.serialize()); - preprocesses.push(preprocess); - } - - self.preprocessing.insert(id.id, (machines, preprocesses)); - - // Broadcast our preprocess - Some(ProcessorMessage::Preprocess { id, preprocesses: serialized_preprocesses }) - } - - #[must_use] - pub async fn sign_transaction( - &mut self, - txn: &mut D::Transaction<'_>, - id: [u8; 32], - tx: N::SignableTransaction, - eventuality: &N::Eventuality, - ) -> Option { - // The caller is expected to re-issue sign orders on reboot - // This is solely used by the rebroadcast task - ActiveSignsDb::add_active_sign(txn, &id); - - if Self::already_completed(txn, id) { - return None; - } - - EventualityDb::save_eventuality::(txn, id, eventuality); - - self.signable.insert(id, tx); - self.attempt(txn, id, 0).await - } - - #[must_use] - pub async fn handle( - &mut self, - txn: &mut D::Transaction<'_>, - msg: CoordinatorMessage, - ) -> Option { - match msg { - CoordinatorMessage::Preprocesses { id, preprocesses } => { - if self.verify_id(&id).is_err() { - return None; - } - - let (machines, our_preprocesses) = match self.preprocessing.remove(&id.id) { - // Either rebooted or RPC error, or some invariant - None => { - warn!( - "not preprocessing for {}. 
this is an error if we didn't reboot", - hex::encode(id.id) - ); - return None; - } - Some(machine) => machine, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = preprocesses.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice(); - let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - }; - if !preprocess_ref.is_empty() { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - } - parsed.insert(l, res); - } - let preprocesses = parsed; - - // Only keep a single machine as we only need one to get the signature - let mut signature_machine = None; - let mut shares = vec![]; - let mut serialized_shares = vec![]; - for (m, machine) in machines.into_iter().enumerate() { - let mut preprocesses = preprocesses.clone(); - for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() { - if i != m { - assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none()); - } - } - - // Use an empty message, as expected of TransactionMachines - let (machine, share) = match machine.sign(preprocesses, &[]) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) - } - }, - }; - if m == 0 { - signature_machine = Some(machine); - } - serialized_shares.push(share.serialize()); - shares.push(share); - } - self.signing.insert(id.id, (signature_machine.unwrap(), shares)); - - // Broadcast our shares - Some(ProcessorMessage::Share { id, shares: serialized_shares }) - } - - CoordinatorMessage::Shares { id, shares } => { - if self.verify_id(&id).is_err() { - return None; - } - - let (machine, our_shares) = match self.signing.remove(&id.id) { - // Rebooted, RPC error, or some invariant - None => { - // If preprocessing has this ID, it means we were never sent the preprocess by the - // coordinator - if self.preprocessing.contains_key(&id.id) { - panic!("never preprocessed yet signing?"); - } - - warn!( - "not preprocessing for {}. 
this is an error if we didn't reboot", - hex::encode(id.id) - ); - return None; - } - Some(machine) => machine, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = shares.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut share_ref = shares.get(&l).unwrap().as_slice(); - let Ok(res) = machine.read_share(&mut share_ref) else { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - }; - if !share_ref.is_empty() { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - } - parsed.insert(l, res); - } - let mut shares = parsed; - - for (i, our_share) in our_shares.into_iter().enumerate().skip(1) { - assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); - } - - let completion = match machine.complete(shares) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) - } - }, - }; - - // Save the completion in case it's needed for recovery - CompletionsDb::complete::(txn, id.id, &completion); - - // Publish it - if let Err(e) = self.network.publish_completion(&completion).await { - error!("couldn't publish completion for plan {}: {:?}", hex::encode(id.id), e); - } else { - info!("published completion for plan {}", hex::encode(id.id)); - } - - // Stop trying to sign for this TX - Some(self.complete(id.id, &N::Eventuality::claim(&completion))) - } - - CoordinatorMessage::Reattempt { id } => self.attempt(txn, id.id, id.attempt).await, - - CoordinatorMessage::Completed { session: _, id, tx: mut claim_vec } => { - let mut claim = ::Claim::default(); - if claim.as_ref().len() != claim_vec.len() { - let true_len = claim_vec.len(); - claim_vec.truncate(2 * claim.as_ref().len()); - warn!( - "a validator claimed {}... (actual length {}) completed {} yet {}", - hex::encode(&claim_vec), - true_len, - hex::encode(id), - "that's not a valid Claim", - ); - return None; - } - claim.as_mut().copy_from_slice(&claim_vec); - - self.claimed_eventuality_completion(txn, id, &claim).await - } - } - } -} diff --git a/processor/src/slash_report_signer.rs b/processor/src/slash_report_signer.rs deleted file mode 100644 index 056055f7..00000000 --- a/processor/src/slash_report_signer.rs +++ /dev/null @@ -1,293 +0,0 @@ -use core::fmt; -use std::collections::HashMap; - -use rand_core::OsRng; - -use frost::{ - curve::Ristretto, - ThresholdKeys, FrostError, - algorithm::Algorithm, - sign::{ - Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, - AlgorithmSignMachine, AlgorithmSignatureMachine, - }, -}; -use frost_schnorrkel::Schnorrkel; - -use log::{info, warn}; - -use serai_client::{ - primitives::ExternalNetworkId, - validator_sets::primitives::{report_slashes_message, ExternalValidatorSet, Session}, - Public, -}; - -use messages::coordinator::*; -use crate::{Get, DbTxn, create_db}; - -create_db! 
{ - SlashReportSignerDb { - Completed: (session: Session) -> (), - Attempt: (session: Session, attempt: u32) -> (), - } -} - -type Preprocess = as PreprocessMachine>::Preprocess; -type SignatureShare = as SignMachine< - >::Signature, ->>::SignatureShare; - -pub struct SlashReportSigner { - network: ExternalNetworkId, - session: Session, - keys: Vec>, - report: Vec<([u8; 32], u32)>, - - attempt: u32, - #[allow(clippy::type_complexity)] - preprocessing: Option<(Vec>, Vec)>, - #[allow(clippy::type_complexity)] - signing: Option<(AlgorithmSignatureMachine, Vec)>, -} - -impl fmt::Debug for SlashReportSigner { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt - .debug_struct("SlashReportSigner") - .field("session", &self.session) - .field("report", &self.report) - .field("attempt", &self.attempt) - .field("preprocessing", &self.preprocessing.is_some()) - .field("signing", &self.signing.is_some()) - .finish_non_exhaustive() - } -} - -impl SlashReportSigner { - pub fn new( - txn: &mut impl DbTxn, - network: ExternalNetworkId, - session: Session, - keys: Vec>, - report: Vec<([u8; 32], u32)>, - attempt: u32, - ) -> Option<(SlashReportSigner, ProcessorMessage)> { - assert!(!keys.is_empty()); - - if Completed::get(txn, session).is_some() { - return None; - } - - if Attempt::get(txn, session, attempt).is_some() { - warn!( - "already attempted signing slash report for session {:?}, attempt #{}. {}", - session, attempt, "this is an error if we didn't reboot", - ); - return None; - } - Attempt::set(txn, session, attempt, &()); - - info!("signing slash report for session {:?} with attempt #{}", session, attempt); - - let mut machines = vec![]; - let mut preprocesses = vec![]; - let mut serialized_preprocesses = vec![]; - for keys in &keys { - // b"substrate" is a literal from sp-core - let machine = AlgorithmMachine::new(Schnorrkel::new(b"substrate"), keys.clone()); - - let (machine, preprocess) = machine.preprocess(&mut OsRng); - machines.push(machine); - serialized_preprocesses.push(preprocess.serialize().try_into().unwrap()); - preprocesses.push(preprocess); - } - let preprocessing = Some((machines, preprocesses)); - - let substrate_sign_id = - SubstrateSignId { session, id: SubstrateSignableId::SlashReport, attempt }; - - Some(( - SlashReportSigner { network, session, keys, report, attempt, preprocessing, signing: None }, - ProcessorMessage::SlashReportPreprocess { - id: substrate_sign_id, - preprocesses: serialized_preprocesses, - }, - )) - } - - #[must_use] - pub fn handle( - &mut self, - txn: &mut impl DbTxn, - msg: CoordinatorMessage, - ) -> Option { - match msg { - CoordinatorMessage::CosignSubstrateBlock { .. } => { - panic!("SlashReportSigner passed CosignSubstrateBlock") - } - - CoordinatorMessage::SignSlashReport { .. } => { - panic!("SlashReportSigner passed SignSlashReport") - } - - CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => { - assert_eq!(id.session, self.session); - assert_eq!(id.id, SubstrateSignableId::SlashReport); - if id.attempt != self.attempt { - panic!("given preprocesses for a distinct attempt than SlashReportSigner is signing") - } - - let (machines, our_preprocesses) = match self.preprocessing.take() { - // Either rebooted or RPC error, or some invariant - None => { - warn!("not preprocessing. 
this is an error if we didn't reboot"); - return None; - } - Some(preprocess) => preprocess, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = preprocesses.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice(); - let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - }; - if !preprocess_ref.is_empty() { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - } - parsed.insert(l, res); - } - let preprocesses = parsed; - - // Only keep a single machine as we only need one to get the signature - let mut signature_machine = None; - let mut shares = vec![]; - let mut serialized_shares = vec![]; - for (m, machine) in machines.into_iter().enumerate() { - let mut preprocesses = preprocesses.clone(); - for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() { - if i != m { - assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none()); - } - } - - let (machine, share) = match machine.sign( - preprocesses, - &report_slashes_message( - &ExternalValidatorSet { network: self.network, session: self.session }, - &self - .report - .clone() - .into_iter() - .map(|(validator, points)| (Public(validator), points)) - .collect::>(), - ), - ) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) - } - }, - }; - if m == 0 { - signature_machine = Some(machine); - } - - let mut share_bytes = [0; 32]; - share_bytes.copy_from_slice(&share.serialize()); - serialized_shares.push(share_bytes); - - shares.push(share); - } - self.signing = Some((signature_machine.unwrap(), shares)); - - // Broadcast our shares - Some(ProcessorMessage::SubstrateShare { id, shares: serialized_shares }) - } - - CoordinatorMessage::SubstrateShares { id, shares } => { - assert_eq!(id.session, self.session); - assert_eq!(id.id, SubstrateSignableId::SlashReport); - if id.attempt != self.attempt { - panic!("given preprocesses for a distinct attempt than SlashReportSigner is signing") - } - - let (machine, our_shares) = match self.signing.take() { - // Rebooted, RPC error, or some invariant - None => { - // If preprocessing has this ID, it means we were never sent the preprocess by the - // coordinator - if self.preprocessing.is_some() { - panic!("never preprocessed yet signing?"); - } - - warn!("not preprocessing. 
this is an error if we didn't reboot"); - return None; - } - Some(signing) => signing, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = shares.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut share_ref = shares.get(&l).unwrap().as_slice(); - let Ok(res) = machine.read_share(&mut share_ref) else { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - }; - if !share_ref.is_empty() { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - } - parsed.insert(l, res); - } - let mut shares = parsed; - - for (i, our_share) in our_shares.into_iter().enumerate().skip(1) { - assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); - } - - let sig = match machine.complete(shares) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) - } - }, - }; - - info!("signed slash report for session {:?} with attempt #{}", self.session, id.attempt); - - Completed::set(txn, self.session, &()); - - Some(ProcessorMessage::SignedSlashReport { - session: self.session, - signature: sig.to_bytes().to_vec(), - }) - } - CoordinatorMessage::BatchReattempt { .. } => { - panic!("BatchReattempt passed to SlashReportSigner") - } - } - } -} diff --git a/processor/src/tests/key_gen.rs b/processor/src/tests/key_gen.rs deleted file mode 100644 index 047e006a..00000000 --- a/processor/src/tests/key_gen.rs +++ /dev/null @@ -1,149 +0,0 @@ -use std::collections::HashMap; - -use zeroize::Zeroizing; - -use rand_core::{RngCore, OsRng}; - -use ciphersuite::group::GroupEncoding; -use frost::{Participant, ThresholdParams, tests::clone_without}; - -use serai_db::{DbTxn, Db, MemDb}; - -use sp_application_crypto::sr25519; -use serai_client::validator_sets::primitives::{Session, KeyPair}; - -use messages::key_gen::*; -use crate::{ - networks::Network, - key_gen::{KeyConfirmed, KeyGen}, -}; - -const ID: KeyGenId = KeyGenId { session: Session(1), attempt: 3 }; - -pub fn test_key_gen() { - let mut entropies = HashMap::new(); - let mut dbs = HashMap::new(); - let mut key_gens = HashMap::new(); - for i in 1 ..= 5 { - let mut entropy = Zeroizing::new([0; 32]); - OsRng.fill_bytes(entropy.as_mut()); - entropies.insert(i, entropy); - let db = MemDb::new(); - dbs.insert(i, db.clone()); - key_gens.insert(i, KeyGen::::new(db, entropies[&i].clone())); - } - - let mut all_commitments = HashMap::new(); - for i in 1 ..= 5 { - let key_gen = key_gens.get_mut(&i).unwrap(); - let mut txn = dbs.get_mut(&i).unwrap().txn(); - if let ProcessorMessage::Commitments { id, mut commitments } = key_gen.handle( - &mut txn, - CoordinatorMessage::GenerateKey { - id: ID, - params: ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap()) - .unwrap(), - shares: 1, - }, - ) { - assert_eq!(id, ID); - assert_eq!(commitments.len(), 1); - all_commitments - .insert(Participant::new(u16::try_from(i).unwrap()).unwrap(), commitments.swap_remove(0)); - } else { - panic!("didn't get commitments back"); - } - txn.commit(); - } - - // 1 is rebuilt on every step - // 2 is rebuilt here - // 3 ... 
are rebuilt once, one at each of the following steps - let rebuild = |key_gens: &mut HashMap<_, _>, dbs: &HashMap<_, MemDb>, i| { - key_gens.remove(&i); - key_gens.insert(i, KeyGen::::new(dbs[&i].clone(), entropies[&i].clone())); - }; - rebuild(&mut key_gens, &dbs, 1); - rebuild(&mut key_gens, &dbs, 2); - - let mut all_shares = HashMap::new(); - for i in 1 ..= 5 { - let key_gen = key_gens.get_mut(&i).unwrap(); - let mut txn = dbs.get_mut(&i).unwrap().txn(); - let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); - if let ProcessorMessage::Shares { id, mut shares } = key_gen.handle( - &mut txn, - CoordinatorMessage::Commitments { id: ID, commitments: clone_without(&all_commitments, &i) }, - ) { - assert_eq!(id, ID); - assert_eq!(shares.len(), 1); - all_shares.insert(i, shares.swap_remove(0)); - } else { - panic!("didn't get shares back"); - } - txn.commit(); - } - - // Rebuild 1 and 3 - rebuild(&mut key_gens, &dbs, 1); - rebuild(&mut key_gens, &dbs, 3); - - let mut res = None; - for i in 1 ..= 5 { - let key_gen = key_gens.get_mut(&i).unwrap(); - let mut txn = dbs.get_mut(&i).unwrap().txn(); - let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); - if let ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } = key_gen.handle( - &mut txn, - CoordinatorMessage::Shares { - id: ID, - shares: vec![all_shares - .iter() - .filter_map(|(l, shares)| if i == *l { None } else { Some((*l, shares[&i].clone())) }) - .collect()], - }, - ) { - assert_eq!(id, ID); - if res.is_none() { - res = Some((substrate_key, network_key.clone())); - } - assert_eq!(res.as_ref().unwrap(), &(substrate_key, network_key)); - } else { - panic!("didn't get key back"); - } - txn.commit(); - } - let res = res.unwrap(); - - // Rebuild 1 and 4 - rebuild(&mut key_gens, &dbs, 1); - rebuild(&mut key_gens, &dbs, 4); - - for i in 1 ..= 5 { - let key_gen = key_gens.get_mut(&i).unwrap(); - let mut txn = dbs.get_mut(&i).unwrap().txn(); - let KeyConfirmed { mut substrate_keys, mut network_keys } = key_gen.confirm( - &mut txn, - ID.session, - &KeyPair(sr25519::Public(res.0), res.1.clone().try_into().unwrap()), - ); - txn.commit(); - - assert_eq!(substrate_keys.len(), 1); - let substrate_keys = substrate_keys.swap_remove(0); - assert_eq!(network_keys.len(), 1); - let network_keys = network_keys.swap_remove(0); - - let params = - ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap()).unwrap(); - assert_eq!(substrate_keys.params(), params); - assert_eq!(network_keys.params(), params); - assert_eq!( - ( - substrate_keys.group_key().to_bytes(), - network_keys.group_key().to_bytes().as_ref().to_vec() - ), - res - ); - } -} diff --git a/processor/view-keys/Cargo.toml b/processor/view-keys/Cargo.toml new file mode 100644 index 00000000..d76ca32b --- /dev/null +++ b/processor/view-keys/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "serai-processor-view-keys" +version = "0.1.0" +description = "View keys for the Serai processor" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/view-keys" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +rust-version = "1.80" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +ciphersuite = { version = "0.4", path = "../../crypto/ciphersuite", default-features = false, features = ["std"] } diff --git a/processor/view-keys/LICENSE b/processor/view-keys/LICENSE new file mode 100644 index 00000000..91d893c1 --- /dev/null +++ 
b/processor/view-keys/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022-2024 Luke Parker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/processor/view-keys/README.md b/processor/view-keys/README.md new file mode 100644 index 00000000..4354eed6 --- /dev/null +++ b/processor/view-keys/README.md @@ -0,0 +1,6 @@ +# Serai Processor View Keys + +View keys for the Serai processor. + +This is a MIT-licensed library made available for anyone to generate Serai's +view keys, as necessary for auditing reasons and for sending coins to Serai. diff --git a/processor/view-keys/src/lib.rs b/processor/view-keys/src/lib.rs new file mode 100644 index 00000000..c0d4c68e --- /dev/null +++ b/processor/view-keys/src/lib.rs @@ -0,0 +1,13 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use ciphersuite::Ciphersuite; + +/// Generate a view key for usage within Serai. +/// +/// `k` is the index of the key to generate (enabling generating multiple view keys within a +/// single context). +pub fn view_key(k: u64) -> C::F { + C::hash_to_F(b"Serai DEX View Key", &k.to_le_bytes()) +} diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 73cb338c..d99e6588 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.80" +channel = "1.81" targets = ["wasm32-unknown-unknown"] profile = "minimal" components = ["rust-src", "rustfmt", "clippy"] diff --git a/spec/DKG Exclusions.md b/spec/DKG Exclusions.md deleted file mode 100644 index 1677da8a..00000000 --- a/spec/DKG Exclusions.md +++ /dev/null @@ -1,23 +0,0 @@ -Upon an issue with the DKG, the honest validators must remove the malicious -validators. Ideally, a threshold signature would be used, yet that would require -a threshold key (which would require authentication by a MuSig signature). A -MuSig signature which specifies the signing set (or rather, the excluded -signers) achieves the most efficiency. - -While that resolves the on-chain behavior, the Tributary also has to perform -exclusion. This has the following forms: - -1) Rejecting further transactions (required) -2) Rejecting further participation in Tendermint - -With regards to rejecting further participation in Tendermint, it's *ideal* to -remove the validator from the list of validators. Each validator removed from -participation, yet not from the list of validators, increases the likelihood of -the network failing to form consensus. 
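A minimal usage sketch for the `view_key` helper introduced in `processor/view-keys/src/lib.rs` above. It assumes `view_key` is generic over `Ciphersuite` (i.e. callable as `view_key::<C>(k)`) and that the `ciphersuite` crate is built with its `ed25519` feature so an `Ed25519` ciphersuite is available; both are assumptions of this sketch, not statements of the diff itself.

```rust
// Usage sketch (assumptions: `view_key` is generic over `Ciphersuite`, and the
// `ciphersuite` crate provides `Ed25519` via its `ed25519` feature).
use ciphersuite::{Ciphersuite, Ed25519};
use serai_processor_view_keys::view_key;

fn main() {
  // Derive the view keys at indices 0 and 1 for the Ed25519 context.
  let key_0: <Ed25519 as Ciphersuite>::F = view_key::<Ed25519>(0);
  let key_1 = view_key::<Ed25519>(1);
  // Distinct indices hash to distinct scalars.
  assert_ne!(key_0, key_1);
}
```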
- -With regards to the economic security, an honest 67% may remove a faulty -(explicitly or simply offline) 33%, letting 67% of the remaining 67% (4/9ths) -take control of the associated private keys. In such a case, the malicious -parties are defined as the 4/9ths of validators with access to the private key -and the 33% removed (who together form >67% of the originally intended -validator set and have presumably provided enough stake to cover losses). diff --git a/spec/cryptography/Distributed Key Generation.md b/spec/cryptography/Distributed Key Generation.md index fae5ff90..d0f209c1 100644 --- a/spec/cryptography/Distributed Key Generation.md +++ b/spec/cryptography/Distributed Key Generation.md @@ -1,35 +1,7 @@ # Distributed Key Generation -Serai uses a modification of Pedersen's Distributed Key Generation, which is -actually Feldman's Verifiable Secret Sharing Scheme run by every participant, as -described in the FROST paper. The modification included in FROST was to include -a Schnorr Proof of Knowledge for coefficient zero, preventing rogue key attacks. -This results in a two-round protocol. - -### Encryption - -In order to protect the secret shares during communication, the `dkg` library -establishes a public key for encryption at the start of a given protocol. -Every encrypted message (such as the secret shares) then includes a per-message -encryption key. These two keys are used in an Elliptic-curve Diffie-Hellman -handshake to derive a shared key. This shared key is then hashed to obtain a key -and IV for use in a ChaCha20 stream cipher instance, which is xor'd against a -message to encrypt it. - -### Blame - -Since each message has a distinct key attached, and accordingly a distinct -shared key, it's possible to reveal the shared key for a specific message -without revealing any other message's decryption keys. This is utilized when a -participant misbehaves. A participant who receives an invalid encrypted message -publishes its key, able to without concern for side effects, With the key -published, all participants can decrypt the message in order to decide blame. - -While key reuse by a participant is considered as them revealing the messages -themselves, and therefore out of scope, there is an attack where a malicious -adversary claims another participant's encryption key. They'll fail to encrypt -their message, and the recipient will issue a blame statement. This blame -statement, intended to reveal the malicious adversary, also reveals the message -by the participant whose keys were co-opted. To resolve this, a -proof-of-possession is also included with encrypted messages, ensuring only -those actually with per-message keys can claim to use them. +Serai uses a modification of the one-round Distributed Key Generation described +in the [eVRF](https://eprint.iacr.org/2024/397) paper. We only require a +threshold to participate, sacrificing unbiasedness for robustness, and implement a +verifiable encryption scheme such that anyone can verify a ciphertext +encrypts the expected secret share. diff --git a/spec/processor/Multisig Rotation.md b/spec/processor/Multisig Rotation.md index ff5c3d28..86708025 100644 --- a/spec/processor/Multisig Rotation.md +++ b/spec/processor/Multisig Rotation.md @@ -12,11 +12,11 @@ The following timeline is established: 1) The new multisig is created, and has its keys set on Serai. Once the next `Batch` with a new external network block is published, its block becomes the "queue block". 
The new multisig is set to activate at the "queue block", plus - `CONFIRMATIONS` blocks (the "activation block"). + `WINDOW_LENGTH` blocks (the "activation block"). We don't use the last `Batch`'s external network block, as that `Batch` may - be older than `CONFIRMATIONS` blocks. Any yet-to-be-included-and-finalized - `Batch` will be within `CONFIRMATIONS` blocks of what any processor has + be older than `WINDOW_LENGTH` blocks. Any yet-to-be-included-and-finalized + `Batch` will be within `WINDOW_LENGTH` blocks of what any processor has scanned however, as it'll wait for inclusion and finalization before continuing scanning. @@ -102,7 +102,8 @@ The following timeline is established: 5) For the next 6 hours, all non-`Branch` outputs received are immediately forwarded to the new multisig. Only external transactions to the new multisig - are included in `Batch`s. + are included in `Batch`s. Any outputs not yet transferred as change are + explicitly transferred. The new multisig infers the `InInstruction`, and refund address, for forwarded `External` outputs via reading what they were for the original @@ -121,7 +122,7 @@ The following timeline is established: Once all the 6 hour period has expired, no `Eventuality`s remain, and all outputs are forwarded, the multisig publishes a final `Batch` of the first - block, plus `CONFIRMATIONS`, which met these conditions, regardless of if it + block, plus `WINDOW_LENGTH`, which met these conditions, regardless of if it would've otherwise had a `Batch`. No further actions by it, nor its validators, are expected (unless, of course, those validators remain present in the new multisig). diff --git a/spec/processor/Processor.md b/spec/processor/Processor.md index ca8cf428..55d3baf3 100644 --- a/spec/processor/Processor.md +++ b/spec/processor/Processor.md @@ -9,29 +9,23 @@ This document primarily discusses its flow with regards to the coordinator. ### Generate Key On `key_gen::CoordinatorMessage::GenerateKey`, the processor begins a pair of -instances of the distributed key generation protocol specified in the FROST -paper. +instances of the distributed key generation protocol. -The first instance is for a key to use on the external network. The second -instance is for a Ristretto public key used to publish data to the Serai -blockchain. This pair of FROST DKG instances is considered a single instance of -Serai's overall key generation protocol. +The first instance is for a Ristretto public key used to publish data to the +Serai blockchain. The second instance is for a key to use on the external +network. This pair of DKG instances is considered a single instance of Serai's +overall DKG protocol. -The commitments for both protocols are sent to the coordinator in a single -`key_gen::ProcessorMessage::Commitments`. +The participations in both protocols are sent to the coordinator in +`key_gen::ProcessorMessage::Participation` messages, individually, as they come +in. -### Key Gen Commitments +### Key Gen Participations -On `key_gen::CoordinatorMessage::Commitments`, the processor continues the -specified key generation instance. The secret shares for each fellow -participant are sent to the coordinator in a -`key_gen::ProcessorMessage::Shares`. - -#### Key Gen Shares - -On `key_gen::CoordinatorMessage::Shares`, the processor completes the specified -key generation instance. The generated key pair is sent to the coordinator in a -`key_gen::ProcessorMessage::GeneratedKeyPair`. 
+On `key_gen::CoordinatorMessage::Participation`, the processor stores the +contained participation, verifying participations as sane. Once it receives `t` +honest participations, the processor completes the DKG and sends the generated +key pair to the coordinator in a `key_gen::ProcessorMessage::GeneratedKeyPair`. ### Confirm Key Pair diff --git a/substrate/abi/Cargo.toml b/substrate/abi/Cargo.toml index 072f7460..772cdd32 100644 --- a/substrate/abi/Cargo.toml +++ b/substrate/abi/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/abi" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.74" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true @@ -16,8 +16,10 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [dependencies] -scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } -scale-info = { version = "2", default-features = false, features = ["derive"] } +bitvec = { version = "1", default-features = false, features = ["alloc", "serde"] } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive", "bit-vec"] } +scale-info = { version = "2", default-features = false, features = ["derive", "bit-vec"] } borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true } serde = { version = "1", default-features = false, features = ["derive", "alloc"], optional = true } @@ -40,6 +42,8 @@ serai-signals-primitives = { path = "../signals/primitives", version = "0.1", de [features] std = [ + "bitvec/std", + "scale/std", "scale-info/std", diff --git a/substrate/abi/src/in_instructions.rs b/substrate/abi/src/in_instructions.rs index 765a5053..1f5a64de 100644 --- a/substrate/abi/src/in_instructions.rs +++ b/substrate/abi/src/in_instructions.rs @@ -2,6 +2,7 @@ use serai_primitives::*; pub use serai_in_instructions_primitives as primitives; use primitives::SignedBatch; +use serai_validator_sets_primitives::Session; #[derive(Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale_info::TypeInfo)] #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] @@ -12,11 +13,18 @@ pub enum Call { } #[derive(Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale_info::TypeInfo)] -#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] #[cfg_attr(feature = "serde", derive(serde::Serialize))] #[cfg_attr(all(feature = "std", feature = "serde"), derive(serde::Deserialize))] pub enum Event { - Batch { network: ExternalNetworkId, id: u32, block: BlockHash, instructions_hash: [u8; 32] }, - InstructionFailure { network: ExternalNetworkId, id: u32, index: u32 }, - Halt { network: ExternalNetworkId }, + Batch { + network: ExternalNetworkId, + publishing_session: Session, + id: u32, + external_network_block_hash: BlockHash, + in_instructions_hash: [u8; 32], + in_instruction_results: bitvec::vec::BitVec, + }, + Halt { + network: ExternalNetworkId, + }, } diff --git a/substrate/abi/src/validator_sets.rs b/substrate/abi/src/validator_sets.rs index 8e7d2e1e..b47a7a68 100644 --- a/substrate/abi/src/validator_sets.rs +++ b/substrate/abi/src/validator_sets.rs @@ -11,13 +11,17 @@ use serai_validator_sets_primitives::*; pub enum Call { set_keys { network: ExternalNetworkId, - removed_participants: BoundedVec>, key_pair: KeyPair, + signature_participants: bitvec::vec::BitVec, signature: Signature, }, + 
set_embedded_elliptic_curve_key { + embedded_elliptic_curve: EmbeddedEllipticCurve, + key: BoundedVec>, + }, report_slashes { network: ExternalNetworkId, - slashes: BoundedVec<(SeraiAddress, u32), ConstU32<{ MAX_KEY_SHARES_PER_SET / 3 }>>, + slashes: SlashReport, signature: Signature, }, allocate { diff --git a/substrate/client/Cargo.toml b/substrate/client/Cargo.toml index f1184551..cb06ae05 100644 --- a/substrate/client/Cargo.toml +++ b/substrate/client/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/substrate/client" authors = ["Luke Parker "] keywords = ["serai"] edition = "2021" -rust-version = "1.74" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true @@ -18,10 +18,13 @@ workspace = true [dependencies] zeroize = "^1.5" -thiserror = { version = "1", optional = true } +thiserror = { version = "2", default-features = false, optional = true } + +bitvec = { version = "1", default-features = false, features = ["alloc", "serde"] } hex = "0.4" scale = { package = "parity-scale-codec", version = "3" } +borsh = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"], optional = true } serde_json = { version = "1", optional = true } @@ -39,7 +42,7 @@ simple-request = { path = "../../common/request", version = "0.1", optional = tr bitcoin = { version = "0.32", optional = true } ciphersuite = { path = "../../crypto/ciphersuite", version = "0.4", optional = true } -monero-wallet = { path = "../../networks/monero/wallet", version = "0.1.0", default-features = false, features = ["std"], optional = true } +monero-address = { path = "../../networks/monero/wallet/address", version = "0.1.0", default-features = false, features = ["std"], optional = true } [dev-dependencies] rand_core = "0.6" @@ -57,12 +60,13 @@ dockertest = "0.5" serai-docker-tests = { path = "../../tests/docker" } [features] -serai = ["thiserror", "serde", "serde_json", "serai-abi/serde", "multiaddr", "sp-core", "sp-runtime", "frame-system", "simple-request"] +serai = ["thiserror/std", "serde", "serde_json", "serai-abi/serde", "multiaddr", "sp-core", "sp-runtime", "frame-system", "simple-request"] borsh = ["serai-abi/borsh"] networks = [] bitcoin = ["networks", "dep:bitcoin"] -monero = ["networks", "ciphersuite/ed25519", "monero-wallet"] +ethereum = ["networks"] +monero = ["networks", "ciphersuite/ed25519", "monero-address"] # Assumes the default usage is to use Serai as a DEX, which doesn't actually # require connecting to a Serai node diff --git a/substrate/client/src/networks/bitcoin.rs b/substrate/client/src/networks/bitcoin.rs index 502bfb44..28f66053 100644 --- a/substrate/client/src/networks/bitcoin.rs +++ b/substrate/client/src/networks/bitcoin.rs @@ -1,6 +1,7 @@ use core::{str::FromStr, fmt}; use scale::{Encode, Decode}; +use borsh::{BorshSerialize, BorshDeserialize}; use bitcoin::{ hashes::{Hash as HashTrait, hash160::Hash}, @@ -10,47 +11,10 @@ use bitcoin::{ address::{AddressType, NetworkChecked, Address as BAddress}, }; -#[derive(Clone, Eq, Debug)] -pub struct Address(ScriptBuf); +use crate::primitives::ExternalAddress; -impl PartialEq for Address { - fn eq(&self, other: &Self) -> bool { - // Since Serai defines the Bitcoin-address specification as a variant of the script alone, - // define equivalency as the script alone - self.0 == other.0 - } -} - -impl From
for ScriptBuf { - fn from(addr: Address) -> ScriptBuf { - addr.0 - } -} - -impl FromStr for Address { - type Err = (); - fn from_str(str: &str) -> Result { - Address::new( - BAddress::from_str(str) - .map_err(|_| ())? - .require_network(Network::Bitcoin) - .map_err(|_| ())? - .script_pubkey(), - ) - .ok_or(()) - } -} - -impl fmt::Display for Address { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - BAddress::::from_script(&self.0, Network::Bitcoin) - .map_err(|_| fmt::Error)? - .fmt(f) - } -} - -// SCALE-encoded variant of Monero addresses. -#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] +// SCALE-encodable representation of Bitcoin addresses, used internally. +#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)] enum EncodedAddress { P2PKH([u8; 20]), P2SH([u8; 20]), @@ -59,34 +23,13 @@ enum EncodedAddress { P2TR([u8; 32]), } -impl TryFrom> for Address { +impl TryFrom<&ScriptBuf> for EncodedAddress { type Error = (); - fn try_from(data: Vec) -> Result { - Ok(Address(match EncodedAddress::decode(&mut data.as_ref()).map_err(|_| ())? { - EncodedAddress::P2PKH(hash) => { - ScriptBuf::new_p2pkh(&PubkeyHash::from_raw_hash(Hash::from_byte_array(hash))) - } - EncodedAddress::P2SH(hash) => { - ScriptBuf::new_p2sh(&ScriptHash::from_raw_hash(Hash::from_byte_array(hash))) - } - EncodedAddress::P2WPKH(hash) => { - ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap()) - } - EncodedAddress::P2WSH(hash) => { - ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap()) - } - EncodedAddress::P2TR(key) => { - ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V1, &key).unwrap()) - } - })) - } -} - -fn try_to_vec(addr: &Address) -> Result, ()> { - let parsed_addr = - BAddress::::from_script(&addr.0, Network::Bitcoin).map_err(|_| ())?; - Ok( - (match parsed_addr.address_type() { + fn try_from(script_buf: &ScriptBuf) -> Result { + // This uses mainnet as our encodings don't specify a network. + let parsed_addr = + BAddress::::from_script(script_buf, Network::Bitcoin).map_err(|_| ())?; + Ok(match parsed_addr.address_type() { Some(AddressType::P2pkh) => { EncodedAddress::P2PKH(*parsed_addr.pubkey_hash().unwrap().as_raw_hash().as_byte_array()) } @@ -110,23 +53,119 @@ fn try_to_vec(addr: &Address) -> Result, ()> { } _ => Err(())?, }) - .encode(), - ) + } } -impl From
for Vec { - fn from(addr: Address) -> Vec { +impl From for ScriptBuf { + fn from(encoded: EncodedAddress) -> Self { + match encoded { + EncodedAddress::P2PKH(hash) => { + ScriptBuf::new_p2pkh(&PubkeyHash::from_raw_hash(Hash::from_byte_array(hash))) + } + EncodedAddress::P2SH(hash) => { + ScriptBuf::new_p2sh(&ScriptHash::from_raw_hash(Hash::from_byte_array(hash))) + } + EncodedAddress::P2WPKH(hash) => { + ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap()) + } + EncodedAddress::P2WSH(hash) => { + ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap()) + } + EncodedAddress::P2TR(key) => { + ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V1, &key).unwrap()) + } + } + } +} + +/// A Bitcoin address usable with Serai. +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Address(ScriptBuf); + +// Support consuming into the underlying ScriptBuf. +impl From
for ScriptBuf { + fn from(addr: Address) -> ScriptBuf { + addr.0 + } +} + +impl From<&Address> for BAddress { + fn from(addr: &Address) -> BAddress { + // This fails if the script doesn't have an address representation, yet all our representable + // addresses' scripts do + BAddress::::from_script(&addr.0, Network::Bitcoin).unwrap() + } +} + +// Support converting a string into an address. +impl FromStr for Address { + type Err = (); + fn from_str(str: &str) -> Result { + Address::new( + BAddress::from_str(str) + .map_err(|_| ())? + .require_network(Network::Bitcoin) + .map_err(|_| ())? + .script_pubkey(), + ) + .ok_or(()) + } +} + +// Support converting an address into a string. +impl fmt::Display for Address { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + BAddress::from(self).fmt(f) + } +} + +impl TryFrom for Address { + type Error = (); + fn try_from(data: ExternalAddress) -> Result { + // Decode as an EncodedAddress, then map to a ScriptBuf + let mut data = data.as_ref(); + let encoded = EncodedAddress::decode(&mut data).map_err(|_| ())?; + if !data.is_empty() { + Err(())? + } + Ok(Address(ScriptBuf::from(encoded))) + } +} + +impl From
for EncodedAddress { + fn from(addr: Address) -> EncodedAddress { // Safe since only encodable addresses can be created - try_to_vec(&addr).unwrap() + EncodedAddress::try_from(&addr.0).unwrap() + } +} + +impl From
for ExternalAddress { + fn from(addr: Address) -> ExternalAddress { + // Safe since all variants are fixed-length and fit into MAX_ADDRESS_LEN + ExternalAddress::new(EncodedAddress::from(addr).encode()).unwrap() + } +} + +impl BorshSerialize for Address { + fn serialize(&self, writer: &mut W) -> borsh::io::Result<()> { + EncodedAddress::from(self.clone()).serialize(writer) + } +} + +impl BorshDeserialize for Address { + fn deserialize_reader(reader: &mut R) -> borsh::io::Result { + Ok(Self(ScriptBuf::from(EncodedAddress::deserialize_reader(reader)?))) + } } impl Address { - pub fn new(address: ScriptBuf) -> Option { - let res = Self(address); - if try_to_vec(&res).is_ok() { - return Some(res); + /// Create a new Address from a ScriptBuf. + pub fn new(script_buf: ScriptBuf) -> Option { + // If we can represent this Script, it's an acceptable address + if EncodedAddress::try_from(&script_buf).is_ok() { + return Some(Self(script_buf)); } + // Else, it isn't acceptable None } } diff --git a/substrate/client/src/networks/ethereum.rs b/substrate/client/src/networks/ethereum.rs new file mode 100644 index 00000000..7e94dfb8 --- /dev/null +++ b/substrate/client/src/networks/ethereum.rs @@ -0,0 +1,129 @@ +use core::str::FromStr; +use std::io::Read; + +use borsh::{BorshSerialize, BorshDeserialize}; + +use crate::primitives::{MAX_ADDRESS_LEN, ExternalAddress}; + +/// The maximum amount of gas an address is allowed to specify as its gas limit. +/// +/// Payments to an address with a gas limit which exceeds this value will be dropped entirely. +pub const ADDRESS_GAS_LIMIT: u32 = 950_000; + +#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] +pub struct ContractDeployment { + /// The gas limit to use for this contract's execution. + /// + /// This MUST be less than the Serai gas limit. The cost of it, and the associated costs with + /// making this transaction, will be deducted from the amount transferred. + gas_limit: u32, + /// The initialization code of the contract to deploy. + /// + /// This contract will be deployed (executing the initialization code). No further calls will + /// be made. + code: Vec, +} + +/// A contract to deploy, enabling executing arbitrary code. +impl ContractDeployment { + pub fn new(gas_limit: u32, code: Vec) -> Option { + // Check the gas limit is less than the address gas limit + if gas_limit > ADDRESS_GAS_LIMIT { + None?; + } + + // The max address length, minus the type byte, minus the size of the gas + const MAX_CODE_LEN: usize = (MAX_ADDRESS_LEN as usize) - (1 + core::mem::size_of::()); + if code.len() > MAX_CODE_LEN { + None?; + } + + Some(Self { gas_limit, code }) + } + + pub fn gas_limit(&self) -> u32 { + self.gas_limit + } + pub fn code(&self) -> &[u8] { + &self.code + } +} + +/// A representation of an Ethereum address. +#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] +pub enum Address { + /// A traditional address. + Address([u8; 20]), + /// A contract to deploy, enabling executing arbitrary code.
+ Contract(ContractDeployment), +} + +impl From<[u8; 20]> for Address { + fn from(address: [u8; 20]) -> Self { + Address::Address(address) + } +} + +impl TryFrom for Address { + type Error = (); + fn try_from(data: ExternalAddress) -> Result { + let mut kind = [0xff]; + let mut reader: &[u8] = data.as_ref(); + reader.read_exact(&mut kind).map_err(|_| ())?; + Ok(match kind[0] { + 0 => { + let mut address = [0xff; 20]; + reader.read_exact(&mut address).map_err(|_| ())?; + Address::Address(address) + } + 1 => { + let mut gas_limit = [0xff; 4]; + reader.read_exact(&mut gas_limit).map_err(|_| ())?; + Address::Contract(ContractDeployment { + gas_limit: { + let gas_limit = u32::from_le_bytes(gas_limit); + if gas_limit > ADDRESS_GAS_LIMIT { + Err(())?; + } + gas_limit + }, + // The code is whatever's left since the ExternalAddress is a delimited container of + // appropriately bounded length + code: reader.to_vec(), + }) + } + _ => Err(())?, + }) + } +} +impl From
for ExternalAddress { + fn from(address: Address) -> ExternalAddress { + let mut res = Vec::with_capacity(1 + 20); + match address { + Address::Address(address) => { + res.push(0); + res.extend(&address); + } + Address::Contract(ContractDeployment { gas_limit, code }) => { + res.push(1); + res.extend(&gas_limit.to_le_bytes()); + res.extend(&code); + } + } + // We only construct addresses whose code is small enough this can safely be constructed + ExternalAddress::new(res).unwrap() + } +} + +impl FromStr for Address { + type Err = (); + fn from_str(str: &str) -> Result { + let Some(address) = str.strip_prefix("0x") else { Err(())? }; + if address.len() != 40 { + Err(())? + }; + Ok(Address::Address( + hex::decode(address.to_lowercase()).map_err(|_| ())?.try_into().map_err(|_| ())?, + )) + } +} diff --git a/substrate/client/src/networks/mod.rs b/substrate/client/src/networks/mod.rs index 63ebf481..7a99631a 100644 --- a/substrate/client/src/networks/mod.rs +++ b/substrate/client/src/networks/mod.rs @@ -1,5 +1,8 @@ #[cfg(feature = "bitcoin")] pub mod bitcoin; +#[cfg(feature = "ethereum")] +pub mod ethereum; + #[cfg(feature = "monero")] pub mod monero; diff --git a/substrate/client/src/networks/monero.rs b/substrate/client/src/networks/monero.rs index bd5e0a15..c99a0abd 100644 --- a/substrate/client/src/networks/monero.rs +++ b/substrate/client/src/networks/monero.rs @@ -1,102 +1,141 @@ use core::{str::FromStr, fmt}; -use scale::{Encode, Decode}; - use ciphersuite::{Ciphersuite, Ed25519}; -use monero_wallet::address::{AddressError, Network, AddressType, MoneroAddress}; +use monero_address::{Network, AddressType as MoneroAddressType, MoneroAddress}; -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Address(MoneroAddress); -impl Address { - pub fn new(address: MoneroAddress) -> Option
{ - if address.payment_id().is_some() { - return None; - } - Some(Address(address)) - } -} +use crate::primitives::ExternalAddress; -impl FromStr for Address { - type Err = AddressError; - fn from_str(str: &str) -> Result { - MoneroAddress::from_str(Network::Mainnet, str).map(Address) - } -} - -impl fmt::Display for Address { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} - -// SCALE-encoded variant of Monero addresses. -#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] -enum EncodedAddressType { +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +enum AddressType { Legacy, Subaddress, Featured(u8), } -#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] -struct EncodedAddress { - kind: EncodedAddressType, - spend: [u8; 32], - view: [u8; 32], +/// A representation of a Monero address. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct Address { + kind: AddressType, + spend: ::G, + view: ::G, } -impl TryFrom> for Address { - type Error = (); - fn try_from(data: Vec) -> Result { - // Decode as SCALE - let addr = EncodedAddress::decode(&mut data.as_ref()).map_err(|_| ())?; - // Convert over - Ok(Address(MoneroAddress::new( - Network::Mainnet, - match addr.kind { - EncodedAddressType::Legacy => AddressType::Legacy, - EncodedAddressType::Subaddress => AddressType::Subaddress, - EncodedAddressType::Featured(flags) => { - let subaddress = (flags & 1) != 0; - let integrated = (flags & (1 << 1)) != 0; - let guaranteed = (flags & (1 << 2)) != 0; - if integrated { - Err(())?; - } - AddressType::Featured { subaddress, payment_id: None, guaranteed } - } - }, - Ed25519::read_G::<&[u8]>(&mut addr.spend.as_ref()).map_err(|_| ())?.0, - Ed25519::read_G::<&[u8]>(&mut addr.view.as_ref()).map_err(|_| ())?.0, - ))) - } -} - -#[allow(clippy::from_over_into)] -impl Into for Address { - fn into(self) -> MoneroAddress { - self.0 - } -} - -#[allow(clippy::from_over_into)] -impl Into> for Address { - fn into(self) -> Vec { - EncodedAddress { - kind: match self.0.kind() { - AddressType::Legacy => EncodedAddressType::Legacy, - AddressType::LegacyIntegrated(_) => { - panic!("integrated address became Serai Monero address") - } - AddressType::Subaddress => EncodedAddressType::Subaddress, - AddressType::Featured { subaddress, payment_id, guaranteed } => { - debug_assert!(payment_id.is_none()); - EncodedAddressType::Featured(u8::from(*subaddress) + (u8::from(*guaranteed) << 2)) - } - }, - spend: self.0.spend().compress().0, - view: self.0.view().compress().0, +fn byte_for_kind(kind: AddressType) -> u8 { + // We use the second and third highest bits for the type + // This leaves the top bit open for interpretation as a VarInt later + match kind { + AddressType::Legacy => 0, + AddressType::Subaddress => 1 << 5, + AddressType::Featured(flags) => { + // The flags only take up the low three bits + debug_assert!(flags <= 0b111); + (2 << 5) | flags } - .encode() + } +} + +impl borsh::BorshSerialize for Address { + fn serialize(&self, writer: &mut W) -> borsh::io::Result<()> { + writer.write_all(&[byte_for_kind(self.kind)])?; + writer.write_all(&self.spend.compress().to_bytes())?; + writer.write_all(&self.view.compress().to_bytes()) + } +} +impl borsh::BorshDeserialize for Address { + fn deserialize_reader(reader: &mut R) -> borsh::io::Result { + let mut kind_byte = [0xff]; + reader.read_exact(&mut kind_byte)?; + let kind_byte = kind_byte[0]; + let kind = match kind_byte >> 5 { + 0 => AddressType::Legacy, + 1 => AddressType::Subaddress, + 2 => AddressType::Featured(kind_byte & 0b111), 
+ _ => Err(borsh::io::Error::other("unrecognized type"))?, + }; + // Check this wasn't malleated + if byte_for_kind(kind) != kind_byte { + Err(borsh::io::Error::other("malleated type byte"))?; + } + let spend = Ed25519::read_G(reader)?; + let view = Ed25519::read_G(reader)?; + Ok(Self { kind, spend, view }) + } +} + +impl TryFrom for Address { + type Error = (); + fn try_from(address: MoneroAddress) -> Result { + let spend = address.spend().compress().to_bytes(); + let view = address.view().compress().to_bytes(); + let kind = match address.kind() { + MoneroAddressType::Legacy => AddressType::Legacy, + MoneroAddressType::LegacyIntegrated(_) => Err(())?, + MoneroAddressType::Subaddress => AddressType::Subaddress, + MoneroAddressType::Featured { subaddress, payment_id, guaranteed } => { + if payment_id.is_some() { + Err(())? + } + // This maintains the same bit layout as featured addresses use + AddressType::Featured(u8::from(*subaddress) + (u8::from(*guaranteed) << 2)) + } + }; + Ok(Address { + kind, + spend: Ed25519::read_G(&mut spend.as_slice()).map_err(|_| ())?, + view: Ed25519::read_G(&mut view.as_slice()).map_err(|_| ())?, + }) + } +} + +impl From
for MoneroAddress { + fn from(address: Address) -> MoneroAddress { + let kind = match address.kind { + AddressType::Legacy => MoneroAddressType::Legacy, + AddressType::Subaddress => MoneroAddressType::Subaddress, + AddressType::Featured(features) => { + debug_assert!(features <= 0b111); + let subaddress = (features & 1) != 0; + let integrated = (features & (1 << 1)) != 0; + debug_assert!(!integrated); + let guaranteed = (features & (1 << 2)) != 0; + MoneroAddressType::Featured { subaddress, payment_id: None, guaranteed } + } + }; + MoneroAddress::new(Network::Mainnet, kind, address.spend.0, address.view.0) + } +} + +impl TryFrom for Address { + type Error = (); + fn try_from(data: ExternalAddress) -> Result { + // Decode as an Address + let mut data = data.as_ref(); + let address = +
::deserialize_reader(&mut data).map_err(|_| ())?; + if !data.is_empty() { + Err(())? + } + Ok(address) + } +} +impl From
for ExternalAddress { + fn from(address: Address) -> ExternalAddress { + // This is 65 bytes which is less than MAX_ADDRESS_LEN + ExternalAddress::new(borsh::to_vec(&address).unwrap()).unwrap() + } +} + +impl FromStr for Address { + type Err = (); + fn from_str(str: &str) -> Result { + let Ok(address) = MoneroAddress::from_str(Network::Mainnet, str) else { Err(())? }; + Address::try_from(address) + } +} + +impl fmt::Display for Address { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + MoneroAddress::from(*self).fmt(f) } } diff --git a/substrate/client/src/serai/coins.rs b/substrate/client/src/serai/coins.rs index c5bef95d..2da598fd 100644 --- a/substrate/client/src/serai/coins.rs +++ b/substrate/client/src/serai/coins.rs @@ -12,7 +12,7 @@ pub type CoinsEvent = serai_abi::coins::Event; #[derive(Clone, Copy)] pub struct SeraiCoins<'a>(pub(crate) &'a TemporalSerai<'a>); -impl<'a> SeraiCoins<'a> { +impl SeraiCoins<'_> { pub async fn mint_events(&self) -> Result, SeraiError> { self .0 diff --git a/substrate/client/src/serai/dex.rs b/substrate/client/src/serai/dex.rs index 1c44107f..f820d3de 100644 --- a/substrate/client/src/serai/dex.rs +++ b/substrate/client/src/serai/dex.rs @@ -9,7 +9,7 @@ const PALLET: &str = "Dex"; #[derive(Clone, Copy)] pub struct SeraiDex<'a>(pub(crate) &'a TemporalSerai<'a>); -impl<'a> SeraiDex<'a> { +impl SeraiDex<'_> { pub async fn events(&self) -> Result, SeraiError> { self .0 diff --git a/substrate/client/src/serai/genesis_liquidity.rs b/substrate/client/src/serai/genesis_liquidity.rs index e8a152fa..fbbf0d6d 100644 --- a/substrate/client/src/serai/genesis_liquidity.rs +++ b/substrate/client/src/serai/genesis_liquidity.rs @@ -15,7 +15,7 @@ const PALLET: &str = "GenesisLiquidity"; #[derive(Clone, Copy)] pub struct SeraiGenesisLiquidity<'a>(pub(crate) &'a TemporalSerai<'a>); -impl<'a> SeraiGenesisLiquidity<'a> { +impl SeraiGenesisLiquidity<'_> { pub async fn events(&self) -> Result, SeraiError> { self .0 diff --git a/substrate/client/src/serai/in_instructions.rs b/substrate/client/src/serai/in_instructions.rs index d27ce695..db9a4f78 100644 --- a/substrate/client/src/serai/in_instructions.rs +++ b/substrate/client/src/serai/in_instructions.rs @@ -1,10 +1,7 @@ pub use serai_abi::in_instructions::primitives; use primitives::SignedBatch; -use crate::{ - primitives::{BlockHash, ExternalNetworkId}, - Transaction, SeraiError, Serai, TemporalSerai, -}; +use crate::{primitives::ExternalNetworkId, Transaction, SeraiError, Serai, TemporalSerai}; pub type InInstructionsEvent = serai_abi::in_instructions::Event; @@ -12,14 +9,7 @@ const PALLET: &str = "InInstructions"; #[derive(Clone, Copy)] pub struct SeraiInInstructions<'a>(pub(crate) &'a TemporalSerai<'a>); -impl<'a> SeraiInInstructions<'a> { - pub async fn latest_block_for_network( - &self, - network: ExternalNetworkId, - ) -> Result, SeraiError> { - self.0.storage(PALLET, "LatestNetworkBlock", network).await - } - +impl SeraiInInstructions<'_> { pub async fn last_batch_for_network( &self, network: ExternalNetworkId, diff --git a/substrate/client/src/serai/liquidity_tokens.rs b/substrate/client/src/serai/liquidity_tokens.rs index c7ec93cf..e8706a64 100644 --- a/substrate/client/src/serai/liquidity_tokens.rs +++ b/substrate/client/src/serai/liquidity_tokens.rs @@ -8,7 +8,7 @@ const PALLET: &str = "LiquidityTokens"; #[derive(Clone, Copy)] pub struct SeraiLiquidityTokens<'a>(pub(crate) &'a TemporalSerai<'a>); -impl<'a> SeraiLiquidityTokens<'a> { +impl SeraiLiquidityTokens<'_> { pub async fn token_supply(&self, coin: 
ExternalCoin) -> Result { Ok(self.0.storage(PALLET, "Supply", coin).await?.unwrap_or(Amount(0))) } diff --git a/substrate/client/src/serai/mod.rs b/substrate/client/src/serai/mod.rs index b1894df1..ceb61ada 100644 --- a/substrate/client/src/serai/mod.rs +++ b/substrate/client/src/serai/mod.rs @@ -46,17 +46,17 @@ impl Block { } /// Returns the time of this block, set by its producer, in milliseconds since the epoch. - pub fn time(&self) -> Result { + pub fn time(&self) -> Option { for transaction in &self.transactions { if let Call::Timestamp(timestamp::Call::set { now }) = transaction.call() { - return Ok(*now); + return Some(*now); } } - Err(SeraiError::InvalidNode("no time was present in block".to_string())) + None } } -#[derive(Error, Debug)] +#[derive(Debug, Error)] pub enum SeraiError { #[error("failed to communicate with serai")] ConnectionError, @@ -81,7 +81,7 @@ pub struct TemporalSerai<'a> { block: [u8; 32], events: RwLock>, } -impl<'a> Clone for TemporalSerai<'a> { +impl Clone for TemporalSerai<'_> { fn clone(&self) -> Self { Self { serai: self.serai, block: self.block, events: RwLock::new(None) } } @@ -314,7 +314,7 @@ impl Serai { /// Return the P2P Multiaddrs for the validators of the specified network. pub async fn p2p_validators( &self, - network: NetworkId, + network: ExternalNetworkId, ) -> Result, SeraiError> { self.call("p2p_validators", network).await } @@ -338,7 +338,7 @@ impl Serai { } } -impl<'a> TemporalSerai<'a> { +impl TemporalSerai<'_> { async fn events( &self, filter_map: impl Fn(&Event) -> Option, @@ -408,27 +408,27 @@ impl<'a> TemporalSerai<'a> { }) } - pub fn coins(&'a self) -> SeraiCoins<'a> { + pub fn coins(&self) -> SeraiCoins<'_> { SeraiCoins(self) } - pub fn dex(&'a self) -> SeraiDex<'a> { + pub fn dex(&self) -> SeraiDex<'_> { SeraiDex(self) } - pub fn in_instructions(&'a self) -> SeraiInInstructions<'a> { + pub fn in_instructions(&self) -> SeraiInInstructions<'_> { SeraiInInstructions(self) } - pub fn validator_sets(&'a self) -> SeraiValidatorSets<'a> { + pub fn validator_sets(&self) -> SeraiValidatorSets<'_> { SeraiValidatorSets(self) } - pub fn genesis_liquidity(&'a self) -> SeraiGenesisLiquidity { + pub fn genesis_liquidity(&self) -> SeraiGenesisLiquidity { SeraiGenesisLiquidity(self) } - pub fn liquidity_tokens(&'a self) -> SeraiLiquidityTokens { + pub fn liquidity_tokens(&self) -> SeraiLiquidityTokens { SeraiLiquidityTokens(self) } } diff --git a/substrate/client/src/serai/validator_sets.rs b/substrate/client/src/serai/validator_sets.rs index 07339ef8..a978b494 100644 --- a/substrate/client/src/serai/validator_sets.rs +++ b/substrate/client/src/serai/validator_sets.rs @@ -1,13 +1,14 @@ use scale::Encode; use sp_core::sr25519::{Public, Signature}; +use sp_runtime::BoundedVec; use serai_abi::{primitives::Amount, validator_sets::primitives::ExternalValidatorSet}; pub use serai_abi::validator_sets::primitives; -use primitives::{Session, KeyPair}; +use primitives::{MAX_KEY_LEN, Session, KeyPair, SlashReport}; use crate::{ - primitives::{NetworkId, ExternalNetworkId, SeraiAddress}, + primitives::{NetworkId, ExternalNetworkId, EmbeddedEllipticCurve}, Transaction, Serai, TemporalSerai, SeraiError, }; @@ -17,7 +18,7 @@ pub type ValidatorSetsEvent = serai_abi::validator_sets::Event; #[derive(Clone, Copy)] pub struct SeraiValidatorSets<'a>(pub(crate) &'a TemporalSerai<'a>); -impl<'a> SeraiValidatorSets<'a> { +impl SeraiValidatorSets<'_> { pub async fn new_set_events(&self) -> Result, SeraiError> { self .0 @@ -107,6 +108,21 @@ impl<'a> SeraiValidatorSets<'a> { 
self.0.storage(PALLET, "CurrentSession", network).await } + pub async fn embedded_elliptic_curve_key( + &self, + validator: Public, + embedded_elliptic_curve: EmbeddedEllipticCurve, + ) -> Result>, SeraiError> { + self + .0 + .storage( + PALLET, + "EmbeddedEllipticCurveKeys", + (sp_core::hashing::blake2_128(&validator.encode()), validator, embedded_elliptic_curve), + ) + .await + } + pub async fn participants( &self, network: NetworkId, @@ -188,21 +204,30 @@ impl<'a> SeraiValidatorSets<'a> { pub fn set_keys( network: ExternalNetworkId, - removed_participants: sp_runtime::BoundedVec< - SeraiAddress, - sp_core::ConstU32<{ primitives::MAX_KEY_SHARES_PER_SET / 3 }>, - >, key_pair: KeyPair, + signature_participants: bitvec::vec::BitVec, signature: Signature, ) -> Transaction { Serai::unsigned(serai_abi::Call::ValidatorSets(serai_abi::validator_sets::Call::set_keys { network, - removed_participants, key_pair, + signature_participants, signature, })) } + pub fn set_embedded_elliptic_curve_key( + embedded_elliptic_curve: EmbeddedEllipticCurve, + key: BoundedVec>, + ) -> serai_abi::Call { + serai_abi::Call::ValidatorSets( + serai_abi::validator_sets::Call::set_embedded_elliptic_curve_key { + embedded_elliptic_curve, + key, + }, + ) + } + pub fn allocate(network: NetworkId, amount: Amount) -> serai_abi::Call { serai_abi::Call::ValidatorSets(serai_abi::validator_sets::Call::allocate { network, amount }) } @@ -213,10 +238,7 @@ impl<'a> SeraiValidatorSets<'a> { pub fn report_slashes( network: ExternalNetworkId, - slashes: sp_runtime::BoundedVec< - (SeraiAddress, u32), - sp_core::ConstU32<{ primitives::MAX_KEY_SHARES_PER_SET / 3 }>, - >, + slashes: SlashReport, signature: Signature, ) -> Transaction { Serai::unsigned(serai_abi::Call::ValidatorSets( diff --git a/substrate/client/tests/batch.rs b/substrate/client/tests/batch.rs index 1a5b3866..2d32462f 100644 --- a/substrate/client/tests/batch.rs +++ b/substrate/client/tests/batch.rs @@ -8,12 +8,13 @@ use blake2::{ use scale::Encode; use serai_client::{ - primitives::{Amount, BlockHash, ExternalBalance, ExternalCoin, SeraiAddress}, + primitives::{BlockHash, ExternalCoin, Amount, ExternalBalance, SeraiAddress}, + coins::CoinsEvent, + validator_sets::primitives::Session, in_instructions::{ primitives::{InInstruction, InInstructionWithBalance, Batch}, InInstructionsEvent, }, - coins::CoinsEvent, Serai, }; @@ -23,8 +24,6 @@ use common::in_instructions::provide_batch; serai_test!( publish_batch: (|serai: Serai| async move { let id = 0; - let mut block_hash = BlockHash([0; 32]); - OsRng.fill_bytes(&mut block_hash.0); let mut address = SeraiAddress::new([0; 32]); OsRng.fill_bytes(&mut address.0); @@ -34,10 +33,13 @@ serai_test!( let amount = Amount(OsRng.next_u64().saturating_add(1)); let balance = ExternalBalance { coin, amount }; + let mut external_network_block_hash = BlockHash([0; 32]); + OsRng.fill_bytes(&mut external_network_block_hash.0); + let batch = Batch { network, id, - block: block_hash, + external_network_block_hash, instructions: vec![InInstructionWithBalance { instruction: InInstruction::Transfer(address), balance, @@ -49,16 +51,16 @@ serai_test!( let serai = serai.as_of(block); { let serai = serai.in_instructions(); - let latest_finalized = serai.latest_block_for_network(network).await.unwrap(); - assert_eq!(latest_finalized, Some(block_hash)); let batches = serai.batch_events().await.unwrap(); assert_eq!( batches, vec![InInstructionsEvent::Batch { network, + publishing_session: Session(0), id, - block: block_hash, - instructions_hash: 
Blake2b::::digest(batch.instructions.encode()).into(), + external_network_block_hash, + in_instructions_hash: Blake2b::::digest(batch.instructions.encode()).into(), + in_instruction_results: bitvec::bitvec![u8, bitvec::order::Lsb0; 1; 1], }] ); } diff --git a/substrate/client/tests/burn.rs b/substrate/client/tests/burn.rs index 3dc0c127..8351781e 100644 --- a/substrate/client/tests/burn.rs +++ b/substrate/client/tests/burn.rs @@ -7,19 +7,22 @@ use blake2::{ use scale::Encode; -use serai_abi::coins::primitives::OutInstructionWithBalance; use sp_core::Pair; use serai_client::{ primitives::{ - Amount, ExternalCoin, ExternalBalance, BlockHash, SeraiAddress, Data, ExternalAddress, + BlockHash, ExternalCoin, Amount, ExternalBalance, SeraiAddress, ExternalAddress, insecure_pair_from_name, }, + coins::{ + primitives::{OutInstruction, OutInstructionWithBalance}, + CoinsEvent, + }, + validator_sets::primitives::Session, in_instructions::{ InInstructionsEvent, primitives::{InInstruction, InInstructionWithBalance, Batch}, }, - coins::{primitives::OutInstruction, CoinsEvent}, Serai, SeraiCoins, }; @@ -44,7 +47,7 @@ serai_test!( let batch = Batch { network, id, - block: block_hash, + external_network_block_hash: block_hash, instructions: vec![InInstructionWithBalance { instruction: InInstruction::Transfer(address), balance, @@ -54,17 +57,19 @@ serai_test!( let block = provide_batch(&serai, batch.clone()).await; let instruction = { - let serai = serai.as_of(block); - let batches = serai.in_instructions().batch_events().await.unwrap(); - assert_eq!( - batches, - vec![InInstructionsEvent::Batch { - network, - id, - block: block_hash, - instructions_hash: Blake2b::::digest(batch.instructions.encode()).into(), - }] - ); + let serai = serai.as_of(block); + let batches = serai.in_instructions().batch_events().await.unwrap(); + assert_eq!( + batches, + vec![InInstructionsEvent::Batch { + network, + publishing_session: Session(0), + id, + external_network_block_hash: block_hash, + in_instructions_hash: Blake2b::::digest(batch.instructions.encode()).into(), + in_instruction_results: bitvec::bitvec![u8, bitvec::order::Lsb0; 1; 1], + }] + ); assert_eq!( serai.coins().mint_events().await.unwrap(), @@ -73,20 +78,16 @@ serai_test!( assert_eq!(serai.coins().coin_supply(coin.into()).await.unwrap(), amount); assert_eq!(serai.coins().coin_balance(coin.into(), address).await.unwrap(), amount); - // Now burn it - let mut rand_bytes = vec![0; 32]; - OsRng.fill_bytes(&mut rand_bytes); - let external_address = ExternalAddress::new(rand_bytes).unwrap(); + // Now burn it + let mut rand_bytes = vec![0; 32]; + OsRng.fill_bytes(&mut rand_bytes); + let external_address = ExternalAddress::new(rand_bytes).unwrap(); - let mut rand_bytes = vec![0; 32]; - OsRng.fill_bytes(&mut rand_bytes); - let data = Data::new(rand_bytes).unwrap(); - - OutInstructionWithBalance { - balance, - instruction: OutInstruction { address: external_address, data: Some(data) }, - } -}; + OutInstructionWithBalance { + balance, + instruction: OutInstruction { address: external_address }, + } + }; let block = publish_tx( &serai, diff --git a/substrate/client/tests/common/genesis_liquidity.rs b/substrate/client/tests/common/genesis_liquidity.rs index 55824d36..cba6bdea 100644 --- a/substrate/client/tests/common/genesis_liquidity.rs +++ b/substrate/client/tests/common/genesis_liquidity.rs @@ -10,13 +10,13 @@ use schnorrkel::Schnorrkel; use sp_core::{sr25519::Signature, Pair as PairTrait}; use serai_abi::{ - genesis_liquidity::primitives::{oraclize_values_message, 
Values}, - in_instructions::primitives::{Batch, InInstruction, InInstructionWithBalance}, primitives::{ - insecure_pair_from_name, Amount, ExternalBalance, BlockHash, ExternalCoin, ExternalNetworkId, - NetworkId, SeraiAddress, EXTERNAL_COINS, + EXTERNAL_COINS, BlockHash, ExternalNetworkId, NetworkId, ExternalCoin, Amount, ExternalBalance, + SeraiAddress, insecure_pair_from_name, }, - validator_sets::primitives::{musig_context, Session, ValidatorSet}, + validator_sets::primitives::{Session, ValidatorSet, musig_context}, + genesis_liquidity::primitives::{Values, oraclize_values_message}, + in_instructions::primitives::{InInstruction, InInstructionWithBalance, Batch}, }; use serai_client::{Serai, SeraiGenesisLiquidity}; @@ -53,7 +53,7 @@ pub async fn set_up_genesis( }) .collect::>(); - // set up bloch hash + // set up block hash let mut block = BlockHash([0; 32]); OsRng.fill_bytes(&mut block.0); @@ -65,8 +65,12 @@ pub async fn set_up_genesis( }) .or_insert(0); - let batch = - Batch { network: coin.network(), id: batch_ids[&coin.network()], block, instructions }; + let batch = Batch { + network: coin.network(), + external_network_block_hash: block, + id: batch_ids[&coin.network()], + instructions, + }; provide_batch(serai, batch).await; } diff --git a/substrate/client/tests/common/in_instructions.rs b/substrate/client/tests/common/in_instructions.rs index a87e3292..87e26c5d 100644 --- a/substrate/client/tests/common/in_instructions.rs +++ b/substrate/client/tests/common/in_instructions.rs @@ -9,7 +9,7 @@ use scale::Encode; use sp_core::Pair; use serai_client::{ - primitives::{insecure_pair_from_name, BlockHash, ExternalBalance, SeraiAddress}, + primitives::{BlockHash, ExternalBalance, SeraiAddress, insecure_pair_from_name}, validator_sets::primitives::{ExternalValidatorSet, KeyPair}, in_instructions::{ primitives::{Batch, SignedBatch, batch_message, InInstruction, InInstructionWithBalance}, @@ -45,17 +45,29 @@ pub async fn provide_batch(serai: &Serai, batch: Batch) -> [u8; 32] { ) .await; - let batches = serai.as_of(block).in_instructions().batch_events().await.unwrap(); - // TODO: impl From for BatchEvent? 
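The `Batch` event now reports per-instruction outcomes as a `bitvec` bitfield (`in_instruction_results`), as exercised by the test changes in this patch. A small standalone sketch of building and reading such a bitfield with the `bitvec` crate, using illustrative names rather than the pallet's actual types:

```rust
use bitvec::prelude::*;

fn main() {
  // Hypothetical batch of three InInstructions, all initially marked as successful.
  let mut in_instruction_results: BitVec<u8, Lsb0> = bitvec![u8, Lsb0; 1; 3];
  // Mark the second instruction as having failed.
  in_instruction_results.set(1, false);

  // Read the per-instruction results back out.
  for (i, succeeded) in in_instruction_results.iter().by_vals().enumerate() {
    println!("instruction {i} succeeded: {succeeded}");
  }
}
```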
- assert_eq!( - batches, - vec![InInstructionsEvent::Batch { - network: batch.network, - id: batch.id, - block: batch.block, - instructions_hash: Blake2b::::digest(batch.instructions.encode()).into(), - }], - ); + { + let mut batches = serai.as_of(block).in_instructions().batch_events().await.unwrap(); + assert_eq!(batches.len(), 1); + let InInstructionsEvent::Batch { + network, + publishing_session, + id, + external_network_block_hash, + in_instructions_hash, + in_instruction_results: _, + } = batches.swap_remove(0) + else { + panic!("Batch event wasn't Batch event") + }; + assert_eq!(network, batch.network); + assert_eq!(publishing_session, session); + assert_eq!(id, batch.id); + assert_eq!(external_network_block_hash, batch.external_network_block_hash); + assert_eq!( + in_instructions_hash, + <[u8; 32]>::from(Blake2b::::digest(batch.instructions.encode())) + ); + } // TODO: Check the tokens events @@ -75,7 +87,7 @@ pub async fn mint_coin( let batch = Batch { network: balance.coin.network(), id: batch_id, - block: block_hash, + external_network_block_hash: block_hash, instructions: vec![InInstructionWithBalance { instruction: InInstruction::Transfer(address), balance, diff --git a/substrate/client/tests/common/validator_sets.rs b/substrate/client/tests/common/validator_sets.rs index 20f7e951..008cb3fc 100644 --- a/substrate/client/tests/common/validator_sets.rs +++ b/substrate/client/tests/common/validator_sets.rs @@ -5,6 +5,8 @@ use zeroize::Zeroizing; use rand_core::OsRng; use sp_core::{ + ConstU32, + bounded_vec::BoundedVec, sr25519::{Pair, Signature}, Pair as PairTrait, }; @@ -14,11 +16,12 @@ use frost::dkg::musig::musig; use schnorrkel::Schnorrkel; use serai_client::{ + primitives::{EmbeddedEllipticCurve, Amount}, validator_sets::{ - primitives::{ExternalValidatorSet, KeyPair, musig_context, set_keys_message}, + primitives::{MAX_KEY_LEN, ExternalValidatorSet, KeyPair, musig_context, set_keys_message}, ValidatorSetsEvent, }, - Amount, Serai, SeraiValidatorSets, + SeraiValidatorSets, Serai, }; use crate::common::tx::publish_tx; @@ -59,7 +62,7 @@ pub async fn set_keys( let sig = frost::tests::sign_without_caching( &mut OsRng, frost::tests::algorithm_machines(&mut OsRng, &Schnorrkel::new(b"substrate"), &musig_keys), - &set_keys_message(&set, &[], &key_pair), + &set_keys_message(&set, &key_pair), ); // Set the key pair @@ -67,8 +70,8 @@ pub async fn set_keys( serai, &SeraiValidatorSets::set_keys( set.network, - vec![].try_into().unwrap(), key_pair.clone(), + bitvec::bitvec!(u8, bitvec::prelude::Lsb0; 1; musig_keys.len()), Signature(sig.to_bytes()), ), ) @@ -83,6 +86,24 @@ pub async fn set_keys( block } +#[allow(dead_code)] +pub async fn set_embedded_elliptic_curve_key( + serai: &Serai, + pair: &Pair, + embedded_elliptic_curve: EmbeddedEllipticCurve, + key: BoundedVec>, + nonce: u32, +) -> [u8; 32] { + // get the call + let tx = serai.sign( + pair, + SeraiValidatorSets::set_embedded_elliptic_curve_key(embedded_elliptic_curve, key), + nonce, + 0, + ); + publish_tx(serai, &tx).await +} + #[allow(dead_code)] pub async fn allocate_stake( serai: &Serai, diff --git a/substrate/client/tests/dex.rs b/substrate/client/tests/dex.rs index f41eef6b..93422f5e 100644 --- a/substrate/client/tests/dex.rs +++ b/substrate/client/tests/dex.rs @@ -6,8 +6,8 @@ use serai_abi::in_instructions::primitives::DexCall; use serai_client::{ primitives::{ - Amount, Coin, Balance, BlockHash, insecure_pair_from_name, ExternalAddress, SeraiAddress, - ExternalCoin, ExternalBalance, + BlockHash, ExternalCoin, Coin, Amount, 
ExternalBalance, Balance, SeraiAddress, ExternalAddress, + insecure_pair_from_name, }, in_instructions::primitives::{ InInstruction, InInstructionWithBalance, Batch, IN_INSTRUCTION_EXECUTOR, OutAddress, @@ -247,7 +247,7 @@ serai_test!( let batch = Batch { network: coin.network(), id: batch_id, - block: block_hash, + external_network_block_hash: block_hash, instructions: vec![InInstructionWithBalance { instruction: InInstruction::Dex(DexCall::SwapAndAddLiquidity(pair.public().into())), balance: ExternalBalance { coin, amount: Amount(20_000_000_000_000) }, @@ -329,7 +329,7 @@ serai_test!( let batch = Batch { network: coin1.network(), id: coin1_batch_id, - block: block_hash, + external_network_block_hash: block_hash, instructions: vec![InInstructionWithBalance { instruction: InInstruction::Dex(DexCall::Swap(out_balance, out_address)), balance: ExternalBalance { coin: coin1, amount: Amount(200_000_000_000_000) }, @@ -369,7 +369,7 @@ serai_test!( let batch = Batch { network: coin2.network(), id: coin2_batch_id, - block: block_hash, + external_network_block_hash: block_hash, instructions: vec![InInstructionWithBalance { instruction: InInstruction::Dex(DexCall::Swap(out_balance, out_address.clone())), balance: ExternalBalance { coin: coin2, amount: Amount(200_000_000_000) }, @@ -407,7 +407,7 @@ serai_test!( let batch = Batch { network: coin1.network(), id: coin1_batch_id, - block: block_hash, + external_network_block_hash: block_hash, instructions: vec![InInstructionWithBalance { instruction: InInstruction::Dex(DexCall::Swap(out_balance, out_address.clone())), balance: ExternalBalance { coin: coin1, amount: Amount(100_000_000_000_000) }, diff --git a/substrate/client/tests/dht.rs b/substrate/client/tests/dht.rs index 0d27c91e..8b8a078b 100644 --- a/substrate/client/tests/dht.rs +++ b/substrate/client/tests/dht.rs @@ -44,7 +44,7 @@ async fn dht() { assert!(!Serai::new(serai_rpc.clone()) .await .unwrap() - .p2p_validators(ExternalNetworkId::Bitcoin.into()) + .p2p_validators(ExternalNetworkId::Bitcoin) .await .unwrap() .is_empty()); diff --git a/substrate/client/tests/emissions.rs b/substrate/client/tests/emissions.rs index 3e2b46f2..7ee843cb 100644 --- a/substrate/client/tests/emissions.rs +++ b/substrate/client/tests/emissions.rs @@ -4,13 +4,13 @@ use rand_core::{RngCore, OsRng}; use serai_client::TemporalSerai; use serai_abi::{ - emissions::primitives::{INITIAL_REWARD_PER_BLOCK, SECURE_BY}, - in_instructions::primitives::Batch, primitives::{ - BlockHash, ExternalBalance, ExternalCoin, ExternalNetworkId, EXTERNAL_NETWORKS, - FAST_EPOCH_DURATION, FAST_EPOCH_INITIAL_PERIOD, NETWORKS, TARGET_BLOCK_TIME, Amount, NetworkId, + EXTERNAL_NETWORKS, NETWORKS, TARGET_BLOCK_TIME, FAST_EPOCH_DURATION, FAST_EPOCH_INITIAL_PERIOD, + BlockHash, ExternalNetworkId, NetworkId, ExternalCoin, Amount, ExternalBalance, }, validator_sets::primitives::Session, + emissions::primitives::{INITIAL_REWARD_PER_BLOCK, SECURE_BY}, + in_instructions::primitives::Batch, }; use serai_client::Serai; @@ -38,7 +38,16 @@ async fn send_batches(serai: &Serai, ids: &mut HashMap) let mut block = BlockHash([0; 32]); OsRng.fill_bytes(&mut block.0); - provide_batch(serai, Batch { network, id: ids[&network], block, instructions: vec![] }).await; + provide_batch( + serai, + Batch { + network, + id: ids[&network], + external_network_block_hash: block, + instructions: vec![], + }, + ) + .await; } } diff --git a/substrate/client/tests/validator_sets.rs b/substrate/client/tests/validator_sets.rs index dee2bb42..32f5d481 100644 --- 
a/substrate/client/tests/validator_sets.rs +++ b/substrate/client/tests/validator_sets.rs @@ -7,11 +7,11 @@ use sp_core::{ use serai_client::{ primitives::{ - NETWORKS, NetworkId, BlockHash, insecure_pair_from_name, FAST_EPOCH_DURATION, - TARGET_BLOCK_TIME, ExternalNetworkId, Amount, + FAST_EPOCH_DURATION, TARGET_BLOCK_TIME, NETWORKS, BlockHash, ExternalNetworkId, NetworkId, + EmbeddedEllipticCurve, Amount, insecure_pair_from_name, }, validator_sets::{ - primitives::{Session, ValidatorSet, ExternalValidatorSet, KeyPair}, + primitives::{Session, ExternalValidatorSet, ValidatorSet, KeyPair}, ValidatorSetsEvent, }, in_instructions::{ @@ -24,7 +24,7 @@ use serai_client::{ mod common; use common::{ tx::publish_tx, - validator_sets::{allocate_stake, deallocate_stake, set_keys}, + validator_sets::{set_embedded_elliptic_curve_key, allocate_stake, deallocate_stake, set_keys}, }; fn get_random_key_pair() -> KeyPair { @@ -224,12 +224,39 @@ async fn validator_set_rotation() { // add 1 participant let last_participant = accounts[4].clone(); + + // If this is the first iteration, set embedded elliptic curve keys + if i == 0 { + for (i, embedded_elliptic_curve) in + [EmbeddedEllipticCurve::Embedwards25519, EmbeddedEllipticCurve::Secq256k1] + .into_iter() + .enumerate() + { + set_embedded_elliptic_curve_key( + &serai, + &last_participant, + embedded_elliptic_curve, + vec![ + 0; + match embedded_elliptic_curve { + EmbeddedEllipticCurve::Embedwards25519 => 32, + EmbeddedEllipticCurve::Secq256k1 => 33, + } + ] + .try_into() + .unwrap(), + i.try_into().unwrap(), + ) + .await; + } + } + let hash = allocate_stake( &serai, network, key_shares[&network], &last_participant, - i.try_into().unwrap(), + (2 + i).try_into().unwrap(), ) .await; participants.push(last_participant.public()); @@ -289,7 +316,7 @@ async fn validator_set_rotation() { let batch = Batch { network: network.try_into().unwrap(), id: 0, - block: block_hash, + external_network_block_hash: block_hash, instructions: vec![], }; publish_tx( diff --git a/substrate/coins/pallet/Cargo.toml b/substrate/coins/pallet/Cargo.toml index 8c59fb3e..ae17b5ba 100644 --- a/substrate/coins/pallet/Cargo.toml +++ b/substrate/coins/pallet/Cargo.toml @@ -6,7 +6,7 @@ license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/coins/pallet" authors = ["Akil Demir "] edition = "2021" -rust-version = "1.74" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true @@ -34,6 +34,9 @@ pallet-transaction-payment = { git = "https://github.com/serai-dex/substrate", d serai-primitives = { path = "../../primitives", default-features = false, features = ["serde"] } coins-primitives = { package = "serai-coins-primitives", path = "../primitives", default-features = false } +[dev-dependencies] +sp-io = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] } + [features] std = [ "frame-system/std", @@ -49,8 +52,12 @@ std = [ "coins-primitives/std", ] -# TODO -try-runtime = [] +try-runtime = [ + "frame-system/try-runtime", + "frame-support/try-runtime", + + "sp-runtime/try-runtime", +] runtime-benchmarks = [ "frame-system/runtime-benchmarks", diff --git a/substrate/coins/pallet/src/lib.rs b/substrate/coins/pallet/src/lib.rs index dd64b2b6..4499f432 100644 --- a/substrate/coins/pallet/src/lib.rs +++ b/substrate/coins/pallet/src/lib.rs @@ -1,5 +1,11 @@ #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(test)] +mod mock; + +#[cfg(test)] +mod tests; + use serai_primitives::{Balance, Coin, ExternalBalance, 
SubstrateAmount}; pub trait AllowMint { diff --git a/substrate/coins/pallet/src/mock.rs b/substrate/coins/pallet/src/mock.rs new file mode 100644 index 00000000..bd4ebc55 --- /dev/null +++ b/substrate/coins/pallet/src/mock.rs @@ -0,0 +1,70 @@ +//! Test environment for Coins pallet. + +use super::*; + +use frame_support::{ + construct_runtime, + traits::{ConstU32, ConstU64}, +}; + +use sp_core::{H256, sr25519::Public}; +use sp_runtime::{ + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, +}; + +use crate as coins; + +type Block = frame_system::mocking::MockBlock; + +construct_runtime!( + pub enum Test + { + System: frame_system, + Coins: coins, + } +); + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = Public; + type Lookup = IdentityLookup; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +impl Config for Test { + type RuntimeEvent = RuntimeEvent; + + type AllowMint = (); +} + +pub(crate) fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + crate::GenesisConfig:: { accounts: vec![], _ignore: Default::default() } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(0)); + ext +} diff --git a/substrate/coins/pallet/src/tests.rs b/substrate/coins/pallet/src/tests.rs new file mode 100644 index 00000000..52b81d37 --- /dev/null +++ b/substrate/coins/pallet/src/tests.rs @@ -0,0 +1,129 @@ +use crate::{mock::*, primitives::*}; + +use frame_system::RawOrigin; +use sp_core::Pair; + +use serai_primitives::*; + +pub type CoinsEvent = crate::Event; + +#[test] +fn mint() { + new_test_ext().execute_with(|| { + // minting u64::MAX should work + let coin = Coin::Serai; + let to = insecure_pair_from_name("random1").public(); + let balance = Balance { coin, amount: Amount(u64::MAX) }; + + Coins::mint(to, balance).unwrap(); + assert_eq!(Coins::balance(to, coin), balance.amount); + + // minting more should fail + assert!(Coins::mint(to, Balance { coin, amount: Amount(1) }).is_err()); + + // supply now should be equal to sum of the accounts balance sum + assert_eq!(Coins::supply(coin), balance.amount.0); + + // test events + let mint_events = System::events() + .iter() + .filter_map(|event| { + if let RuntimeEvent::Coins(e) = &event.event { + if matches!(e, CoinsEvent::Mint { .. 
}) { + Some(e.clone()) + } else { + None + } + } else { + None + } + }) + .collect::>(); + + assert_eq!(mint_events, vec![CoinsEvent::Mint { to, balance }]); + }) +} + +#[test] +fn burn_with_instruction() { + new_test_ext().execute_with(|| { + // mint some coin + let coin = Coin::External(ExternalCoin::Bitcoin); + let to = insecure_pair_from_name("random1").public(); + let balance = Balance { coin, amount: Amount(10 * 10u64.pow(coin.decimals())) }; + + Coins::mint(to, balance).unwrap(); + assert_eq!(Coins::balance(to, coin), balance.amount); + assert_eq!(Coins::supply(coin), balance.amount.0); + + // we shouldn't be able to burn more than what we have + let mut instruction = OutInstructionWithBalance { + instruction: OutInstruction { address: ExternalAddress::new(vec![]).unwrap() }, + balance: ExternalBalance { + coin: coin.try_into().unwrap(), + amount: Amount(balance.amount.0 + 1), + }, + }; + assert!( + Coins::burn_with_instruction(RawOrigin::Signed(to).into(), instruction.clone()).is_err() + ); + + // it should now work + instruction.balance.amount = balance.amount; + Coins::burn_with_instruction(RawOrigin::Signed(to).into(), instruction.clone()).unwrap(); + + // balance & supply now should be back to 0 + assert_eq!(Coins::balance(to, coin), Amount(0)); + assert_eq!(Coins::supply(coin), 0); + + let burn_events = System::events() + .iter() + .filter_map(|event| { + if let RuntimeEvent::Coins(e) = &event.event { + if matches!(e, CoinsEvent::BurnWithInstruction { .. }) { + Some(e.clone()) + } else { + None + } + } else { + None + } + }) + .collect::>(); + + assert_eq!(burn_events, vec![CoinsEvent::BurnWithInstruction { from: to, instruction }]); + }) +} + +#[test] +fn transfer() { + new_test_ext().execute_with(|| { + // mint some coin + let coin = Coin::External(ExternalCoin::Bitcoin); + let from = insecure_pair_from_name("random1").public(); + let balance = Balance { coin, amount: Amount(10 * 10u64.pow(coin.decimals())) }; + + Coins::mint(from, balance).unwrap(); + assert_eq!(Coins::balance(from, coin), balance.amount); + assert_eq!(Coins::supply(coin), balance.amount.0); + + // we can't send more than what we have + let to = insecure_pair_from_name("random2").public(); + assert!(Coins::transfer( + RawOrigin::Signed(from).into(), + to, + Balance { coin, amount: Amount(balance.amount.0 + 1) } + ) + .is_err()); + + // we can send it all + Coins::transfer(RawOrigin::Signed(from).into(), to, balance).unwrap(); + + // check the balances + assert_eq!(Coins::balance(from, coin), Amount(0)); + assert_eq!(Coins::balance(to, coin), balance.amount); + + // supply shouldn't change + assert_eq!(Coins::supply(coin), balance.amount.0); + }) +} diff --git a/substrate/coins/primitives/Cargo.toml b/substrate/coins/primitives/Cargo.toml index ec906929..ed160f9d 100644 --- a/substrate/coins/primitives/Cargo.toml +++ b/substrate/coins/primitives/Cargo.toml @@ -5,7 +5,7 @@ description = "Serai coins primitives" license = "MIT" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.74" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true diff --git a/substrate/coins/primitives/src/lib.rs b/substrate/coins/primitives/src/lib.rs index 5aca029a..97a48c8d 100644 --- a/substrate/coins/primitives/src/lib.rs +++ b/substrate/coins/primitives/src/lib.rs @@ -13,17 +13,17 @@ use serde::{Serialize, Deserialize}; use scale::{Encode, Decode, MaxEncodedLen}; use scale_info::TypeInfo; -use serai_primitives::{system_address, Data, ExternalAddress, ExternalBalance, SeraiAddress}; +use 
serai_primitives::{ExternalBalance, SeraiAddress, ExternalAddress, system_address}; pub const FEE_ACCOUNT: SeraiAddress = system_address(b"Coins-fees"); +// TODO: Replace entirely with just Address #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct OutInstruction { pub address: ExternalAddress, - pub data: Option, } #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] diff --git a/substrate/dex/pallet/Cargo.toml b/substrate/dex/pallet/Cargo.toml index 7e8a83e8..d27ffdeb 100644 --- a/substrate/dex/pallet/Cargo.toml +++ b/substrate/dex/pallet/Cargo.toml @@ -6,7 +6,7 @@ license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/dex/pallet" authors = ["Parity Technologies , Akil Demir "] edition = "2021" -rust-version = "1.74" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true diff --git a/substrate/economic-security/pallet/Cargo.toml b/substrate/economic-security/pallet/Cargo.toml index cefeee8e..efd969c8 100644 --- a/substrate/economic-security/pallet/Cargo.toml +++ b/substrate/economic-security/pallet/Cargo.toml @@ -6,7 +6,7 @@ license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/economic-security/pallet" authors = ["Akil Demir "] edition = "2021" -rust-version = "1.77" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true @@ -30,6 +30,19 @@ coins-pallet = { package = "serai-coins-pallet", path = "../../coins/pallet", de serai-primitives = { path = "../../primitives", default-features = false } + +[dev-dependencies] +pallet-babe = { git = "https://github.com/serai-dex/substrate", default-features = false } +pallet-grandpa = { git = "https://github.com/serai-dex/substrate", default-features = false } +pallet-timestamp = { git = "https://github.com/serai-dex/substrate", default-features = false } + +validator-sets-pallet = { package = "serai-validator-sets-pallet", path = "../../validator-sets/pallet", default-features = false } + +sp-io = { git = "https://github.com/serai-dex/substrate", default-features = false } +sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false } +sp-core = { git = "https://github.com/serai-dex/substrate", default-features = false } +sp-consensus-babe = { git = "https://github.com/serai-dex/substrate", default-features = false } + [features] std = [ "scale/std", @@ -38,11 +51,27 @@ std = [ "frame-system/std", "frame-support/std", + "sp-io/std", + "sp-core/std", + "sp-consensus-babe/std", + "dex-pallet/std", "coins-pallet/std", + "validator-sets-pallet/std", "serai-primitives/std", + + "pallet-babe/std", + "pallet-grandpa/std", + "pallet-timestamp/std", ] -try-runtime = [] # TODO + +try-runtime = [ + "frame-system/try-runtime", + "frame-support/try-runtime", + + "sp-runtime/try-runtime", +] + default = ["std"] diff --git a/substrate/economic-security/pallet/src/lib.rs b/substrate/economic-security/pallet/src/lib.rs index 045297f4..20897aaa 100644 --- a/substrate/economic-security/pallet/src/lib.rs +++ b/substrate/economic-security/pallet/src/lib.rs @@ -1,5 +1,11 @@ #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(test)] +mod mock; + +#[cfg(test)] +mod tests; + #[allow( unreachable_patterns, clippy::cast_possible_truncation, diff --git 
a/substrate/economic-security/pallet/src/mock.rs b/substrate/economic-security/pallet/src/mock.rs new file mode 100644 index 00000000..ffa7d7fb --- /dev/null +++ b/substrate/economic-security/pallet/src/mock.rs @@ -0,0 +1,217 @@ +//! Test environment for EconomicSecurity pallet. + +use super::*; + +use core::marker::PhantomData; +use std::collections::HashMap; + +use frame_support::{ + construct_runtime, + traits::{ConstU16, ConstU32, ConstU64}, +}; + +use sp_core::{ + H256, Pair as PairTrait, + sr25519::{Public, Pair}, +}; +use sp_runtime::{ + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, +}; + +use serai_primitives::*; +use validator_sets::{primitives::MAX_KEY_SHARES_PER_SET, MembershipProof}; + +pub use crate as economic_security; +pub use coins_pallet as coins; +pub use dex_pallet as dex; +pub use pallet_babe as babe; +pub use pallet_grandpa as grandpa; +pub use pallet_timestamp as timestamp; +pub use validator_sets_pallet as validator_sets; + +type Block = frame_system::mocking::MockBlock; +// Maximum number of authorities per session. +pub type MaxAuthorities = ConstU32<{ MAX_KEY_SHARES_PER_SET }>; + +pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); +pub const BABE_GENESIS_EPOCH_CONFIG: sp_consensus_babe::BabeEpochConfiguration = + sp_consensus_babe::BabeEpochConfiguration { + c: PRIMARY_PROBABILITY, + allowed_slots: sp_consensus_babe::AllowedSlots::PrimaryAndSecondaryPlainSlots, + }; + +pub const MEDIAN_PRICE_WINDOW_LENGTH: u16 = 10; + +construct_runtime!( + pub enum Test + { + System: frame_system, + Timestamp: timestamp, + Coins: coins, + LiquidityTokens: coins::::{Pallet, Call, Storage, Event}, + ValidatorSets: validator_sets, + EconomicSecurity: economic_security, + Dex: dex, + Babe: babe, + Grandpa: grandpa, + } +); + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = Public; + type Lookup = IdentityLookup; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +impl timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = Babe; + type MinimumPeriod = ConstU64<{ (TARGET_BLOCK_TIME * 1000) / 2 }>; + type WeightInfo = (); +} + +impl babe::Config for Test { + type EpochDuration = ConstU64<{ FAST_EPOCH_DURATION }>; + + type ExpectedBlockTime = ConstU64<{ TARGET_BLOCK_TIME * 1000 }>; + type EpochChangeTrigger = babe::ExternalTrigger; + type DisabledValidators = ValidatorSets; + + type WeightInfo = (); + type MaxAuthorities = MaxAuthorities; + + type KeyOwnerProof = MembershipProof; + type EquivocationReportSystem = (); +} + +impl grandpa::Config for Test { + type RuntimeEvent = RuntimeEvent; + + type WeightInfo = (); + type MaxAuthorities = MaxAuthorities; + + type MaxSetIdSessionEntries = ConstU64<0>; + type KeyOwnerProof = MembershipProof; + type EquivocationReportSystem = (); +} + +impl coins::Config for Test { + type RuntimeEvent = RuntimeEvent; + type AllowMint = ValidatorSets; +} + +impl coins::Config for Test { + type RuntimeEvent = RuntimeEvent; + type AllowMint = (); +} + +impl 
dex::Config for Test { + type RuntimeEvent = RuntimeEvent; + + type LPFee = ConstU32<3>; // 0.3% + type MintMinLiquidity = ConstU64<10000>; + + type MaxSwapPathLength = ConstU32<3>; // coin1 -> SRI -> coin2 + + type MedianPriceWindowLength = ConstU16<{ MEDIAN_PRICE_WINDOW_LENGTH }>; + + type WeightInfo = dex::weights::SubstrateWeight; +} + +impl validator_sets::Config for Test { + type RuntimeEvent = RuntimeEvent; + type ShouldEndSession = Babe; +} + +impl Config for Test { + type RuntimeEvent = RuntimeEvent; +} + +// For a const we can't define +pub fn genesis_participants() -> Vec { + vec![ + insecure_pair_from_name("Alice"), + insecure_pair_from_name("Bob"), + insecure_pair_from_name("Charlie"), + insecure_pair_from_name("Dave"), + ] +} + +// Amounts for single key share per network +pub fn key_shares() -> HashMap { + HashMap::from([ + (NetworkId::Serai, Amount(50_000 * 10_u64.pow(8))), + (NetworkId::External(ExternalNetworkId::Bitcoin), Amount(1_000_000 * 10_u64.pow(8))), + (NetworkId::External(ExternalNetworkId::Ethereum), Amount(1_000_000 * 10_u64.pow(8))), + (NetworkId::External(ExternalNetworkId::Monero), Amount(100_000 * 10_u64.pow(8))), + ]) +} + +pub(crate) fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let networks: Vec<(NetworkId, Amount)> = key_shares().into_iter().collect::>(); + + coins::GenesisConfig:: { + accounts: genesis_participants() + .clone() + .into_iter() + .map(|a| (a.public(), Balance { coin: Coin::Serai, amount: Amount(1 << 60) })) + .collect(), + _ignore: Default::default(), + } + .assimilate_storage(&mut t) + .unwrap(); + + validator_sets::GenesisConfig:: { + networks, + participants: genesis_participants().into_iter().map(|p| p.public()).collect(), + } + .assimilate_storage(&mut t) + .unwrap(); + + babe::GenesisConfig:: { + authorities: genesis_participants() + .into_iter() + .map(|validator| (validator.public().into(), 1)) + .collect(), + epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG), + _config: PhantomData, + } + .assimilate_storage(&mut t) + .unwrap(); + + grandpa::GenesisConfig:: { + authorities: genesis_participants() + .into_iter() + .map(|validator| (validator.public().into(), 1)) + .collect(), + _config: PhantomData, + } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(0)); + ext +} diff --git a/substrate/economic-security/pallet/src/tests.rs b/substrate/economic-security/pallet/src/tests.rs new file mode 100644 index 00000000..a6010e71 --- /dev/null +++ b/substrate/economic-security/pallet/src/tests.rs @@ -0,0 +1,82 @@ +use crate::mock::*; + +use frame_support::traits::Hooks; +use frame_system::RawOrigin; + +use sp_core::{sr25519::Signature, Pair as PairTrait}; +use sp_runtime::BoundedVec; + +use validator_sets::primitives::KeyPair; +use serai_primitives::{ + insecure_pair_from_name, Balance, Coin, ExternalBalance, ExternalCoin, ExternalNetworkId, + EXTERNAL_COINS, EXTERNAL_NETWORKS, +}; + +fn set_keys_for_session(network: ExternalNetworkId) { + ValidatorSets::set_keys( + RawOrigin::None.into(), + network, + BoundedVec::new(), + KeyPair(insecure_pair_from_name("Alice").public(), vec![].try_into().unwrap()), + Signature([0u8; 64]), + ) + .unwrap(); +} + +fn make_pool_with_liquidity(coin: &ExternalCoin) { + // make a pool so that we have security oracle value for the coin + let liq_acc = insecure_pair_from_name("liq-acc").public(); + let balance = ExternalBalance { coin: *coin, 
amount: key_shares()[&coin.network().into()] }; + Coins::mint(liq_acc, balance.into()).unwrap(); + Coins::mint(liq_acc, Balance { coin: Coin::Serai, amount: balance.amount }).unwrap(); + + Dex::add_liquidity( + RawOrigin::Signed(liq_acc).into(), + *coin, + balance.amount.0 / 2, + balance.amount.0 / 2, + 1, + 1, + liq_acc, + ) + .unwrap(); + Dex::on_finalize(1); + assert!(Dex::security_oracle_value(coin).unwrap().0 > 0) +} + +#[test] +fn economic_security() { + new_test_ext().execute_with(|| { + // update the state + EconomicSecurity::on_initialize(1); + + // make sure it is right at the beginning + // this is none at this point since no set has set their keys so TAS isn't up-to-date + for network in EXTERNAL_NETWORKS { + assert_eq!(EconomicSecurity::economic_security_block(network), None); + } + + // set the keys for TAS and have pools for oracle value + for coin in EXTERNAL_COINS { + set_keys_for_session(coin.network()); + make_pool_with_liquidity(&coin); + } + + // update the state + EconomicSecurity::on_initialize(1); + + // check again. The reason we have economic security now is because we stake a key share + // per participant per network(total of 4 key share) in genesis for all networks. + for network in EXTERNAL_NETWORKS { + assert_eq!(EconomicSecurity::economic_security_block(network), Some(1)); + } + + // TODO: Not sure how much sense this test makes since we start from an economically secure + // state. Ideally we should start from not economically secure state and stake the necessary + // amount and then check whether the pallet set the value right since that will be the mainnet + // path. But we cant do that at the moment since vs-pallet genesis build auto stake per network + // to construct the set. This also makes a missing piece of logic explicit. We need genesis + // validators to be in-set but without their stake, or at least its affect on TAS. So this test + // should be updated once that logic is coded. 
+ }); +} diff --git a/substrate/emissions/pallet/Cargo.toml b/substrate/emissions/pallet/Cargo.toml index a56bcee4..742dff08 100644 --- a/substrate/emissions/pallet/Cargo.toml +++ b/substrate/emissions/pallet/Cargo.toml @@ -6,7 +6,7 @@ license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/emissions/pallet" authors = ["Akil Demir "] edition = "2021" -rust-version = "1.77" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true @@ -54,7 +54,7 @@ std = [ "validator-sets-pallet/std", "dex-pallet/std", "genesis-liquidity-pallet/std", - + "economic-security-pallet/std", "serai-primitives/std", diff --git a/substrate/emissions/pallet/src/lib.rs b/substrate/emissions/pallet/src/lib.rs index 99e22e8b..4241d186 100644 --- a/substrate/emissions/pallet/src/lib.rs +++ b/substrate/emissions/pallet/src/lib.rs @@ -12,7 +12,7 @@ pub mod pallet { use frame_system::{pallet_prelude::*, RawOrigin}; use frame_support::{pallet_prelude::*, sp_runtime::SaturatedConversion}; - use sp_std::{vec, vec::Vec, ops::Mul, collections::btree_map::BTreeMap}; + use sp_std::{vec, vec::Vec, collections::btree_map::BTreeMap}; use coins_pallet::{Config as CoinsConfig, Pallet as Coins}; use dex_pallet::{Config as DexConfig, Pallet as Dex}; @@ -23,7 +23,7 @@ pub mod pallet { use economic_security_pallet::{Config as EconomicSecurityConfig, Pallet as EconomicSecurity}; use serai_primitives::*; - use validator_sets_primitives::{MAX_KEY_SHARES_PER_SET, Session}; + use validator_sets_primitives::{MAX_KEY_SHARES_PER_SET_U32, Session}; pub use emissions_primitives as primitives; use primitives::*; @@ -59,6 +59,7 @@ pub mod pallet { NetworkHasEconomicSecurity, NoValueForCoin, InsufficientAllocation, + AmountOverflow, } #[pallet::event] @@ -74,7 +75,7 @@ pub mod pallet { _, Identity, NetworkId, - BoundedVec<(PublicKey, u64), ConstU32<{ MAX_KEY_SHARES_PER_SET }>>, + BoundedVec<(PublicKey, u64), ConstU32<{ MAX_KEY_SHARES_PER_SET_U32 }>>, OptionQuery, >; @@ -412,9 +413,17 @@ pub mod pallet { let last_block = >::block_number() - 1u32.into(); let value = Dex::::spot_price_for_block(last_block, balance.coin) .ok_or(Error::::NoValueForCoin)?; - // TODO: may panic? It might be best for this math ops to return the result as is instead of - // doing an unwrap so that it can be properly dealt with. - let sri_amount = balance.amount.mul(value); + + let sri_amount = Amount( + u64::try_from( + u128::from(balance.amount.0) + .checked_mul(u128::from(value.0)) + .ok_or(Error::::AmountOverflow)? 
+ .checked_div(u128::from(10u64.pow(balance.coin.decimals()))) + .ok_or(Error::::AmountOverflow)?, + ) + .map_err(|_| Error::::AmountOverflow)?, + ); // Mint Coins::::mint(to, Balance { coin: Coin::Serai, amount: sri_amount })?; diff --git a/substrate/emissions/primitives/Cargo.toml b/substrate/emissions/primitives/Cargo.toml index 15ecbe25..077de439 100644 --- a/substrate/emissions/primitives/Cargo.toml +++ b/substrate/emissions/primitives/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/emissions/primitives" authors = ["Akil Demir "] edition = "2021" -rust-version = "1.77" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true diff --git a/substrate/genesis-liquidity/pallet/Cargo.toml b/substrate/genesis-liquidity/pallet/Cargo.toml index 3668b995..4162c038 100644 --- a/substrate/genesis-liquidity/pallet/Cargo.toml +++ b/substrate/genesis-liquidity/pallet/Cargo.toml @@ -6,7 +6,7 @@ license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/genesis-liquidity/pallet" authors = ["Akil Demir "] edition = "2021" -rust-version = "1.77" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true diff --git a/substrate/genesis-liquidity/pallet/src/lib.rs b/substrate/genesis-liquidity/pallet/src/lib.rs index 3a78e493..2fd24589 100644 --- a/substrate/genesis-liquidity/pallet/src/lib.rs +++ b/substrate/genesis-liquidity/pallet/src/lib.rs @@ -64,6 +64,7 @@ pub mod pallet { /// Keeps shares and the amount of coins per account. #[pallet::storage] + #[pallet::getter(fn liquidity)] pub(crate) type Liquidity = StorageDoubleMap< _, Identity, @@ -76,6 +77,7 @@ pub mod pallet { /// Keeps the total shares and the total amount of coins per coin. #[pallet::storage] + #[pallet::getter(fn supply)] pub(crate) type Supply = StorageMap<_, Identity, ExternalCoin, LiquidityAmount, OptionQuery>; diff --git a/substrate/genesis-liquidity/primitives/Cargo.toml b/substrate/genesis-liquidity/primitives/Cargo.toml index ec05bbd6..d026ccf4 100644 --- a/substrate/genesis-liquidity/primitives/Cargo.toml +++ b/substrate/genesis-liquidity/primitives/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/genesis-liquidity/primitives" authors = ["Akil Demir "] edition = "2021" -rust-version = "1.77" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true @@ -36,7 +36,7 @@ std = [ "borsh?/std", "serde?/std", "scale-info/std", - + "serai-primitives/std", "validator-sets-primitives/std", diff --git a/substrate/in-instructions/pallet/Cargo.toml b/substrate/in-instructions/pallet/Cargo.toml index a12e38b3..c288e9fe 100644 --- a/substrate/in-instructions/pallet/Cargo.toml +++ b/substrate/in-instructions/pallet/Cargo.toml @@ -6,7 +6,7 @@ license = "AGPL-3.0-only" authors = ["Luke Parker "] edition = "2021" publish = false -rust-version = "1.74" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true @@ -19,6 +19,8 @@ ignored = ["scale", "scale-info"] workspace = true [dependencies] +bitvec = { version = "1", default-features = false, features = ["alloc"] } + scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2", default-features = false, features = ["derive"] } @@ -40,6 +42,14 @@ validator-sets-pallet = { package = "serai-validator-sets-pallet", path = "../.. 
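// Illustrative sketch, not part of the patch above: the checked arithmetic added to
// swap_to_staked_sri converts an external-coin amount into SRI as
// amount * spot_price / 10^decimals, widening to u128 and surfacing AmountOverflow
// instead of the prior Amount::mul, which could panic. The helper name below is
// hypothetical, and it assumes the spot price is the SRI value of one whole external coin.
fn sri_value(amount: u64, spot_price: u64, coin_decimals: u32) -> Option<u64> {
  // Widen before multiplying so the intermediate product can't silently wrap.
  let widened = u128::from(amount).checked_mul(u128::from(spot_price))?;
  let sri = widened.checked_div(u128::from(10u64.pow(coin_decimals)))?;
  u64::try_from(sri).ok()
}
// For example, 2 BTC (2 * 10^8 sats) at a spot price of 5 SRI (5 * 10^8) yields
// (2 * 10^8 * 5 * 10^8) / 10^8 = 10^9, i.e. 10 SRI in its 10^8-denominated units.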
genesis-liquidity-pallet = { package = "serai-genesis-liquidity-pallet", path = "../../genesis-liquidity/pallet", default-features = false } emissions-pallet = { package = "serai-emissions-pallet", path = "../../emissions/pallet", default-features = false } + +[dev-dependencies] +pallet-babe = { git = "https://github.com/serai-dex/substrate", default-features = false } +pallet-grandpa = { git = "https://github.com/serai-dex/substrate", default-features = false } +pallet-timestamp = { git = "https://github.com/serai-dex/substrate", default-features = false } + +economic-security-pallet = { package = "serai-economic-security-pallet", path = "../../economic-security/pallet", default-features = false } + [features] std = [ "scale/std", @@ -62,8 +72,19 @@ std = [ "validator-sets-pallet/std", "genesis-liquidity-pallet/std", "emissions-pallet/std", -] -default = ["std"] -# TODO -try-runtime = [] + "economic-security-pallet/std", + + "pallet-babe/std", + "pallet-grandpa/std", + "pallet-timestamp/std", +] + +try-runtime = [ + "frame-system/try-runtime", + "frame-support/try-runtime", + + "sp-runtime/try-runtime", +] + +default = ["std"] diff --git a/substrate/in-instructions/pallet/src/lib.rs b/substrate/in-instructions/pallet/src/lib.rs index e3f69f41..7580056d 100644 --- a/substrate/in-instructions/pallet/src/lib.rs +++ b/substrate/in-instructions/pallet/src/lib.rs @@ -9,6 +9,12 @@ use serai_primitives::*; pub use in_instructions_primitives as primitives; use primitives::*; +#[cfg(test)] +mod mock; + +#[cfg(test)] +mod tests; + // TODO: Investigate why Substrate generates these #[allow( unreachable_patterns, @@ -58,9 +64,17 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(fn deposit_event)] pub enum Event { - Batch { network: ExternalNetworkId, id: u32, block: BlockHash, instructions_hash: [u8; 32] }, - InstructionFailure { network: ExternalNetworkId, id: u32, index: u32 }, - Halt { network: ExternalNetworkId }, + Batch { + network: ExternalNetworkId, + publishing_session: Session, + id: u32, + external_network_block_hash: BlockHash, + in_instructions_hash: [u8; 32], + in_instruction_results: bitvec::vec::BitVec, + }, + Halt { + network: ExternalNetworkId, + }, } #[pallet::error] @@ -88,20 +102,14 @@ pub mod pallet { #[pallet::storage] pub(crate) type Halted = StorageMap<_, Identity, ExternalNetworkId, (), OptionQuery>; - // The latest block a network has acknowledged as finalized - #[pallet::storage] - #[pallet::getter(fn latest_network_block)] - pub(crate) type LatestNetworkBlock = - StorageMap<_, Identity, ExternalNetworkId, BlockHash, OptionQuery>; - impl Pallet { // Use a dedicated transaction layer when executing this InInstruction // This lets it individually error without causing any storage modifications #[frame_support::transactional] - fn execute(instruction: InInstructionWithBalance) -> Result<(), DispatchError> { - match instruction.instruction { + fn execute(instruction: &InInstructionWithBalance) -> Result<(), DispatchError> { + match &instruction.instruction { InInstruction::Transfer(address) => { - Coins::::mint(address.into(), instruction.balance.into())?; + Coins::::mint((*address).into(), instruction.balance.into())?; } InInstruction::Dex(call) => { // This will only be initiated by external chain transactions. 
That is why we only need @@ -110,6 +118,7 @@ pub mod pallet { match call { DexCall::SwapAndAddLiquidity(address) => { let origin = RawOrigin::Signed(IN_INSTRUCTION_EXECUTOR.into()); + let address = *address; let coin = instruction.balance.coin; // mint the given coin on the account @@ -205,9 +214,7 @@ pub mod pallet { Coins::::balance(IN_INSTRUCTION_EXECUTOR.into(), out_balance.coin); let instruction = OutInstructionWithBalance { instruction: OutInstruction { - address: out_address.as_external().unwrap(), - // TODO: Properly pass data. Replace address with an OutInstruction entirely? - data: None, + address: out_address.clone().as_external().unwrap(), }, balance: ExternalBalance { coin: out_balance.coin.try_into().unwrap(), @@ -221,11 +228,11 @@ pub mod pallet { } InInstruction::GenesisLiquidity(address) => { Coins::::mint(GENESIS_LIQUIDITY_ACCOUNT.into(), instruction.balance.into())?; - GenesisLiq::::add_coin_liquidity(address.into(), instruction.balance)?; + GenesisLiq::::add_coin_liquidity((*address).into(), instruction.balance)?; } InInstruction::SwapToStakedSRI(address, network) => { Coins::::mint(POL_ACCOUNT.into(), instruction.balance.into())?; - Emissions::::swap_to_staked_sri(address.into(), network, instruction.balance)?; + Emissions::::swap_to_staked_sri((*address).into(), *network, instruction.balance)?; } } Ok(()) @@ -263,27 +270,10 @@ pub mod pallet { impl Pallet { #[pallet::call_index(0)] #[pallet::weight((0, DispatchClass::Operational))] // TODO - pub fn execute_batch(origin: OriginFor, batch: SignedBatch) -> DispatchResult { + pub fn execute_batch(origin: OriginFor, _batch: SignedBatch) -> DispatchResult { ensure_none(origin)?; - let batch = batch.batch; - - LatestNetworkBlock::::insert(batch.network, batch.block); - Self::deposit_event(Event::Batch { - network: batch.network, - id: batch.id, - block: batch.block, - instructions_hash: blake2_256(&batch.instructions.encode()), - }); - for (i, instruction) in batch.instructions.into_iter().enumerate() { - if Self::execute(instruction).is_err() { - Self::deposit_event(Event::InstructionFailure { - network: batch.network, - id: batch.id, - index: u32::try_from(i).unwrap(), - }); - } - } + // The entire Batch execution is handled in pre_dispatch Ok(()) } @@ -309,6 +299,7 @@ pub mod pallet { // verify the signature let (current_session, prior, current) = keys_for_network::(network)?; + let prior_session = Session(current_session.0 - 1); let batch_message = batch_message(&batch.batch); // Check the prior key first since only a single `Batch` (the last one) will be when prior is // Some yet prior wasn't the signing key @@ -324,6 +315,8 @@ pub mod pallet { Err(InvalidTransaction::BadProof)?; } + let batch = &batch.batch; + if Halted::::contains_key(network) { Err(InvalidTransaction::Custom(1))?; } @@ -334,7 +327,7 @@ pub mod pallet { if prior.is_some() && (!valid_by_prior) { ValidatorSets::::retire_set(ValidatorSet { network: network.into(), - session: Session(current_session.0 - 1), + session: prior_session, }); } @@ -344,36 +337,42 @@ pub mod pallet { if last_block >= current_block { Err(InvalidTransaction::Future)?; } - LastBatchBlock::::insert(batch.batch.network, frame_system::Pallet::::block_number()); + LastBatchBlock::::insert(batch.network, frame_system::Pallet::::block_number()); // Verify the batch is sequential // LastBatch has the last ID set. 
The next ID should be it + 1 // If there's no ID, the next ID should be 0 let expected = LastBatch::::get(network).map_or(0, |prev| prev + 1); - if batch.batch.id < expected { + if batch.id < expected { Err(InvalidTransaction::Stale)?; } - if batch.batch.id > expected { + if batch.id > expected { Err(InvalidTransaction::Future)?; } - LastBatch::::insert(batch.batch.network, batch.batch.id); + LastBatch::::insert(batch.network, batch.id); - // Verify all Balances in this Batch are for this network - for instruction in &batch.batch.instructions { + let in_instructions_hash = blake2_256(&batch.instructions.encode()); + let mut in_instruction_results = bitvec::vec::BitVec::new(); + for instruction in &batch.instructions { // Verify this coin is for this network - // If this is ever hit, it means the validator set has turned malicious and should be fully - // slashed - // Because we have an error here, no validator set which turns malicious should execute - // this code path - // Accordingly, there's no value in writing code to fully slash the network, when such an - // even would require a runtime upgrade to fully resolve anyways - if instruction.balance.coin.network() != batch.batch.network { + if instruction.balance.coin.network() != batch.network { Err(InvalidTransaction::Custom(2))?; } + + in_instruction_results.push(Self::execute(instruction).is_ok()); } + Self::deposit_event(Event::Batch { + network: batch.network, + publishing_session: if valid_by_prior { prior_session } else { current_session }, + id: batch.id, + external_network_block_hash: batch.external_network_block_hash, + in_instructions_hash, + in_instruction_results, + }); + ValidTransaction::with_tag_prefix("in-instructions") - .and_provides((batch.batch.network, batch.batch.id)) + .and_provides((batch.network, batch.id)) // Set a 10 block longevity, though this should be included in the next block .longevity(10) .propagate(true) diff --git a/substrate/in-instructions/pallet/src/mock.rs b/substrate/in-instructions/pallet/src/mock.rs new file mode 100644 index 00000000..05417863 --- /dev/null +++ b/substrate/in-instructions/pallet/src/mock.rs @@ -0,0 +1,201 @@ +//! Test environment for InInstructions pallet. + +use super::*; + +use std::collections::HashMap; + +use frame_support::{ + construct_runtime, + traits::{ConstU16, ConstU32, ConstU64}, +}; + +use sp_core::{H256, Pair, sr25519::Public}; +use sp_runtime::{ + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, +}; + +use validator_sets::{primitives::MAX_KEY_SHARES_PER_SET, MembershipProof}; + +pub use crate as in_instructions; +pub use coins_pallet as coins; +pub use validator_sets_pallet as validator_sets; +pub use genesis_liquidity_pallet as genesis_liquidity; +pub use emissions_pallet as emissions; +pub use dex_pallet as dex; +pub use pallet_babe as babe; +pub use pallet_grandpa as grandpa; +pub use pallet_timestamp as timestamp; +pub use economic_security_pallet as economic_security; + +type Block = frame_system::mocking::MockBlock; +// Maximum number of authorities per session. 
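// Illustrative sketch, not part of the patch above: Event::Batch now carries an
// in_instruction_results bitfield (one bit per InInstruction, set on success) instead of
// emitting a separate InstructionFailure event per failed instruction, so a consumer can
// recover the failed indices from the Batch event alone. The helper below is hypothetical
// and uses bitvec's default type parameters, since the exact ones aren't shown here.
fn failed_instruction_indices(results: &bitvec::vec::BitVec) -> Vec<usize> {
  // Zero bits mark InInstructions whose execution returned an error.
  results.iter_zeros().collect()
}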
+pub type MaxAuthorities = ConstU32<{ MAX_KEY_SHARES_PER_SET }>; + +pub const MEDIAN_PRICE_WINDOW_LENGTH: u16 = 10; + +construct_runtime!( + pub enum Test + { + System: frame_system, + Timestamp: timestamp, + Coins: coins, + LiquidityTokens: coins::::{Pallet, Call, Storage, Event}, + Emissions: emissions, + ValidatorSets: validator_sets, + GenesisLiquidity: genesis_liquidity, + EconomicSecurity: economic_security, + Dex: dex, + Babe: babe, + Grandpa: grandpa, + InInstructions: in_instructions, + } +); + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = Public; + type Lookup = IdentityLookup; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +impl timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = Babe; + type MinimumPeriod = ConstU64<{ (TARGET_BLOCK_TIME * 1000) / 2 }>; + type WeightInfo = (); +} + +impl babe::Config for Test { + type EpochDuration = ConstU64<{ FAST_EPOCH_DURATION }>; + + type ExpectedBlockTime = ConstU64<{ TARGET_BLOCK_TIME * 1000 }>; + type EpochChangeTrigger = babe::ExternalTrigger; + type DisabledValidators = ValidatorSets; + + type WeightInfo = (); + type MaxAuthorities = MaxAuthorities; + + type KeyOwnerProof = MembershipProof; + type EquivocationReportSystem = (); +} + +impl grandpa::Config for Test { + type RuntimeEvent = RuntimeEvent; + + type WeightInfo = (); + type MaxAuthorities = MaxAuthorities; + + type MaxSetIdSessionEntries = ConstU64<0>; + type KeyOwnerProof = MembershipProof; + type EquivocationReportSystem = (); +} + +impl coins::Config for Test { + type RuntimeEvent = RuntimeEvent; + type AllowMint = ValidatorSets; +} + +impl coins::Config for Test { + type RuntimeEvent = RuntimeEvent; + type AllowMint = (); +} + +impl dex::Config for Test { + type RuntimeEvent = RuntimeEvent; + + type LPFee = ConstU32<3>; // 0.3% + type MintMinLiquidity = ConstU64<10000>; + + type MaxSwapPathLength = ConstU32<3>; // coin1 -> SRI -> coin2 + + type MedianPriceWindowLength = ConstU16<{ MEDIAN_PRICE_WINDOW_LENGTH }>; + + type WeightInfo = dex::weights::SubstrateWeight; +} + +impl validator_sets::Config for Test { + type RuntimeEvent = RuntimeEvent; + type ShouldEndSession = Babe; +} + +impl genesis_liquidity::Config for Test { + type RuntimeEvent = RuntimeEvent; +} + +impl emissions::Config for Test { + type RuntimeEvent = RuntimeEvent; +} + +impl economic_security::Config for Test { + type RuntimeEvent = RuntimeEvent; +} + +impl Config for Test { + type RuntimeEvent = RuntimeEvent; +} + +// Amounts for single key share per network +pub fn key_shares() -> HashMap { + HashMap::from([ + (NetworkId::Serai, Amount(50_000 * 10_u64.pow(8))), + (NetworkId::External(ExternalNetworkId::Bitcoin), Amount(1_000_000 * 10_u64.pow(8))), + (NetworkId::External(ExternalNetworkId::Ethereum), Amount(1_000_000 * 10_u64.pow(8))), + (NetworkId::External(ExternalNetworkId::Monero), Amount(100_000 * 10_u64.pow(8))), + ]) +} + +pub(crate) fn new_test_ext() -> sp_io::TestExternalities { + let mut t = 
frame_system::GenesisConfig::::default().build_storage().unwrap(); + let networks: Vec<(NetworkId, Amount)> = key_shares().into_iter().collect::>(); + + let accounts: Vec = vec![ + insecure_pair_from_name("Alice").public(), + insecure_pair_from_name("Bob").public(), + insecure_pair_from_name("Charlie").public(), + insecure_pair_from_name("Dave").public(), + insecure_pair_from_name("Eve").public(), + insecure_pair_from_name("Ferdie").public(), + ]; + let validators = accounts.clone(); + + coins::GenesisConfig:: { + accounts: accounts + .into_iter() + .map(|a| (a, Balance { coin: Coin::Serai, amount: Amount(1 << 60) })) + .collect(), + _ignore: Default::default(), + } + .assimilate_storage(&mut t) + .unwrap(); + + validator_sets::GenesisConfig:: { + networks: networks.clone(), + participants: validators.clone(), + } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(0)); + ext +} diff --git a/substrate/in-instructions/pallet/src/tests.rs b/substrate/in-instructions/pallet/src/tests.rs new file mode 100644 index 00000000..cc2b0f3e --- /dev/null +++ b/substrate/in-instructions/pallet/src/tests.rs @@ -0,0 +1,500 @@ +use super::*; +use crate::mock::*; + +use emissions_pallet::primitives::POL_ACCOUNT; +use genesis_liquidity_pallet::primitives::INITIAL_GENESIS_LP_SHARES; +use scale::Encode; + +use frame_support::{pallet_prelude::InvalidTransaction, traits::OnFinalize}; +use frame_system::RawOrigin; + +use sp_core::{sr25519::Public, Pair}; +use sp_runtime::{traits::ValidateUnsigned, transaction_validity::TransactionSource, BoundedVec}; + +use validator_sets::{Pallet as ValidatorSets, primitives::KeyPair}; +use coins::primitives::{OutInstruction, OutInstructionWithBalance}; +use genesis_liquidity::primitives::GENESIS_LIQUIDITY_ACCOUNT; + +fn set_keys_for_session(key: Public) { + for n in EXTERNAL_NETWORKS { + ValidatorSets::::set_keys( + RawOrigin::None.into(), + n, + BoundedVec::new(), + KeyPair(key, vec![].try_into().unwrap()), + Signature([0u8; 64]), + ) + .unwrap(); + } +} + +fn get_events() -> Vec> { + let events = System::events() + .iter() + .filter_map(|event| { + if let RuntimeEvent::InInstructions(e) = &event.event { + Some(e.clone()) + } else { + None + } + }) + .collect::>(); + + System::reset_events(); + events +} + +fn make_liquid_pool(coin: ExternalCoin, amount: u64) { + // mint coins so that we can add liquidity + let account = insecure_pair_from_name("make-pool-account").public(); + Coins::mint(account, ExternalBalance { coin, amount: Amount(amount) }.into()).unwrap(); + Coins::mint(account, Balance { coin: Coin::Serai, amount: Amount(amount) }).unwrap(); + + // make some liquid pool + Dex::add_liquidity(RawOrigin::Signed(account).into(), coin, amount, amount, 1, 1, account) + .unwrap(); +} + +#[test] +fn validate_batch() { + new_test_ext().execute_with(|| { + let pair = insecure_pair_from_name("Alice"); + set_keys_for_session(pair.public()); + + let mut batch_size = 0; + let mut batch = Batch { + network: ExternalNetworkId::Monero, + id: 1, + block: BlockHash([0u8; 32]), + instructions: vec![], + }; + + // batch size bigger than MAX_BATCH_SIZE should fail + while batch_size <= MAX_BATCH_SIZE + 1000 { + batch.instructions.push(InInstructionWithBalance { + instruction: InInstruction::Transfer(SeraiAddress::new([0u8; 32])), + balance: ExternalBalance { coin: ExternalCoin::Monero, amount: Amount(1) }, + }); + batch_size = batch.encode().len(); + } + + let call = pallet::Call::::execute_batch { + 
batch: SignedBatch { batch: batch.clone(), signature: Signature([0u8; 64]) }, + }; + assert_eq!( + InInstructions::validate_unsigned(TransactionSource::External, &call), + InvalidTransaction::ExhaustsResources.into() + ); + + // reduce the batch size into allowed size + while batch_size > MAX_BATCH_SIZE { + batch.instructions.pop(); + batch_size = batch.encode().len(); + } + + // 0 signature should be invalid + let call = pallet::Call::::execute_batch { + batch: SignedBatch { batch: batch.clone(), signature: Signature([0u8; 64]) }, + }; + assert_eq!( + InInstructions::validate_unsigned(TransactionSource::External, &call), + InvalidTransaction::BadProof.into() + ); + + // submit a valid signature + let signature = pair.sign(&batch_message(&batch)); + + // network shouldn't be halted + InInstructions::halt(ExternalNetworkId::Monero).unwrap(); + let call = pallet::Call::::execute_batch { + batch: SignedBatch { batch: batch.clone(), signature }, + }; + assert_eq!( + InInstructions::validate_unsigned(TransactionSource::External, &call), + InvalidTransaction::Custom(1).into() // network halted error + ); + + // submit from an un-halted network + batch.network = ExternalNetworkId::Bitcoin; + let signature = pair.sign(&batch_message(&batch)); + + // can't submit in the first block(Block 0) + let call = pallet::Call::::execute_batch { + batch: SignedBatch { batch: batch.clone(), signature: signature.clone() }, + }; + assert_eq!( + InInstructions::validate_unsigned(TransactionSource::External, &call), + InvalidTransaction::Future.into() + ); + + // update block number + System::set_block_number(1); + + // first batch id should be 0 + let call = pallet::Call::::execute_batch { + batch: SignedBatch { batch: batch.clone(), signature: signature.clone() }, + }; + assert_eq!( + InInstructions::validate_unsigned(TransactionSource::External, &call), + InvalidTransaction::Future.into() + ); + + // update batch id + batch.id = 0; + let signature = pair.sign(&batch_message(&batch)); + + // can't have more than 1 batch per block + let call = pallet::Call::::execute_batch { + batch: SignedBatch { batch: batch.clone(), signature: signature.clone() }, + }; + assert_eq!( + InInstructions::validate_unsigned(TransactionSource::External, &call), + InvalidTransaction::Future.into() + ); + + // update block number + System::set_block_number(2); + + // network and the instruction coins should match + let call = pallet::Call::::execute_batch { + batch: SignedBatch { batch: batch.clone(), signature }, + }; + assert_eq!( + InInstructions::validate_unsigned(TransactionSource::External, &call), + InvalidTransaction::Custom(2).into() // network and instruction coins doesn't match error + ); + + // update block number & batch + System::set_block_number(3); + for ins in &mut batch.instructions { + ins.balance.coin = ExternalCoin::Bitcoin; + } + let signature = pair.sign(&batch_message(&batch)); + + // batch id can't be equal or less than previous id + let call = pallet::Call::::execute_batch { + batch: SignedBatch { batch: batch.clone(), signature }, + }; + assert_eq!( + InInstructions::validate_unsigned(TransactionSource::External, &call), + InvalidTransaction::Stale.into() + ); + + // update block number & batch + System::set_block_number(4); + batch.id += 2; + let signature = pair.sign(&batch_message(&batch)); + + // batch id can't be incremented more than once per batch + let call = pallet::Call::::execute_batch { + batch: SignedBatch { batch: batch.clone(), signature }, + }; + assert_eq!( + 
InInstructions::validate_unsigned(TransactionSource::External, &call), + InvalidTransaction::Future.into() + ); + + // update block number & batch + System::set_block_number(5); + batch.id = (batch.id - 2) + 1; + let signature = pair.sign(&batch_message(&batch)); + + // it should now pass + let call = pallet::Call::::execute_batch { + batch: SignedBatch { batch: batch.clone(), signature }, + }; + InInstructions::validate_unsigned(TransactionSource::External, &call).unwrap(); + }); +} + +#[test] +fn transfer_instruction() { + new_test_ext().execute_with(|| { + let coin = ExternalCoin::Bitcoin; + let amount = Amount(2 * 10u64.pow(coin.decimals())); + let account = insecure_pair_from_name("random1").public(); + let batch = SignedBatch { + batch: Batch { + network: coin.network(), + id: 0, + block: BlockHash([0u8; 32]), + instructions: vec![InInstructionWithBalance { + instruction: InInstruction::Transfer(account.into()), + balance: ExternalBalance { coin, amount }, + }], + }, + signature: Signature([0u8; 64]), + }; + InInstructions::execute_batch(RawOrigin::None.into(), batch).unwrap(); + + // check that account has the coins + assert_eq!(Coins::balance(account, coin.into()), amount); + }) +} + +#[test] +fn dex_instruction_add_liquidity() { + new_test_ext().execute_with(|| { + let coin = ExternalCoin::Ether; + let amount = Amount(2 * 10u64.pow(coin.decimals())); + let account = insecure_pair_from_name("random1").public(); + + let batch = SignedBatch { + batch: Batch { + network: coin.network(), + id: 0, + block: BlockHash([0u8; 32]), + instructions: vec![InInstructionWithBalance { + instruction: InInstruction::Dex(DexCall::SwapAndAddLiquidity(account.into())), + balance: ExternalBalance { coin, amount }, + }], + }, + signature: Signature([0u8; 64]), + }; + + // we should have a liquid pool before we can swap + InInstructions::execute_batch(RawOrigin::None.into(), batch.clone()).unwrap(); + + // check that the instruction is failed + assert_eq!( + get_events() + .into_iter() + .filter(|event| matches!(event, in_instructions::Event::::InstructionFailure { .. })) + .collect::>(), + vec![in_instructions::Event::::InstructionFailure { + network: batch.batch.network, + id: batch.batch.id, + index: 0 + }] + ); + + let original_coin_amount = 5 * 10u64.pow(coin.decimals()); + make_liquid_pool(coin, original_coin_amount); + + // this should now be successful + InInstructions::execute_batch(RawOrigin::None.into(), batch).unwrap(); + + // check that the instruction was successful + assert_eq!( + get_events() + .into_iter() + .filter(|event| matches!(event, in_instructions::Event::::InstructionFailure { .. })) + .collect::>(), + vec![] + ); + + // check that we now have a Ether pool with correct liquidity + // we can't know the actual SRI amount since we don't know the result of the swap. + // Moreover, knowing exactly how much isn't the responsibility of InInstruction pallet, + // it is responsibility of the Dex pallet. + let (coin_amount, _serai_amount) = Dex::get_reserves(&coin.into(), &Coin::Serai).unwrap(); + assert_eq!(coin_amount, original_coin_amount + amount.0); + + // assert that the account got the liquidity tokens, again we don't how much and + // it isn't this pallets responsibility. 
+ assert!(LiquidityTokens::balance(account, coin.into()).0 > 0); + + // check that in ins account doesn't have the coins + assert_eq!(Coins::balance(IN_INSTRUCTION_EXECUTOR.into(), coin.into()), Amount(0)); + assert_eq!(Coins::balance(IN_INSTRUCTION_EXECUTOR.into(), Coin::Serai), Amount(0)); + }) +} + +#[test] +fn dex_instruction_swap() { + new_test_ext().execute_with(|| { + let coin = ExternalCoin::Bitcoin; + let amount = Amount(2 * 10u64.pow(coin.decimals())); + let account = insecure_pair_from_name("random1").public(); + + // make a pool so that can actually swap + make_liquid_pool(coin, 5 * 10u64.pow(coin.decimals())); + + let mut batch = SignedBatch { + batch: Batch { + network: coin.network(), + id: 0, + block: BlockHash([0u8; 32]), + instructions: vec![InInstructionWithBalance { + instruction: InInstruction::Dex(DexCall::Swap( + Balance { coin: Coin::Serai, amount: Amount(1) }, + OutAddress::External(ExternalAddress::new([0u8; 64].to_vec()).unwrap()), + )), + balance: ExternalBalance { coin, amount }, + }], + }, + signature: Signature([0u8; 64]), + }; + + // we can't send SRI to external address + InInstructions::execute_batch(RawOrigin::None.into(), batch.clone()).unwrap(); + + // check that the instruction was failed + assert_eq!( + get_events() + .into_iter() + .filter(|event| matches!(event, in_instructions::Event::::InstructionFailure { .. })) + .collect::>(), + vec![in_instructions::Event::::InstructionFailure { + network: batch.batch.network, + id: batch.batch.id, + index: 0 + }] + ); + + // make it internal address + batch.batch.instructions[0].instruction = InInstruction::Dex(DexCall::Swap( + Balance { coin: Coin::Serai, amount: Amount(1) }, + OutAddress::Serai(account.into()), + )); + + // check that swap is successful this time + assert_eq!(Coins::balance(account, Coin::Serai), Amount(0)); + InInstructions::execute_batch(RawOrigin::None.into(), batch.clone()).unwrap(); + assert!(Coins::balance(account, Coin::Serai).0 > 0); + + // make another pool for external coin + let coin2 = ExternalCoin::Monero; + make_liquid_pool(coin2, 5 * 10u64.pow(coin.decimals())); + + // update the batch + let out_addr = ExternalAddress::new([0u8; 64].to_vec()).unwrap(); + batch.batch.instructions[0].instruction = InInstruction::Dex(DexCall::Swap( + Balance { coin: ExternalCoin::Monero.into(), amount: Amount(1) }, + OutAddress::External(out_addr.clone()), + )); + InInstructions::execute_batch(RawOrigin::None.into(), batch.clone()).unwrap(); + + // check that we got out instruction + let events = System::events() + .iter() + .filter_map(|event| { + if let RuntimeEvent::Coins(e) = &event.event { + if matches!(e, coins::Event::::BurnWithInstruction { .. 
}) { + Some(e.clone()) + } else { + None + } + } else { + None + } + }) + .collect::>(); + + assert_eq!( + events, + vec![coins::Event::::BurnWithInstruction { + from: IN_INSTRUCTION_EXECUTOR.into(), + instruction: OutInstructionWithBalance { + instruction: OutInstruction { address: out_addr, data: None }, + balance: ExternalBalance { coin: coin2, amount: Amount(68228493) } + } + }] + ) + }) +} + +#[test] +fn genesis_liquidity_instruction() { + new_test_ext().execute_with(|| { + let coin = ExternalCoin::Bitcoin; + let amount = Amount(2 * 10u64.pow(coin.decimals())); + let account = insecure_pair_from_name("random1").public(); + + let batch = SignedBatch { + batch: Batch { + network: coin.network(), + id: 0, + block: BlockHash([0u8; 32]), + instructions: vec![InInstructionWithBalance { + instruction: InInstruction::GenesisLiquidity(account.into()), + balance: ExternalBalance { coin, amount }, + }], + }, + signature: Signature([0u8; 64]), + }; + + InInstructions::execute_batch(RawOrigin::None.into(), batch.clone()).unwrap(); + + // check that genesis liq account got the coins + assert_eq!(Coins::balance(GENESIS_LIQUIDITY_ACCOUNT.into(), coin.into()), amount); + + // check that it registered the liquidity for the account + // detailed tests about the amounts has to be done in GenesisLiquidity pallet tests. + let liquidity_amount = GenesisLiquidity::liquidity(coin, account).unwrap(); + assert_eq!(liquidity_amount.coins, amount.0); + assert_eq!(liquidity_amount.shares, INITIAL_GENESIS_LP_SHARES); + + let supply = GenesisLiquidity::supply(coin).unwrap(); + assert_eq!(supply.coins, amount.0); + assert_eq!(supply.shares, INITIAL_GENESIS_LP_SHARES); + }) +} + +#[test] +fn swap_to_staked_sri_instruction() { + new_test_ext().execute_with(|| { + let coin = ExternalCoin::Monero; + let key_share = + ValidatorSets::::allocation_per_key_share(NetworkId::from(coin.network())).unwrap(); + let amount = Amount(2 * key_share.0); + let account = insecure_pair_from_name("random1").public(); + + // make a pool so that can actually swap + make_liquid_pool(coin, 5 * 10u64.pow(coin.decimals())); + + // set the keys to set the TAS for the network + ValidatorSets::::set_keys( + RawOrigin::None.into(), + coin.network(), + Vec::new().try_into().unwrap(), + KeyPair(insecure_pair_from_name("random-key").public(), Vec::new().try_into().unwrap()), + Signature([0u8; 64]), + ) + .unwrap(); + + // make sure account doesn't already have lTs or allocation + let current_liq_tokens = LiquidityTokens::balance(POL_ACCOUNT.into(), coin.into()).0; + assert_eq!(current_liq_tokens, 0); + assert_eq!(ValidatorSets::::allocation((NetworkId::from(coin.network()), account)), None); + + // we need this so that value for the coin exist + Dex::on_finalize(0); + System::set_block_number(1); // we need this for the spot price + + let batch = SignedBatch { + batch: Batch { + network: coin.network(), + id: 0, + block: BlockHash([0u8; 32]), + instructions: vec![InInstructionWithBalance { + instruction: InInstruction::SwapToStakedSRI(account.into(), coin.network().into()), + balance: ExternalBalance { coin, amount }, + }], + }, + signature: Signature([0u8; 64]), + }; + + InInstructions::execute_batch(RawOrigin::None.into(), batch.clone()).unwrap(); + + // assert that we added liq from POL account + assert!(LiquidityTokens::balance(POL_ACCOUNT.into(), coin.into()).0 > current_liq_tokens); + + // assert that user allocated SRI for the network + let value = Dex::spot_price_for_block(0, coin).unwrap(); + let sri_amount = Amount( + u64::try_from( + 
u128::from(amount.0) + .checked_mul(u128::from(value.0)) + .unwrap() + .checked_div(u128::from(10u64.pow(coin.decimals()))) + .unwrap(), + ) + .unwrap(), + ); + assert_eq!( + ValidatorSets::::allocation((NetworkId::from(coin.network()), account)).unwrap(), + sri_amount + ); + }) +} diff --git a/substrate/in-instructions/primitives/Cargo.toml b/substrate/in-instructions/primitives/Cargo.toml index 54551134..bd926749 100644 --- a/substrate/in-instructions/primitives/Cargo.toml +++ b/substrate/in-instructions/primitives/Cargo.toml @@ -5,7 +5,7 @@ description = "Serai instructions library, enabling encoding and decoding" license = "MIT" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.74" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true diff --git a/substrate/in-instructions/primitives/src/lib.rs b/substrate/in-instructions/primitives/src/lib.rs index 3944fd73..5c74bf55 100644 --- a/substrate/in-instructions/primitives/src/lib.rs +++ b/substrate/in-instructions/primitives/src/lib.rs @@ -20,7 +20,7 @@ use sp_std::vec::Vec; use sp_runtime::RuntimeDebug; #[rustfmt::skip] -use serai_primitives::{BlockHash, Balance, ExternalNetworkId, NetworkId, SeraiAddress, ExternalBalance, ExternalAddress, system_address}; +use serai_primitives::{BlockHash, ExternalNetworkId, NetworkId, ExternalBalance, Balance, SeraiAddress, ExternalAddress, system_address}; mod shorthand; pub use shorthand::*; @@ -107,7 +107,7 @@ pub struct InInstructionWithBalance { pub struct Batch { pub network: ExternalNetworkId, pub id: u32, - pub block: BlockHash, + pub external_network_block_hash: BlockHash, pub instructions: Vec, } diff --git a/substrate/node/Cargo.toml b/substrate/node/Cargo.toml index 4ab04fde..ebcb8755 100644 --- a/substrate/node/Cargo.toml +++ b/substrate/node/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/substrate/node" authors = ["Luke Parker "] edition = "2021" publish = false -rust-version = "1.74" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true @@ -27,6 +27,10 @@ log = "0.4" schnorrkel = "0.11" +ciphersuite = { path = "../../crypto/ciphersuite" } +embedwards25519 = { path = "../../crypto/evrf/embedwards25519" } +secq256k1 = { path = "../../crypto/evrf/secq256k1" } + libp2p = "0.52" sp-core = { git = "https://github.com/serai-dex/substrate" } diff --git a/substrate/node/src/chain_spec.rs b/substrate/node/src/chain_spec.rs index 972b4dd6..ebc47fcb 100644 --- a/substrate/node/src/chain_spec.rs +++ b/substrate/node/src/chain_spec.rs @@ -1,13 +1,17 @@ use core::marker::PhantomData; -use std::collections::HashSet; -use sp_core::{Decode, Pair as PairTrait, sr25519::Public}; +use sp_core::Pair as PairTrait; use sc_service::ChainType; +use ciphersuite::{group::GroupEncoding, Ciphersuite}; +use embedwards25519::Embedwards25519; +use secq256k1::Secq256k1; + use serai_runtime::{ - primitives::*, WASM_BINARY, BABE_GENESIS_EPOCH_CONFIG, RuntimeGenesisConfig, SystemConfig, - CoinsConfig, ValidatorSetsConfig, SignalsConfig, BabeConfig, GrandpaConfig, EmissionsConfig, + primitives::*, validator_sets::AllEmbeddedEllipticCurveKeysAtGenesis, WASM_BINARY, + BABE_GENESIS_EPOCH_CONFIG, RuntimeGenesisConfig, SystemConfig, CoinsConfig, ValidatorSetsConfig, + SignalsConfig, BabeConfig, GrandpaConfig, EmissionsConfig, }; pub type ChainSpec = sc_service::GenericChainSpec; @@ -16,6 +20,11 @@ fn account_from_name(name: &'static str) -> PublicKey { insecure_pair_from_name(name).public() } +fn insecure_arbitrary_public_key_from_name(name: 
&'static str) -> Vec { + let key = insecure_arbitrary_key_from_name::(name); + (C::generator() * key).to_bytes().as_ref().to_vec() +} + fn wasm_binary() -> Vec { // TODO: Accept a config of runtime path const WASM_PATH: &str = "/runtime/serai.wasm"; @@ -32,7 +41,20 @@ fn devnet_genesis( validators: &[&'static str], endowed_accounts: Vec, ) -> RuntimeGenesisConfig { - let validators = validators.iter().map(|name| account_from_name(name)).collect::>(); + let validators = validators + .iter() + .map(|name| { + ( + account_from_name(name), + AllEmbeddedEllipticCurveKeysAtGenesis { + embedwards25519: insecure_arbitrary_public_key_from_name::(name) + .try_into() + .unwrap(), + secq256k1: insecure_arbitrary_public_key_from_name::(name).try_into().unwrap(), + }, + ) + }) + .collect::>(); let key_shares = NETWORKS .iter() .map(|network| match network { @@ -66,20 +88,24 @@ fn devnet_genesis( networks: key_shares.clone(), participants: validators.clone(), }, - emissions: EmissionsConfig { networks: key_shares, participants: validators.clone() }, + emissions: EmissionsConfig { + networks: key_shares, + participants: validators.iter().map(|(validator, _)| *validator).collect(), + }, signals: SignalsConfig::default(), babe: BabeConfig { - authorities: validators.iter().map(|validator| ((*validator).into(), 1)).collect(), + authorities: validators.iter().map(|validator| (validator.0.into(), 1)).collect(), epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG), _config: PhantomData, }, grandpa: GrandpaConfig { - authorities: validators.into_iter().map(|validator| (validator.into(), 1)).collect(), + authorities: validators.into_iter().map(|validator| (validator.0.into(), 1)).collect(), _config: PhantomData, }, } } +/* fn testnet_genesis(wasm_binary: &[u8], validators: Vec<&'static str>) -> RuntimeGenesisConfig { let validators = validators .into_iter() @@ -133,6 +159,7 @@ fn testnet_genesis(wasm_binary: &[u8], validators: Vec<&'static str>) -> Runtime }, } } +*/ pub fn development_config() -> ChainSpec { let wasm_binary = wasm_binary(); @@ -211,7 +238,7 @@ pub fn local_config() -> ChainSpec { } pub fn testnet_config() -> ChainSpec { - let wasm_binary = wasm_binary(); + // let wasm_binary = wasm_binary(); ChainSpec::from_genesis( // Name @@ -220,7 +247,7 @@ pub fn testnet_config() -> ChainSpec { "testnet-2", ChainType::Live, move || { - let _ = testnet_genesis(&wasm_binary, vec![]); + // let _ = testnet_genesis(&wasm_binary, vec![]) todo!() }, // Bootnodes diff --git a/substrate/node/src/rpc.rs b/substrate/node/src/rpc.rs index 77e6e265..3e8b027f 100644 --- a/substrate/node/src/rpc.rs +++ b/substrate/node/src/rpc.rs @@ -72,7 +72,7 @@ where let validators = client.runtime_api().validators(latest_block, network).map_err(|_| { Error::to_call_error(std::io::Error::other(format!( "couldn't get validators from the latest block, which is likely a fatal bug. 
{}", - "please report this at https://github.com/serai-dex/serai", + "please report this at https://github.com/serai-dex/serai/issues", ))) })?; // Always return the protocol's bootnodes diff --git a/substrate/primitives/Cargo.toml b/substrate/primitives/Cargo.toml index 983b1785..7a26f195 100644 --- a/substrate/primitives/Cargo.toml +++ b/substrate/primitives/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/primitives" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.74" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true @@ -18,6 +18,8 @@ workspace = true [dependencies] zeroize = { version = "^1.5", features = ["derive"], optional = true } +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, optional = true } + scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } scale-info = { version = "2", default-features = false, features = ["derive"] } @@ -36,7 +38,7 @@ frame-support = { git = "https://github.com/serai-dex/substrate", default-featur rand_core = { version = "0.6", default-features = false, features = ["getrandom"] } [features] -std = ["zeroize", "scale/std", "borsh?/std", "serde?/std", "scale-info/std", "sp-core/std", "sp-runtime/std", "sp-std/std", "frame-support/std"] +std = ["zeroize", "ciphersuite/std", "scale/std", "borsh?/std", "serde?/std", "scale-info/std", "sp-core/std", "sp-runtime/std", "sp-std/std", "frame-support/std"] borsh = ["dep:borsh"] serde = ["dep:serde"] default = ["std"] diff --git a/substrate/primitives/src/account.rs b/substrate/primitives/src/account.rs index 77877a14..61940f29 100644 --- a/substrate/primitives/src/account.rs +++ b/substrate/primitives/src/account.rs @@ -52,7 +52,7 @@ pub fn borsh_deserialize_signature( // TODO: Remove this for solely Public? #[derive( - Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Encode, Decode, MaxEncodedLen, TypeInfo, + Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Encode, Decode, MaxEncodedLen, TypeInfo, )] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] @@ -90,11 +90,22 @@ impl std::fmt::Display for SeraiAddress { } } +/// Create a Substraate key pair by a name. +/// +/// This should never be considered to have a secure private key. It has effectively no entropy. #[cfg(feature = "std")] pub fn insecure_pair_from_name(name: &str) -> Pair { Pair::from_string(&format!("//{name}"), None).unwrap() } +/// Create a private key for an arbitrary ciphersuite by a name. +/// +/// This key should never be considered a secure private key. It has effectively no entropy. +#[cfg(feature = "std")] +pub fn insecure_arbitrary_key_from_name(name: &str) -> C::F { + C::hash_to_F(b"insecure arbitrary key", name.as_bytes()) +} + pub struct AccountLookup; impl Lookup for AccountLookup { type Source = SeraiAddress; diff --git a/substrate/primitives/src/constants.rs b/substrate/primitives/src/constants.rs index b3db7317..a3d4b6f9 100644 --- a/substrate/primitives/src/constants.rs +++ b/substrate/primitives/src/constants.rs @@ -3,6 +3,7 @@ use crate::BlockNumber; // 1 MB pub const BLOCK_SIZE: u32 = 1024 * 1024; // 6 seconds +// TODO: Use Duration pub const TARGET_BLOCK_TIME: u64 = 6; /// Measured in blocks. 
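// Illustrative sketch, not part of the patch above: insecure_arbitrary_key_from_name
// hashes a name straight to a scalar, so chain_spec can derive matching, deterministic
// (and deliberately insecure) embedded-curve public keys for devnet validators, mirroring
// the insecure_arbitrary_public_key_from_name helper earlier in this diff:
//
//   use ciphersuite::{group::GroupEncoding, Ciphersuite};
//   use embedwards25519::Embedwards25519;
//
//   let key = insecure_arbitrary_key_from_name::<Embedwards25519>("Alice");
//   let public_key_bytes = (Embedwards25519::generator() * key).to_bytes().as_ref().to_vec();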
diff --git a/substrate/primitives/src/lib.rs b/substrate/primitives/src/lib.rs index a92fa7e0..0092a77b 100644 --- a/substrate/primitives/src/lib.rs +++ b/substrate/primitives/src/lib.rs @@ -63,10 +63,7 @@ pub fn borsh_deserialize_bounded_vec &[u8] { - self.0.as_ref() - } - #[cfg(feature = "std")] pub fn consume(self) -> Vec { self.0.into_inner() @@ -110,51 +103,6 @@ impl AsRef<[u8]> for ExternalAddress { } } -// Should be enough for a Uniswap v3 call -pub const MAX_DATA_LEN: u32 = 512; -#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] -#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct Data( - #[cfg_attr( - feature = "borsh", - borsh( - serialize_with = "borsh_serialize_bounded_vec", - deserialize_with = "borsh_deserialize_bounded_vec" - ) - )] - BoundedVec>, -); - -#[cfg(feature = "std")] -impl Zeroize for Data { - fn zeroize(&mut self) { - self.0.as_mut().zeroize() - } -} - -impl Data { - #[cfg(feature = "std")] - pub fn new(data: Vec) -> Result { - Ok(Data(data.try_into().map_err(|_| "data length exceeds {MAX_DATA_LEN}")?)) - } - - pub fn data(&self) -> &[u8] { - self.0.as_ref() - } - - #[cfg(feature = "std")] - pub fn consume(self) -> Vec { - self.0.into_inner() - } -} - -impl AsRef<[u8]> for Data { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} - /// Lexicographically reverses a given byte array. pub fn reverse_lexicographic_order(bytes: [u8; N]) -> [u8; N] { let mut res = [0u8; N]; diff --git a/substrate/primitives/src/networks.rs b/substrate/primitives/src/networks.rs index 64cf7cc2..ace34127 100644 --- a/substrate/primitives/src/networks.rs +++ b/substrate/primitives/src/networks.rs @@ -15,8 +15,18 @@ use sp_std::{vec, vec::Vec}; #[cfg(feature = "borsh")] use crate::{borsh_serialize_bounded_vec, borsh_deserialize_bounded_vec}; +/// Identifier for an embedded elliptic curve. +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] +#[cfg_attr(feature = "std", derive(Zeroize))] +#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum EmbeddedEllipticCurve { + Embedwards25519, + Secq256k1, +} + /// The type used to identify external networks. -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, PartialOrd, Ord, TypeInfo)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TypeInfo)] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub enum ExternalNetworkId { @@ -125,6 +135,21 @@ impl BorshDeserialize for NetworkId { } impl ExternalNetworkId { + /// The embedded elliptic curve actively used for this network. + /// + /// This is guaranteed to return `[]`, `[Embedwards25519]`, or + /// `[Embedwards25519, *network specific curve*]`. 
+ pub fn embedded_elliptic_curves(&self) -> &'static [EmbeddedEllipticCurve] { + match self { + // We need to generate a Ristretto key for oraclizing and a Secp256k1 key for the network + Self::Bitcoin | Self::Ethereum => { + &[EmbeddedEllipticCurve::Embedwards25519, EmbeddedEllipticCurve::Secq256k1] + } + // Since the oraclizing key curve is the same as the network's curve, we only need it + Self::Monero => &[EmbeddedEllipticCurve::Embedwards25519], + } + } + pub fn coins(&self) -> Vec { match self { Self::Bitcoin => vec![ExternalCoin::Bitcoin], @@ -135,6 +160,17 @@ impl ExternalNetworkId { } impl NetworkId { + /// The embedded elliptic curve actively used for this network. + /// + /// This is guaranteed to return `[]`, `[Embedwards25519]`, or + /// `[Embedwards25519, *network specific curve*]`. + pub fn embedded_elliptic_curves(&self) -> &'static [EmbeddedEllipticCurve] { + match self { + Self::Serai => &[], + Self::External(network) => network.embedded_elliptic_curves(), + } + } + pub fn coins(&self) -> Vec { match self { Self::Serai => vec![Coin::Serai], diff --git a/substrate/runtime/Cargo.toml b/substrate/runtime/Cargo.toml index 9cd0f5ab..c718a3a3 100644 --- a/substrate/runtime/Cargo.toml +++ b/substrate/runtime/Cargo.toml @@ -6,7 +6,7 @@ license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/runtime" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.74" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true @@ -19,7 +19,7 @@ ignored = ["scale", "scale-info"] workspace = true [dependencies] -hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] } +hashbrown = { version = "0.15", default-features = false, features = ["default-hasher", "inline-more"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } scale-info = { version = "2", default-features = false, features = ["derive"] } diff --git a/substrate/runtime/build.rs b/substrate/runtime/build.rs index eba52b3e..d19c8315 100644 --- a/substrate/runtime/build.rs +++ b/substrate/runtime/build.rs @@ -1,5 +1,12 @@ use substrate_wasm_builder::WasmBuilder; fn main() { - WasmBuilder::new().with_current_project().export_heap_base().import_memory().build() + WasmBuilder::new() + .with_current_project() + // https://substrate.stackexchange.com/questions/12124 + // TODO: Remove once we've moved to polkadot-sdk + .disable_runtime_version_section_check() + .export_heap_base() + .import_memory() + .build() } diff --git a/substrate/runtime/src/abi.rs b/substrate/runtime/src/abi.rs index 48b4a6c7..99b79265 100644 --- a/substrate/runtime/src/abi.rs +++ b/substrate/runtime/src/abi.rs @@ -5,8 +5,6 @@ use scale::{Encode, Decode}; use serai_abi::Call; use crate::{ - Vec, - primitives::{PublicKey, SeraiAddress}, timestamp, coins, dex, genesis_liquidity, validator_sets::{self, MembershipProof}, in_instructions, signals, babe, grandpa, RuntimeCall, @@ -92,28 +90,26 @@ impl From for RuntimeCall { Call::ValidatorSets(vs) => match vs { serai_abi::validator_sets::Call::set_keys { network, - removed_participants, key_pair, + signature_participants, signature, } => RuntimeCall::ValidatorSets(validator_sets::Call::set_keys { network, - removed_participants: <_>::try_from( - removed_participants.into_iter().map(PublicKey::from).collect::>(), - ) - .unwrap(), key_pair, + signature_participants, signature, }), + serai_abi::validator_sets::Call::set_embedded_elliptic_curve_key { + 
embedded_elliptic_curve, + key, + } => RuntimeCall::ValidatorSets(validator_sets::Call::set_embedded_elliptic_curve_key { + embedded_elliptic_curve, + key, + }), serai_abi::validator_sets::Call::report_slashes { network, slashes, signature } => { RuntimeCall::ValidatorSets(validator_sets::Call::report_slashes { network, - slashes: <_>::try_from( - slashes - .into_iter() - .map(|(addr, slash)| (PublicKey::from(addr), slash)) - .collect::>(), - ) - .unwrap(), + slashes, signature, }) } @@ -282,30 +278,23 @@ impl TryInto for RuntimeCall { _ => Err(())?, }), RuntimeCall::ValidatorSets(call) => Call::ValidatorSets(match call { - validator_sets::Call::set_keys { network, removed_participants, key_pair, signature } => { + validator_sets::Call::set_keys { network, key_pair, signature_participants, signature } => { serai_abi::validator_sets::Call::set_keys { network, - removed_participants: <_>::try_from( - removed_participants.into_iter().map(SeraiAddress::from).collect::>(), - ) - .unwrap(), key_pair, + signature_participants, signature, } } - validator_sets::Call::report_slashes { network, slashes, signature } => { - serai_abi::validator_sets::Call::report_slashes { - network, - slashes: <_>::try_from( - slashes - .into_iter() - .map(|(addr, slash)| (SeraiAddress::from(addr), slash)) - .collect::>(), - ) - .unwrap(), - signature, + validator_sets::Call::set_embedded_elliptic_curve_key { embedded_elliptic_curve, key } => { + serai_abi::validator_sets::Call::set_embedded_elliptic_curve_key { + embedded_elliptic_curve, + key, } } + validator_sets::Call::report_slashes { network, slashes, signature } => { + serai_abi::validator_sets::Call::report_slashes { network, slashes, signature } + } validator_sets::Call::allocate { network, amount } => { serai_abi::validator_sets::Call::allocate { network, amount } } diff --git a/substrate/runtime/src/lib.rs b/substrate/runtime/src/lib.rs index 2ef3786d..f2f3f91f 100644 --- a/substrate/runtime/src/lib.rs +++ b/substrate/runtime/src/lib.rs @@ -283,7 +283,7 @@ impl pallet_authorship::Config for Runtime { } // Maximum number of authorities per session. -pub type MaxAuthorities = ConstU32<{ validator_sets::primitives::MAX_KEY_SHARES_PER_SET }>; +pub type MaxAuthorities = ConstU32<{ validator_sets::primitives::MAX_KEY_SHARES_PER_SET_U32 }>; /// Longevity of an offence report. 
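The per-network curve lists above give callers a stable contract. As a minimal sketch (not taken from this patch), a client could pair each required curve with the key length the pallet's `set_embedded_elliptic_curve_key` call accepts further down (32 bytes for Embedwards25519, 33 for Secq256k1); the import path and the `required_keys` helper are illustrative assumptions.

use serai_primitives::{EmbeddedEllipticCurve, NetworkId};

/// The (curve, expected key length) pairs a validator must register before allocating.
fn required_keys(network: NetworkId) -> Vec<(EmbeddedEllipticCurve, usize)> {
  network
    .embedded_elliptic_curves()
    .iter()
    .map(|curve| match curve {
      // Key for the curve used to oraclize onto Serai, 32-byte encoding per the pallet's check
      EmbeddedEllipticCurve::Embedwards25519 => (*curve, 32),
      // Key for the secp256k1-embedded curve, 33-byte encoding per the pallet's check
      EmbeddedEllipticCurve::Secq256k1 => (*curve, 33),
    })
    .collect()
}

For `NetworkId::Serai` this returns an empty list, matching the guarantee documented above.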
pub type ReportLongevity = ::EpochDuration; diff --git a/substrate/signals/pallet/Cargo.toml b/substrate/signals/pallet/Cargo.toml index e06b5e6b..4c3e3407 100644 --- a/substrate/signals/pallet/Cargo.toml +++ b/substrate/signals/pallet/Cargo.toml @@ -6,7 +6,7 @@ license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/signals/pallet" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.74" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true diff --git a/substrate/signals/primitives/Cargo.toml b/substrate/signals/primitives/Cargo.toml index 1c338145..dbaba0a5 100644 --- a/substrate/signals/primitives/Cargo.toml +++ b/substrate/signals/primitives/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/signals/primitives" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.74" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true diff --git a/substrate/validator-sets/pallet/Cargo.toml b/substrate/validator-sets/pallet/Cargo.toml index c4c748b2..aa7b4b98 100644 --- a/substrate/validator-sets/pallet/Cargo.toml +++ b/substrate/validator-sets/pallet/Cargo.toml @@ -6,23 +6,22 @@ license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/validator-sets/pallet" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.74" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] -[package.metadata.cargo-machete] -ignored = ["scale", "scale-info"] - [lints] workspace = true [dependencies] -hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] } +bitvec = { version = "1", default-features = false, features = ["alloc", "serde"] } -scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } -scale-info = { version = "2", default-features = false, features = ["derive"] } +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive", "bit-vec"] } +scale-info = { version = "2", default-features = false, features = ["derive", "bit-vec"] } + +serde = { version = "1", default-features = false, features = ["derive", "alloc"] } sp-core = { git = "https://github.com/serai-dex/substrate", default-features = false } sp-io = { git = "https://github.com/serai-dex/substrate", default-features = false } @@ -45,8 +44,22 @@ validator-sets-primitives = { package = "serai-validator-sets-primitives", path coins-pallet = { package = "serai-coins-pallet", path = "../../coins/pallet", default-features = false } dex-pallet = { package = "serai-dex-pallet", path = "../../dex/pallet", default-features = false } +[dev-dependencies] +pallet-timestamp = { git = "https://github.com/serai-dex/substrate", default-features = false } + +sp-consensus-babe = { git = "https://github.com/serai-dex/substrate", default-features = false } + +ciphersuite = { path = "../../../crypto/ciphersuite", features = ["ristretto"] } +frost = { package = "modular-frost", path = "../../../crypto/frost", features = ["tests"] } +schnorrkel = { path = "../../../crypto/schnorrkel", package = "frost-schnorrkel" } + +zeroize = "^1.5" +rand_core = "0.6" + [features] std = [ + "bitvec/std", + "scale/std", "scale-info/std", @@ -58,12 +71,15 @@ std = [ "sp-runtime/std", "sp-session/std", "sp-staking/std", + + "sp-consensus-babe/std", "frame-system/std", "frame-support/std", "pallet-babe/std", 
"pallet-grandpa/std", + "pallet-timestamp/std", "serai-primitives/std", "validator-sets-primitives/std", @@ -72,8 +88,12 @@ std = [ "dex-pallet/std", ] -# TODO -try-runtime = [] +try-runtime = [ + "frame-system/try-runtime", + "frame-support/try-runtime", + + "sp-runtime/try-runtime", +] runtime-benchmarks = [ "frame-system/runtime-benchmarks", diff --git a/substrate/validator-sets/pallet/src/lib.rs b/substrate/validator-sets/pallet/src/lib.rs index 3100c60e..ff1cc452 100644 --- a/substrate/validator-sets/pallet/src/lib.rs +++ b/substrate/validator-sets/pallet/src/lib.rs @@ -1,5 +1,11 @@ #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(test)] +mod mock; + +#[cfg(test)] +mod tests; + use core::marker::PhantomData; use scale::{Encode, Decode}; @@ -83,6 +89,12 @@ pub mod pallet { type ShouldEndSession: ShouldEndSession>; } + #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, serde::Serialize, serde::Deserialize)] + pub struct AllEmbeddedEllipticCurveKeysAtGenesis { + pub embedwards25519: BoundedVec>, + pub secq256k1: BoundedVec>, + } + #[pallet::genesis_config] #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] pub struct GenesisConfig { @@ -92,7 +104,7 @@ pub mod pallet { /// This stake cannot be withdrawn however as there's no actual stake behind it. pub networks: Vec<(NetworkId, Amount)>, /// List of participants to place in the initial validator sets. - pub participants: Vec, + pub participants: Vec<(T::AccountId, AllEmbeddedEllipticCurveKeysAtGenesis)>, } impl Default for GenesisConfig { @@ -135,7 +147,7 @@ pub mod pallet { _, Identity, NetworkId, - BoundedVec<(Public, u64), ConstU32<{ MAX_KEY_SHARES_PER_SET }>>, + BoundedVec<(Public, u64), ConstU32<{ MAX_KEY_SHARES_PER_SET_U32 }>>, OptionQuery, >; /// The validators selected to be in-set, regardless of if removed. @@ -191,6 +203,18 @@ pub mod pallet { } } + /// A key on an embedded elliptic curve. + #[pallet::storage] + pub type EmbeddedEllipticCurveKeys = StorageDoubleMap< + _, + Blake2_128Concat, + Public, + Identity, + EmbeddedEllipticCurve, + BoundedVec>, + OptionQuery, + >; + /// The total stake allocated to this network by the active set of validators. #[pallet::storage] #[pallet::getter(fn total_allocated_stake)] @@ -303,6 +327,7 @@ pub mod pallet { /// Pending deallocations, keyed by the Session they become unlocked on. 
#[pallet::storage] + #[pallet::getter(fn pending_deallocations)] type PendingDeallocations = StorageDoubleMap< _, Blake2_128Concat, @@ -386,23 +411,25 @@ pub mod pallet { // Clear the current InSet assert_eq!( - InSet::::clear_prefix(network, MAX_KEY_SHARES_PER_SET, None).maybe_cursor, + InSet::::clear_prefix(network, MAX_KEY_SHARES_PER_SET_U32, None).maybe_cursor, None ); let allocation_per_key_share = Self::allocation_per_key_share(network).unwrap().0; let mut participants = vec![]; + let mut total_allocated_stake = 0; { let mut iter = SortedAllocationsIter::::new(network); let mut key_shares = 0; - while key_shares < u64::from(MAX_KEY_SHARES_PER_SET) { + while key_shares < u64::from(MAX_KEY_SHARES_PER_SET_U32) { let Some((key, amount)) = iter.next() else { break }; let these_key_shares = - (amount.0 / allocation_per_key_share).min(u64::from(MAX_KEY_SHARES_PER_SET)); + (amount.0 / allocation_per_key_share).min(u64::from(MAX_KEY_SHARES_PER_SET_U32)); participants.push((key, these_key_shares)); + total_allocated_stake += amount.0; key_shares += these_key_shares; } amortize_excess_key_shares(&mut participants); @@ -415,6 +442,12 @@ pub mod pallet { let set = ValidatorSet { network, session }; Pallet::::deposit_event(Event::NewSet { set }); + // other networks set their Session(0) TAS once they set their keys but serai network + // doesn't have that so we set it here. + if network == NetworkId::Serai && session == Session(0) { + TotalAllocatedStake::::set(network, Some(Amount(total_allocated_stake))); + } + Participants::::set(network, Some(participants.try_into().unwrap())); SessionBeginBlock::::set( network, @@ -428,6 +461,14 @@ pub mod pallet { pub enum Error { /// Validator Set doesn't exist. NonExistentValidatorSet, + /// An invalid embedded elliptic curve key was specified. + /// + /// This error not being raised does not mean the key was valid. Solely that it wasn't detected + /// by this pallet as invalid. + InvalidEmbeddedEllipticCurveKey, + /// Trying to perform an operation requiring an embedded elliptic curve key, without an + /// embedded elliptic curve key. + MissingEmbeddedEllipticCurveKey, /// Not enough allocation to obtain a key share in the set. InsufficientAllocation, /// Trying to deallocate more than allocated. 
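To make the selection loop above concrete, here is a simplified, self-contained sketch (not taken from this patch) of the same arithmetic: each allocation is floor-divided by `allocation_per_key_share`, capped per validator, and accumulated until the set is full, after which `amortize_excess_key_shares` trims any overshoot by the final validator. The `AccountId` alias and the largest-first ordering of the input are assumptions.

type AccountId = [u8; 32]; // stand-in for the pallet's T::AccountId

const MAX_KEY_SHARES_PER_SET: u64 = 150;

fn select_set(
  allocation_per_key_share: u64,
  // Assumed to yield the largest allocations first, as the sorted-allocations iterator is used
  sorted_allocations: impl Iterator<Item = (AccountId, u64)>,
) -> Vec<(AccountId, u64)> {
  let mut participants = vec![];
  let mut key_shares = 0;
  for (key, amount) in sorted_allocations {
    if key_shares >= MAX_KEY_SHARES_PER_SET {
      break;
    }
    // e.g. an allocation of 120,000 SRI at 50,000 SRI per key share yields 2 key shares
    let these_key_shares = (amount / allocation_per_key_share).min(MAX_KEY_SHARES_PER_SET);
    participants.push((key, these_key_shares));
    key_shares += these_key_shares;
  }
  participants
}

The 50,000 SRI figure in the comment matches the mock's per-key-share amount for the Serai network further below.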
@@ -471,10 +512,20 @@ pub mod pallet { fn build(&self) { for (id, stake) in self.networks.clone() { AllocationPerKeyShare::::set(id, Some(stake)); - for participant in self.participants.clone() { - if Pallet::::set_allocation(id, participant, stake) { + for participant in &self.participants { + if Pallet::::set_allocation(id, participant.0, stake) { panic!("participants contained duplicates"); } + EmbeddedEllipticCurveKeys::::set( + participant.0, + EmbeddedEllipticCurve::Embedwards25519, + Some(participant.1.embedwards25519.clone()), + ); + EmbeddedEllipticCurveKeys::::set( + participant.0, + EmbeddedEllipticCurve::Secq256k1, + Some(participant.1.secq256k1.clone()), + ); } Pallet::::new_set(id); } @@ -501,7 +552,7 @@ pub mod pallet { top = Some(key_shares); } - if key_shares > u64::from(MAX_KEY_SHARES_PER_SET) { + if key_shares > u64::from(MAX_KEY_SHARES_PER_SET_U32) { break; } } @@ -513,7 +564,7 @@ pub mod pallet { // post_amortization_key_shares_for_top_validator yields what the top validator's key shares // would be after such a reduction, letting us evaluate this correctly let top = post_amortization_key_shares_for_top_validator(validators_len, top, key_shares); - (top * 3) < key_shares.min(MAX_KEY_SHARES_PER_SET.into()) + (top * 3) < key_shares.min(MAX_KEY_SHARES_PER_SET_U32.into()) } fn increase_allocation( @@ -552,7 +603,7 @@ pub mod pallet { // The above is_bft calls are only used to check a BFT net doesn't become non-BFT // Check here if this call would prevent a non-BFT net from *ever* becoming BFT - if (new_allocation / allocation_per_key_share) >= (MAX_KEY_SHARES_PER_SET / 3).into() { + if (new_allocation / allocation_per_key_share) >= (MAX_KEY_SHARES_PER_SET_U32 / 3).into() { Err(Error::::AllocationWouldPreventFaultTolerance)?; } @@ -622,7 +673,7 @@ pub mod pallet { // If we're not removing the entire allocation, yet the allocation is no longer at or above // the threshold for a key share, error let allocation_per_key_share = Self::allocation_per_key_share(network).unwrap().0; - if (new_allocation != 0) && (new_allocation < allocation_per_key_share) { + if (new_allocation > 0) && (new_allocation < allocation_per_key_share) { Err(Error::::DeallocationWouldRemoveParticipant)?; } @@ -783,7 +834,7 @@ pub mod pallet { PendingDeallocations::::take((network, key), session) } - fn rotate_session() { + pub(crate) fn rotate_session() { // next serai validators that is in the queue. let now_validators = Participants::::get(NetworkId::Serai) .expect("no Serai participants upon rotate_session"); @@ -960,14 +1011,15 @@ pub mod pallet { pub fn set_keys( origin: OriginFor, network: ExternalNetworkId, - removed_participants: BoundedVec>, key_pair: KeyPair, + signature_participants: bitvec::vec::BitVec, signature: Signature, ) -> DispatchResult { ensure_none(origin)?; // signature isn't checked as this is an unsigned transaction, and validate_unsigned // (called by pre_dispatch) checks it + let _ = signature_participants; let _ = signature; let session = Self::session(NetworkId::from(network)).unwrap(); @@ -982,15 +1034,6 @@ pub mod pallet { Self::set_total_allocated_stake(NetworkId::from(network)); } - // This does not remove from TotalAllocatedStake or InSet in order to: - // 1) Not decrease the stake present in this set. This means removed participants are - // still liable for the economic security of the external network. This prevents - // a decided set, which is economically secure, from falling below the threshold. 
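Two bounds in this hunk work together: `increase_allocation` rejects any allocation that would give one validator a third or more of the maximum key shares, and the BFT check requires the top validator to hold strictly less than a third of the (capped) total. A condensed sketch (not taken from this patch), ignoring the amortization adjustment applied above:

const MAX_KEY_SHARES: u64 = 150;

// With 150 maximum key shares, a single validator is kept below 150 / 3 = 50 key shares.
fn allocation_keeps_fault_tolerance(new_allocation: u64, allocation_per_key_share: u64) -> bool {
  (new_allocation / allocation_per_key_share) < (MAX_KEY_SHARES / 3)
}

// A set is BFT while the largest validator's key shares, tripled, stay under the capped total,
// e.g. 5 of 50 key shares: 15 < 50, so even the largest validator alone is under the 1/3 bound.
fn is_bft(top_key_shares: u64, total_key_shares: u64) -> bool {
  (top_key_shares * 3) < total_key_shares.min(MAX_KEY_SHARES)
}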
- // 2) Not allow parties removed to immediately deallocate, per commentary on deallocation - // scheduling (https://github.com/serai-dex/serai/issues/394). - for removed in removed_participants { - Self::deposit_event(Event::ParticipantRemoved { set: set.into(), removed }); - } Self::deposit_event(Event::KeyGen { set, key_pair }); Ok(()) @@ -1001,7 +1044,7 @@ pub mod pallet { pub fn report_slashes( origin: OriginFor, network: ExternalNetworkId, - slashes: BoundedVec<(Public, u32), ConstU32<{ MAX_KEY_SHARES_PER_SET / 3 }>>, + slashes: SlashReport, signature: Signature, ) -> DispatchResult { ensure_none(origin)?; @@ -1026,8 +1069,42 @@ pub mod pallet { #[pallet::call_index(2)] #[pallet::weight(0)] // TODO + pub fn set_embedded_elliptic_curve_key( + origin: OriginFor, + embedded_elliptic_curve: EmbeddedEllipticCurve, + key: BoundedVec>, + ) -> DispatchResult { + let validator = ensure_signed(origin)?; + + // We don't have the curve formulas, nor the BigInt arithmetic, necessary here to validate + // these keys. Instead, we solely check the key lengths. Validators are responsible to not + // provide invalid keys. + let expected_len = match embedded_elliptic_curve { + EmbeddedEllipticCurve::Embedwards25519 => 32, + EmbeddedEllipticCurve::Secq256k1 => 33, + }; + if key.len() != expected_len { + Err(Error::::InvalidEmbeddedEllipticCurveKey)?; + } + + // This does allow overwriting an existing key which... is unlikely to be done? + // Yet it isn't an issue as we'll fix to the key as of any set's declaration (uncaring to if + // it's distinct at the latest block) + EmbeddedEllipticCurveKeys::::set(validator, embedded_elliptic_curve, Some(key)); + Ok(()) + } + + #[pallet::call_index(3)] + #[pallet::weight(0)] // TODO pub fn allocate(origin: OriginFor, network: NetworkId, amount: Amount) -> DispatchResult { let validator = ensure_signed(origin)?; + // If this network utilizes embedded elliptic curve(s), require the validator to have set the + // appropriate key(s) + for embedded_elliptic_curve in network.embedded_elliptic_curves() { + if !EmbeddedEllipticCurveKeys::::contains_key(validator, *embedded_elliptic_curve) { + Err(Error::::MissingEmbeddedEllipticCurveKey)?; + } + } Coins::::transfer_internal( validator, Self::account(), @@ -1036,7 +1113,7 @@ pub mod pallet { Self::increase_allocation(network, validator, amount, false) } - #[pallet::call_index(3)] + #[pallet::call_index(4)] #[pallet::weight(0)] // TODO pub fn deallocate(origin: OriginFor, network: NetworkId, amount: Amount) -> DispatchResult { let account = ensure_signed(origin)?; @@ -1053,7 +1130,7 @@ pub mod pallet { Ok(()) } - #[pallet::call_index(4)] + #[pallet::call_index(5)] #[pallet::weight((0, DispatchClass::Operational))] // TODO pub fn claim_deallocation( origin: OriginFor, @@ -1081,7 +1158,7 @@ pub mod pallet { fn validate_unsigned(_: TransactionSource, call: &Self::Call) -> TransactionValidity { // Match to be exhaustive match call { - Call::set_keys { network, ref removed_participants, ref key_pair, ref signature } => { + Call::set_keys { network, ref key_pair, ref signature_participants, ref signature } => { let network = *network; // Confirm this set has a session @@ -1100,30 +1177,24 @@ pub mod pallet { // session on this assumption assert_eq!(Pallet::::latest_decided_session(network.into()), Some(current_session)); - // This does not slash the removed participants as that'll be done at the end of the - // set's lifetime - let mut removed = hashbrown::HashSet::new(); - for participant in removed_participants { - // Confirm 
this wasn't duplicated - if removed.contains(&participant.0) { - Err(InvalidTransaction::Custom(2))?; - } - removed.insert(participant.0); - } - let participants = Participants::::get(NetworkId::from(network)) .expect("session existed without participants"); + // Check the bitvec is of the proper length + if participants.len() != signature_participants.len() { + Err(InvalidTransaction::Custom(2))?; + } + let mut all_key_shares = 0; let mut signers = vec![]; let mut signing_key_shares = 0; - for participant in participants { + for (participant, in_use) in participants.into_iter().zip(signature_participants) { let participant = participant.0; let shares = InSet::::get(NetworkId::from(network), participant) .expect("participant from Participants wasn't InSet"); all_key_shares += shares; - if removed.contains(&participant.0) { + if !in_use { continue; } @@ -1141,9 +1212,7 @@ pub mod pallet { // Verify the signature with the MuSig key of the signers // We theoretically don't need set_keys_message to bind to removed_participants, as the // key we're signing with effectively already does so, yet there's no reason not to - if !musig_key(set.into(), &signers) - .verify(&set_keys_message(&set, removed_participants, key_pair), signature) - { + if !musig_key(set.into(), &signers).verify(&set_keys_message(&set, key_pair), signature) { Err(InvalidTransaction::BadProof)?; } @@ -1165,19 +1234,20 @@ pub mod pallet { network, session: Session(Self::session(NetworkId::from(network)).unwrap().0 - 1), }; - if !key.verify(&report_slashes_message(&set, slashes), signature) { + if !key.verify(&slashes.report_slashes_message(), signature) { Err(InvalidTransaction::BadProof)?; } ValidTransaction::with_tag_prefix("ValidatorSets") .and_provides((1, set)) - .longevity(MAX_KEY_SHARES_PER_SET.into()) + .longevity(MAX_KEY_SHARES_PER_SET_U32.into()) .propagate(true) .build() } - Call::allocate { .. } | Call::deallocate { .. } | Call::claim_deallocation { .. } => { - Err(InvalidTransaction::Call)? - } + Call::set_embedded_elliptic_curve_key { .. } | + Call::allocate { .. } | + Call::deallocate { .. } | + Call::claim_deallocation { .. } => Err(InvalidTransaction::Call)?, Call::__Ignore(_, _) => unreachable!(), } } diff --git a/substrate/validator-sets/pallet/src/mock.rs b/substrate/validator-sets/pallet/src/mock.rs new file mode 100644 index 00000000..d6d12050 --- /dev/null +++ b/substrate/validator-sets/pallet/src/mock.rs @@ -0,0 +1,210 @@ +//! Test environment for ValidatorSets pallet. + +use super::*; + +use std::collections::HashMap; + +use frame_support::{ + construct_runtime, + traits::{ConstU16, ConstU32, ConstU64}, +}; + +use sp_core::{ + H256, Pair as PairTrait, + sr25519::{Public, Pair}, +}; +use sp_runtime::{ + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, +}; + +use serai_primitives::*; +use validator_sets::{primitives::MAX_KEY_SHARES_PER_SET, MembershipProof}; + +pub use crate as validator_sets; +pub use coins_pallet as coins; +pub use dex_pallet as dex; +pub use pallet_babe as babe; +pub use pallet_grandpa as grandpa; +pub use pallet_timestamp as timestamp; + +type Block = frame_system::mocking::MockBlock; +// Maximum number of authorities per session. 
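The `signature_participants` bitvec replaces the explicit removed-participants list: one bit per decided participant, set when that validator contributed to the MuSig signature. A sketch (not taken from this patch) of the selection and tallying step; the bitvec's type parameters and the `signers_and_tallies` helper are assumptions, and the key-share threshold the pallet enforces afterwards is not reproduced here.

use bitvec::{order::Lsb0, vec::BitVec};
use sp_core::sr25519::Public;

fn signers_and_tallies(
  participants: &[(Public, u64)],
  signature_participants: &BitVec<u8, Lsb0>,
) -> Result<(Vec<Public>, u64, u64), &'static str> {
  // The bitvec must be exactly as long as the decided participant list
  if participants.len() != signature_participants.len() {
    return Err("signature_participants length doesn't match the decided participants");
  }
  let mut signers = vec![];
  let (mut all_key_shares, mut signing_key_shares) = (0u64, 0u64);
  for ((public, shares), in_use) in participants.iter().zip(signature_participants.iter()) {
    all_key_shares += *shares;
    if *in_use {
      // Only participants with their bit set are aggregated into the MuSig key
      signers.push(*public);
      signing_key_shares += *shares;
    }
  }
  Ok((signers, all_key_shares, signing_key_shares))
}

The aggregate key is then `musig_key(set.into(), &signers)` and the message `set_keys_message(&set, key_pair)`, as in `validate_unsigned` above.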
+pub type MaxAuthorities = ConstU32<{ MAX_KEY_SHARES_PER_SET }>; + +pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); +pub const BABE_GENESIS_EPOCH_CONFIG: sp_consensus_babe::BabeEpochConfiguration = + sp_consensus_babe::BabeEpochConfiguration { + c: PRIMARY_PROBABILITY, + allowed_slots: sp_consensus_babe::AllowedSlots::PrimaryAndSecondaryPlainSlots, + }; + +pub const MEDIAN_PRICE_WINDOW_LENGTH: u16 = 10; + +construct_runtime!( + pub enum Test + { + System: frame_system, + Timestamp: timestamp, + Coins: coins, + LiquidityTokens: coins::::{Pallet, Call, Storage, Event}, + ValidatorSets: validator_sets, + Dex: dex, + Babe: babe, + Grandpa: grandpa, + } +); + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = Public; + type Lookup = IdentityLookup; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +impl timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = Babe; + type MinimumPeriod = ConstU64<{ (TARGET_BLOCK_TIME * 1000) / 2 }>; + type WeightInfo = (); +} + +impl babe::Config for Test { + type EpochDuration = ConstU64<{ FAST_EPOCH_DURATION }>; + + type ExpectedBlockTime = ConstU64<{ TARGET_BLOCK_TIME * 1000 }>; + type EpochChangeTrigger = babe::ExternalTrigger; + type DisabledValidators = ValidatorSets; + + type WeightInfo = (); + type MaxAuthorities = MaxAuthorities; + + type KeyOwnerProof = MembershipProof; + type EquivocationReportSystem = (); +} + +impl grandpa::Config for Test { + type RuntimeEvent = RuntimeEvent; + + type WeightInfo = (); + type MaxAuthorities = MaxAuthorities; + + type MaxSetIdSessionEntries = ConstU64<0>; + type KeyOwnerProof = MembershipProof; + type EquivocationReportSystem = (); +} + +impl coins::Config for Test { + type RuntimeEvent = RuntimeEvent; + type AllowMint = ValidatorSets; +} + +impl coins::Config for Test { + type RuntimeEvent = RuntimeEvent; + type AllowMint = (); +} + +impl dex::Config for Test { + type RuntimeEvent = RuntimeEvent; + + type LPFee = ConstU32<3>; // 0.3% + type MintMinLiquidity = ConstU64<10000>; + + type MaxSwapPathLength = ConstU32<3>; // coin1 -> SRI -> coin2 + + type MedianPriceWindowLength = ConstU16<{ MEDIAN_PRICE_WINDOW_LENGTH }>; + + type WeightInfo = dex::weights::SubstrateWeight; +} + +impl Config for Test { + type RuntimeEvent = RuntimeEvent; + type ShouldEndSession = Babe; +} + +// For a const we can't define +pub fn genesis_participants() -> Vec { + vec![ + insecure_pair_from_name("Alice"), + insecure_pair_from_name("Bob"), + insecure_pair_from_name("Charlie"), + insecure_pair_from_name("Dave"), + ] +} + +// Amounts for single key share per network +pub fn key_shares() -> HashMap { + HashMap::from([ + (NetworkId::Serai, Amount(50_000 * 10_u64.pow(8))), + (NetworkId::External(ExternalNetworkId::Bitcoin), Amount(1_000_000 * 10_u64.pow(8))), + (NetworkId::External(ExternalNetworkId::Ethereum), Amount(1_000_000 * 10_u64.pow(8))), + (NetworkId::External(ExternalNetworkId::Monero), Amount(100_000 * 10_u64.pow(8))), + ]) +} + +pub(crate) fn 
new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + let networks: Vec<(NetworkId, Amount)> = key_shares().into_iter().collect::>(); + + coins::GenesisConfig:: { + accounts: genesis_participants() + .clone() + .into_iter() + .map(|a| (a.public(), Balance { coin: Coin::Serai, amount: Amount(1 << 60) })) + .collect(), + _ignore: Default::default(), + } + .assimilate_storage(&mut t) + .unwrap(); + + validator_sets::GenesisConfig:: { + networks, + participants: genesis_participants().into_iter().map(|p| p.public()).collect(), + } + .assimilate_storage(&mut t) + .unwrap(); + + babe::GenesisConfig:: { + authorities: genesis_participants() + .into_iter() + .map(|validator| (validator.public().into(), 1)) + .collect(), + epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG), + _config: PhantomData, + } + .assimilate_storage(&mut t) + .unwrap(); + + grandpa::GenesisConfig:: { + authorities: genesis_participants() + .into_iter() + .map(|validator| (validator.public().into(), 1)) + .collect(), + _config: PhantomData, + } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(0)); + ext +} diff --git a/substrate/validator-sets/pallet/src/tests.rs b/substrate/validator-sets/pallet/src/tests.rs new file mode 100644 index 00000000..6c407abd --- /dev/null +++ b/substrate/validator-sets/pallet/src/tests.rs @@ -0,0 +1,561 @@ +use crate::{mock::*, primitives::*}; + +use std::collections::HashMap; + +use ciphersuite::{Ciphersuite, Ristretto}; +use frost::dkg::musig::musig; +use schnorrkel::Schnorrkel; + +use zeroize::Zeroizing; +use rand_core::OsRng; + +use frame_support::{ + assert_noop, assert_ok, + pallet_prelude::{InvalidTransaction, TransactionSource}, + traits::{OnFinalize, OnInitialize}, +}; +use frame_system::RawOrigin; + +use sp_core::{ + sr25519::{Public, Pair, Signature}, + Pair as PairTrait, +}; +use sp_runtime::{traits::ValidateUnsigned, BoundedVec}; + +use serai_primitives::*; + +fn active_network_validators(network: NetworkId) -> Vec<(Public, u64)> { + if network == NetworkId::Serai { + Babe::authorities().into_iter().map(|(id, key_share)| (id.into_inner(), key_share)).collect() + } else { + ValidatorSets::participants_for_latest_decided_set(network).unwrap().into_inner() + } +} + +fn verify_session_and_active_validators(network: NetworkId, participants: &[Public], session: u32) { + let mut validators: Vec = active_network_validators(network) + .into_iter() + .map(|(p, ks)| { + assert_eq!(ks, 1); + p + }) + .collect(); + validators.sort(); + + assert_eq!(ValidatorSets::session(network).unwrap(), Session(session)); + assert_eq!(participants, validators); + + // TODO: how to make sure block finalizations work as usual here? +} + +fn get_session_at_which_changes_activate(network: NetworkId) -> u32 { + let current_session = ValidatorSets::session(network).unwrap().0; + // changes should be active in the next session + if network == NetworkId::Serai { + // it takes 1 extra session for serai net to make the changes active. 
+ current_session + 2 + } else { + current_session + 1 + } +} + +fn set_keys_for_session(network: ExternalNetworkId) { + ValidatorSets::set_keys( + RawOrigin::None.into(), + network, + BoundedVec::new(), + KeyPair(insecure_pair_from_name("Alice").public(), vec![].try_into().unwrap()), + Signature([0u8; 64]), + ) + .unwrap(); +} + +fn set_keys_signature(set: &ExternalValidatorSet, key_pair: &KeyPair, pairs: &[Pair]) -> Signature { + let mut pub_keys = vec![]; + for pair in pairs { + let public_key = + ::read_G::<&[u8]>(&mut pair.public().0.as_ref()).unwrap(); + pub_keys.push(public_key); + } + + let mut threshold_keys = vec![]; + for i in 0 .. pairs.len() { + let secret_key = ::read_F::<&[u8]>( + &mut pairs[i].as_ref().secret.to_bytes()[.. 32].as_ref(), + ) + .unwrap(); + assert_eq!(Ristretto::generator() * secret_key, pub_keys[i]); + + threshold_keys.push( + musig::(&musig_context((*set).into()), &Zeroizing::new(secret_key), &pub_keys) + .unwrap(), + ); + } + + let mut musig_keys = HashMap::new(); + for tk in threshold_keys { + musig_keys.insert(tk.params().i(), tk.into()); + } + + let sig = frost::tests::sign_without_caching( + &mut OsRng, + frost::tests::algorithm_machines(&mut OsRng, &Schnorrkel::new(b"substrate"), &musig_keys), + &set_keys_message(set, &[], key_pair), + ); + + Signature(sig.to_bytes()) +} + +fn get_ordered_keys(network: NetworkId, participants: &[Pair]) -> Vec { + // retrieve the current session validators so that we know the order of the keys + // that is necessary for the correct musig signature. + let validators = ValidatorSets::participants_for_latest_decided_set(network).unwrap(); + + // collect the pairs of the validators + let mut pairs = vec![]; + for (v, _) in validators { + let p = participants.iter().find(|pair| pair.public() == v).unwrap().clone(); + pairs.push(p); + } + + pairs +} + +fn rotate_session_until(network: NetworkId, session: u32) { + let mut current = ValidatorSets::session(network).unwrap().0; + while current < session { + Babe::on_initialize(System::block_number() + 1); + ValidatorSets::rotate_session(); + if let NetworkId::External(n) = network { + set_keys_for_session(n); + } + ValidatorSets::retire_set(ValidatorSet { session: Session(current), network }); + current += 1; + } + assert_eq!(current, session); +} + +#[test] +fn rotate_session() { + new_test_ext().execute_with(|| { + let genesis_participants: Vec = + genesis_participants().into_iter().map(|p| p.public()).collect(); + let key_shares = key_shares(); + + let mut participants = HashMap::from([ + (NetworkId::Serai, genesis_participants.clone()), + (NetworkId::External(ExternalNetworkId::Bitcoin), genesis_participants.clone()), + (NetworkId::External(ExternalNetworkId::Ethereum), genesis_participants.clone()), + (NetworkId::External(ExternalNetworkId::Monero), genesis_participants), + ]); + + // rotate session + for network in NETWORKS { + let participants = participants.get_mut(&network).unwrap(); + + // verify for session 0 + participants.sort(); + if let NetworkId::External(n) = network { + set_keys_for_session(n); + } + verify_session_and_active_validators(network, participants, 0); + + // add 1 participant + let new_participant = insecure_pair_from_name("new-guy").public(); + Coins::mint(new_participant, Balance { coin: Coin::Serai, amount: key_shares[&network] }) + .unwrap(); + ValidatorSets::allocate( + RawOrigin::Signed(new_participant).into(), + network, + key_shares[&network], + ) + .unwrap(); + participants.push(new_participant); + + // move network to the activation 
session + let activation_session = get_session_at_which_changes_activate(network); + rotate_session_until(network, activation_session); + + // verify + participants.sort(); + verify_session_and_active_validators(network, participants, activation_session); + + // remove 1 participant + let participant_to_remove = participants[0]; + ValidatorSets::deallocate( + RawOrigin::Signed(participant_to_remove).into(), + network, + key_shares[&network], + ) + .unwrap(); + participants + .swap_remove(participants.iter().position(|k| *k == participant_to_remove).unwrap()); + + // check pending deallocations + let pending = ValidatorSets::pending_deallocations( + (network, participant_to_remove), + Session(if network == NetworkId::Serai { + activation_session + 3 + } else { + activation_session + 2 + }), + ); + assert_eq!(pending, Some(key_shares[&network])); + + // move network to the activation session + let activation_session = get_session_at_which_changes_activate(network); + rotate_session_until(network, activation_session); + + // verify + participants.sort(); + verify_session_and_active_validators(network, participants, activation_session); + } + }) +} + +#[test] +fn allocate() { + new_test_ext().execute_with(|| { + let genesis_participants: Vec = + genesis_participants().into_iter().map(|p| p.public()).collect(); + let key_shares = key_shares(); + let participant = insecure_pair_from_name("random1").public(); + let network = NetworkId::External(ExternalNetworkId::Ethereum); + + // check genesis TAS + set_keys_for_session(network.try_into().unwrap()); + assert_eq!( + ValidatorSets::total_allocated_stake(network).unwrap().0, + key_shares[&network].0 * u64::try_from(genesis_participants.len()).unwrap() + ); + + // we can't allocate less than a key share + let amount = Amount(key_shares[&network].0 * 3); + Coins::mint(participant, Balance { coin: Coin::Serai, amount }).unwrap(); + assert_noop!( + ValidatorSets::allocate( + RawOrigin::Signed(participant).into(), + network, + Amount(key_shares[&network].0 - 1) + ), + validator_sets::Error::::InsufficientAllocation + ); + + // we can't allocate too much that the net exhibits the ability to handle any single node + // becoming byzantine + assert_noop!( + ValidatorSets::allocate(RawOrigin::Signed(participant).into(), network, amount), + validator_sets::Error::::AllocationWouldRemoveFaultTolerance + ); + + // we should be allocate a proper amount + assert_ok!(ValidatorSets::allocate( + RawOrigin::Signed(participant).into(), + network, + key_shares[&network] + )); + assert_eq!(Coins::balance(participant, Coin::Serai).0, amount.0 - key_shares[&network].0); + + // check new amount is reflected on TAS on new session + rotate_session_until(network, 1); + assert_eq!( + ValidatorSets::total_allocated_stake(network).unwrap().0, + key_shares[&network].0 * (u64::try_from(genesis_participants.len()).unwrap() + 1) + ); + + // check that new participants match + let mut active_participants: Vec = + active_network_validators(network).into_iter().map(|(p, _)| p).collect(); + + let mut current_participants = genesis_participants.clone(); + current_participants.push(participant); + + current_participants.sort(); + active_participants.sort(); + assert_eq!(current_participants, active_participants); + }) +} + +#[test] +fn deallocate_pending() { + new_test_ext().execute_with(|| { + let genesis_participants: Vec = + genesis_participants().into_iter().map(|p| p.public()).collect(); + let key_shares = key_shares(); + let participant = insecure_pair_from_name("random1").public(); 
+ let network = NetworkId::External(ExternalNetworkId::Bitcoin); + + // check genesis TAS + set_keys_for_session(network.try_into().unwrap()); + assert_eq!( + ValidatorSets::total_allocated_stake(network).unwrap().0, + key_shares[&network].0 * u64::try_from(genesis_participants.len()).unwrap() + ); + + // allocate some amount + Coins::mint(participant, Balance { coin: Coin::Serai, amount: key_shares[&network] }).unwrap(); + assert_ok!(ValidatorSets::allocate( + RawOrigin::Signed(participant).into(), + network, + key_shares[&network] + )); + assert_eq!(Coins::balance(participant, Coin::Serai).0, 0); + + // move to next session + let mut current_session = ValidatorSets::session(network).unwrap().0; + current_session += 1; + rotate_session_until(network, current_session); + assert_eq!( + ValidatorSets::total_allocated_stake(network).unwrap().0, + key_shares[&network].0 * (u64::try_from(genesis_participants.len()).unwrap() + 1) + ); + + // we can deallocate all of our allocation + assert_ok!(ValidatorSets::deallocate( + RawOrigin::Signed(participant).into(), + network, + key_shares[&network] + )); + + // check pending deallocations + let pending_session = + if network == NetworkId::Serai { current_session + 3 } else { current_session + 2 }; + assert_eq!( + ValidatorSets::pending_deallocations((network, participant), Session(pending_session)), + Some(key_shares[&network]) + ); + + // we can't claim it immediately + assert_noop!( + ValidatorSets::claim_deallocation( + RawOrigin::Signed(participant).into(), + network, + Session(pending_session), + ), + validator_sets::Error::::NonExistentDeallocation + ); + + // we should be able to claim it in the pending session + rotate_session_until(network, pending_session); + assert_ok!(ValidatorSets::claim_deallocation( + RawOrigin::Signed(participant).into(), + network, + Session(pending_session), + )); + }) +} + +#[test] +fn deallocate_immediately() { + new_test_ext().execute_with(|| { + let genesis_participants: Vec = + genesis_participants().into_iter().map(|p| p.public()).collect(); + let key_shares = key_shares(); + let participant = insecure_pair_from_name("random1").public(); + let network = NetworkId::External(ExternalNetworkId::Monero); + + // check genesis TAS + set_keys_for_session(network.try_into().unwrap()); + assert_eq!( + ValidatorSets::total_allocated_stake(network).unwrap().0, + key_shares[&network].0 * u64::try_from(genesis_participants.len()).unwrap() + ); + + // we can't deallocate when we don't have an allocation + assert_noop!( + ValidatorSets::deallocate( + RawOrigin::Signed(participant).into(), + network, + key_shares[&network] + ), + validator_sets::Error::::NonExistentValidator + ); + + // allocate some amount + Coins::mint(participant, Balance { coin: Coin::Serai, amount: key_shares[&network] }).unwrap(); + assert_ok!(ValidatorSets::allocate( + RawOrigin::Signed(participant).into(), + network, + key_shares[&network] + )); + assert_eq!(Coins::balance(participant, Coin::Serai).0, 0); + + // we can't deallocate more than our allocation + assert_noop!( + ValidatorSets::deallocate( + RawOrigin::Signed(participant).into(), + network, + Amount(key_shares[&network].0 + 1) + ), + validator_sets::Error::::NotEnoughAllocated + ); + + // we can't deallocate an amount that would left us less than a key share as long as it isn't 0 + assert_noop!( + ValidatorSets::deallocate( + RawOrigin::Signed(participant).into(), + network, + Amount(key_shares[&network].0 / 2) + ), + validator_sets::Error::::DeallocationWouldRemoveParticipant + ); + + // 
we can deallocate all of our allocation + assert_ok!(ValidatorSets::deallocate( + RawOrigin::Signed(participant).into(), + network, + key_shares[&network] + )); + + // It should be immediately deallocated since we are not yet in an active set + assert_eq!(Coins::balance(participant, Coin::Serai), key_shares[&network]); + assert!(ValidatorSets::pending_deallocations((network, participant), Session(1)).is_none()); + + // allocate again + assert_ok!(ValidatorSets::allocate( + RawOrigin::Signed(participant).into(), + network, + key_shares[&network] + )); + assert_eq!(Coins::balance(participant, Coin::Serai).0, 0); + + // make a pool so that we have security oracle value for the coin + let liq_acc = insecure_pair_from_name("liq-acc").public(); + let coin = ExternalCoin::Monero; + let balance = ExternalBalance { coin, amount: Amount(2 * key_shares[&network].0) }; + Coins::mint(liq_acc, balance.into()).unwrap(); + Coins::mint(liq_acc, Balance { coin: Coin::Serai, amount: balance.amount }).unwrap(); + Dex::add_liquidity( + RawOrigin::Signed(liq_acc).into(), + coin, + balance.amount.0 / 2, + balance.amount.0 / 2, + 1, + 1, + liq_acc, + ) + .unwrap(); + Dex::on_finalize(1); + assert!(Dex::security_oracle_value(coin).unwrap().0 > 0); + + // we can't deallocate if it would break economic security + // The reason we don't have economic security for the network now is that we just set + // the value for coin/SRI to 1:1 when making the pool and we minted 2 * key_share amount + // of coin but we only allocated 1 key_share of SRI for the network although we need more than + // 3 for the same amount of coin. + assert_noop!( + ValidatorSets::deallocate( + RawOrigin::Signed(participant).into(), + network, + key_shares[&network] + ), + validator_sets::Error::::DeallocationWouldRemoveEconomicSecurity + ); + }) +} + +#[test] +fn set_keys_keys_exist() { + new_test_ext().execute_with(|| { + let network = ExternalNetworkId::Monero; + + // set the keys first + ValidatorSets::set_keys( + RawOrigin::None.into(), + network, + Vec::new().try_into().unwrap(), + KeyPair(insecure_pair_from_name("name").public(), Vec::new().try_into().unwrap()), + Signature([0u8; 64]), + ) + .unwrap(); + + let call = validator_sets::Call::::set_keys { + network, + removed_participants: Vec::new().try_into().unwrap(), + key_pair: KeyPair(insecure_pair_from_name("name").public(), Vec::new().try_into().unwrap()), + signature: Signature([0u8; 64]), + }; + + assert_eq!( + ValidatorSets::validate_unsigned(TransactionSource::External, &call), + InvalidTransaction::Stale.into() + ); + }) +} + +#[test] +fn set_keys_invalid_signature() { + new_test_ext().execute_with(|| { + let network = ExternalNetworkId::Ethereum; + let mut participants = get_ordered_keys(network.into(), &genesis_participants()); + + // we can't have invalid set + let mut set = ExternalValidatorSet { network, session: Session(1) }; + let key_pair = + KeyPair(insecure_pair_from_name("name").public(), Vec::new().try_into().unwrap()); + let signature = set_keys_signature(&set, &key_pair, &participants); + + let call = validator_sets::Call::::set_keys { + network, + removed_participants: Vec::new().try_into().unwrap(), + key_pair: key_pair.clone(), + signature, + }; + assert_eq!( + ValidatorSets::validate_unsigned(TransactionSource::External, &call), + InvalidTransaction::BadProof.into() + ); + + // fix the set + set.session = Session(0); + + // participants should match + participants.push(insecure_pair_from_name("random1")); + let signature = set_keys_signature(&set, &key_pair, 
&participants); + + let call = validator_sets::Call::::set_keys { + network, + removed_participants: Vec::new().try_into().unwrap(), + key_pair: key_pair.clone(), + signature, + }; + assert_eq!( + ValidatorSets::validate_unsigned(TransactionSource::External, &call), + InvalidTransaction::BadProof.into() + ); + + // fix the participants + participants.pop(); + + // msg key pair and the key pair to set should match + let key_pair2 = + KeyPair(insecure_pair_from_name("name2").public(), Vec::new().try_into().unwrap()); + let signature = set_keys_signature(&set, &key_pair2, &participants); + + let call = validator_sets::Call::::set_keys { + network, + removed_participants: Vec::new().try_into().unwrap(), + key_pair: key_pair.clone(), + signature, + }; + assert_eq!( + ValidatorSets::validate_unsigned(TransactionSource::External, &call), + InvalidTransaction::BadProof.into() + ); + + // use the same key pair + let signature = set_keys_signature(&set, &key_pair, &participants); + let call = validator_sets::Call::::set_keys { + network, + removed_participants: Vec::new().try_into().unwrap(), + key_pair, + signature, + }; + ValidatorSets::validate_unsigned(TransactionSource::External, &call).unwrap(); + + // TODO: removed_participants parameter isn't tested since it will be removed in upcoming + // commits? + }) +} + +// TODO: add report_slashes tests when the feature is complete. diff --git a/substrate/validator-sets/primitives/Cargo.toml b/substrate/validator-sets/primitives/Cargo.toml index 844e6134..8eea9d55 100644 --- a/substrate/validator-sets/primitives/Cargo.toml +++ b/substrate/validator-sets/primitives/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/validator-sets/primitives" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.74" +rust-version = "1.80" [package.metadata.docs.rs] all-features = true diff --git a/substrate/validator-sets/primitives/src/lib.rs b/substrate/validator-sets/primitives/src/lib.rs index 9944d485..04e3b548 100644 --- a/substrate/validator-sets/primitives/src/lib.rs +++ b/substrate/validator-sets/primitives/src/lib.rs @@ -1,5 +1,7 @@ #![cfg_attr(not(feature = "std"), no_std)] +use core::time::Duration; + #[cfg(feature = "std")] use zeroize::Zeroize; @@ -13,20 +15,30 @@ use borsh::{BorshSerialize, BorshDeserialize}; #[cfg(feature = "serde")] use serde::{Serialize, Deserialize}; -use sp_core::{ConstU32, sr25519::Public, bounded::BoundedVec}; +use sp_core::{ConstU32, bounded::BoundedVec, sr25519::Public}; #[cfg(not(feature = "std"))] use sp_std::vec::Vec; use serai_primitives::{ExternalNetworkId, NetworkId}; -/// The maximum amount of key shares per set. -pub const MAX_KEY_SHARES_PER_SET: u32 = 150; +mod slash_points; +pub use slash_points::*; + +/// The expected duration for a session. +// 1 week +pub const SESSION_LENGTH: Duration = Duration::from_secs(7 * 24 * 60 * 60); + +/// The maximum length for a key. // Support keys up to 96 bytes (BLS12-381 G2). pub const MAX_KEY_LEN: u32 = 96; +/// The maximum amount of key shares per set. +pub const MAX_KEY_SHARES_PER_SET: u16 = 150; +pub const MAX_KEY_SHARES_PER_SET_U32: u32 = MAX_KEY_SHARES_PER_SET as u32; + /// The type used to identify a specific session of validators. 
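The key-share maximum now exists in two widths because its consumers differ: the slash-point math below takes a `NonZero<u16>` validator count, while the pallet's `ConstU32` bounds and transaction longevity want a `u32`. A small sketch (not taken from this patch) showing both uses; the compile-time assertion is illustrative.

use core::num::NonZero;

// The two constants must stay in lockstep
const _: () = assert!(MAX_KEY_SHARES_PER_SET_U32 == MAX_KEY_SHARES_PER_SET as u32);

// A full set, as a NonZero<u16>, suitable for Slash::penalty
fn full_set_size() -> NonZero<u16> {
  NonZero::new(MAX_KEY_SHARES_PER_SET).expect("the maximum key-share count is non-zero")
}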
#[derive( - Clone, Copy, PartialEq, Eq, Hash, Default, Debug, Encode, Decode, TypeInfo, MaxEncodedLen, + Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Encode, Decode, TypeInfo, MaxEncodedLen, )] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] @@ -34,7 +46,9 @@ pub const MAX_KEY_LEN: u32 = 96; pub struct Session(pub u32); /// The type used to identify a specific validator set during a specific session. -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] +#[derive( + Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Encode, Decode, TypeInfo, MaxEncodedLen, +)] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] @@ -70,13 +84,13 @@ impl TryFrom for ExternalValidatorSet { } } -type MaxKeyLen = ConstU32; /// The type representing a Key from an external network. -pub type ExternalKey = BoundedVec; +pub type ExternalKey = BoundedVec>; /// The key pair for a validator set. /// -/// This is their Ristretto key, used for signing Batches, and their key on the external network. +/// This is their Ristretto key, used for publishing data onto Serai, and their key on the external +/// network. #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] @@ -108,12 +122,12 @@ impl Zeroize for KeyPair { /// The MuSig context for a validator set. pub fn musig_context(set: ValidatorSet) -> Vec { - [b"ValidatorSets-musig_key".as_ref(), &set.encode()].concat() + (b"ValidatorSets-musig_key".as_ref(), set).encode() } /// The MuSig public key for a validator set. /// -/// This function panics on invalid input. +/// This function panics on invalid input, per the definition of `dkg::musig::musig_key`. pub fn musig_key(set: ValidatorSet, set_keys: &[Public]) -> Public { let mut keys = Vec::new(); for key in set_keys { @@ -125,17 +139,9 @@ pub fn musig_key(set: ValidatorSet, set_keys: &[Public]) -> Public { Public(dkg::musig::musig_key::(&musig_context(set), &keys).unwrap().to_bytes()) } -/// The message for the set_keys signature. -pub fn set_keys_message( - set: &ExternalValidatorSet, - removed_participants: &[Public], - key_pair: &KeyPair, -) -> Vec { - (b"ValidatorSets-set_keys", set, removed_participants, key_pair).encode() -} - -pub fn report_slashes_message(set: &ExternalValidatorSet, slashes: &[(Public, u32)]) -> Vec { - (b"ValidatorSets-report_slashes", set, slashes).encode() +/// The message for the `set_keys` signature. 
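With the removed-participants list gone, the message a quorum signs binds only the set and the new key pair; which validators signed is carried entirely by the aggregate MuSig key. A sketch (not taken from this patch) of deriving both inputs on the signer side, using `set_keys_message` as declared just below; the import path follows the alias used in the pallet's Cargo.toml and is an assumption.

use sp_core::sr25519::Public;
use validator_sets_primitives::{ExternalValidatorSet, KeyPair, musig_key, set_keys_message};

fn set_keys_signing_inputs(
  set: ExternalValidatorSet,
  signers: &[Public],
  key_pair: &KeyPair,
) -> (Public, Vec<u8>) {
  let message = set_keys_message(&set, key_pair);
  // Aggregates the Ristretto keys of exactly the validators who will sign
  (musig_key(set.into(), signers), message)
}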
+pub fn set_keys_message(set: &ExternalValidatorSet, key_pair: &KeyPair) -> Vec { + (b"ValidatorSets-set_keys", set, key_pair).encode() } /// For a set of validators whose key shares may exceed the maximum, reduce until they equal the diff --git a/substrate/validator-sets/primitives/src/slash_points.rs b/substrate/validator-sets/primitives/src/slash_points.rs new file mode 100644 index 00000000..0cc72b2f --- /dev/null +++ b/substrate/validator-sets/primitives/src/slash_points.rs @@ -0,0 +1,326 @@ +use core::{num::NonZero, time::Duration}; + +#[cfg(feature = "std")] +use zeroize::Zeroize; + +use scale::{Encode, Decode, MaxEncodedLen}; +use scale_info::TypeInfo; + +#[cfg(feature = "borsh")] +use borsh::{BorshSerialize, BorshDeserialize}; +#[cfg(feature = "serde")] +use serde::{Serialize, Deserialize}; + +use sp_core::{ConstU32, bounded::BoundedVec}; +#[cfg(not(feature = "std"))] +use sp_std::vec::Vec; + +use serai_primitives::{TARGET_BLOCK_TIME, Amount}; + +use crate::{SESSION_LENGTH, MAX_KEY_SHARES_PER_SET_U32}; + +/// Each slash point is equivalent to the downtime implied by missing a block proposal. +// Takes a NonZero so that the result is never 0. +fn downtime_per_slash_point(validators: NonZero) -> Duration { + Duration::from_secs(TARGET_BLOCK_TIME) * u32::from(u16::from(validators)) +} + +/// A slash for a validator. +#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] +#[cfg_attr(feature = "std", derive(Zeroize))] +#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub enum Slash { + /// The slash points accumulated by this validator. + /// + /// Each point is considered as `downtime_per_slash_point(validators)` downtime, where + /// `validators` is the amount of validators present in the set. + Points(u32), + /// A fatal slash due to fundamentally faulty behavior. + /// + /// This should only be used for misbehavior with explicit evidence of impropriety. This should + /// not be used for liveness failures. The validator will be penalized all allocated stake. + Fatal, +} + +impl Slash { + /// Calculate the penalty which should be applied to the validator. + /// + /// Does not panic, even due to overflows, if `allocated_stake + session_rewards <= u64::MAX`. + pub fn penalty( + self, + validators: NonZero, + allocated_stake: Amount, + session_rewards: Amount, + ) -> Amount { + match self { + Self::Points(slash_points) => { + let mut slash_points = u64::from(slash_points); + // Do the logic with the stake in u128 to prevent overflow from multiplying u64s + let allocated_stake = u128::from(allocated_stake.0); + let session_rewards = u128::from(session_rewards.0); + + // A Serai validator is allowed to be offline for an average of one day every two weeks + // with no additional penalty. They'll solely not earn rewards for the time they were + // offline. 
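Concretely, GRACE / GRACE_WINDOW = one day per two weeks means the 168-hour session tolerates 12 hours of accumulated downtime before rewards are reduced, and the [1, 2, 5, 6] interval weights defined below stretch the next three 12-hour intervals so that 48 hours of downtime forfeits the whole session's rewards. A worked sketch (not taken from this patch), with downtime expressed in hours rather than slash points:

fn weighted_downtime_hours(hours_offline: u64) -> u64 {
  const INTERVAL: u64 = 12; // the penalty-free window, in hours
  const MULTIPLIERS: [u64; 4] = [1, 2, 5, 6];
  let mut remaining = hours_offline;
  let mut weighted = 0;
  for mult in MULTIPLIERS {
    let in_interval = remaining.min(INTERVAL);
    weighted += in_interval * mult;
    remaining -= in_interval;
  }
  // Out of 12 * (1 + 2 + 5 + 6) = 168 weighted hours, i.e. the full week of rewards
  weighted
}

// weighted_downtime_hours(12) == 12, (24) == 36, (36) == 96, (48) == 168,
// matching the `test_penalty` cases at the end of this file.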
+ const GRACE_WINDOW: Duration = Duration::from_secs(2 * 7 * 24 * 60 * 60); + const GRACE: Duration = Duration::from_secs(24 * 60 * 60); + + // GRACE / GRACE_WINDOW is the fraction of the time a validator is allowed to be offline + // This means we want SESSION_LENGTH * (GRACE / GRACE_WINDOW), but with the parentheses + // moved so we don't incur the floordiv and hit 0 + const PENALTY_FREE_DOWNTIME: Duration = Duration::from_secs( + (SESSION_LENGTH.as_secs() * GRACE.as_secs()) / GRACE_WINDOW.as_secs(), + ); + + let downtime_per_slash_point = downtime_per_slash_point(validators); + let penalty_free_slash_points = + PENALTY_FREE_DOWNTIME.as_secs() / downtime_per_slash_point.as_secs(); + + /* + In practice, the following means: + + - Hours 0-12 are penalized as if they're hours 0-12. + - Hours 12-24 are penalized as if they're hours 12-36. + - Hours 24-36 are penalized as if they're hours 36-96. + - Hours 36-48 are penalized as if they're hours 96-168. + + /* Commented, see below explanation of why. + - Hours 48-168 are penalized for 0-2% of stake. + - 168-336 hours of slashes, for a session only lasting 168 hours, is penalized for 2-10% + of stake. + + This means a validator offline has to be offline for more than two days to start having + their stake slashed. + */ + + This means a validator offline for two days will not earn any rewards for that session. + */ + + const MULTIPLIERS: [u64; 4] = [1, 2, 5, 6]; + let reward_slash = { + // In intervals of the penalty-free slash points, weight the slash points accrued + // The multiplier for the first interval is 1 as it's penalty-free + let mut weighted_slash_points_for_reward_slash = 0; + let mut total_possible_slash_points_for_rewards_slash = 0; + for mult in MULTIPLIERS { + let slash_points_in_interval = slash_points.min(penalty_free_slash_points); + weighted_slash_points_for_reward_slash += slash_points_in_interval * mult; + total_possible_slash_points_for_rewards_slash += penalty_free_slash_points * mult; + slash_points -= slash_points_in_interval; + } + // If there are no penalty-free slash points, and the validator was slashed, slash the + // entire reward + (u128::from(weighted_slash_points_for_reward_slash) * session_rewards) + .checked_div(u128::from(total_possible_slash_points_for_rewards_slash)) + .unwrap_or({ + if weighted_slash_points_for_reward_slash == 0 { + 0 + } else { + session_rewards + } + }) + }; + // Ensure the slash never exceeds the amount slashable (due to rounding errors) + let reward_slash = reward_slash.min(session_rewards); + + /* + let slash_points_for_entire_session = + SESSION_LENGTH.as_secs() / downtime_per_slash_point.as_secs(); + + let offline_slash = { + // The amount of stake to slash for being offline + const MAX_STAKE_SLASH_PERCENTAGE_OFFLINE: u64 = 2; + + let stake_to_slash_for_being_offline = + (allocated_stake * u128::from(MAX_STAKE_SLASH_PERCENTAGE_OFFLINE)) / 100; + + // We already removed the slash points for `intervals * penalty_free_slash_points` + let slash_points_for_reward_slash = + penalty_free_slash_points * u64::try_from(MULTIPLIERS.len()).unwrap(); + let slash_points_for_offline_stake_slash = + slash_points_for_entire_session.saturating_sub(slash_points_for_reward_slash); + + let slash_points_in_interval = slash_points.min(slash_points_for_offline_stake_slash); + slash_points -= slash_points_in_interval; + // If there are no slash points for the entire session, don't slash stake + // That's an extreme edge case which shouldn't start penalizing validators + 
(u128::from(slash_points_in_interval) * stake_to_slash_for_being_offline) + .checked_div(u128::from(slash_points_for_offline_stake_slash)) + .unwrap_or(0) + }; + + let disruptive_slash = { + /* + A validator may have more slash points than `slash_points_for_stake_slash` if they + didn't just accrue slashes for missing block proposals, yet also accrued slashes for + being disruptive. In that case, we still want to bound their slash points so they can't + somehow be slashed for 100% of their stake (which should only happen on a fatal slash). + */ + const MAX_STAKE_SLASH_PERCENTAGE_DISRUPTIVE: u64 = 8; + + let stake_to_slash_for_being_disruptive = + (allocated_stake * u128::from(MAX_STAKE_SLASH_PERCENTAGE_DISRUPTIVE)) / 100; + // Follows the offline slash for `unwrap_or` policy + (u128::from(slash_points.min(slash_points_for_entire_session)) * + stake_to_slash_for_being_disruptive) + .checked_div(u128::from(slash_points_for_entire_session)) + .unwrap_or(0) + }; + */ + + /* + We do not slash for being offline/disruptive at this time. Doing so allows an adversary + to DoS nodes to not just take them offline, yet also take away their stake. This isn't + preferable to the increased incentive to properly maintain a node when the rewards should + already be sufficient for that purpose. + + Validators also shouldn't be able to be so disruptive due to their limiting upon + disruption *while its ongoing*. Slashes as a post-response, while an arguably worthwhile + economic penalty, can never be a response in the moment (as necessary to actually handle + the disruption). + + If stake slashing was to be re-enabled, the percentage of stake which is eligible for + slashing should be variable to how close we are to losing liveness. This would mean if + less than 10% of validators are offline, no stake is slashes. If 10% are, 2% is eligible. + If 20% are, 5% is eligible. If 30% are, 10% is eligible. + + (or similar) + + This would mean that a DoS is insufficient to cause a validator to lose their stake. + Instead, a coordinated DoS against multiple Serai validators would be needed, + strengthening our assumptions. + */ + let offline_slash = 0; + let disruptive_slash = 0; + + let stake_slash = (offline_slash + disruptive_slash).min(allocated_stake); + + let penalty_u128 = reward_slash + stake_slash; + // saturating_into + Amount(u64::try_from(penalty_u128).unwrap_or(u64::MAX)) + } + // On fatal slash, their entire stake is removed + Self::Fatal => Amount(allocated_stake.0 + session_rewards.0), + } + } +} + +#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct SlashReport(pub BoundedVec>); + +#[cfg(feature = "borsh")] +impl BorshSerialize for SlashReport { + fn serialize(&self, writer: &mut W) -> borsh::io::Result<()> { + BorshSerialize::serialize(self.0.as_slice(), writer) + } +} +#[cfg(feature = "borsh")] +impl BorshDeserialize for SlashReport { + fn deserialize_reader(reader: &mut R) -> borsh::io::Result { + let slashes = Vec::::deserialize_reader(reader)?; + slashes + .try_into() + .map(Self) + .map_err(|_| borsh::io::Error::other("length of slash report exceeds max validators")) + } +} + +impl TryFrom> for SlashReport { + type Error = &'static str; + fn try_from(slashes: Vec) -> Result { + slashes.try_into().map(Self).map_err(|_| "length of slash report exceeds max validators") + } +} + +impl SlashReport { + /// The message to sign when publishing this SlashReport. 
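A brief sketch (not taken from this patch) of assembling a `SlashReport` and producing the bytes which get signed, via `report_slashes_message` as declared just below; the import path is illustrative, and `TryFrom<Vec<Slash>>` enforces the per-set bound whose error message is shown above.

use validator_sets_primitives::{Slash, SlashReport};

fn example_report_message() -> Result<Vec<u8>, &'static str> {
  // Presumably one entry per validator; the bound caps the length at the set's maximum
  let report = SlashReport::try_from(vec![Slash::Points(3), Slash::Fatal, Slash::Points(0)])?;
  Ok(report.report_slashes_message())
}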
+ // This is assumed binding to the ValidatorSet via the key signed with + pub fn report_slashes_message(&self) -> Vec { + (b"ValidatorSets-report_slashes", &self.0).encode() + } +} + +#[test] +fn test_penalty() { + for validators in [1, 50, 100, crate::MAX_KEY_SHARES_PER_SET] { + let validators = NonZero::new(validators).unwrap(); + // 12 hours of slash points should only decrease the rewards proportionately + let twelve_hours_of_slash_points = + u32::try_from((12 * 60 * 60) / downtime_per_slash_point(validators).as_secs()).unwrap(); + assert_eq!( + Slash::Points(twelve_hours_of_slash_points).penalty( + validators, + Amount(u64::MAX), + Amount(168) + ), + Amount(12) + ); + // 24 hours of slash points should be counted as 36 hours + assert_eq!( + Slash::Points(2 * twelve_hours_of_slash_points).penalty( + validators, + Amount(u64::MAX), + Amount(168) + ), + Amount(36) + ); + // 36 hours of slash points should be counted as 96 hours + assert_eq!( + Slash::Points(3 * twelve_hours_of_slash_points).penalty( + validators, + Amount(u64::MAX), + Amount(168) + ), + Amount(96) + ); + // 48 hours of slash points should be counted as 168 hours + assert_eq!( + Slash::Points(4 * twelve_hours_of_slash_points).penalty( + validators, + Amount(u64::MAX), + Amount(168) + ), + Amount(168) + ); + + /* + // A full week of slash points should slash 2% + let week_of_slash_points = 14 * twelve_hours_of_slash_points; + assert_eq!( + Slash::Points(week_of_slash_points).penalty(validators, Amount(1000), Amount(168)), + Amount(20 + 168) + ); + + // Two weeks of slash points should slash 10% + assert_eq!( + Slash::Points(2 * week_of_slash_points).penalty(validators, Amount(1000), Amount(168)), + Amount(100 + 168) + ); + + // Anything greater should still only slash 10% + assert_eq!( + Slash::Points(u32::MAX).penalty(validators, Amount(1000), Amount(168)), + Amount(100 + 168) + ); + */ + + // Anything greater should still only slash the rewards + assert_eq!( + Slash::Points(u32::MAX).penalty(validators, Amount(u64::MAX), Amount(168)), + Amount(168) + ); + } +} + +#[test] +fn no_overflow() { + Slash::Points(u32::MAX).penalty( + NonZero::new(u16::MAX).unwrap(), + Amount(u64::MAX), + Amount(u64::MAX), + ); + + Slash::Points(u32::MAX).penalty(NonZero::new(1).unwrap(), Amount(u64::MAX), Amount(u64::MAX)); +} diff --git a/tests/coordinator/Cargo.toml b/tests/coordinator/Cargo.toml index 89b168c0..6038da38 100644 --- a/tests/coordinator/Cargo.toml +++ b/tests/coordinator/Cargo.toml @@ -19,12 +19,15 @@ workspace = true [dependencies] hex = "0.4" -async-trait = "0.1" zeroize = { version = "1", default-features = false } rand_core = { version = "0.6", default-features = false } blake2 = "0.10" + ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["ristretto", "secp256k1"] } +embedwards25519 = { path = "../../crypto/evrf/embedwards25519" } +secq256k1 = { path = "../../crypto/evrf/secq256k1" } + schnorrkel = "0.11" dkg = { path = "../../crypto/dkg", default-features = false, features = ["tests"] } diff --git a/tests/coordinator/src/lib.rs b/tests/coordinator/src/lib.rs index a1efcf41..e8a5b2fc 100644 --- a/tests/coordinator/src/lib.rs +++ b/tests/coordinator/src/lib.rs @@ -18,6 +18,8 @@ use ciphersuite::{ group::{ff::PrimeField, GroupEncoding}, Ciphersuite, Ristretto, }; +use embedwards25519::Embedwards25519; +use secq256k1::Secq256k1; use serai_client::primitives::ExternalNetworkId; @@ -118,6 +120,8 @@ pub struct Processor { queue_for_sending: MessageQueue, abort_handle: Option>, + 
evrf_public_keys: ([u8; 32], Vec), + substrate_key: Arc::F>>>>, } @@ -131,7 +135,7 @@ impl Drop for Processor { impl Processor { pub async fn new( - raw_i: u8, + name: &'static str, network: ExternalNetworkId, ops: &DockerOperations, handles: Handles, @@ -168,7 +172,11 @@ impl Processor { let (msg_send, msg_recv) = mpsc::unbounded_channel(); + use serai_client::primitives::insecure_arbitrary_key_from_name; let substrate_key = Arc::new(AsyncMutex::new(None)); + let embedwards25519_evrf_key = (Embedwards25519::generator() * + insecure_arbitrary_key_from_name::(name)) + .to_bytes(); let mut res = Processor { network, @@ -183,6 +191,21 @@ impl Processor { msgs: msg_recv, abort_handle: None, + evrf_public_keys: ( + embedwards25519_evrf_key, + match network { + NetworkId::Serai => panic!("mock processor for the serai network"), + NetworkId::Bitcoin | NetworkId::Ethereum => { + let key = (Secq256k1::generator() * + insecure_arbitrary_key_from_name::(name)) + .to_bytes(); + let key: &[u8] = key.as_ref(); + key.to_vec() + } + NetworkId::Monero => embedwards25519_evrf_key.to_vec(), + }, + ), + substrate_key: substrate_key.clone(), }; @@ -256,10 +279,12 @@ impl Processor { if current_cosign.is_none() || (current_cosign.as_ref().unwrap().block != block) { *current_cosign = Some(new_cosign); } + let mut preprocess = [0; 64]; + preprocess[.. name.len()].copy_from_slice(name.as_ref()); send_message( messages::coordinator::ProcessorMessage::CosignPreprocess { id: id.clone(), - preprocesses: vec![[raw_i; 64]], + preprocesses: vec![preprocess], } .into(), ) @@ -270,12 +295,11 @@ impl Processor { ) => { // TODO: Assert the ID matches CURRENT_COSIGN // TODO: Verify the received preprocesses + let mut share = [0; 32]; + share[.. name.len()].copy_from_slice(name.as_bytes()); send_message( - messages::coordinator::ProcessorMessage::SubstrateShare { - id, - shares: vec![[raw_i; 32]], - } - .into(), + messages::coordinator::ProcessorMessage::SubstrateShare { id, shares: vec![share] } + .into(), ) .await; } @@ -327,6 +351,14 @@ impl Processor { res } + pub fn network(&self) -> NetworkId { + self.network + } + + pub fn evrf_public_keys(&self) -> ([u8; 32], Vec) { + self.evrf_public_keys.clone() + } + pub async fn serai(&self) -> Serai { Serai::new(self.serai_rpc.clone()).await.unwrap() } diff --git a/tests/coordinator/src/tests/key_gen.rs b/tests/coordinator/src/tests/key_gen.rs index 66aa9f5b..8b57f4da 100644 --- a/tests/coordinator/src/tests/key_gen.rs +++ b/tests/coordinator/src/tests/key_gen.rs @@ -1,7 +1,4 @@ -use std::{ - time::{Duration, SystemTime}, - collections::HashMap, -}; +use std::time::{Duration, SystemTime}; use zeroize::Zeroizing; use rand_core::OsRng; @@ -10,13 +7,13 @@ use ciphersuite::{ group::{ff::Field, GroupEncoding}, Ciphersuite, Ristretto, Secp256k1, }; -use dkg::ThresholdParams; +use dkg::Participant; use serai_client::{ validator_sets::primitives::{ExternalValidatorSet, KeyPair, Session}, Public, }; -use messages::{key_gen::KeyGenId, CoordinatorMessage}; +use messages::CoordinatorMessage; use crate::tests::*; @@ -28,16 +25,28 @@ pub async fn key_gen( let mut participant_is = vec![]; let set = ExternalValidatorSet { session, network: ExternalNetworkId::Bitcoin }; - let id = KeyGenId { session: set.session, attempt: 0 }; - for (i, processor) in processors.iter_mut().enumerate() { + // This is distinct from the result of evrf_public_keys for each processor, as there'll have some + // ordering algorithm on-chain which won't match our ordering + let mut evrf_public_keys_as_on_chain = None; + 
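+  // Each mock processor should now receive GenerateKey. The eVRF public keys it carries are
+  // expected to be in the same (on-chain) order for every processor, so each processor derives
+  // its 1-indexed Participant from the position of its own keys within that ordering.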
for processor in processors.iter_mut() { + // Receive GenerateKey let msg = processor.recv_message().await; match &msg { CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::GenerateKey { - params, + evrf_public_keys, .. }) => { - participant_is.push(params.i()); + if evrf_public_keys_as_on_chain.is_none() { + evrf_public_keys_as_on_chain = Some(evrf_public_keys.clone()); + } + assert_eq!(evrf_public_keys_as_on_chain.as_ref().unwrap(), evrf_public_keys); + let i = evrf_public_keys + .iter() + .position(|public_keys| *public_keys == processor.evrf_public_keys()) + .unwrap(); + let i = Participant::new(1 + u16::try_from(i).unwrap()).unwrap(); + participant_is.push(i); } _ => panic!("unexpected message: {msg:?}"), } @@ -45,63 +54,43 @@ pub async fn key_gen( assert_eq!( msg, CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::GenerateKey { - id, - params: ThresholdParams::new( - u16::try_from(((coordinators * 2) / 3) + 1).unwrap(), - u16::try_from(coordinators).unwrap(), - participant_is[i], - ) - .unwrap(), - shares: 1, + session, + threshold: u16::try_from(((coordinators * 2) / 3) + 1).unwrap(), + evrf_public_keys: evrf_public_keys_as_on_chain.clone().unwrap(), }) ); - - processor - .send_message(messages::key_gen::ProcessorMessage::Commitments { - id, - commitments: vec![vec![u8::try_from(u16::from(participant_is[i])).unwrap()]], - }) - .await; } - wait_for_tributary().await; - for (i, processor) in processors.iter_mut().enumerate() { - let mut commitments = (0 .. u8::try_from(coordinators).unwrap()) - .map(|l| { - ( - participant_is[usize::from(l)], - vec![u8::try_from(u16::from(participant_is[usize::from(l)])).unwrap()], - ) + for i in 0 .. coordinators { + // Send Participation + processors[i] + .send_message(messages::key_gen::ProcessorMessage::Participation { + session, + participation: vec![u8::try_from(u16::from(participant_is[i])).unwrap()], }) - .collect::>(); - commitments.remove(&participant_is[i]); - assert_eq!( - processor.recv_message().await, - CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::Commitments { - id, - commitments, - }) - ); - - // Recipient it's for -> (Sender i, Recipient i) - let mut shares = (0 .. u8::try_from(coordinators).unwrap()) - .map(|l| { - ( - participant_is[usize::from(l)], - vec![ - u8::try_from(u16::from(participant_is[i])).unwrap(), - u8::try_from(u16::from(participant_is[usize::from(l)])).unwrap(), - ], - ) - }) - .collect::>(); - - shares.remove(&participant_is[i]); - processor - .send_message(messages::key_gen::ProcessorMessage::Shares { id, shares: vec![shares] }) .await; + + // Sleep so this participation gets included + for _ in 0 .. 
2 { + wait_for_tributary().await; + } + + // Have every other processor recv this message too + for processor in processors.iter_mut() { + assert_eq!( + processor.recv_message().await, + messages::CoordinatorMessage::KeyGen( + messages::key_gen::CoordinatorMessage::Participation { + session, + participant: participant_is[i], + participation: vec![u8::try_from(u16::from(participant_is[i])).unwrap()], + } + ) + ); + } } + // Now that we've received all participations, publish the key pair let substrate_priv_key = Zeroizing::new(::F::random(&mut OsRng)); let substrate_key = (::generator() * *substrate_priv_key).to_bytes(); @@ -111,40 +100,24 @@ pub async fn key_gen( let serai = processors[0].serai().await; let mut last_serai_block = serai.latest_finalized_block().await.unwrap().number(); - wait_for_tributary().await; - for (i, processor) in processors.iter_mut().enumerate() { - let i = participant_is[i]; - assert_eq!( - processor.recv_message().await, - CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::Shares { - id, - shares: { - let mut shares = (0 .. u8::try_from(coordinators).unwrap()) - .map(|l| { - ( - participant_is[usize::from(l)], - vec![ - u8::try_from(u16::from(participant_is[usize::from(l)])).unwrap(), - u8::try_from(u16::from(i)).unwrap(), - ], - ) - }) - .collect::>(); - shares.remove(&i); - vec![shares] - }, - }) - ); + for processor in processors.iter_mut() { processor .send_message(messages::key_gen::ProcessorMessage::GeneratedKeyPair { - id, + session, substrate_key, network_key: network_key.clone(), }) .await; } - // Sleeps for longer since we need to wait for a Substrate block as well + // Wait for the Nonces TXs to go around + wait_for_tributary().await; + // Wait for the Share TXs to go around + wait_for_tributary().await; + + // And now we're waiting ro the TX to be published onto Serai + + // We need to wait for a finalized Substrate block as well, so this waites for up to 20 blocks 'outer: for _ in 0 .. 20 { tokio::time::sleep(Duration::from_secs(6)).await; if std::env::var("GITHUB_CI") == Ok("true".to_string()) { diff --git a/tests/coordinator/src/tests/mod.rs b/tests/coordinator/src/tests/mod.rs index a488d01a..6d27d218 100644 --- a/tests/coordinator/src/tests/mod.rs +++ b/tests/coordinator/src/tests/mod.rs @@ -41,6 +41,18 @@ impl) -> F> Test } } +fn name(i: usize) -> &'static str { + match i { + 0 => "Alice", + 1 => "Bob", + 2 => "Charlie", + 3 => "Dave", + 4 => "Eve", + 5 => "Ferdie", + _ => panic!("needed a 7th name for a serai node"), + } +} + pub(crate) async fn new_test(test_body: impl TestBody, fast_epoch: bool) { let mut unique_id_lock = UNIQUE_ID.get_or_init(|| Mutex::new(0)).lock().await; @@ -50,15 +62,7 @@ pub(crate) async fn new_test(test_body: impl TestBody, fast_epoch: bool) { // Spawn one extra coordinator which isn't in-set #[allow(clippy::range_plus_one)] for i in 0 .. 
(COORDINATORS + 1) { - let name = match i { - 0 => "Alice", - 1 => "Bob", - 2 => "Charlie", - 3 => "Dave", - 4 => "Eve", - 5 => "Ferdie", - _ => panic!("needed a 7th name for a serai node"), - }; + let name = name(i); let serai_composition = serai_composition(name, fast_epoch); let (processor_key, message_queue_keys, message_queue_composition) = @@ -196,14 +200,7 @@ pub(crate) async fn new_test(test_body: impl TestBody, fast_epoch: bool) { let mut processors: Vec = vec![]; for (i, (handles, key)) in coordinators.iter().enumerate() { processors.push( - Processor::new( - i.try_into().unwrap(), - ExternalNetworkId::Bitcoin, - &outer_ops, - handles.clone(), - *key, - ) - .await, + Processor::new(name(i), ExternalNetworkId::Bitcoin, &outer_ops, handles.clone(), *key).await, ); } diff --git a/tests/coordinator/src/tests/rotation.rs b/tests/coordinator/src/tests/rotation.rs index c3659a9e..1f044621 100644 --- a/tests/coordinator/src/tests/rotation.rs +++ b/tests/coordinator/src/tests/rotation.rs @@ -3,7 +3,7 @@ use tokio::time::{sleep, Duration}; use ciphersuite::Secp256k1; use serai_client::{ - primitives::{insecure_pair_from_name, NetworkId}, + primitives::{EmbeddedEllipticCurve, NetworkId, insecure_pair_from_name}, validator_sets::{ self, primitives::{Session, ValidatorSet}, @@ -55,6 +55,27 @@ async fn publish_tx(serai: &Serai, tx: &Transaction) -> [u8; 32] { } } +#[allow(dead_code)] +async fn set_embedded_elliptic_curve_key( + serai: &Serai, + curve: EmbeddedEllipticCurve, + key: Vec, + pair: &Pair, + nonce: u32, +) -> [u8; 32] { + // get the call + let tx = serai.sign( + pair, + validator_sets::SeraiValidatorSets::set_embedded_elliptic_curve_key( + curve, + key.try_into().unwrap(), + ), + nonce, + 0, + ); + publish_tx(serai, &tx).await +} + #[allow(dead_code)] async fn allocate_stake( serai: &Serai, @@ -132,13 +153,29 @@ async fn set_rotation_test() { // excluded participant let pair5 = insecure_pair_from_name("Eve"); - let network = ExternalNetworkId::Bitcoin; + let network = excluded.network(); let amount = Amount(1_000_000 * 10_u64.pow(8)); let serai = processors[0].serai().await; // allocate now for the last participant so that it is guaranteed to be included into session // 1 set. This doesn't affect the genesis set at all since that is a predetermined set. 
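  // The embedded elliptic curve keys presumably have to be registered on-chain before stake is
  // allocated for an external network; hence the two set_embedded_elliptic_curve_key extrinsics
  // (nonces 0 and 1) below, followed by allocate_stake with nonce 2.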
-  allocate_stake(&serai, network.into(), amount, &pair5, 0).await;
+  set_embedded_elliptic_curve_key(
+    &serai,
+    EmbeddedEllipticCurve::Embedwards25519,
+    excluded.evrf_public_keys().0.to_vec(),
+    &pair5,
+    0,
+  )
+  .await;
+  set_embedded_elliptic_curve_key(
+    &serai,
+    *excluded.network().embedded_elliptic_curves().last().unwrap(),
+    excluded.evrf_public_keys().1.clone(),
+    &pair5,
+    1,
+  )
+  .await;
+  allocate_stake(&serai, network.into(), amount, &pair5, 2).await;
 
   // genesis keygen
   let _ = key_gen::(&mut processors, Session(0)).await;
diff --git a/tests/coordinator/src/tests/sign.rs b/tests/coordinator/src/tests/sign.rs
index f6fdb6e6..e489e593 100644
--- a/tests/coordinator/src/tests/sign.rs
+++ b/tests/coordinator/src/tests/sign.rs
@@ -249,7 +249,6 @@ async fn sign_test() {
         balance,
         instruction: OutInstruction {
           address: ExternalAddress::new(b"external".to_vec()).unwrap(),
-          data: None,
         },
       };
       serai
diff --git a/tests/full-stack/Cargo.toml b/tests/full-stack/Cargo.toml
index 12af01bd..5bafb346 100644
--- a/tests/full-stack/Cargo.toml
+++ b/tests/full-stack/Cargo.toml
@@ -19,8 +19,6 @@ workspace = true
 [dependencies]
 hex = "0.4"
 
-async-trait = "0.1"
-
 zeroize = { version = "1", default-features = false }
 rand_core = { version = "0.6", default-features = false }
 
@@ -34,7 +32,7 @@ scale = { package = "parity-scale-codec", version = "3" }
 serde = "1"
 serde_json = "1"
 
-processor = { package = "serai-processor", path = "../../processor", features = ["bitcoin", "monero"] }
+# processor = { package = "serai-processor", path = "../../processor", features = ["bitcoin", "monero"] }
 
 serai-client = { path = "../../substrate/client", features = ["serai"] }
 
diff --git a/tests/full-stack/src/tests/mint_and_burn.rs b/tests/full-stack/src/tests/mint_and_burn.rs
index 26ffb442..637da935 100644
--- a/tests/full-stack/src/tests/mint_and_burn.rs
+++ b/tests/full-stack/src/tests/mint_and_burn.rs
@@ -496,8 +496,8 @@ async fn mint_and_burn_test() {
       let serai_pair = &serai_pair;
       move |nonce, coin, amount, address| async move {
         let out_instruction = OutInstructionWithBalance {
-          balance: ExternalBalance { coin, amount: Amount(amount) },
-          instruction: OutInstruction { address, data: None },
+          balance: Balance { coin, amount: Amount(amount) },
+          instruction: OutInstruction { address },
         };
 
         serai
diff --git a/tests/full-stack/src/tests/mod.rs b/tests/full-stack/src/tests/mod.rs
index b82ff177..62f941a4 100644
--- a/tests/full-stack/src/tests/mod.rs
+++ b/tests/full-stack/src/tests/mod.rs
@@ -58,19 +58,23 @@ pub(crate) async fn new_test(test_body: impl TestBody) {
     let (bitcoin_composition, bitcoin_port) = network_instance(ExternalNetworkId::Bitcoin);
     let mut bitcoin_processor_composition = processor_instance(
+      name,
       ExternalNetworkId::Bitcoin,
       bitcoin_port,
       message_queue_keys[&ExternalNetworkId::Bitcoin],
-    );
+    )
+    .0;
     assert_eq!(bitcoin_processor_composition.len(), 1);
     let bitcoin_processor_composition = bitcoin_processor_composition.swap_remove(0);
 
     let (monero_composition, monero_port) = network_instance(ExternalNetworkId::Monero);
     let mut monero_processor_composition = processor_instance(
+      name,
       ExternalNetworkId::Monero,
       monero_port,
       message_queue_keys[&ExternalNetworkId::Monero],
-    );
+    )
+    .0;
     assert_eq!(monero_processor_composition.len(), 1);
     let monero_processor_composition = monero_processor_composition.swap_remove(0);
 
diff --git a/tests/message-queue/src/lib.rs b/tests/message-queue/src/lib.rs
index d59273d9..a2eab627 100644
---
a/tests/message-queue/src/lib.rs +++ b/tests/message-queue/src/lib.rs @@ -92,7 +92,8 @@ fn basic_functionality() { }, b"Hello, World!".to_vec(), ) - .await; + .await + .unwrap(); // Queue this twice, which message-queue should de-duplicate for _ in 0 .. 2 { @@ -105,7 +106,8 @@ fn basic_functionality() { }, b"Hello, World, again!".to_vec(), ) - .await; + .await + .unwrap(); } // Successfully get it @@ -148,7 +150,8 @@ fn basic_functionality() { }, b"Hello, World!".to_vec(), ) - .await; + .await + .unwrap(); let monero = MessageQueue::new( Service::Processor(ExternalNetworkId::Monero), diff --git a/tests/no-std/Cargo.toml b/tests/no-std/Cargo.toml index 16ca5d24..d1773b6e 100644 --- a/tests/no-std/Cargo.toml +++ b/tests/no-std/Cargo.toml @@ -29,6 +29,13 @@ multiexp = { path = "../../crypto/multiexp", default-features = false, features dleq = { path = "../../crypto/dleq", default-features = false } schnorr-signatures = { path = "../../crypto/schnorr", default-features = false } +secq256k1 = { path = "../../crypto/evrf/secq256k1", default-features = false } +embedwards25519 = { path = "../../crypto/evrf/embedwards25519", default-features = false } +generalized-bulletproofs = { path = "../../crypto/evrf/generalized-bulletproofs", default-features = false } +generalized-bulletproofs-circuit-abstraction = { path = "../../crypto/evrf/circuit-abstraction", default-features = false } +ec-divisors = { path = "../../crypto/evrf/divisors", default-features = false } +generalized-bulletproofs-ec-gadgets = { path = "../../crypto/evrf/ec-gadgets", default-features = false } + dkg = { path = "../../crypto/dkg", default-features = false } # modular-frost = { path = "../../crypto/frost", default-features = false } # frost-schnorrkel = { path = "../../crypto/schnorrkel", default-features = false } diff --git a/tests/no-std/src/lib.rs b/tests/no-std/src/lib.rs index f1824050..0c08e111 100644 --- a/tests/no-std/src/lib.rs +++ b/tests/no-std/src/lib.rs @@ -12,6 +12,13 @@ pub use multiexp; pub use dleq; pub use schnorr_signatures; +pub use secq256k1; +pub use embedwards25519; +pub use generalized_bulletproofs; +pub use generalized_bulletproofs_circuit_abstraction; +pub use ec_divisors; +pub use generalized_bulletproofs_ec_gadgets; + pub use dkg; /* pub use modular_frost; diff --git a/tests/processor/Cargo.toml b/tests/processor/Cargo.toml index 8817b0c9..c7267b55 100644 --- a/tests/processor/Cargo.toml +++ b/tests/processor/Cargo.toml @@ -23,13 +23,12 @@ zeroize = { version = "1", default-features = false } rand_core = { version = "0.6", default-features = false, features = ["getrandom"] } curve25519-dalek = "4" -ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["secp256k1", "ristretto"] } -dkg = { path = "../../crypto/dkg", default-features = false, features = ["tests"] } +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["secp256k1", "ed25519", "ristretto"] } +dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] } bitcoin-serai = { path = "../../networks/bitcoin" } k256 = "0.13" -ethereum-serai = { path = "../../networks/ethereum" } monero-simple-request-rpc = { path = "../../networks/monero/rpc/simple-request" } monero-wallet = { path = "../../networks/monero/wallet" } @@ -46,7 +45,7 @@ serde_json = { version = "1", default-features = false } tokio = { version = "1", features = ["time"] } -processor = { package = "serai-processor", path = "../../processor", features = ["bitcoin", "ethereum", "monero"] } +# 
processor = { package = "serai-processor", path = "../../processor", features = ["bitcoin", "ethereum", "monero"] } dockertest = "0.5" serai-docker-tests = { path = "../docker" } diff --git a/tests/processor/src/lib.rs b/tests/processor/src/lib.rs index 108abeda..66f2779d 100644 --- a/tests/processor/src/lib.rs +++ b/tests/processor/src/lib.rs @@ -3,11 +3,14 @@ use std::sync::{OnceLock, Mutex}; use zeroize::Zeroizing; -use rand_core::{RngCore, OsRng}; -use ciphersuite::{group::ff::PrimeField, Ciphersuite, Ristretto}; +use ciphersuite::{ + group::{ff::PrimeField, GroupEncoding}, + Ciphersuite, Secp256k1, Ed25519, Ristretto, +}; +use dkg::evrf::*; -use serai_client::primitives::ExternalNetworkId; +use serai_client::primitives::{ExternalNetworkId, insecure_arbitrary_key_from_name}; use messages::{ProcessorMessage, CoordinatorMessage}; use serai_message_queue::{Service, Metadata, client::MessageQueue}; @@ -24,13 +27,42 @@ mod tests; static UNIQUE_ID: OnceLock> = OnceLock::new(); +#[allow(dead_code)] +#[derive(Clone)] +pub struct EvrfPublicKeys { + substrate: [u8; 32], + network: Vec, +} + pub fn processor_instance( + name: &str, network: ExternalNetworkId, port: u32, message_queue_key: ::F, -) -> Vec { - let mut entropy = [0; 32]; - OsRng.fill_bytes(&mut entropy); +) -> (Vec, EvrfPublicKeys) { + let substrate_evrf_key = + insecure_arbitrary_key_from_name::<::EmbeddedCurve>(name); + let substrate_evrf_pub_key = + (::EmbeddedCurve::generator() * substrate_evrf_key).to_bytes(); + let substrate_evrf_key = substrate_evrf_key.to_repr(); + + let (network_evrf_key, network_evrf_pub_key) = match network { + NetworkId::Serai => panic!("starting a processor for Serai"), + NetworkId::Bitcoin | NetworkId::Ethereum => { + let evrf_key = + insecure_arbitrary_key_from_name::<::EmbeddedCurve>(name); + let pub_key = + (::EmbeddedCurve::generator() * evrf_key).to_bytes().to_vec(); + (evrf_key.to_repr(), pub_key) + } + NetworkId::Monero => { + let evrf_key = + insecure_arbitrary_key_from_name::<::EmbeddedCurve>(name); + let pub_key = + (::EmbeddedCurve::generator() * evrf_key).to_bytes().to_vec(); + (evrf_key.to_repr(), pub_key) + } + }; let network_str = match network { ExternalNetworkId::Bitcoin => "bitcoin", @@ -46,7 +78,8 @@ pub fn processor_instance( .replace_env( [ ("MESSAGE_QUEUE_KEY".to_string(), hex::encode(message_queue_key.to_repr())), - ("ENTROPY".to_string(), hex::encode(entropy)), + ("SUBSTRATE_EVRF_KEY".to_string(), hex::encode(substrate_evrf_key)), + ("NETWORK_EVRF_KEY".to_string(), hex::encode(network_evrf_key)), ("NETWORK".to_string(), network_str.to_string()), ("NETWORK_RPC_LOGIN".to_string(), format!("{RPC_USER}:{RPC_PASS}")), ("NETWORK_RPC_PORT".to_string(), port.to_string()), @@ -74,21 +107,27 @@ pub fn processor_instance( ); } - res + (res, EvrfPublicKeys { substrate: substrate_evrf_pub_key, network: network_evrf_pub_key }) +} + +pub struct ProcessorKeys { + coordinator: ::F, + evrf: EvrfPublicKeys, } pub type Handles = (String, String, String, String); pub fn processor_stack( + name: &str, network: ExternalNetworkId, network_hostname_override: Option, -) -> (Handles, ::F, Vec) { +) -> (Handles, ProcessorKeys, Vec) { let (network_composition, network_rpc_port) = network_instance(network); let (coord_key, message_queue_keys, message_queue_composition) = serai_message_queue_tests::instance(); - let mut processor_compositions = - processor_instance(network, network_rpc_port, message_queue_keys[&network]); + let (mut processor_compositions, evrf_keys) = + processor_instance(name, network, 
network_rpc_port, message_queue_keys[&network]); // Give every item in this stack a unique ID // Uses a Mutex as we can't generate a 8-byte random ID without hitting hostname length limits @@ -153,7 +192,7 @@ pub fn processor_stack( handles[2].clone(), handles.get(3).cloned().unwrap_or(String::new()), ), - coord_key, + ProcessorKeys { coordinator: coord_key, evrf: evrf_keys }, compositions, ) } @@ -168,6 +207,8 @@ pub struct Coordinator { processor_handle: String, relayer_handle: String, + evrf_keys: EvrfPublicKeys, + next_send_id: u64, next_recv_id: u64, queue: MessageQueue, @@ -178,7 +219,7 @@ impl Coordinator { network: ExternalNetworkId, ops: &DockerOperations, handles: Handles, - coord_key: ::F, + keys: ProcessorKeys, ) -> Coordinator { let rpc = ops.handle(&handles.1).host_port(2287).unwrap(); let rpc = rpc.0.to_string() + ":" + &rpc.1.to_string(); @@ -191,9 +232,11 @@ impl Coordinator { processor_handle: handles.2, relayer_handle: handles.3, + evrf_keys: keys.evrf, + next_send_id: 0, next_recv_id: 0, - queue: MessageQueue::new(Service::Coordinator, rpc, Zeroizing::new(coord_key)), + queue: MessageQueue::new(Service::Coordinator, rpc, Zeroizing::new(keys.coordinator)), }; // Sleep for up to a minute in case the external network's RPC has yet to start @@ -299,6 +342,11 @@ impl Coordinator { res } + /// Get the eVRF keys for the associated processor. + pub fn evrf_keys(&self) -> EvrfPublicKeys { + self.evrf_keys.clone() + } + /// Send a message to a processor as its coordinator. pub async fn send_message(&mut self, msg: impl Into) { let msg: CoordinatorMessage = msg.into(); diff --git a/tests/processor/src/networks.rs b/tests/processor/src/networks.rs index e6ef485c..c4947f82 100644 --- a/tests/processor/src/networks.rs +++ b/tests/processor/src/networks.rs @@ -449,7 +449,7 @@ impl Wallet { ); } - let to_spend_key = decompress_point(<[u8; 32]>::try_from(to.as_ref()).unwrap()).unwrap(); + let to_spend_key = decompress_point(<[u8; 32]>::try_from(to.as_slice()).unwrap()).unwrap(); let to_view_key = additional_key::(0); let to_addr = Address::new( Network::Mainnet, diff --git a/tests/processor/src/tests/batch.rs b/tests/processor/src/tests/batch.rs index 4a34500e..5effab81 100644 --- a/tests/processor/src/tests/batch.rs +++ b/tests/processor/src/tests/batch.rs @@ -3,6 +3,8 @@ use std::{ time::{SystemTime, Duration}, }; +use rand_core::{RngCore, OsRng}; + use dkg::{Participant, tests::clone_without}; use messages::{coordinator::*, SubstrateContext}; diff --git a/tests/processor/src/tests/key_gen.rs b/tests/processor/src/tests/key_gen.rs index ec616b51..9ee2b001 100644 --- a/tests/processor/src/tests/key_gen.rs +++ b/tests/processor/src/tests/key_gen.rs @@ -1,30 +1,24 @@ -use std::{collections::HashMap, time::SystemTime}; +use std::time::SystemTime; -use dkg::{Participant, ThresholdParams, tests::clone_without}; +use dkg::Participant; use serai_client::{ primitives::{BlockHash, PublicKey, EXTERNAL_NETWORKS}, validator_sets::primitives::{KeyPair, Session}, }; -use messages::{SubstrateContext, key_gen::KeyGenId, CoordinatorMessage, ProcessorMessage}; +use messages::{SubstrateContext, CoordinatorMessage, ProcessorMessage}; use crate::{*, tests::*}; pub(crate) async fn key_gen(coordinators: &mut [Coordinator]) -> KeyPair { // Perform an interaction with all processors via their coordinators - async fn interact_with_all< - FS: Fn(Participant) -> messages::key_gen::CoordinatorMessage, - FR: FnMut(Participant, messages::key_gen::ProcessorMessage), - >( + async fn interact_with_all( coordinators: 
&mut [Coordinator], - message: FS, mut recv: FR, ) { for (i, coordinator) in coordinators.iter_mut().enumerate() { let participant = Participant::new(u16::try_from(i + 1).unwrap()).unwrap(); - coordinator.send_message(CoordinatorMessage::KeyGen(message(participant))).await; - match coordinator.recv_message().await { ProcessorMessage::KeyGen(msg) => recv(participant, msg), _ => panic!("processor didn't return KeyGen message"), @@ -33,85 +27,69 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator]) -> KeyPair { } // Order a key gen - let id = KeyGenId { session: Session(0), attempt: 0 }; + let session = Session(0); - let mut commitments = HashMap::new(); - interact_with_all( - coordinators, - |participant| messages::key_gen::CoordinatorMessage::GenerateKey { - id, - params: ThresholdParams::new( - u16::try_from(THRESHOLD).unwrap(), - u16::try_from(COORDINATORS).unwrap(), + let mut evrf_public_keys = vec![]; + for coordinator in &*coordinators { + let keys = coordinator.evrf_keys(); + evrf_public_keys.push((keys.substrate, keys.network)); + } + + let mut participations = vec![]; + for coordinator in &mut *coordinators { + coordinator + .send_message(CoordinatorMessage::KeyGen( + messages::key_gen::CoordinatorMessage::GenerateKey { + session, + threshold: u16::try_from(THRESHOLD).unwrap(), + evrf_public_keys: evrf_public_keys.clone(), + }, + )) + .await; + } + // This takes forever on debug, as we use in these tests + let ci_scaling_factor = + 1 + u64::from(u8::from(std::env::var("GITHUB_CI") == Ok("true".to_string()))); + tokio::time::sleep(core::time::Duration::from_secs(600 * ci_scaling_factor)).await; + interact_with_all(coordinators, |participant, msg| match msg { + messages::key_gen::ProcessorMessage::Participation { session: this_session, participation } => { + assert_eq!(this_session, session); + participations.push(messages::key_gen::CoordinatorMessage::Participation { + session, participant, - ) - .unwrap(), - shares: 1, - }, - |participant, msg| match msg { - messages::key_gen::ProcessorMessage::Commitments { - id: this_id, - commitments: mut these_commitments, - } => { - assert_eq!(this_id, id); - assert_eq!(these_commitments.len(), 1); - commitments.insert(participant, these_commitments.swap_remove(0)); - } - _ => panic!("processor didn't return Commitments in response to GenerateKey"), - }, - ) + participation, + }); + } + _ => panic!("processor didn't return Participation in response to GenerateKey"), + }) .await; - // Send the commitments to all parties - let mut shares = HashMap::new(); - interact_with_all( - coordinators, - |participant| messages::key_gen::CoordinatorMessage::Commitments { - id, - commitments: clone_without(&commitments, &participant), - }, - |participant, msg| match msg { - messages::key_gen::ProcessorMessage::Shares { id: this_id, shares: mut these_shares } => { - assert_eq!(this_id, id); - assert_eq!(these_shares.len(), 1); - shares.insert(participant, these_shares.swap_remove(0)); - } - _ => panic!("processor didn't return Shares in response to GenerateKey"), - }, - ) - .await; - - // Send the shares + // Send the participations let mut substrate_key = None; let mut network_key = None; - interact_with_all( - coordinators, - |participant| messages::key_gen::CoordinatorMessage::Shares { - id, - shares: vec![shares - .iter() - .filter_map(|(this_participant, shares)| { - shares.get(&participant).cloned().map(|share| (*this_participant, share)) - }) - .collect()], - }, - |_, msg| match msg { - messages::key_gen::ProcessorMessage::GeneratedKeyPair { 
- id: this_id, - substrate_key: this_substrate_key, - network_key: this_network_key, - } => { - assert_eq!(this_id, id); - if substrate_key.is_none() { - substrate_key = Some(this_substrate_key); - network_key = Some(this_network_key.clone()); - } - assert_eq!(substrate_key.unwrap(), this_substrate_key); - assert_eq!(network_key.as_ref().unwrap(), &this_network_key); + for participation in participations { + for coordinator in &mut *coordinators { + coordinator.send_message(participation.clone()).await; + } + } + // This also takes a while on debug + tokio::time::sleep(core::time::Duration::from_secs(240 * ci_scaling_factor)).await; + interact_with_all(coordinators, |_, msg| match msg { + messages::key_gen::ProcessorMessage::GeneratedKeyPair { + session: this_session, + substrate_key: this_substrate_key, + network_key: this_network_key, + } => { + assert_eq!(this_session, session); + if substrate_key.is_none() { + substrate_key = Some(this_substrate_key); + network_key = Some(this_network_key.clone()); } - _ => panic!("processor didn't return GeneratedKeyPair in response to GenerateKey"), - }, - ) + assert_eq!(substrate_key.unwrap(), this_substrate_key); + assert_eq!(network_key.as_ref().unwrap(), &this_network_key); + } + _ => panic!("processor didn't return GeneratedKeyPair in response to all Participations"), + }) .await; // Confirm the key pair @@ -132,7 +110,7 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator]) -> KeyPair { .send_message(CoordinatorMessage::Substrate( messages::substrate::CoordinatorMessage::ConfirmKeyPair { context, - session: id.session, + session, key_pair: key_pair.clone(), }, )) diff --git a/tests/processor/src/tests/mod.rs b/tests/processor/src/tests/mod.rs index 0347a3dd..45b739ca 100644 --- a/tests/processor/src/tests/mod.rs +++ b/tests/processor/src/tests/mod.rs @@ -1,4 +1,4 @@ -use ciphersuite::{Ciphersuite, Ristretto}; +use serai_client::primitives::NetworkId; use dockertest::DockerTest; @@ -15,20 +15,21 @@ mod send; pub(crate) const COORDINATORS: usize = 4; pub(crate) const THRESHOLD: usize = ((COORDINATORS * 2) / 3) + 1; -fn new_test( - network: ExternalNetworkId, -) -> (Vec<(Handles, ::F)>, DockerTest) { +fn new_test(network: ExternalNetworkId) -> (Vec<(Handles, ProcessorKeys)>, DockerTest) { let mut coordinators = vec![]; let mut test = DockerTest::new().with_network(dockertest::Network::Isolated); let mut eth_handle = None; - for _ in 0 .. COORDINATORS { - let (handles, coord_key, compositions) = processor_stack(network, eth_handle.clone()); + for i in 0 .. 
COORDINATORS {
+    // Uses the counter `i` as this has no relation to any other system, and while Substrate has
+    // hard-coded names for itself, these tests don't spawn any Substrate node
+    let (handles, keys, compositions) =
+      processor_stack(&i.to_string(), network, eth_handle.clone());
     // TODO: Remove this once https://github.com/foundry-rs/foundry/issues/7955
     // This has all processors share an Ethereum node until we can sync controlled nodes
     if network == ExternalNetworkId::Ethereum {
       eth_handle = eth_handle.or_else(|| Some(handles.0.clone()));
     }
-    coordinators.push((handles, coord_key));
+    coordinators.push((handles, keys));
     for composition in compositions {
       test.provide_container(composition);
     }
diff --git a/tests/processor/src/tests/send.rs b/tests/processor/src/tests/send.rs
index e50edc3f..1e5bddfa 100644
--- a/tests/processor/src/tests/send.rs
+++ b/tests/processor/src/tests/send.rs
@@ -3,6 +3,8 @@ use std::{
   time::{SystemTime, Duration},
 };
 
+use rand_core::{RngCore, OsRng};
+
 use dkg::{Participant, tests::clone_without};
 
 use messages::{sign::SignId, SubstrateContext};
@@ -243,7 +245,7 @@ fn send_test() {
         },
         block: substrate_block_num,
         burns: vec![OutInstructionWithBalance {
-          instruction: OutInstruction { address: wallet.address(), data: None },
+          instruction: OutInstruction { address: wallet.address() },
          balance: ExternalBalance { coin: balance_sent.coin, amount: amount_minted },
         }],
        batches: vec![batch.batch.id],