15 Commits

Author SHA1 Message Date
hinto.janai
68060b4efc fix doc tests 2024-05-16 19:56:36 -04:00
hinto.janai
4af83bd0e5 doc rpc/mod.rs 2024-05-16 19:50:35 -04:00
hinto.janai
d4b22e5136 doc ringct/mod.rs 2024-05-16 19:06:59 -04:00
hinto.janai
58fe79da10 lib: doc constants 2024-05-16 18:11:20 -04:00
hinto.janai
3f07dd13c6 block: doc Block 2024-05-16 17:59:56 -04:00
hinto.janai
4e1d86dae2 block: doc BlockHeader 2024-05-16 17:45:15 -04:00
hinto.janai
7ef21830a5 readme: add feature flags 2024-05-10 15:35:39 -04:00
Luke Parker
8cb4c5d167 Various RingCT doc comments 2024-04-23 03:44:51 -04:00
Luke Parker
f9e4b420ed Move amount_decryption into EncryptedAmount::decrypt 2024-04-22 01:36:43 -04:00
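
For illustration, a minimal sketch of the API move this commit describes. Only the EncryptedAmount::decrypt name comes from the commit message; the enum variants, field names, and key type below are assumptions:

    // Before: a free function decrypted output amounts.
    //   fn amount_decryption(...) -> u64
    // After: decryption is a method on the encrypted-amount type itself.
    pub enum EncryptedAmount {
      Original { key: [u8; 32], amount: [u8; 32] },
      Compact { amount: [u8; 8] },
    }

    impl EncryptedAmount {
      /// Decrypt this amount using the output's shared-secret key.
      pub fn decrypt(&self, _key: [u8; 32]) -> u64 {
        // Derive a keystream from the shared secret and strip it off the
        // ciphertext; elided here, as this is a sketch only.
        todo!()
      }
    }
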
Luke Parker
817b8e99d3 Remove experimental feature from monero-serai 2024-04-22 00:47:59 -04:00
Luke Parker
925cef17f2 Tidy Borromean/MLSAG a tad 2024-04-22 00:46:46 -04:00
Luke Parker
3283cd79e4 Remove the distribution cache
It's a notable bandwidth/performance improvement, yet it's not ready. We need a
dedicated Distribution struct which is managed by the wallet and passed in.
While we can do that now, it's not currently worth the effort.
2024-04-22 00:29:18 -04:00
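
For illustration, one shape the proposed replacement could take. This is a sketch only; everything below besides the Distribution name is an assumption:

    /// A cache of the RingCT output distribution, owned by the wallet and
    /// passed into RPC/selection code rather than cached inside the library.
    pub struct Distribution {
      /// Cumulative output counts, indexed by block height.
      cumulative: Vec<u64>,
    }

    impl Distribution {
      /// Extend the cache with newly fetched distribution entries.
      pub fn extend(&mut self, entries: impl IntoIterator<Item = u64>) {
        self.cumulative.extend(entries);
      }

      /// The height the cache covers, so the wallet knows what to fetch next.
      pub fn height(&self) -> usize {
        self.cumulative.len()
      }
    }
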
Luke Parker
51e2f24bc1 Make CLSAG signing private
Also adds a bit more documentation and does a bit more tidying.
2024-04-22 00:25:58 -04:00
Luke Parker
372e29fe08 Rename Bulletproofs to Bulletproof, since they are a single Bulletproof
Also bifurcates prove with prove_plus, and adds a few documentation items.
2024-04-22 00:00:14 -04:00
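
For illustration, a sketch of the bifurcated API. The prove/prove_plus split and the singular Bulletproof name come from the commit message; the variant fields and signatures are assumptions:

    /// A single Bulletproof (hence the singular name), either the original
    /// protocol or Bulletproofs+.
    pub enum Bulletproof {
      Original { proof: Vec<u8> },
      Plus { proof: Vec<u8> },
    }

    impl Bulletproof {
      /// Prove with the original Bulletproofs protocol.
      pub fn prove(_amounts: &[u64]) -> Bulletproof {
        Bulletproof::Original { proof: Vec::new() } // sketch only
      }

      /// Prove with Bulletproofs+, now a distinct entry point instead of a flag.
      pub fn prove_plus(_amounts: &[u64]) -> Bulletproof {
        Bulletproof::Plus { proof: Vec::new() } // sketch only
      }
    }
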
Luke Parker
fccb1aea51 Remove unsafe creation of dalek_ff_group::EdwardsPoint in BP+ 2024-04-21 23:45:07 -04:00
874 changed files with 78584 additions and 67314 deletions

View File

@@ -37,4 +37,4 @@ runs:
- name: Bitcoin Regtest Daemon
shell: bash
run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -txindex -daemon
run: PATH=$PATH:/usr/bin ./orchestration/dev/coins/bitcoin/run.sh -daemon

View File

@@ -42,8 +42,8 @@ runs:
shell: bash
run: |
cargo install svm-rs
svm install 0.8.26
svm use 0.8.26
svm install 0.8.25
svm use 0.8.25
# - name: Cache Rust
# uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43

View File

@@ -5,7 +5,7 @@ inputs:
version:
description: "Version to download and run"
required: false
default: v0.18.3.4
default: v0.18.3.1
runs:
using: "composite"

View File

@@ -5,7 +5,7 @@ inputs:
version:
description: "Version to download and run"
required: false
default: v0.18.3.4
default: v0.18.3.1
runs:
using: "composite"
@@ -43,4 +43,4 @@ runs:
- name: Monero Regtest Daemon
shell: bash
run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/monero/run.sh --detach
run: PATH=$PATH:/usr/bin ./orchestration/dev/coins/monero/run.sh --detach

View File

@@ -5,12 +5,12 @@ inputs:
monero-version:
description: "Monero version to download and run as a regtest node"
required: false
default: v0.18.3.4
default: v0.18.3.1
bitcoin-version:
description: "Bitcoin version to download and run as a regtest node"
required: false
default: "27.1"
default: "27.0"
runs:
using: "composite"

View File

@@ -1 +1 @@
nightly-2025-02-01
nightly-2024-02-07

View File

@@ -1,4 +1,4 @@
name: networks/ Tests
name: coins/ Tests
on:
push:
@@ -7,18 +7,18 @@ on:
paths:
- "common/**"
- "crypto/**"
- "networks/**"
- "coins/**"
pull_request:
paths:
- "common/**"
- "crypto/**"
- "networks/**"
- "coins/**"
workflow_dispatch:
jobs:
test-networks:
test-coins:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
@@ -30,20 +30,7 @@ jobs:
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
-p bitcoin-serai \
-p build-solidity-contracts \
-p ethereum-schnorr-contract \
-p alloy-simple-request-transport \
-p serai-ethereum-relayer \
-p monero-io \
-p ethereum-serai \
-p monero-generators \
-p monero-primitives \
-p monero-mlsag \
-p monero-clsag \
-p monero-borromean \
-p monero-bulletproofs \
-p monero-serai \
-p monero-rpc \
-p monero-simple-request-rpc \
-p monero-address \
-p monero-wallet \
-p monero-serai-verify-chain
-p monero-serai

View File

@@ -27,8 +27,6 @@ jobs:
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
-p std-shims \
-p zalloc \
-p patchable-async-sleep \
-p serai-db \
-p serai-env \
-p serai-task \
-p simple-request

View File

@@ -7,7 +7,7 @@ on:
paths:
- "common/**"
- "crypto/**"
- "networks/**"
- "coins/**"
- "message-queue/**"
- "coordinator/**"
- "orchestration/**"
@@ -18,7 +18,7 @@ on:
paths:
- "common/**"
- "crypto/**"
- "networks/**"
- "coins/**"
- "message-queue/**"
- "coordinator/**"
- "orchestration/**"
@@ -37,4 +37,4 @@ jobs:
uses: ./.github/actions/build-dependencies
- name: Run coordinator Docker tests
run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-coordinator-tests
run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features

View File

@@ -35,10 +35,6 @@ jobs:
-p multiexp \
-p schnorr-signatures \
-p dleq \
-p generalized-bulletproofs \
-p generalized-bulletproofs-circuit-abstraction \
-p ec-divisors \
-p generalized-bulletproofs-ec-gadgets \
-p dkg \
-p modular-frost \
-p frost-schnorrkel

View File

@@ -19,4 +19,4 @@ jobs:
uses: ./.github/actions/build-dependencies
- name: Run Full Stack Docker tests
run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-full-stack-tests
run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features

View File

@@ -73,15 +73,6 @@ jobs:
- name: Run rustfmt
run: cargo +${{ steps.nightly.outputs.version }} fmt -- --check
- name: Install foundry
uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
with:
version: nightly-41d4e5437107f6f42c7711123890147bc736a609
cache: false
- name: Run forge fmt
run: FOUNDRY_FMT_SORT_INPUTS=false FOUNDRY_FMT_LINE_LENGTH=100 FOUNDRY_FMT_TAB_WIDTH=2 FOUNDRY_FMT_BRACKET_SPACING=true FOUNDRY_FMT_INT_TYPES=preserve forge fmt --check $(find . -iname "*.sol")
machete:
runs-on: ubuntu-latest
steps:
@@ -90,25 +81,3 @@ jobs:
run: |
cargo install cargo-machete
cargo machete
slither:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Slither
run: |
python3 -m pip install solc-select
solc-select install 0.8.26
solc-select use 0.8.26
python3 -m pip install slither-analyzer
slither --include-paths ./networks/ethereum/schnorr/contracts/Schnorr.sol
slither --include-paths ./networks/ethereum/schnorr/contracts ./networks/ethereum/schnorr/contracts/tests/Schnorr.sol
slither processor/ethereum/deployer/contracts/Deployer.sol
slither processor/ethereum/erc20/contracts/IERC20.sol
cp networks/ethereum/schnorr/contracts/Schnorr.sol processor/ethereum/router/contracts/
cp processor/ethereum/erc20/contracts/IERC20.sol processor/ethereum/router/contracts/
cd processor/ethereum/router/contracts
slither Router.sol

View File

@@ -33,4 +33,4 @@ jobs:
uses: ./.github/actions/build-dependencies
- name: Run message-queue Docker tests
run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-message-queue-tests
run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features

View File

@@ -5,12 +5,12 @@ on:
branches:
- develop
paths:
- "networks/monero/**"
- "coins/monero/**"
- "processor/**"
pull_request:
paths:
- "networks/monero/**"
- "coins/monero/**"
- "processor/**"
workflow_dispatch:
@@ -26,19 +26,7 @@ jobs:
uses: ./.github/actions/test-dependencies
- name: Run Unit Tests Without Features
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-io --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-generators --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-primitives --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-mlsag --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-clsag --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-borromean --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-bulletproofs --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-rpc --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-address --lib
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --lib
run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --lib
# Doesn't run unit tests with features as the tests workflow will
@@ -47,7 +35,7 @@ jobs:
# Test against all supported protocol versions
strategy:
matrix:
version: [v0.17.3.2, v0.18.3.4]
version: [v0.17.3.2, v0.18.2.0]
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
@@ -58,15 +46,11 @@ jobs:
monero-version: ${{ matrix.version }}
- name: Run Integration Tests Without Features
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --test '*'
# Runs with the binaries feature so the binaries build
# https://github.com/rust-lang/cargo/issues/8396
run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --features binaries --test '*'
- name: Run Integration Tests
# Don't run if the tests workflow also will
if: ${{ matrix.version != 'v0.18.3.4' }}
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --all-features --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --all-features --test '*'
if: ${{ matrix.version != 'v0.18.2.0' }}
run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --all-features --test '*'

View File

@@ -1,259 +0,0 @@
name: Weekly MSRV Check
on:
schedule:
- cron: "0 0 * * 0"
workflow_dispatch:
jobs:
msrv-common:
name: Run cargo msrv on common
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install cargo msrv
run: cargo install --locked cargo-msrv
- name: Run cargo msrv on common
run: |
cargo msrv verify --manifest-path common/zalloc/Cargo.toml
cargo msrv verify --manifest-path common/std-shims/Cargo.toml
cargo msrv verify --manifest-path common/env/Cargo.toml
cargo msrv verify --manifest-path common/db/Cargo.toml
cargo msrv verify --manifest-path common/task/Cargo.toml
cargo msrv verify --manifest-path common/request/Cargo.toml
cargo msrv verify --manifest-path common/patchable-async-sleep/Cargo.toml
msrv-crypto:
name: Run cargo msrv on crypto
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install cargo msrv
run: cargo install --locked cargo-msrv
- name: Run cargo msrv on crypto
run: |
cargo msrv verify --manifest-path crypto/transcript/Cargo.toml
cargo msrv verify --manifest-path crypto/ff-group-tests/Cargo.toml
cargo msrv verify --manifest-path crypto/dalek-ff-group/Cargo.toml
cargo msrv verify --manifest-path crypto/ed448/Cargo.toml
cargo msrv verify --manifest-path crypto/multiexp/Cargo.toml
cargo msrv verify --manifest-path crypto/dleq/Cargo.toml
cargo msrv verify --manifest-path crypto/ciphersuite/Cargo.toml
cargo msrv verify --manifest-path crypto/schnorr/Cargo.toml
cargo msrv verify --manifest-path crypto/evrf/generalized-bulletproofs/Cargo.toml
cargo msrv verify --manifest-path crypto/evrf/circuit-abstraction/Cargo.toml
cargo msrv verify --manifest-path crypto/evrf/divisors/Cargo.toml
cargo msrv verify --manifest-path crypto/evrf/ec-gadgets/Cargo.toml
cargo msrv verify --manifest-path crypto/evrf/embedwards25519/Cargo.toml
cargo msrv verify --manifest-path crypto/evrf/secq256k1/Cargo.toml
cargo msrv verify --manifest-path crypto/dkg/Cargo.toml
cargo msrv verify --manifest-path crypto/frost/Cargo.toml
cargo msrv verify --manifest-path crypto/schnorrkel/Cargo.toml
msrv-networks:
name: Run cargo msrv on networks
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install cargo msrv
run: cargo install --locked cargo-msrv
- name: Run cargo msrv on networks
run: |
cargo msrv verify --manifest-path networks/bitcoin/Cargo.toml
cargo msrv verify --manifest-path networks/ethereum/build-contracts/Cargo.toml
cargo msrv verify --manifest-path networks/ethereum/schnorr/Cargo.toml
cargo msrv verify --manifest-path networks/ethereum/alloy-simple-request-transport/Cargo.toml
cargo msrv verify --manifest-path networks/ethereum/relayer/Cargo.toml --features parity-db
cargo msrv verify --manifest-path networks/monero/io/Cargo.toml
cargo msrv verify --manifest-path networks/monero/generators/Cargo.toml
cargo msrv verify --manifest-path networks/monero/primitives/Cargo.toml
cargo msrv verify --manifest-path networks/monero/ringct/mlsag/Cargo.toml
cargo msrv verify --manifest-path networks/monero/ringct/clsag/Cargo.toml
cargo msrv verify --manifest-path networks/monero/ringct/borromean/Cargo.toml
cargo msrv verify --manifest-path networks/monero/ringct/bulletproofs/Cargo.toml
cargo msrv verify --manifest-path networks/monero/Cargo.toml
cargo msrv verify --manifest-path networks/monero/rpc/Cargo.toml
cargo msrv verify --manifest-path networks/monero/rpc/simple-request/Cargo.toml
cargo msrv verify --manifest-path networks/monero/wallet/address/Cargo.toml
cargo msrv verify --manifest-path networks/monero/wallet/Cargo.toml
cargo msrv verify --manifest-path networks/monero/verify-chain/Cargo.toml
msrv-message-queue:
name: Run cargo msrv on message-queue
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install cargo msrv
run: cargo install --locked cargo-msrv
- name: Run cargo msrv on message-queue
run: |
cargo msrv verify --manifest-path message-queue/Cargo.toml --features parity-db
msrv-processor:
name: Run cargo msrv on processor
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install cargo msrv
run: cargo install --locked cargo-msrv
- name: Run cargo msrv on processor
run: |
cargo msrv verify --manifest-path processor/view-keys/Cargo.toml
cargo msrv verify --manifest-path processor/primitives/Cargo.toml
cargo msrv verify --manifest-path processor/messages/Cargo.toml
cargo msrv verify --manifest-path processor/scanner/Cargo.toml
cargo msrv verify --manifest-path processor/scheduler/primitives/Cargo.toml
cargo msrv verify --manifest-path processor/scheduler/smart-contract/Cargo.toml
cargo msrv verify --manifest-path processor/scheduler/utxo/primitives/Cargo.toml
cargo msrv verify --manifest-path processor/scheduler/utxo/standard/Cargo.toml
cargo msrv verify --manifest-path processor/scheduler/utxo/transaction-chaining/Cargo.toml
cargo msrv verify --manifest-path processor/key-gen/Cargo.toml
cargo msrv verify --manifest-path processor/frost-attempt-manager/Cargo.toml
cargo msrv verify --manifest-path processor/signers/Cargo.toml
cargo msrv verify --manifest-path processor/bin/Cargo.toml --features parity-db
cargo msrv verify --manifest-path processor/bitcoin/Cargo.toml
cargo msrv verify --manifest-path processor/ethereum/primitives/Cargo.toml
cargo msrv verify --manifest-path processor/ethereum/test-primitives/Cargo.toml
cargo msrv verify --manifest-path processor/ethereum/erc20/Cargo.toml
cargo msrv verify --manifest-path processor/ethereum/deployer/Cargo.toml
cargo msrv verify --manifest-path processor/ethereum/router/Cargo.toml
cargo msrv verify --manifest-path processor/ethereum/Cargo.toml
cargo msrv verify --manifest-path processor/monero/Cargo.toml
msrv-coordinator:
name: Run cargo msrv on coordinator
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install cargo msrv
run: cargo install --locked cargo-msrv
- name: Run cargo msrv on coordinator
run: |
cargo msrv verify --manifest-path coordinator/tributary-sdk/tendermint/Cargo.toml
cargo msrv verify --manifest-path coordinator/tributary-sdk/Cargo.toml
cargo msrv verify --manifest-path coordinator/cosign/Cargo.toml
cargo msrv verify --manifest-path coordinator/substrate/Cargo.toml
cargo msrv verify --manifest-path coordinator/tributary/Cargo.toml
cargo msrv verify --manifest-path coordinator/p2p/Cargo.toml
cargo msrv verify --manifest-path coordinator/p2p/libp2p/Cargo.toml
cargo msrv verify --manifest-path coordinator/Cargo.toml
msrv-substrate:
name: Run cargo msrv on substrate
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install cargo msrv
run: cargo install --locked cargo-msrv
- name: Run cargo msrv on substrate
run: |
cargo msrv verify --manifest-path substrate/primitives/Cargo.toml
cargo msrv verify --manifest-path substrate/coins/primitives/Cargo.toml
cargo msrv verify --manifest-path substrate/coins/pallet/Cargo.toml
cargo msrv verify --manifest-path substrate/dex/pallet/Cargo.toml
cargo msrv verify --manifest-path substrate/economic-security/pallet/Cargo.toml
cargo msrv verify --manifest-path substrate/genesis-liquidity/primitives/Cargo.toml
cargo msrv verify --manifest-path substrate/genesis-liquidity/pallet/Cargo.toml
cargo msrv verify --manifest-path substrate/in-instructions/primitives/Cargo.toml
cargo msrv verify --manifest-path substrate/in-instructions/pallet/Cargo.toml
cargo msrv verify --manifest-path substrate/validator-sets/pallet/Cargo.toml
cargo msrv verify --manifest-path substrate/validator-sets/primitives/Cargo.toml
cargo msrv verify --manifest-path substrate/emissions/primitives/Cargo.toml
cargo msrv verify --manifest-path substrate/emissions/pallet/Cargo.toml
cargo msrv verify --manifest-path substrate/signals/primitives/Cargo.toml
cargo msrv verify --manifest-path substrate/signals/pallet/Cargo.toml
cargo msrv verify --manifest-path substrate/abi/Cargo.toml
cargo msrv verify --manifest-path substrate/client/Cargo.toml
cargo msrv verify --manifest-path substrate/runtime/Cargo.toml
cargo msrv verify --manifest-path substrate/node/Cargo.toml
msrv-orchestration:
name: Run cargo msrv on orchestration
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install cargo msrv
run: cargo install --locked cargo-msrv
- name: Run cargo msrv on message-queue
run: |
cargo msrv verify --manifest-path orchestration/Cargo.toml
msrv-mini:
name: Run cargo msrv on mini
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
- name: Install Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Install cargo msrv
run: cargo install --locked cargo-msrv
- name: Run cargo msrv on mini
run: |
cargo msrv verify --manifest-path mini/Cargo.toml

View File

@@ -7,14 +7,14 @@ on:
paths:
- "common/**"
- "crypto/**"
- "networks/**"
- "coins/**"
- "tests/no-std/**"
pull_request:
paths:
- "common/**"
- "crypto/**"
- "networks/**"
- "coins/**"
- "tests/no-std/**"
workflow_dispatch:
@@ -32,4 +32,4 @@ jobs:
run: sudo apt update && sudo apt install -y gcc-riscv64-unknown-elf gcc-multilib && rustup target add riscv32imac-unknown-none-elf
- name: Verify no-std builds
run: CFLAGS=-I/usr/include cargo build --target riscv32imac-unknown-none-elf -p serai-no-std-tests
run: cd tests/no-std && CFLAGS=-I/usr/include cargo build --target riscv32imac-unknown-none-elf

View File

@@ -1,7 +1,6 @@
# MIT License
#
# Copyright (c) 2022 just-the-docs
# Copyright (c) 2022-2024 Luke Parker
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -21,21 +20,31 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
name: Deploy Rust docs and Jekyll site to Pages
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
# Sample workflow for building and deploying a Jekyll site to GitHub Pages
name: Deploy Jekyll site to Pages
on:
push:
branches:
- "develop"
paths:
- "docs/**"
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
contents: read
pages: write
id-token: write
# Only allow one concurrent deployment
# Allow one concurrent deployment
concurrency:
group: "pages"
cancel-in-progress: true
@@ -44,6 +53,9 @@ jobs:
# Build job
build:
runs-on: ubuntu-latest
defaults:
run:
working-directory: docs
steps:
- name: Checkout
uses: actions/checkout@v3
@@ -57,24 +69,11 @@ jobs:
id: pages
uses: actions/configure-pages@v3
- name: Build with Jekyll
run: cd ${{ github.workspace }}/docs && bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
env:
JEKYLL_ENV: production
- name: Get nightly version to use
id: nightly
shell: bash
run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT
- name: Build Dependencies
uses: ./.github/actions/build-dependencies
- name: Build Rust docs
run: |
rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32-unknown-unknown -c rust-docs
RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --all-features
mv target/doc docs/_site/rust
- name: Upload artifact
uses: actions/upload-pages-artifact@v3
uses: actions/upload-pages-artifact@v1
with:
path: "docs/_site/"
@@ -88,4 +87,4 @@ jobs:
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v4
uses: actions/deploy-pages@v2

View File

@@ -7,7 +7,7 @@ on:
paths:
- "common/**"
- "crypto/**"
- "networks/**"
- "coins/**"
- "message-queue/**"
- "processor/**"
- "orchestration/**"
@@ -18,7 +18,7 @@ on:
paths:
- "common/**"
- "crypto/**"
- "networks/**"
- "coins/**"
- "message-queue/**"
- "processor/**"
- "orchestration/**"
@@ -37,4 +37,4 @@ jobs:
uses: ./.github/actions/build-dependencies
- name: Run processor Docker tests
run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-processor-tests
run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features

View File

@@ -33,4 +33,4 @@ jobs:
uses: ./.github/actions/build-dependencies
- name: Run Reproducible Runtime tests
run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-reproducible-runtime-tests
run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features

View File

@@ -7,7 +7,7 @@ on:
paths:
- "common/**"
- "crypto/**"
- "networks/**"
- "coins/**"
- "message-queue/**"
- "processor/**"
- "coordinator/**"
@@ -17,7 +17,7 @@ on:
paths:
- "common/**"
- "crypto/**"
- "networks/**"
- "coins/**"
- "message-queue/**"
- "processor/**"
- "coordinator/**"
@@ -39,33 +39,9 @@ jobs:
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
-p serai-message-queue \
-p serai-processor-messages \
-p serai-processor-key-gen \
-p serai-processor-view-keys \
-p serai-processor-frost-attempt-manager \
-p serai-processor-primitives \
-p serai-processor-scanner \
-p serai-processor-scheduler-primitives \
-p serai-processor-utxo-scheduler-primitives \
-p serai-processor-utxo-scheduler \
-p serai-processor-transaction-chaining-scheduler \
-p serai-processor-smart-contract-scheduler \
-p serai-processor-signers \
-p serai-processor-bin \
-p serai-bitcoin-processor \
-p serai-processor-ethereum-primitives \
-p serai-processor-ethereum-test-primitives \
-p serai-processor-ethereum-deployer \
-p serai-processor-ethereum-router \
-p serai-processor-ethereum-erc20 \
-p serai-ethereum-processor \
-p serai-monero-processor \
-p serai-processor \
-p tendermint-machine \
-p tributary-sdk \
-p serai-cosign \
-p serai-coordinator-substrate \
-p serai-coordinator-tributary \
-p serai-coordinator-p2p \
-p serai-coordinator-libp2p-p2p \
-p tributary-chain \
-p serai-coordinator \
-p serai-orchestrator \
-p serai-docker-tests
@@ -87,11 +63,6 @@ jobs:
-p serai-dex-pallet \
-p serai-validator-sets-primitives \
-p serai-validator-sets-pallet \
-p serai-genesis-liquidity-primitives \
-p serai-genesis-liquidity-pallet \
-p serai-emissions-primitives \
-p serai-emissions-pallet \
-p serai-economic-security-pallet \
-p serai-in-instructions-primitives \
-p serai-in-instructions-pallet \
-p serai-signals-primitives \

Cargo.lock (generated, 5623 changed lines)

File diff suppressed because it is too large

View File

@@ -2,10 +2,9 @@
resolver = "2"
members = [
# Version patches
"patches/parking_lot_core",
"patches/parking_lot",
"patches/zstd",
"patches/rocksdb",
"patches/proc-macro-crate",
# std patches
"patches/matches",
@@ -17,10 +16,8 @@ members = [
"common/std-shims",
"common/zalloc",
"common/patchable-async-sleep",
"common/db",
"common/env",
"common/task",
"common/request",
"crypto/transcript",
@@ -31,75 +28,26 @@ members = [
"crypto/ciphersuite",
"crypto/multiexp",
"crypto/schnorr",
"crypto/dleq",
"crypto/evrf/secq256k1",
"crypto/evrf/embedwards25519",
"crypto/evrf/generalized-bulletproofs",
"crypto/evrf/circuit-abstraction",
"crypto/evrf/divisors",
"crypto/evrf/ec-gadgets",
"crypto/dkg",
"crypto/frost",
"crypto/schnorrkel",
"networks/bitcoin",
"networks/ethereum/build-contracts",
"networks/ethereum/schnorr",
"networks/ethereum/alloy-simple-request-transport",
"networks/ethereum/relayer",
"networks/monero/io",
"networks/monero/generators",
"networks/monero/primitives",
"networks/monero/ringct/mlsag",
"networks/monero/ringct/clsag",
"networks/monero/ringct/borromean",
"networks/monero/ringct/bulletproofs",
"networks/monero",
"networks/monero/rpc",
"networks/monero/rpc/simple-request",
"networks/monero/wallet/address",
"networks/monero/wallet",
"networks/monero/verify-chain",
"coins/bitcoin",
"coins/ethereum/alloy-simple-request-transport",
"coins/ethereum",
"coins/monero/generators",
"coins/monero",
"message-queue",
"processor/messages",
"processor",
"processor/key-gen",
"processor/view-keys",
"processor/frost-attempt-manager",
"processor/primitives",
"processor/scanner",
"processor/scheduler/primitives",
"processor/scheduler/utxo/primitives",
"processor/scheduler/utxo/standard",
"processor/scheduler/utxo/transaction-chaining",
"processor/scheduler/smart-contract",
"processor/signers",
"processor/bin",
"processor/bitcoin",
"processor/ethereum/primitives",
"processor/ethereum/test-primitives",
"processor/ethereum/deployer",
"processor/ethereum/router",
"processor/ethereum/erc20",
"processor/ethereum",
"processor/monero",
"coordinator/tributary-sdk/tendermint",
"coordinator/tributary-sdk",
"coordinator/cosign",
"coordinator/substrate",
"coordinator/tributary/tendermint",
"coordinator/tributary",
"coordinator/p2p",
"coordinator/p2p/libp2p",
"coordinator",
"substrate/primitives",
@@ -107,22 +55,12 @@ members = [
"substrate/coins/primitives",
"substrate/coins/pallet",
"substrate/dex/pallet",
"substrate/in-instructions/primitives",
"substrate/in-instructions/pallet",
"substrate/validator-sets/primitives",
"substrate/validator-sets/pallet",
"substrate/genesis-liquidity/primitives",
"substrate/genesis-liquidity/pallet",
"substrate/emissions/primitives",
"substrate/emissions/pallet",
"substrate/economic-security/pallet",
"substrate/in-instructions/primitives",
"substrate/in-instructions/pallet",
"substrate/signals/primitives",
"substrate/signals/pallet",
@@ -141,9 +79,9 @@ members = [
"tests/docker",
"tests/message-queue",
# TODO "tests/processor",
# TODO "tests/coordinator",
# TODO "tests/full-stack",
"tests/processor",
"tests/coordinator",
"tests/full-stack",
"tests/reproducible-runtime",
]
@@ -151,32 +89,18 @@ members = [
# to the extensive operations required for Bulletproofs
[profile.dev.package]
subtle = { opt-level = 3 }
curve25519-dalek = { opt-level = 3 }
ff = { opt-level = 3 }
group = { opt-level = 3 }
crypto-bigint = { opt-level = 3 }
secp256k1 = { opt-level = 3 }
curve25519-dalek = { opt-level = 3 }
dalek-ff-group = { opt-level = 3 }
minimal-ed448 = { opt-level = 3 }
multiexp = { opt-level = 3 }
secq256k1 = { opt-level = 3 }
embedwards25519 = { opt-level = 3 }
generalized-bulletproofs = { opt-level = 3 }
generalized-bulletproofs-circuit-abstraction = { opt-level = 3 }
ec-divisors = { opt-level = 3 }
generalized-bulletproofs-ec-gadgets = { opt-level = 3 }
dkg = { opt-level = 3 }
monero-generators = { opt-level = 3 }
monero-borromean = { opt-level = 3 }
monero-bulletproofs = { opt-level = 3 }
monero-mlsag = { opt-level = 3 }
monero-clsag = { opt-level = 3 }
monero-serai = { opt-level = 3 }
[profile.release]
panic = "unwind"
@@ -185,12 +109,15 @@ panic = "unwind"
# https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }
parking_lot_core = { path = "patches/parking_lot_core" }
parking_lot = { path = "patches/parking_lot" }
# Needed due to dockertest's usage of `Rc`s when we need `Arc`s
dockertest = { git = "https://github.com/kayabaNerve/dockertest-rs", branch = "arc" }
# wasmtime pulls in an old version for this
zstd = { path = "patches/zstd" }
# Needed for WAL compression
rocksdb = { path = "patches/rocksdb" }
# proc-macro-crate 2 binds to an old version of toml for msrv so we patch to 3
proc-macro-crate = { path = "patches/proc-macro-crate" }
# is-terminal now has an std-based solution with an equivalent API
is-terminal = { path = "patches/is-terminal" }
@@ -207,8 +134,6 @@ directories-next = { path = "patches/directories-next" }
[workspace.lints.clippy]
unwrap_or_default = "allow"
map_unwrap_or = "allow"
needless_continue = "allow"
borrow_as_ptr = "deny"
cast_lossless = "deny"
cast_possible_truncation = "deny"
@@ -236,9 +161,11 @@ manual_instant_elapsed = "deny"
manual_let_else = "deny"
manual_ok_or = "deny"
manual_string_new = "deny"
map_unwrap_or = "deny"
match_bool = "deny"
match_same_arms = "deny"
missing_fields_in_debug = "deny"
needless_continue = "deny"
needless_pass_by_value = "deny"
ptr_cast_constness = "deny"
range_minus_one = "deny"
@@ -246,7 +173,6 @@ range_plus_one = "deny"
redundant_closure_for_method_calls = "deny"
redundant_else = "deny"
string_add_assign = "deny"
string_slice = "deny"
unchecked_duration_subtraction = "deny"
uninlined_format_args = "deny"
unnecessary_box_returns = "deny"

View File

@@ -5,4 +5,4 @@ a full copy of the AGPL-3.0 License is included in the root of this repository
as a reference text. This copy should be provided with any distribution of a
crate licensed under the AGPL-3.0, as per its terms.
The GitHub actions/workflows (`.github`) are licensed under the MIT license.
The GitHub actions (`.github/actions`) are licensed under the MIT license.

View File

@@ -24,7 +24,7 @@ wallet.
infrastructure, to our IETF-compliant FROST implementation, to a DLEq proof as
needed for Bitcoin-Monero atomic swaps.
- `networks`: Various libraries intended for usage in Serai yet also by the
- `coins`: Various coin libraries intended for usage in Serai yet also by the
wider community. This means they will always support the functionality Serai
needs, yet won't disadvantage other use cases when possible.

View File

@@ -0,0 +1,6 @@
# Cypher Stack /coins/bitcoin Audit, August 2023
This audit was over the /coins/bitcoin folder. It is encompassing up to commit
5121ca75199dff7bd34230880a1fdd793012068c.
Please see https://github.com/cypherstack/serai-btc-audit for provenance.

View File

@@ -1,7 +0,0 @@
# Cypher Stack /networks/bitcoin Audit, August 2023
This audit was over the `/networks/bitcoin` folder (at the time located at
`/coins/bitcoin`). It is encompassing up to commit
5121ca75199dff7bd34230880a1fdd793012068c.
Please see https://github.com/cypherstack/serai-btc-audit for provenance.

View File

@@ -1,427 +0,0 @@
Attribution-ShareAlike 4.0 International
=======================================================================
Creative Commons Corporation ("Creative Commons") is not a law firm and
does not provide legal services or legal advice. Distribution of
Creative Commons public licenses does not create a lawyer-client or
other relationship. Creative Commons makes its licenses and related
information available on an "as-is" basis. Creative Commons gives no
warranties regarding its licenses, any material licensed under their
terms and conditions, or any related information. Creative Commons
disclaims all liability for damages resulting from their use to the
fullest extent possible.
Using Creative Commons Public Licenses
Creative Commons public licenses provide a standard set of terms and
conditions that creators and other rights holders may use to share
original works of authorship and other material subject to copyright
and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.
Considerations for licensors: Our public licenses are
intended for use by those authorized to give the public
permission to use material in ways otherwise restricted by
copyright and certain other rights. Our licenses are
irrevocable. Licensors should read and understand the terms
and conditions of the license they choose before applying it.
Licensors should also secure all rights necessary before
applying our licenses so that the public can reuse the
material as expected. Licensors should clearly mark any
material not subject to the license. This includes other CC-
licensed material, or material used under an exception or
limitation to copyright. More considerations for licensors:
wiki.creativecommons.org/Considerations_for_licensors
Considerations for the public: By using one of our public
licenses, a licensor grants the public permission to use the
licensed material under specified terms and conditions. If
the licensor's permission is not necessary for any reason--for
example, because of any applicable exception or limitation to
copyright--then that use is not regulated by the license. Our
licenses grant only permissions under copyright and certain
other rights that a licensor has authority to grant. Use of
the licensed material may still be restricted for other
reasons, including because others have copyright or other
rights in the material. A licensor may make special requests,
such as asking that all changes be marked or described.
Although not required by our licenses, you are encouraged to
respect those requests where reasonable. More considerations
for the public:
wiki.creativecommons.org/Considerations_for_licensees
=======================================================================
Creative Commons Attribution-ShareAlike 4.0 International Public
License
By exercising the Licensed Rights (defined below), You accept and agree
to be bound by the terms and conditions of this Creative Commons
Attribution-ShareAlike 4.0 International Public License ("Public
License"). To the extent this Public License may be interpreted as a
contract, You are granted the Licensed Rights in consideration of Your
acceptance of these terms and conditions, and the Licensor grants You
such rights in consideration of benefits the Licensor receives from
making the Licensed Material available under these terms and
conditions.
Section 1 -- Definitions.
a. Adapted Material means material subject to Copyright and Similar
Rights that is derived from or based upon the Licensed Material
and in which the Licensed Material is translated, altered,
arranged, transformed, or otherwise modified in a manner requiring
permission under the Copyright and Similar Rights held by the
Licensor. For purposes of this Public License, where the Licensed
Material is a musical work, performance, or sound recording,
Adapted Material is always produced where the Licensed Material is
synched in timed relation with a moving image.
b. Adapter's License means the license You apply to Your Copyright
and Similar Rights in Your contributions to Adapted Material in
accordance with the terms and conditions of this Public License.
c. BY-SA Compatible License means a license listed at
creativecommons.org/compatiblelicenses, approved by Creative
Commons as essentially the equivalent of this Public License.
d. Copyright and Similar Rights means copyright and/or similar rights
closely related to copyright including, without limitation,
performance, broadcast, sound recording, and Sui Generis Database
Rights, without regard to how the rights are labeled or
categorized. For purposes of this Public License, the rights
specified in Section 2(b)(1)-(2) are not Copyright and Similar
Rights.
e. Effective Technological Measures means those measures that, in the
absence of proper authority, may not be circumvented under laws
fulfilling obligations under Article 11 of the WIPO Copyright
Treaty adopted on December 20, 1996, and/or similar international
agreements.
f. Exceptions and Limitations means fair use, fair dealing, and/or
any other exception or limitation to Copyright and Similar Rights
that applies to Your use of the Licensed Material.
g. License Elements means the license attributes listed in the name
of a Creative Commons Public License. The License Elements of this
Public License are Attribution and ShareAlike.
h. Licensed Material means the artistic or literary work, database,
or other material to which the Licensor applied this Public
License.
i. Licensed Rights means the rights granted to You subject to the
terms and conditions of this Public License, which are limited to
all Copyright and Similar Rights that apply to Your use of the
Licensed Material and that the Licensor has authority to license.
j. Licensor means the individual(s) or entity(ies) granting rights
under this Public License.
k. Share means to provide material to the public by any means or
process that requires permission under the Licensed Rights, such
as reproduction, public display, public performance, distribution,
dissemination, communication, or importation, and to make material
available to the public including in ways that members of the
public may access the material from a place and at a time
individually chosen by them.
l. Sui Generis Database Rights means rights other than copyright
resulting from Directive 96/9/EC of the European Parliament and of
the Council of 11 March 1996 on the legal protection of databases,
as amended and/or succeeded, as well as other essentially
equivalent rights anywhere in the world.
m. You means the individual or entity exercising the Licensed Rights
under this Public License. Your has a corresponding meaning.
Section 2 -- Scope.
a. License grant.
1. Subject to the terms and conditions of this Public License,
the Licensor hereby grants You a worldwide, royalty-free,
non-sublicensable, non-exclusive, irrevocable license to
exercise the Licensed Rights in the Licensed Material to:
a. reproduce and Share the Licensed Material, in whole or
in part; and
b. produce, reproduce, and Share Adapted Material.
2. Exceptions and Limitations. For the avoidance of doubt, where
Exceptions and Limitations apply to Your use, this Public
License does not apply, and You do not need to comply with
its terms and conditions.
3. Term. The term of this Public License is specified in Section
6(a).
4. Media and formats; technical modifications allowed. The
Licensor authorizes You to exercise the Licensed Rights in
all media and formats whether now known or hereafter created,
and to make technical modifications necessary to do so. The
Licensor waives and/or agrees not to assert any right or
authority to forbid You from making technical modifications
necessary to exercise the Licensed Rights, including
technical modifications necessary to circumvent Effective
Technological Measures. For purposes of this Public License,
simply making modifications authorized by this Section 2(a)
(4) never produces Adapted Material.
5. Downstream recipients.
a. Offer from the Licensor -- Licensed Material. Every
recipient of the Licensed Material automatically
receives an offer from the Licensor to exercise the
Licensed Rights under the terms and conditions of this
Public License.
b. Additional offer from the Licensor -- Adapted Material.
Every recipient of Adapted Material from You
automatically receives an offer from the Licensor to
exercise the Licensed Rights in the Adapted Material
under the conditions of the Adapter's License You apply.
c. No downstream restrictions. You may not offer or impose
any additional or different terms or conditions on, or
apply any Effective Technological Measures to, the
Licensed Material if doing so restricts exercise of the
Licensed Rights by any recipient of the Licensed
Material.
6. No endorsement. Nothing in this Public License constitutes or
may be construed as permission to assert or imply that You
are, or that Your use of the Licensed Material is, connected
with, or sponsored, endorsed, or granted official status by,
the Licensor or others designated to receive attribution as
provided in Section 3(a)(1)(A)(i).
b. Other rights.
1. Moral rights, such as the right of integrity, are not
licensed under this Public License, nor are publicity,
privacy, and/or other similar personality rights; however, to
the extent possible, the Licensor waives and/or agrees not to
assert any such rights held by the Licensor to the limited
extent necessary to allow You to exercise the Licensed
Rights, but not otherwise.
2. Patent and trademark rights are not licensed under this
Public License.
3. To the extent possible, the Licensor waives any right to
collect royalties from You for the exercise of the Licensed
Rights, whether directly or through a collecting society
under any voluntary or waivable statutory or compulsory
licensing scheme. In all other cases the Licensor expressly
reserves any right to collect such royalties.
Section 3 -- License Conditions.
Your exercise of the Licensed Rights is expressly made subject to the
following conditions.
a. Attribution.
1. If You Share the Licensed Material (including in modified
form), You must:
a. retain the following if it is supplied by the Licensor
with the Licensed Material:
i. identification of the creator(s) of the Licensed
Material and any others designated to receive
attribution, in any reasonable manner requested by
the Licensor (including by pseudonym if
designated);
ii. a copyright notice;
iii. a notice that refers to this Public License;
iv. a notice that refers to the disclaimer of
warranties;
v. a URI or hyperlink to the Licensed Material to the
extent reasonably practicable;
b. indicate if You modified the Licensed Material and
retain an indication of any previous modifications; and
c. indicate the Licensed Material is licensed under this
Public License, and include the text of, or the URI or
hyperlink to, this Public License.
2. You may satisfy the conditions in Section 3(a)(1) in any
reasonable manner based on the medium, means, and context in
which You Share the Licensed Material. For example, it may be
reasonable to satisfy the conditions by providing a URI or
hyperlink to a resource that includes the required
information.
3. If requested by the Licensor, You must remove any of the
information required by Section 3(a)(1)(A) to the extent
reasonably practicable.
b. ShareAlike.
In addition to the conditions in Section 3(a), if You Share
Adapted Material You produce, the following conditions also apply.
1. The Adapter's License You apply must be a Creative Commons
license with the same License Elements, this version or
later, or a BY-SA Compatible License.
2. You must include the text of, or the URI or hyperlink to, the
Adapter's License You apply. You may satisfy this condition
in any reasonable manner based on the medium, means, and
context in which You Share Adapted Material.
3. You may not offer or impose any additional or different terms
or conditions on, or apply any Effective Technological
Measures to, Adapted Material that restrict exercise of the
rights granted under the Adapter's License You apply.
Section 4 -- Sui Generis Database Rights.
Where the Licensed Rights include Sui Generis Database Rights that
apply to Your use of the Licensed Material:
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
to extract, reuse, reproduce, and Share all or a substantial
portion of the contents of the database;
b. if You include all or a substantial portion of the database
contents in a database in which You have Sui Generis Database
Rights, then the database in which You have Sui Generis Database
Rights (but not its individual contents) is Adapted Material,
including for purposes of Section 3(b); and
c. You must comply with the conditions in Section 3(a) if You Share
all or a substantial portion of the contents of the database.
For the avoidance of doubt, this Section 4 supplements and does not
replace Your obligations under this Public License where the Licensed
Rights include other Copyright and Similar Rights.
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
c. The disclaimer of warranties and limitation of liability provided
above shall be interpreted in a manner that, to the extent
possible, most closely approximates an absolute disclaimer and
waiver of all liability.
Section 6 -- Term and Termination.
a. This Public License applies for the term of the Copyright and
Similar Rights licensed here. However, if You fail to comply with
this Public License, then Your rights under this Public License
terminate automatically.
b. Where Your right to use the Licensed Material has terminated under
Section 6(a), it reinstates:
1. automatically as of the date the violation is cured, provided
it is cured within 30 days of Your discovery of the
violation; or
2. upon express reinstatement by the Licensor.
For the avoidance of doubt, this Section 6(b) does not affect any
right the Licensor may have to seek remedies for Your violations
of this Public License.
c. For the avoidance of doubt, the Licensor may also offer the
Licensed Material under separate terms or conditions or stop
distributing the Licensed Material at any time; however, doing so
will not terminate this Public License.
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
License.
Section 7 -- Other Terms and Conditions.
a. The Licensor shall not be bound by any additional or different
terms or conditions communicated by You unless expressly agreed.
b. Any arrangements, understandings, or agreements regarding the
Licensed Material not stated herein are separate from and
independent of the terms and conditions of this Public License.
Section 8 -- Interpretation.
a. For the avoidance of doubt, this Public License does not, and
shall not be interpreted to, reduce, limit, restrict, or impose
conditions on any use of the Licensed Material that could lawfully
be made without permission under this Public License.
b. To the extent possible, if any provision of this Public License is
deemed unenforceable, it shall be automatically reformed to the
minimum extent necessary to make it enforceable. If the provision
cannot be reformed, it shall be severed from this Public License
without affecting the enforceability of the remaining terms and
conditions.
c. No term or condition of this Public License will be waived and no
failure to comply consented to unless expressly agreed to by the
Licensor.
d. Nothing in this Public License constitutes or may be interpreted
as a limitation upon, or waiver of, any privileges and immunities
that apply to the Licensor or You, including from the legal
processes of any jurisdiction or authority.
=======================================================================
Creative Commons is not a party to its public
licenses. Notwithstanding, Creative Commons may elect to apply one of
its public licenses to material it publishes and in those instances
will be considered the “Licensor.” The text of the Creative Commons
public licenses is dedicated to the public domain under the CC0 Public
Domain Dedication. Except for the limited purpose of indicating that
material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the
public licenses.
Creative Commons may be contacted at creativecommons.org.

View File

@@ -1,14 +0,0 @@
# Trail of Bits Ethereum Contracts Audit, June 2025
This audit included:
- Our Schnorr contract and associated library (/networks/ethereum/schnorr)
- Our Ethereum primitives library (/processor/ethereum/primitives)
- Our Deployer contract and associated library (/processor/ethereum/deployer)
- Our ERC20 library (/processor/ethereum/erc20)
- Our Router contract and associated library (/processor/ethereum/router)
It is encompassing up to commit 4e0c58464fc4673623938335f06e2e9ea96ca8dd.
Please see
https://github.com/trailofbits/publications/blob/30c4fa3ebf39ff8e4d23ba9567344ec9691697b5/reviews/2025-04-serai-dex-security-review.pdf
for provenance.

View File

@@ -3,10 +3,10 @@ name = "bitcoin-serai"
version = "0.3.0"
description = "A Bitcoin library for FROST-signing transactions"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/networks/bitcoin"
repository = "https://github.com/serai-dex/serai/tree/develop/coins/bitcoin"
authors = ["Luke Parker <lukeparker5132@gmail.com>", "Vrx <vrx00@proton.me>"]
edition = "2021"
rust-version = "1.80"
rust-version = "1.74"
[package.metadata.docs.rs]
all-features = true
@@ -18,14 +18,16 @@ workspace = true
[dependencies]
std-shims = { version = "0.1.1", path = "../../common/std-shims", default-features = false }
thiserror = { version = "2", default-features = false }
thiserror = { version = "1", default-features = false, optional = true }
zeroize = { version = "^1.5", default-features = false }
rand_core = { version = "0.6", default-features = false }
bitcoin = { version = "0.32", default-features = false }
bitcoin = { version = "0.31", default-features = false, features = ["no-std"] }
k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits"] }
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["recommended"], optional = true }
frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.8", default-features = false, features = ["secp256k1"], optional = true }
hex = { version = "0.4", default-features = false, optional = true }
@@ -34,7 +36,7 @@ serde_json = { version = "1", default-features = false, optional = true }
simple-request = { path = "../../common/request", version = "0.1", default-features = false, features = ["tls", "basic-auth"], optional = true }
[dev-dependencies]
secp256k1 = { version = "0.29", default-features = false, features = ["std"] }
secp256k1 = { version = "0.28", default-features = false, features = ["std"] }
frost = { package = "modular-frost", path = "../../crypto/frost", features = ["tests"] }
@@ -44,7 +46,7 @@ tokio = { version = "1", features = ["macros"] }
std = [
"std-shims/std",
"thiserror/std",
"thiserror",
"zeroize/std",
"rand_core/std",
@@ -53,6 +55,8 @@ std = [
"bitcoin/serde",
"k256/std",
"transcript/std",
"frost",
"hex/std",

View File

@@ -40,12 +40,14 @@ mod frost_crypto {
use bitcoin::hashes::{HashEngine, Hash, sha256::Hash as Sha256};
use transcript::Transcript;
use k256::{elliptic_curve::ops::Reduce, U256, Scalar};
use frost::{
curve::{Ciphersuite, Secp256k1},
Participant, ThresholdKeys, ThresholdView, FrostError,
algorithm::{Hram as HramTrait, Algorithm, IetfSchnorr as FrostSchnorr},
algorithm::{Hram as HramTrait, Algorithm, Schnorr as FrostSchnorr},
};
use super::*;
@@ -80,17 +82,16 @@ mod frost_crypto {
///
/// This must be used with a ThresholdKeys whose group key is even. If it is odd, this will panic.
#[derive(Clone)]
pub struct Schnorr(FrostSchnorr<Secp256k1, Hram>);
impl Schnorr {
pub struct Schnorr<T: Sync + Clone + Debug + Transcript>(FrostSchnorr<Secp256k1, T, Hram>);
impl<T: Sync + Clone + Debug + Transcript> Schnorr<T> {
/// Construct a Schnorr algorithm continuing the specified transcript.
#[allow(clippy::new_without_default)]
pub fn new() -> Schnorr {
Schnorr(FrostSchnorr::ietf())
pub fn new(transcript: T) -> Schnorr<T> {
Schnorr(FrostSchnorr::new(transcript))
}
}
impl Algorithm<Secp256k1> for Schnorr {
type Transcript = <FrostSchnorr<Secp256k1, Hram> as Algorithm<Secp256k1>>::Transcript;
impl<T: Sync + Clone + Debug + Transcript> Algorithm<Secp256k1> for Schnorr<T> {
type Transcript = T;
type Addendum = ();
type Signature = [u8; 64];

View File

@@ -195,13 +195,13 @@ impl Rpc {
// If this was already successfully published, consider this having succeeded
if let RpcError::RequestError(Error { code, .. }) = e {
if code == RPC_VERIFY_ALREADY_IN_CHAIN {
return Ok(tx.compute_txid());
return Ok(tx.txid());
}
}
Err(e)?
}
};
if txid != tx.compute_txid() {
if txid != tx.txid() {
Err(RpcError::InvalidResponse("returned TX ID inequals calculated TX ID"))?;
}
Ok(txid)
@@ -215,7 +215,7 @@ impl Rpc {
let tx: Transaction = encode::deserialize(&bytes)
.map_err(|_| RpcError::InvalidResponse("node sent an improperly serialized transaction"))?;
let mut tx_hash = *tx.compute_txid().as_raw_hash().as_byte_array();
let mut tx_hash = *tx.txid().as_raw_hash().as_byte_array();
tx_hash.reverse();
if hash != &tx_hash {
Err(RpcError::InvalidResponse("node replied with a different transaction"))?;

View File

@@ -3,6 +3,7 @@ use rand_core::OsRng;
use secp256k1::{Secp256k1 as BContext, Message, schnorr::Signature};
use k256::Scalar;
use transcript::{Transcript, RecommendedTranscript};
use frost::{
curve::Secp256k1,
Participant,
@@ -24,7 +25,8 @@ fn test_algorithm() {
*keys = keys.offset(Scalar::from(offset));
}
let algo = Schnorr::new();
let algo =
Schnorr::<RecommendedTranscript>::new(RecommendedTranscript::new(b"bitcoin-serai sign test"));
let sig = sign(
&mut OsRng,
&algo,
@@ -37,7 +39,7 @@ fn test_algorithm() {
.verify_schnorr(
&Signature::from_slice(&sig)
.expect("couldn't convert produced signature to secp256k1::Signature"),
&Message::from_digest_slice(Hash::hash(MESSAGE).as_ref()).unwrap(),
&Message::from(Hash::hash(MESSAGE)),
&x_only(&keys[&Participant::new(1).unwrap()].group_key()),
)
.unwrap()

View File

@@ -4,7 +4,7 @@ use std_shims::{
io::{self, Write},
};
#[cfg(feature = "std")]
use std::io::{Read, BufReader};
use std_shims::io::Read;
use k256::{
elliptic_curve::sec1::{Tag, ToEncodedPoint},
@@ -18,11 +18,11 @@ use frost::{
};
use bitcoin::{
consensus::encode::serialize, key::TweakedPublicKey, OutPoint, ScriptBuf, TxOut, Transaction,
Block,
consensus::encode::serialize, key::TweakedPublicKey, address::Payload, OutPoint, ScriptBuf,
TxOut, Transaction, Block,
};
#[cfg(feature = "std")]
use bitcoin::{hashes::Hash, consensus::encode::Decodable, TapTweakHash};
use bitcoin::consensus::encode::Decodable;
use crate::crypto::x_only;
#[cfg(feature = "std")]
@@ -33,40 +33,12 @@ mod send;
#[cfg(feature = "std")]
pub use send::*;
/// Tweak keys to ensure they're usable with Bitcoin's Taproot upgrade.
/// Tweak keys to ensure they're usable with Bitcoin.
///
/// This adds an unspendable script path to the key, preventing any outputs received to this key
/// from being spent via a script. To have keys which have spendable script paths, further offsets
/// from this position must be used.
///
/// After adding an unspendable script path, the key is incremented until it's even. This means
/// the existence of the unspendable script path may not be provable without an understanding of
/// the algorithm used here.
/// Taproot keys, which these keys are used as, must be even. This offsets the keys until they're
/// even.
#[cfg(feature = "std")]
pub fn tweak_keys(keys: &ThresholdKeys<Secp256k1>) -> ThresholdKeys<Secp256k1> {
// Adds the unspendable script path per
// https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki#cite_note-23
let keys = {
use k256::elliptic_curve::{
bigint::{Encoding, U256},
ops::Reduce,
group::GroupEncoding,
};
let tweak_hash = TapTweakHash::hash(&keys.group_key().to_bytes().as_slice()[1 ..]);
/*
https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki#cite_ref-13-0 states how the
bias is negligible. This reduction shouldn't ever occur, yet if it did, the script path
would be unusable due to a check the script path hash is less than the order. That doesn't
impact us as we don't want the script path to be usable.
*/
keys.offset(<Secp256k1 as Ciphersuite>::F::reduce(U256::from_be_bytes(
*tweak_hash.to_raw_hash().as_ref(),
)))
};
// This doesn't risk re-introducing a script path as you'd have to find a preimage for the tweak
// hash with whatever increment, or manipulate the key so that the tweak hash and increment
// equal the desired offset, yet manipulating the key would change the tweak hash
let (_, offset) = make_even(keys.group_key());
keys.offset(Scalar::from(offset))
}
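The make_even helper referenced above isn't defined in this diff; a minimal sketch of the behavior described, assuming the k256 crate's API (not the library's actual implementation):

use k256::{elliptic_curve::sec1::ToEncodedPoint, ProjectivePoint};

// Bump the point by the generator until its y-coordinate is even, returning
// the even point alongside the number of increments applied
fn make_even(mut key: ProjectivePoint) -> (ProjectivePoint, u64) {
  let mut offset = 0;
  // A compressed SEC1 tag of 0x02 denotes an even y-coordinate
  while key.to_encoded_point(true).as_bytes()[0] != 2 {
    key += ProjectivePoint::GENERATOR;
    offset += 1;
  }
  (key, offset)
}

Since roughly half of all points are even, this loop is expected to terminate within an iteration or two.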
@@ -74,12 +46,12 @@ pub fn tweak_keys(keys: &ThresholdKeys<Secp256k1>) -> ThresholdKeys<Secp256k1> {
/// Return the Taproot address payload for a public key.
///
/// If the key is odd, this will return None.
pub fn p2tr_script_buf(key: ProjectivePoint) -> Option<ScriptBuf> {
pub fn address_payload(key: ProjectivePoint) -> Option<Payload> {
if key.to_encoded_point(true).tag() != Tag::CompressedEvenY {
return None;
}
Some(ScriptBuf::new_p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked(x_only(&key))))
Some(Payload::p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked(x_only(&key))))
}
/// A spendable output.
@@ -117,17 +89,11 @@ impl ReceivedOutput {
/// Read a ReceivedOutput from a generic satisfying Read.
#[cfg(feature = "std")]
pub fn read<R: Read>(r: &mut R) -> io::Result<ReceivedOutput> {
let offset = Secp256k1::read_F(r)?;
let output;
let outpoint;
{
let mut buf_r = BufReader::with_capacity(0, r);
output =
TxOut::consensus_decode(&mut buf_r).map_err(|_| io::Error::other("invalid TxOut"))?;
outpoint =
OutPoint::consensus_decode(&mut buf_r).map_err(|_| io::Error::other("invalid OutPoint"))?;
}
Ok(ReceivedOutput { offset, output, outpoint })
Ok(ReceivedOutput {
offset: Secp256k1::read_F(r)?,
output: TxOut::consensus_decode(r).map_err(|_| io::Error::other("invalid TxOut"))?,
outpoint: OutPoint::consensus_decode(r).map_err(|_| io::Error::other("invalid OutPoint"))?,
})
}
/// Write a ReceivedOutput to a generic satisfying Write.
@@ -158,7 +124,7 @@ impl Scanner {
/// Returns None if this key can't be scanned for.
pub fn new(key: ProjectivePoint) -> Option<Scanner> {
let mut scripts = HashMap::new();
scripts.insert(p2tr_script_buf(key)?, Scalar::ZERO);
scripts.insert(address_payload(key)?.script_pubkey(), Scalar::ZERO);
Some(Scanner { key, scripts })
}
@@ -170,17 +136,14 @@ impl Scanner {
///
/// This means offsets are surjective, not bijective, and the order offsets are registered in
/// may determine the validity of future offsets.
///
/// The offsets registered must be securely generated. Arbitrary offsets may introduce a script
/// path into the output, allowing the output to be spent by satisfaction of an arbitrary script
/// (not by the signature of the key).
pub fn register_offset(&mut self, mut offset: Scalar) -> Option<Scalar> {
// This loop will terminate as soon as an even point is found, with any point having a ~50%
// chance of being even
// That means this should terminate within a very small number of iterations
loop {
match p2tr_script_buf(self.key + (ProjectivePoint::GENERATOR * offset)) {
Some(script) => {
match address_payload(self.key + (ProjectivePoint::GENERATOR * offset)) {
Some(address) => {
let script = address.script_pubkey();
if self.scripts.contains_key(&script) {
None?;
}
@@ -203,7 +166,7 @@ impl Scanner {
res.push(ReceivedOutput {
offset: *offset,
output: output.clone(),
outpoint: OutPoint::new(tx.compute_txid(), vout),
outpoint: OutPoint::new(tx.txid(), vout),
});
}
}

View File

@@ -7,7 +7,9 @@ use thiserror::Error;
use rand_core::{RngCore, CryptoRng};
use k256::Scalar;
use transcript::{Transcript, RecommendedTranscript};
use k256::{elliptic_curve::sec1::ToEncodedPoint, Scalar};
use frost::{curve::Secp256k1, Participant, ThresholdKeys, FrostError, sign::*};
use bitcoin::{
@@ -16,12 +18,12 @@ use bitcoin::{
absolute::LockTime,
script::{PushBytesBuf, ScriptBuf},
transaction::{Version, Transaction},
OutPoint, Sequence, Witness, TxIn, Amount, TxOut,
OutPoint, Sequence, Witness, TxIn, Amount, TxOut, Address,
};
use crate::{
crypto::Schnorr,
wallet::{ReceivedOutput, p2tr_script_buf},
wallet::{ReceivedOutput, address_payload},
};
#[rustfmt::skip]
@@ -44,7 +46,7 @@ pub enum TransactionError {
#[error("fee was too low to pass the default minimum fee rate")]
TooLowFee,
#[error("not enough funds for these payments")]
NotEnoughFunds { inputs: u64, payments: u64, fee: u64 },
NotEnoughFunds,
#[error("transaction was too large")]
TooLargeTransaction,
}
@@ -59,11 +61,7 @@ pub struct SignableTransaction {
}
impl SignableTransaction {
fn calculate_weight_vbytes(
inputs: usize,
payments: &[(ScriptBuf, u64)],
change: Option<&ScriptBuf>,
) -> (u64, u64) {
fn calculate_weight(inputs: usize, payments: &[(Address, u64)], change: Option<&Address>) -> u64 {
// Expand this a full transaction in order to use the bitcoin library's weight function
let mut tx = Transaction {
version: Version(2),
@@ -88,42 +86,16 @@ impl SignableTransaction {
// The script pub key is not of a fixed size and does have to be used here
.map(|payment| TxOut {
value: Amount::from_sat(payment.1),
script_pubkey: payment.0.clone(),
script_pubkey: payment.0.script_pubkey(),
})
.collect(),
};
if let Some(change) = change {
// Use a 0 value since we're currently unsure what the change amount will be, and since
// the value is fixed size (so any value could be used here)
tx.output.push(TxOut { value: Amount::ZERO, script_pubkey: change.clone() });
tx.output.push(TxOut { value: Amount::ZERO, script_pubkey: change.script_pubkey() });
}
let weight = tx.weight();
// Now calculate the size in vbytes
/*
"Virtual transaction size" is weight ceildiv 4 per
https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki
https://github.com/bitcoin/bitcoin/blob/306ccd4927a2efe325c8d84be1bdb79edeb29b04
/src/policy/policy.cpp#L295-L298
implements this almost as expected, with an additional consideration to signature operations
Signature operations (the second argument of the following call) do not count Taproot
signatures per https://github.com/bitcoin/bips/blob/master/bip-0342.mediawiki#cite_ref-11-0
We don't risk running afoul of the Taproot signature limit as it allows at least one per
input, which is all we use
*/
(
weight.to_wu(),
u64::try_from(bitcoin::policy::get_virtual_tx_size(
i64::try_from(weight.to_wu()).unwrap(),
0i64,
))
.unwrap(),
)
u64::from(tx.weight())
}
/// Returns the fee necessary for this transaction to achieve the fee rate specified at
@@ -149,10 +121,10 @@ impl SignableTransaction {
/// If data is specified, an OP_RETURN output will be added with it.
pub fn new(
mut inputs: Vec<ReceivedOutput>,
payments: &[(ScriptBuf, u64)],
change: Option<ScriptBuf>,
payments: &[(Address, u64)],
change: Option<&Address>,
data: Option<Vec<u8>>,
fee_per_vbyte: u64,
fee_per_weight: u64,
) -> Result<SignableTransaction, TransactionError> {
if inputs.is_empty() {
Err(TransactionError::NoInputs)?;
@@ -187,7 +159,10 @@ impl SignableTransaction {
let payment_sat = payments.iter().map(|payment| payment.1).sum::<u64>();
let mut tx_outs = payments
.iter()
.map(|payment| TxOut { value: Amount::from_sat(payment.1), script_pubkey: payment.0.clone() })
.map(|payment| TxOut {
value: Amount::from_sat(payment.1),
script_pubkey: payment.0.script_pubkey(),
})
.collect::<Vec<_>>();
// Add the OP_RETURN output
@@ -201,33 +176,49 @@ impl SignableTransaction {
})
}
let (mut weight, vbytes) = Self::calculate_weight_vbytes(tx_ins.len(), payments, None);
let mut weight = Self::calculate_weight(tx_ins.len(), payments, None);
let mut needed_fee = fee_per_weight * weight;
let mut needed_fee = fee_per_vbyte * vbytes;
// "Virtual transaction size" is weight ceildiv 4 per
// https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki
// https://github.com/bitcoin/bitcoin/blob/306ccd4927a2efe325c8d84be1bdb79edeb29b04/
// src/policy/policy.cpp#L295-L298
// implements this as expected
// Technically, it takes whatever's greater, the weight or the amount of signature operations
// multiplied by DEFAULT_BYTES_PER_SIGOP (20)
// We only use 1 signature per input, and our inputs have a weight exceeding 20
// Accordingly, our inputs' weight will always be greater than the cost of the signature ops
let vsize = weight.div_ceil(4);
debug_assert_eq!(
u64::try_from(bitcoin::policy::get_virtual_tx_size(
weight.try_into().unwrap(),
tx_ins.len().try_into().unwrap()
))
.unwrap(),
vsize
);
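// Worked example (illustrative, not part of this diff): a TX weighing 561
// weight units has vsize = ceil(561 / 4) = 141 vbytes, so the minimum fee
// under DEFAULT_MIN_RELAY_TX_FEE (1000 sats per kilo-vbyte) is
// (1000 * 141) / 1000 = 141 sats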
// Technically, if there isn't change, this TX may still pay enough of a fee to pass the
// minimum fee. Such edge cases aren't worth programming when they go against intent, as the
// specified fee rate is too low to be valid
// bitcoin::policy::DEFAULT_MIN_RELAY_TX_FEE is in sats/kilo-vbyte
if needed_fee < ((u64::from(bitcoin::policy::DEFAULT_MIN_RELAY_TX_FEE) * vbytes) / 1000) {
if needed_fee < ((u64::from(bitcoin::policy::DEFAULT_MIN_RELAY_TX_FEE) * vsize) / 1000) {
Err(TransactionError::TooLowFee)?;
}
if input_sat < (payment_sat + needed_fee) {
Err(TransactionError::NotEnoughFunds {
inputs: input_sat,
payments: payment_sat,
fee: needed_fee,
})?;
Err(TransactionError::NotEnoughFunds)?;
}
// If there's a change address, check if there's change to give it
if let Some(change) = change {
let (weight_with_change, vbytes_with_change) =
Self::calculate_weight_vbytes(tx_ins.len(), payments, Some(&change));
let fee_with_change = fee_per_vbyte * vbytes_with_change;
let weight_with_change = Self::calculate_weight(tx_ins.len(), payments, Some(change));
let fee_with_change = fee_per_weight * weight_with_change;
if let Some(value) = input_sat.checked_sub(payment_sat + fee_with_change) {
if value >= DUST {
tx_outs.push(TxOut { value: Amount::from_sat(value), script_pubkey: change });
tx_outs
.push(TxOut { value: Amount::from_sat(value), script_pubkey: change.script_pubkey() });
weight = weight_with_change;
needed_fee = fee_with_change;
}
@@ -257,28 +248,54 @@ impl SignableTransaction {
/// Returns the TX ID of the transaction this will create.
pub fn txid(&self) -> [u8; 32] {
let mut res = self.tx.compute_txid().to_byte_array();
let mut res = self.tx.txid().to_byte_array();
res.reverse();
res
}
/// Returns the transaction, sans witness, this will create if signed.
pub fn transaction(&self) -> &Transaction {
&self.tx
/// Returns the outputs this transaction will create.
pub fn outputs(&self) -> &[TxOut] {
&self.tx.output
}
/// Create a multisig machine for this transaction.
///
/// Returns None if the wrong keys are used.
pub fn multisig(self, keys: &ThresholdKeys<Secp256k1>) -> Option<TransactionMachine> {
pub fn multisig(
self,
keys: &ThresholdKeys<Secp256k1>,
mut transcript: RecommendedTranscript,
) -> Option<TransactionMachine> {
transcript.domain_separate(b"bitcoin_transaction");
transcript.append_message(b"root_key", keys.group_key().to_encoded_point(true).as_bytes());
// Transcript the inputs and outputs
let tx = &self.tx;
for input in &tx.input {
transcript.append_message(b"input_hash", input.previous_output.txid);
transcript.append_message(b"input_output_index", input.previous_output.vout.to_le_bytes());
}
for payment in &tx.output {
transcript.append_message(b"output_script", payment.script_pubkey.as_bytes());
transcript.append_message(b"output_amount", payment.value.to_sat().to_le_bytes());
}
let mut sigs = vec![];
for i in 0 .. self.tx.input.len() {
for i in 0 .. tx.input.len() {
let mut transcript = transcript.clone();
// This unwrap is safe since any transaction with this many inputs violates the maximum
// size allowed under standards, which this lib errors on at creation time
transcript.append_message(b"signing_input", u32::try_from(i).unwrap().to_le_bytes());
let offset = keys.clone().offset(self.offsets[i]);
if p2tr_script_buf(offset.group_key())? != self.prevouts[i].script_pubkey {
if address_payload(offset.group_key())?.script_pubkey() != self.prevouts[i].script_pubkey {
None?;
}
sigs.push(AlgorithmMachine::new(Schnorr::new(), keys.clone().offset(self.offsets[i])));
sigs.push(AlgorithmMachine::new(
Schnorr::new(transcript),
keys.clone().offset(self.offsets[i]),
));
}
Some(TransactionMachine { tx: self, sigs })
@@ -291,7 +308,7 @@ impl SignableTransaction {
/// This will panic if either `cache` is called or the message isn't empty.
pub struct TransactionMachine {
tx: SignableTransaction,
sigs: Vec<AlgorithmMachine<Secp256k1, Schnorr>>,
sigs: Vec<AlgorithmMachine<Secp256k1, Schnorr<RecommendedTranscript>>>,
}
impl PreprocessMachine for TransactionMachine {
@@ -320,7 +337,7 @@ impl PreprocessMachine for TransactionMachine {
pub struct TransactionSignMachine {
tx: SignableTransaction,
sigs: Vec<AlgorithmSignMachine<Secp256k1, Schnorr>>,
sigs: Vec<AlgorithmSignMachine<Secp256k1, Schnorr<RecommendedTranscript>>>,
}
impl SignMachine<Transaction> for TransactionSignMachine {
@@ -400,7 +417,7 @@ impl SignMachine<Transaction> for TransactionSignMachine {
pub struct TransactionSignatureMachine {
tx: Transaction,
sigs: Vec<AlgorithmSignatureMachine<Secp256k1, Schnorr>>,
sigs: Vec<AlgorithmSignatureMachine<Secp256k1, Schnorr<RecommendedTranscript>>>,
}
impl SignatureMachine<Transaction> for TransactionSignatureMachine {

View File

@@ -1,11 +1,14 @@
use std::sync::LazyLock;
use std::sync::OnceLock;
use bitcoin_serai::rpc::Rpc;
use tokio::sync::Mutex;
#[allow(dead_code)]
pub(crate) static SEQUENTIAL: LazyLock<Mutex<()>> = LazyLock::new(|| Mutex::new(()));
static SEQUENTIAL_CELL: OnceLock<Mutex<()>> = OnceLock::new();
#[allow(non_snake_case)]
pub fn SEQUENTIAL() -> &'static Mutex<()> {
SEQUENTIAL_CELL.get_or_init(|| Mutex::new(()))
}
#[allow(dead_code)]
pub(crate) async fn rpc() -> Rpc {
@@ -31,7 +34,7 @@ macro_rules! async_sequential {
$(
#[tokio::test]
async fn $name() {
let guard = runner::SEQUENTIAL.lock().await;
let guard = runner::SEQUENTIAL().lock().await;
let local = tokio::task::LocalSet::new();
local.run_until(async move {
if let Err(err) = tokio::task::spawn_local(async move { $body }).await {

View File

@@ -2,6 +2,8 @@ use std::collections::HashMap;
use rand_core::{RngCore, OsRng};
use transcript::{Transcript, RecommendedTranscript};
use k256::{
elliptic_curve::{
group::{ff::Field, Group},
@@ -20,10 +22,11 @@ use bitcoin_serai::{
hashes::Hash as HashTrait,
blockdata::opcodes::all::OP_RETURN,
script::{PushBytesBuf, Instruction, Instructions, Script},
address::NetworkChecked,
OutPoint, Amount, TxOut, Transaction, Network, Address,
},
wallet::{
tweak_keys, p2tr_script_buf, ReceivedOutput, Scanner, TransactionError, SignableTransaction,
tweak_keys, address_payload, ReceivedOutput, Scanner, TransactionError, SignableTransaction,
},
rpc::Rpc,
};
@@ -45,7 +48,7 @@ async fn send_and_get_output(rpc: &Rpc, scanner: &Scanner, key: ProjectivePoint)
"generatetoaddress",
serde_json::json!([
1,
Address::from_script(&p2tr_script_buf(key).unwrap(), Network::Regtest).unwrap()
Address::<NetworkChecked>::new(Network::Regtest, address_payload(key).unwrap())
]),
)
.await
@@ -66,7 +69,7 @@ async fn send_and_get_output(rpc: &Rpc, scanner: &Scanner, key: ProjectivePoint)
assert_eq!(outputs, scanner.scan_transaction(&block.txdata[0]));
assert_eq!(outputs.len(), 1);
assert_eq!(outputs[0].outpoint(), &OutPoint::new(block.txdata[0].compute_txid(), 0));
assert_eq!(outputs[0].outpoint(), &OutPoint::new(block.txdata[0].txid(), 0));
assert_eq!(outputs[0].value(), block.txdata[0].output[0].value.to_sat());
assert_eq!(
@@ -92,11 +95,46 @@ fn sign(
) -> Transaction {
let mut machines = HashMap::new();
for i in (1 ..= THRESHOLD).map(|i| Participant::new(i).unwrap()) {
machines.insert(i, tx.clone().multisig(&keys[&i].clone()).unwrap());
machines.insert(
i,
tx.clone()
.multisig(&keys[&i].clone(), RecommendedTranscript::new(b"bitcoin-serai Test Transaction"))
.unwrap(),
);
}
sign_without_caching(&mut OsRng, machines, &[])
}
#[test]
fn test_tweak_keys() {
let mut even = false;
let mut odd = false;
// Generate keys until we get an even set and an odd set
while !(even && odd) {
let mut keys = key_gen(&mut OsRng).drain().next().unwrap().1;
if is_even(keys.group_key()) {
// Tweaking should do nothing
assert_eq!(tweak_keys(&keys).group_key(), keys.group_key());
even = true;
} else {
let tweaked = tweak_keys(&keys).group_key();
assert_ne!(tweaked, keys.group_key());
// Tweaking should produce an even key
assert!(is_even(tweaked));
// Verify it uses the smallest possible offset
while keys.group_key().to_encoded_point(true).tag() == Tag::CompressedOddY {
keys = keys.offset(Scalar::ONE);
}
assert_eq!(tweaked, keys.group_key());
odd = true;
}
}
}
async_sequential! {
async fn test_scanner() {
// Test Scanners are creatable for even keys.
@@ -155,7 +193,7 @@ async_sequential! {
assert_eq!(output.offset(), Scalar::ZERO);
let inputs = vec![output];
let addr = || p2tr_script_buf(key).unwrap();
let addr = || Address::<NetworkChecked>::new(Network::Regtest, address_payload(key).unwrap());
let payments = vec![(addr(), 1000)];
assert!(SignableTransaction::new(inputs.clone(), &payments, None, None, FEE).is_ok());
@@ -168,7 +206,7 @@ async_sequential! {
// No change
assert!(SignableTransaction::new(inputs.clone(), &[(addr(), 1000)], None, None, FEE).is_ok());
// Consolidation TX
assert!(SignableTransaction::new(inputs.clone(), &[], Some(addr()), None, FEE).is_ok());
assert!(SignableTransaction::new(inputs.clone(), &[], Some(&addr()), None, FEE).is_ok());
// Data
assert!(SignableTransaction::new(inputs.clone(), &[], None, Some(vec![]), FEE).is_ok());
// No outputs
@@ -191,14 +229,14 @@ async_sequential! {
);
assert_eq!(
SignableTransaction::new(inputs.clone(), &[], Some(addr()), None, 0),
SignableTransaction::new(inputs.clone(), &[], Some(&addr()), None, 0),
Err(TransactionError::TooLowFee),
);
assert!(matches!(
assert_eq!(
SignableTransaction::new(inputs.clone(), &[(addr(), inputs[0].value() * 2)], None, None, FEE),
Err(TransactionError::NotEnoughFunds { .. }),
));
Err(TransactionError::NotEnoughFunds),
);
assert_eq!(
SignableTransaction::new(inputs, &vec![(addr(), 1000); 10000], None, None, FEE),
@@ -223,19 +261,20 @@ async_sequential! {
// Declare payments, change, fee
let payments = [
(p2tr_script_buf(key).unwrap(), 1005),
(p2tr_script_buf(offset_key).unwrap(), 1007)
(Address::<NetworkChecked>::new(Network::Regtest, address_payload(key).unwrap()), 1005),
(Address::<NetworkChecked>::new(Network::Regtest, address_payload(offset_key).unwrap()), 1007)
];
let change_offset = scanner.register_offset(Scalar::random(&mut OsRng)).unwrap();
let change_key = key + (ProjectivePoint::GENERATOR * change_offset);
let change_addr = p2tr_script_buf(change_key).unwrap();
let change_addr =
Address::<NetworkChecked>::new(Network::Regtest, address_payload(change_key).unwrap());
// Create and sign the TX
let tx = SignableTransaction::new(
vec![output.clone(), offset_output.clone()],
&payments,
Some(change_addr.clone()),
Some(&change_addr),
None,
FEE
).unwrap();
@@ -248,7 +287,7 @@ async_sequential! {
// Ensure we can scan it
let outputs = scanner.scan_transaction(&tx);
for (o, output) in outputs.iter().enumerate() {
assert_eq!(output.outpoint(), &OutPoint::new(tx.compute_txid(), u32::try_from(o).unwrap()));
assert_eq!(output.outpoint(), &OutPoint::new(tx.txid(), u32::try_from(o).unwrap()));
assert_eq!(&ReceivedOutput::read::<&[u8]>(&mut output.serialize().as_ref()).unwrap(), output);
}
@@ -260,13 +299,13 @@ async_sequential! {
for ((output, scanned), payment) in tx.output.iter().zip(outputs.iter()).zip(payments.iter()) {
assert_eq!(
output,
&TxOut { script_pubkey: payment.0.clone(), value: Amount::from_sat(payment.1) },
&TxOut { script_pubkey: payment.0.script_pubkey(), value: Amount::from_sat(payment.1) },
);
assert_eq!(scanned.value(), payment.1);
}
// Make sure the change is correct
assert_eq!(needed_fee, u64::try_from(tx.vsize()).unwrap() * FEE);
assert_eq!(needed_fee, u64::from(tx.weight()) * FEE);
let input_value = output.value() + offset_output.value();
let output_value = tx.output.iter().map(|output| output.value.to_sat()).sum::<u64>();
assert_eq!(input_value - output_value, needed_fee);
@@ -275,13 +314,13 @@ async_sequential! {
input_value - payments.iter().map(|payment| payment.1).sum::<u64>() - needed_fee;
assert_eq!(
tx.output[2],
TxOut { script_pubkey: change_addr, value: Amount::from_sat(change_amount) },
TxOut { script_pubkey: change_addr.script_pubkey(), value: Amount::from_sat(change_amount) },
);
// This also tests send_raw_transaction and get_transaction, which the RPC test can't
// effectively test
rpc.send_raw_transaction(&tx).await.unwrap();
let mut hash = *tx.compute_txid().as_raw_hash().as_byte_array();
let mut hash = *tx.txid().as_raw_hash().as_byte_array();
hash.reverse();
assert_eq!(tx, rpc.get_transaction(&hash).await.unwrap());
assert_eq!(expected_id, hash);
@@ -305,7 +344,7 @@ async_sequential! {
&SignableTransaction::new(
vec![output],
&[],
Some(p2tr_script_buf(key).unwrap()),
Some(&Address::<NetworkChecked>::new(Network::Regtest, address_payload(key).unwrap())),
Some(data.clone()),
FEE
).unwrap()

coins/ethereum/.gitignore vendored Normal file
View File

@@ -0,0 +1,3 @@
# Solidity build outputs
cache
artifacts

coins/ethereum/Cargo.toml Normal file
View File

@@ -0,0 +1,46 @@
[package]
name = "ethereum-serai"
version = "0.1.0"
description = "An Ethereum library supporting Schnorr signing and on-chain verification"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coins/ethereum"
authors = ["Luke Parker <lukeparker5132@gmail.com>", "Elizabeth Binks <elizabethjbinks@gmail.com>"]
edition = "2021"
publish = false
rust-version = "1.74"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
thiserror = { version = "1", default-features = false }
rand_core = { version = "0.6", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["recommended"] }
group = { version = "0.13", default-features = false }
k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa", "arithmetic"] }
frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["secp256k1"] }
alloy-core = { version = "0.7", default-features = false }
alloy-sol-types = { version = "0.7", default-features = false, features = ["json"] }
alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false, features = ["k256"] }
alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
alloy-simple-request-transport = { path = "./alloy-simple-request-transport", default-features = false }
alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
[dev-dependencies]
frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["tests"] }
tokio = { version = "1", features = ["macros"] }
alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
[features]
tests = []

View File

@@ -1,6 +1,6 @@
AGPL-3.0-only license
Copyright (c) 2023-2025 Luke Parker
Copyright (c) 2022-2023 Luke Parker
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as

coins/ethereum/README.md Normal file
View File

@@ -0,0 +1,15 @@
# Ethereum
This package contains Ethereum-related functionality, specifically deploying and
interacting with Serai contracts.
While `monero-serai` and `bitcoin-serai` are general purpose libraries,
`ethereum-serai` is Serai specific. If any of the utilities are generally
desired, please fork and maintain your own copy to ensure the desired
functionality is preserved, or open an issue to request we make this library
general purpose.
### Dependencies
- solc
- [Foundry](https://github.com/foundry-rs/foundry)

View File

@@ -0,0 +1,29 @@
[package]
name = "alloy-simple-request-transport"
version = "0.1.0"
description = "A transport for alloy based off simple-request"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/coins/ethereum/alloy-simple-request-transport"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.74"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
tower = "0.4"
serde_json = { version = "1", default-features = false }
simple-request = { path = "../../../common/request", default-features = false }
alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
[features]
default = ["tls"]
tls = ["simple-request/tls"]

coins/ethereum/build.rs Normal file
View File

@@ -0,0 +1,41 @@
use std::process::Command;
fn main() {
println!("cargo:rerun-if-changed=contracts/*");
println!("cargo:rerun-if-changed=artifacts/*");
for line in String::from_utf8(Command::new("solc").args(["--version"]).output().unwrap().stdout)
.unwrap()
.lines()
{
if let Some(version) = line.strip_prefix("Version: ") {
let version = version.split('+').next().unwrap();
assert_eq!(version, "0.8.25");
}
}
#[rustfmt::skip]
let args = [
"--base-path", ".",
"-o", "./artifacts", "--overwrite",
"--bin", "--abi",
"--via-ir", "--optimize",
"./contracts/IERC20.sol",
"./contracts/Schnorr.sol",
"./contracts/Deployer.sol",
"./contracts/Sandbox.sol",
"./contracts/Router.sol",
"./src/tests/contracts/Schnorr.sol",
"./src/tests/contracts/ERC20.sol",
"--no-color",
];
let solc = Command::new("solc").args(args).output().unwrap();
assert!(solc.status.success());
for line in String::from_utf8(solc.stderr).unwrap().lines() {
assert!(!line.starts_with("Error:"));
}
}

View File

@@ -0,0 +1,52 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;
/*
The expected deployment process of the Router is as follows:
1) A transaction deploying Deployer is made. Then, a deterministic signature is
created such that an account with an unknown private key is the creator of
the contract. Anyone can fund this address, and once anyone does, the
transaction deploying Deployer can be published by anyone. No other
transaction may be made from that account.
2) Anyone deploys the Router through the Deployer. This uses a sequential nonce
such that meet-in-the-middle attacks, with complexity 2**80, aren't feasible.
While such attacks would still be feasible if the Deployer's address was
controllable, the usage of a deterministic signature with a NUMS method
prevents that.
This doesn't have any denial-of-service risks and will resolve once anyone steps
forward as deployer. This does fail to guarantee an identical address across
every chain, though it does let anyone efficiently ask the Deployer for
the address (with the Deployer having an identical address on every chain).
Unfortunately, guaranteeing identical addresses isn't feasible. We'd need the
Deployer contract to use a consistent salt for the Router, yet the Router must
be deployed with a specific public key for Serai. Since Ethereum isn't able to
determine a valid public key (one the result of a Serai DKG) from a dishonest
public key, we have to allow multiple deployments with Serai being the one to
determine which to use.
The alternative would be to have a council publish the Serai key on-Ethereum,
with Serai verifying the published result. This would introduce a DoS risk in
the council not publishing the correct key/not publishing any key.
*/
contract Deployer {
event Deployment(bytes32 indexed init_code_hash, address created);
error DeploymentFailed();
function deploy(bytes memory init_code) external {
address created;
assembly {
created := create(0, add(init_code, 0x20), mload(init_code))
}
if (created == address(0)) {
revert DeploymentFailed();
}
// These may be emitted out of order upon re-entrancy
emit Deployment(keccak256(init_code), created);
}
}

View File

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: CC0
pragma solidity ^0.8.26;
pragma solidity ^0.8.0;
interface IERC20 {
event Transfer(address indexed from, address indexed to, uint256 value);
@@ -18,17 +18,3 @@ interface IERC20 {
function approve(address spender, uint256 value) external returns (bool);
function allowance(address owner, address spender) external view returns (uint256);
}
interface SeraiIERC20 {
function transferWithInInstruction01BB244A8A(
address to,
uint256 value,
bytes calldata inInstruction
) external returns (bool);
function transferFromWithInInstruction00081948E0(
address from,
address to,
uint256 value,
bytes calldata inInstruction
) external returns (bool);
}

View File

@@ -0,0 +1,222 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;
import "./IERC20.sol";
import "./Schnorr.sol";
import "./Sandbox.sol";
contract Router {
// Nonce is incremented for each batch of transactions executed/key update
uint256 public nonce;
// Current public key's x-coordinate
// This key must always have the parity defined within the Schnorr contract
bytes32 public seraiKey;
struct OutInstruction {
address to;
Call[] calls;
uint256 value;
}
struct Signature {
bytes32 c;
bytes32 s;
}
event SeraiKeyUpdated(
uint256 indexed nonce,
bytes32 indexed key,
Signature signature
);
event InInstruction(
address indexed from,
address indexed coin,
uint256 amount,
bytes instruction
);
// success is a uint256 representing a bitfield of transaction successes
event Executed(
uint256 indexed nonce,
bytes32 indexed batch,
uint256 success,
Signature signature
);
// error types
error InvalidKey();
error InvalidSignature();
error InvalidAmount();
error FailedTransfer();
error TooManyTransactions();
modifier _updateSeraiKeyAtEndOfFn(
uint256 _nonce,
bytes32 key,
Signature memory sig
) {
if (
(key == bytes32(0)) ||
((bytes32(uint256(key) % Schnorr.Q)) != key)
) {
revert InvalidKey();
}
_;
seraiKey = key;
emit SeraiKeyUpdated(_nonce, key, sig);
}
constructor(bytes32 _seraiKey) _updateSeraiKeyAtEndOfFn(
0,
_seraiKey,
Signature({ c: bytes32(0), s: bytes32(0) })
) {
nonce = 1;
}
// updateSeraiKey validates the given Schnorr signature against the current
// public key, and if successful, updates the contract's public key to the
// given one.
function updateSeraiKey(
bytes32 _seraiKey,
Signature calldata sig
) external _updateSeraiKeyAtEndOfFn(nonce, _seraiKey, sig) {
bytes memory message =
abi.encodePacked("updateSeraiKey", block.chainid, nonce, _seraiKey);
nonce++;
if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) {
revert InvalidSignature();
}
}
function inInstruction(
address coin,
uint256 amount,
bytes memory instruction
) external payable {
if (coin == address(0)) {
if (amount != msg.value) {
revert InvalidAmount();
}
} else {
(bool success, bytes memory res) =
address(coin).call(
abi.encodeWithSelector(
IERC20.transferFrom.selector,
msg.sender,
address(this),
amount
)
);
// Require there was nothing returned, which is done by some non-standard
// tokens, or that the ERC20 contract did in fact return true
bool nonStandardResOrTrue =
(res.length == 0) || abi.decode(res, (bool));
if (!(success && nonStandardResOrTrue)) {
revert FailedTransfer();
}
}
/*
Due to fee-on-transfer tokens, emitting the amount directly is frowned upon.
The amount instructed to transfer may not actually be the amount
transferred.
If we add nonReentrant to every single function which can affect the
balance, we can check the amount exactly matches. This prevents transfers of
less value than expected occurring, at least, not without an additional
transfer to top up the difference (which isn't routed through this contract
and accordingly isn't trying to artificially create events).
If we don't add nonReentrant, a transfer can be started, and then a new
transfer for the difference can follow it up (again and again until a
rounding error is reached). This contract would believe all transfers were
done in full, despite each only being done in part (except for the last
one).
Given that fee-on-transfer tokens aren't intended to be supported (the only
token planned to be supported is Dai, which doesn't have any fee-on-transfer
logic), and that fee-on-transfer tokens aren't even able to be supported at
this time, we simply classify this entire class of tokens as non-standard
implementations which induce undefined behavior. It is the Serai network's
role not to add support for any non-standard implementations.
*/
emit InInstruction(msg.sender, coin, amount, instruction);
}
// execute accepts a list of transactions to execute as well as a signature.
// if signature verification passes, the given transactions are executed.
// if signature verification fails, this function will revert.
function execute(
OutInstruction[] calldata transactions,
Signature calldata sig
) external {
if (transactions.length > 256) {
revert TooManyTransactions();
}
bytes memory message =
abi.encode("execute", block.chainid, nonce, transactions);
uint256 executed_with_nonce = nonce;
// This prevents re-entrancy from causing double spends yet does allow
// out-of-order execution via re-entrancy
nonce++;
if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) {
revert InvalidSignature();
}
uint256 successes;
for (uint256 i = 0; i < transactions.length; i++) {
bool success;
// If there are no calls, send to `to` the value
if (transactions[i].calls.length == 0) {
(success, ) = transactions[i].to.call{
value: transactions[i].value,
gas: 5_000
}("");
} else {
// If there are calls, ignore `to`. Deploy a new Sandbox and proxy the
// calls through that
//
// We could use a single sandbox in order to reduce gas costs, yet that
// risks one person creating an approval that's hooked before another
// user's intended action executes, in order to drain their coins
//
// While technically, that would be a flaw in the sandboxed flow, this
// is robust and prevents such flaws from being possible
//
// We also don't want people to set state via the Sandbox and expect it to
// be available in the future when anyone else could set a distinct value
Sandbox sandbox = new Sandbox();
(success, ) = address(sandbox).call{
value: transactions[i].value,
// TODO: Have the Call specify the gas up front
gas: 350_000
}(
abi.encodeWithSelector(
Sandbox.sandbox.selector,
transactions[i].calls
)
);
}
assembly {
successes := or(successes, shl(i, success))
}
}
emit Executed(
executed_with_nonce,
keccak256(message),
successes,
sig
);
}
}
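Since Executed packs per-transaction results into a uint256 bitfield, an off-chain consumer checks bit i for transaction i. A minimal sketch, assuming alloy's U256 (this helper isn't part of the PR):

use alloy_core::primitives::U256;

// Transaction i in the batch succeeded iff bit i of `successes` is set
fn tx_succeeded(successes: U256, i: usize) -> bool {
  assert!(i < 256, "batches are capped at 256 transactions");
  successes.bit(i)
}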

View File

@@ -0,0 +1,48 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.24;
struct Call {
address to;
uint256 value;
bytes data;
}
// A minimal sandbox focused on gas efficiency.
//
// The first call is executed if any of the calls fail, making it a fallback.
// All other calls are executed sequentially.
contract Sandbox {
error AlreadyCalled();
error CallsFailed();
function sandbox(Call[] calldata calls) external payable {
// Prevent re-entrancy due to this executing arbitrary calls from anyone
// and anywhere
bool called;
assembly { called := tload(0) }
if (called) {
revert AlreadyCalled();
}
assembly { tstore(0, 1) }
// Execute the calls, starting from 1
for (uint256 i = 1; i < calls.length; i++) {
(bool success, ) =
calls[i].to.call{ value: calls[i].value }(calls[i].data);
// If this call failed, execute the fallback (call 0)
if (!success) {
(success, ) =
calls[0].to.call{ value: address(this).balance }(calls[0].data);
// If this call also failed, revert entirely
if (!success) {
revert CallsFailed();
}
return;
}
}
// We don't clear the re-entrancy guard as this contract should never be
// called again, so there's no reason to spend the effort
}
}

View File

@@ -0,0 +1,44 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;
// see https://github.com/noot/schnorr-verify for implementation details
library Schnorr {
// secp256k1 group order
uint256 constant public Q =
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141;
// Fixed parity for the public keys used in this contract
// This avoids spending a word passing the parity in a similar style to
// Bitcoin's Taproot
uint8 constant public KEY_PARITY = 27;
error InvalidSOrA();
error MalformedSignature();
// px := public key x-coord, where the public key has a parity of KEY_PARITY
// message := 32-byte hash of the message
// c := schnorr signature challenge
// s := schnorr signature
function verify(
bytes32 px,
bytes memory message,
bytes32 c,
bytes32 s
) internal pure returns (bool) {
// ecrecover = (m, v, r, s) -> key
// We instead pass the following to obtain the nonce (not the key)
// Then we hash it and verify it matches the challenge
bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q));
// For safety, we want each input to ecrecover (sa, px, ca) to be non-zero
// The ecrecover precompile checks `r` and `s` (`px` and `ca`) are non-zero
// That leaves us to check `sa` is non-zero
if (sa == 0) revert InvalidSOrA();
address R = ecrecover(sa, KEY_PARITY, px, ca);
if (R == address(0)) revert MalformedSignature();
// Check the signature is correct by rebuilding the challenge
return c == keccak256(abi.encodePacked(R, px, message));
}
}
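For reference, the algebra behind the ecrecover trick (standard ECDSA recovery, with every scalar mod Q): ecrecover(m, v, r, s') returns addr(r^-1 * (s' * R - m * G)), where R is the point with x-coordinate r and the parity encoded by v. Passing m = sa = -s*px, r = px, and s' = ca = -c*px, with the parity pinned to KEY_PARITY so the point with x-coordinate px is the public key A, yields

  addr(px^-1 * ((-c*px) * A - (-s*px) * G)) = addr(s*G - c*A)

which is the address of the Schnorr nonce commitment. Rebuilding the challenge as keccak256(addr(nonce) || px || message) and comparing it to c completes verification.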

View File

@@ -0,0 +1,37 @@
use alloy_sol_types::sol;
#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod erc20_container {
use super::*;
sol!("contracts/IERC20.sol");
}
pub use erc20_container::IERC20 as erc20;
#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod deployer_container {
use super::*;
sol!("contracts/Deployer.sol");
}
pub use deployer_container::Deployer as deployer;
#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod router_container {
use super::*;
sol!(Router, "artifacts/Router.abi");
}
pub use router_container::Router as router;

View File

@@ -0,0 +1,185 @@
use group::ff::PrimeField;
use k256::{
elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint},
ProjectivePoint, Scalar, U256 as KU256,
};
#[cfg(test)]
use k256::{elliptic_curve::point::DecompressPoint, AffinePoint};
use frost::{
algorithm::{Hram, SchnorrSignature},
curve::{Ciphersuite, Secp256k1},
};
use alloy_core::primitives::{Parity, Signature as AlloySignature};
use alloy_consensus::{SignableTransaction, Signed, TxLegacy};
use crate::abi::router::{Signature as AbiSignature};
pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] {
alloy_core::primitives::keccak256(data).into()
}
pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar {
<Scalar as Reduce<KU256>>::reduce_bytes(&keccak256(data).into())
}
pub fn address(point: &ProjectivePoint) -> [u8; 20] {
let encoded_point = point.to_encoded_point(false);
// Last 20 bytes of the hash of the concatenated x and y coordinates
// We obtain the concatenated x and y coordinates via the uncompressed encoding of the point
keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap()
}
pub(crate) fn deterministically_sign(tx: &TxLegacy) -> Signed<TxLegacy> {
assert!(
tx.chain_id.is_none(),
"chain ID was Some when deterministically signing a TX (causing a non-deterministic signer)"
);
let sig_hash = tx.signature_hash().0;
let mut r = hash_to_scalar(&[sig_hash.as_slice(), b"r"].concat());
let mut s = hash_to_scalar(&[sig_hash.as_slice(), b"s"].concat());
loop {
let r_bytes: [u8; 32] = r.to_repr().into();
let s_bytes: [u8; 32] = s.to_repr().into();
let v = Parity::NonEip155(false);
let signature =
AlloySignature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), v).unwrap();
let tx = tx.clone().into_signed(signature);
if tx.recover_signer().is_ok() {
return tx;
}
// Re-hash until valid
r = hash_to_scalar(r_bytes.as_ref());
s = hash_to_scalar(s_bytes.as_ref());
}
}
/// The public key for a Schnorr-signing account.
#[allow(non_snake_case)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct PublicKey {
pub(crate) A: ProjectivePoint,
pub(crate) px: Scalar,
}
impl PublicKey {
/// Construct a new `PublicKey`.
///
/// This will return None if the provided point isn't eligible to be a public key (due to
/// bounds such as parity).
#[allow(non_snake_case)]
pub fn new(A: ProjectivePoint) -> Option<PublicKey> {
let affine = A.to_affine();
// Only allow even keys to save a word within Ethereum
let is_odd = bool::from(affine.y_is_odd());
if is_odd {
None?;
}
let x_coord = affine.x();
let x_coord_scalar = <Scalar as Reduce<KU256>>::reduce_bytes(&x_coord);
// Return None if a reduction would occur
// Reductions would be incredibly unlikely and shouldn't be an issue, yet it's one less
// headache/concern to have
// This does ban a trivial amount of public keys
if x_coord_scalar.to_repr() != x_coord {
None?;
}
Some(PublicKey { A, px: x_coord_scalar })
}
pub fn point(&self) -> ProjectivePoint {
self.A
}
pub(crate) fn eth_repr(&self) -> [u8; 32] {
self.px.to_repr().into()
}
#[cfg(test)]
pub(crate) fn from_eth_repr(repr: [u8; 32]) -> Option<Self> {
#[allow(non_snake_case)]
let A = Option::<AffinePoint>::from(AffinePoint::decompress(&repr.into(), 0.into()))?.into();
Option::from(Scalar::from_repr(repr.into())).map(|px| PublicKey { A, px })
}
}
/// The HRAm to use for the Schnorr contract.
#[derive(Clone, Default)]
pub struct EthereumHram {}
impl Hram<Secp256k1> for EthereumHram {
#[allow(non_snake_case)]
fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar {
let x_coord = A.to_affine().x();
let mut data = address(R).to_vec();
data.extend(x_coord.as_slice());
data.extend(m);
<Scalar as Reduce<KU256>>::reduce_bytes(&keccak256(&data).into())
}
}
/// A signature for the Schnorr contract.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Signature {
pub(crate) c: Scalar,
pub(crate) s: Scalar,
}
impl Signature {
pub fn verify(&self, public_key: &PublicKey, message: &[u8]) -> bool {
#[allow(non_snake_case)]
let R = (Secp256k1::generator() * self.s) - (public_key.A * self.c);
EthereumHram::hram(&R, &public_key.A, message) == self.c
}
/// Construct a new `Signature`.
///
/// This will return None if the signature is invalid.
pub fn new(
public_key: &PublicKey,
message: &[u8],
signature: SchnorrSignature<Secp256k1>,
) -> Option<Signature> {
let c = EthereumHram::hram(&signature.R, &public_key.A, message);
if !signature.verify(public_key.A, c) {
None?;
}
let res = Signature { c, s: signature.s };
assert!(res.verify(public_key, message));
Some(res)
}
pub fn c(&self) -> Scalar {
self.c
}
pub fn s(&self) -> Scalar {
self.s
}
pub fn to_bytes(&self) -> [u8; 64] {
let mut res = [0; 64];
res[.. 32].copy_from_slice(self.c.to_repr().as_ref());
res[32 ..].copy_from_slice(self.s.to_repr().as_ref());
res
}
pub fn from_bytes(bytes: [u8; 64]) -> std::io::Result<Self> {
let mut reader = bytes.as_slice();
let c = Secp256k1::read_F(&mut reader)?;
let s = Secp256k1::read_F(&mut reader)?;
Ok(Signature { c, s })
}
}
impl From<&Signature> for AbiSignature {
fn from(sig: &Signature) -> AbiSignature {
let c: [u8; 32] = sig.c.to_repr().into();
let s: [u8; 32] = sig.s.to_repr().into();
AbiSignature { c: c.into(), s: s.into() }
}
}

View File

@@ -0,0 +1,119 @@
use std::sync::Arc;
use alloy_core::primitives::{hex::FromHex, Address, B256, U256, Bytes, TxKind};
use alloy_consensus::{Signed, TxLegacy};
use alloy_sol_types::{SolCall, SolEvent};
use alloy_rpc_types::{BlockNumberOrTag, Filter};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};
use crate::{
Error,
crypto::{self, keccak256, PublicKey},
router::Router,
};
pub use crate::abi::deployer as abi;
/// The Deployer contract for the Router contract.
///
/// This Deployer has a deterministic address, letting it be immediately identified on any
/// compatible chain. It then supports retrieving the Router contract's address (which isn't
/// deterministic) using a single log query.
#[derive(Clone, Debug)]
pub struct Deployer;
impl Deployer {
/// Obtain the transaction to deploy this contract, already signed.
///
/// The account this transaction is sent from (which is populated in `from`) must be sufficiently
/// funded for this transaction to be submitted. This account has no private key known to anyone,
/// so ETH sent can be neither misappropriated nor returned.
pub fn deployment_tx() -> Signed<TxLegacy> {
let bytecode = include_str!("../artifacts/Deployer.bin");
let bytecode =
Bytes::from_hex(bytecode).expect("compiled-in Deployer bytecode wasn't valid hex");
let tx = TxLegacy {
chain_id: None,
nonce: 0,
gas_price: 100_000_000_000u128,
// TODO: Use a more accurate gas limit
gas_limit: 1_000_000u128,
to: TxKind::Create,
value: U256::ZERO,
input: bytecode,
};
crypto::deterministically_sign(&tx)
}
/// Obtain the deterministic address for this contract.
pub fn address() -> [u8; 20] {
let deployer_deployer =
Self::deployment_tx().recover_signer().expect("deployment_tx didn't have a valid signature");
**Address::create(&deployer_deployer, 0)
}
/// Construct a new view of the `Deployer`.
pub async fn new(provider: Arc<RootProvider<SimpleRequest>>) -> Result<Option<Self>, Error> {
let address = Self::address();
#[cfg(not(test))]
let required_block = BlockNumberOrTag::Finalized;
#[cfg(test)]
let required_block = BlockNumberOrTag::Latest;
let code = provider
.get_code_at(address.into(), required_block.into())
.await
.map_err(|_| Error::ConnectionError)?;
// Contract has yet to be deployed
if code.is_empty() {
return Ok(None);
}
Ok(Some(Self))
}
/// Yield the `ContractCall` necessary to deploy the Router.
pub fn deploy_router(&self, key: &PublicKey) -> TxLegacy {
TxLegacy {
to: TxKind::Call(Self::address().into()),
input: abi::deployCall::new((Router::init_code(key).into(),)).abi_encode().into(),
gas_limit: 1_000_000,
..Default::default()
}
}
/// Find the first Router deployed with the specified key as its first key.
///
/// This is the Router Serai will use, and is the only way to construct a `Router`.
pub async fn find_router(
&self,
provider: Arc<RootProvider<SimpleRequest>>,
key: &PublicKey,
) -> Result<Option<Router>, Error> {
let init_code = Router::init_code(key);
let init_code_hash = keccak256(&init_code);
#[cfg(not(test))]
let to_block = BlockNumberOrTag::Finalized;
#[cfg(test)]
let to_block = BlockNumberOrTag::Latest;
// Find the first log using this init code (where the init code is binding to the key)
let filter =
Filter::new().from_block(0).to_block(to_block).address(Address::from(Self::address()));
let filter = filter.event_signature(abi::Deployment::SIGNATURE_HASH);
let filter = filter.topic1(B256::from(init_code_hash));
let logs = provider.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
let Some(first_log) = logs.first() else { return Ok(None) };
let router = first_log
.log_decode::<abi::Deployment>()
.map_err(|_| Error::ConnectionError)?
.inner
.data
.created;
Ok(Some(Router::new(provider, router)))
}
}
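Tying these functions together, the intended deployment flow reads as follows; a sketch using only the API above, with transaction publication and provider construction assumed to happen elsewhere:

use std::sync::Arc;
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::RootProvider;
use crate::{Error, crypto::PublicKey, deployer::Deployer, router::Router};

async fn deploy_flow(
  provider: Arc<RootProvider<SimpleRequest>>,
  key: &PublicKey,
) -> Result<Option<Router>, Error> {
  // 1) Anyone funds the NUMS signer of this presigned TX and publishes it,
  //    placing the Deployer at its deterministic address
  let _deployer_tx = Deployer::deployment_tx();
  // 2) Once mined, view the Deployer and have it deploy the Router
  // (ConnectionError is a stand-in for a dedicated "not deployed" error)
  let deployer = Deployer::new(provider.clone()).await?.ok_or(Error::ConnectionError)?;
  let _router_tx = deployer.deploy_router(key);
  // 3) After the Router deployment confirms, locate it via its Deployment log
  deployer.find_router(provider, key).await
}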

coins/ethereum/src/erc20.rs Normal file
View File

@@ -0,0 +1,118 @@
use std::{sync::Arc, collections::HashSet};
use alloy_core::primitives::{Address, B256, U256};
use alloy_sol_types::{SolInterface, SolEvent};
use alloy_rpc_types::{BlockNumberOrTag, Filter};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};
use crate::Error;
pub use crate::abi::erc20 as abi;
use abi::{IERC20Calls, Transfer, transferCall, transferFromCall};
#[derive(Clone, Debug)]
pub struct TopLevelErc20Transfer {
pub id: [u8; 32],
pub from: [u8; 20],
pub amount: U256,
pub data: Vec<u8>,
}
/// A view for an ERC20 contract.
#[derive(Clone, Debug)]
pub struct ERC20(Arc<RootProvider<SimpleRequest>>, Address);
impl ERC20 {
/// Construct a new view of the specified ERC20 contract.
///
/// This checks a contract is deployed at that address yet does not check the contract is
/// actually an ERC20.
pub async fn new(
provider: Arc<RootProvider<SimpleRequest>>,
address: [u8; 20],
) -> Result<Option<Self>, Error> {
let code = provider
.get_code_at(address.into(), BlockNumberOrTag::Finalized.into())
.await
.map_err(|_| Error::ConnectionError)?;
// Contract has yet to be deployed
if code.is_empty() {
return Ok(None);
}
Ok(Some(Self(provider.clone(), Address::from(&address))))
}
pub async fn top_level_transfers(
&self,
block: u64,
to: [u8; 20],
) -> Result<Vec<TopLevelErc20Transfer>, Error> {
let filter = Filter::new().from_block(block).to_block(block).address(self.1);
let filter = filter.event_signature(Transfer::SIGNATURE_HASH);
let mut to_topic = [0; 32];
to_topic[12 ..].copy_from_slice(&to);
let filter = filter.topic2(B256::from(to_topic));
let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
let mut handled = HashSet::new();
let mut top_level_transfers = vec![];
for log in logs {
// Double check the address which emitted this log
if log.address() != self.1 {
Err(Error::ConnectionError)?;
}
let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?;
let tx = self.0.get_transaction_by_hash(tx_id).await.map_err(|_| Error::ConnectionError)?;
// If this is a top-level call...
if tx.to == Some(self.1) {
// And we recognize the call...
// Don't validate the encoding as this can't be re-encoded to an identical bytestring due
// to the InInstruction appended
if let Ok(call) = IERC20Calls::abi_decode(&tx.input, false) {
// Extract the top-level call's from/to/value
let (from, call_to, value) = match call {
IERC20Calls::transfer(transferCall { to: call_to, value }) => (tx.from, call_to, value),
IERC20Calls::transferFrom(transferFromCall { from, to: call_to, value }) => {
(from, call_to, value)
}
// Treat any other function selectors as unrecognized
_ => continue,
};
let log = log.log_decode::<Transfer>().map_err(|_| Error::ConnectionError)?.inner.data;
// Ensure the top-level transfer is equivalent, and this presumably isn't a log for an
// internal transfer
if (log.from != from) || (call_to != to) || (value != log.value) {
continue;
}
// Now that the top-level transfer is confirmed to be equivalent to the log, ensure it's
// the only log we handle
if handled.contains(&tx_id) {
continue;
}
handled.insert(tx_id);
// Read the data appended after
let encoded = call.abi_encode();
let data = tx.input.as_ref()[encoded.len() ..].to_vec();
// Push the transfer
top_level_transfers.push(TopLevelErc20Transfer {
// Since we'll only handle one log for this TX, set the ID to the TX ID
id: *tx_id,
from: *log.from.0,
amount: log.value,
data,
});
}
}
}
Ok(top_level_transfers)
}
}
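Scanning is then a single call per block; a sketch assuming provider, token, block_number, and to are already in scope:

// (ConnectionError is a stand-in for a dedicated "not deployed" error)
let erc20 = ERC20::new(provider, token).await?.ok_or(Error::ConnectionError)?;
// Every transfer to `to` made via a top-level transfer/transferFrom call in
// this block, alongside any data appended after the call's arguments
let transfers = erc20.top_level_transfers(block_number, to).await?;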

coins/ethereum/src/lib.rs Normal file
View File

@@ -0,0 +1,30 @@
use thiserror::Error;
pub use alloy_core;
pub use alloy_consensus;
pub use alloy_rpc_types;
pub use alloy_simple_request_transport;
pub use alloy_rpc_client;
pub use alloy_provider;
pub mod crypto;
pub(crate) mod abi;
pub mod erc20;
pub mod deployer;
pub mod router;
pub mod machine;
#[cfg(test)]
mod tests;
#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]
pub enum Error {
#[error("failed to verify Schnorr signature")]
InvalidSignature,
#[error("couldn't make call/send TX")]
ConnectionError,
}

View File

@@ -0,0 +1,414 @@
use std::{
io::{self, Read},
collections::HashMap,
};
use rand_core::{RngCore, CryptoRng};
use transcript::{Transcript, RecommendedTranscript};
use group::GroupEncoding;
use frost::{
curve::{Ciphersuite, Secp256k1},
Participant, ThresholdKeys, FrostError,
algorithm::Schnorr,
sign::*,
};
use alloy_core::primitives::U256;
use crate::{
crypto::{PublicKey, EthereumHram, Signature},
router::{
abi::{Call as AbiCall, OutInstruction as AbiOutInstruction},
Router,
},
};
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Call {
pub to: [u8; 20],
pub value: U256,
pub data: Vec<u8>,
}
impl Call {
pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let mut to = [0; 20];
reader.read_exact(&mut to)?;
let value = {
let mut value_bytes = [0; 32];
reader.read_exact(&mut value_bytes)?;
U256::from_le_slice(&value_bytes)
};
let mut data_len = {
let mut data_len = [0; 4];
reader.read_exact(&mut data_len)?;
usize::try_from(u32::from_le_bytes(data_len)).expect("u32 couldn't fit within a usize")
};
// A valid DoS would be to claim 4 GB of data is present at the cost of only a 4-byte length
// We read this in 1 KB chunks to only read data actually present (with a max DoS of 1 KB)
let mut data = vec![];
while data_len > 0 {
let chunk_len = data_len.min(1024);
let mut chunk = vec![0; chunk_len];
reader.read_exact(&mut chunk)?;
data.extend(&chunk);
data_len -= chunk_len;
}
Ok(Call { to, value, data })
}
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all(&self.to)?;
writer.write_all(&self.value.as_le_bytes())?;
let data_len = u32::try_from(self.data.len())
.map_err(|_| io::Error::other("call data length exceeded 2**32"))?;
writer.write_all(&data_len.to_le_bytes())?;
writer.write_all(&self.data)
}
}
impl From<Call> for AbiCall {
fn from(call: Call) -> AbiCall {
AbiCall { to: call.to.into(), value: call.value, data: call.data.into() }
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum OutInstructionTarget {
Direct([u8; 20]),
Calls(Vec<Call>),
}
impl OutInstructionTarget {
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let mut kind = [0xff];
reader.read_exact(&mut kind)?;
match kind[0] {
0 => {
let mut addr = [0; 20];
reader.read_exact(&mut addr)?;
Ok(OutInstructionTarget::Direct(addr))
}
1 => {
let mut calls_len = [0; 4];
reader.read_exact(&mut calls_len)?;
let calls_len = u32::from_le_bytes(calls_len);
let mut calls = vec![];
for _ in 0 .. calls_len {
calls.push(Call::read(reader)?);
}
Ok(OutInstructionTarget::Calls(calls))
}
_ => Err(io::Error::other("unrecognized OutInstructionTarget"))?,
}
}
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
match self {
OutInstructionTarget::Direct(addr) => {
writer.write_all(&[0])?;
writer.write_all(addr)?;
}
OutInstructionTarget::Calls(calls) => {
writer.write_all(&[1])?;
let call_len = u32::try_from(calls.len())
.map_err(|_| io::Error::other("amount of calls exceeded 2**32"))?;
writer.write_all(&call_len.to_le_bytes())?;
for call in calls {
call.write(writer)?;
}
}
}
Ok(())
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct OutInstruction {
pub target: OutInstructionTarget,
pub value: U256,
}
impl OutInstruction {
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let target = OutInstructionTarget::read(reader)?;
let value = {
let mut value_bytes = [0; 32];
reader.read_exact(&mut value_bytes)?;
U256::from_le_slice(&value_bytes)
};
Ok(OutInstruction { target, value })
}
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
self.target.write(writer)?;
writer.write_all(&self.value.as_le_bytes())
}
}
impl From<OutInstruction> for AbiOutInstruction {
fn from(instruction: OutInstruction) -> AbiOutInstruction {
match instruction.target {
OutInstructionTarget::Direct(addr) => {
AbiOutInstruction { to: addr.into(), calls: vec![], value: instruction.value }
}
OutInstructionTarget::Calls(calls) => AbiOutInstruction {
to: [0; 20].into(),
calls: calls.into_iter().map(Into::into).collect(),
value: instruction.value,
},
}
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum RouterCommand {
UpdateSeraiKey { chain_id: U256, nonce: U256, key: PublicKey },
Execute { chain_id: U256, nonce: U256, outs: Vec<OutInstruction> },
}
impl RouterCommand {
pub fn msg(&self) -> Vec<u8> {
match self {
RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {
Router::update_serai_key_message(*chain_id, *nonce, key)
}
RouterCommand::Execute { chain_id, nonce, outs } => Router::execute_message(
*chain_id,
*nonce,
outs.iter().map(|out| out.clone().into()).collect(),
),
}
}
pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let mut kind = [0xff];
reader.read_exact(&mut kind)?;
match kind[0] {
0 => {
let mut chain_id = [0; 32];
reader.read_exact(&mut chain_id)?;
let mut nonce = [0; 32];
reader.read_exact(&mut nonce)?;
let key = PublicKey::new(Secp256k1::read_G(reader)?)
.ok_or(io::Error::other("key for RouterCommand doesn't have an eth representation"))?;
Ok(RouterCommand::UpdateSeraiKey {
chain_id: U256::from_le_slice(&chain_id),
nonce: U256::from_le_slice(&nonce),
key,
})
}
1 => {
let mut chain_id = [0; 32];
reader.read_exact(&mut chain_id)?;
let chain_id = U256::from_le_slice(&chain_id);
let mut nonce = [0; 32];
reader.read_exact(&mut nonce)?;
let nonce = U256::from_le_slice(&nonce);
let mut outs_len = [0; 4];
reader.read_exact(&mut outs_len)?;
let outs_len = u32::from_le_bytes(outs_len);
let mut outs = vec![];
for _ in 0 .. outs_len {
outs.push(OutInstruction::read(reader)?);
}
Ok(RouterCommand::Execute { chain_id, nonce, outs })
}
_ => Err(io::Error::other("reading unknown type of RouterCommand"))?,
}
}
pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
match self {
RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {
writer.write_all(&[0])?;
writer.write_all(&chain_id.as_le_bytes())?;
writer.write_all(&nonce.as_le_bytes())?;
writer.write_all(&key.A.to_bytes())
}
RouterCommand::Execute { chain_id, nonce, outs } => {
writer.write_all(&[1])?;
writer.write_all(&chain_id.as_le_bytes())?;
writer.write_all(&nonce.as_le_bytes())?;
writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?;
for out in outs {
out.write(writer)?;
}
Ok(())
}
}
}
pub fn serialize(&self) -> Vec<u8> {
let mut res = vec![];
self.write(&mut res).unwrap();
res
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct SignedRouterCommand {
command: RouterCommand,
signature: Signature,
}
impl SignedRouterCommand {
pub fn new(key: &PublicKey, command: RouterCommand, signature: &[u8; 64]) -> Option<Self> {
let c = Secp256k1::read_F(&mut &signature[.. 32]).ok()?;
let s = Secp256k1::read_F(&mut &signature[32 ..]).ok()?;
let signature = Signature { c, s };
if !signature.verify(key, &command.msg()) {
None?
}
Some(SignedRouterCommand { command, signature })
}
pub fn command(&self) -> &RouterCommand {
&self.command
}
pub fn signature(&self) -> &Signature {
&self.signature
}
pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let command = RouterCommand::read(reader)?;
let mut sig = [0; 64];
reader.read_exact(&mut sig)?;
let signature = Signature::from_bytes(sig)?;
Ok(SignedRouterCommand { command, signature })
}
pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
self.command.write(writer)?;
writer.write_all(&self.signature.to_bytes())
}
}
pub struct RouterCommandMachine {
key: PublicKey,
command: RouterCommand,
machine: AlgorithmMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}
impl RouterCommandMachine {
pub fn new(keys: ThresholdKeys<Secp256k1>, command: RouterCommand) -> Option<Self> {
// The Schnorr algorithm should be fine without this, even when using the IETF variant
// Binding the key and command here is more comprehensive however, so it's done despite not
// being strictly necessary
let mut transcript = RecommendedTranscript::new(b"ethereum-serai RouterCommandMachine v0.1");
let key = keys.group_key();
transcript.append_message(b"key", key.to_bytes());
transcript.append_message(b"command", command.serialize());
Some(Self {
key: PublicKey::new(key)?,
command,
machine: AlgorithmMachine::new(Schnorr::new(transcript), keys),
})
}
}
impl PreprocessMachine for RouterCommandMachine {
type Preprocess = Preprocess<Secp256k1, ()>;
type Signature = SignedRouterCommand;
type SignMachine = RouterCommandSignMachine;
fn preprocess<R: RngCore + CryptoRng>(
self,
rng: &mut R,
) -> (Self::SignMachine, Self::Preprocess) {
let (machine, preprocess) = self.machine.preprocess(rng);
(RouterCommandSignMachine { key: self.key, command: self.command, machine }, preprocess)
}
}
pub struct RouterCommandSignMachine {
key: PublicKey,
command: RouterCommand,
machine: AlgorithmSignMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}
impl SignMachine<SignedRouterCommand> for RouterCommandSignMachine {
type Params = ();
type Keys = ThresholdKeys<Secp256k1>;
type Preprocess = Preprocess<Secp256k1, ()>;
type SignatureShare = SignatureShare<Secp256k1>;
type SignatureMachine = RouterCommandSignatureMachine;
fn cache(self) -> CachedPreprocess {
unimplemented!(
"RouterCommand machines don't support caching their preprocesses due to {}",
"being already bound to a specific command"
);
}
fn from_cache(
(): (),
_: ThresholdKeys<Secp256k1>,
_: CachedPreprocess,
) -> (Self, Self::Preprocess) {
unimplemented!(
"RouterCommand machines don't support caching their preprocesses due to {}",
"being already bound to a specific command"
);
}
fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
self.machine.read_preprocess(reader)
}
fn sign(
self,
commitments: HashMap<Participant, Self::Preprocess>,
msg: &[u8],
) -> Result<(RouterCommandSignatureMachine, Self::SignatureShare), FrostError> {
if !msg.is_empty() {
panic!("message was passed to a RouterCommand machine when it generates its own");
}
let (machine, share) = self.machine.sign(commitments, &self.command.msg())?;
Ok((RouterCommandSignatureMachine { key: self.key, command: self.command, machine }, share))
}
}
pub struct RouterCommandSignatureMachine {
key: PublicKey,
command: RouterCommand,
machine:
AlgorithmSignatureMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}
impl SignatureMachine<SignedRouterCommand> for RouterCommandSignatureMachine {
type SignatureShare = SignatureShare<Secp256k1>;
fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::SignatureShare> {
self.machine.read_share(reader)
}
fn complete(
self,
shares: HashMap<Participant, Self::SignatureShare>,
) -> Result<SignedRouterCommand, FrostError> {
let sig = self.machine.complete(shares)?;
let signature = Signature::new(&self.key, &self.command.msg(), sig)
.expect("machine produced an invalid signature");
Ok(SignedRouterCommand { command: self.command, signature })
}
}

View File

@@ -0,0 +1,426 @@
use std::{sync::Arc, io, collections::HashSet};
use k256::{
elliptic_curve::{group::GroupEncoding, sec1},
ProjectivePoint,
};
use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind};
#[cfg(test)]
use alloy_core::primitives::B256;
use alloy_consensus::TxLegacy;
use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent};
use alloy_rpc_types::Filter;
#[cfg(test)]
use alloy_rpc_types::{BlockId, TransactionRequest, TransactionInput};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};
pub use crate::{
Error,
crypto::{PublicKey, Signature},
abi::{erc20::Transfer, router as abi},
};
use abi::{SeraiKeyUpdated, InInstruction as InInstructionEvent, Executed as ExecutedEvent};
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Coin {
Ether,
Erc20([u8; 20]),
}
impl Coin {
pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let mut kind = [0xff];
reader.read_exact(&mut kind)?;
Ok(match kind[0] {
0 => Coin::Ether,
1 => {
let mut address = [0; 20];
reader.read_exact(&mut address)?;
Coin::Erc20(address)
}
_ => Err(io::Error::other("unrecognized Coin type"))?,
})
}
pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
match self {
Coin::Ether => writer.write_all(&[0]),
Coin::Erc20(token) => {
writer.write_all(&[1])?;
writer.write_all(token)
}
}
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct InInstruction {
pub id: ([u8; 32], u64),
pub from: [u8; 20],
pub coin: Coin,
pub amount: U256,
pub data: Vec<u8>,
pub key_at_end_of_block: ProjectivePoint,
}
impl InInstruction {
pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let id = {
let mut id_hash = [0; 32];
reader.read_exact(&mut id_hash)?;
let mut id_pos = [0; 8];
reader.read_exact(&mut id_pos)?;
let id_pos = u64::from_le_bytes(id_pos);
(id_hash, id_pos)
};
let mut from = [0; 20];
reader.read_exact(&mut from)?;
let coin = Coin::read(reader)?;
let mut amount = [0; 32];
reader.read_exact(&mut amount)?;
let amount = U256::from_le_slice(&amount);
let mut data_len = [0; 4];
reader.read_exact(&mut data_len)?;
let data_len = usize::try_from(u32::from_le_bytes(data_len))
.map_err(|_| io::Error::other("InInstruction data exceeded 2**32 in length"))?;
let mut data = vec![0; data_len];
reader.read_exact(&mut data)?;
let mut key_at_end_of_block = <ProjectivePoint as GroupEncoding>::Repr::default();
reader.read_exact(&mut key_at_end_of_block)?;
let key_at_end_of_block = Option::from(ProjectivePoint::from_bytes(&key_at_end_of_block))
.ok_or(io::Error::other("InInstruction had key at end of block which wasn't valid"))?;
Ok(InInstruction { id, from, coin, amount, data, key_at_end_of_block })
}
pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all(&self.id.0)?;
writer.write_all(&self.id.1.to_le_bytes())?;
writer.write_all(&self.from)?;
self.coin.write(writer)?;
writer.write_all(&self.amount.as_le_bytes())?;
writer.write_all(
&u32::try_from(self.data.len())
.map_err(|_| {
io::Error::other("InInstruction being written had data exceeding 2**32 in length")
})?
.to_le_bytes(),
)?;
writer.write_all(&self.data)?;
writer.write_all(&self.key_at_end_of_block.to_bytes())
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Executed {
pub tx_id: [u8; 32],
pub nonce: u64,
pub signature: [u8; 64],
}
/// The contract Serai uses to manage its state.
#[derive(Clone, Debug)]
pub struct Router(Arc<RootProvider<SimpleRequest>>, Address);
impl Router {
pub(crate) fn code() -> Vec<u8> {
let bytecode = include_str!("../artifacts/Router.bin");
Bytes::from_hex(bytecode).expect("compiled-in Router bytecode wasn't valid hex").to_vec()
}
pub(crate) fn init_code(key: &PublicKey) -> Vec<u8> {
let mut bytecode = Self::code();
// Append the constructor arguments
bytecode.extend((abi::constructorCall { _seraiKey: key.eth_repr().into() }).abi_encode());
bytecode
}
// This isn't pub in order to force users to use `Deployer::find_router`.
pub(crate) fn new(provider: Arc<RootProvider<SimpleRequest>>, address: Address) -> Self {
Self(provider, address)
}
pub fn address(&self) -> [u8; 20] {
**self.1
}
/// Get the key for Serai at the specified block.
#[cfg(test)]
pub async fn serai_key(&self, at: [u8; 32]) -> Result<PublicKey, Error> {
let call = TransactionRequest::default()
.to(Some(self.1))
.input(TransactionInput::new(abi::seraiKeyCall::new(()).abi_encode().into()));
let bytes = self
.0
.call(&call, Some(BlockId::Hash(B256::from(at).into())))
.await
.map_err(|_| Error::ConnectionError)?;
let res =
abi::seraiKeyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
PublicKey::from_eth_repr(res._0.0).ok_or(Error::ConnectionError)
}
/// Get the message to be signed in order to update the key for Serai.
pub(crate) fn update_serai_key_message(chain_id: U256, nonce: U256, key: &PublicKey) -> Vec<u8> {
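// Message layout: "updateSeraiKey" || chain_id (32-byte BE) || nonce (32-byte BE) || key (32-byte eth repr)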
let mut buffer = b"updateSeraiKey".to_vec();
buffer.extend(&chain_id.to_be_bytes::<32>());
buffer.extend(&nonce.to_be_bytes::<32>());
buffer.extend(&key.eth_repr());
buffer
}
/// Update the key representing Serai.
pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy {
// TODO: Set a more accurate gas
TxLegacy {
to: TxKind::Call(self.1),
input: abi::updateSeraiKeyCall::new((public_key.eth_repr().into(), sig.into()))
.abi_encode()
.into(),
gas_limit: 100_000,
..Default::default()
}
}
/// Get the current nonce for the published batches.
#[cfg(test)]
pub async fn nonce(&self, at: [u8; 32]) -> Result<U256, Error> {
let call = TransactionRequest::default()
.to(Some(self.1))
.input(TransactionInput::new(abi::nonceCall::new(()).abi_encode().into()));
let bytes = self
.0
.call(&call, Some(BlockId::Hash(B256::from(at).into())))
.await
.map_err(|_| Error::ConnectionError)?;
let res =
abi::nonceCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
Ok(res._0)
}
/// Get the message to be signed in order to execute a batch of `OutInstruction`s.
pub(crate) fn execute_message(
chain_id: U256,
nonce: U256,
outs: Vec<abi::OutInstruction>,
) -> Vec<u8> {
("execute".to_string(), chain_id, nonce, outs).abi_encode_params()
}
/// Execute a batch of `OutInstruction`s.
pub fn execute(&self, outs: &[abi::OutInstruction], sig: &Signature) -> TxLegacy {
TxLegacy {
to: TxKind::Call(self.1),
input: abi::executeCall::new((outs.to_vec(), sig.into())).abi_encode().into(),
// TODO
gas_limit: 100_000 + ((200_000 + 10_000) * u128::try_from(outs.len()).unwrap()),
..Default::default()
}
}
pub async fn in_instructions(
&self,
block: u64,
allowed_tokens: &HashSet<[u8; 20]>,
) -> Result<Vec<InInstruction>, Error> {
let key_at_end_of_block = {
let filter = Filter::new().from_block(0).to_block(block).address(self.1);
let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH);
let all_keys = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
let last_key_x_coordinate_log = all_keys.last().ok_or(Error::ConnectionError)?;
let last_key_x_coordinate = last_key_x_coordinate_log
.log_decode::<SeraiKeyUpdated>()
.map_err(|_| Error::ConnectionError)?
.inner
.data
.key;
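// The event only carries the key's x-coordinate; Serai's keys have an even y-coordinate,
// so the full point is recovered with the SEC1 even-y tag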
let mut compressed_point = <ProjectivePoint as GroupEncoding>::Repr::default();
compressed_point[0] = u8::from(sec1::Tag::CompressedEvenY);
compressed_point[1 ..].copy_from_slice(last_key_x_coordinate.as_slice());
ProjectivePoint::from_bytes(&compressed_point).expect("router's last key wasn't a valid key")
};
let filter = Filter::new().from_block(block).to_block(block).address(self.1);
let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH);
let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
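// Track the log indexes of ERC20 Transfers already matched to an InInstruction, so a
// single Transfer can't justify multiple InInstructions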
let mut transfer_check = HashSet::new();
let mut in_instructions = vec![];
for log in logs {
// Double check the address which emitted this log
if log.address() != self.1 {
Err(Error::ConnectionError)?;
}
let id = (
log.block_hash.ok_or(Error::ConnectionError)?.into(),
log.log_index.ok_or(Error::ConnectionError)?,
);
let tx_hash = log.transaction_hash.ok_or(Error::ConnectionError)?;
let tx = self.0.get_transaction_by_hash(tx_hash).await.map_err(|_| Error::ConnectionError)?;
let log =
log.log_decode::<InInstructionEvent>().map_err(|_| Error::ConnectionError)?.inner.data;
let coin = if log.coin.0 == [0; 20] {
Coin::Ether
} else {
let token = *log.coin.0;
if !allowed_tokens.contains(&token) {
continue;
}
// If this also counts as a top-level transfer via the token, drop it
//
// Necessary in order to handle a potential edge case with some theoretical token
// implementations
//
// This will either let it be handled by the top-level transfer hook or will drop it
// entirely on the side of caution
if tx.to == Some(token.into()) {
continue;
}
// Get all logs for this TX
let receipt = self
.0
.get_transaction_receipt(tx_hash)
.await
.map_err(|_| Error::ConnectionError)?
.ok_or(Error::ConnectionError)?;
let tx_logs = receipt.inner.logs();
// Find a matching transfer log
let mut found_transfer = false;
for tx_log in tx_logs {
let log_index = tx_log.log_index.ok_or(Error::ConnectionError)?;
// Ensure we didn't already use this transfer to check a distinct InInstruction event
if transfer_check.contains(&log_index) {
continue;
}
// Check if this log is from the token we expected to be transferred
if tx_log.address().0 != token {
continue;
}
// Check if this is a transfer log
// https://github.com/alloy-rs/core/issues/589
if tx_log.topics()[0] != Transfer::SIGNATURE_HASH {
continue;
}
let Ok(transfer) = Transfer::decode_log(&tx_log.inner.clone(), true) else { continue };
// Check if this is a transfer to us for the expected amount
if (transfer.to == self.1) && (transfer.value == log.amount) {
transfer_check.insert(log_index);
found_transfer = true;
break;
}
}
if !found_transfer {
// This shouldn't be a ConnectionError
// This is an exploit, a non-conforming ERC20, or an invalid connection
// This should halt the process which is sufficient, yet this is sub-optimal
// TODO
Err(Error::ConnectionError)?;
}
Coin::Erc20(token)
};
in_instructions.push(InInstruction {
id,
from: *log.from.0,
coin,
amount: log.amount,
data: log.instruction.as_ref().to_vec(),
key_at_end_of_block,
});
}
Ok(in_instructions)
}
pub async fn executed_commands(&self, block: u64) -> Result<Vec<Executed>, Error> {
let mut res = vec![];
{
let filter = Filter::new().from_block(block).to_block(block).address(self.1);
let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH);
let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
for log in logs {
// Double check the address which emitted this log
if log.address() != self.1 {
Err(Error::ConnectionError)?;
}
let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into();
let log =
log.log_decode::<SeraiKeyUpdated>().map_err(|_| Error::ConnectionError)?.inner.data;
let mut signature = [0; 64];
signature[.. 32].copy_from_slice(log.signature.c.as_ref());
signature[32 ..].copy_from_slice(log.signature.s.as_ref());
res.push(Executed {
tx_id,
nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,
signature,
});
}
}
{
let filter = Filter::new().from_block(block).to_block(block).address(self.1);
let filter = filter.event_signature(ExecutedEvent::SIGNATURE_HASH);
let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
for log in logs {
// Double check the address which emitted this log
if log.address() != self.1 {
Err(Error::ConnectionError)?;
}
let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into();
let log = log.log_decode::<ExecutedEvent>().map_err(|_| Error::ConnectionError)?.inner.data;
let mut signature = [0; 64];
signature[.. 32].copy_from_slice(log.signature.c.as_ref());
signature[32 ..].copy_from_slice(log.signature.s.as_ref());
res.push(Executed {
tx_id,
nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,
signature,
});
}
}
Ok(res)
}
#[cfg(feature = "tests")]
pub fn key_updated_filter(&self) -> Filter {
Filter::new().address(self.1).event_signature(SeraiKeyUpdated::SIGNATURE_HASH)
}
#[cfg(feature = "tests")]
pub fn executed_filter(&self) -> Filter {
Filter::new().address(self.1).event_signature(ExecutedEvent::SIGNATURE_HASH)
}
}

View File

@@ -0,0 +1,13 @@
use alloy_sol_types::sol;
#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod schnorr_container {
use super::*;
sol!("src/tests/contracts/Schnorr.sol");
}
pub(crate) use schnorr_container::TestSchnorr as schnorr;

View File

@@ -1,5 +1,5 @@
// SPDX-License-Identifier: AGPL-3.0-only
pragma solidity ^0.8.26;
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;
contract TestERC20 {
event Transfer(address indexed from, address indexed to, uint256 value);
@@ -8,57 +8,44 @@ contract TestERC20 {
function name() public pure returns (string memory) {
return "Test ERC20";
}
function symbol() public pure returns (string memory) {
return "TEST";
}
function decimals() public pure returns (uint8) {
return 18;
}
uint256 public totalSupply;
function totalSupply() public pure returns (uint256) {
return 1_000_000 * 10e18;
}
mapping(address => uint256) balances;
mapping(address => mapping(address => uint256)) allowances;
constructor() {
balances[msg.sender] = totalSupply();
}
function balanceOf(address owner) public view returns (uint256) {
return balances[owner];
}
function transfer(address to, uint256 value) public returns (bool) {
balances[msg.sender] -= value;
balances[to] += value;
emit Transfer(msg.sender, to, value);
return true;
}
function transferFrom(address from, address to, uint256 value) public returns (bool) {
allowances[from][msg.sender] -= value;
balances[from] -= value;
balances[to] += value;
emit Transfer(from, to, value);
return true;
}
function approve(address spender, uint256 value) public returns (bool) {
allowances[msg.sender][spender] = value;
emit Approval(msg.sender, spender, value);
return true;
}
function allowance(address owner, address spender) public view returns (uint256) {
return allowances[owner][spender];
}
function mint(address owner, uint256 value) external {
balances[owner] += value;
totalSupply += value;
emit Transfer(address(0), owner, value);
}
function magicApprove(address owner, address spender, uint256 value) external {
allowances[owner][spender] = value;
emit Approval(owner, spender, value);
}
}

View File

@@ -0,0 +1,15 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;
import "../../../contracts/Schnorr.sol";
contract TestSchnorr {
function verify(
bytes32 px,
bytes calldata message,
bytes32 c,
bytes32 s
) external pure returns (bool) {
return Schnorr.verify(px, message, c, s);
}
}

View File

@@ -0,0 +1,105 @@
use rand_core::OsRng;
use group::ff::{Field, PrimeField};
use k256::{
ecdsa::{
self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey,
},
Scalar, ProjectivePoint,
};
use frost::{
curve::{Ciphersuite, Secp256k1},
algorithm::{Hram, IetfSchnorr},
tests::{algorithm_machines, sign},
};
use crate::{crypto::*, tests::key_gen};
// The ecrecover opcode, yet with parity replacing v
pub(crate) fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
let sig = ecdsa::Signature::from_scalars(r, s).ok()?;
let message: [u8; 32] = message.to_repr().into();
alloy_core::primitives::Signature::from_signature_and_parity(
sig,
alloy_core::primitives::Parity::Parity(odd_y),
)
.ok()?
.recover_address_from_prehash(&alloy_core::primitives::B256::from(message))
.ok()
.map(Into::into)
}
#[test]
fn test_ecrecover() {
let private = SigningKey::random(&mut OsRng);
let public = VerifyingKey::from(&private);
// Sign the signature
const MESSAGE: &[u8] = b"Hello, World!";
let (sig, recovery_id) = private
.as_nonzero_scalar()
.try_sign_prehashed(
<Secp256k1 as Ciphersuite>::F::random(&mut OsRng),
&keccak256(MESSAGE).into(),
)
.unwrap();
// Sanity check the signature verifies
#[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
{
assert_eq!(public.verify_prehash(&keccak256(MESSAGE), &sig).unwrap(), ());
}
// Perform the ecrecover
assert_eq!(
ecrecover(
hash_to_scalar(MESSAGE),
u8::from(recovery_id.unwrap().is_y_odd()) == 1,
*sig.r(),
*sig.s()
)
.unwrap(),
address(&ProjectivePoint::from(public.as_affine()))
);
}
// Run the sign test with the EthereumHram
#[test]
fn test_signing() {
let (keys, _) = key_gen();
const MESSAGE: &[u8] = b"Hello, World!";
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let _sig =
sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
}
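// A sketch of why the following works: ecrecover(z, v, r, s) returns the address of
// r^(-1) * ((s * R) - (z * G)), where R is the point recovered from the x-coordinate r and
// parity v. With z = -s * px, the ECDSA s set to -c * px, and r = px (recovering the
// public key A, which has even parity), it returns the address of (s * G) - (c * A). For a
// valid Schnorr signature (s = k + c * a), that's exactly R, so comparing against
// address(R) verifies the signature with a single ecrecover call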
#[allow(non_snake_case)]
pub fn preprocess_signature_for_ecrecover(
R: ProjectivePoint,
public_key: &PublicKey,
m: &[u8],
s: Scalar,
) -> (Scalar, Scalar) {
let c = EthereumHram::hram(&R, &public_key.A, m);
let sa = -(s * public_key.px);
let ca = -(c * public_key.px);
(sa, ca)
}
#[test]
fn test_ecrecover_hack() {
let (keys, public_key) = key_gen();
const MESSAGE: &[u8] = b"Hello, World!";
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let sig =
sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
let (sa, ca) = preprocess_signature_for_ecrecover(sig.R, &public_key, MESSAGE, sig.s);
let q = ecrecover(sa, false, public_key.px, ca).unwrap();
assert_eq!(q, address(&sig.R));
}

View File

@@ -0,0 +1,127 @@
use std::{sync::Arc, collections::HashMap};
use rand_core::OsRng;
use k256::{Scalar, ProjectivePoint};
use frost::{curve::Secp256k1, Participant, ThresholdKeys, tests::key_gen as frost_key_gen};
use alloy_core::{
primitives::{Address, U256, Bytes, TxKind},
hex::FromHex,
};
use alloy_consensus::{SignableTransaction, TxLegacy};
use alloy_rpc_types::TransactionReceipt;
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};
use crate::crypto::{address, deterministically_sign, PublicKey};
mod crypto;
mod abi;
mod schnorr;
mod router;
pub fn key_gen() -> (HashMap<Participant, ThresholdKeys<Secp256k1>>, PublicKey) {
let mut keys = frost_key_gen::<_, Secp256k1>(&mut OsRng);
let mut group_key = keys[&Participant::new(1).unwrap()].group_key();
let mut offset = Scalar::ZERO;
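// Offset the keys until the group key is representable as an Ethereum `PublicKey`
// (`PublicKey::new` returns `Some` only for keys the contracts accept)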
while PublicKey::new(group_key).is_none() {
offset += Scalar::ONE;
group_key += ProjectivePoint::GENERATOR;
}
for keys in keys.values_mut() {
*keys = keys.offset(offset);
}
let public_key = PublicKey::new(group_key).unwrap();
(keys, public_key)
}
// TODO: Use a proper error here
pub async fn send(
provider: &RootProvider<SimpleRequest>,
wallet: &k256::ecdsa::SigningKey,
mut tx: TxLegacy,
) -> Option<TransactionReceipt> {
let verifying_key = *wallet.verifying_key().as_affine();
let address = Address::from(address(&verifying_key.into()));
// https://github.com/alloy-rs/alloy/issues/539
// let chain_id = provider.get_chain_id().await.unwrap();
// tx.chain_id = Some(chain_id);
tx.chain_id = None;
tx.nonce = provider.get_transaction_count(address, None).await.unwrap();
// 100 gwei
tx.gas_price = 100_000_000_000u128;
let sig = wallet.sign_prehash_recoverable(tx.signature_hash().as_ref()).unwrap();
assert_eq!(address, tx.clone().into_signed(sig.into()).recover_signer().unwrap());
assert!(
provider.get_balance(address, None).await.unwrap() >
((U256::from(tx.gas_price) * U256::from(tx.gas_limit)) + tx.value)
);
let mut bytes = vec![];
tx.encode_with_signature_fields(&sig.into(), &mut bytes);
let pending_tx = provider.send_raw_transaction(&bytes).await.ok()?;
pending_tx.get_receipt().await.ok()
}
pub async fn fund_account(
provider: &RootProvider<SimpleRequest>,
wallet: &k256::ecdsa::SigningKey,
to_fund: Address,
value: U256,
) -> Option<()> {
let funding_tx =
TxLegacy { to: TxKind::Call(to_fund), gas_limit: 21_000, value, ..Default::default() };
assert!(send(provider, wallet, funding_tx).await.unwrap().status());
Some(())
}
// TODO: Use a proper error here
pub async fn deploy_contract(
client: Arc<RootProvider<SimpleRequest>>,
wallet: &k256::ecdsa::SigningKey,
name: &str,
) -> Option<Address> {
let hex_bin_buf = std::fs::read_to_string(format!("./artifacts/{name}.bin")).unwrap();
let hex_bin =
if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf };
let bin = Bytes::from_hex(hex_bin).unwrap();
let deployment_tx = TxLegacy {
chain_id: None,
nonce: 0,
// 100 gwei
gas_price: 100_000_000_000u128,
gas_limit: 1_000_000,
to: TxKind::Create,
value: U256::ZERO,
input: bin,
};
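// `deterministically_sign` produces a fixed signature for this transaction; the sender
// address is recovered from it, so anyone can fund that address and broadcast the
// deployment without holding a private key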
let deployment_tx = deterministically_sign(&deployment_tx);
// Fund the deployer address
fund_account(
&client,
wallet,
deployment_tx.recover_signer().unwrap(),
U256::from(deployment_tx.tx().gas_limit) * U256::from(deployment_tx.tx().gas_price),
)
.await?;
let (deployment_tx, sig, _) = deployment_tx.into_parts();
let mut bytes = vec![];
deployment_tx.encode_with_signature_fields(&sig, &mut bytes);
let pending_tx = client.send_raw_transaction(&bytes).await.ok()?;
let receipt = pending_tx.get_receipt().await.ok()?;
assert!(receipt.status());
Some(receipt.contract_address.unwrap())
}

View File

@@ -0,0 +1,183 @@
use std::{convert::TryFrom, sync::Arc, collections::HashMap};
use rand_core::OsRng;
use group::Group;
use k256::ProjectivePoint;
use frost::{
curve::Secp256k1,
Participant, ThresholdKeys,
algorithm::IetfSchnorr,
tests::{algorithm_machines, sign},
};
use alloy_core::primitives::{Address, U256};
use alloy_simple_request_transport::SimpleRequest;
use alloy_rpc_client::ClientBuilder;
use alloy_provider::{Provider, RootProvider};
use alloy_node_bindings::{Anvil, AnvilInstance};
use crate::{
crypto::*,
deployer::Deployer,
router::{Router, abi as router},
tests::{key_gen, send, fund_account},
};
async fn setup_test() -> (
AnvilInstance,
Arc<RootProvider<SimpleRequest>>,
u64,
Router,
HashMap<Participant, ThresholdKeys<Secp256k1>>,
PublicKey,
) {
let anvil = Anvil::new().spawn();
let provider = RootProvider::new(
ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
);
let chain_id = provider.get_chain_id().await.unwrap();
let wallet = anvil.keys()[0].clone().into();
let client = Arc::new(provider);
// Make sure the Deployer constructor returns None, as it doesn't exist yet
assert!(Deployer::new(client.clone()).await.unwrap().is_none());
// Deploy the Deployer
let tx = Deployer::deployment_tx();
fund_account(
&client,
&wallet,
tx.recover_signer().unwrap(),
U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price),
)
.await
.unwrap();
let (tx, sig, _) = tx.into_parts();
let mut bytes = vec![];
tx.encode_with_signature_fields(&sig, &mut bytes);
let pending_tx = client.send_raw_transaction(&bytes).await.unwrap();
let receipt = pending_tx.get_receipt().await.unwrap();
assert!(receipt.status());
let deployer =
Deployer::new(client.clone()).await.expect("network error").expect("deployer wasn't deployed");
let (keys, public_key) = key_gen();
// Verify the Router constructor returns None, as it doesn't exist yet
assert!(deployer.find_router(client.clone(), &public_key).await.unwrap().is_none());
// Deploy the router
let receipt = send(&client, &anvil.keys()[0].clone().into(), deployer.deploy_router(&public_key))
.await
.unwrap();
assert!(receipt.status());
let contract = deployer.find_router(client.clone(), &public_key).await.unwrap().unwrap();
(anvil, client, chain_id, contract, keys, public_key)
}
async fn latest_block_hash(client: &RootProvider<SimpleRequest>) -> [u8; 32] {
client
.get_block(client.get_block_number().await.unwrap().into(), false)
.await
.unwrap()
.unwrap()
.header
.hash
.unwrap()
.0
}
#[tokio::test]
async fn test_deploy_contract() {
let (_anvil, client, _, router, _, public_key) = setup_test().await;
let block_hash = latest_block_hash(&client).await;
assert_eq!(router.serai_key(block_hash).await.unwrap(), public_key);
assert_eq!(router.nonce(block_hash).await.unwrap(), U256::try_from(1u64).unwrap());
// TODO: Check it emitted SeraiKeyUpdated(public_key) at its genesis
}
pub fn hash_and_sign(
keys: &HashMap<Participant, ThresholdKeys<Secp256k1>>,
public_key: &PublicKey,
message: &[u8],
) -> Signature {
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let sig =
sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, keys), message);
Signature::new(public_key, message, sig).unwrap()
}
#[tokio::test]
async fn test_router_update_serai_key() {
let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await;
let next_key = loop {
let point = ProjectivePoint::random(&mut OsRng);
let Some(next_key) = PublicKey::new(point) else { continue };
break next_key;
};
let message = Router::update_serai_key_message(
U256::try_from(chain_id).unwrap(),
U256::try_from(1u64).unwrap(),
&next_key,
);
let sig = hash_and_sign(&keys, &public_key, &message);
let first_block_hash = latest_block_hash(&client).await;
assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key);
let receipt =
send(&client, &anvil.keys()[0].clone().into(), contract.update_serai_key(&next_key, &sig))
.await
.unwrap();
assert!(receipt.status());
let second_block_hash = latest_block_hash(&client).await;
assert_eq!(contract.serai_key(second_block_hash).await.unwrap(), next_key);
// Check this does still offer the historical state
assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key);
// TODO: Check logs
println!("gas used: {:?}", receipt.gas_used);
// println!("logs: {:?}", receipt.logs);
}
#[tokio::test]
async fn test_router_execute() {
let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await;
let to = Address::from([0; 20]);
let value = U256::ZERO;
let tx = router::OutInstruction { to, value, calls: vec![] };
let txs = vec![tx];
let first_block_hash = latest_block_hash(&client).await;
let nonce = contract.nonce(first_block_hash).await.unwrap();
assert_eq!(nonce, U256::try_from(1u64).unwrap());
let message = Router::execute_message(U256::try_from(chain_id).unwrap(), nonce, txs.clone());
let sig = hash_and_sign(&keys, &public_key, &message);
let receipt =
send(&client, &anvil.keys()[0].clone().into(), contract.execute(&txs, &sig)).await.unwrap();
assert!(receipt.status());
let second_block_hash = latest_block_hash(&client).await;
assert_eq!(contract.nonce(second_block_hash).await.unwrap(), U256::try_from(2u64).unwrap());
// Check this does still offer the historical state
assert_eq!(contract.nonce(first_block_hash).await.unwrap(), U256::try_from(1u64).unwrap());
// TODO: Check logs
println!("gas used: {:?}", receipt.gas_used);
// println!("logs: {:?}", receipt.logs);
}

View File

@@ -0,0 +1,93 @@
use std::sync::Arc;
use rand_core::OsRng;
use group::ff::PrimeField;
use k256::Scalar;
use frost::{
curve::Secp256k1,
algorithm::IetfSchnorr,
tests::{algorithm_machines, sign},
};
use alloy_core::primitives::Address;
use alloy_sol_types::SolCall;
use alloy_rpc_types::{TransactionInput, TransactionRequest};
use alloy_simple_request_transport::SimpleRequest;
use alloy_rpc_client::ClientBuilder;
use alloy_provider::{Provider, RootProvider};
use alloy_node_bindings::{Anvil, AnvilInstance};
use crate::{
Error,
crypto::*,
tests::{key_gen, deploy_contract, abi::schnorr as abi},
};
async fn setup_test() -> (AnvilInstance, Arc<RootProvider<SimpleRequest>>, Address) {
let anvil = Anvil::new().spawn();
let provider = RootProvider::new(
ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
);
let wallet = anvil.keys()[0].clone().into();
let client = Arc::new(provider);
let address = deploy_contract(client.clone(), &wallet, "TestSchnorr").await.unwrap();
(anvil, client, address)
}
#[tokio::test]
async fn test_deploy_contract() {
setup_test().await;
}
pub async fn call_verify(
provider: &RootProvider<SimpleRequest>,
contract: Address,
public_key: &PublicKey,
message: &[u8],
signature: &Signature,
) -> Result<(), Error> {
let px: [u8; 32] = public_key.px.to_repr().into();
let c_bytes: [u8; 32] = signature.c.to_repr().into();
let s_bytes: [u8; 32] = signature.s.to_repr().into();
let call = TransactionRequest::default().to(Some(contract)).input(TransactionInput::new(
abi::verifyCall::new((px.into(), message.to_vec().into(), c_bytes.into(), s_bytes.into()))
.abi_encode()
.into(),
));
let bytes = provider.call(&call, None).await.map_err(|_| Error::ConnectionError)?;
let res =
abi::verifyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
if res._0 {
Ok(())
} else {
Err(Error::InvalidSignature)
}
}
#[tokio::test]
async fn test_ecrecover_hack() {
let (_anvil, client, contract) = setup_test().await;
let (keys, public_key) = key_gen();
const MESSAGE: &[u8] = b"Hello, World!";
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let sig =
sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
let sig = Signature::new(&public_key, MESSAGE, sig).unwrap();
call_verify(&client, contract, &public_key, MESSAGE, &sig).await.unwrap();
// Test an invalid signature fails
let mut sig = sig;
sig.s += Scalar::ONE;
assert!(call_verify(&client, contract, &public_key, MESSAGE, &sig).await.is_err());
}

105
coins/monero/Cargo.toml Normal file
View File

@@ -0,0 +1,105 @@
[package]
name = "monero-serai"
version = "0.1.4-alpha"
description = "A modern Monero transaction library"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/coins/monero"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.74"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
std-shims = { path = "../../common/std-shims", version = "^0.1.1", default-features = false }
async-trait = { version = "0.1", default-features = false }
thiserror = { version = "1", default-features = false, optional = true }
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
subtle = { version = "^2.4", default-features = false }
rand_core = { version = "0.6", default-features = false }
# Used to send transactions
rand = { version = "0.8", default-features = false }
rand_chacha = { version = "0.3", default-features = false }
# Used to select decoys
rand_distr = { version = "0.4", default-features = false }
sha3 = { version = "0.10", default-features = false }
pbkdf2 = { version = "0.12", features = ["simple"], default-features = false }
curve25519-dalek = { version = "4", default-features = false, features = ["alloc", "zeroize", "precomputed-tables"] }
# Used for the hash to curve, along with the more complicated proofs
group = { version = "0.13", default-features = false }
dalek-ff-group = { path = "../../crypto/dalek-ff-group", version = "0.4", default-features = false }
multiexp = { path = "../../crypto/multiexp", version = "0.4", default-features = false, features = ["batch"] }
# Needed for multisig
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["recommended"], optional = true }
frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.8", default-features = false, features = ["ed25519"], optional = true }
monero-generators = { path = "generators", version = "0.4", default-features = false }
hex-literal = "0.4"
hex = { version = "0.4", default-features = false, features = ["alloc"] }
serde = { version = "1", default-features = false, features = ["derive", "alloc"] }
serde_json = { version = "1", default-features = false, features = ["alloc"] }
base58-monero = { version = "2", default-features = false, features = ["check"] }
# Used for the provided HTTP RPC
digest_auth = { version = "0.3", default-features = false, optional = true }
simple-request = { path = "../../common/request", version = "0.1", default-features = false, features = ["tls"], optional = true }
tokio = { version = "1", default-features = false, optional = true }
[build-dependencies]
dalek-ff-group = { path = "../../crypto/dalek-ff-group", version = "0.4", default-features = false }
monero-generators = { path = "generators", version = "0.4", default-features = false }
[dev-dependencies]
tokio = { version = "1", features = ["sync", "macros"] }
frost = { package = "modular-frost", path = "../../crypto/frost", features = ["tests"] }
[features]
std = [
"std-shims/std",
"thiserror",
"zeroize/std",
"subtle/std",
"rand_core/std",
"rand/std",
"rand_chacha/std",
"rand_distr/std",
"sha3/std",
"pbkdf2/std",
"multiexp/std",
"transcript/std",
"monero-generators/std",
"hex/std",
"serde/std",
"serde_json/std",
"base58-monero/std",
]
http-rpc = ["digest_auth", "simple-request", "tokio"]
multisig = ["transcript", "frost", "std"]
binaries = ["tokio/rt-multi-thread", "tokio/macros", "http-rpc"]
default = ["std", "http-rpc"]

61
coins/monero/README.md Normal file
View File

@@ -0,0 +1,61 @@
# monero-serai
A modern Monero transaction library intended for usage in wallets. It prides
itself on accuracy, correctness, and removing common pitfalls developers may
face.
monero-serai also offers the following features:
- Featured Addresses
- A FROST-based multisig orders of magnitude more performant than Monero's
### Purpose and support
monero-serai was written for Serai, a decentralized exchange aiming to support
Monero. Despite this, monero-serai is intended to be a widely usable library,
accurate to Monero. monero-serai guarantees the functionality needed for Serai,
yet will not deprive other users of functionality.
Various legacy transaction formats are not currently implemented, though we are
willing to add support for them. There are no active development efforts around
them, however.
### Caveats
This library DOES attempt to do the following:
- Create on-chain transactions identical to how wallet2 would (unless told not
to)
- Not be detectable as monero-serai when scanning outputs
- Not reveal spent outputs to the connected RPC node
This library DOES NOT attempt to do the following:
- Have identical RPC behavior when creating transactions
- Be a wallet
This means that monero-serai shouldn't be fingerprintable on-chain. It also
shouldn't be fingerprintable if a targeted attack occurs to detect if the
receiving wallet is monero-serai or wallet2. It should also be generally safe
for usage with remote nodes.
It won't hide from remote nodes that it's monero-serai however, potentially
allowing a remote node to profile you. The implications of this are left to the
user to consider.
It also won't act as a wallet, just as a transaction library. wallet2 has
several *non-transaction-level* policies, such as always attempting to use two
inputs to create transactions. These are considered out of scope to
monero-serai.
### Feature flags
monero-serai has certain functionality behind feature flags:
- `std`: Enables usage of Rust's `std` and several other features. See `Cargo.toml` for the full list.
- `http-rpc`: Enables an HTTP(S) transport type within the `rpc` module
- `multisig`: Enables multi-signature features within the `wallet` module
- `binaries`: TODO
The features enabled by default are:
- `std`
- `http-rpc`
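### Example
A brief sketch of the serialization API shared across this library's types, shown here
with `BlockHeader` (`Transaction` and `Block` expose the same `read`, `write`, and
`serialize` methods):
```rust
use monero_serai::block::BlockHeader;

let header = BlockHeader {
  major_version: 16,
  minor_version: 16,
  timestamp: 1_700_000_000,
  previous: [0; 32],
  nonce: 0,
};

// Serialization round-trips
let bytes = header.serialize();
assert_eq!(BlockHeader::read(&mut bytes.as_slice()).unwrap(), header);
```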

67
coins/monero/build.rs Normal file
View File

@@ -0,0 +1,67 @@
use std::{
io::Write,
env,
path::Path,
fs::{File, remove_file},
};
use dalek_ff_group::EdwardsPoint;
use monero_generators::bulletproofs_generators;
fn serialize(generators_string: &mut String, points: &[EdwardsPoint]) {
for generator in points {
generators_string.extend(
format!(
"
dalek_ff_group::EdwardsPoint(
curve25519_dalek::edwards::CompressedEdwardsY({:?}).decompress().unwrap()
),
",
generator.compress().to_bytes()
)
.chars(),
);
}
}
fn generators(prefix: &'static str, path: &str) {
let generators = bulletproofs_generators(prefix.as_bytes());
#[allow(non_snake_case)]
let mut G_str = String::new();
serialize(&mut G_str, &generators.G);
#[allow(non_snake_case)]
let mut H_str = String::new();
serialize(&mut H_str, &generators.H);
let path = Path::new(&env::var("OUT_DIR").unwrap()).join(path);
let _ = remove_file(&path);
File::create(&path)
.unwrap()
.write_all(
format!(
"
pub(crate) static GENERATORS_CELL: OnceLock<Generators> = OnceLock::new();
pub fn GENERATORS() -> &'static Generators {{
GENERATORS_CELL.get_or_init(|| Generators {{
G: vec![
{G_str}
],
H: vec![
{H_str}
],
}})
}}
",
)
.as_bytes(),
)
.unwrap();
}
fn main() {
println!("cargo:rerun-if-changed=build.rs");
generators("bulletproof", "generators.rs");
generators("bulletproof_plus", "generators_plus.rs");
}

View File

@@ -1,12 +1,11 @@
[package]
name = "monero-generators"
version = "0.4.0"
description = "Monero's hash to point function and generators"
description = "Monero's hash_to_point and generators"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/networks/monero/generators"
repository = "https://github.com/serai-dex/serai/tree/develop/coins/monero/generators"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.80"
[package.metadata.docs.rs]
all-features = true
@@ -21,27 +20,15 @@ std-shims = { path = "../../../common/std-shims", version = "^0.1.1", default-fe
subtle = { version = "^2.4", default-features = false }
sha3 = { version = "0.10", default-features = false }
curve25519-dalek = { version = "4", default-features = false, features = ["alloc", "zeroize"] }
curve25519-dalek = { version = "4", default-features = false, features = ["alloc", "zeroize", "precomputed-tables"] }
group = { version = "0.13", default-features = false }
dalek-ff-group = { path = "../../../crypto/dalek-ff-group", version = "0.4", default-features = false }
monero-io = { path = "../io", version = "0.1", default-features = false }
[dev-dependencies]
hex = "0.4"
[features]
std = [
"std-shims/std",
"subtle/std",
"sha3/std",
"group/alloc",
"dalek-ff-group/std",
"monero-io/std"
]
std = ["std-shims/std", "subtle/std", "sha3/std", "dalek-ff-group/std"]
default = ["std"]

View File

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2024 Luke Parker
Copyright (c) 2022-2023 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@@ -0,0 +1,7 @@
# Monero Generators
Generators used by Monero in both its Pedersen commitments and Bulletproofs(+).
An implementation of Monero's `ge_fromfe_frombytes_vartime`, simply called
`hash_to_point` here, is included, as it's needed to generate the generators.
This library is usable under no-std when the `std` feature is disabled.
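A minimal usage sketch (with the default `std` feature enabled):
```rust
use monero_generators::{H, hash_to_point, bulletproofs_generators};

// Monero's alternate generator `H`, used for amounts in Pedersen commitments
let h = H();

// Monero's hash to point function
let point = hash_to_point([0xab; 32]);

// The Bulletproofs generators, under Monero's domain-separation tag
let generators = bulletproofs_generators(b"bulletproof");
```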

View File

@@ -1,20 +1,27 @@
use subtle::ConditionallySelectable;
use curve25519_dalek::edwards::EdwardsPoint;
use curve25519_dalek::edwards::{EdwardsPoint, CompressedEdwardsY};
use group::ff::{Field, PrimeField};
use dalek_ff_group::FieldElement;
use monero_io::decompress_point;
use crate::hash;
use crate::keccak256;
/// Decompress a canonically-encoded Ed25519 point.
/// This does not check if the point is within the prime-order subgroup.
pub fn decompress_point(bytes: [u8; 32]) -> Option<EdwardsPoint> {
CompressedEdwardsY(bytes)
.decompress()
// Ban points which are either unreduced or -0
.filter(|point| point.compress().to_bytes() == bytes)
}
/// Monero's `hash_to_ec` function.
/// Monero's hash to point function, as named `hash_to_ec`.
pub fn hash_to_point(bytes: [u8; 32]) -> EdwardsPoint {
#[allow(non_snake_case)]
let A = FieldElement::from(486662u64);
let v = FieldElement::from_square(keccak256(&bytes)).double();
let v = FieldElement::from_square(hash(&bytes)).double();
let w = v + FieldElement::ONE;
let x = w.square() + (-A.square() * v);

View File

@@ -0,0 +1,79 @@
//! Generators used by Monero in both its Pedersen commitments and Bulletproofs(+).
//!
//! An implementation of Monero's `ge_fromfe_frombytes_vartime`, simply called
//! `hash_to_point` here, is included, as it's needed to generate the generators.
#![cfg_attr(not(feature = "std"), no_std)]
use std_shims::{sync::OnceLock, vec::Vec};
use sha3::{Digest, Keccak256};
use curve25519_dalek::edwards::{EdwardsPoint as DalekPoint};
use group::{Group, GroupEncoding};
use dalek_ff_group::EdwardsPoint;
mod varint;
use varint::write_varint;
mod hash_to_point;
pub use hash_to_point::{hash_to_point, decompress_point};
#[cfg(test)]
mod tests;
fn hash(data: &[u8]) -> [u8; 32] {
Keccak256::digest(data).into()
}
static H_CELL: OnceLock<DalekPoint> = OnceLock::new();
/// Monero's alternate generator `H`, used for amounts in Pedersen commitments.
#[allow(non_snake_case)]
pub fn H() -> DalekPoint {
*H_CELL.get_or_init(|| {
decompress_point(hash(&EdwardsPoint::generator().to_bytes())).unwrap().mul_by_cofactor()
})
}
static H_POW_2_CELL: OnceLock<[DalekPoint; 64]> = OnceLock::new();
/// Monero's alternate generator `H`, multiplied by 2**i for i in 0 .. 64.
#[allow(non_snake_case)]
pub fn H_pow_2() -> &'static [DalekPoint; 64] {
H_POW_2_CELL.get_or_init(|| {
let mut res = [H(); 64];
for i in 1 .. 64 {
res[i] = res[i - 1] + res[i - 1];
}
res
})
}
const MAX_M: usize = 16;
const N: usize = 64;
const MAX_MN: usize = MAX_M * N;
/// Container struct for Bulletproofs(+) generators.
#[allow(non_snake_case)]
pub struct Generators {
pub G: Vec<EdwardsPoint>,
pub H: Vec<EdwardsPoint>,
}
/// Generate generators as needed for Bulletproofs(+), as Monero does.
pub fn bulletproofs_generators(dst: &'static [u8]) -> Generators {
let mut res = Generators { G: Vec::with_capacity(MAX_MN), H: Vec::with_capacity(MAX_MN) };
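// Each generator is hash_to_point over keccak(H || dst || varint(index)), with even
// indexes yielding the `H` generators and odd indexes yielding the `G` generators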
for i in 0 .. MAX_MN {
let i = 2 * i;
let mut even = H().compress().to_bytes().to_vec();
even.extend(dst);
let mut odd = even.clone();
write_varint(&i.try_into().unwrap(), &mut even).unwrap();
write_varint(&(i + 1).try_into().unwrap(), &mut odd).unwrap();
res.H.push(EdwardsPoint(hash_to_point(hash(&even))));
res.G.push(EdwardsPoint(hash_to_point(hash(&odd))));
}
res
}

View File

@@ -1,7 +1,7 @@
use crate::{decompress_point, hash_to_point};
#[test]
fn test_vectors() {
fn crypto_tests() {
// tests.txt file copied from monero repo
// https://github.com/monero-project/monero/
// blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/tests/crypto/tests.txt
@@ -21,6 +21,7 @@ fn test_vectors() {
};
let actual = decompress_point(hex::decode(key).unwrap().try_into().unwrap());
assert_eq!(actual.is_some(), expected);
}
"hash_to_ec" => {
@@ -28,6 +29,7 @@ fn test_vectors() {
let expected = words.next().unwrap();
let actual = hash_to_point(hex::decode(bytes).unwrap().try_into().unwrap());
assert_eq!(hex::encode(actual.compress().to_bytes()), expected);
}
_ => unreachable!("unknown command"),

View File

@@ -0,0 +1 @@
mod hash_to_point;

View File

@@ -0,0 +1,16 @@
use std_shims::io::{self, Write};
const VARINT_CONTINUATION_MASK: u8 = 0b1000_0000;
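// Monero varints are little-endian base-128: each byte holds 7 bits of the value, with
// the high (continuation) bit set while further bytes follow
// e.g. 300 (0b1_0010_1100) serializes to [0xAC, 0x02]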
pub(crate) fn write_varint<W: Write>(varint: &u64, w: &mut W) -> io::Result<()> {
let mut varint = *varint;
while {
let mut b = u8::try_from(varint & u64::from(!VARINT_CONTINUATION_MASK)).unwrap();
varint >>= 7;
if varint != 0 {
b |= VARINT_CONTINUATION_MASK;
}
w.write_all(&[b])?;
varint != 0
} {}
Ok(())
}

View File

@@ -0,0 +1,321 @@
#[cfg(feature = "binaries")]
mod binaries {
pub(crate) use std::sync::Arc;
pub(crate) use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};
pub(crate) use multiexp::BatchVerifier;
pub(crate) use serde::Deserialize;
pub(crate) use serde_json::json;
pub(crate) use monero_serai::{
Commitment,
ringct::RctPrunable,
transaction::{Input, Transaction},
block::Block,
rpc::{RpcError, Rpc, HttpRpc},
};
pub(crate) use monero_generators::decompress_point;
pub(crate) use tokio::task::JoinHandle;
pub(crate) async fn check_block(rpc: Arc<Rpc<HttpRpc>>, block_i: usize) {
let hash = loop {
match rpc.get_block_hash(block_i).await {
Ok(hash) => break hash,
Err(RpcError::ConnectionError(e)) => {
println!("get_block_hash ConnectionError: {e}");
continue;
}
Err(e) => panic!("couldn't get block {block_i}'s hash: {e:?}"),
}
};
// TODO: Grab the JSON to also check it was deserialized correctly
#[derive(Deserialize, Debug)]
struct BlockResponse {
blob: String,
}
let res: BlockResponse = loop {
match rpc.json_rpc_call("get_block", Some(json!({ "hash": hex::encode(hash) }))).await {
Ok(res) => break res,
Err(RpcError::ConnectionError(e)) => {
println!("get_block ConnectionError: {e}");
continue;
}
Err(e) => panic!("couldn't get block {block_i} via block.hash(): {e:?}"),
}
};
let blob = hex::decode(res.blob).expect("node returned non-hex block");
let block = Block::read(&mut blob.as_slice())
.unwrap_or_else(|e| panic!("couldn't deserialize block {block_i}: {e}"));
assert_eq!(block.hash(), hash, "hash differs");
assert_eq!(block.serialize(), blob, "serialization differs");
let txs_len = 1 + block.txs.len();
if !block.txs.is_empty() {
#[derive(Deserialize, Debug)]
struct TransactionResponse {
tx_hash: String,
as_hex: String,
}
#[derive(Deserialize, Debug)]
struct TransactionsResponse {
#[serde(default)]
missed_tx: Vec<String>,
txs: Vec<TransactionResponse>,
}
let mut hashes_hex = block.txs.iter().map(hex::encode).collect::<Vec<_>>();
let mut all_txs = vec![];
while !hashes_hex.is_empty() {
let txs: TransactionsResponse = loop {
match rpc
.rpc_call(
"get_transactions",
Some(json!({
"txs_hashes": hashes_hex.drain(.. hashes_hex.len().min(100)).collect::<Vec<_>>(),
})),
)
.await
{
Ok(txs) => break txs,
Err(RpcError::ConnectionError(e)) => {
println!("get_transactions ConnectionError: {e}");
continue;
}
Err(e) => panic!("couldn't call get_transactions: {e:?}"),
}
};
assert!(txs.missed_tx.is_empty());
all_txs.extend(txs.txs);
}
let mut batch = BatchVerifier::new(block.txs.len());
for (tx_hash, tx_res) in block.txs.into_iter().zip(all_txs) {
assert_eq!(
tx_res.tx_hash,
hex::encode(tx_hash),
"node returned a transaction with different hash"
);
let tx = Transaction::read(
&mut hex::decode(&tx_res.as_hex).expect("node returned non-hex transaction").as_slice(),
)
.expect("couldn't deserialize transaction");
assert_eq!(
hex::encode(tx.serialize()),
tx_res.as_hex,
"Transaction serialization was different"
);
assert_eq!(tx.hash(), tx_hash, "Transaction hash was different");
if matches!(tx.rct_signatures.prunable, RctPrunable::Null) {
assert_eq!(tx.prefix.version, 1);
assert!(!tx.signatures.is_empty());
continue;
}
let sig_hash = tx.signature_hash();
// Verify all proofs we support proving for
// This is due to having debug_asserts calling verify within their proving, and CLSAG
// multisig explicitly calling verify as part of its signing process
// Accordingly, making sure our signature_hash algorithm is correct is great, and further
// making sure the verification functions are valid is appreciated
match tx.rct_signatures.prunable {
RctPrunable::Null |
RctPrunable::AggregateMlsagBorromean { .. } |
RctPrunable::MlsagBorromean { .. } => {}
RctPrunable::MlsagBulletproofs { bulletproofs, .. } => {
assert!(bulletproofs.batch_verify(
&mut rand_core::OsRng,
&mut batch,
(),
&tx.rct_signatures.base.commitments
));
}
RctPrunable::Clsag { bulletproofs, clsags, pseudo_outs } => {
assert!(bulletproofs.batch_verify(
&mut rand_core::OsRng,
&mut batch,
(),
&tx.rct_signatures.base.commitments
));
for (i, clsag) in clsags.into_iter().enumerate() {
let (amount, key_offsets, image) = match &tx.prefix.inputs[i] {
Input::Gen(_) => panic!("Input::Gen"),
Input::ToKey { amount, key_offsets, key_image } => (amount, key_offsets, key_image),
};
let mut running_sum = 0;
let mut actual_indexes = vec![];
for offset in key_offsets {
running_sum += offset;
actual_indexes.push(running_sum);
}
async fn get_outs(
rpc: &Rpc<HttpRpc>,
amount: u64,
indexes: &[u64],
) -> Vec<[EdwardsPoint; 2]> {
#[derive(Deserialize, Debug)]
struct Out {
key: String,
mask: String,
}
#[derive(Deserialize, Debug)]
struct Outs {
outs: Vec<Out>,
}
let outs: Outs = loop {
match rpc
.rpc_call(
"get_outs",
Some(json!({
"get_txid": true,
"outputs": indexes.iter().map(|o| json!({
"amount": amount,
"index": o
})).collect::<Vec<_>>()
})),
)
.await
{
Ok(outs) => break outs,
Err(RpcError::ConnectionError(e)) => {
println!("get_outs ConnectionError: {e}");
continue;
}
Err(e) => panic!("couldn't connect to RPC to get outs: {e:?}"),
}
};
let rpc_point = |point: &str| {
decompress_point(
hex::decode(point)
.expect("invalid hex for ring member")
.try_into()
.expect("invalid point len for ring member"),
)
.expect("invalid point for ring member")
};
outs
.outs
.iter()
.map(|out| {
let mask = rpc_point(&out.mask);
if amount != 0 {
assert_eq!(mask, Commitment::new(Scalar::from(1u8), amount).calculate());
}
[rpc_point(&out.key), mask]
})
.collect()
}
clsag
.verify(
&get_outs(&rpc, amount.unwrap_or(0), &actual_indexes).await,
image,
&pseudo_outs[i],
&sig_hash,
)
.unwrap();
}
}
}
}
assert!(batch.verify_vartime());
}
println!("Deserialized, hashed, and reserialized {block_i} with {txs_len} TXs");
}
}
#[cfg(feature = "binaries")]
#[tokio::main]
async fn main() {
use binaries::*;
let args = std::env::args().collect::<Vec<String>>();
// Read start block as the first arg
let mut block_i = args[1].parse::<usize>().expect("invalid start block");
// How many blocks to work on at once
let async_parallelism: usize =
args.get(2).unwrap_or(&"8".to_string()).parse::<usize>().expect("invalid parallelism argument");
// Read further args as RPC URLs
let default_nodes = vec![
"http://xmr-node.cakewallet.com:18081".to_string(),
"https://node.sethforprivacy.com".to_string(),
];
let mut specified_nodes = vec![];
{
let mut i = 0;
loop {
let Some(node) = args.get(3 + i) else { break };
specified_nodes.push(node.clone());
i += 1;
}
}
let nodes = if specified_nodes.is_empty() { default_nodes } else { specified_nodes };
let rpc = |url: String| async move {
HttpRpc::new(url.clone())
.await
.unwrap_or_else(|_| panic!("couldn't create HttpRpc connected to {url}"))
};
let main_rpc = rpc(nodes[0].clone()).await;
let mut rpcs = vec![];
for i in 0 .. async_parallelism {
rpcs.push(Arc::new(rpc(nodes[i % nodes.len()].clone()).await));
}
let mut rpc_i = 0;
let mut handles: Vec<JoinHandle<()>> = vec![];
let mut height = 0;
loop {
let new_height = main_rpc.get_height().await.expect("couldn't call get_height");
if new_height == height {
break;
}
height = new_height;
while block_i < height {
if handles.len() >= async_parallelism {
// Guarantee one handle is complete
handles.swap_remove(0).await.unwrap();
// Remove all of the finished handles
let mut i = 0;
while i < handles.len() {
if handles[i].is_finished() {
handles.swap_remove(i).await.unwrap();
continue;
}
i += 1;
}
}
handles.push(tokio::spawn(check_block(rpcs[rpc_i].clone(), block_i)));
rpc_i = (rpc_i + 1) % rpcs.len();
block_i += 1;
}
}
}
#[cfg(not(feature = "binaries"))]
fn main() {
panic!("To run binaries, please build with `--feature binaries`.");
}

coins/monero/src/block.rs (new file, 224 lines)

@@ -0,0 +1,224 @@
use std_shims::{
vec::Vec,
io::{self, Read, Write},
};
use crate::{
hash,
merkle::merkle_root,
serialize::*,
transaction::{Input, Transaction},
};
const CORRECT_BLOCK_HASH_202612: [u8; 32] =
hex_literal::hex!("426d16cff04c71f8b16340b722dc4010a2dd3831c22041431f772547ba6e331a");
const EXISTING_BLOCK_HASH_202612: [u8; 32] =
hex_literal::hex!("bbd604d2ba11ba27935e006ed39c9bfdd99b76bf4a50654bc1e1e61217962698");
/// The header of a [`Block`].
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BlockHeader {
/// This represents the hardfork number of the block.
pub major_version: u8,
/// This field is used to vote for a particular [hardfork](https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/cryptonote_basic/cryptonote_basic.h#L460).
pub minor_version: u8,
/// The UNIX time at which the block was mined.
pub timestamp: u64,
/// The previous [`Block::hash`].
pub previous: [u8; 32],
/// The block's nonce.
pub nonce: u32,
}
impl BlockHeader {
/// Serialize [`Self`] into the writer `w`.
///
/// # Example
/// ```rust
/// # use monero_serai::block::*;
/// # fn main() -> std::io::Result<()> {
/// let block_header = BlockHeader {
/// major_version: 1,
/// minor_version: 2,
/// timestamp: 3,
/// previous: [4; 32],
/// nonce: 5,
/// };
///
/// let mut writer = vec![];
/// block_header.write(&mut writer)?;
/// # Ok(()) }
/// ```
///
/// # Errors
/// This function returns any errors from the writer itself.
pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
write_varint(&self.major_version, w)?;
write_varint(&self.minor_version, w)?;
write_varint(&self.timestamp, w)?;
w.write_all(&self.previous)?;
w.write_all(&self.nonce.to_le_bytes())
}
/// Serialize [`Self`] into a new byte buffer.
///
/// # Example
/// ```rust
/// # use monero_serai::block::*;
/// # fn main() -> std::io::Result<()> {
/// let block_header = BlockHeader {
/// major_version: 1,
/// minor_version: 2,
/// timestamp: 3,
/// previous: [4; 32],
/// nonce: 5,
/// };
///
/// let mut writer = vec![];
/// block_header.write(&mut writer)?;
///
/// let serialized = block_header.serialize();
/// assert_eq!(serialized, writer);
/// # Ok(()) }
/// ```
pub fn serialize(&self) -> Vec<u8> {
let mut serialized = vec![];
self.write(&mut serialized).unwrap();
serialized
}
/// Create [`Self`] from the reader `r`.
///
/// # Example
/// ```rust
/// # use monero_serai::block::*;
/// # fn main() -> std::io::Result<()> {
/// let block_header = BlockHeader {
/// major_version: 1,
/// minor_version: 2,
/// timestamp: 3,
/// previous: [4; 32],
/// nonce: 5,
/// };
///
/// let mut vec = vec![];
/// block_header.write(&mut vec)?;
///
/// let read = BlockHeader::read(&mut vec.as_slice())?;
/// assert_eq!(read, block_header);
/// # Ok(()) }
/// ```
///
/// # Errors
/// This function returns an error if either the reader failed,
/// or if the data could not be deserialized into a [`Self`].
pub fn read<R: Read>(r: &mut R) -> io::Result<BlockHeader> {
Ok(BlockHeader {
major_version: read_varint(r)?,
minor_version: read_varint(r)?,
timestamp: read_varint(r)?,
previous: read_bytes(r)?,
nonce: read_bytes(r).map(u32::from_le_bytes)?,
})
}
}
/// Block on the Monero blockchain.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Block {
/// The header of this block.
pub header: BlockHeader,
/// The miner/coinbase transaction.
pub miner_tx: Transaction,
/// Hashes of all the transactions within this block.
pub txs: Vec<[u8; 32]>,
}
impl Block {
/// Return this block's number (height), as stated within its miner transaction's [`Input::Gen`].
pub fn number(&self) -> Option<u64> {
match self.miner_tx.prefix.inputs.first() {
Some(Input::Gen(number)) => Some(*number),
_ => None,
}
}
/// Serialize [`Self`] into the writer `w`.
///
/// # Errors
/// This function returns any errors from the writer itself.
pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
self.header.write(w)?;
self.miner_tx.write(w)?;
write_varint(&self.txs.len(), w)?;
for tx in &self.txs {
w.write_all(tx)?;
}
Ok(())
}
/// Return the merkle root of this block.
///
/// If this block has no transactions other than the miner transaction, the
/// miner transaction's hash ([`Transaction::hash`] of [`Self::miner_tx`]) is
/// returned.
fn tx_merkle_root(&self) -> [u8; 32] {
merkle_root(self.miner_tx.hash(), &self.txs)
}
/// Serialize the block as required for the proof of work hash.
///
/// This is distinct from the serialization required for the block hash. To get the block hash,
/// use the [`Block::hash`] function.
pub fn serialize_hashable(&self) -> Vec<u8> {
let mut blob = self.header.serialize();
blob.extend_from_slice(&self.tx_merkle_root());
write_varint(&(1 + u64::try_from(self.txs.len()).unwrap()), &mut blob).unwrap();
blob
}
/// Calculate the hash of this block.
pub fn hash(&self) -> [u8; 32] {
let mut hashable = self.serialize_hashable();
// Monero prepends a VarInt of the hashing blob's length when calculating the block hash,
// yet doesn't do so when calculating the proof of work hash :)
let mut hashing_blob = Vec::with_capacity(8 + hashable.len());
write_varint(&u64::try_from(hashable.len()).unwrap(), &mut hashing_blob).unwrap();
hashing_blob.append(&mut hashable);
let hash = hash(&hashing_blob);
if hash == CORRECT_BLOCK_HASH_202612 {
return EXISTING_BLOCK_HASH_202612;
};
hash
}
/// Serialize [`Self`] into a new byte buffer.
pub fn serialize(&self) -> Vec<u8> {
let mut serialized = vec![];
self.write(&mut serialized).unwrap();
serialized
}
/// Create [`Self`] from the reader `r`.
///
/// # Errors
/// This function returns an error if either the reader failed,
/// or if the data could not be deserialized into a [`Self`].
pub fn read<R: Read>(r: &mut R) -> io::Result<Block> {
let header = BlockHeader::read(r)?;
let miner_tx = Transaction::read(r)?;
if !matches!(miner_tx.prefix.inputs.as_slice(), &[Input::Gen(_)]) {
Err(io::Error::other("Miner transaction has incorrect input type."))?;
}
Ok(Block {
header,
miner_tx,
txs: (0_usize..read_varint(r)?).map(|_| read_bytes(r)).collect::<Result<_, _>>()?,
})
}
}

coins/monero/src/lib.rs (new file, 308 lines)

@@ -0,0 +1,308 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(not(feature = "std"))]
#[macro_use]
extern crate alloc;
use std_shims::{sync::OnceLock, io};
use rand_core::{RngCore, CryptoRng};
use zeroize::{Zeroize, ZeroizeOnDrop};
use sha3::{Digest, Keccak256};
use curve25519_dalek::{
constants::{ED25519_BASEPOINT_TABLE, ED25519_BASEPOINT_POINT},
scalar::Scalar,
edwards::{EdwardsPoint, VartimeEdwardsPrecomputation},
traits::VartimePrecomputedMultiscalarMul,
};
pub use monero_generators::{H, decompress_point};
mod merkle;
mod serialize;
use serialize::{read_byte, read_u16};
/// UnreducedScalar struct with functionality for recovering incorrectly reduced scalars.
mod unreduced_scalar;
/// Ring Signature structs and functionality.
pub mod ring_signatures;
/// RingCT structs and functionality.
pub mod ringct;
use ringct::RctType;
/// Transaction structs.
pub mod transaction;
/// Block structs.
pub mod block;
/// Monero daemon RPC interface.
pub mod rpc;
/// Wallet functionality, enabling scanning and sending transactions.
pub mod wallet;
#[cfg(test)]
mod tests;
/// Default block lock time for transactions.
///
/// This is the number of blocks which must
/// pass before a transaction's outputs may be spent.
///
/// Equivalent to Monero's [`CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE`](https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/cryptonote_config.h#L49).
pub const DEFAULT_LOCK_WINDOW: usize = 10;
/// Block lock time for coinbase transactions.
///
/// This is the number of blocks which must
/// pass before a coinbase/miner transaction's outputs may be spent.
///
/// Equivalent to Monero's [`CRYPTONOTE_MINED_MONEY_UNLOCK_WINDOW`](https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/cryptonote_config.h#L44).
pub const COINBASE_LOCK_WINDOW: usize = 60;
/// Average amount of seconds it takes for a block to be mined.
///
/// This is the target amount of seconds mining difficulty adjusts to,
/// i.e. a block will be mined every `BLOCK_TIME` seconds on average.
///
/// Equivalent to Monero's [`DIFFICULTY_TARGET_V2`](https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/cryptonote_config.h#L44).
pub const BLOCK_TIME: usize = 120;
static INV_EIGHT_CELL: OnceLock<Scalar> = OnceLock::new();
#[allow(non_snake_case)]
pub(crate) fn INV_EIGHT() -> Scalar {
*INV_EIGHT_CELL.get_or_init(|| Scalar::from(8u8).invert())
}
static BASEPOINT_PRECOMP_CELL: OnceLock<VartimeEdwardsPrecomputation> = OnceLock::new();
#[allow(non_snake_case)]
pub(crate) fn BASEPOINT_PRECOMP() -> &'static VartimeEdwardsPrecomputation {
BASEPOINT_PRECOMP_CELL
.get_or_init(|| VartimeEdwardsPrecomputation::new([ED25519_BASEPOINT_POINT]))
}
/// Monero protocol version.
///
/// v15 is omitted, as v15 simply had both v14 and v16 transactions active at the same time.
/// Accordingly, v16 should be used during v15.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
#[allow(non_camel_case_types)]
pub enum Protocol {
/// Version 14.
v14,
/// Version 16.
v16,
/// A custom version with customized properties.
Custom {
/// See [`Self::ring_len`].
ring_len: usize,
/// See [`Self::bp_plus`].
bp_plus: bool,
/// See [`Self::optimal_rct_type`].
optimal_rct_type: RctType,
/// See [`Self::view_tags`].
view_tags: bool,
/// See [`Self::v16_fee`].
v16_fee: bool,
},
}
impl Protocol {
/// Amount of ring members under this protocol version.
///
/// # Example
/// ```rust
/// # use monero_serai::*;
/// assert_eq!(Protocol::v14.ring_len(), 11);
/// assert_eq!(Protocol::v16.ring_len(), 16);
/// ```
pub fn ring_len(&self) -> usize {
match self {
Protocol::v14 => 11,
Protocol::v16 => 16,
Protocol::Custom { ring_len, .. } => *ring_len,
}
}
/// Whether the specified version uses Bulletproofs or Bulletproofs+.
///
/// This method will likely be reworked when versions not using Bulletproofs at all are added.
///
/// # Example
/// ```rust
/// # use monero_serai::*;
/// assert_eq!(Protocol::v14.bp_plus(), false);
/// assert_eq!(Protocol::v16.bp_plus(), true);
/// ```
pub fn bp_plus(&self) -> bool {
match self {
Protocol::v14 => false,
Protocol::v16 => true,
Protocol::Custom { bp_plus, .. } => *bp_plus,
}
}
/// The optimal RingCT type for this version.
///
/// # Example
/// ```rust
/// # use monero_serai::{*, ringct::*};
/// assert_eq!(Protocol::v14.optimal_rct_type(), RctType::Clsag);
/// assert_eq!(Protocol::v16.optimal_rct_type(), RctType::BulletproofsPlus);
/// ```
// TODO: Make this an Option when we support pre-RCT protocols
pub fn optimal_rct_type(&self) -> RctType {
match self {
Protocol::v14 => RctType::Clsag,
Protocol::v16 => RctType::BulletproofsPlus,
Protocol::Custom { optimal_rct_type, .. } => *optimal_rct_type,
}
}
/// Whether or not the specified version uses view tags.
///
/// # Example
/// ```rust
/// # use monero_serai::{*, ringct::*};
/// assert_eq!(Protocol::v14.view_tags(), false);
/// assert_eq!(Protocol::v16.view_tags(), true);
/// ```
pub fn view_tags(&self) -> bool {
match self {
Protocol::v14 => false,
Protocol::v16 => true,
Protocol::Custom { view_tags, .. } => *view_tags,
}
}
/// Whether or not the specified version uses the fee algorithm from Monero
/// hard fork version 16 (released in v18 binaries).
///
/// # Example
/// ```rust
/// # use monero_serai::{*, ringct::*};
/// assert_eq!(Protocol::v14.v16_fee(), false);
/// assert_eq!(Protocol::v16.v16_fee(), true);
/// ```
pub fn v16_fee(&self) -> bool {
match self {
Protocol::v14 => false,
Protocol::v16 => true,
Protocol::Custom { v16_fee, .. } => *v16_fee,
}
}
pub(crate) fn write<W: io::Write>(&self, w: &mut W) -> io::Result<()> {
match self {
Protocol::v14 => w.write_all(&[0, 14]),
Protocol::v16 => w.write_all(&[0, 16]),
Protocol::Custom { ring_len, bp_plus, optimal_rct_type, view_tags, v16_fee } => {
// Custom, version 0
w.write_all(&[1, 0])?;
w.write_all(&u16::try_from(*ring_len).unwrap().to_le_bytes())?;
w.write_all(&[u8::from(*bp_plus)])?;
w.write_all(&[optimal_rct_type.to_byte()])?;
w.write_all(&[u8::from(*view_tags)])?;
w.write_all(&[u8::from(*v16_fee)])
}
}
}
pub(crate) fn read<R: io::Read>(r: &mut R) -> io::Result<Protocol> {
Ok(match read_byte(r)? {
// Monero protocol
0 => match read_byte(r)? {
14 => Protocol::v14,
16 => Protocol::v16,
_ => Err(io::Error::other("unrecognized monero protocol"))?,
},
// Custom
1 => match read_byte(r)? {
0 => Protocol::Custom {
ring_len: read_u16(r)?.into(),
bp_plus: match read_byte(r)? {
0 => false,
1 => true,
_ => Err(io::Error::other("invalid bool serialization"))?,
},
optimal_rct_type: RctType::from_byte(read_byte(r)?)
.ok_or_else(|| io::Error::other("invalid RctType serialization"))?,
view_tags: match read_byte(r)? {
0 => false,
1 => true,
_ => Err(io::Error::other("invalid bool serialization"))?,
},
v16_fee: match read_byte(r)? {
0 => false,
1 => true,
_ => Err(io::Error::other("invalid bool serialization"))?,
},
},
_ => Err(io::Error::other("unrecognized custom protocol serialization"))?,
},
_ => Err(io::Error::other("unrecognized protocol serialization"))?,
})
}
}
/// Transparent structure representing a [Pedersen commitment](https://web.getmonero.org/resources/moneropedia/pedersen-commitment.html)'s contents.
#[allow(non_snake_case)]
#[derive(Clone, PartialEq, Eq, Zeroize, ZeroizeOnDrop)]
pub struct Commitment {
/// The value used to mask the `amount`.
pub mask: Scalar,
/// The value being masked.
///
/// In Monero's case, this is the amount of XMR in atomic units.
pub amount: u64,
}
impl core::fmt::Debug for Commitment {
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
fmt.debug_struct("Commitment").field("amount", &self.amount).finish_non_exhaustive()
}
}
impl Commitment {
/// A commitment to zero, defined with a mask of 1 (so as not to be the identity).
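///
/// # Example
/// A minimal sketch of its definition:
/// ```rust
/// # use monero_serai::Commitment;
/// use curve25519_dalek::scalar::Scalar;
/// let zero = Commitment::zero();
/// assert_eq!(zero.mask, Scalar::ONE);
/// assert_eq!(zero.amount, 0);
/// ```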
pub fn zero() -> Commitment {
Commitment { mask: Scalar::ONE, amount: 0 }
}
/// Create a new [`Self`].
pub fn new(mask: Scalar, amount: u64) -> Commitment {
Commitment { mask, amount }
}
/// Calculate a Pedersen commitment, as a point, from the transparent structure.
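///
/// # Example
/// A minimal sketch of the identity `mask * G + amount * H`; with a mask of 1 and an amount
/// of 0, the commitment is simply the basepoint:
/// ```rust
/// # use monero_serai::Commitment;
/// use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, scalar::Scalar};
/// assert_eq!(Commitment::new(Scalar::ONE, 0).calculate(), ED25519_BASEPOINT_POINT);
/// ```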
pub fn calculate(&self) -> EdwardsPoint {
(&self.mask * ED25519_BASEPOINT_TABLE) + (Scalar::from(self.amount) * H())
}
}
/// Support generating a random scalar using a modern rand, as dalek's is notoriously dated.
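///
/// # Example
/// A sketch over a generic RNG; any `RngCore + CryptoRng` implementation may be passed:
/// ```rust
/// # use monero_serai::random_scalar;
/// fn sample<R: rand_core::RngCore + rand_core::CryptoRng>(rng: &mut R) -> curve25519_dalek::Scalar {
/// random_scalar(rng)
/// }
/// ```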
pub fn random_scalar<R: RngCore + CryptoRng>(rng: &mut R) -> Scalar {
let mut r = [0; 64];
rng.fill_bytes(&mut r);
Scalar::from_bytes_mod_order_wide(&r)
}
pub(crate) fn hash(data: &[u8]) -> [u8; 32] {
Keccak256::digest(data).into()
}
/// Hash the provided data to a scalar via keccak256(data) % l.
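///
/// # Example
/// A minimal sketch; the mapping is deterministic:
/// ```rust
/// # use monero_serai::hash_to_scalar;
/// assert_eq!(hash_to_scalar(&[1; 32]), hash_to_scalar(&[1; 32]));
/// ```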
pub fn hash_to_scalar(data: &[u8]) -> Scalar {
let scalar = Scalar::from_bytes_mod_order(hash(data));
// Monero will explicitly error in this case
// This library acknowledges the practical impossibility of it occurring and doesn't include
// logic to handle it. That said, if it ever does occur, something must happen in order to not
// generate/verify a proof we believe to be valid when it isn't
assert!(scalar != Scalar::ZERO, "ZERO HASH: {data:?}");
scalar
}


@@ -1,11 +1,11 @@
use std_shims::vec::Vec;
use crate::primitives::keccak256;
use crate::hash;
pub(crate) fn merkle_root(root: [u8; 32], leafs: &[[u8; 32]]) -> [u8; 32] {
match leafs.len() {
0 => root,
1 => keccak256([root, leafs[0]].concat()),
1 => hash(&[root, leafs[0]].concat()),
_ => {
let mut hashes = Vec::with_capacity(1 + leafs.len());
hashes.push(root);
@@ -29,7 +29,7 @@ pub(crate) fn merkle_root(root: [u8; 32], leafs: &[[u8; 32]]) -> [u8; 32] {
let mut paired_hashes = Vec::with_capacity(overage);
while let Some(left) = rightmost.next() {
let right = rightmost.next().unwrap();
paired_hashes.push(keccak256([left.as_ref(), &right].concat()));
paired_hashes.push(hash(&[left.as_ref(), &right].concat()));
}
drop(rightmost);
@@ -42,7 +42,7 @@ pub(crate) fn merkle_root(root: [u8; 32], leafs: &[[u8; 32]]) -> [u8; 32] {
while hashes.len() > 1 {
let mut i = 0;
while i < hashes.len() {
new_hashes.push(keccak256([hashes[i], hashes[i + 1]].concat()));
new_hashes.push(hash(&[hashes[i], hashes[i + 1]].concat()));
i += 2;
}


@@ -0,0 +1,92 @@
use std_shims::{
io::{self, *},
vec::Vec,
};
use zeroize::Zeroize;
use curve25519_dalek::{EdwardsPoint, Scalar};
use monero_generators::hash_to_point;
use crate::{serialize::*, hash_to_scalar};
/// A signature within a [`RingSignature`].
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct Signature {
c: Scalar,
r: Scalar,
}
impl Signature {
/// Serialize [`Self`] into the writer `w`.
///
/// # Errors
/// This function returns any errors from the writer itself.
pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
write_scalar(&self.c, w)?;
write_scalar(&self.r, w)?;
Ok(())
}
/// Create [`Self`] from the reader `r`.
///
/// # Errors
/// This function returns an error if either the reader failed,
/// or if the data could not be deserialized into a [`Self`].
pub fn read<R: Read>(r: &mut R) -> io::Result<Signature> {
Ok(Signature { c: read_scalar(r)?, r: read_scalar(r)? })
}
}
/// A [ring signature](https://en.wikipedia.org/wiki/Ring_signature).
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct RingSignature {
sigs: Vec<Signature>,
}
impl RingSignature {
/// Serialize [`Self`] into the writer `w`.
///
/// # Errors
/// This function returns any errors from the writer itself.
pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
for sig in &self.sigs {
sig.write(w)?;
}
Ok(())
}
/// Create [`Self`] from the reader `r`.
///
/// # Errors
/// This function returns an error if either the reader failed,
/// or if the data could not be deserialized into a [`Self`].
pub fn read<R: Read>(members: usize, r: &mut R) -> io::Result<RingSignature> {
Ok(RingSignature { sigs: read_raw_vec(Signature::read, members, r)? })
}
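/// Verify the ring signature is valid for the provided message, ring, and key image.
///
/// For each ring member `P_i`, this recomputes `L_i = (c_i * P_i) + (r_i * G)` and
/// `R_i = (r_i * Hp(P_i)) + (c_i * I)`, then checks the challenges sum to
/// `Hs(msg || L_0 || R_0 || ..)`.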
pub fn verify(&self, msg: &[u8; 32], ring: &[EdwardsPoint], key_image: &EdwardsPoint) -> bool {
if ring.len() != self.sigs.len() {
return false;
}
let mut buf = Vec::with_capacity(32 + (32 * 2 * ring.len()));
buf.extend_from_slice(msg);
let mut sum = Scalar::ZERO;
for (ring_member, sig) in ring.iter().zip(&self.sigs) {
#[allow(non_snake_case)]
let Li = EdwardsPoint::vartime_double_scalar_mul_basepoint(&sig.c, ring_member, &sig.r);
buf.extend_from_slice(Li.compress().as_bytes());
#[allow(non_snake_case)]
let Ri = (sig.r * hash_to_point(ring_member.compress().to_bytes())) + (sig.c * key_image);
buf.extend_from_slice(Ri.compress().as_bytes());
sum += sig.c;
}
sum == hash_to_scalar(&buf)
}
}


@@ -1,26 +1,18 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
#![allow(non_snake_case)]
use core::fmt::Debug;
use std_shims::io::{self, Read, Write};
use zeroize::Zeroize;
use curve25519_dalek::{traits::Identity, Scalar, EdwardsPoint};
use monero_io::*;
use monero_generators::H_pow_2;
use monero_primitives::{keccak256_to_scalar, UnreducedScalar};
// 64 Borromean ring signatures, as needed for a 64-bit range proof.
//
// s0 and s1 are stored as `UnreducedScalar`s due to Monero not requiring they were reduced.
// `UnreducedScalar` preserves their original byte encoding and implements a custom reduction
// algorithm which was in use.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
use crate::{hash_to_scalar, unreduced_scalar::UnreducedScalar, serialize::*};
/// 64 Borromean ring signatures, as needed for a 64-bit range proof.
///
/// s0 and s1 are stored as `UnreducedScalar`s due to Monero not requiring they were reduced.
/// `UnreducedScalar` preserves their original byte encoding and implements a custom reduction
/// algorithm which was in use.
#[derive(Clone, PartialEq, Eq, Debug)]
struct BorromeanSignatures {
s0: [UnreducedScalar; 64],
s1: [UnreducedScalar; 64],
@@ -28,7 +20,7 @@ struct BorromeanSignatures {
}
impl BorromeanSignatures {
// Read a set of BorromeanSignatures.
/// Read a set of BorromeanSignatures from a reader.
fn read<R: Read>(r: &mut R) -> io::Result<BorromeanSignatures> {
Ok(BorromeanSignatures {
s0: read_array(UnreducedScalar::read, r)?,
@@ -37,7 +29,7 @@ impl BorromeanSignatures {
})
}
// Write the set of BorromeanSignatures.
/// Write the set of BorromeanSignatures to a writer.
fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
for s0 in &self.s0 {
s0.write(w)?;
@@ -60,26 +52,26 @@ impl BorromeanSignatures {
);
#[allow(non_snake_case)]
let LV = EdwardsPoint::vartime_double_scalar_mul_basepoint(
&keccak256_to_scalar(LL.compress().as_bytes()),
&hash_to_scalar(LL.compress().as_bytes()),
&keys_b[i],
&self.s1[i].recover_monero_slide_scalar(),
);
transcript[(i * 32) .. ((i + 1) * 32)].copy_from_slice(LV.compress().as_bytes());
}
keccak256_to_scalar(transcript) == self.ee
hash_to_scalar(&transcript) == self.ee
}
}
/// A range proof premised on Borromean ring signatures.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BorromeanRange {
sigs: BorromeanSignatures,
bit_commitments: [EdwardsPoint; 64],
}
impl BorromeanRange {
/// Read a BorromeanRange proof.
/// Read a BorromeanRange proof from a reader.
pub fn read<R: Read>(r: &mut R) -> io::Result<BorromeanRange> {
Ok(BorromeanRange {
sigs: BorromeanSignatures::read(r)?,
@@ -87,14 +79,13 @@ impl BorromeanRange {
})
}
/// Write the BorromeanRange proof.
/// Write the BorromeanRange proof to a writer.
pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
self.sigs.write(w)?;
write_raw_vec(write_point, &self.bit_commitments, w)
}
/// Verify the commitment contains a 64-bit value.
#[must_use]
pub fn verify(&self, commitment: &EdwardsPoint) -> bool {
if &self.bit_commitments.iter().sum::<EdwardsPoint>() != commitment {
return false;


@@ -0,0 +1,151 @@
use std_shims::{vec::Vec, sync::OnceLock};
use rand_core::{RngCore, CryptoRng};
use subtle::{Choice, ConditionallySelectable};
use curve25519_dalek::edwards::EdwardsPoint as DalekPoint;
use group::{ff::Field, Group};
use dalek_ff_group::{Scalar, EdwardsPoint};
use multiexp::multiexp as multiexp_const;
pub(crate) use monero_generators::Generators;
use crate::{INV_EIGHT as DALEK_INV_EIGHT, H as DALEK_H, Commitment, hash_to_scalar as dalek_hash};
pub(crate) use crate::ringct::bulletproofs::scalar_vector::*;
#[inline]
pub(crate) fn INV_EIGHT() -> Scalar {
Scalar(DALEK_INV_EIGHT())
}
#[inline]
pub(crate) fn H() -> EdwardsPoint {
EdwardsPoint(DALEK_H())
}
pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar {
Scalar(dalek_hash(data))
}
// Components common between variants
pub(crate) const MAX_M: usize = 16;
pub(crate) const LOG_N: usize = 6; // 2 << 6 == N
pub(crate) const N: usize = 64;
pub(crate) fn prove_multiexp(pairs: &[(Scalar, EdwardsPoint)]) -> EdwardsPoint {
multiexp_const(pairs) * INV_EIGHT()
}
pub(crate) fn vector_exponent(
generators: &Generators,
a: &ScalarVector,
b: &ScalarVector,
) -> EdwardsPoint {
debug_assert_eq!(a.len(), b.len());
(a * &generators.G[.. a.len()]) + (b * &generators.H[.. b.len()])
}
pub(crate) fn hash_cache(cache: &mut Scalar, mash: &[[u8; 32]]) -> Scalar {
let slice =
&[cache.to_bytes().as_ref(), mash.iter().copied().flatten().collect::<Vec<_>>().as_ref()]
.concat();
*cache = hash_to_scalar(slice);
*cache
}
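// Return (logMN, M, MN), where M is the amount of commitments padded to a power of two, N is
// the amount of bits per commitment, and logMN is the amount of folding rounds for MN terms.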
pub(crate) fn MN(outputs: usize) -> (usize, usize, usize) {
let mut logM = 0;
let mut M;
while {
M = 1 << logM;
(M <= MAX_M) && (M < outputs)
} {
logM += 1;
}
(logM + LOG_N, M, M * N)
}
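// Decompose the amount within each commitment into bits.
// aL is the vector of bits and aR = aL - 1, so their Hadamard product is 0, as the range proof
// requires.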
pub(crate) fn bit_decompose(commitments: &[Commitment]) -> (ScalarVector, ScalarVector) {
let (_, M, MN) = MN(commitments.len());
let sv = commitments.iter().map(|c| Scalar::from(c.amount)).collect::<Vec<_>>();
let mut aL = ScalarVector::new(MN);
let mut aR = ScalarVector::new(MN);
for j in 0 .. M {
for i in (0 .. N).rev() {
let bit =
if j < sv.len() { Choice::from((sv[j][i / 8] >> (i % 8)) & 1) } else { Choice::from(0) };
aL.0[(j * N) + i] = Scalar::conditional_select(&Scalar::ZERO, &Scalar::ONE, bit);
aR.0[(j * N) + i] = Scalar::conditional_select(&-Scalar::ONE, &Scalar::ZERO, bit);
}
}
(aL, aR)
}
pub(crate) fn hash_commitments<C: IntoIterator<Item = DalekPoint>>(
commitments: C,
) -> (Scalar, Vec<EdwardsPoint>) {
let V = commitments.into_iter().map(|c| EdwardsPoint(c) * INV_EIGHT()).collect::<Vec<_>>();
(hash_to_scalar(&V.iter().flat_map(|V| V.compress().to_bytes()).collect::<Vec<_>>()), V)
}
pub(crate) fn alpha_rho<R: RngCore + CryptoRng>(
rng: &mut R,
generators: &Generators,
aL: &ScalarVector,
aR: &ScalarVector,
) -> (Scalar, EdwardsPoint) {
let ar = Scalar::random(rng);
(ar, (vector_exponent(generators, aL, aR) + (EdwardsPoint::generator() * ar)) * INV_EIGHT())
}
pub(crate) fn LR_statements(
a: &ScalarVector,
G_i: &[EdwardsPoint],
b: &ScalarVector,
H_i: &[EdwardsPoint],
cL: Scalar,
U: EdwardsPoint,
) -> Vec<(Scalar, EdwardsPoint)> {
let mut res = a
.0
.iter()
.copied()
.zip(G_i.iter().copied())
.chain(b.0.iter().copied().zip(H_i.iter().copied()))
.collect::<Vec<_>>();
res.push((cL, U));
res
}
static TWO_N_CELL: OnceLock<ScalarVector> = OnceLock::new();
pub(crate) fn TWO_N() -> &'static ScalarVector {
TWO_N_CELL.get_or_init(|| ScalarVector::powers(Scalar::from(2u8), N))
}
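// The products of challenges for each index of the folded vectors, where products[i] is the
// product, across each round j, of w[j] if the j-th bit of i (MSB first) is set and winv[j]
// otherwise.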
pub(crate) fn challenge_products(w: &[Scalar], winv: &[Scalar]) -> Vec<Scalar> {
let mut products = vec![Scalar::ZERO; 1 << w.len()];
products[0] = winv[0];
products[1] = w[0];
for j in 1 .. w.len() {
let mut slots = (1 << (j + 1)) - 1;
while slots > 0 {
products[slots] = products[slots / 2] * w[j];
products[slots - 1] = products[slots / 2] * winv[j];
slots = slots.saturating_sub(2);
}
}
// Sanity check, as it'd be critical if the above failed to populate
for w in &products {
debug_assert!(!bool::from(w.is_zero()));
}
products
}


@@ -0,0 +1,233 @@
#![allow(non_snake_case)]
use std_shims::{
vec::Vec,
io::{self, Read, Write},
};
use rand_core::{RngCore, CryptoRng};
use zeroize::{Zeroize, Zeroizing};
use curve25519_dalek::edwards::EdwardsPoint;
use multiexp::BatchVerifier;
use crate::{Commitment, wallet::TransactionError, serialize::*};
pub(crate) mod scalar_vector;
pub(crate) mod core;
use self::core::LOG_N;
pub(crate) mod original;
use self::original::OriginalStruct;
pub(crate) mod plus;
use self::plus::*;
pub(crate) const MAX_OUTPUTS: usize = self::core::MAX_M;
/// Bulletproof enum, encapsulating both Bulletproofs and Bulletproofs+.
#[allow(clippy::large_enum_variant)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Bulletproof {
Original(OriginalStruct),
Plus(AggregateRangeProof),
}
impl Bulletproof {
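// The amount of fixed (non-vector) fields in the serialized proof: 6 for BP+ (Monero's
// A, A1, B, r1, s1, d1) and 9 for the original BP (A, S, T1, T2, taux, mu, a, b, t).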
fn bp_fields(plus: bool) -> usize {
if plus {
6
} else {
9
}
}
// https://github.com/monero-project/monero/blob/94e67bf96bbc010241f29ada6abc89f49a81759c/
// src/cryptonote_basic/cryptonote_format_utils.cpp#L106-L124
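// As a worked sketch of the arithmetic below: 3 BP+ outputs pad to 4, yielding LR_len = 8,
// base = 320, and size = 704, so bp_clawback = ((320 * 4) - 704) * 4 / 5 = 460.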
pub(crate) fn calculate_bp_clawback(plus: bool, n_outputs: usize) -> (usize, usize) {
#[allow(non_snake_case)]
let mut LR_len = 0;
let mut n_padded_outputs = 1;
while n_padded_outputs < n_outputs {
LR_len += 1;
n_padded_outputs = 1 << LR_len;
}
LR_len += LOG_N;
let mut bp_clawback = 0;
if n_padded_outputs > 2 {
let fields = Bulletproof::bp_fields(plus);
let base = ((fields + (2 * (LOG_N + 1))) * 32) / 2;
let size = (fields + (2 * LR_len)) * 32;
bp_clawback = ((base * n_padded_outputs) - size) * 4 / 5;
}
(bp_clawback, LR_len)
}
pub(crate) fn fee_weight(plus: bool, outputs: usize) -> usize {
#[allow(non_snake_case)]
let (bp_clawback, LR_len) = Bulletproof::calculate_bp_clawback(plus, outputs);
32 * (Bulletproof::bp_fields(plus) + (2 * LR_len)) + 2 + bp_clawback
}
/// Prove the listed commitments are within [0 .. 2^64) with an aggregate Bulletproof.
pub fn prove<R: RngCore + CryptoRng>(
rng: &mut R,
outputs: &[Commitment],
) -> Result<Bulletproof, TransactionError> {
if outputs.is_empty() {
Err(TransactionError::NoOutputs)?;
}
if outputs.len() > MAX_OUTPUTS {
Err(TransactionError::TooManyOutputs)?;
}
Ok(Bulletproof::Original(OriginalStruct::prove(rng, outputs)))
}
/// Prove the listed commitments are within [0 .. 2^64) with an aggregate Bulletproof+.
pub fn prove_plus<R: RngCore + CryptoRng>(
rng: &mut R,
outputs: Vec<Commitment>,
) -> Result<Bulletproof, TransactionError> {
if outputs.is_empty() {
Err(TransactionError::NoOutputs)?;
}
if outputs.len() > MAX_OUTPUTS {
Err(TransactionError::TooManyOutputs)?;
}
Ok(Bulletproof::Plus(
AggregateRangeStatement::new(outputs.iter().map(Commitment::calculate).collect())
.unwrap()
.prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs).unwrap()))
.unwrap(),
))
}
/// Verify the given Bulletproof(+).
#[must_use]
pub fn verify<R: RngCore + CryptoRng>(&self, rng: &mut R, commitments: &[EdwardsPoint]) -> bool {
match self {
Bulletproof::Original(bp) => bp.verify(rng, commitments),
Bulletproof::Plus(bp) => {
let mut verifier = BatchVerifier::new(1);
let Some(statement) = AggregateRangeStatement::new(commitments.to_vec()) else {
return false;
};
if !statement.verify(rng, &mut verifier, (), bp.clone()) {
return false;
}
verifier.verify_vartime()
}
}
}
/// Accumulate the verification for the given Bulletproof into the specified BatchVerifier.
///
/// Returns false if the Bulletproof isn't sane, leaving the BatchVerifier in an undefined
/// state.
/// Returns true if the Bulletproof is sane, regardless of its validity.
#[must_use]
pub fn batch_verify<ID: Copy + Zeroize, R: RngCore + CryptoRng>(
&self,
rng: &mut R,
verifier: &mut BatchVerifier<ID, dalek_ff_group::EdwardsPoint>,
id: ID,
commitments: &[EdwardsPoint],
) -> bool {
match self {
Bulletproof::Original(bp) => bp.batch_verify(rng, verifier, id, commitments),
Bulletproof::Plus(bp) => {
let Some(statement) = AggregateRangeStatement::new(commitments.to_vec()) else {
return false;
};
statement.verify(rng, verifier, id, bp.clone())
}
}
}
fn write_core<W: Write, F: Fn(&[EdwardsPoint], &mut W) -> io::Result<()>>(
&self,
w: &mut W,
specific_write_vec: F,
) -> io::Result<()> {
match self {
Bulletproof::Original(bp) => {
write_point(&bp.A, w)?;
write_point(&bp.S, w)?;
write_point(&bp.T1, w)?;
write_point(&bp.T2, w)?;
write_scalar(&bp.taux, w)?;
write_scalar(&bp.mu, w)?;
specific_write_vec(&bp.L, w)?;
specific_write_vec(&bp.R, w)?;
write_scalar(&bp.a, w)?;
write_scalar(&bp.b, w)?;
write_scalar(&bp.t, w)
}
Bulletproof::Plus(bp) => {
write_point(&bp.A.0, w)?;
write_point(&bp.wip.A.0, w)?;
write_point(&bp.wip.B.0, w)?;
write_scalar(&bp.wip.r_answer.0, w)?;
write_scalar(&bp.wip.s_answer.0, w)?;
write_scalar(&bp.wip.delta_answer.0, w)?;
specific_write_vec(&bp.wip.L.iter().copied().map(|L| L.0).collect::<Vec<_>>(), w)?;
specific_write_vec(&bp.wip.R.iter().copied().map(|R| R.0).collect::<Vec<_>>(), w)
}
}
}
pub(crate) fn signature_write<W: Write>(&self, w: &mut W) -> io::Result<()> {
self.write_core(w, |points, w| write_raw_vec(write_point, points, w))
}
/// Write the Bulletproof(+) to a writer.
pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
self.write_core(w, |points, w| write_vec(write_point, points, w))
}
/// Serialize the Bulletproof(+) to a `Vec<u8>`.
pub fn serialize(&self) -> Vec<u8> {
let mut serialized = vec![];
self.write(&mut serialized).unwrap();
serialized
}
/// Read a Bulletproof.
pub fn read<R: Read>(r: &mut R) -> io::Result<Bulletproof> {
Ok(Bulletproof::Original(OriginalStruct {
A: read_point(r)?,
S: read_point(r)?,
T1: read_point(r)?,
T2: read_point(r)?,
taux: read_scalar(r)?,
mu: read_scalar(r)?,
L: read_vec(read_point, r)?,
R: read_vec(read_point, r)?,
a: read_scalar(r)?,
b: read_scalar(r)?,
t: read_scalar(r)?,
}))
}
/// Read a Bulletproof+.
pub fn read_plus<R: Read>(r: &mut R) -> io::Result<Bulletproof> {
use dalek_ff_group::{Scalar as DfgScalar, EdwardsPoint as DfgPoint};
Ok(Bulletproof::Plus(AggregateRangeProof {
A: DfgPoint(read_point(r)?),
wip: WipProof {
A: DfgPoint(read_point(r)?),
B: DfgPoint(read_point(r)?),
r_answer: DfgScalar(read_scalar(r)?),
s_answer: DfgScalar(read_scalar(r)?),
delta_answer: DfgScalar(read_scalar(r)?),
L: read_vec(read_point, r)?.into_iter().map(DfgPoint).collect(),
R: read_vec(read_point, r)?.into_iter().map(DfgPoint).collect(),
},
}))
}
}


@@ -0,0 +1,323 @@
use std_shims::{vec::Vec, sync::OnceLock};
use rand_core::{RngCore, CryptoRng};
use zeroize::Zeroize;
use curve25519_dalek::{scalar::Scalar as DalekScalar, edwards::EdwardsPoint as DalekPoint};
use group::{ff::Field, Group};
use dalek_ff_group::{ED25519_BASEPOINT_POINT as G, Scalar, EdwardsPoint};
use multiexp::{BatchVerifier, multiexp};
use crate::{Commitment, ringct::bulletproofs::core::*};
include!(concat!(env!("OUT_DIR"), "/generators.rs"));
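// The inner product of 1^N with 2^N, i.e. (2^64) - 1 as a scalar.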
static IP12_CELL: OnceLock<Scalar> = OnceLock::new();
pub(crate) fn IP12() -> Scalar {
*IP12_CELL.get_or_init(|| ScalarVector(vec![Scalar::ONE; N]).inner_product(TWO_N()))
}
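// Fold the two vectors of points into one, returning (a * l[i]) + (b * r[i]) for each index.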
pub(crate) fn hadamard_fold(
l: &[EdwardsPoint],
r: &[EdwardsPoint],
a: Scalar,
b: Scalar,
) -> Vec<EdwardsPoint> {
let mut res = Vec::with_capacity(l.len());
for i in 0 .. l.len() {
res.push(multiexp(&[(a, l[i]), (b, r[i])]));
}
res
}
/// Internal structure representing a Bulletproof.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct OriginalStruct {
pub(crate) A: DalekPoint,
pub(crate) S: DalekPoint,
pub(crate) T1: DalekPoint,
pub(crate) T2: DalekPoint,
pub(crate) taux: DalekScalar,
pub(crate) mu: DalekScalar,
pub(crate) L: Vec<DalekPoint>,
pub(crate) R: Vec<DalekPoint>,
pub(crate) a: DalekScalar,
pub(crate) b: DalekScalar,
pub(crate) t: DalekScalar,
}
impl OriginalStruct {
pub(crate) fn prove<R: RngCore + CryptoRng>(
rng: &mut R,
commitments: &[Commitment],
) -> OriginalStruct {
let (logMN, M, MN) = MN(commitments.len());
let (aL, aR) = bit_decompose(commitments);
let commitments_points = commitments.iter().map(Commitment::calculate).collect::<Vec<_>>();
let (mut cache, _) = hash_commitments(commitments_points.clone());
let (sL, sR) =
ScalarVector((0 .. (MN * 2)).map(|_| Scalar::random(&mut *rng)).collect::<Vec<_>>()).split();
let generators = GENERATORS();
let (mut alpha, A) = alpha_rho(&mut *rng, generators, &aL, &aR);
let (mut rho, S) = alpha_rho(&mut *rng, generators, &sL, &sR);
let y = hash_cache(&mut cache, &[A.compress().to_bytes(), S.compress().to_bytes()]);
let mut cache = hash_to_scalar(&y.to_bytes());
let z = cache;
let l0 = aL - z;
let l1 = sL;
let mut zero_twos = Vec::with_capacity(MN);
let zpow = ScalarVector::powers(z, M + 2);
for j in 0 .. M {
for i in 0 .. N {
zero_twos.push(zpow[j + 2] * TWO_N()[i]);
}
}
let yMN = ScalarVector::powers(y, MN);
let r0 = ((aR + z) * &yMN) + &ScalarVector(zero_twos);
let r1 = yMN * &sR;
let (T1, T2, x, mut taux) = {
let t1 = l0.clone().inner_product(&r1) + r0.clone().inner_product(&l1);
let t2 = l1.clone().inner_product(&r1);
let mut tau1 = Scalar::random(&mut *rng);
let mut tau2 = Scalar::random(&mut *rng);
let T1 = prove_multiexp(&[(t1, H()), (tau1, EdwardsPoint::generator())]);
let T2 = prove_multiexp(&[(t2, H()), (tau2, EdwardsPoint::generator())]);
let x =
hash_cache(&mut cache, &[z.to_bytes(), T1.compress().to_bytes(), T2.compress().to_bytes()]);
let taux = (tau2 * (x * x)) + (tau1 * x);
tau1.zeroize();
tau2.zeroize();
(T1, T2, x, taux)
};
let mu = (x * rho) + alpha;
alpha.zeroize();
rho.zeroize();
for (i, gamma) in commitments.iter().map(|c| Scalar(c.mask)).enumerate() {
taux += zpow[i + 2] * gamma;
}
let l = l0 + &(l1 * x);
let r = r0 + &(r1 * x);
let t = l.clone().inner_product(&r);
let x_ip =
hash_cache(&mut cache, &[x.to_bytes(), taux.to_bytes(), mu.to_bytes(), t.to_bytes()]);
let mut a = l;
let mut b = r;
let yinv = y.invert().unwrap();
let yinvpow = ScalarVector::powers(yinv, MN);
let mut G_proof = generators.G[.. a.len()].to_vec();
let mut H_proof = generators.H[.. a.len()].to_vec();
H_proof.iter_mut().zip(yinvpow.0.iter()).for_each(|(this_H, yinvpow)| *this_H *= yinvpow);
let U = H() * x_ip;
let mut L = Vec::with_capacity(logMN);
let mut R = Vec::with_capacity(logMN);
while a.len() != 1 {
let (aL, aR) = a.split();
let (bL, bR) = b.split();
let cL = aL.clone().inner_product(&bR);
let cR = aR.clone().inner_product(&bL);
let (G_L, G_R) = G_proof.split_at(aL.len());
let (H_L, H_R) = H_proof.split_at(aL.len());
let L_i = prove_multiexp(&LR_statements(&aL, G_R, &bR, H_L, cL, U));
let R_i = prove_multiexp(&LR_statements(&aR, G_L, &bL, H_R, cR, U));
L.push(L_i);
R.push(R_i);
let w = hash_cache(&mut cache, &[L_i.compress().to_bytes(), R_i.compress().to_bytes()]);
let winv = w.invert().unwrap();
a = (aL * w) + &(aR * winv);
b = (bL * winv) + &(bR * w);
if a.len() != 1 {
G_proof = hadamard_fold(G_L, G_R, winv, w);
H_proof = hadamard_fold(H_L, H_R, w, winv);
}
}
let res = OriginalStruct {
A: *A,
S: *S,
T1: *T1,
T2: *T2,
taux: *taux,
mu: *mu,
L: L.drain(..).map(|L| *L).collect(),
R: R.drain(..).map(|R| *R).collect(),
a: *a[0],
b: *b[0],
t: *t,
};
debug_assert!(res.verify(rng, &commitments_points));
res
}
#[must_use]
fn verify_core<ID: Copy + Zeroize, R: RngCore + CryptoRng>(
&self,
rng: &mut R,
verifier: &mut BatchVerifier<ID, EdwardsPoint>,
id: ID,
commitments: &[DalekPoint],
) -> bool {
// Verify commitments are valid
if commitments.is_empty() || (commitments.len() > MAX_M) {
return false;
}
// Verify L and R are properly sized
if self.L.len() != self.R.len() {
return false;
}
let (logMN, M, MN) = MN(commitments.len());
if self.L.len() != logMN {
return false;
}
// Rebuild all challenges
let (mut cache, commitments) = hash_commitments(commitments.iter().copied());
let y = hash_cache(&mut cache, &[self.A.compress().to_bytes(), self.S.compress().to_bytes()]);
let z = hash_to_scalar(&y.to_bytes());
cache = z;
let x = hash_cache(
&mut cache,
&[z.to_bytes(), self.T1.compress().to_bytes(), self.T2.compress().to_bytes()],
);
let x_ip = hash_cache(
&mut cache,
&[x.to_bytes(), self.taux.to_bytes(), self.mu.to_bytes(), self.t.to_bytes()],
);
let mut w = Vec::with_capacity(logMN);
let mut winv = Vec::with_capacity(logMN);
for (L, R) in self.L.iter().zip(&self.R) {
w.push(hash_cache(&mut cache, &[L.compress().to_bytes(), R.compress().to_bytes()]));
winv.push(cache.invert().unwrap());
}
// Convert the proof from * INV_EIGHT to its actual form
let normalize = |point: &DalekPoint| EdwardsPoint(point.mul_by_cofactor());
let L = self.L.iter().map(normalize).collect::<Vec<_>>();
let R = self.R.iter().map(normalize).collect::<Vec<_>>();
let T1 = normalize(&self.T1);
let T2 = normalize(&self.T2);
let A = normalize(&self.A);
let S = normalize(&self.S);
let commitments = commitments.iter().map(EdwardsPoint::mul_by_cofactor).collect::<Vec<_>>();
// Verify it
let mut proof = Vec::with_capacity(4 + commitments.len());
let zpow = ScalarVector::powers(z, M + 3);
let ip1y = ScalarVector::powers(y, M * N).sum();
let mut k = -(zpow[2] * ip1y);
for j in 1 ..= M {
k -= zpow[j + 2] * IP12();
}
let y1 = Scalar(self.t) - ((z * ip1y) + k);
proof.push((-y1, H()));
proof.push((-Scalar(self.taux), G));
for (j, commitment) in commitments.iter().enumerate() {
proof.push((zpow[j + 2], *commitment));
}
proof.push((x, T1));
proof.push((x * x, T2));
verifier.queue(&mut *rng, id, proof);
proof = Vec::with_capacity(4 + (2 * (MN + logMN)));
let z3 = (Scalar(self.t) - (Scalar(self.a) * Scalar(self.b))) * x_ip;
proof.push((z3, H()));
proof.push((-Scalar(self.mu), G));
proof.push((Scalar::ONE, A));
proof.push((x, S));
{
let ypow = ScalarVector::powers(y, MN);
let yinv = y.invert().unwrap();
let yinvpow = ScalarVector::powers(yinv, MN);
let w_cache = challenge_products(&w, &winv);
let generators = GENERATORS();
for i in 0 .. MN {
let g = (Scalar(self.a) * w_cache[i]) + z;
proof.push((-g, generators.G[i]));
let mut h = Scalar(self.b) * yinvpow[i] * w_cache[(!i) & (MN - 1)];
h -= ((zpow[(i / N) + 2] * TWO_N()[i % N]) + (z * ypow[i])) * yinvpow[i];
proof.push((-h, generators.H[i]));
}
}
for i in 0 .. logMN {
proof.push((w[i] * w[i], L[i]));
proof.push((winv[i] * winv[i], R[i]));
}
verifier.queue(rng, id, proof);
true
}
#[must_use]
pub(crate) fn verify<R: RngCore + CryptoRng>(
&self,
rng: &mut R,
commitments: &[DalekPoint],
) -> bool {
let mut verifier = BatchVerifier::new(1);
if self.verify_core(rng, &mut verifier, (), commitments) {
verifier.verify_vartime()
} else {
false
}
}
#[must_use]
pub(crate) fn batch_verify<ID: Copy + Zeroize, R: RngCore + CryptoRng>(
&self,
rng: &mut R,
verifier: &mut BatchVerifier<ID, EdwardsPoint>,
id: ID,
commitments: &[DalekPoint],
) -> bool {
self.verify_core(rng, verifier, id, commitments)
}
}


@@ -1,28 +1,41 @@
use std_shims::{vec, vec::Vec};
use std_shims::vec::Vec;
use rand_core::{RngCore, CryptoRng};
use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
use curve25519_dalek::{traits::Identity, scalar::Scalar, edwards::EdwardsPoint};
use monero_primitives::{INV_EIGHT, Commitment, keccak256_to_scalar};
use multiexp::{multiexp, multiexp_vartime, BatchVerifier};
use group::{
ff::{Field, PrimeField},
Group, GroupEncoding,
};
use curve25519_dalek::EdwardsPoint as DalekPoint;
use dalek_ff_group::{Scalar, EdwardsPoint};
use crate::{
batch_verifier::BulletproofsPlusBatchVerifier,
core::{MAX_COMMITMENTS, COMMITMENT_BITS, multiexp, multiexp_vartime},
plus::{
ScalarVector, PointVector, GeneratorsList, BpPlusGenerators,
transcript::*,
weighted_inner_product::{WipStatement, WipWitness, WipProof},
padded_pow_of_2, u64_decompose,
Commitment,
ringct::{
bulletproofs::core::{MAX_M, N},
bulletproofs::plus::{
ScalarVector, PointVector, GeneratorsList, Generators,
transcript::*,
weighted_inner_product::{WipStatement, WipWitness, WipProof},
padded_pow_of_2, u64_decompose,
},
},
};
// Figure 3 of the Bulletproofs+ Paper
#[derive(Clone, Debug)]
pub(crate) struct AggregateRangeStatement<'a> {
generators: BpPlusGenerators,
V: &'a [EdwardsPoint],
pub(crate) struct AggregateRangeStatement {
generators: Generators,
V: Vec<DalekPoint>,
}
impl Zeroize for AggregateRangeStatement {
fn zeroize(&mut self) {
self.V.zeroize();
}
}
#[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)]
@@ -30,7 +43,7 @@ pub(crate) struct AggregateRangeWitness(Vec<Commitment>);
impl AggregateRangeWitness {
pub(crate) fn new(commitments: Vec<Commitment>) -> Option<Self> {
if commitments.is_empty() || (commitments.len() > MAX_COMMITMENTS) {
if commitments.is_empty() || (commitments.len() > MAX_M) {
return None;
}
@@ -38,48 +51,36 @@ impl AggregateRangeWitness {
}
}
/// Internal structure representing a Bulletproof+, as defined by Monero..
#[doc(hidden)]
/// Internal structure representing a Bulletproof+, as used in Monero.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct AggregateRangeProof {
pub(crate) A: EdwardsPoint,
pub(crate) wip: WipProof,
}
struct AHatComputation {
y: Scalar,
d_descending_y_plus_z: ScalarVector,
y_mn_plus_one: Scalar,
z: Scalar,
z_pow: ScalarVector,
A_hat: EdwardsPoint,
}
impl<'a> AggregateRangeStatement<'a> {
pub(crate) fn new(V: &'a [EdwardsPoint]) -> Option<Self> {
if V.is_empty() || (V.len() > MAX_COMMITMENTS) {
impl AggregateRangeStatement {
pub(crate) fn new(V: Vec<DalekPoint>) -> Option<Self> {
if V.is_empty() || (V.len() > MAX_M) {
return None;
}
Some(Self { generators: BpPlusGenerators::new(), V })
Some(Self { generators: Generators::new(), V })
}
fn transcript_A(transcript: &mut Scalar, A: EdwardsPoint) -> (Scalar, Scalar) {
let y = keccak256_to_scalar(
[transcript.to_bytes().as_ref(), A.compress().to_bytes().as_ref()].concat(),
);
let z = keccak256_to_scalar(y.to_bytes().as_ref());
let y = hash_to_scalar(&[transcript.to_repr().as_ref(), A.to_bytes().as_ref()].concat());
let z = hash_to_scalar(y.to_bytes().as_ref());
*transcript = z;
(y, z)
}
fn d_j(j: usize, m: usize) -> ScalarVector {
let mut d_j = Vec::with_capacity(m * COMMITMENT_BITS);
for _ in 0 .. (j - 1) * COMMITMENT_BITS {
let mut d_j = Vec::with_capacity(m * N);
for _ in 0 .. (j - 1) * N {
d_j.push(Scalar::ZERO);
}
d_j.append(&mut ScalarVector::powers(Scalar::from(2u8), COMMITMENT_BITS).0);
for _ in 0 .. (m - j) * COMMITMENT_BITS {
d_j.append(&mut ScalarVector::powers(Scalar::from(2u8), N).0);
for _ in 0 .. (m - j) * N {
d_j.push(Scalar::ZERO);
}
ScalarVector(d_j)
@@ -87,17 +88,17 @@ impl<'a> AggregateRangeStatement<'a> {
fn compute_A_hat(
mut V: PointVector,
generators: &BpPlusGenerators,
generators: &Generators,
transcript: &mut Scalar,
mut A: EdwardsPoint,
) -> AHatComputation {
) -> (Scalar, ScalarVector, Scalar, Scalar, ScalarVector, EdwardsPoint) {
let (y, z) = Self::transcript_A(transcript, A);
A = A.mul_by_cofactor();
while V.len() < padded_pow_of_2(V.len()) {
V.0.push(EdwardsPoint::identity());
}
let mn = V.len() * COMMITMENT_BITS;
let mn = V.len() * N;
// 2, 4, 6, 8... powers of z, of length equivalent to the amount of commitments
let mut z_pow = Vec::with_capacity(V.len());
@@ -132,23 +133,23 @@ impl<'a> AggregateRangeStatement<'a> {
let neg_z = -z;
let mut A_terms = Vec::with_capacity((generators.len() * 2) + 2);
for (i, d_y_z) in d_descending_y_plus_z.0.iter().enumerate() {
A_terms.push((neg_z, generators.generator(GeneratorsList::GBold, i)));
A_terms.push((*d_y_z, generators.generator(GeneratorsList::HBold, i)));
A_terms.push((neg_z, generators.generator(GeneratorsList::GBold1, i)));
A_terms.push((*d_y_z, generators.generator(GeneratorsList::HBold1, i)));
}
A_terms.push((y_mn_plus_one, commitment_accum));
A_terms.push((
((y_pows * z) - (d.sum() * y_mn_plus_one * z) - (y_pows * (z * z))),
BpPlusGenerators::g(),
((y_pows * z) - (d.sum() * y_mn_plus_one * z) - (y_pows * z.square())),
Generators::g(),
));
AHatComputation {
(
y,
d_descending_y_plus_z,
y_mn_plus_one,
z,
z_pow: ScalarVector(z_pow),
A_hat: A + multiexp_vartime(&A_terms),
}
ScalarVector(z_pow),
A + multiexp_vartime(&A_terms),
)
}
pub(crate) fn prove<R: RngCore + CryptoRng>(
@@ -174,19 +175,19 @@ impl<'a> AggregateRangeStatement<'a> {
// Commitments aren't transmitted INV_EIGHT though, so this multiplies by INV_EIGHT to enable
// clearing its cofactor without mutating the value
// For some reason, these values are transcripted * INV_EIGHT, not as transmitted
let V = V.iter().map(|V| V * INV_EIGHT()).collect::<Vec<_>>();
let V = V.into_iter().map(|V| V * crate::INV_EIGHT()).collect::<Vec<_>>();
let mut transcript = initial_transcript(V.iter());
let mut V = V.iter().map(EdwardsPoint::mul_by_cofactor).collect::<Vec<_>>();
let mut V = V.into_iter().map(|V| EdwardsPoint(V.mul_by_cofactor())).collect::<Vec<_>>();
// Pad V
while V.len() < padded_pow_of_2(V.len()) {
V.push(EdwardsPoint::identity());
}
let generators = generators.reduce(V.len() * COMMITMENT_BITS);
let generators = generators.reduce(V.len() * N);
let mut d_js = Vec::with_capacity(V.len());
let mut a_l = ScalarVector(Vec::with_capacity(V.len() * COMMITMENT_BITS));
let mut a_l = ScalarVector(Vec::with_capacity(V.len() * N));
for j in 1 ..= V.len() {
d_js.push(Self::d_j(j, V.len()));
#[allow(clippy::map_unwrap_or)]
@@ -204,26 +205,26 @@ impl<'a> AggregateRangeStatement<'a> {
let mut A_terms = Vec::with_capacity((generators.len() * 2) + 1);
for (i, a_l) in a_l.0.iter().enumerate() {
A_terms.push((*a_l, generators.generator(GeneratorsList::GBold, i)));
A_terms.push((*a_l, generators.generator(GeneratorsList::GBold1, i)));
}
for (i, a_r) in a_r.0.iter().enumerate() {
A_terms.push((*a_r, generators.generator(GeneratorsList::HBold, i)));
A_terms.push((*a_r, generators.generator(GeneratorsList::HBold1, i)));
}
A_terms.push((alpha, BpPlusGenerators::h()));
A_terms.push((alpha, Generators::h()));
let mut A = multiexp(&A_terms);
A_terms.zeroize();
// Multiply by INV_EIGHT per earlier commentary
A *= INV_EIGHT();
A.0 *= crate::INV_EIGHT();
let AHatComputation { y, d_descending_y_plus_z, y_mn_plus_one, z, z_pow, A_hat } =
let (y, d_descending_y_plus_z, y_mn_plus_one, z, z_pow, A_hat) =
Self::compute_A_hat(PointVector(V), &generators, &mut transcript, A);
let a_l = a_l - z;
let a_r = a_r + &d_descending_y_plus_z;
let mut alpha = alpha;
for j in 1 ..= witness.0.len() {
alpha += z_pow[j - 1] * witness.0[j - 1].mask * y_mn_plus_one;
alpha += z_pow[j - 1] * Scalar(witness.0[j - 1].mask) * y_mn_plus_one;
}
Some(AggregateRangeProof {
@@ -234,22 +235,25 @@ impl<'a> AggregateRangeStatement<'a> {
})
}
pub(crate) fn verify<R: RngCore + CryptoRng>(
pub(crate) fn verify<Id: Copy + Zeroize, R: RngCore + CryptoRng>(
self,
rng: &mut R,
verifier: &mut BulletproofsPlusBatchVerifier,
verifier: &mut BatchVerifier<Id, EdwardsPoint>,
id: Id,
proof: AggregateRangeProof,
) -> bool {
let Self { generators, V } = self;
let V = V.iter().map(|V| V * INV_EIGHT()).collect::<Vec<_>>();
let V = V.into_iter().map(|V| V * crate::INV_EIGHT()).collect::<Vec<_>>();
let mut transcript = initial_transcript(V.iter());
let V = V.iter().map(EdwardsPoint::mul_by_cofactor).collect::<Vec<_>>();
// With the torsion clear, wrap it into a EdwardsPoint from dalek-ff-group
// (which is prime-order)
let V = V.into_iter().map(|V| EdwardsPoint(V.mul_by_cofactor())).collect::<Vec<_>>();
let generators = generators.reduce(V.len() * COMMITMENT_BITS);
let generators = generators.reduce(V.len() * N);
let AHatComputation { y, A_hat, .. } =
let (y, _, _, _, _, A_hat) =
Self::compute_A_hat(PointVector(V), &generators, &mut transcript, proof.A);
WipStatement::new(generators, A_hat, y).verify(rng, verifier, transcript, proof.wip)
WipStatement::new(generators, A_hat, y).verify(rng, verifier, id, transcript, proof.wip)
}
}


@@ -1,12 +1,11 @@
#![allow(non_snake_case)]
use std_shims::sync::LazyLock;
use group::Group;
use dalek_ff_group::{Scalar, EdwardsPoint};
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, scalar::Scalar, edwards::EdwardsPoint};
use monero_generators::{H, Generators};
pub(crate) use crate::{scalar_vector::ScalarVector, point_vector::PointVector};
pub(crate) use crate::ringct::bulletproofs::scalar_vector::ScalarVector;
mod point_vector;
pub(crate) use point_vector::PointVector;
pub(crate) mod transcript;
pub(crate) mod weighted_inner_product;
@@ -24,50 +23,55 @@ pub(crate) fn padded_pow_of_2(i: usize) -> usize {
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub(crate) enum GeneratorsList {
GBold,
HBold,
GBold1,
HBold1,
}
// TODO: Table these
#[derive(Clone, Debug)]
pub(crate) struct BpPlusGenerators {
g_bold: &'static [EdwardsPoint],
h_bold: &'static [EdwardsPoint],
pub(crate) struct Generators {
g_bold1: &'static [EdwardsPoint],
h_bold1: &'static [EdwardsPoint],
}
include!(concat!(env!("OUT_DIR"), "/generators_plus.rs"));
mod generators {
use std_shims::sync::OnceLock;
use monero_generators::Generators;
include!(concat!(env!("OUT_DIR"), "/generators_plus.rs"));
}
impl BpPlusGenerators {
impl Generators {
#[allow(clippy::new_without_default)]
pub(crate) fn new() -> Self {
let gens = &GENERATORS;
BpPlusGenerators { g_bold: &gens.G, h_bold: &gens.H }
let gens = generators::GENERATORS();
Generators { g_bold1: &gens.G, h_bold1: &gens.H }
}
pub(crate) fn len(&self) -> usize {
self.g_bold.len()
self.g_bold1.len()
}
pub(crate) fn g() -> EdwardsPoint {
*H
dalek_ff_group::EdwardsPoint(crate::H())
}
pub(crate) fn h() -> EdwardsPoint {
ED25519_BASEPOINT_POINT
EdwardsPoint::generator()
}
pub(crate) fn generator(&self, list: GeneratorsList, i: usize) -> EdwardsPoint {
match list {
GeneratorsList::GBold => self.g_bold[i],
GeneratorsList::HBold => self.h_bold[i],
GeneratorsList::GBold1 => self.g_bold1[i],
GeneratorsList::HBold1 => self.h_bold1[i],
}
}
pub(crate) fn reduce(&self, generators: usize) -> Self {
// Round up to the next power of 2
let generators = padded_pow_of_2(generators);
assert!(generators <= self.g_bold.len());
assert!(generators <= self.g_bold1.len());
BpPlusGenerators { g_bold: &self.g_bold[.. generators], h_bold: &self.h_bold[.. generators] }
Generators { g_bold1: &self.g_bold1[.. generators], h_bold1: &self.h_bold1[.. generators] }
}
}


@@ -1,16 +1,16 @@
use core::ops::{Index, IndexMut};
use std_shims::vec::Vec;
use zeroize::Zeroize;
use zeroize::{Zeroize, ZeroizeOnDrop};
use curve25519_dalek::edwards::EdwardsPoint;
use crate::scalar_vector::ScalarVector;
use dalek_ff_group::EdwardsPoint;
#[cfg(test)]
use crate::core::multiexp;
use multiexp::multiexp;
#[cfg(test)]
use crate::ringct::bulletproofs::plus::ScalarVector;
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
pub(crate) struct PointVector(pub(crate) Vec<EdwardsPoint>);
impl Index<usize> for PointVector {
@@ -27,15 +27,6 @@ impl IndexMut<usize> for PointVector {
}
impl PointVector {
pub(crate) fn mul_vec(&self, vector: &ScalarVector) -> Self {
assert_eq!(self.len(), vector.len());
let mut res = self.clone();
for (i, val) in res.0.iter_mut().enumerate() {
*val *= vector.0[i];
}
res
}
#[cfg(test)]
pub(crate) fn multiexp(&self, vector: &ScalarVector) -> EdwardsPoint {
debug_assert_eq!(self.len(), vector.len());


@@ -0,0 +1,25 @@
use std_shims::{sync::OnceLock, vec::Vec};
use curve25519_dalek::EdwardsPoint;
use dalek_ff_group::Scalar;
use monero_generators::{hash_to_point as raw_hash_to_point};
use crate::{hash, hash_to_scalar as dalek_hash};
// Monero starts BP+ transcripts with the following constant.
static TRANSCRIPT_CELL: OnceLock<[u8; 32]> = OnceLock::new();
pub(crate) fn TRANSCRIPT() -> [u8; 32] {
// Why this uses a hash_to_point is completely unknown.
*TRANSCRIPT_CELL
.get_or_init(|| raw_hash_to_point(hash(b"bulletproof_plus_transcript")).compress().to_bytes())
}
pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar {
Scalar(dalek_hash(data))
}
pub(crate) fn initial_transcript(commitments: core::slice::Iter<'_, EdwardsPoint>) -> Scalar {
let commitments_hash =
hash_to_scalar(&commitments.flat_map(|V| V.compress().to_bytes()).collect::<Vec<_>>());
hash_to_scalar(&[TRANSCRIPT().as_ref(), &commitments_hash.to_bytes()].concat())
}


@@ -1,21 +1,24 @@
use std_shims::{vec, vec::Vec};
use std_shims::vec::Vec;
use rand_core::{RngCore, CryptoRng};
use zeroize::{Zeroize, ZeroizeOnDrop};
use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};
use multiexp::{BatchVerifier, multiexp, multiexp_vartime};
use group::{
ff::{Field, PrimeField},
GroupEncoding,
};
use dalek_ff_group::{Scalar, EdwardsPoint};
use monero_primitives::{INV_EIGHT, keccak256_to_scalar};
use crate::{
core::{multiexp, multiexp_vartime, challenge_products},
batch_verifier::BulletproofsPlusBatchVerifier,
plus::{ScalarVector, PointVector, GeneratorsList, BpPlusGenerators, padded_pow_of_2},
use crate::ringct::bulletproofs::plus::{
ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, transcript::*,
};
// Figure 1 of the Bulletproofs+ paper
#[derive(Clone, Debug)]
pub(crate) struct WipStatement {
generators: BpPlusGenerators,
generators: Generators,
P: EdwardsPoint,
y: ScalarVector,
}
@@ -65,7 +68,7 @@ pub(crate) struct WipProof {
}
impl WipStatement {
pub(crate) fn new(generators: BpPlusGenerators, P: EdwardsPoint, y: Scalar) -> Self {
pub(crate) fn new(generators: Generators, P: EdwardsPoint, y: Scalar) -> Self {
debug_assert_eq!(generators.len(), padded_pow_of_2(generators.len()));
// y ** n
@@ -79,26 +82,16 @@ impl WipStatement {
}
fn transcript_L_R(transcript: &mut Scalar, L: EdwardsPoint, R: EdwardsPoint) -> Scalar {
let e = keccak256_to_scalar(
[
transcript.to_bytes().as_ref(),
L.compress().to_bytes().as_ref(),
R.compress().to_bytes().as_ref(),
]
.concat(),
let e = hash_to_scalar(
&[transcript.to_repr().as_ref(), L.to_bytes().as_ref(), R.to_bytes().as_ref()].concat(),
);
*transcript = e;
e
}
fn transcript_A_B(transcript: &mut Scalar, A: EdwardsPoint, B: EdwardsPoint) -> Scalar {
let e = keccak256_to_scalar(
[
transcript.to_bytes().as_ref(),
A.compress().to_bytes().as_ref(),
B.compress().to_bytes().as_ref(),
]
.concat(),
let e = hash_to_scalar(
&[transcript.to_repr().as_ref(), A.to_bytes().as_ref(), B.to_bytes().as_ref()].concat(),
);
*transcript = e;
e
@@ -107,6 +100,9 @@ impl WipStatement {
// Prover's variant of the shared code block to calculate G/H/P when n > 1
// Returns each permutation of G/H, since the prover needs to operate on each permutation
// P is dropped as it's unused in the prover's path
// TODO: It'd still probably be faster to keep in terms of the original generators, both between
// the reduced amount of group operations and the potential tabling of the generators under
// multiexp
#[allow(clippy::too_many_arguments)]
fn next_G_H(
transcript: &mut Scalar,
@@ -123,7 +119,7 @@ impl WipStatement {
debug_assert_eq!(g_bold1.len(), h_bold1.len());
let e = Self::transcript_L_R(transcript, L, R);
let inv_e = e.invert();
let inv_e = e.invert().unwrap();
// This vartime is safe as all of these arguments are public
let mut new_g_bold = Vec::with_capacity(g_bold1.len());
@@ -137,12 +133,57 @@ impl WipStatement {
new_h_bold.push(multiexp_vartime(&[(e, h_bold.0), (inv_e, h_bold.1)]));
}
let e_square = e * e;
let inv_e_square = inv_e * inv_e;
let e_square = e.square();
let inv_e_square = inv_e.square();
(e, inv_e, e_square, inv_e_square, PointVector(new_g_bold), PointVector(new_h_bold))
}
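
To make the fold concrete: each round replaces a pair of generators drawn from the two halves with one combined generator, halving the vectors. A sketch of the g-side relation, with hypothetical halves `g1`/`g2`:

// Illustrative only: one folding step for a single g generator pair.
// Variable time is fine here: challenges and generators are all public.
fn fold_pair(e: Scalar, inv_e: Scalar, g1: EdwardsPoint, g2: EdwardsPoint) -> EdwardsPoint {
  // g' = e^-1 * g1 + e * g2 (the h vector folds with the weights swapped)
  multiexp_vartime(&[(inv_e, g1), (e, g2)])
}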
/*
This has room for optimization. It currently takes an iterative approach; a
divide-and-conquer approach is worth investigating further.
Assume there are 4 challenges.
Iterative approach (current):
1. Do the optimal multiplications across challenge column 0 and 1.
2. Do the optimal multiplications across that result and column 2.
3. Do the optimal multiplications across that result and column 3.
Divide and conquer (worth investigating further):
1. Do the optimal multiplications across challenge column 0 and 1.
2. Do the optimal multiplications across challenge column 2 and 3.
3. Multiply both results together.
When there are 4 challenges (n = 16 products), the iterative approach does 28 multiplications
versus divide and conquer's 24.
*/
fn challenge_products(challenges: &[(Scalar, Scalar)]) -> Vec<Scalar> {
let mut products = vec![Scalar::ONE; 1 << challenges.len()];
if !challenges.is_empty() {
products[0] = challenges[0].1;
products[1] = challenges[0].0;
for (j, challenge) in challenges.iter().enumerate().skip(1) {
let mut slots = (1 << (j + 1)) - 1;
while slots > 0 {
products[slots] = products[slots / 2] * challenge.0;
products[slots - 1] = products[slots / 2] * challenge.1;
slots = slots.saturating_sub(2);
}
}
// Sanity check, as a failure to populate the above would be critical
for product in &products {
debug_assert!(!bool::from(product.is_zero()));
}
}
products
}
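
The resulting table is indexed by the bits of the position, with the first challenge occupying the most significant bit. A hedged layout check for two hypothetical challenges (`e0`/`e1` are assumed nonzero, and the call path, free function versus `Self::challenge_products`, differs between the two sides of this diff):

// Illustrative only: the expected layout for two challenges.
let (inv_e0, inv_e1) = (e0.invert().unwrap(), e1.invert().unwrap());
let products = challenge_products(&[(e0, inv_e0), (e1, inv_e1)]);
debug_assert_eq!(products[0b00], inv_e0 * inv_e1);
debug_assert_eq!(products[0b01], inv_e0 * e1);
debug_assert_eq!(products[0b10], e0 * inv_e1);
debug_assert_eq!(products[0b11], e0 * e1);

This bit ordering is why the verifier below reads the mirrored `product_cache[product_cache.len() - 1 - i]` for the h_bold terms.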
pub(crate) fn prove<R: RngCore + CryptoRng>(
self,
rng: &mut R,
@@ -156,27 +197,16 @@ impl WipStatement {
if generators.len() != witness.a.len() {
return None;
}
let (g, h) = (BpPlusGenerators::g(), BpPlusGenerators::h());
let (g, h) = (Generators::g(), Generators::h());
let mut g_bold = vec![];
let mut h_bold = vec![];
for i in 0 .. generators.len() {
g_bold.push(generators.generator(GeneratorsList::GBold, i));
h_bold.push(generators.generator(GeneratorsList::HBold, i));
g_bold.push(generators.generator(GeneratorsList::GBold1, i));
h_bold.push(generators.generator(GeneratorsList::HBold1, i));
}
let mut g_bold = PointVector(g_bold);
let mut h_bold = PointVector(h_bold);
let mut y_inv = {
let mut i = 1;
let mut to_invert = vec![];
while i < g_bold.len() {
to_invert.push(y[i - 1]);
i *= 2;
}
Scalar::batch_invert(&mut to_invert);
to_invert
};
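
For reference, `Scalar::batch_invert` amortizes the inversions gathered above via Montgomery's trick. A self-contained sketch, assuming a curve25519_dalek-style `invert` that returns a `Scalar` directly:

// A sketch of Montgomery's trick: one inversion plus O(n) multiplications
// replaces n inversions. Assumes every input is nonzero.
fn batch_invert_sketch(values: &mut [Scalar]) {
  if values.is_empty() {
    return;
  }
  // prefix[i] = values[0] * ... * values[i]
  let mut prefix = Vec::with_capacity(values.len());
  let mut acc = Scalar::ONE;
  for value in values.iter() {
    acc *= value;
    prefix.push(acc);
  }
  // Invert the total product once, then peel off one inverse per element.
  let mut inv = acc.invert();
  for i in (1 .. values.len()).rev() {
    let value = values[i];
    values[i] = inv * prefix[i - 1];
    inv *= value;
  }
  values[0] = inv;
}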
// Check P has the expected relationship
#[cfg(debug_assertions)]
{
@@ -230,7 +260,8 @@ impl WipStatement {
let c_l = a1.clone().weighted_inner_product(&b2, &y);
let c_r = (a2.clone() * y_n_hat).weighted_inner_product(&b1, &y);
let y_inv_n_hat = y_inv.pop().unwrap();
// TODO: Calculate these with a batch inversion
let y_inv_n_hat = y_n_hat.invert().unwrap();
let mut L_terms = (a1.clone() * y_inv_n_hat)
.0
@@ -240,7 +271,7 @@ impl WipStatement {
.collect::<Vec<_>>();
L_terms.push((c_l, g));
L_terms.push((d_l, h));
let L = multiexp(&L_terms) * INV_EIGHT();
let L = multiexp(&L_terms) * Scalar(crate::INV_EIGHT());
L_vec.push(L);
L_terms.zeroize();
@@ -252,7 +283,7 @@ impl WipStatement {
.collect::<Vec<_>>();
R_terms.push((c_r, g));
R_terms.push((d_r, h));
let R = multiexp(&R_terms) * INV_EIGHT();
let R = multiexp(&R_terms) * Scalar(crate::INV_EIGHT());
R_vec.push(R);
R_terms.zeroize();
@@ -285,33 +316,34 @@ impl WipStatement {
let mut A_terms =
vec![(r, g_bold[0]), (s, h_bold[0]), ((ry * b[0]) + (s * y[0] * a[0]), g), (delta, h)];
let A = multiexp(&A_terms) * INV_EIGHT();
let A = multiexp(&A_terms) * Scalar(crate::INV_EIGHT());
A_terms.zeroize();
let mut B_terms = vec![(ry * s, g), (eta, h)];
let B = multiexp(&B_terms) * INV_EIGHT();
let B = multiexp(&B_terms) * Scalar(crate::INV_EIGHT());
B_terms.zeroize();
let e = Self::transcript_A_B(&mut transcript, A, B);
let r_answer = r + (a[0] * e);
let s_answer = s + (b[0] * e);
let delta_answer = eta + (delta * e) + (alpha * (e * e));
let delta_answer = eta + (delta * e) + (alpha * e.square());
Some(WipProof { L: L_vec, R: R_vec, A, B, r_answer, s_answer, delta_answer })
}
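
For orientation, the intended round trip between these two methods looks roughly as follows. `generators`, `P`, `y`, `witness`, and `transcript` are hypothetical values satisfying the WIP relation, `rand_core::OsRng` is assumed in scope, and this matches the `BatchVerifier`-plus-`id` signature below (the other side of the diff instead takes a `BulletproofsPlusBatchVerifier` and no id):

// Illustrative only: prove, then queue verification into a batch.
let proof = WipStatement::new(generators.clone(), P, y)
  .prove(&mut OsRng, transcript, witness)
  .expect("witness satisfied the statement");
let mut verifier = BatchVerifier::new(1);
assert!(WipStatement::new(generators, P, y).verify(&mut OsRng, &mut verifier, (), transcript, proof));
assert!(verifier.verify_vartime());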
pub(crate) fn verify<R: RngCore + CryptoRng>(
pub(crate) fn verify<Id: Copy + Zeroize, R: RngCore + CryptoRng>(
self,
rng: &mut R,
verifier: &mut BulletproofsPlusBatchVerifier,
verifier: &mut BatchVerifier<Id, EdwardsPoint>,
id: Id,
mut transcript: Scalar,
mut proof: WipProof,
) -> bool {
let verifier_weight = Scalar::random(rng);
let WipStatement { generators, P, y } = self;
let (g, h) = (Generators::g(), Generators::h());
// Verify the L/R lengths
{
let mut lr_len = 0;
@@ -327,7 +359,7 @@ impl WipStatement {
}
let inv_y = {
let inv_y = y[0].invert();
let inv_y = y[0].invert().unwrap();
let mut res = Vec::with_capacity(y.len());
res.push(inv_y);
while res.len() < y.len() {
@@ -336,49 +368,51 @@ impl WipStatement {
res
};
let mut e_is = Vec::with_capacity(proof.L.len());
for (L, R) in proof.L.iter_mut().zip(proof.R.iter_mut()) {
e_is.push(Self::transcript_L_R(&mut transcript, *L, *R));
*L = L.mul_by_cofactor();
*R = R.mul_by_cofactor();
}
let mut P_terms = vec![(Scalar::ONE, P)];
P_terms.reserve(6 + (2 * generators.len()) + proof.L.len());
let mut challenges = Vec::with_capacity(proof.L.len());
let product_cache = {
let mut es = Vec::with_capacity(proof.L.len());
for (L, R) in proof.L.iter_mut().zip(proof.R.iter_mut()) {
es.push(Self::transcript_L_R(&mut transcript, *L, *R));
*L = L.mul_by_cofactor();
*R = R.mul_by_cofactor();
}
let mut inv_es = es.clone();
let mut scratch = vec![Scalar::ZERO; es.len()];
group::ff::BatchInverter::invert_with_external_scratch(&mut inv_es, &mut scratch);
drop(scratch);
debug_assert_eq!(es.len(), inv_es.len());
debug_assert_eq!(es.len(), proof.L.len());
debug_assert_eq!(es.len(), proof.R.len());
for ((e, inv_e), (L, R)) in
es.drain(..).zip(inv_es.drain(..)).zip(proof.L.iter().zip(proof.R.iter()))
{
debug_assert_eq!(e.invert().unwrap(), inv_e);
challenges.push((e, inv_e));
let e_square = e.square();
let inv_e_square = inv_e.square();
P_terms.push((e_square, *L));
P_terms.push((inv_e_square, *R));
}
Self::challenge_products(&challenges)
};
let e = Self::transcript_A_B(&mut transcript, proof.A, proof.B);
proof.A = proof.A.mul_by_cofactor();
proof.B = proof.B.mul_by_cofactor();
let neg_e_square = verifier_weight * -(e * e);
let neg_e_square = -e.square();
verifier.0.other.push((neg_e_square, P));
let mut challenges = Vec::with_capacity(proof.L.len());
let product_cache = {
let mut inv_e_is = e_is.clone();
Scalar::batch_invert(&mut inv_e_is);
debug_assert_eq!(e_is.len(), inv_e_is.len());
debug_assert_eq!(e_is.len(), proof.L.len());
debug_assert_eq!(e_is.len(), proof.R.len());
for ((e_i, inv_e_i), (L, R)) in
e_is.drain(..).zip(inv_e_is.drain(..)).zip(proof.L.iter().zip(proof.R.iter()))
{
debug_assert_eq!(e_i.invert(), inv_e_i);
challenges.push((e_i, inv_e_i));
let e_i_square = e_i * e_i;
let inv_e_i_square = inv_e_i * inv_e_i;
verifier.0.other.push((neg_e_square * e_i_square, *L));
verifier.0.other.push((neg_e_square * inv_e_i_square, *R));
}
challenge_products(&challenges)
};
while verifier.0.g_bold.len() < generators.len() {
verifier.0.g_bold.push(Scalar::ZERO);
}
while verifier.0.h_bold.len() < generators.len() {
verifier.0.h_bold.push(Scalar::ZERO);
let mut multiexp = P_terms;
multiexp.reserve(4 + (2 * generators.len()));
for (scalar, _) in &mut multiexp {
*scalar *= neg_e_square;
}
let re = proof.r_answer * e;
@@ -387,18 +421,23 @@ impl WipStatement {
if i > 0 {
scalar *= inv_y[i - 1];
}
verifier.0.g_bold[i] += verifier_weight * scalar;
multiexp.push((scalar, generators.generator(GeneratorsList::GBold1, i)));
}
let se = proof.s_answer * e;
for i in 0 .. generators.len() {
verifier.0.h_bold[i] += verifier_weight * (se * product_cache[product_cache.len() - 1 - i]);
multiexp.push((
se * product_cache[product_cache.len() - 1 - i],
generators.generator(GeneratorsList::HBold1, i),
));
}
verifier.0.other.push((verifier_weight * -e, proof.A));
verifier.0.g += verifier_weight * (proof.r_answer * y[0] * proof.s_answer);
verifier.0.h += verifier_weight * proof.delta_answer;
verifier.0.other.push((-verifier_weight, proof.B));
multiexp.push((-e, proof.A));
multiexp.push((proof.r_answer * y[0] * proof.s_answer, g));
multiexp.push((proof.delta_answer, h));
multiexp.push((-Scalar::ONE, proof.B));
verifier.queue(rng, id, multiexp);
true
}
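
Worth noting: on both sides of this diff, a `true` return from `verify` only means the proof's terms were successfully queued; soundness rests on the final batch check. A hedged sketch of batching several proofs, with a hypothetical `proofs: Vec<(WipStatement, Scalar, WipProof)>`:

// Illustrative only: amortizing many WIP proofs into one multiexp.
let mut verifier = BatchVerifier::new(proofs.len());
for (id, (statement, transcript, proof)) in proofs.into_iter().enumerate() {
  assert!(statement.verify(&mut OsRng, &mut verifier, id, transcript, proof));
}
// Nothing is actually checked until the batch itself is verified.
assert!(verifier.verify_vartime());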
