Mirror of https://github.com/serai-dex/serai.git (synced 2025-12-11 21:49:26 +00:00)

Compare commits: 103 commits, 10s-tender ... 68060b4efc
Commits (author and date columns omitted):

68060b4efc 4af83bd0e5 d4b22e5136 58fe79da10 3f07dd13c6 4e1d86dae2 7ef21830a5 8cb4c5d167
f9e4b420ed 817b8e99d3 925cef17f2 3283cd79e4 51e2f24bc1 372e29fe08 fccb1aea51 a25e6330bd
558a2bfa46 c73acb3d62 933b17aa91 5fa7e3d450 749d783b1e 5a3ea80943 fddbebc7c0 e01848aa9e
320b5627b5 be7780e69d 0ddbaefb38 0f0db14f05 43083dfd49 523d2ac911 fd4f247917 ac9e356af4
bba7d2a356 4c349ae605 a4428761f7 940e9553fd 593aefd229 5830c2463d bcc88c3e86 fea16df567
4960c3222e 6b4df4f2c0 dac46c8d7d db2e8376df 33dd412e67 fcad402186 ab4d79628d 93be7a3067
63521f6a96 3d855c75be 07df9aa035 bc44fbdbac 4cacce5e55 7408e26781 1f92e1cbda 333a9571b8
b7d49af1d5 5ea3b1bf97 2a31d8552e bca3728a10 4914420a37 f11a08c436 35b58a45bd af9b1ad5f9
e5afcda76b 08c7c1b413 bdf5a66e95 e861859dec 6658d95c85 2f07d04d88 e0259f2fe5 fab7a0a7cb
84cee06ac1 c706d8664a 1f2b9376f9 13b147cbf6 4a6496a90b 9662d94bf9 233164cefd 442d8c02fc
d1be9eaa2d c32d3413ba a3a009a7e9 0889627e60 ace41c79fd f7d16b3fc5 157acc47ca ae0ecf9efe
6374d9987e c93f6bf901 61a81e53e1 68dc872b88 89b237af7e 2347bf5fd3 97f433c694 10f5ec51ca
454bebaa77 0d569ff7a3 480acfd430 e266bc2e32 6c8a0bfda6 06c23368f2 5629c94b8b
.github/actions/bitcoin/action.yml (2 changes, vendored)
@@ -5,7 +5,7 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: 24.0.1
+    default: "27.0"

 runs:
   using: "composite"
@@ -42,8 +42,8 @@ runs:
       shell: bash
       run: |
         cargo install svm-rs
-        svm install 0.8.16
-        svm use 0.8.16
+        svm install 0.8.25
+        svm use 0.8.25

     # - name: Cache Rust
     #   uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43
.github/actions/test-dependencies/action.yml (6 changes, vendored)
@@ -10,7 +10,7 @@ inputs:
   bitcoin-version:
     description: "Bitcoin version to download and run as a regtest node"
     required: false
-    default: 24.0.1
+    default: "27.0"

 runs:
   using: "composite"
@@ -19,9 +19,9 @@ runs:
       uses: ./.github/actions/build-dependencies

     - name: Install Foundry
-      uses: foundry-rs/foundry-toolchain@cb603ca0abb544f301eaed59ac0baf579aa6aecf
+      uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
       with:
-        version: nightly-09fe3e041369a816365a020f715ad6f94dbce9f2
+        version: nightly-f625d0fa7c51e65b4bf1e8f7931cd1c6e2e285e9
         cache: false

     - name: Run a Monero Regtest Node
.github/workflows/coins-tests.yml (1 change, vendored)
@@ -30,6 +30,7 @@ jobs:
         run: |
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
             -p bitcoin-serai \
+            -p alloy-simple-request-transport \
             -p ethereum-serai \
             -p monero-generators \
             -p monero-serai
.github/workflows/common-tests.yml (3 changes, vendored)
@@ -28,4 +28,5 @@ jobs:
             -p std-shims \
             -p zalloc \
             -p serai-db \
-            -p serai-env
+            -p serai-env \
+            -p simple-request
.github/workflows/coordinator-tests.yml (2 changes, vendored)
@@ -37,4 +37,4 @@ jobs:
         uses: ./.github/actions/build-dependencies

       - name: Run coordinator Docker tests
-        run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
.github/workflows/full-stack-tests.yml (2 changes, vendored)
@@ -19,4 +19,4 @@ jobs:
         uses: ./.github/actions/build-dependencies

       - name: Run Full Stack Docker tests
-        run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
.github/workflows/message-queue-tests.yml (2 changes, vendored)
@@ -33,4 +33,4 @@ jobs:
         uses: ./.github/actions/build-dependencies

       - name: Run message-queue Docker tests
-        run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
.github/workflows/pages.yml (new file, 90 lines, vendored)
@@ -0,0 +1,90 @@

# MIT License
#
# Copyright (c) 2022 just-the-docs
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

# Sample workflow for building and deploying a Jekyll site to GitHub Pages
name: Deploy Jekyll site to Pages

on:
  push:
    branches:
      - "develop"
    paths:
      - "docs/**"

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
  contents: read
  pages: write
  id-token: write

# Allow one concurrent deployment
concurrency:
  group: "pages"
  cancel-in-progress: true

jobs:
  # Build job
  build:
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: docs
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Setup Ruby
        uses: ruby/setup-ruby@v1
        with:
          bundler-cache: true
          cache-version: 0
          working-directory: "${{ github.workspace }}/docs"
      - name: Setup Pages
        id: pages
        uses: actions/configure-pages@v3
      - name: Build with Jekyll
        run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
        env:
          JEKYLL_ENV: production
      - name: Upload artifact
        uses: actions/upload-pages-artifact@v1
        with:
          path: "docs/_site/"

  # Deployment job
  deploy:
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    runs-on: ubuntu-latest
    needs: build
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v2
.github/workflows/processor-tests.yml (2 changes, vendored)
@@ -37,4 +37,4 @@ jobs:
         uses: ./.github/actions/build-dependencies

       - name: Run processor Docker tests
-        run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
.github/workflows/reproducible-runtime.yml (2 changes, vendored)
@@ -33,4 +33,4 @@ jobs:
         uses: ./.github/actions/build-dependencies

       - name: Run Reproducible Runtime tests
-        run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
.github/workflows/tests.yml (3 changes, vendored)
@@ -43,6 +43,7 @@ jobs:
             -p tendermint-machine \
             -p tributary-chain \
             -p serai-coordinator \
             -p serai-orchestrator \
             -p serai-docker-tests

   test-substrate:
@@ -64,7 +65,9 @@ jobs:
             -p serai-validator-sets-pallet \
             -p serai-in-instructions-primitives \
             -p serai-in-instructions-pallet \
             -p serai-signals-primitives \
             -p serai-signals-pallet \
             -p serai-abi \
             -p serai-runtime \
             -p serai-node
Cargo.lock (2057 changes, generated; diff suppressed because it is too large)
@@ -3,6 +3,7 @@ resolver = "2"
 members = [
   # Version patches
   "patches/zstd",
+  "patches/rocksdb",
   "patches/proc-macro-crate",

   # std patches
@@ -35,6 +36,7 @@ members = [
   "crypto/schnorrkel",

   "coins/bitcoin",
+  "coins/ethereum/alloy-simple-request-transport",
   "coins/ethereum",
   "coins/monero/generators",
   "coins/monero",
@@ -112,6 +114,8 @@ dockertest = { git = "https://github.com/kayabaNerve/dockertest-rs", branch = "a

 # wasmtime pulls in an old version for this
 zstd = { path = "patches/zstd" }
+# Needed for WAL compression
+rocksdb = { path = "patches/rocksdb" }
 # proc-macro-crate 2 binds to an old version of toml for msrv so we patch to 3
 proc-macro-crate = { path = "patches/proc-macro-crate" }
@@ -5,13 +5,16 @@ Bitcoin, Ethereum, DAI, and Monero, offering a liquidity-pool-based trading
 experience. Funds are stored in an economically secured threshold-multisig
 wallet.

-[Getting Started](docs/Getting%20Started.md)
+[Getting Started](spec/Getting%20Started.md)

 ### Layout

 - `audits`: Audits for various parts of Serai.

-- `docs`: Documentation on the Serai protocol.
+- `spec`: The specification of the Serai protocol, both internally and as
+  networked.
+
+- `docs`: User-facing documentation on the Serai protocol.

 - `common`: Crates containing utilities common to a variety of areas under
   Serai, none neatly fitting under another category.
@@ -375,7 +375,7 @@ impl SignMachine<Transaction> for TransactionSignMachine {
     msg: &[u8],
   ) -> Result<(TransactionSignatureMachine, Self::SignatureShare), FrostError> {
     if !msg.is_empty() {
-      panic!("message was passed to the TransactionMachine when it generates its own");
+      panic!("message was passed to the TransactionSignMachine when it generates its own");
     }

     let commitments = (0 .. self.sigs.len())
coins/ethereum/.gitignore (2 changes, vendored)
@@ -1,3 +1,3 @@
-# solidity build outputs
+# Solidity build outputs
 cache
 artifacts
@@ -18,25 +18,29 @@ workspace = true

 [dependencies]
 thiserror = { version = "1", default-features = false }
 eyre = { version = "0.6", default-features = false }

 sha3 = { version = "0.10", default-features = false, features = ["std"] }

 group = { version = "0.13", default-features = false }
 k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa"] }
 frost = { package = "modular-frost", path = "../../crypto/frost", features = ["secp256k1", "tests"] }

 ethers-core = { version = "2", default-features = false }
 ethers-providers = { version = "2", default-features = false }
 ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] }

 [dev-dependencies]
 rand_core = { version = "0.6", default-features = false, features = ["std"] }

 hex = { version = "0.4", default-features = false, features = ["std"] }
 serde = { version = "1", default-features = false, features = ["std"] }
 serde_json = { version = "1", default-features = false, features = ["std"] }
 transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["recommended"] }

 sha2 = { version = "0.10", default-features = false, features = ["std"] }
 group = { version = "0.13", default-features = false }
 k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa", "arithmetic"] }
 frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["secp256k1"] }

 alloy-core = { version = "0.7", default-features = false }
 alloy-sol-types = { version = "0.7", default-features = false, features = ["json"] }
 alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false, features = ["k256"] }
 alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
 alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
 alloy-simple-request-transport = { path = "./alloy-simple-request-transport", default-features = false }
 alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }

 [dev-dependencies]
 frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["tests"] }

 tokio = { version = "1", features = ["macros"] }

 alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }

 [features]
 tests = []
@@ -3,6 +3,12 @@
 This package contains Ethereum-related functionality, specifically deploying and
 interacting with Serai contracts.

+While `monero-serai` and `bitcoin-serai` are general purpose libraries,
+`ethereum-serai` is Serai specific. If any of the utilities are generally
+desired, please fork and maintain your own copy to ensure the desired
+functionality is preserved, or open an issue to request we make this library
+general purpose.
+
 ### Dependencies

 - solc
coins/ethereum/alloy-simple-request-transport/Cargo.toml (new file, 29 lines)

[package]
name = "alloy-simple-request-transport"
version = "0.1.0"
description = "A transport for alloy based off simple-request"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/coins/ethereum/alloy-simple-request-transport"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.74"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
tower = "0.4"

serde_json = { version = "1", default-features = false }
simple-request = { path = "../../../common/request", default-features = false }

alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }

[features]
default = ["tls"]
tls = ["simple-request/tls"]
coins/ethereum/alloy-simple-request-transport/LICENSE (new file, 21 lines)

MIT License

Copyright (c) 2024 Luke Parker

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
coins/ethereum/alloy-simple-request-transport/README.md (new file, 4 lines)

# Alloy Simple Request Transport

A transport for alloy based on simple-request, a small HTTP client built around
hyper.
coins/ethereum/alloy-simple-request-transport/src/lib.rs (new file, 60 lines)

#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]

use core::task;
use std::io;

use alloy_json_rpc::{RequestPacket, ResponsePacket};
use alloy_transport::{TransportError, TransportErrorKind, TransportFut};

use simple_request::{hyper, Request, Client};

use tower::Service;

#[derive(Clone, Debug)]
pub struct SimpleRequest {
  client: Client,
  url: String,
}

impl SimpleRequest {
  pub fn new(url: String) -> Self {
    Self { client: Client::with_connection_pool(), url }
  }
}

impl Service<RequestPacket> for SimpleRequest {
  type Response = ResponsePacket;
  type Error = TransportError;
  type Future = TransportFut<'static>;

  #[inline]
  fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> task::Poll<Result<(), Self::Error>> {
    task::Poll::Ready(Ok(()))
  }

  #[inline]
  fn call(&mut self, req: RequestPacket) -> Self::Future {
    let inner = self.clone();
    Box::pin(async move {
      let packet = req.serialize().map_err(TransportError::SerError)?;
      let request = Request::from(
        hyper::Request::post(&inner.url)
          .header("Content-Type", "application/json")
          .body(serde_json::to_vec(&packet).map_err(TransportError::SerError)?.into())
          .unwrap(),
      );

      let mut res = inner
        .client
        .request(request)
        .await
        .map_err(|e| TransportErrorKind::custom(io::Error::other(format!("{e:?}"))))?
        .body()
        .await
        .map_err(|e| TransportErrorKind::custom(io::Error::other(format!("{e:?}"))))?;

      serde_json::from_reader(&mut res).map_err(|e| TransportError::deser_err(e, ""))
    })
  }
}
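The transport above only implements tower's Service over RequestPacket; actually querying a node goes through alloy's RpcClient and RootProvider, as the rest of this changeset does. A minimal sketch of that wiring (an editor's illustration, not part of the diff; the exact constructor signatures at the pinned alloy revision may differ):

use std::sync::Arc;

use alloy_rpc_client::ClientBuilder;
use alloy_provider::{Provider, RootProvider};
use alloy_simple_request_transport::SimpleRequest;

#[tokio::main]
async fn main() {
  // Wrap the transport in an RPC client, then in a provider
  // The `true` marks the transport as local, relaxing alloy's polling behavior
  let client = ClientBuilder::default()
    .transport(SimpleRequest::new("http://127.0.0.1:8545".to_string()), true);
  let provider = Arc::new(RootProvider::<SimpleRequest>::new(client));
  println!("block number: {}", provider.get_block_number().await.unwrap());
}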
@@ -1,15 +1,41 @@
 use std::process::Command;

 fn main() {
-  println!("cargo:rerun-if-changed=contracts");
-  println!("cargo:rerun-if-changed=artifacts");
+  println!("cargo:rerun-if-changed=contracts/*");
+  println!("cargo:rerun-if-changed=artifacts/*");
+
+  for line in String::from_utf8(Command::new("solc").args(["--version"]).output().unwrap().stdout)
+    .unwrap()
+    .lines()
+  {
+    if let Some(version) = line.strip_prefix("Version: ") {
+      let version = version.split('+').next().unwrap();
+      assert_eq!(version, "0.8.25");
+    }
+  }

   #[rustfmt::skip]
   let args = [
     "--base-path", ".",
     "-o", "./artifacts", "--overwrite",
     "--bin", "--abi",
-    "--optimize",
-    "./contracts/Schnorr.sol"
-  ];
-
-  assert!(std::process::Command::new("solc").args(args).status().unwrap().success());
+    "--via-ir", "--optimize",
+
+    "./contracts/IERC20.sol",
+
+    "./contracts/Schnorr.sol",
+    "./contracts/Deployer.sol",
+    "./contracts/Sandbox.sol",
+    "./contracts/Router.sol",
+
+    "./src/tests/contracts/Schnorr.sol",
+    "./src/tests/contracts/ERC20.sol",
+
+    "--no-color",
+  ];
+  let solc = Command::new("solc").args(args).output().unwrap();
+  assert!(solc.status.success());
+  for line in String::from_utf8(solc.stderr).unwrap().lines() {
+    assert!(!line.starts_with("Error:"));
+  }
 }
coins/ethereum/contracts/Deployer.sol (new file, 52 lines)

// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;

/*
  The expected deployment process of the Router is as follows:

  1) A transaction deploying Deployer is made. Then, a deterministic signature
     is created such that an account with an unknown private key is the creator
     of the contract. Anyone can fund this address, and once anyone does, the
     transaction deploying Deployer can be published by anyone. No other
     transaction may be made from that account.

  2) Anyone deploys the Router through the Deployer. This uses a sequential
     nonce such that meet-in-the-middle attacks, with complexity 2**80, aren't
     feasible. While such attacks would still be feasible if the Deployer's
     address was controllable, the usage of a deterministic signature with a
     NUMS method prevents that.

  This doesn't have any denial-of-service risks and will resolve once anyone
  steps forward as deployer. This does fail to guarantee an identical address
  across every chain, though it enables letting anyone efficiently ask the
  Deployer for the address (with the Deployer having an identical address on
  every chain).

  Unfortunately, guaranteeing identical addresses isn't feasible. We'd need the
  Deployer contract to use a consistent salt for the Router, yet the Router
  must be deployed with a specific public key for Serai. Since Ethereum isn't
  able to determine a valid public key (one the result of a Serai DKG) from a
  dishonest public key, we have to allow multiple deployments with Serai being
  the one to determine which to use.

  The alternative would be to have a council publish the Serai key on-Ethereum,
  with Serai verifying the published result. This would introduce a DoS risk in
  the council not publishing the correct key/not publishing any key.
*/

contract Deployer {
  event Deployment(bytes32 indexed init_code_hash, address created);

  error DeploymentFailed();

  function deploy(bytes memory init_code) external {
    address created;
    assembly {
      created := create(0, add(init_code, 0x20), mload(init_code))
    }
    if (created == address(0)) {
      revert DeploymentFailed();
    }
    // These may be emitted out of order upon re-entrancy
    emit Deployment(keccak256(init_code), created);
  }
}
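A sketch of where the 2**80 figure in the comment comes from (editor's addition, not from the repository): Ethereum addresses are 160 bits, and a salt-based scheme such as CREATE2 hashes attacker-controllable inputs,

$$\mathrm{addr} = \mathrm{keccak256}(\texttt{0xff} \parallel \mathrm{deployer} \parallel \mathrm{salt} \parallel \mathrm{keccak256}(\mathrm{init\_code}))[12 \ldots 31]$$

so finding two distinct inputs mapping to one address is a birthday-bound collision search costing roughly

$$\sqrt{2^{160}} = 2^{80}$$

hash evaluations. CREATE from a fixed, NUMS-keyed deployer with a sequential nonce leaves no attacker-controlled input to grind, which is why the Deployer deploys via plain create.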
coins/ethereum/contracts/IERC20.sol (new file, 20 lines)

// SPDX-License-Identifier: CC0
pragma solidity ^0.8.0;

interface IERC20 {
  event Transfer(address indexed from, address indexed to, uint256 value);
  event Approval(address indexed owner, address indexed spender, uint256 value);

  function name() external view returns (string memory);
  function symbol() external view returns (string memory);
  function decimals() external view returns (uint8);

  function totalSupply() external view returns (uint256);

  function balanceOf(address owner) external view returns (uint256);
  function transfer(address to, uint256 value) external returns (bool);
  function transferFrom(address from, address to, uint256 value) external returns (bool);

  function approve(address spender, uint256 value) external returns (bool);
  function allowance(address owner, address spender) external view returns (uint256);
}
coins/ethereum/contracts/Router.sol (new file, 222 lines)

// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;

import "./IERC20.sol";

import "./Schnorr.sol";
import "./Sandbox.sol";

contract Router {
  // Nonce is incremented for each batch of transactions executed/key update
  uint256 public nonce;

  // Current public key's x-coordinate
  // This key must always have the parity defined within the Schnorr contract
  bytes32 public seraiKey;

  struct OutInstruction {
    address to;
    Call[] calls;

    uint256 value;
  }

  struct Signature {
    bytes32 c;
    bytes32 s;
  }

  event SeraiKeyUpdated(
    uint256 indexed nonce,
    bytes32 indexed key,
    Signature signature
  );
  event InInstruction(
    address indexed from,
    address indexed coin,
    uint256 amount,
    bytes instruction
  );
  // success is a uint256 representing a bitfield of transaction successes
  event Executed(
    uint256 indexed nonce,
    bytes32 indexed batch,
    uint256 success,
    Signature signature
  );

  // error types
  error InvalidKey();
  error InvalidSignature();
  error InvalidAmount();
  error FailedTransfer();
  error TooManyTransactions();

  modifier _updateSeraiKeyAtEndOfFn(
    uint256 _nonce,
    bytes32 key,
    Signature memory sig
  ) {
    if (
      (key == bytes32(0)) ||
      ((bytes32(uint256(key) % Schnorr.Q)) != key)
    ) {
      revert InvalidKey();
    }

    _;

    seraiKey = key;
    emit SeraiKeyUpdated(_nonce, key, sig);
  }

  constructor(bytes32 _seraiKey) _updateSeraiKeyAtEndOfFn(
    0,
    _seraiKey,
    Signature({ c: bytes32(0), s: bytes32(0) })
  ) {
    nonce = 1;
  }

  // updateSeraiKey validates the given Schnorr signature against the current
  // public key, and if successful, updates the contract's public key to the
  // given one.
  function updateSeraiKey(
    bytes32 _seraiKey,
    Signature calldata sig
  ) external _updateSeraiKeyAtEndOfFn(nonce, _seraiKey, sig) {
    bytes memory message =
      abi.encodePacked("updateSeraiKey", block.chainid, nonce, _seraiKey);
    nonce++;

    if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) {
      revert InvalidSignature();
    }
  }

  function inInstruction(
    address coin,
    uint256 amount,
    bytes memory instruction
  ) external payable {
    if (coin == address(0)) {
      if (amount != msg.value) {
        revert InvalidAmount();
      }
    } else {
      (bool success, bytes memory res) =
        address(coin).call(
          abi.encodeWithSelector(
            IERC20.transferFrom.selector,
            msg.sender,
            address(this),
            amount
          )
        );

      // Require there was nothing returned, which is done by some non-standard
      // tokens, or that the ERC20 contract did in fact return true
      bool nonStandardResOrTrue =
        (res.length == 0) || abi.decode(res, (bool));
      if (!(success && nonStandardResOrTrue)) {
        revert FailedTransfer();
      }
    }

    /*
      Due to fee-on-transfer tokens, emitting the amount directly is frowned
      upon. The amount instructed to transfer may not actually be the amount
      transferred.

      If we add nonReentrant to every single function which can effect the
      balance, we can check the amount exactly matches. This prevents transfers
      of less value than expected occurring, at least, not without an
      additional transfer to top up the difference (which isn't routed through
      this contract and accordingly isn't trying to artificially create
      events).

      If we don't add nonReentrant, a transfer can be started, and then a new
      transfer for the difference can follow it up (again and again until a
      rounding error is reached). This contract would believe all transfers
      were done in full, despite each only being done in part (except for the
      last one).

      Given that fee-on-transfer tokens aren't intended to be supported (the
      only token planned to be supported is DAI, which doesn't have any
      fee-on-transfer logic), and that fee-on-transfer tokens aren't even able
      to be supported at this time, we simply classify this entire class of
      tokens as non-standard implementations which induce undefined behavior.
      It is the Serai network's role not to add support for any non-standard
      implementations.
    */
    emit InInstruction(msg.sender, coin, amount, instruction);
  }

  // execute accepts a list of transactions to execute as well as a signature.
  // if signature verification passes, the given transactions are executed.
  // if signature verification fails, this function will revert.
  function execute(
    OutInstruction[] calldata transactions,
    Signature calldata sig
  ) external {
    if (transactions.length > 256) {
      revert TooManyTransactions();
    }

    bytes memory message =
      abi.encode("execute", block.chainid, nonce, transactions);
    uint256 executed_with_nonce = nonce;
    // This prevents re-entrancy from causing double spends yet does allow
    // out-of-order execution via re-entrancy
    nonce++;

    if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) {
      revert InvalidSignature();
    }

    uint256 successes;
    for (uint256 i = 0; i < transactions.length; i++) {
      bool success;

      // If there are no calls, send to `to` the value
      if (transactions[i].calls.length == 0) {
        (success, ) = transactions[i].to.call{
          value: transactions[i].value,
          gas: 5_000
        }("");
      } else {
        // If there are calls, ignore `to`. Deploy a new Sandbox and proxy the
        // calls through that
        //
        // We could use a single sandbox in order to reduce gas costs, yet that
        // risks one person creating an approval that's hooked before another
        // user's intended action executes, in order to drain their coins
        //
        // While technically, that would be a flaw in the sandboxed flow, this
        // is robust and prevents such flaws from being possible
        //
        // We also don't want people to set state via the Sandbox and expect it
        // to remain available in the future, when anyone else could set a
        // distinct value
        Sandbox sandbox = new Sandbox();
        (success, ) = address(sandbox).call{
          value: transactions[i].value,
          // TODO: Have the Call specify the gas up front
          gas: 350_000
        }(
          abi.encodeWithSelector(
            Sandbox.sandbox.selector,
            transactions[i].calls
          )
        );
      }

      assembly {
        successes := or(successes, shl(i, success))
      }
    }
    emit Executed(
      executed_with_nonce,
      keccak256(message),
      successes,
      sig
    );
  }
}
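Since `Executed.success` packs up to 256 per-transaction results into a single word via `shl(i, success)` above, an off-chain consumer can unpack bit i as the result of transaction i. An editor's sketch of that decoding (not code from the diff; assumes the standard big-endian 32-byte encoding of the uint256):

// Returns whether transaction `i` of the batch succeeded, given the
// big-endian bytes of the `success` bitfield from the Executed event
fn tx_succeeded(success_word_be: [u8; 32], i: usize) -> bool {
  assert!(i < 256);
  // Bit i of the uint256 lives in byte 31 - (i / 8) of the big-endian
  // encoding, at bit position i % 8
  (success_word_be[31 - (i / 8)] >> (i % 8)) & 1 == 1
}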
coins/ethereum/contracts/Sandbox.sol (new file, 48 lines)

// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.24;

struct Call {
  address to;
  uint256 value;
  bytes data;
}

// A minimal sandbox focused on gas efficiency.
//
// The first call is executed if any of the calls fail, making it a fallback.
// All other calls are executed sequentially.
contract Sandbox {
  error AlreadyCalled();
  error CallsFailed();

  function sandbox(Call[] calldata calls) external payable {
    // Prevent re-entrancy due to this executing arbitrary calls from anyone
    // and anywhere
    bool called;
    assembly { called := tload(0) }
    if (called) {
      revert AlreadyCalled();
    }
    assembly { tstore(0, 1) }

    // Execute the calls, starting from 1
    for (uint256 i = 1; i < calls.length; i++) {
      (bool success, ) =
        calls[i].to.call{ value: calls[i].value }(calls[i].data);

      // If this call failed, execute the fallback (call 0)
      if (!success) {
        (success, ) =
          calls[0].to.call{ value: address(this).balance }(calls[0].data);
        // If this call also failed, revert entirely
        if (!success) {
          revert CallsFailed();
        }
        return;
      }
    }

    // We don't clear the re-entrancy guard as this contract should never be
    // called again, so there's no reason to spend the effort
  }
}
@@ -1,36 +1,44 @@
-//SPDX-License-Identifier: AGPLv3
+// SPDX-License-Identifier: AGPLv3
 pragma solidity ^0.8.0;

 // see https://github.com/noot/schnorr-verify for implementation details
-contract Schnorr {
+library Schnorr {
   // secp256k1 group order
   uint256 constant public Q =
     0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141;

-  // parity := public key y-coord parity (27 or 28)
-  // px := public key x-coord
-  // message := 32-byte message
-  // s := schnorr signature
-  // e := schnorr signature challenge
-  function verify(
-    uint8 parity,
-    bytes32 px,
-    bytes32 message,
-    bytes32 s,
-    bytes32 e
-  ) public view returns (bool) {
-    // ecrecover = (m, v, r, s);
-    bytes32 sp = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
-    bytes32 ep = bytes32(Q - mulmod(uint256(e), uint256(px), Q));
-
-    require(sp != 0);
-    // the ecrecover precompile implementation checks that the `r` and `s`
-    // inputs are non-zero (in this case, `px` and `ep`), thus we don't need to
-    // check if they're zero
-    address R = ecrecover(sp, parity, px, ep);
-    require(R != address(0), "ecrecover failed");
-    return e == keccak256(
-      abi.encodePacked(R, uint8(parity), px, block.chainid, message)
-    );
-  }
+  // Fixed parity for the public keys used in this contract
+  // This avoids spending a word passing the parity in a similar style to
+  // Bitcoin's Taproot
+  uint8 constant public KEY_PARITY = 27;
+
+  error InvalidSOrA();
+  error MalformedSignature();
+
+  // px := public key x-coord, where the public key has a parity of KEY_PARITY
+  // message := 32-byte hash of the message
+  // c := schnorr signature challenge
+  // s := schnorr signature
+  function verify(
+    bytes32 px,
+    bytes memory message,
+    bytes32 c,
+    bytes32 s
+  ) internal pure returns (bool) {
+    // ecrecover = (m, v, r, s) -> key
+    // We instead pass the following to obtain the nonce (not the key)
+    // Then we hash it and verify it matches the challenge
+    bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
+    bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q));
+
+    // For safety, we want each input to ecrecover to be non-zero (sa, px, ca)
+    // The ecrecover precompile checks `r` and `s` (`px` and `ca`) are non-zero
+    // That leaves us to check `sa` is non-zero
+    if (sa == 0) revert InvalidSOrA();
+    address R = ecrecover(sa, KEY_PARITY, px, ca);
+    if (R == address(0)) revert MalformedSignature();
+
+    // Check the signature is correct by rebuilding the challenge
+    return c == keccak256(abi.encodePacked(R, px, message));
+  }
 }
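The algebra behind this ecrecover trick, sketched by the editor (notation ours, not the repository's): the precompile, given a hash $h$ and an ECDSA signature $(r, s')$ with parity $v$, returns

$$\mathrm{ecrecover}(h, v, r, s') = \mathrm{address}\left(r^{-1}\,(s' \cdot P_r - h \cdot G)\right)$$

where $P_r$ is the curve point with x-coordinate $r$ and parity $v$. Substituting $r = p_x$, $v = \texttt{KEY\_PARITY}$ (so $P_r = A$, the public key), $h = sa = -s\,p_x \bmod q$, and $s' = ca = -c\,p_x \bmod q$ gives

$$\mathrm{address}\left(p_x^{-1}\,(-c\,p_x\,A + s\,p_x\,G)\right) = \mathrm{address}(s\,G - c\,A) = \mathrm{address}(R)$$

since a valid Schnorr signature satisfies $s\,G = R + c\,A$. Recomputing $c' = \mathrm{keccak256}(\mathrm{address}(R) \parallel p_x \parallel m)$ and checking $c' = c$ then verifies the signature using only one precompile call.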
coins/ethereum/src/abi/mod.rs (new file, 37 lines)

use alloy_sol_types::sol;

#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod erc20_container {
  use super::*;
  sol!("contracts/IERC20.sol");
}
pub use erc20_container::IERC20 as erc20;

#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod deployer_container {
  use super::*;
  sol!("contracts/Deployer.sol");
}
pub use deployer_container::Deployer as deployer;

#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod router_container {
  use super::*;
  sol!(Router, "artifacts/Router.abi");
}
pub use router_container::Router as router;
@@ -1,36 +0,0 @@
-use thiserror::Error;
-use eyre::{eyre, Result};
-
-use ethers_providers::{Provider, Http};
-use ethers_contract::abigen;
-
-use crate::crypto::ProcessedSignature;
-
-#[derive(Error, Debug)]
-pub enum EthereumError {
-  #[error("failed to verify Schnorr signature")]
-  VerificationError,
-}
-
-abigen!(Schnorr, "./artifacts/Schnorr.abi");
-
-pub async fn call_verify(
-  contract: &Schnorr<Provider<Http>>,
-  params: &ProcessedSignature,
-) -> Result<()> {
-  if contract
-    .verify(
-      params.parity + 27,
-      params.px.to_bytes().into(),
-      params.message,
-      params.s.to_bytes().into(),
-      params.e.to_bytes().into(),
-    )
-    .call()
-    .await?
-  {
-    Ok(())
-  } else {
-    Err(eyre!(EthereumError::VerificationError))
-  }
-}
@@ -1,107 +1,185 @@
-use sha3::{Digest, Keccak256};
-
-use group::Group;
+use group::ff::PrimeField;
 use k256::{
-  elliptic_curve::{
-    bigint::ArrayEncoding, ops::Reduce, point::DecompressPoint, sec1::ToEncodedPoint,
-  },
-  AffinePoint, ProjectivePoint, Scalar, U256,
+  elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint},
+  ProjectivePoint, Scalar, U256 as KU256,
 };
+#[cfg(test)]
+use k256::{elliptic_curve::point::DecompressPoint, AffinePoint};

-use frost::{algorithm::Hram, curve::Secp256k1};
+use frost::{
+  algorithm::{Hram, SchnorrSignature},
+  curve::{Ciphersuite, Secp256k1},
+};

-pub fn keccak256(data: &[u8]) -> [u8; 32] {
-  Keccak256::digest(data).into()
+use alloy_core::primitives::{Parity, Signature as AlloySignature};
+use alloy_consensus::{SignableTransaction, Signed, TxLegacy};
+
+use crate::abi::router::{Signature as AbiSignature};
+
+pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] {
+  alloy_core::primitives::keccak256(data).into()
 }

-pub fn hash_to_scalar(data: &[u8]) -> Scalar {
-  Scalar::reduce(U256::from_be_slice(&keccak256(data)))
+pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar {
+  <Scalar as Reduce<KU256>>::reduce_bytes(&keccak256(data).into())
 }

 pub fn address(point: &ProjectivePoint) -> [u8; 20] {
   let encoded_point = point.to_encoded_point(false);
-  keccak256(&encoded_point.as_ref()[1 .. 65])[12 .. 32].try_into().unwrap()
+  // Last 20 bytes of the hash of the concatenated x and y coordinates
+  // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point
+  keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap()
 }

-pub fn ecrecover(message: Scalar, v: u8, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
-  if r.is_zero().into() || s.is_zero().into() {
-    return None;
-  }
-
-  #[allow(non_snake_case)]
-  let R = AffinePoint::decompress(&r.to_bytes(), v.into());
-  #[allow(non_snake_case)]
-  if let Some(R) = Option::<AffinePoint>::from(R) {
-    #[allow(non_snake_case)]
-    let R = ProjectivePoint::from(R);
-
-    let r = r.invert().unwrap();
-    let u1 = ProjectivePoint::GENERATOR * (-message * r);
-    let u2 = R * (s * r);
-    let key: ProjectivePoint = u1 + u2;
-    if !bool::from(key.is_identity()) {
-      return Some(address(&key));
-    }
-  }
-
-  None
-}
+pub(crate) fn deterministically_sign(tx: &TxLegacy) -> Signed<TxLegacy> {
+  assert!(
+    tx.chain_id.is_none(),
+    "chain ID was Some when deterministically signing a TX (causing a non-deterministic signer)"
+  );
+
+  let sig_hash = tx.signature_hash().0;
+  let mut r = hash_to_scalar(&[sig_hash.as_slice(), b"r"].concat());
+  let mut s = hash_to_scalar(&[sig_hash.as_slice(), b"s"].concat());
+  loop {
+    let r_bytes: [u8; 32] = r.to_repr().into();
+    let s_bytes: [u8; 32] = s.to_repr().into();
+    let v = Parity::NonEip155(false);
+    let signature =
+      AlloySignature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), v).unwrap();
+    let tx = tx.clone().into_signed(signature);
+    if tx.recover_signer().is_ok() {
+      return tx;
+    }
+
+    // Re-hash until valid
+    r = hash_to_scalar(r_bytes.as_ref());
+    s = hash_to_scalar(s_bytes.as_ref());
+  }
+}
+
+/// The public key for a Schnorr-signing account.
+#[allow(non_snake_case)]
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct PublicKey {
+  pub(crate) A: ProjectivePoint,
+  pub(crate) px: Scalar,
+}
+
+impl PublicKey {
+  /// Construct a new `PublicKey`.
+  ///
+  /// This will return None if the provided point isn't eligible to be a public key (due to
+  /// bounds such as parity).
+  #[allow(non_snake_case)]
+  pub fn new(A: ProjectivePoint) -> Option<PublicKey> {
+    let affine = A.to_affine();
+    // Only allow even keys to save a word within Ethereum
+    let is_odd = bool::from(affine.y_is_odd());
+    if is_odd {
+      None?;
+    }
+
+    let x_coord = affine.x();
+    let x_coord_scalar = <Scalar as Reduce<KU256>>::reduce_bytes(&x_coord);
+    // Return None if a reduction would occur
+    // Reductions would be incredibly unlikely and shouldn't be an issue, yet it's one less
+    // headache/concern to have
+    // This does ban a trivial amount of public keys
+    if x_coord_scalar.to_repr() != x_coord {
+      None?;
+    }
+
+    Some(PublicKey { A, px: x_coord_scalar })
+  }
+
+  pub fn point(&self) -> ProjectivePoint {
+    self.A
+  }
+
+  pub(crate) fn eth_repr(&self) -> [u8; 32] {
+    self.px.to_repr().into()
+  }
+
+  #[cfg(test)]
+  pub(crate) fn from_eth_repr(repr: [u8; 32]) -> Option<Self> {
+    #[allow(non_snake_case)]
+    let A = Option::<AffinePoint>::from(AffinePoint::decompress(&repr.into(), 0.into()))?.into();
+    Option::from(Scalar::from_repr(repr.into())).map(|px| PublicKey { A, px })
+  }
+}

+/// The HRAm to use for the Schnorr contract.
 #[derive(Clone, Default)]
 pub struct EthereumHram {}
 impl Hram<Secp256k1> for EthereumHram {
   #[allow(non_snake_case)]
   fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar {
-    let a_encoded_point = A.to_encoded_point(true);
-    let mut a_encoded = a_encoded_point.as_ref().to_owned();
-    a_encoded[0] += 25; // Ethereum uses 27/28 for point parity
+    let x_coord = A.to_affine().x();

     let mut data = address(R).to_vec();
-    data.append(&mut a_encoded);
-    data.append(&mut m.to_vec());
-    Scalar::reduce(U256::from_be_slice(&keccak256(&data)))
+    data.extend(x_coord.as_slice());
+    data.extend(m);
+
+    <Scalar as Reduce<KU256>>::reduce_bytes(&keccak256(&data).into())
   }
 }

-pub struct ProcessedSignature {
-  pub s: Scalar,
-  pub px: Scalar,
-  pub parity: u8,
-  pub message: [u8; 32],
-  pub e: Scalar,
+/// A signature for the Schnorr contract.
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct Signature {
+  pub(crate) c: Scalar,
+  pub(crate) s: Scalar,
 }

-#[allow(non_snake_case)]
-pub fn preprocess_signature_for_ecrecover(
-  m: [u8; 32],
-  R: &ProjectivePoint,
-  s: Scalar,
-  A: &ProjectivePoint,
-  chain_id: U256,
-) -> (Scalar, Scalar) {
-  let processed_sig = process_signature_for_contract(m, R, s, A, chain_id);
-  let sr = processed_sig.s.mul(&processed_sig.px).negate();
-  let er = processed_sig.e.mul(&processed_sig.px).negate();
-  (sr, er)
-}
-
-#[allow(non_snake_case)]
-pub fn process_signature_for_contract(
-  m: [u8; 32],
-  R: &ProjectivePoint,
-  s: Scalar,
-  A: &ProjectivePoint,
-  chain_id: U256,
-) -> ProcessedSignature {
-  let encoded_pk = A.to_encoded_point(true);
-  let px = &encoded_pk.as_ref()[1 .. 33];
-  let px_scalar = Scalar::reduce(U256::from_be_slice(px));
-  let e = EthereumHram::hram(R, A, &[chain_id.to_be_byte_array().as_slice(), &m].concat());
-  ProcessedSignature {
-    s,
-    px: px_scalar,
-    parity: &encoded_pk.as_ref()[0] - 2,
-    message: m,
-    e,
-  }
-}
+impl Signature {
+  pub fn verify(&self, public_key: &PublicKey, message: &[u8]) -> bool {
+    #[allow(non_snake_case)]
+    let R = (Secp256k1::generator() * self.s) - (public_key.A * self.c);
+    EthereumHram::hram(&R, &public_key.A, message) == self.c
+  }
+
+  /// Construct a new `Signature`.
+  ///
+  /// This will return None if the signature is invalid.
+  pub fn new(
+    public_key: &PublicKey,
+    message: &[u8],
+    signature: SchnorrSignature<Secp256k1>,
+  ) -> Option<Signature> {
+    let c = EthereumHram::hram(&signature.R, &public_key.A, message);
+    if !signature.verify(public_key.A, c) {
+      None?;
+    }
+
+    let res = Signature { c, s: signature.s };
+    assert!(res.verify(public_key, message));
+    Some(res)
+  }
+
+  pub fn c(&self) -> Scalar {
+    self.c
+  }
+  pub fn s(&self) -> Scalar {
+    self.s
+  }
+
+  pub fn to_bytes(&self) -> [u8; 64] {
+    let mut res = [0; 64];
+    res[.. 32].copy_from_slice(self.c.to_repr().as_ref());
+    res[32 ..].copy_from_slice(self.s.to_repr().as_ref());
+    res
+  }
+
+  pub fn from_bytes(bytes: [u8; 64]) -> std::io::Result<Self> {
+    let mut reader = bytes.as_slice();
+    let c = Secp256k1::read_F(&mut reader)?;
+    let s = Secp256k1::read_F(&mut reader)?;
+    Ok(Signature { c, s })
+  }
+}
+impl From<&Signature> for AbiSignature {
+  fn from(sig: &Signature) -> AbiSignature {
+    let c: [u8; 32] = sig.c.to_repr().into();
+    let s: [u8; 32] = sig.s.to_repr().into();
+    AbiSignature { c: c.into(), s: s.into() }
+  }
+}
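As a usage note on `PublicKey::new`'s parity bound: roughly half of all points are rejected, so standalone key generation retries until an even-parity key is produced. An editor's sketch of that loop (not from the diff; in Serai itself the key comes from a DKG, not local sampling):

use rand_core::OsRng;

use group::ff::Field;
use k256::{Scalar, ProjectivePoint};

use ethereum_serai::crypto::PublicKey;

fn main() {
  // Sample until PublicKey::new accepts: it rejects odd-parity points and the
  // negligible set of x-coordinates which would reduce modulo the scalar field
  let public_key = loop {
    let point = ProjectivePoint::GENERATOR * Scalar::random(&mut OsRng);
    if let Some(public_key) = PublicKey::new(point) {
      break public_key;
    }
  };
  // The full point remains available for off-chain use; the Ethereum contracts
  // only ever see the x-coordinate
  let _point = public_key.point();
}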
coins/ethereum/src/deployer.rs (new file, 119 lines)

use std::sync::Arc;

use alloy_core::primitives::{hex::FromHex, Address, B256, U256, Bytes, TxKind};
use alloy_consensus::{Signed, TxLegacy};

use alloy_sol_types::{SolCall, SolEvent};

use alloy_rpc_types::{BlockNumberOrTag, Filter};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};

use crate::{
  Error,
  crypto::{self, keccak256, PublicKey},
  router::Router,
};
pub use crate::abi::deployer as abi;

/// The Deployer contract for the Router contract.
///
/// This Deployer has a deterministic address, letting it be immediately identified on any
/// compatible chain. It then supports retrieving the Router contract's address (which isn't
/// deterministic) using a single log query.
#[derive(Clone, Debug)]
pub struct Deployer;
impl Deployer {
  /// Obtain the transaction to deploy this contract, already signed.
  ///
  /// The account this transaction is sent from (which is populated in `from`) must be sufficiently
  /// funded for this transaction to be submitted. This account has no known private key to anyone,
  /// so ETH sent can be neither misappropriated nor returned.
  pub fn deployment_tx() -> Signed<TxLegacy> {
    let bytecode = include_str!("../artifacts/Deployer.bin");
    let bytecode =
      Bytes::from_hex(bytecode).expect("compiled-in Deployer bytecode wasn't valid hex");

    let tx = TxLegacy {
      chain_id: None,
      nonce: 0,
      gas_price: 100_000_000_000u128,
      // TODO: Use a more accurate gas limit
      gas_limit: 1_000_000u128,
      to: TxKind::Create,
      value: U256::ZERO,
      input: bytecode,
    };

    crypto::deterministically_sign(&tx)
  }

  /// Obtain the deterministic address for this contract.
  pub fn address() -> [u8; 20] {
    let deployer_deployer =
      Self::deployment_tx().recover_signer().expect("deployment_tx didn't have a valid signature");
    **Address::create(&deployer_deployer, 0)
  }

  /// Construct a new view of the `Deployer`.
  pub async fn new(provider: Arc<RootProvider<SimpleRequest>>) -> Result<Option<Self>, Error> {
    let address = Self::address();
    #[cfg(not(test))]
    let required_block = BlockNumberOrTag::Finalized;
    #[cfg(test)]
    let required_block = BlockNumberOrTag::Latest;
    let code = provider
      .get_code_at(address.into(), required_block.into())
      .await
      .map_err(|_| Error::ConnectionError)?;
    // Contract has yet to be deployed
    if code.is_empty() {
      return Ok(None);
    }
    Ok(Some(Self))
  }

  /// Yield the `ContractCall` necessary to deploy the Router.
  pub fn deploy_router(&self, key: &PublicKey) -> TxLegacy {
    TxLegacy {
      to: TxKind::Call(Self::address().into()),
      input: abi::deployCall::new((Router::init_code(key).into(),)).abi_encode().into(),
      gas_limit: 1_000_000,
      ..Default::default()
    }
  }

  /// Find the first Router deployed with the specified key as its first key.
  ///
  /// This is the Router Serai will use, and is the only way to construct a `Router`.
  pub async fn find_router(
    &self,
    provider: Arc<RootProvider<SimpleRequest>>,
    key: &PublicKey,
  ) -> Result<Option<Router>, Error> {
    let init_code = Router::init_code(key);
    let init_code_hash = keccak256(&init_code);

    #[cfg(not(test))]
    let to_block = BlockNumberOrTag::Finalized;
    #[cfg(test)]
    let to_block = BlockNumberOrTag::Latest;

    // Find the first log using this init code (where the init code is binding to the key)
    let filter =
      Filter::new().from_block(0).to_block(to_block).address(Address::from(Self::address()));
    let filter = filter.event_signature(abi::Deployment::SIGNATURE_HASH);
    let filter = filter.topic1(B256::from(init_code_hash));
    let logs = provider.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

    let Some(first_log) = logs.first() else { return Ok(None) };
    let router = first_log
      .log_decode::<abi::Deployment>()
      .map_err(|_| Error::ConnectionError)?
      .inner
      .data
      .created;

    Ok(Some(Router::new(provider, router)))
  }
}
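Tying these pieces together, the intended flow is: publish `deployment_tx()` once its NUMS signer is funded, have anyone submit `deploy_router`, then locate the Router by its init code. An editor's sketch of that sequence (broadcasting and funding are elided; this is an illustration of the API above, not repository code):

use std::sync::Arc;

use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::RootProvider;

use ethereum_serai::{crypto::PublicKey, deployer::Deployer};

async fn deploy_flow(provider: Arc<RootProvider<SimpleRequest>>, key: &PublicKey) {
  // Anyone may broadcast this pre-signed transaction once its signer is funded
  let deployment_tx = Deployer::deployment_tx();
  let _signer_to_fund = deployment_tx.recover_signer().unwrap();
  // ... fund the signer, then broadcast deployment_tx ...

  // Once deployed, the Deployer is visible at its deterministic address
  let deployer = Deployer::new(provider.clone()).await.unwrap().unwrap();
  let _router_deployment_tx = deployer.deploy_router(key);
  // ... have any funded account submit the returned TxLegacy ...

  // The Router is then found via a single log query binding init code to key
  let _router = deployer.find_router(provider, key).await.unwrap().unwrap();
}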
118
coins/ethereum/src/erc20.rs
Normal file
118
coins/ethereum/src/erc20.rs
Normal file
@@ -0,0 +1,118 @@
|
||||
use std::{sync::Arc, collections::HashSet};

use alloy_core::primitives::{Address, B256, U256};

use alloy_sol_types::{SolInterface, SolEvent};

use alloy_rpc_types::{BlockNumberOrTag, Filter};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};

use crate::Error;
pub use crate::abi::erc20 as abi;
use abi::{IERC20Calls, Transfer, transferCall, transferFromCall};

#[derive(Clone, Debug)]
pub struct TopLevelErc20Transfer {
  pub id: [u8; 32],
  pub from: [u8; 20],
  pub amount: U256,
  pub data: Vec<u8>,
}

/// A view for an ERC20 contract.
#[derive(Clone, Debug)]
pub struct ERC20(Arc<RootProvider<SimpleRequest>>, Address);
impl ERC20 {
  /// Construct a new view of the specified ERC20 contract.
  ///
  /// This checks a contract is deployed at that address yet does not check the contract is
  /// actually an ERC20.
  pub async fn new(
    provider: Arc<RootProvider<SimpleRequest>>,
    address: [u8; 20],
  ) -> Result<Option<Self>, Error> {
    let code = provider
      .get_code_at(address.into(), BlockNumberOrTag::Finalized.into())
      .await
      .map_err(|_| Error::ConnectionError)?;
    // Contract has yet to be deployed
    if code.is_empty() {
      return Ok(None);
    }
    Ok(Some(Self(provider.clone(), Address::from(&address))))
  }

  pub async fn top_level_transfers(
    &self,
    block: u64,
    to: [u8; 20],
  ) -> Result<Vec<TopLevelErc20Transfer>, Error> {
    let filter = Filter::new().from_block(block).to_block(block).address(self.1);
    let filter = filter.event_signature(Transfer::SIGNATURE_HASH);
    let mut to_topic = [0; 32];
    to_topic[12 ..].copy_from_slice(&to);
    let filter = filter.topic2(B256::from(to_topic));
    let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

    let mut handled = HashSet::new();

    let mut top_level_transfers = vec![];
    for log in logs {
      // Double check the address which emitted this log
      if log.address() != self.1 {
        Err(Error::ConnectionError)?;
      }

      let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?;
      let tx = self.0.get_transaction_by_hash(tx_id).await.map_err(|_| Error::ConnectionError)?;

      // If this is a top-level call...
      if tx.to == Some(self.1) {
        // And we recognize the call...
        // Don't validate the encoding as this can't be re-encoded to an identical bytestring due
        // to the InInstruction appended
        if let Ok(call) = IERC20Calls::abi_decode(&tx.input, false) {
          // Extract the top-level call's from/to/value
          let (from, call_to, value) = match call {
            IERC20Calls::transfer(transferCall { to: call_to, value }) => (tx.from, call_to, value),
            IERC20Calls::transferFrom(transferFromCall { from, to: call_to, value }) => {
              (from, call_to, value)
            }
            // Treat any other function selectors as unrecognized
            _ => continue,
          };

          let log = log.log_decode::<Transfer>().map_err(|_| Error::ConnectionError)?.inner.data;

          // Ensure the top-level transfer is equivalent, and this presumably isn't a log for an
          // internal transfer
          if (log.from != from) || (call_to != to) || (value != log.value) {
            continue;
          }

          // Now that the top-level transfer is confirmed to be equivalent to the log, ensure it's
          // the only log we handle
          if handled.contains(&tx_id) {
            continue;
          }
          handled.insert(tx_id);

          // Read the data appended after
          let encoded = call.abi_encode();
          let data = tx.input.as_ref()[encoded.len() ..].to_vec();

          // Push the transfer
          top_level_transfers.push(TopLevelErc20Transfer {
            // Since we'll only handle one log for this TX, set the ID to the TX ID
            id: *tx_id,
            from: *log.from.0,
            amount: log.value,
            data,
          });
        }
      }
    }
    Ok(top_level_transfers)
  }
}
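A sketch of how this view might be driven when scanning a block, assuming an existing provider plus placeholder token/recipient addresses (all names here are illustrative, not part of the diff):

use std::sync::Arc;

use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::RootProvider;

use ethereum_serai::{Error, erc20::ERC20};

async fn scan_token_block(
  provider: Arc<RootProvider<SimpleRequest>>,
  token: [u8; 20],
  recipient: [u8; 20],
  block: u64,
) -> Result<(), Error> {
  // None means no contract is deployed at `token` yet
  let Some(erc20) = ERC20::new(provider, token).await? else { return Ok(()) };
  for transfer in erc20.top_level_transfers(block, recipient).await? {
    // `id` is the TX hash, as only one transfer is handled per TX
    println!(
      "transfer {:?}: {} with {} bytes of appended data",
      transfer.id, transfer.amount, transfer.data.len()
    );
  }
  Ok(())
}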
@@ -1,2 +1,30 @@
pub mod contract;
use thiserror::Error;

pub use alloy_core;
pub use alloy_consensus;

pub use alloy_rpc_types;
pub use alloy_simple_request_transport;
pub use alloy_rpc_client;
pub use alloy_provider;

pub mod crypto;

pub(crate) mod abi;

pub mod erc20;
pub mod deployer;
pub mod router;

pub mod machine;

#[cfg(test)]
mod tests;

#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]
pub enum Error {
  #[error("failed to verify Schnorr signature")]
  InvalidSignature,
  #[error("couldn't make call/send TX")]
  ConnectionError,
}
414 coins/ethereum/src/machine.rs Normal file
@@ -0,0 +1,414 @@
use std::{
  io::{self, Read},
  collections::HashMap,
};

use rand_core::{RngCore, CryptoRng};

use transcript::{Transcript, RecommendedTranscript};

use group::GroupEncoding;
use frost::{
  curve::{Ciphersuite, Secp256k1},
  Participant, ThresholdKeys, FrostError,
  algorithm::Schnorr,
  sign::*,
};

use alloy_core::primitives::U256;

use crate::{
  crypto::{PublicKey, EthereumHram, Signature},
  router::{
    abi::{Call as AbiCall, OutInstruction as AbiOutInstruction},
    Router,
  },
};

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Call {
  pub to: [u8; 20],
  pub value: U256,
  pub data: Vec<u8>,
}
impl Call {
  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut to = [0; 20];
    reader.read_exact(&mut to)?;

    let value = {
      let mut value_bytes = [0; 32];
      reader.read_exact(&mut value_bytes)?;
      U256::from_le_slice(&value_bytes)
    };

    let mut data_len = {
      let mut data_len = [0; 4];
      reader.read_exact(&mut data_len)?;
      usize::try_from(u32::from_le_bytes(data_len)).expect("u32 couldn't fit within a usize")
    };

    // A valid DoS would be to claim 4 GB of data is present while sending only 4 bytes
    // We read this in 1 KB chunks to only read data actually present (with a max DoS of 1 KB)
    let mut data = vec![];
    while data_len > 0 {
      let chunk_len = data_len.min(1024);
      let mut chunk = vec![0; chunk_len];
      reader.read_exact(&mut chunk)?;
      data.extend(&chunk);
      data_len -= chunk_len;
    }

    Ok(Call { to, value, data })
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(&self.to)?;
    writer.write_all(&self.value.as_le_bytes())?;

    let data_len = u32::try_from(self.data.len())
      .map_err(|_| io::Error::other("call data length exceeded 2**32"))?;
    writer.write_all(&data_len.to_le_bytes())?;
    writer.write_all(&self.data)
  }
}
impl From<Call> for AbiCall {
  fn from(call: Call) -> AbiCall {
    AbiCall { to: call.to.into(), value: call.value, data: call.data.into() }
  }
}

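// A minimal round-trip sketch of the wire format above (20-byte `to`, 32-byte
// little-endian `value`, u32 length-prefixed `data`); values are illustrative
// and this function is not part of the original commit. It's written as
// module-internal code since `write` is private.
#[allow(dead_code)]
fn call_round_trip_sketch() -> io::Result<()> {
  let call = Call { to: [0x11; 20], value: U256::from(1u64), data: vec![1, 2, 3] };
  let mut buf = vec![];
  call.write(&mut buf)?;
  // `read` tolerates hostile length prefixes by reading in 1 KB chunks
  assert_eq!(Call::read(&mut buf.as_slice())?, call);
  Ok(())
}
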
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum OutInstructionTarget {
  Direct([u8; 20]),
  Calls(Vec<Call>),
}
impl OutInstructionTarget {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0xff];
    reader.read_exact(&mut kind)?;

    match kind[0] {
      0 => {
        let mut addr = [0; 20];
        reader.read_exact(&mut addr)?;
        Ok(OutInstructionTarget::Direct(addr))
      }
      1 => {
        let mut calls_len = [0; 4];
        reader.read_exact(&mut calls_len)?;
        let calls_len = u32::from_le_bytes(calls_len);

        let mut calls = vec![];
        for _ in 0 .. calls_len {
          calls.push(Call::read(reader)?);
        }
        Ok(OutInstructionTarget::Calls(calls))
      }
      _ => Err(io::Error::other("unrecognized OutInstructionTarget"))?,
    }
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      OutInstructionTarget::Direct(addr) => {
        writer.write_all(&[0])?;
        writer.write_all(addr)?;
      }
      OutInstructionTarget::Calls(calls) => {
        writer.write_all(&[1])?;
        let call_len = u32::try_from(calls.len())
          .map_err(|_| io::Error::other("amount of calls exceeded 2**32"))?;
        writer.write_all(&call_len.to_le_bytes())?;
        for call in calls {
          call.write(writer)?;
        }
      }
    }
    Ok(())
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct OutInstruction {
  pub target: OutInstructionTarget,
  pub value: U256,
}
impl OutInstruction {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let target = OutInstructionTarget::read(reader)?;

    let value = {
      let mut value_bytes = [0; 32];
      reader.read_exact(&mut value_bytes)?;
      U256::from_le_slice(&value_bytes)
    };

    Ok(OutInstruction { target, value })
  }
  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    self.target.write(writer)?;
    writer.write_all(&self.value.as_le_bytes())
  }
}
impl From<OutInstruction> for AbiOutInstruction {
  fn from(instruction: OutInstruction) -> AbiOutInstruction {
    match instruction.target {
      OutInstructionTarget::Direct(addr) => {
        AbiOutInstruction { to: addr.into(), calls: vec![], value: instruction.value }
      }
      OutInstructionTarget::Calls(calls) => AbiOutInstruction {
        to: [0; 20].into(),
        calls: calls.into_iter().map(Into::into).collect(),
        value: instruction.value,
      },
    }
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum RouterCommand {
  UpdateSeraiKey { chain_id: U256, nonce: U256, key: PublicKey },
  Execute { chain_id: U256, nonce: U256, outs: Vec<OutInstruction> },
}

impl RouterCommand {
  pub fn msg(&self) -> Vec<u8> {
    match self {
      RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {
        Router::update_serai_key_message(*chain_id, *nonce, key)
      }
      RouterCommand::Execute { chain_id, nonce, outs } => Router::execute_message(
        *chain_id,
        *nonce,
        outs.iter().map(|out| out.clone().into()).collect(),
      ),
    }
  }

  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0xff];
    reader.read_exact(&mut kind)?;

    match kind[0] {
      0 => {
        let mut chain_id = [0; 32];
        reader.read_exact(&mut chain_id)?;

        let mut nonce = [0; 32];
        reader.read_exact(&mut nonce)?;

        let key = PublicKey::new(Secp256k1::read_G(reader)?)
          .ok_or(io::Error::other("key for RouterCommand doesn't have an eth representation"))?;
        Ok(RouterCommand::UpdateSeraiKey {
          chain_id: U256::from_le_slice(&chain_id),
          nonce: U256::from_le_slice(&nonce),
          key,
        })
      }
      1 => {
        let mut chain_id = [0; 32];
        reader.read_exact(&mut chain_id)?;
        let chain_id = U256::from_le_slice(&chain_id);

        let mut nonce = [0; 32];
        reader.read_exact(&mut nonce)?;
        let nonce = U256::from_le_slice(&nonce);

        let mut outs_len = [0; 4];
        reader.read_exact(&mut outs_len)?;
        let outs_len = u32::from_le_bytes(outs_len);

        let mut outs = vec![];
        for _ in 0 .. outs_len {
          outs.push(OutInstruction::read(reader)?);
        }

        Ok(RouterCommand::Execute { chain_id, nonce, outs })
      }
      _ => Err(io::Error::other("reading unknown type of RouterCommand"))?,
    }
  }

  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {
        writer.write_all(&[0])?;
        writer.write_all(&chain_id.as_le_bytes())?;
        writer.write_all(&nonce.as_le_bytes())?;
        writer.write_all(&key.A.to_bytes())
      }
      RouterCommand::Execute { chain_id, nonce, outs } => {
        writer.write_all(&[1])?;
        writer.write_all(&chain_id.as_le_bytes())?;
        writer.write_all(&nonce.as_le_bytes())?;
        writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?;
        for out in outs {
          out.write(writer)?;
        }
        Ok(())
      }
    }
  }

  pub fn serialize(&self) -> Vec<u8> {
    let mut res = vec![];
    self.write(&mut res).unwrap();
    res
  }
}

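// A round-trip sketch of the tagged command encoding above; the chain ID,
// nonce, and instruction are placeholder values, and this function is
// illustrative, not part of the original commit.
#[allow(dead_code)]
fn router_command_round_trip_sketch() -> io::Result<()> {
  let command = RouterCommand::Execute {
    chain_id: U256::from(1u64),
    nonce: U256::from(0u64),
    outs: vec![OutInstruction {
      target: OutInstructionTarget::Direct([0x22; 20]),
      value: U256::from(3u64),
    }],
  };
  let serialized = command.serialize();
  assert_eq!(RouterCommand::read(&mut serialized.as_slice())?, command);
  Ok(())
}
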
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct SignedRouterCommand {
  command: RouterCommand,
  signature: Signature,
}

impl SignedRouterCommand {
  pub fn new(key: &PublicKey, command: RouterCommand, signature: &[u8; 64]) -> Option<Self> {
    let c = Secp256k1::read_F(&mut &signature[.. 32]).ok()?;
    let s = Secp256k1::read_F(&mut &signature[32 ..]).ok()?;
    let signature = Signature { c, s };

    if !signature.verify(key, &command.msg()) {
      None?
    }
    Some(SignedRouterCommand { command, signature })
  }

  pub fn command(&self) -> &RouterCommand {
    &self.command
  }

  pub fn signature(&self) -> &Signature {
    &self.signature
  }

  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let command = RouterCommand::read(reader)?;

    let mut sig = [0; 64];
    reader.read_exact(&mut sig)?;
    let signature = Signature::from_bytes(sig)?;

    Ok(SignedRouterCommand { command, signature })
  }

  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    self.command.write(writer)?;
    writer.write_all(&self.signature.to_bytes())
  }
}

pub struct RouterCommandMachine {
  key: PublicKey,
  command: RouterCommand,
  machine: AlgorithmMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}

impl RouterCommandMachine {
  pub fn new(keys: ThresholdKeys<Secp256k1>, command: RouterCommand) -> Option<Self> {
    // The Schnorr algorithm should be fine without this, even when using the IETF variant
    // If this is better and more comprehensive, we should do it, even if not necessary
    let mut transcript = RecommendedTranscript::new(b"ethereum-serai RouterCommandMachine v0.1");
    let key = keys.group_key();
    transcript.append_message(b"key", key.to_bytes());
    transcript.append_message(b"command", command.serialize());

    Some(Self {
      key: PublicKey::new(key)?,
      command,
      machine: AlgorithmMachine::new(Schnorr::new(transcript), keys),
    })
  }
}

impl PreprocessMachine for RouterCommandMachine {
  type Preprocess = Preprocess<Secp256k1, ()>;
  type Signature = SignedRouterCommand;
  type SignMachine = RouterCommandSignMachine;

  fn preprocess<R: RngCore + CryptoRng>(
    self,
    rng: &mut R,
  ) -> (Self::SignMachine, Self::Preprocess) {
    let (machine, preprocess) = self.machine.preprocess(rng);

    (RouterCommandSignMachine { key: self.key, command: self.command, machine }, preprocess)
  }
}

pub struct RouterCommandSignMachine {
  key: PublicKey,
  command: RouterCommand,
  machine: AlgorithmSignMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}

impl SignMachine<SignedRouterCommand> for RouterCommandSignMachine {
  type Params = ();
  type Keys = ThresholdKeys<Secp256k1>;
  type Preprocess = Preprocess<Secp256k1, ()>;
  type SignatureShare = SignatureShare<Secp256k1>;
  type SignatureMachine = RouterCommandSignatureMachine;

  fn cache(self) -> CachedPreprocess {
    unimplemented!(
      "RouterCommand machines don't support caching their preprocesses due to {}",
      "being already bound to a specific command"
    );
  }

  fn from_cache(
    (): (),
    _: ThresholdKeys<Secp256k1>,
    _: CachedPreprocess,
  ) -> (Self, Self::Preprocess) {
    unimplemented!(
      "RouterCommand machines don't support caching their preprocesses due to {}",
      "being already bound to a specific command"
    );
  }

  fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
    self.machine.read_preprocess(reader)
  }

  fn sign(
    self,
    commitments: HashMap<Participant, Self::Preprocess>,
    msg: &[u8],
  ) -> Result<(RouterCommandSignatureMachine, Self::SignatureShare), FrostError> {
    if !msg.is_empty() {
      panic!("message was passed to a RouterCommand machine when it generates its own");
    }

    let (machine, share) = self.machine.sign(commitments, &self.command.msg())?;

    Ok((RouterCommandSignatureMachine { key: self.key, command: self.command, machine }, share))
  }
}

pub struct RouterCommandSignatureMachine {
  key: PublicKey,
  command: RouterCommand,
  machine:
    AlgorithmSignatureMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}

impl SignatureMachine<SignedRouterCommand> for RouterCommandSignatureMachine {
  type SignatureShare = SignatureShare<Secp256k1>;

  fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::SignatureShare> {
    self.machine.read_share(reader)
  }

  fn complete(
    self,
    shares: HashMap<Participant, Self::SignatureShare>,
  ) -> Result<SignedRouterCommand, FrostError> {
    let sig = self.machine.complete(shares)?;
    let signature = Signature::new(&self.key, &self.command.msg(), sig)
      .expect("machine produced an invalid signature");
    Ok(SignedRouterCommand { command: self.command, signature })
  }
}
426 coins/ethereum/src/router.rs Normal file
@@ -0,0 +1,426 @@
use std::{sync::Arc, io, collections::HashSet};

use k256::{
  elliptic_curve::{group::GroupEncoding, sec1},
  ProjectivePoint,
};

use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind};
#[cfg(test)]
use alloy_core::primitives::B256;
use alloy_consensus::TxLegacy;

use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent};

use alloy_rpc_types::Filter;
#[cfg(test)]
use alloy_rpc_types::{BlockId, TransactionRequest, TransactionInput};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};

pub use crate::{
  Error,
  crypto::{PublicKey, Signature},
  abi::{erc20::Transfer, router as abi},
};
use abi::{SeraiKeyUpdated, InInstruction as InInstructionEvent, Executed as ExecutedEvent};

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Coin {
  Ether,
  Erc20([u8; 20]),
}

impl Coin {
  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0xff];
    reader.read_exact(&mut kind)?;
    Ok(match kind[0] {
      0 => Coin::Ether,
      1 => {
        let mut address = [0; 20];
        reader.read_exact(&mut address)?;
        Coin::Erc20(address)
      }
      _ => Err(io::Error::other("unrecognized Coin type"))?,
    })
  }

  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      Coin::Ether => writer.write_all(&[0]),
      Coin::Erc20(token) => {
        writer.write_all(&[1])?;
        writer.write_all(token)
      }
    }
  }
}

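// A round-trip sketch of the tagged Coin encoding above (0 = Ether, 1 = ERC20
// followed by its 20-byte address); the token address is a placeholder and
// this function is illustrative, not part of the original commit.
#[allow(dead_code)]
fn coin_round_trip_sketch() -> io::Result<()> {
  for coin in [Coin::Ether, Coin::Erc20([0x22; 20])] {
    let mut buf = vec![];
    coin.write(&mut buf)?;
    assert_eq!(Coin::read(&mut buf.as_slice())?, coin);
  }
  Ok(())
}
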
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct InInstruction {
  pub id: ([u8; 32], u64),
  pub from: [u8; 20],
  pub coin: Coin,
  pub amount: U256,
  pub data: Vec<u8>,
  pub key_at_end_of_block: ProjectivePoint,
}

impl InInstruction {
  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let id = {
      let mut id_hash = [0; 32];
      reader.read_exact(&mut id_hash)?;
      let mut id_pos = [0; 8];
      reader.read_exact(&mut id_pos)?;
      let id_pos = u64::from_le_bytes(id_pos);
      (id_hash, id_pos)
    };

    let mut from = [0; 20];
    reader.read_exact(&mut from)?;

    let coin = Coin::read(reader)?;
    let mut amount = [0; 32];
    reader.read_exact(&mut amount)?;
    let amount = U256::from_le_slice(&amount);

    let mut data_len = [0; 4];
    reader.read_exact(&mut data_len)?;
    let data_len = usize::try_from(u32::from_le_bytes(data_len))
      .map_err(|_| io::Error::other("InInstruction data exceeded 2**32 in length"))?;
    let mut data = vec![0; data_len];
    reader.read_exact(&mut data)?;

    let mut key_at_end_of_block = <ProjectivePoint as GroupEncoding>::Repr::default();
    reader.read_exact(&mut key_at_end_of_block)?;
    let key_at_end_of_block = Option::from(ProjectivePoint::from_bytes(&key_at_end_of_block))
      .ok_or(io::Error::other("InInstruction had key at end of block which wasn't valid"))?;

    Ok(InInstruction { id, from, coin, amount, data, key_at_end_of_block })
  }

  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(&self.id.0)?;
    writer.write_all(&self.id.1.to_le_bytes())?;

    writer.write_all(&self.from)?;

    self.coin.write(writer)?;
    writer.write_all(&self.amount.as_le_bytes())?;

    writer.write_all(
      &u32::try_from(self.data.len())
        .map_err(|_| {
          io::Error::other("InInstruction being written had data exceeding 2**32 in length")
        })?
        .to_le_bytes(),
    )?;
    writer.write_all(&self.data)?;

    writer.write_all(&self.key_at_end_of_block.to_bytes())
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Executed {
  pub tx_id: [u8; 32],
  pub nonce: u64,
  pub signature: [u8; 64],
}

/// The contract Serai uses to manage its state.
#[derive(Clone, Debug)]
pub struct Router(Arc<RootProvider<SimpleRequest>>, Address);
impl Router {
  pub(crate) fn code() -> Vec<u8> {
    let bytecode = include_str!("../artifacts/Router.bin");
    Bytes::from_hex(bytecode).expect("compiled-in Router bytecode wasn't valid hex").to_vec()
  }

  pub(crate) fn init_code(key: &PublicKey) -> Vec<u8> {
    let mut bytecode = Self::code();
    // Append the constructor arguments
    bytecode.extend((abi::constructorCall { _seraiKey: key.eth_repr().into() }).abi_encode());
    bytecode
  }

  // This isn't pub in order to force users to use `Deployer::find_router`.
  pub(crate) fn new(provider: Arc<RootProvider<SimpleRequest>>, address: Address) -> Self {
    Self(provider, address)
  }

  pub fn address(&self) -> [u8; 20] {
    **self.1
  }

  /// Get the key for Serai at the specified block.
  #[cfg(test)]
  pub async fn serai_key(&self, at: [u8; 32]) -> Result<PublicKey, Error> {
    let call = TransactionRequest::default()
      .to(Some(self.1))
      .input(TransactionInput::new(abi::seraiKeyCall::new(()).abi_encode().into()));
    let bytes = self
      .0
      .call(&call, Some(BlockId::Hash(B256::from(at).into())))
      .await
      .map_err(|_| Error::ConnectionError)?;
    let res =
      abi::seraiKeyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
    PublicKey::from_eth_repr(res._0.0).ok_or(Error::ConnectionError)
  }

  /// Get the message to be signed in order to update the key for Serai.
  pub(crate) fn update_serai_key_message(chain_id: U256, nonce: U256, key: &PublicKey) -> Vec<u8> {
    let mut buffer = b"updateSeraiKey".to_vec();
    buffer.extend(&chain_id.to_be_bytes::<32>());
    buffer.extend(&nonce.to_be_bytes::<32>());
    buffer.extend(&key.eth_repr());
    buffer
  }

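  // Message layout, as built above: the 14-byte ASCII prefix "updateSeraiKey",
  // the chain ID and nonce as 32-byte big-endian words, and the new key's
  // 32-byte eth_repr, for 110 bytes total. (This note is a reading of the code
  // above, not text from the original commit.)
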
  /// Update the key representing Serai.
  pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy {
    // TODO: Set a more accurate gas
    TxLegacy {
      to: TxKind::Call(self.1),
      input: abi::updateSeraiKeyCall::new((public_key.eth_repr().into(), sig.into()))
        .abi_encode()
        .into(),
      gas_limit: 100_000,
      ..Default::default()
    }
  }

  /// Get the current nonce for the published batches.
  #[cfg(test)]
  pub async fn nonce(&self, at: [u8; 32]) -> Result<U256, Error> {
    let call = TransactionRequest::default()
      .to(Some(self.1))
      .input(TransactionInput::new(abi::nonceCall::new(()).abi_encode().into()));
    let bytes = self
      .0
      .call(&call, Some(BlockId::Hash(B256::from(at).into())))
      .await
      .map_err(|_| Error::ConnectionError)?;
    let res =
      abi::nonceCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
    Ok(res._0)
  }

  /// Get the message to be signed in order to execute a batch of `OutInstruction`s.
  pub(crate) fn execute_message(
    chain_id: U256,
    nonce: U256,
    outs: Vec<abi::OutInstruction>,
  ) -> Vec<u8> {
    ("execute".to_string(), chain_id, nonce, outs).abi_encode_params()
  }

  /// Execute a batch of `OutInstruction`s.
  pub fn execute(&self, outs: &[abi::OutInstruction], sig: &Signature) -> TxLegacy {
    TxLegacy {
      to: TxKind::Call(self.1),
      input: abi::executeCall::new((outs.to_vec(), sig.into())).abi_encode().into(),
      // TODO
      gas_limit: 100_000 + ((200_000 + 10_000) * u128::try_from(outs.len()).unwrap()),
      ..Default::default()
    }
  }

  pub async fn in_instructions(
    &self,
    block: u64,
    allowed_tokens: &HashSet<[u8; 20]>,
  ) -> Result<Vec<InInstruction>, Error> {
    let key_at_end_of_block = {
      let filter = Filter::new().from_block(0).to_block(block).address(self.1);
      let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH);
      let all_keys = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

      let last_key_x_coordinate_log = all_keys.last().ok_or(Error::ConnectionError)?;
      let last_key_x_coordinate = last_key_x_coordinate_log
        .log_decode::<SeraiKeyUpdated>()
        .map_err(|_| Error::ConnectionError)?
        .inner
        .data
        .key;

      let mut compressed_point = <ProjectivePoint as GroupEncoding>::Repr::default();
      compressed_point[0] = u8::from(sec1::Tag::CompressedEvenY);
      compressed_point[1 ..].copy_from_slice(last_key_x_coordinate.as_slice());

      ProjectivePoint::from_bytes(&compressed_point).expect("router's last key wasn't a valid key")
    };

    let filter = Filter::new().from_block(block).to_block(block).address(self.1);
    let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH);
    let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

    let mut transfer_check = HashSet::new();
    let mut in_instructions = vec![];
    for log in logs {
      // Double check the address which emitted this log
      if log.address() != self.1 {
        Err(Error::ConnectionError)?;
      }

      let id = (
        log.block_hash.ok_or(Error::ConnectionError)?.into(),
        log.log_index.ok_or(Error::ConnectionError)?,
      );

      let tx_hash = log.transaction_hash.ok_or(Error::ConnectionError)?;
      let tx = self.0.get_transaction_by_hash(tx_hash).await.map_err(|_| Error::ConnectionError)?;

      let log =
        log.log_decode::<InInstructionEvent>().map_err(|_| Error::ConnectionError)?.inner.data;

      let coin = if log.coin.0 == [0; 20] {
        Coin::Ether
      } else {
        let token = *log.coin.0;

        if !allowed_tokens.contains(&token) {
          continue;
        }

        // If this also counts as a top-level transfer via the token, drop it
        //
        // Necessary in order to handle a potential edge case with some theoretical token
        // implementations
        //
        // This will either let it be handled by the top-level transfer hook or will drop it
        // entirely on the side of caution
        if tx.to == Some(token.into()) {
          continue;
        }

        // Get all logs for this TX
        let receipt = self
          .0
          .get_transaction_receipt(tx_hash)
          .await
          .map_err(|_| Error::ConnectionError)?
          .ok_or(Error::ConnectionError)?;
        let tx_logs = receipt.inner.logs();

        // Find a matching transfer log
        let mut found_transfer = false;
        for tx_log in tx_logs {
          let log_index = tx_log.log_index.ok_or(Error::ConnectionError)?;
          // Ensure we didn't already use this transfer to check a distinct InInstruction event
          if transfer_check.contains(&log_index) {
            continue;
          }

          // Check if this log is from the token we expected to be transferred
          if tx_log.address().0 != token {
            continue;
          }
          // Check if this is a transfer log
          // https://github.com/alloy-rs/core/issues/589
          if tx_log.topics()[0] != Transfer::SIGNATURE_HASH {
            continue;
          }
          let Ok(transfer) = Transfer::decode_log(&tx_log.inner.clone(), true) else { continue };
          // Check if this is a transfer to us for the expected amount
          if (transfer.to == self.1) && (transfer.value == log.amount) {
            transfer_check.insert(log_index);
            found_transfer = true;
            break;
          }
        }
        if !found_transfer {
          // This shouldn't be a ConnectionError
          // This is an exploit, a non-conforming ERC20, or an invalid connection
          // This should halt the process which is sufficient, yet this is sub-optimal
          // TODO
          Err(Error::ConnectionError)?;
        }

        Coin::Erc20(token)
      };

      in_instructions.push(InInstruction {
        id,
        from: *log.from.0,
        coin,
        amount: log.amount,
        data: log.instruction.as_ref().to_vec(),
        key_at_end_of_block,
      });
    }

    Ok(in_instructions)
  }

  pub async fn executed_commands(&self, block: u64) -> Result<Vec<Executed>, Error> {
    let mut res = vec![];

    {
      let filter = Filter::new().from_block(block).to_block(block).address(self.1);
      let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH);
      let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

      for log in logs {
        // Double check the address which emitted this log
        if log.address() != self.1 {
          Err(Error::ConnectionError)?;
        }

        let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into();

        let log =
          log.log_decode::<SeraiKeyUpdated>().map_err(|_| Error::ConnectionError)?.inner.data;

        let mut signature = [0; 64];
        signature[.. 32].copy_from_slice(log.signature.c.as_ref());
        signature[32 ..].copy_from_slice(log.signature.s.as_ref());
        res.push(Executed {
          tx_id,
          nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,
          signature,
        });
      }
    }

    {
      let filter = Filter::new().from_block(block).to_block(block).address(self.1);
      let filter = filter.event_signature(ExecutedEvent::SIGNATURE_HASH);
      let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

      for log in logs {
        // Double check the address which emitted this log
        if log.address() != self.1 {
          Err(Error::ConnectionError)?;
        }

        let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into();

        let log = log.log_decode::<ExecutedEvent>().map_err(|_| Error::ConnectionError)?.inner.data;

        let mut signature = [0; 64];
        signature[.. 32].copy_from_slice(log.signature.c.as_ref());
        signature[32 ..].copy_from_slice(log.signature.s.as_ref());
        res.push(Executed {
          tx_id,
          nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,
          signature,
        });
      }
    }

    Ok(res)
  }

  #[cfg(feature = "tests")]
  pub fn key_updated_filter(&self) -> Filter {
    Filter::new().address(self.1).event_signature(SeraiKeyUpdated::SIGNATURE_HASH)
  }
  #[cfg(feature = "tests")]
  pub fn executed_filter(&self) -> Filter {
    Filter::new().address(self.1).event_signature(ExecutedEvent::SIGNATURE_HASH)
  }
}
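A sketch of how a scanner might consume this view per block, assuming `router` came from `Deployer::find_router` and using a placeholder token allow-list (the function and addresses are illustrative, not part of the diff):

use std::collections::HashSet;

use ethereum_serai::{Error, router::Router};

async fn scan_router_block(router: &Router, block: u64) -> Result<(), Error> {
  // Hypothetical allow-list of a single token
  let allowed_tokens = HashSet::from([[0x33; 20]]);
  for instruction in router.in_instructions(block, &allowed_tokens).await? {
    // (block hash, log index) uniquely identifies this InInstruction
    println!(
      "in instruction {:?}: {} of {:?}",
      instruction.id, instruction.amount, instruction.coin
    );
  }
  for executed in router.executed_commands(block).await? {
    println!("nonce {} was executed in TX {:?}", executed.nonce, executed.tx_id);
  }
  Ok(())
}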
13 coins/ethereum/src/tests/abi/mod.rs Normal file
@@ -0,0 +1,13 @@
use alloy_sol_types::sol;

#[rustfmt::skip]
#[allow(warnings)]
#[allow(clippy::needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod schnorr_container {
  use super::*;
  sol!("src/tests/contracts/Schnorr.sol");
}
pub(crate) use schnorr_container::TestSchnorr as schnorr;
51 coins/ethereum/src/tests/contracts/ERC20.sol Normal file
@@ -0,0 +1,51 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;

contract TestERC20 {
  event Transfer(address indexed from, address indexed to, uint256 value);
  event Approval(address indexed owner, address indexed spender, uint256 value);

  function name() public pure returns (string memory) {
    return "Test ERC20";
  }
  function symbol() public pure returns (string memory) {
    return "TEST";
  }
  function decimals() public pure returns (uint8) {
    return 18;
  }

  function totalSupply() public pure returns (uint256) {
    return 1_000_000 * 10e18;
  }

  mapping(address => uint256) balances;
  mapping(address => mapping(address => uint256)) allowances;

  constructor() {
    balances[msg.sender] = totalSupply();
  }

  function balanceOf(address owner) public view returns (uint256) {
    return balances[owner];
  }
  function transfer(address to, uint256 value) public returns (bool) {
    balances[msg.sender] -= value;
    balances[to] += value;
    return true;
  }
  function transferFrom(address from, address to, uint256 value) public returns (bool) {
    allowances[from][msg.sender] -= value;
    balances[from] -= value;
    balances[to] += value;
    return true;
  }

  function approve(address spender, uint256 value) public returns (bool) {
    allowances[msg.sender][spender] = value;
    return true;
  }
  function allowance(address owner, address spender) public view returns (uint256) {
    return allowances[owner][spender];
  }
}
15 coins/ethereum/src/tests/contracts/Schnorr.sol Normal file
@@ -0,0 +1,15 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;

import "../../../contracts/Schnorr.sol";

contract TestSchnorr {
  function verify(
    bytes32 px,
    bytes calldata message,
    bytes32 c,
    bytes32 s
  ) external pure returns (bool) {
    return Schnorr.verify(px, message, c, s);
  }
}
105 coins/ethereum/src/tests/crypto.rs Normal file
@@ -0,0 +1,105 @@
use rand_core::OsRng;

use group::ff::{Field, PrimeField};
use k256::{
  ecdsa::{
    self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey,
  },
  Scalar, ProjectivePoint,
};

use frost::{
  curve::{Ciphersuite, Secp256k1},
  algorithm::{Hram, IetfSchnorr},
  tests::{algorithm_machines, sign},
};

use crate::{crypto::*, tests::key_gen};

// The ecrecover opcode, yet with parity replacing v
pub(crate) fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
  let sig = ecdsa::Signature::from_scalars(r, s).ok()?;
  let message: [u8; 32] = message.to_repr().into();
  alloy_core::primitives::Signature::from_signature_and_parity(
    sig,
    alloy_core::primitives::Parity::Parity(odd_y),
  )
  .ok()?
  .recover_address_from_prehash(&alloy_core::primitives::B256::from(message))
  .ok()
  .map(Into::into)
}

#[test]
fn test_ecrecover() {
  let private = SigningKey::random(&mut OsRng);
  let public = VerifyingKey::from(&private);

  // Sign the message
  const MESSAGE: &[u8] = b"Hello, World!";
  let (sig, recovery_id) = private
    .as_nonzero_scalar()
    .try_sign_prehashed(
      <Secp256k1 as Ciphersuite>::F::random(&mut OsRng),
      &keccak256(MESSAGE).into(),
    )
    .unwrap();

  // Sanity check the signature verifies
  #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
  {
    assert_eq!(public.verify_prehash(&keccak256(MESSAGE), &sig).unwrap(), ());
  }

  // Perform the ecrecover
  assert_eq!(
    ecrecover(
      hash_to_scalar(MESSAGE),
      u8::from(recovery_id.unwrap().is_y_odd()) == 1,
      *sig.r(),
      *sig.s()
    )
    .unwrap(),
    address(&ProjectivePoint::from(public.as_affine()))
  );
}

// Run the sign test with the EthereumHram
#[test]
fn test_signing() {
  let (keys, _) = key_gen();

  const MESSAGE: &[u8] = b"Hello, World!";

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let _sig =
    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
}

#[allow(non_snake_case)]
pub fn preprocess_signature_for_ecrecover(
  R: ProjectivePoint,
  public_key: &PublicKey,
  m: &[u8],
  s: Scalar,
) -> (Scalar, Scalar) {
  let c = EthereumHram::hram(&R, &public_key.A, m);
  let sa = -(s * public_key.px);
  let ca = -(c * public_key.px);
  (sa, ca)
}

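// Note on the algebra (a reading of the code, not text from the original
// commit): ecrecover(z, v, r, s') returns the address of r^{-1}(s'R' - zG),
// where R' is the point with x-coordinate r. Passing r = px (so R' = A, the
// group key, whose y parity the `false` below presumes even), s' = ca = -c px,
// and z = sa = -s px yields px^{-1}((-c px)A - (-s px)G) = sG - cA, which is
// the Schnorr nonce point R whenever s = k + c x. The test below asserts
// exactly that equality.
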
#[test]
fn test_ecrecover_hack() {
  let (keys, public_key) = key_gen();

  const MESSAGE: &[u8] = b"Hello, World!";

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let sig =
    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);

  let (sa, ca) = preprocess_signature_for_ecrecover(sig.R, &public_key, MESSAGE, sig.s);
  let q = ecrecover(sa, false, public_key.px, ca).unwrap();
  assert_eq!(q, address(&sig.R));
}
127 coins/ethereum/src/tests/mod.rs Normal file
@@ -0,0 +1,127 @@
use std::{sync::Arc, collections::HashMap};

use rand_core::OsRng;

use k256::{Scalar, ProjectivePoint};
use frost::{curve::Secp256k1, Participant, ThresholdKeys, tests::key_gen as frost_key_gen};

use alloy_core::{
  primitives::{Address, U256, Bytes, TxKind},
  hex::FromHex,
};
use alloy_consensus::{SignableTransaction, TxLegacy};

use alloy_rpc_types::TransactionReceipt;
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};

use crate::crypto::{address, deterministically_sign, PublicKey};

mod crypto;

mod abi;
mod schnorr;
mod router;

pub fn key_gen() -> (HashMap<Participant, ThresholdKeys<Secp256k1>>, PublicKey) {
  let mut keys = frost_key_gen::<_, Secp256k1>(&mut OsRng);
  let mut group_key = keys[&Participant::new(1).unwrap()].group_key();

  let mut offset = Scalar::ZERO;
  while PublicKey::new(group_key).is_none() {
    offset += Scalar::ONE;
    group_key += ProjectivePoint::GENERATOR;
  }
  for keys in keys.values_mut() {
    *keys = keys.offset(offset);
  }
  let public_key = PublicKey::new(group_key).unwrap();

  (keys, public_key)
}

// TODO: Use a proper error here
pub async fn send(
  provider: &RootProvider<SimpleRequest>,
  wallet: &k256::ecdsa::SigningKey,
  mut tx: TxLegacy,
) -> Option<TransactionReceipt> {
  let verifying_key = *wallet.verifying_key().as_affine();
  let address = Address::from(address(&verifying_key.into()));

  // https://github.com/alloy-rs/alloy/issues/539
  // let chain_id = provider.get_chain_id().await.unwrap();
  // tx.chain_id = Some(chain_id);
  tx.chain_id = None;
  tx.nonce = provider.get_transaction_count(address, None).await.unwrap();
  // 100 gwei
  tx.gas_price = 100_000_000_000u128;

  let sig = wallet.sign_prehash_recoverable(tx.signature_hash().as_ref()).unwrap();
  assert_eq!(address, tx.clone().into_signed(sig.into()).recover_signer().unwrap());
  assert!(
    provider.get_balance(address, None).await.unwrap() >
      ((U256::from(tx.gas_price) * U256::from(tx.gas_limit)) + tx.value)
  );

  let mut bytes = vec![];
  tx.encode_with_signature_fields(&sig.into(), &mut bytes);
  let pending_tx = provider.send_raw_transaction(&bytes).await.ok()?;
  pending_tx.get_receipt().await.ok()
}

pub async fn fund_account(
  provider: &RootProvider<SimpleRequest>,
  wallet: &k256::ecdsa::SigningKey,
  to_fund: Address,
  value: U256,
) -> Option<()> {
  let funding_tx =
    TxLegacy { to: TxKind::Call(to_fund), gas_limit: 21_000, value, ..Default::default() };
  assert!(send(provider, wallet, funding_tx).await.unwrap().status());

  Some(())
}

// TODO: Use a proper error here
pub async fn deploy_contract(
  client: Arc<RootProvider<SimpleRequest>>,
  wallet: &k256::ecdsa::SigningKey,
  name: &str,
) -> Option<Address> {
  let hex_bin_buf = std::fs::read_to_string(format!("./artifacts/{name}.bin")).unwrap();
  let hex_bin =
    if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf };
  let bin = Bytes::from_hex(hex_bin).unwrap();

  let deployment_tx = TxLegacy {
    chain_id: None,
    nonce: 0,
    // 100 gwei
    gas_price: 100_000_000_000u128,
    gas_limit: 1_000_000,
    to: TxKind::Create,
    value: U256::ZERO,
    input: bin,
  };

  let deployment_tx = deterministically_sign(&deployment_tx);

  // Fund the deployer address
  fund_account(
    &client,
    wallet,
    deployment_tx.recover_signer().unwrap(),
    U256::from(deployment_tx.tx().gas_limit) * U256::from(deployment_tx.tx().gas_price),
  )
  .await?;

  let (deployment_tx, sig, _) = deployment_tx.into_parts();
  let mut bytes = vec![];
  deployment_tx.encode_with_signature_fields(&sig, &mut bytes);
  let pending_tx = client.send_raw_transaction(&bytes).await.ok()?;
  let receipt = pending_tx.get_receipt().await.ok()?;
  assert!(receipt.status());

  Some(receipt.contract_address.unwrap())
}
183 coins/ethereum/src/tests/router.rs Normal file
@@ -0,0 +1,183 @@
use std::{convert::TryFrom, sync::Arc, collections::HashMap};

use rand_core::OsRng;

use group::Group;
use k256::ProjectivePoint;
use frost::{
  curve::Secp256k1,
  Participant, ThresholdKeys,
  algorithm::IetfSchnorr,
  tests::{algorithm_machines, sign},
};

use alloy_core::primitives::{Address, U256};

use alloy_simple_request_transport::SimpleRequest;
use alloy_rpc_client::ClientBuilder;
use alloy_provider::{Provider, RootProvider};

use alloy_node_bindings::{Anvil, AnvilInstance};

use crate::{
  crypto::*,
  deployer::Deployer,
  router::{Router, abi as router},
  tests::{key_gen, send, fund_account},
};

async fn setup_test() -> (
  AnvilInstance,
  Arc<RootProvider<SimpleRequest>>,
  u64,
  Router,
  HashMap<Participant, ThresholdKeys<Secp256k1>>,
  PublicKey,
) {
  let anvil = Anvil::new().spawn();

  let provider = RootProvider::new(
    ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
  );
  let chain_id = provider.get_chain_id().await.unwrap();
  let wallet = anvil.keys()[0].clone().into();
  let client = Arc::new(provider);

  // Make sure the Deployer constructor returns None, as it doesn't exist yet
  assert!(Deployer::new(client.clone()).await.unwrap().is_none());

  // Deploy the Deployer
  let tx = Deployer::deployment_tx();
  fund_account(
    &client,
    &wallet,
    tx.recover_signer().unwrap(),
    U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price),
  )
  .await
  .unwrap();

  let (tx, sig, _) = tx.into_parts();
  let mut bytes = vec![];
  tx.encode_with_signature_fields(&sig, &mut bytes);

  let pending_tx = client.send_raw_transaction(&bytes).await.unwrap();
  let receipt = pending_tx.get_receipt().await.unwrap();
  assert!(receipt.status());
  let deployer =
    Deployer::new(client.clone()).await.expect("network error").expect("deployer wasn't deployed");

  let (keys, public_key) = key_gen();

  // Verify the Router constructor returns None, as it doesn't exist yet
  assert!(deployer.find_router(client.clone(), &public_key).await.unwrap().is_none());

  // Deploy the router
  let receipt = send(&client, &anvil.keys()[0].clone().into(), deployer.deploy_router(&public_key))
    .await
    .unwrap();
  assert!(receipt.status());
  let contract = deployer.find_router(client.clone(), &public_key).await.unwrap().unwrap();

  (anvil, client, chain_id, contract, keys, public_key)
}

async fn latest_block_hash(client: &RootProvider<SimpleRequest>) -> [u8; 32] {
  client
    .get_block(client.get_block_number().await.unwrap().into(), false)
    .await
    .unwrap()
    .unwrap()
    .header
    .hash
    .unwrap()
    .0
}

#[tokio::test]
async fn test_deploy_contract() {
  let (_anvil, client, _, router, _, public_key) = setup_test().await;

  let block_hash = latest_block_hash(&client).await;
  assert_eq!(router.serai_key(block_hash).await.unwrap(), public_key);
  assert_eq!(router.nonce(block_hash).await.unwrap(), U256::try_from(1u64).unwrap());
  // TODO: Check it emitted SeraiKeyUpdated(public_key) at its genesis
}

pub fn hash_and_sign(
  keys: &HashMap<Participant, ThresholdKeys<Secp256k1>>,
  public_key: &PublicKey,
  message: &[u8],
) -> Signature {
  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let sig =
    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, keys), message);

  Signature::new(public_key, message, sig).unwrap()
}

#[tokio::test]
async fn test_router_update_serai_key() {
  let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await;

  let next_key = loop {
    let point = ProjectivePoint::random(&mut OsRng);
    let Some(next_key) = PublicKey::new(point) else { continue };
    break next_key;
  };

  let message = Router::update_serai_key_message(
    U256::try_from(chain_id).unwrap(),
    U256::try_from(1u64).unwrap(),
    &next_key,
  );
  let sig = hash_and_sign(&keys, &public_key, &message);

  let first_block_hash = latest_block_hash(&client).await;
  assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key);

  let receipt =
    send(&client, &anvil.keys()[0].clone().into(), contract.update_serai_key(&next_key, &sig))
      .await
      .unwrap();
  assert!(receipt.status());

  let second_block_hash = latest_block_hash(&client).await;
  assert_eq!(contract.serai_key(second_block_hash).await.unwrap(), next_key);
  // Check this does still offer the historical state
  assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key);
  // TODO: Check logs

  println!("gas used: {:?}", receipt.gas_used);
  // println!("logs: {:?}", receipt.logs);
}

#[tokio::test]
async fn test_router_execute() {
  let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await;

  let to = Address::from([0; 20]);
  let value = U256::ZERO;
  let tx = router::OutInstruction { to, value, calls: vec![] };
  let txs = vec![tx];

  let first_block_hash = latest_block_hash(&client).await;
  let nonce = contract.nonce(first_block_hash).await.unwrap();
  assert_eq!(nonce, U256::try_from(1u64).unwrap());

  let message = Router::execute_message(U256::try_from(chain_id).unwrap(), nonce, txs.clone());
  let sig = hash_and_sign(&keys, &public_key, &message);

  let receipt =
    send(&client, &anvil.keys()[0].clone().into(), contract.execute(&txs, &sig)).await.unwrap();
  assert!(receipt.status());

  let second_block_hash = latest_block_hash(&client).await;
  assert_eq!(contract.nonce(second_block_hash).await.unwrap(), U256::try_from(2u64).unwrap());
  // Check this does still offer the historical state
  assert_eq!(contract.nonce(first_block_hash).await.unwrap(), U256::try_from(1u64).unwrap());
  // TODO: Check logs

  println!("gas used: {:?}", receipt.gas_used);
  // println!("logs: {:?}", receipt.logs);
}
93 coins/ethereum/src/tests/schnorr.rs Normal file
@@ -0,0 +1,93 @@
use std::sync::Arc;

use rand_core::OsRng;

use group::ff::PrimeField;
use k256::Scalar;

use frost::{
  curve::Secp256k1,
  algorithm::IetfSchnorr,
  tests::{algorithm_machines, sign},
};

use alloy_core::primitives::Address;

use alloy_sol_types::SolCall;

use alloy_rpc_types::{TransactionInput, TransactionRequest};
use alloy_simple_request_transport::SimpleRequest;
use alloy_rpc_client::ClientBuilder;
use alloy_provider::{Provider, RootProvider};

use alloy_node_bindings::{Anvil, AnvilInstance};

use crate::{
  Error,
  crypto::*,
  tests::{key_gen, deploy_contract, abi::schnorr as abi},
};

async fn setup_test() -> (AnvilInstance, Arc<RootProvider<SimpleRequest>>, Address) {
  let anvil = Anvil::new().spawn();

  let provider = RootProvider::new(
    ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
  );
  let wallet = anvil.keys()[0].clone().into();
  let client = Arc::new(provider);

  let address = deploy_contract(client.clone(), &wallet, "TestSchnorr").await.unwrap();
  (anvil, client, address)
}

#[tokio::test]
async fn test_deploy_contract() {
  setup_test().await;
}

pub async fn call_verify(
  provider: &RootProvider<SimpleRequest>,
  contract: Address,
  public_key: &PublicKey,
  message: &[u8],
  signature: &Signature,
) -> Result<(), Error> {
  let px: [u8; 32] = public_key.px.to_repr().into();
  let c_bytes: [u8; 32] = signature.c.to_repr().into();
  let s_bytes: [u8; 32] = signature.s.to_repr().into();
  let call = TransactionRequest::default().to(Some(contract)).input(TransactionInput::new(
    abi::verifyCall::new((px.into(), message.to_vec().into(), c_bytes.into(), s_bytes.into()))
      .abi_encode()
      .into(),
  ));
  let bytes = provider.call(&call, None).await.map_err(|_| Error::ConnectionError)?;
  let res =
    abi::verifyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;

  if res._0 {
    Ok(())
  } else {
    Err(Error::InvalidSignature)
  }
}

#[tokio::test]
async fn test_ecrecover_hack() {
  let (_anvil, client, contract) = setup_test().await;

  let (keys, public_key) = key_gen();

  const MESSAGE: &[u8] = b"Hello, World!";

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let sig =
    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
  let sig = Signature::new(&public_key, MESSAGE, sig).unwrap();

  call_verify(&client, contract, &public_key, MESSAGE, &sig).await.unwrap();
  // Test an invalid signature fails
  let mut sig = sig;
  sig.s += Scalar::ONE;
  assert!(call_verify(&client, contract, &public_key, MESSAGE, &sig).await.is_err());
}
@@ -1,128 +0,0 @@
|
||||
use std::{convert::TryFrom, sync::Arc, time::Duration, fs::File};

use rand_core::OsRng;

use ::k256::{
  elliptic_curve::{bigint::ArrayEncoding, PrimeField},
  U256,
};

use ethers_core::{
  types::Signature,
  abi::Abi,
  utils::{keccak256, Anvil, AnvilInstance},
};
use ethers_contract::ContractFactory;
use ethers_providers::{Middleware, Provider, Http};

use frost::{
  curve::Secp256k1,
  Participant,
  algorithm::IetfSchnorr,
  tests::{key_gen, algorithm_machines, sign},
};

use ethereum_serai::{
  crypto,
  contract::{Schnorr, call_verify},
};

// TODO: Replace with a contract deployment from an unknown account, so the environment solely has
// to fund the deployer, not create/pass a wallet
pub async fn deploy_schnorr_verifier_contract(
  chain_id: u32,
  client: Arc<Provider<Http>>,
  wallet: &k256::ecdsa::SigningKey,
) -> eyre::Result<Schnorr<Provider<Http>>> {
  let abi: Abi = serde_json::from_reader(File::open("./artifacts/Schnorr.abi").unwrap()).unwrap();

  let hex_bin_buf = std::fs::read_to_string("./artifacts/Schnorr.bin").unwrap();
  let hex_bin =
    if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf };
  let bin = hex::decode(hex_bin).unwrap();
  let factory = ContractFactory::new(abi, bin.into(), client.clone());

  let mut deployment_tx = factory.deploy(())?.tx;
  deployment_tx.set_chain_id(chain_id);
  deployment_tx.set_gas(500_000);
  let (max_fee_per_gas, max_priority_fee_per_gas) = client.estimate_eip1559_fees(None).await?;
  deployment_tx.as_eip1559_mut().unwrap().max_fee_per_gas = Some(max_fee_per_gas);
  deployment_tx.as_eip1559_mut().unwrap().max_priority_fee_per_gas = Some(max_priority_fee_per_gas);

  let sig_hash = deployment_tx.sighash();
  let (sig, rid) = wallet.sign_prehash_recoverable(sig_hash.as_ref()).unwrap();

  // EIP-155 v
  let mut v = u64::from(rid.to_byte());
  assert!((v == 0) || (v == 1));
  v += u64::from((chain_id * 2) + 35);

  let r = sig.r().to_repr();
  let r_ref: &[u8] = r.as_ref();
  let s = sig.s().to_repr();
  let s_ref: &[u8] = s.as_ref();
  let deployment_tx = deployment_tx.rlp_signed(&Signature { r: r_ref.into(), s: s_ref.into(), v });

  let pending_tx = client.send_raw_transaction(deployment_tx).await?;

  let mut receipt;
  while {
    receipt = client.get_transaction_receipt(pending_tx.tx_hash()).await?;
    receipt.is_none()
  } {
    tokio::time::sleep(Duration::from_secs(6)).await;
  }
  let receipt = receipt.unwrap();
  assert!(receipt.status == Some(1.into()));

  let contract = Schnorr::new(receipt.contract_address.unwrap(), client.clone());
  Ok(contract)
}

async fn deploy_test_contract() -> (u32, AnvilInstance, Schnorr<Provider<Http>>) {
  let anvil = Anvil::new().spawn();

  let provider =
    Provider::<Http>::try_from(anvil.endpoint()).unwrap().interval(Duration::from_millis(10u64));
  let chain_id = provider.get_chainid().await.unwrap().as_u32();
  let wallet = anvil.keys()[0].clone().into();
  let client = Arc::new(provider);

  (chain_id, anvil, deploy_schnorr_verifier_contract(chain_id, client, &wallet).await.unwrap())
}

#[tokio::test]
async fn test_deploy_contract() {
  deploy_test_contract().await;
}

#[tokio::test]
async fn test_ecrecover_hack() {
  let (chain_id, _anvil, contract) = deploy_test_contract().await;
  let chain_id = U256::from(chain_id);

  let keys = key_gen::<_, Secp256k1>(&mut OsRng);
  let group_key = keys[&Participant::new(1).unwrap()].group_key();

  const MESSAGE: &[u8] = b"Hello, World!";
  let hashed_message = keccak256(MESSAGE);

  let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();

  let algo = IetfSchnorr::<Secp256k1, crypto::EthereumHram>::ietf();
  let sig = sign(
    &mut OsRng,
    &algo,
    keys.clone(),
    algorithm_machines(&mut OsRng, &algo, &keys),
    full_message,
  );
  let mut processed_sig =
    crypto::process_signature_for_contract(hashed_message, &sig.R, sig.s, &group_key, chain_id);

  call_verify(&contract, &processed_sig).await.unwrap();

  // Test that an invalid signature fails to verify
  processed_sig.message[0] = 0;
  assert!(call_verify(&contract, &processed_sig).await.is_err());
}
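The `v` computation in `deploy_schnorr_verifier_contract` follows EIP-155, which folds the chain ID into the signature's recovery byte. A minimal sketch of the rule, as a hypothetical standalone helper (not part of this diff):

```rust
// EIP-155: v = recovery_id + (chain_id * 2) + 35, so the signature also
// commits to the chain ID and replaying the transaction on another chain fails.
fn eip155_v(recovery_id: u8, chain_id: u32) -> u64 {
  // secp256k1 recovery IDs are 0 or 1 for the canonical y-parity encoding
  assert!(recovery_id <= 1);
  u64::from(recovery_id) + (u64::from(chain_id) * 2) + 35
}
// e.g. eip155_v(0, 1) == 37 and eip155_v(1, 1) == 38 on Ethereum mainnet
```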
@@ -1,87 +0,0 @@
use k256::{
  elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, sec1::ToEncodedPoint},
  ProjectivePoint, Scalar, U256,
};
use frost::{curve::Secp256k1, Participant};

use ethereum_serai::crypto::*;

#[test]
fn test_ecrecover() {
  use rand_core::OsRng;
  use sha2::Sha256;
  use sha3::{Digest, Keccak256};
  use k256::ecdsa::{hazmat::SignPrimitive, signature::DigestVerifier, SigningKey, VerifyingKey};

  let private = SigningKey::random(&mut OsRng);
  let public = VerifyingKey::from(&private);

  const MESSAGE: &[u8] = b"Hello, World!";
  let (sig, recovery_id) = private
    .as_nonzero_scalar()
    .try_sign_prehashed_rfc6979::<Sha256>(&Keccak256::digest(MESSAGE), b"")
    .unwrap();
  #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
  {
    assert_eq!(public.verify_digest(Keccak256::new_with_prefix(MESSAGE), &sig).unwrap(), ());
  }

  assert_eq!(
    ecrecover(hash_to_scalar(MESSAGE), recovery_id.unwrap().is_y_odd().into(), *sig.r(), *sig.s())
      .unwrap(),
    address(&ProjectivePoint::from(public.as_affine()))
  );
}

#[test]
fn test_signing() {
  use frost::{
    algorithm::IetfSchnorr,
    tests::{algorithm_machines, key_gen, sign},
  };
  use rand_core::OsRng;

  let keys = key_gen::<_, Secp256k1>(&mut OsRng);
  let _group_key = keys[&Participant::new(1).unwrap()].group_key();

  const MESSAGE: &[u8] = b"Hello, World!";

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let _sig =
    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
}

#[test]
fn test_ecrecover_hack() {
  use frost::{
    algorithm::IetfSchnorr,
    tests::{algorithm_machines, key_gen, sign},
  };
  use rand_core::OsRng;

  let keys = key_gen::<_, Secp256k1>(&mut OsRng);
  let group_key = keys[&Participant::new(1).unwrap()].group_key();
  let group_key_encoded = group_key.to_encoded_point(true);
  let group_key_compressed = group_key_encoded.as_ref();
  let group_key_x = Scalar::reduce(U256::from_be_slice(&group_key_compressed[1 .. 33]));

  const MESSAGE: &[u8] = b"Hello, World!";
  let hashed_message = keccak256(MESSAGE);
  let chain_id = U256::ONE;

  let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let sig = sign(
    &mut OsRng,
    &algo,
    keys.clone(),
    algorithm_machines(&mut OsRng, &algo, &keys),
    full_message,
  );

  let (sr, er) =
    preprocess_signature_for_ecrecover(hashed_message, &sig.R, sig.s, &group_key, chain_id);
  let q = ecrecover(sr, group_key_compressed[0] - 2, group_key_x, er).unwrap();
  assert_eq!(q, address(&sig.R));
}
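The final assertion of `test_ecrecover_hack` relies on abusing `ecrecover` as a Schnorr verifier. A sketch of the identity, assuming `ecrecover(z, v, r, s')` returns the address of the recovered point (sign conventions inside `preprocess_signature_for_ecrecover` may differ; this is one consistent choice):

```latex
\mathrm{ecrecover}(z, v, r, s') = \mathrm{addr}\big(r^{-1}(s'\,R_{r,v} - z\,G)\big),
\text{ where } R_{r,v} \text{ has x-coordinate } r \text{ and y-parity } v.
\text{For a Schnorr signature } (R, s) \text{ over key } P \text{ with challenge } e:\; sG = R + eP.
\text{Choosing } r = P_x,\; v = \mathrm{parity}(P),\; z = -s P_x,\; s' = -e P_x \text{ gives}
P_x^{-1}\big(-e P_x\,P + s P_x\,G\big) = s\,G - e\,P = R,
\text{so the recovered address equals } \mathrm{addr}(R), \text{ which the test asserts.}
```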
@@ -1,2 +0,0 @@
mod contract;
mod crypto;
@@ -43,13 +43,10 @@ multiexp = { path = "../../crypto/multiexp", version = "0.4", default-features =

# Needed for multisig
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["recommended"], optional = true }
dleq = { path = "../../crypto/dleq", version = "0.4", default-features = false, features = ["serialize"], optional = true }
frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.8", default-features = false, features = ["ed25519"], optional = true }

monero-generators = { path = "generators", version = "0.4", default-features = false }

async-lock = { version = "3", default-features = false, optional = true }

hex-literal = "0.4"
hex = { version = "0.4", default-features = false, features = ["alloc"] }
serde = { version = "1", default-features = false, features = ["derive", "alloc"] }
@@ -91,12 +88,9 @@ std = [
  "multiexp/std",

  "transcript/std",
  "dleq/std",

  "monero-generators/std",

  "async-lock?/std",

  "hex/std",
  "serde/std",
  "serde_json/std",
@@ -104,10 +98,8 @@ std = [
  "base58-monero/std",
]

cache-distribution = ["async-lock"]
http-rpc = ["digest_auth", "simple-request", "tokio"]
multisig = ["transcript", "frost", "dleq", "std"]
multisig = ["transcript", "frost", "std"]
binaries = ["tokio/rt-multi-thread", "tokio/macros", "http-rpc"]
experimental = []

default = ["std", "http-rpc"]

@@ -47,3 +47,15 @@ It also won't act as a wallet, just as a transaction library. wallet2 has
several *non-transaction-level* policies, such as always attempting to use two
inputs to create transactions. These are considered out of scope to
monero-serai.

### Feature flags
monero-serai has certain functionality behind feature flags:

- `std`: Enables usage of Rust's `std` and several other features. See `Cargo.toml` for the full list.
- `http-rpc`: Enables an HTTP(S) transport type within the `rpc` module
- `multisig`: Enables multi-signature features within the `wallet` module
- `binaries`: TODO

The features enabled by default are:
- `std`
- `http-rpc`
@@ -15,16 +15,43 @@ const CORRECT_BLOCK_HASH_202612: [u8; 32] =
const EXISTING_BLOCK_HASH_202612: [u8; 32] =
  hex_literal::hex!("bbd604d2ba11ba27935e006ed39c9bfdd99b76bf4a50654bc1e1e61217962698");

/// The header of a [`Block`].
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BlockHeader {
  /// This represents the hardfork number of the block.
  pub major_version: u8,
  /// This field is used to vote for a particular [hardfork](https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/cryptonote_basic/cryptonote_basic.h#L460).
  pub minor_version: u8,
  /// The UNIX time at which the block was mined.
  pub timestamp: u64,
  /// The previous [`Block::hash`].
  pub previous: [u8; 32],
  /// The block's nonce.
  pub nonce: u32,
}

impl BlockHeader {
  /// Serialize [`Self`] into the writer `w`.
  ///
  /// # Example
  /// ```rust
  /// # use monero_serai::block::*;
  /// # fn main() -> std::io::Result<()> {
  /// let block_header = BlockHeader {
  ///   major_version: 1,
  ///   minor_version: 2,
  ///   timestamp: 3,
  ///   previous: [4; 32],
  ///   nonce: 5,
  /// };
  ///
  /// let mut writer = vec![];
  /// block_header.write(&mut writer)?;
  /// # Ok(()) }
  /// ```
  ///
  /// # Errors
  /// This function returns any errors from the writer itself.
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    write_varint(&self.major_version, w)?;
    write_varint(&self.minor_version, w)?;
@@ -33,12 +60,58 @@ impl BlockHeader {
    w.write_all(&self.nonce.to_le_bytes())
  }

  /// Serialize [`Self`] into a new byte buffer.
  ///
  /// # Example
  /// ```rust
  /// # use monero_serai::block::*;
  /// # fn main() -> std::io::Result<()> {
  /// let block_header = BlockHeader {
  ///   major_version: 1,
  ///   minor_version: 2,
  ///   timestamp: 3,
  ///   previous: [4; 32],
  ///   nonce: 5,
  /// };
  ///
  /// let mut writer = vec![];
  /// block_header.write(&mut writer)?;
  ///
  /// let serialized = block_header.serialize();
  /// assert_eq!(serialized, writer);
  /// # Ok(()) }
  /// ```
  pub fn serialize(&self) -> Vec<u8> {
    let mut serialized = vec![];
    self.write(&mut serialized).unwrap();
    serialized
  }

  /// Create [`Self`] from the reader `r`.
  ///
  /// # Example
  /// ```rust
  /// # use monero_serai::block::*;
  /// # fn main() -> std::io::Result<()> {
  /// let block_header = BlockHeader {
  ///   major_version: 1,
  ///   minor_version: 2,
  ///   timestamp: 3,
  ///   previous: [4; 32],
  ///   nonce: 5,
  /// };
  ///
  /// let mut vec = vec![];
  /// block_header.write(&mut vec)?;
  ///
  /// let read = BlockHeader::read(&mut vec.as_slice())?;
  /// assert_eq!(read, block_header);
  /// # Ok(()) }
  /// ```
  ///
  /// # Errors
  /// This function returns an error if either the reader failed,
  /// or if the data could not be deserialized into a [`Self`].
  pub fn read<R: Read>(r: &mut R) -> io::Result<BlockHeader> {
    Ok(BlockHeader {
      major_version: read_varint(r)?,
@@ -50,14 +123,19 @@ impl BlockHeader {
  }
}

/// A block on the Monero blockchain.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Block {
  /// The header of this block.
  pub header: BlockHeader,
  /// The miner/coinbase transaction.
  pub miner_tx: Transaction,
  /// Hashes of all the transactions within this block.
  pub txs: Vec<[u8; 32]>,
}

impl Block {
  /// Return this block's number (its height), as specified by its miner transaction.
  pub fn number(&self) -> Option<u64> {
    match self.miner_tx.prefix.inputs.first() {
      Some(Input::Gen(number)) => Some(*number),
@@ -65,6 +143,10 @@ impl Block {
    }
  }

  /// Serialize [`Self`] into the writer `w`.
  ///
  /// # Errors
  /// This function returns any errors from the writer itself.
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    self.header.write(w)?;
    self.miner_tx.write(w)?;
@@ -75,6 +157,11 @@ impl Block {
    Ok(())
  }

  /// Return the merkle root of this block's transactions.
  ///
  /// In the case that this block has no transactions other than
  /// the miner transaction, the miner transaction's hash is returned,
  /// i.e. the [`Transaction::hash`] of [`Self::miner_tx`] is returned.
  fn tx_merkle_root(&self) -> [u8; 32] {
    merkle_root(self.miner_tx.hash(), &self.txs)
  }
@@ -91,6 +178,7 @@ impl Block {
    blob
  }

  /// Calculate the hash of this block.
  pub fn hash(&self) -> [u8; 32] {
    let mut hashable = self.serialize_hashable();
    // Monero pre-appends a VarInt of the block hashing blob's length before getting the block hash
@@ -107,12 +195,18 @@ impl Block {
    hash
  }
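  // Sketch of the hashing flow described above (assumed helper names, not part
  // of the diff): the hashing blob is prefixed with a VarInt of its own length
  // before being passed to Keccak-256.
  //   let blob = block.serialize_hashable();
  //   let mut with_len = vec![];
  //   write_varint(&u64::try_from(blob.len()).unwrap(), &mut with_len).unwrap();
  //   with_len.extend(blob);
  //   let block_hash = hash(&with_len); // Keccak-256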

  /// Serialize [`Self`] into a new byte buffer.
  pub fn serialize(&self) -> Vec<u8> {
    let mut serialized = vec![];
    self.write(&mut serialized).unwrap();
    serialized
  }

  /// Create [`Self`] from the reader `r`.
  ///
  /// # Errors
  /// This function returns an error if either the reader failed,
  /// or if the data could not be deserialized into a [`Self`].
  pub fn read<R: Read>(r: &mut R) -> io::Result<Block> {
    let header = BlockHeader::read(r)?;

@@ -124,7 +218,7 @@ impl Block {
    Ok(Block {
      header,
      miner_tx,
      txs: (0_usize .. read_varint(r)?).map(|_| read_bytes(r)).collect::<Result<_, _>>()?,
      txs: (0_usize..read_varint(r)?).map(|_| read_bytes(r)).collect::<Result<_, _>>()?,
    })
  }
}

@@ -14,7 +14,12 @@ use zeroize::{Zeroize, ZeroizeOnDrop};

use sha3::{Digest, Keccak256};

use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint};
use curve25519_dalek::{
  constants::{ED25519_BASEPOINT_TABLE, ED25519_BASEPOINT_POINT},
  scalar::Scalar,
  edwards::{EdwardsPoint, VartimeEdwardsPrecomputation},
  traits::VartimePrecomputedMultiscalarMul,
};

pub use monero_generators::{H, decompress_point};

@@ -46,8 +51,26 @@ pub mod wallet;
#[cfg(test)]
mod tests;

/// Default block lock time for transactions.
///
/// This is the number of new blocks that must
/// pass before a new transaction's outputs can be spent.
///
/// Equivalent to Monero's [`CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE`](https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/cryptonote_config.h#L49).
pub const DEFAULT_LOCK_WINDOW: usize = 10;
/// Block lock time for coinbase transactions.
///
/// This is the number of new blocks that must
/// pass before a coinbase/miner transaction can be spent.
///
/// Equivalent to Monero's [`CRYPTONOTE_MINED_MONEY_UNLOCK_WINDOW`](https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/cryptonote_config.h#L44).
pub const COINBASE_LOCK_WINDOW: usize = 60;
/// Average number of seconds it takes for a block to be mined.
///
/// This is the target number of seconds mining difficulty will adjust to,
/// i.e. a block will be mined every `BLOCK_TIME` seconds on average.
///
/// Equivalent to Monero's [`DIFFICULTY_TARGET_V2`](https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/cryptonote_config.h#L44).
pub const BLOCK_TIME: usize = 120;
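// Illustrative arithmetic (not part of the diff): a freshly mined coinbase
// output only becomes spendable after COINBASE_LOCK_WINDOW blocks, which at
// the BLOCK_TIME target is roughly 60 * 120 = 7200 seconds (two hours) on
// average.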

static INV_EIGHT_CELL: OnceLock<Scalar> = OnceLock::new();
@@ -56,6 +79,13 @@ pub(crate) fn INV_EIGHT() -> Scalar {
  *INV_EIGHT_CELL.get_or_init(|| Scalar::from(8u8).invert())
}

static BASEPOINT_PRECOMP_CELL: OnceLock<VartimeEdwardsPrecomputation> = OnceLock::new();
#[allow(non_snake_case)]
pub(crate) fn BASEPOINT_PRECOMP() -> &'static VartimeEdwardsPrecomputation {
  BASEPOINT_PRECOMP_CELL
    .get_or_init(|| VartimeEdwardsPrecomputation::new([ED25519_BASEPOINT_POINT]))
}
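// Context for INV_EIGHT (background, not part of the diff): Ed25519 has
// cofactor 8. Multiplying a point by (1/8 mod l) and then by the cofactor 8
// is the identity on the prime-order subgroup while sending any torsion
// component to the identity, which is why the proofs below transmit and
// transcript values scaled by INV_EIGHT before clearing the cofactor.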

/// Monero protocol version.
///
/// v15 is omitted as v15 was simply v14 and v16 being active at the same time, with regards to the
@@ -63,19 +93,34 @@ pub(crate) fn INV_EIGHT() -> Scalar {
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
#[allow(non_camel_case_types)]
pub enum Protocol {
  /// Version 14.
  v14,
  /// Version 16.
  v16,
  /// A custom version with customized properties.
  Custom {
    /// See [`Self::ring_len`].
    ring_len: usize,
    /// See [`Self::bp_plus`].
    bp_plus: bool,
    /// See [`Self::optimal_rct_type`].
    optimal_rct_type: RctType,
    /// See [`Self::view_tags`].
    view_tags: bool,
    /// See [`Self::v16_fee`].
    v16_fee: bool,
  },
}

impl Protocol {
  /// Number of ring members under this protocol version.
  ///
  /// # Example
  /// ```rust
  /// # use monero_serai::*;
  /// assert_eq!(Protocol::v14.ring_len(), 11);
  /// assert_eq!(Protocol::v16.ring_len(), 16);
  /// ```
  pub fn ring_len(&self) -> usize {
    match self {
      Protocol::v14 => 11,
@@ -87,6 +132,13 @@ impl Protocol {
  /// Whether the specified version uses Bulletproofs or Bulletproofs+.
  ///
  /// This method will likely be reworked when versions not using Bulletproofs at all are added.
  ///
  /// # Example
  /// ```rust
  /// # use monero_serai::*;
  /// assert_eq!(Protocol::v14.bp_plus(), false);
  /// assert_eq!(Protocol::v16.bp_plus(), true);
  /// ```
  pub fn bp_plus(&self) -> bool {
    match self {
      Protocol::v14 => false,
@@ -95,6 +147,14 @@ impl Protocol {
    }
  }

  /// The optimal RingCT type for this version.
  ///
  /// # Example
  /// ```rust
  /// # use monero_serai::{*, ringct::*};
  /// assert_eq!(Protocol::v14.optimal_rct_type(), RctType::Clsag);
  /// assert_eq!(Protocol::v16.optimal_rct_type(), RctType::BulletproofsPlus);
  /// ```
  // TODO: Make this an Option when we support pre-RCT protocols
  pub fn optimal_rct_type(&self) -> RctType {
    match self {
@@ -105,6 +165,13 @@ impl Protocol {
  }

  /// Whether or not the specified version uses view tags.
  ///
  /// # Example
  /// ```rust
  /// # use monero_serai::{*, ringct::*};
  /// assert_eq!(Protocol::v14.view_tags(), false);
  /// assert_eq!(Protocol::v16.view_tags(), true);
  /// ```
  pub fn view_tags(&self) -> bool {
    match self {
      Protocol::v14 => false,
@@ -115,6 +182,13 @@ impl Protocol {

  /// Whether or not the specified version uses the fee algorithm from Monero
  /// hard fork version 16 (released in v18 binaries).
  ///
  /// # Example
  /// ```rust
  /// # use monero_serai::{*, ringct::*};
  /// assert_eq!(Protocol::v14.v16_fee(), false);
  /// assert_eq!(Protocol::v16.v16_fee(), true);
  /// ```
  pub fn v16_fee(&self) -> bool {
    match self {
      Protocol::v14 => false,
@@ -176,11 +250,15 @@ impl Protocol {
  }
}

/// Transparent structure representing a Pedersen commitment's contents.
/// Transparent structure representing a [Pedersen commitment](https://web.getmonero.org/resources/moneropedia/pedersen-commitment.html)'s contents.
#[allow(non_snake_case)]
#[derive(Clone, PartialEq, Eq, Zeroize, ZeroizeOnDrop)]
pub struct Commitment {
  /// The value used to mask the `amount`.
  pub mask: Scalar,
  /// The value being masked.
  ///
  /// In Monero's case, this is the amount of XMR in atomic units.
  pub amount: u64,
}

@@ -196,6 +274,7 @@ impl Commitment {
    Commitment { mask: Scalar::ONE, amount: 0 }
  }

  /// Create a new [`Self`].
  pub fn new(mask: Scalar, amount: u64) -> Commitment {
    Commitment { mask, amount }
  }
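  // For reference (background, not part of the diff): a Pedersen commitment is
  // calculated as C = mask * G + amount * H, hiding the amount while remaining
  // binding. A hypothetical usage, given a CSPRNG `rng`:
  //   let commitment = Commitment::new(random_scalar(&mut rng), 1_000).calculate();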

@@ -11,6 +11,7 @@ use monero_generators::hash_to_point;

use crate::{serialize::*, hash_to_scalar};

/// A signature within a [`RingSignature`].
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct Signature {
  c: Scalar,
@@ -18,23 +19,37 @@ pub struct Signature {
}

impl Signature {
  /// Serialize [`Self`] into the writer `w`.
  ///
  /// # Errors
  /// This function returns any errors from the writer itself.
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    write_scalar(&self.c, w)?;
    write_scalar(&self.r, w)?;
    Ok(())
  }

  /// Create [`Self`] from the reader `r`.
  ///
  /// # Errors
  /// This function returns an error if either the reader failed,
  /// or if the data could not be deserialized into a [`Self`].
  pub fn read<R: Read>(r: &mut R) -> io::Result<Signature> {
    Ok(Signature { c: read_scalar(r)?, r: read_scalar(r)? })
  }
}

/// A [ring signature](https://en.wikipedia.org/wiki/Ring_signature).
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct RingSignature {
  sigs: Vec<Signature>,
}

impl RingSignature {
  /// Serialize [`Self`] into the writer `w`.
  ///
  /// # Errors
  /// This function returns any errors from the writer itself.
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    for sig in &self.sigs {
      sig.write(w)?;
@@ -42,6 +57,11 @@ impl RingSignature {
    Ok(())
  }

  /// Create [`Self`] from the reader `r`.
  ///
  /// # Errors
  /// This function returns an error if either the reader failed,
  /// or if the data could not be deserialized into a [`Self`].
  pub fn read<R: Read>(members: usize, r: &mut R) -> io::Result<RingSignature> {
    Ok(RingSignature { sigs: read_raw_vec(Signature::read, members, r)? })
  }

@@ -7,20 +7,21 @@ use monero_generators::H_pow_2;

use crate::{hash_to_scalar, unreduced_scalar::UnreducedScalar, serialize::*};

/// 64 Borromean ring signatures.
/// 64 Borromean ring signatures, as needed for a 64-bit range proof.
///
/// s0 and s1 are stored as `UnreducedScalar`s due to Monero not requiring they be reduced.
/// `UnreducedScalar` preserves their original byte encoding and implements the custom reduction
/// algorithm which was historically in use.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BorromeanSignatures {
  pub s0: [UnreducedScalar; 64],
  pub s1: [UnreducedScalar; 64],
  pub ee: Scalar,
struct BorromeanSignatures {
  s0: [UnreducedScalar; 64],
  s1: [UnreducedScalar; 64],
  ee: Scalar,
}

impl BorromeanSignatures {
  pub fn read<R: Read>(r: &mut R) -> io::Result<BorromeanSignatures> {
  /// Read a set of BorromeanSignatures from a reader.
  fn read<R: Read>(r: &mut R) -> io::Result<BorromeanSignatures> {
    Ok(BorromeanSignatures {
      s0: read_array(UnreducedScalar::read, r)?,
      s1: read_array(UnreducedScalar::read, r)?,
@@ -28,7 +29,8 @@ impl BorromeanSignatures {
    })
  }

  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
  /// Write the set of BorromeanSignatures to a writer.
  fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    for s0 in &self.s0 {
      s0.write(w)?;
    }
@@ -64,22 +66,26 @@ impl BorromeanSignatures {
/// A range proof premised on Borromean ring signatures.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BorromeanRange {
  pub sigs: BorromeanSignatures,
  pub bit_commitments: [EdwardsPoint; 64],
  sigs: BorromeanSignatures,
  bit_commitments: [EdwardsPoint; 64],
}

impl BorromeanRange {
  /// Read a BorromeanRange proof from a reader.
  pub fn read<R: Read>(r: &mut R) -> io::Result<BorromeanRange> {
    Ok(BorromeanRange {
      sigs: BorromeanSignatures::read(r)?,
      bit_commitments: read_array(read_point, r)?,
    })
  }

  /// Write the BorromeanRange proof to a writer.
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    self.sigs.write(w)?;
    write_raw_vec(write_point, &self.bit_commitments, w)
  }

  /// Verify the commitment contains a 64-bit value.
  pub fn verify(&self, commitment: &EdwardsPoint) -> bool {
    if &self.bit_commitments.iter().sum::<EdwardsPoint>() != commitment {
      return false;
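    // Why the sum check (background, not part of the diff): each bit
    // commitment C_i opens to either 0 or 2^i times H (which the Borromean
    // ring signatures enforce), so if sum(C_i) == C, the committed amount is
    // the 64-bit value sum(b_i * 2^i) for bits b_i.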

@@ -26,15 +26,15 @@ use self::plus::*;

pub(crate) const MAX_OUTPUTS: usize = self::core::MAX_M;

/// Bulletproofs enum, supporting the original and plus formulations.
/// Bulletproof enum, encapsulating both Bulletproofs and Bulletproofs+.
#[allow(clippy::large_enum_variant)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Bulletproofs {
pub enum Bulletproof {
  Original(OriginalStruct),
  Plus(AggregateRangeProof),
}

impl Bulletproofs {
impl Bulletproof {
  fn bp_fields(plus: bool) -> usize {
    if plus {
      6
@@ -57,7 +57,7 @@ impl Bulletproofs {

    let mut bp_clawback = 0;
    if n_padded_outputs > 2 {
      let fields = Bulletproofs::bp_fields(plus);
      let fields = Bulletproof::bp_fields(plus);
      let base = ((fields + (2 * (LOG_N + 1))) * 32) / 2;
      let size = (fields + (2 * LR_len)) * 32;
      bp_clawback = ((base * n_padded_outputs) - size) * 4 / 5;
@@ -68,49 +68,51 @@ impl Bulletproofs {

  pub(crate) fn fee_weight(plus: bool, outputs: usize) -> usize {
    #[allow(non_snake_case)]
    let (bp_clawback, LR_len) = Bulletproofs::calculate_bp_clawback(plus, outputs);
    32 * (Bulletproofs::bp_fields(plus) + (2 * LR_len)) + 2 + bp_clawback
    let (bp_clawback, LR_len) = Bulletproof::calculate_bp_clawback(plus, outputs);
    32 * (Bulletproof::bp_fields(plus) + (2 * LR_len)) + 2 + bp_clawback
  }

  /// Prove the list of commitments are within [0 .. 2^64).
  /// Prove the list of commitments are within [0 .. 2^64) with an aggregate Bulletproof.
  pub fn prove<R: RngCore + CryptoRng>(
    rng: &mut R,
    outputs: &[Commitment],
    plus: bool,
  ) -> Result<Bulletproofs, TransactionError> {
  ) -> Result<Bulletproof, TransactionError> {
    if outputs.is_empty() {
      Err(TransactionError::NoOutputs)?;
    }
    if outputs.len() > MAX_OUTPUTS {
      Err(TransactionError::TooManyOutputs)?;
    }
    Ok(if !plus {
      Bulletproofs::Original(OriginalStruct::prove(rng, outputs))
    } else {
      use dalek_ff_group::EdwardsPoint as DfgPoint;
      Bulletproofs::Plus(
        AggregateRangeStatement::new(outputs.iter().map(|com| DfgPoint(com.calculate())).collect())
    Ok(Bulletproof::Original(OriginalStruct::prove(rng, outputs)))
  }

  /// Prove the list of commitments are within [0 .. 2^64) with an aggregate Bulletproof+.
  pub fn prove_plus<R: RngCore + CryptoRng>(
    rng: &mut R,
    outputs: Vec<Commitment>,
  ) -> Result<Bulletproof, TransactionError> {
    if outputs.is_empty() {
      Err(TransactionError::NoOutputs)?;
    }
    if outputs.len() > MAX_OUTPUTS {
      Err(TransactionError::TooManyOutputs)?;
    }
    Ok(Bulletproof::Plus(
      AggregateRangeStatement::new(outputs.iter().map(Commitment::calculate).collect())
        .unwrap()
        .prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs).unwrap()))
        .unwrap(),
      )
    })
    ))
  }
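  // Illustrative usage of the split API (assumed signatures after this diff,
  // not confirmed by it): original Bulletproofs take the commitments by
  // reference while Bulletproofs+ take them by value:
  //   let bp = Bulletproof::prove(&mut rng, &commitments)?;
  //   let bp_plus = Bulletproof::prove_plus(&mut rng, commitments)?;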

  /// Verify the given Bulletproofs.
  /// Verify the given Bulletproof(+).
  #[must_use]
  pub fn verify<R: RngCore + CryptoRng>(&self, rng: &mut R, commitments: &[EdwardsPoint]) -> bool {
    match self {
      Bulletproofs::Original(bp) => bp.verify(rng, commitments),
      Bulletproofs::Plus(bp) => {
      Bulletproof::Original(bp) => bp.verify(rng, commitments),
      Bulletproof::Plus(bp) => {
        let mut verifier = BatchVerifier::new(1);
        // If this commitment is torsioned (which is allowed), this won't be a well-formed
        // dfg::EdwardsPoint (expected to be of prime-order)
        // The actual BP+ impl will perform a torsion clear though, making this safe
        // TODO: Have AggregateRangeStatement take in dalek EdwardsPoint for clarity on this
        let Some(statement) = AggregateRangeStatement::new(
          commitments.iter().map(|c| dalek_ff_group::EdwardsPoint(*c)).collect(),
        ) else {
        let Some(statement) = AggregateRangeStatement::new(commitments.to_vec()) else {
          return false;
        };
        if !statement.verify(rng, &mut verifier, (), bp.clone()) {
@@ -121,9 +123,11 @@ impl Bulletproofs {
    }
  }

  /// Accumulate the verification for the given Bulletproofs into the specified BatchVerifier.
  /// Returns false if the Bulletproofs aren't sane, without mutating the BatchVerifier.
  /// Returns true if the Bulletproofs are sane, regardless of their validity.
  /// Accumulate the verification for the given Bulletproof into the specified BatchVerifier.
  ///
  /// Returns false if the Bulletproof isn't sane, leaving the BatchVerifier in an undefined
  /// state.
  /// Returns true if the Bulletproof is sane, regardless of its validity.
  #[must_use]
  pub fn batch_verify<ID: Copy + Zeroize, R: RngCore + CryptoRng>(
    &self,
@@ -133,11 +137,9 @@ impl Bulletproofs {
    commitments: &[EdwardsPoint],
  ) -> bool {
    match self {
      Bulletproofs::Original(bp) => bp.batch_verify(rng, verifier, id, commitments),
      Bulletproofs::Plus(bp) => {
        let Some(statement) = AggregateRangeStatement::new(
          commitments.iter().map(|c| dalek_ff_group::EdwardsPoint(*c)).collect(),
        ) else {
      Bulletproof::Original(bp) => bp.batch_verify(rng, verifier, id, commitments),
      Bulletproof::Plus(bp) => {
        let Some(statement) = AggregateRangeStatement::new(commitments.to_vec()) else {
          return false;
        };
        statement.verify(rng, verifier, id, bp.clone())
@@ -151,7 +153,7 @@ impl Bulletproofs {
    specific_write_vec: F,
  ) -> io::Result<()> {
    match self {
      Bulletproofs::Original(bp) => {
      Bulletproof::Original(bp) => {
        write_point(&bp.A, w)?;
        write_point(&bp.S, w)?;
        write_point(&bp.T1, w)?;
@@ -165,7 +167,7 @@ impl Bulletproofs {
        write_scalar(&bp.t, w)
      }

      Bulletproofs::Plus(bp) => {
      Bulletproof::Plus(bp) => {
        write_point(&bp.A.0, w)?;
        write_point(&bp.wip.A.0, w)?;
        write_point(&bp.wip.B.0, w)?;
@@ -182,19 +184,21 @@ impl Bulletproofs {
    self.write_core(w, |points, w| write_raw_vec(write_point, points, w))
  }

  /// Write the Bulletproof(+) to a writer.
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    self.write_core(w, |points, w| write_vec(write_point, points, w))
  }

  /// Serialize the Bulletproof(+) to a `Vec<u8>`.
  pub fn serialize(&self) -> Vec<u8> {
    let mut serialized = vec![];
    self.write(&mut serialized).unwrap();
    serialized
  }

  /// Read Bulletproofs.
  pub fn read<R: Read>(r: &mut R) -> io::Result<Bulletproofs> {
    Ok(Bulletproofs::Original(OriginalStruct {
  /// Read a Bulletproof.
  pub fn read<R: Read>(r: &mut R) -> io::Result<Bulletproof> {
    Ok(Bulletproof::Original(OriginalStruct {
      A: read_point(r)?,
      S: read_point(r)?,
      T1: read_point(r)?,
@@ -209,11 +213,11 @@ impl Bulletproofs {
    }))
  }

  /// Read Bulletproofs+.
  pub fn read_plus<R: Read>(r: &mut R) -> io::Result<Bulletproofs> {
  /// Read a Bulletproof+.
  pub fn read_plus<R: Read>(r: &mut R) -> io::Result<Bulletproof> {
    use dalek_ff_group::{Scalar as DfgScalar, EdwardsPoint as DfgPoint};

    Ok(Bulletproofs::Plus(AggregateRangeProof {
    Ok(Bulletproof::Plus(AggregateRangeProof {
      A: DfgPoint(read_point(r)?),
      wip: WipProof {
        A: DfgPoint(read_point(r)?),

@@ -9,7 +9,7 @@ use curve25519_dalek::{scalar::Scalar as DalekScalar, edwards::EdwardsPoint as D
use group::{ff::Field, Group};
use dalek_ff_group::{ED25519_BASEPOINT_POINT as G, Scalar, EdwardsPoint};

use multiexp::BatchVerifier;
use multiexp::{BatchVerifier, multiexp};

use crate::{Commitment, ringct::bulletproofs::core::*};

@@ -17,9 +17,23 @@ include!(concat!(env!("OUT_DIR"), "/generators.rs"));

static IP12_CELL: OnceLock<Scalar> = OnceLock::new();
pub(crate) fn IP12() -> Scalar {
  *IP12_CELL.get_or_init(|| inner_product(&ScalarVector(vec![Scalar::ONE; N]), TWO_N()))
  *IP12_CELL.get_or_init(|| ScalarVector(vec![Scalar::ONE; N]).inner_product(TWO_N()))
}

pub(crate) fn hadamard_fold(
  l: &[EdwardsPoint],
  r: &[EdwardsPoint],
  a: Scalar,
  b: Scalar,
) -> Vec<EdwardsPoint> {
  let mut res = Vec::with_capacity(l.len() / 2);
  for i in 0 .. l.len() {
    res.push(multiexp(&[(a, l[i]), (b, r[i])]));
  }
  res
}
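// hadamard_fold is the generator-folding step of the inner-product argument
// (background, not part of the diff): given the left and right halves of a
// generator vector, each round outputs G'_i = a * l[i] + b * r[i], halving
// the working vectors until a single element remains.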

/// Internal structure representing a Bulletproof.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct OriginalStruct {
  pub(crate) A: DalekPoint,
@@ -57,7 +71,7 @@ impl OriginalStruct {
    let mut cache = hash_to_scalar(&y.to_bytes());
    let z = cache;

    let l0 = &aL - z;
    let l0 = aL - z;
    let l1 = sL;

    let mut zero_twos = Vec::with_capacity(MN);
@@ -69,12 +83,12 @@ impl OriginalStruct {
    }

    let yMN = ScalarVector::powers(y, MN);
    let r0 = (&(aR + z) * &yMN) + ScalarVector(zero_twos);
    let r1 = yMN * sR;
    let r0 = ((aR + z) * &yMN) + &ScalarVector(zero_twos);
    let r1 = yMN * &sR;

    let (T1, T2, x, mut taux) = {
      let t1 = inner_product(&l0, &r1) + inner_product(&l1, &r0);
      let t2 = inner_product(&l1, &r1);
      let t1 = l0.clone().inner_product(&r1) + r0.clone().inner_product(&l1);
      let t2 = l1.clone().inner_product(&r1);

      let mut tau1 = Scalar::random(&mut *rng);
      let mut tau2 = Scalar::random(&mut *rng);
@@ -100,10 +114,10 @@ impl OriginalStruct {
      taux += zpow[i + 2] * gamma;
    }

    let l = &l0 + &(l1 * x);
    let r = &r0 + &(r1 * x);
    let l = l0 + &(l1 * x);
    let r = r0 + &(r1 * x);

    let t = inner_product(&l, &r);
    let t = l.clone().inner_product(&r);

    let x_ip =
      hash_cache(&mut cache, &[x.to_bytes(), taux.to_bytes(), mu.to_bytes(), t.to_bytes()]);
@@ -126,8 +140,8 @@ impl OriginalStruct {
      let (aL, aR) = a.split();
      let (bL, bR) = b.split();

      let cL = inner_product(&aL, &bR);
      let cR = inner_product(&aR, &bL);
      let cL = aL.clone().inner_product(&bR);
      let cR = aR.clone().inner_product(&bL);

      let (G_L, G_R) = G_proof.split_at(aL.len());
      let (H_L, H_R) = H_proof.split_at(aL.len());
@@ -140,8 +154,8 @@ impl OriginalStruct {
      let w = hash_cache(&mut cache, &[L_i.compress().to_bytes(), R_i.compress().to_bytes()]);
      let winv = w.invert().unwrap();

      a = (aL * w) + (aR * winv);
      b = (bL * winv) + (bR * w);
      a = (aL * w) + &(aR * winv);
      b = (bL * winv) + &(bR * w);

      if a.len() != 1 {
        G_proof = hadamard_fold(G_L, G_R, winv, w);

@@ -9,6 +9,7 @@ use group::{
  ff::{Field, PrimeField},
  Group, GroupEncoding,
};
use curve25519_dalek::EdwardsPoint as DalekPoint;
use dalek_ff_group::{Scalar, EdwardsPoint};

use crate::{
@@ -24,11 +25,11 @@ use crate::{
  },
};

// Figure 3
// Figure 3 of the Bulletproofs+ paper
#[derive(Clone, Debug)]
pub(crate) struct AggregateRangeStatement {
  generators: Generators,
  V: Vec<EdwardsPoint>,
  V: Vec<DalekPoint>,
}

impl Zeroize for AggregateRangeStatement {
@@ -38,27 +39,19 @@ impl Zeroize for AggregateRangeStatement {
}

#[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)]
pub(crate) struct AggregateRangeWitness {
  values: Vec<u64>,
  gammas: Vec<Scalar>,
}
pub(crate) struct AggregateRangeWitness(Vec<Commitment>);

impl AggregateRangeWitness {
  pub(crate) fn new(commitments: &[Commitment]) -> Option<Self> {
  pub(crate) fn new(commitments: Vec<Commitment>) -> Option<Self> {
    if commitments.is_empty() || (commitments.len() > MAX_M) {
      return None;
    }

    let mut values = Vec::with_capacity(commitments.len());
    let mut gammas = Vec::with_capacity(commitments.len());
    for commitment in commitments {
      values.push(commitment.amount);
      gammas.push(Scalar(commitment.mask));
    }
    Some(AggregateRangeWitness { values, gammas })
    Some(AggregateRangeWitness(commitments))
  }
}

/// Internal structure representing a Bulletproof+, as used in Monero.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct AggregateRangeProof {
  pub(crate) A: EdwardsPoint,
@@ -66,7 +59,7 @@ pub struct AggregateRangeProof {
}

impl AggregateRangeStatement {
  pub(crate) fn new(V: Vec<EdwardsPoint>) -> Option<Self> {
  pub(crate) fn new(V: Vec<DalekPoint>) -> Option<Self> {
    if V.is_empty() || (V.len() > MAX_M) {
      return None;
    }
@@ -107,12 +100,15 @@ impl AggregateRangeStatement {
    }
    let mn = V.len() * N;

    // 2, 4, 6, 8... powers of z, of length equal to the number of commitments
    let mut z_pow = Vec::with_capacity(V.len());
    // z**2
    z_pow.push(z * z);

    let mut d = ScalarVector::new(mn);
    for j in 1 ..= V.len() {
      z_pow.push(z.pow(Scalar::from(2 * u64::try_from(j).unwrap()))); // TODO: Optimize this
      d = d.add_vec(&Self::d_j(j, V.len()).mul(z_pow[j - 1]));
      z_pow.push(*z_pow.last().unwrap() * z_pow[0]);
      d = d + &(Self::d_j(j, V.len()) * (z_pow[j - 1]));
    }

    let mut ascending_y = ScalarVector(vec![y]);
@@ -124,7 +120,8 @@ impl AggregateRangeStatement {
    let mut descending_y = ascending_y.clone();
    descending_y.0.reverse();

    let d_descending_y = d.mul_vec(&descending_y);
    let d_descending_y = d.clone() * &descending_y;
    let d_descending_y_plus_z = d_descending_y + z;

    let y_mn_plus_one = descending_y[0] * y;

@@ -135,9 +132,9 @@ impl AggregateRangeStatement {

    let neg_z = -z;
    let mut A_terms = Vec::with_capacity((generators.len() * 2) + 2);
    for (i, d_y_z) in d_descending_y.add(z).0.drain(..).enumerate() {
    for (i, d_y_z) in d_descending_y_plus_z.0.iter().enumerate() {
      A_terms.push((neg_z, generators.generator(GeneratorsList::GBold1, i)));
      A_terms.push((d_y_z, generators.generator(GeneratorsList::HBold1, i)));
      A_terms.push((*d_y_z, generators.generator(GeneratorsList::HBold1, i)));
    }
    A_terms.push((y_mn_plus_one, commitment_accum));
    A_terms.push((
@@ -145,7 +142,14 @@ impl AggregateRangeStatement {
      Generators::g(),
    ));

    (y, d_descending_y, y_mn_plus_one, z, ScalarVector(z_pow), A + multiexp_vartime(&A_terms))
    (
      y,
      d_descending_y_plus_z,
      y_mn_plus_one,
      z,
      ScalarVector(z_pow),
      A + multiexp_vartime(&A_terms),
    )
  }

  pub(crate) fn prove<R: RngCore + CryptoRng>(
@@ -154,13 +158,11 @@ impl AggregateRangeStatement {
    witness: &AggregateRangeWitness,
  ) -> Option<AggregateRangeProof> {
    // Check for consistency with the witness
    if self.V.len() != witness.values.len() {
    if self.V.len() != witness.0.len() {
      return None;
    }
    for (commitment, (value, gamma)) in
      self.V.iter().zip(witness.values.iter().zip(witness.gammas.iter()))
    {
      if Commitment::new(**gamma, *value).calculate() != **commitment {
    for (commitment, witness) in self.V.iter().zip(witness.0.iter()) {
      if witness.calculate() != *commitment {
        return None;
      }
    }
@@ -173,9 +175,9 @@ impl AggregateRangeStatement {
    // Commitments aren't transmitted INV_EIGHT though, so this multiplies by INV_EIGHT to enable
    // clearing its cofactor without mutating the value
    // For some reason, these values are transcripted * INV_EIGHT, not as transmitted
    let mut V = V.into_iter().map(|V| EdwardsPoint(V.0 * crate::INV_EIGHT())).collect::<Vec<_>>();
    let V = V.into_iter().map(|V| V * crate::INV_EIGHT()).collect::<Vec<_>>();
    let mut transcript = initial_transcript(V.iter());
    V.iter_mut().for_each(|V| *V = V.mul_by_cofactor());
    let mut V = V.into_iter().map(|V| EdwardsPoint(V.mul_by_cofactor())).collect::<Vec<_>>();

    // Pad V
    while V.len() < padded_pow_of_2(V.len()) {
@@ -188,10 +190,16 @@ impl AggregateRangeStatement {
    let mut a_l = ScalarVector(Vec::with_capacity(V.len() * N));
    for j in 1 ..= V.len() {
      d_js.push(Self::d_j(j, V.len()));
      a_l.0.append(&mut u64_decompose(*witness.values.get(j - 1).unwrap_or(&0)).0);
      #[allow(clippy::map_unwrap_or)]
      a_l.0.append(
        &mut u64_decompose(
          *witness.0.get(j - 1).map(|commitment| &commitment.amount).unwrap_or(&0),
        )
        .0,
      );
    }

    let a_r = a_l.sub(Scalar::ONE);
    let a_r = a_l.clone() - Scalar::ONE;

    let alpha = Scalar::random(&mut *rng);

@@ -209,14 +217,14 @@ impl AggregateRangeStatement {
    // Multiply by INV_EIGHT per earlier commentary
    A.0 *= crate::INV_EIGHT();

    let (y, d_descending_y, y_mn_plus_one, z, z_pow, A_hat) =
    let (y, d_descending_y_plus_z, y_mn_plus_one, z, z_pow, A_hat) =
      Self::compute_A_hat(PointVector(V), &generators, &mut transcript, A);

    let a_l = a_l.sub(z);
    let a_r = a_r.add_vec(&d_descending_y).add(z);
    let a_l = a_l - z;
    let a_r = a_r + &d_descending_y_plus_z;
    let mut alpha = alpha;
    for j in 1 ..= witness.gammas.len() {
      alpha += z_pow[j - 1] * witness.gammas[j - 1] * y_mn_plus_one;
    for j in 1 ..= witness.0.len() {
      alpha += z_pow[j - 1] * Scalar(witness.0[j - 1].mask) * y_mn_plus_one;
    }

    Some(AggregateRangeProof {
@@ -236,9 +244,11 @@ impl AggregateRangeStatement {
  ) -> bool {
    let Self { generators, V } = self;

    let mut V = V.into_iter().map(|V| EdwardsPoint(V.0 * crate::INV_EIGHT())).collect::<Vec<_>>();
    let V = V.into_iter().map(|V| V * crate::INV_EIGHT()).collect::<Vec<_>>();
    let mut transcript = initial_transcript(V.iter());
    V.iter_mut().for_each(|V| *V = V.mul_by_cofactor());
    // With the torsion clear, wrap it into an EdwardsPoint from dalek-ff-group
    // (which is prime-order)
    let V = V.into_iter().map(|V| EdwardsPoint(V.mul_by_cofactor())).collect::<Vec<_>>();

    let generators = generators.reduce(V.len() * N);

@@ -3,8 +3,7 @@
|
||||
use group::Group;
|
||||
use dalek_ff_group::{Scalar, EdwardsPoint};
|
||||
|
||||
mod scalar_vector;
|
||||
pub(crate) use scalar_vector::{ScalarVector, weighted_inner_product};
|
||||
pub(crate) use crate::ringct::bulletproofs::scalar_vector::ScalarVector;
|
||||
mod point_vector;
|
||||
pub(crate) use point_vector::PointVector;
|
||||
|
||||
|
||||
@@ -1,114 +0,0 @@
|
||||
use core::{
|
||||
borrow::Borrow,
|
||||
ops::{Index, IndexMut},
|
||||
};
|
||||
use std_shims::vec::Vec;
|
||||
|
||||
use zeroize::Zeroize;
|
||||
|
||||
use group::ff::Field;
|
||||
use dalek_ff_group::Scalar;
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
|
||||
pub(crate) struct ScalarVector(pub(crate) Vec<Scalar>);
|
||||
|
||||
impl Index<usize> for ScalarVector {
|
||||
type Output = Scalar;
|
||||
fn index(&self, index: usize) -> &Scalar {
|
||||
&self.0[index]
|
||||
}
|
||||
}
|
||||
|
||||
impl IndexMut<usize> for ScalarVector {
|
||||
fn index_mut(&mut self, index: usize) -> &mut Scalar {
|
||||
&mut self.0[index]
|
||||
}
|
||||
}
|
||||
|
||||
impl ScalarVector {
|
||||
pub(crate) fn new(len: usize) -> Self {
|
||||
ScalarVector(vec![Scalar::ZERO; len])
|
||||
}
|
||||
|
||||
pub(crate) fn add(&self, scalar: impl Borrow<Scalar>) -> Self {
|
||||
let mut res = self.clone();
|
||||
for val in &mut res.0 {
|
||||
*val += scalar.borrow();
|
||||
}
|
||||
res
|
||||
}
|
||||
|
||||
pub(crate) fn sub(&self, scalar: impl Borrow<Scalar>) -> Self {
|
||||
let mut res = self.clone();
|
||||
for val in &mut res.0 {
|
||||
*val -= scalar.borrow();
|
||||
}
|
||||
res
|
||||
}
|
||||
|
||||
pub(crate) fn mul(&self, scalar: impl Borrow<Scalar>) -> Self {
|
||||
let mut res = self.clone();
|
||||
for val in &mut res.0 {
|
||||
*val *= scalar.borrow();
|
||||
}
|
||||
res
|
||||
}
|
||||
|
||||
pub(crate) fn add_vec(&self, vector: &Self) -> Self {
|
||||
debug_assert_eq!(self.len(), vector.len());
|
||||
let mut res = self.clone();
|
||||
for (i, val) in res.0.iter_mut().enumerate() {
|
||||
*val += vector.0[i];
|
||||
}
|
||||
res
|
||||
}
|
||||
|
||||
pub(crate) fn mul_vec(&self, vector: &Self) -> Self {
|
||||
debug_assert_eq!(self.len(), vector.len());
|
||||
let mut res = self.clone();
|
||||
for (i, val) in res.0.iter_mut().enumerate() {
|
||||
*val *= vector.0[i];
|
||||
}
|
||||
res
|
||||
}
|
||||
|
||||
pub(crate) fn inner_product(&self, vector: &Self) -> Scalar {
|
||||
self.mul_vec(vector).sum()
|
||||
}
|
||||
|
||||
pub(crate) fn powers(x: Scalar, len: usize) -> Self {
|
||||
debug_assert!(len != 0);
|
||||
|
||||
let mut res = Vec::with_capacity(len);
|
||||
res.push(Scalar::ONE);
|
||||
res.push(x);
|
||||
for i in 2 .. len {
|
||||
res.push(res[i - 1] * x);
|
||||
}
|
||||
res.truncate(len);
|
||||
ScalarVector(res)
|
||||
}
|
||||
|
||||
pub(crate) fn sum(mut self) -> Scalar {
|
||||
self.0.drain(..).sum()
|
||||
}
|
||||
|
||||
pub(crate) fn len(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
|
||||
pub(crate) fn split(mut self) -> (Self, Self) {
|
||||
debug_assert!(self.len() > 1);
|
||||
let r = self.0.split_off(self.0.len() / 2);
|
||||
debug_assert_eq!(self.len(), r.len());
|
||||
(self, ScalarVector(r))
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn weighted_inner_product(
|
||||
a: &ScalarVector,
|
||||
b: &ScalarVector,
|
||||
y: &ScalarVector,
|
||||
) -> Scalar {
|
||||
a.inner_product(&b.mul_vec(y))
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
use std_shims::{sync::OnceLock, vec::Vec};
|
||||
|
||||
use dalek_ff_group::{Scalar, EdwardsPoint};
|
||||
use curve25519_dalek::EdwardsPoint;
|
||||
use dalek_ff_group::Scalar;
|
||||
|
||||
use monero_generators::{hash_to_point as raw_hash_to_point};
|
||||
use crate::{hash, hash_to_scalar as dalek_hash};
|
||||
|
||||
@@ -4,7 +4,7 @@ use rand_core::{RngCore, CryptoRng};
|
||||
|
||||
use zeroize::{Zeroize, ZeroizeOnDrop};
|
||||
|
||||
use multiexp::{multiexp, multiexp_vartime, BatchVerifier};
|
||||
use multiexp::{BatchVerifier, multiexp, multiexp_vartime};
|
||||
use group::{
|
||||
ff::{Field, PrimeField},
|
||||
GroupEncoding,
|
||||
@@ -12,11 +12,10 @@ use group::{
|
||||
use dalek_ff_group::{Scalar, EdwardsPoint};
|
||||
|
||||
use crate::ringct::bulletproofs::plus::{
|
||||
ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, weighted_inner_product,
|
||||
transcript::*,
|
||||
ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, transcript::*,
|
||||
};
|
||||
|
||||
// Figure 1
|
||||
// Figure 1 of the Bulletproofs+ paper
|
||||
#[derive(Clone, Debug)]
|
||||
pub(crate) struct WipStatement {
|
||||
generators: Generators,
|
||||
@@ -219,7 +218,7 @@ impl WipStatement {
|
||||
.zip(g_bold.0.iter().copied())
|
||||
.chain(witness.b.0.iter().copied().zip(h_bold.0.iter().copied()))
|
||||
.collect::<Vec<_>>();
|
||||
P_terms.push((weighted_inner_product(&witness.a, &witness.b, &y), g));
|
||||
P_terms.push((witness.a.clone().weighted_inner_product(&witness.b, &y), g));
|
||||
P_terms.push((witness.alpha, h));
|
||||
debug_assert_eq!(multiexp(&P_terms), P);
|
||||
P_terms.zeroize();
|
||||
@@ -258,14 +257,13 @@ impl WipStatement {
|
||||
let d_l = Scalar::random(&mut *rng);
|
||||
let d_r = Scalar::random(&mut *rng);
|
||||
|
||||
let c_l = weighted_inner_product(&a1, &b2, &y);
|
||||
let c_r = weighted_inner_product(&(a2.mul(y_n_hat)), &b1, &y);
|
||||
let c_l = a1.clone().weighted_inner_product(&b2, &y);
|
||||
let c_r = (a2.clone() * y_n_hat).weighted_inner_product(&b1, &y);
|
||||
|
||||
// TODO: Calculate these with a batch inversion
|
||||
let y_inv_n_hat = y_n_hat.invert().unwrap();
|
||||
|
||||
let mut L_terms = a1
|
||||
.mul(y_inv_n_hat)
|
||||
let mut L_terms = (a1.clone() * y_inv_n_hat)
|
||||
.0
|
||||
.drain(..)
|
||||
.zip(g_bold2.0.iter().copied())
|
||||
@@ -277,8 +275,7 @@ impl WipStatement {
|
||||
L_vec.push(L);
|
||||
L_terms.zeroize();
|
||||
|
||||
let mut R_terms = a2
|
||||
.mul(y_n_hat)
|
||||
let mut R_terms = (a2.clone() * y_n_hat)
|
||||
.0
|
||||
.drain(..)
|
||||
.zip(g_bold1.0.iter().copied())
|
||||
@@ -294,8 +291,8 @@ impl WipStatement {
|
||||
(e, inv_e, e_square, inv_e_square, g_bold, h_bold) =
|
||||
Self::next_G_H(&mut transcript, g_bold1, g_bold2, h_bold1, h_bold2, L, R, y_inv_n_hat);
|
||||
|
||||
a = a1.mul(e).add_vec(&a2.mul(y_n_hat * inv_e));
|
||||
b = b1.mul(inv_e).add_vec(&b2.mul(e));
|
||||
a = (a1 * e) + &(a2 * (y_n_hat * inv_e));
|
||||
b = (b1 * inv_e) + &(b2 * e);
|
||||
alpha += (d_l * e_square) + (d_r * inv_e_square);
|
||||
|
||||
debug_assert_eq!(g_bold.len(), a.len());
|
||||
|
||||
@@ -1,85 +1,17 @@
|
||||
use core::ops::{Add, Sub, Mul, Index};
|
||||
use core::{
|
||||
borrow::Borrow,
|
||||
ops::{Index, IndexMut, Add, Sub, Mul},
|
||||
};
|
||||
use std_shims::vec::Vec;
|
||||
|
||||
use zeroize::{Zeroize, ZeroizeOnDrop};
|
||||
|
||||
use group::ff::Field;
|
||||
use dalek_ff_group::{Scalar, EdwardsPoint};
|
||||
|
||||
use multiexp::multiexp;
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
|
||||
pub(crate) struct ScalarVector(pub(crate) Vec<Scalar>);
|
||||
macro_rules! math_op {
|
||||
($Op: ident, $op: ident, $f: expr) => {
|
||||
#[allow(clippy::redundant_closure_call)]
|
||||
impl $Op<Scalar> for ScalarVector {
|
||||
type Output = ScalarVector;
|
||||
fn $op(self, b: Scalar) -> ScalarVector {
|
||||
ScalarVector(self.0.iter().map(|a| $f((a, &b))).collect())
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::redundant_closure_call)]
|
||||
impl $Op<Scalar> for &ScalarVector {
|
||||
type Output = ScalarVector;
|
||||
fn $op(self, b: Scalar) -> ScalarVector {
|
||||
ScalarVector(self.0.iter().map(|a| $f((a, &b))).collect())
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::redundant_closure_call)]
|
||||
impl $Op<ScalarVector> for ScalarVector {
|
||||
type Output = ScalarVector;
|
||||
fn $op(self, b: ScalarVector) -> ScalarVector {
|
||||
debug_assert_eq!(self.len(), b.len());
|
||||
ScalarVector(self.0.iter().zip(b.0.iter()).map($f).collect())
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::redundant_closure_call)]
|
||||
impl $Op<&ScalarVector> for &ScalarVector {
|
||||
type Output = ScalarVector;
|
||||
fn $op(self, b: &ScalarVector) -> ScalarVector {
|
||||
debug_assert_eq!(self.len(), b.len());
|
||||
ScalarVector(self.0.iter().zip(b.0.iter()).map($f).collect())
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
math_op!(Add, add, |(a, b): (&Scalar, &Scalar)| *a + *b);
|
||||
math_op!(Sub, sub, |(a, b): (&Scalar, &Scalar)| *a - *b);
|
||||
math_op!(Mul, mul, |(a, b): (&Scalar, &Scalar)| *a * *b);
|
||||
|
||||
impl ScalarVector {
|
||||
pub(crate) fn new(len: usize) -> ScalarVector {
|
||||
ScalarVector(vec![Scalar::ZERO; len])
|
||||
}
|
||||
|
||||
pub(crate) fn powers(x: Scalar, len: usize) -> ScalarVector {
|
||||
debug_assert!(len != 0);
|
||||
|
||||
let mut res = Vec::with_capacity(len);
|
||||
res.push(Scalar::ONE);
|
||||
for i in 1 .. len {
|
||||
res.push(res[i - 1] * x);
|
||||
}
|
||||
ScalarVector(res)
|
||||
}
|
||||
|
||||
pub(crate) fn sum(mut self) -> Scalar {
|
||||
self.0.drain(..).sum()
|
||||
}
|
||||
|
||||
pub(crate) fn len(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
|
||||
pub(crate) fn split(self) -> (ScalarVector, ScalarVector) {
|
||||
let (l, r) = self.0.split_at(self.0.len() / 2);
|
||||
(ScalarVector(l.to_vec()), ScalarVector(r.to_vec()))
|
||||
}
|
||||
}
|
||||
|
||||
impl Index<usize> for ScalarVector {
|
||||
type Output = Scalar;
|
||||
@@ -87,28 +19,120 @@ impl Index<usize> for ScalarVector {
|
||||
&self.0[index]
|
||||
}
|
||||
}
|
||||
impl IndexMut<usize> for ScalarVector {
  fn index_mut(&mut self, index: usize) -> &mut Scalar {
    &mut self.0[index]
  }
}

pub(crate) fn inner_product(a: &ScalarVector, b: &ScalarVector) -> Scalar {
  (a * b).sum()
}
impl<S: Borrow<Scalar>> Add<S> for ScalarVector {
  type Output = ScalarVector;
  fn add(mut self, scalar: S) -> ScalarVector {
    for s in &mut self.0 {
      *s += scalar.borrow();
    }
    self
  }
}
impl<S: Borrow<Scalar>> Sub<S> for ScalarVector {
  type Output = ScalarVector;
  fn sub(mut self, scalar: S) -> ScalarVector {
    for s in &mut self.0 {
      *s -= scalar.borrow();
    }
    self
  }
}
impl<S: Borrow<Scalar>> Mul<S> for ScalarVector {
  type Output = ScalarVector;
  fn mul(mut self, scalar: S) -> ScalarVector {
    for s in &mut self.0 {
      *s *= scalar.borrow();
    }
    self
  }
}

impl Add<&ScalarVector> for ScalarVector {
  type Output = ScalarVector;
  fn add(mut self, other: &ScalarVector) -> ScalarVector {
    debug_assert_eq!(self.len(), other.len());
    for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
      *s += o;
    }
    self
  }
}
impl Sub<&ScalarVector> for ScalarVector {
  type Output = ScalarVector;
  fn sub(mut self, other: &ScalarVector) -> ScalarVector {
    debug_assert_eq!(self.len(), other.len());
    for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
      *s -= o;
    }
    self
  }
}
impl Mul<&ScalarVector> for ScalarVector {
  type Output = ScalarVector;
  fn mul(mut self, other: &ScalarVector) -> ScalarVector {
    debug_assert_eq!(self.len(), other.len());
    for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
      *s *= o;
    }
    self
  }
}

impl Mul<&[EdwardsPoint]> for &ScalarVector {
  type Output = EdwardsPoint;
  fn mul(self, b: &[EdwardsPoint]) -> EdwardsPoint {
    debug_assert_eq!(self.len(), b.len());
    multiexp(&self.0.iter().copied().zip(b.iter().copied()).collect::<Vec<_>>())
    let mut multiexp_args = self.0.iter().copied().zip(b.iter().copied()).collect::<Vec<_>>();
    let res = multiexp(&multiexp_args);
    multiexp_args.zeroize();
    res
  }
}

pub(crate) fn hadamard_fold(
  l: &[EdwardsPoint],
  r: &[EdwardsPoint],
  a: Scalar,
  b: Scalar,
) -> Vec<EdwardsPoint> {
  let mut res = Vec::with_capacity(l.len() / 2);
  for i in 0 .. l.len() {
    res.push(multiexp(&[(a, l[i]), (b, r[i])]));
impl ScalarVector {
  pub(crate) fn new(len: usize) -> Self {
    ScalarVector(vec![Scalar::ZERO; len])
  }

  pub(crate) fn powers(x: Scalar, len: usize) -> Self {
    debug_assert!(len != 0);

    let mut res = Vec::with_capacity(len);
    res.push(Scalar::ONE);
    res.push(x);
    for i in 2 .. len {
      res.push(res[i - 1] * x);
    }
    res.truncate(len);
    ScalarVector(res)
  }

  pub(crate) fn len(&self) -> usize {
    self.0.len()
  }

  pub(crate) fn sum(mut self) -> Scalar {
    self.0.drain(..).sum()
  }

  pub(crate) fn inner_product(self, vector: &Self) -> Scalar {
    (self * vector).sum()
  }

  pub(crate) fn weighted_inner_product(self, vector: &Self, y: &Self) -> Scalar {
    (self * vector * y).sum()
  }

  pub(crate) fn split(mut self) -> (Self, Self) {
    debug_assert!(self.len() > 1);
    let r = self.0.split_off(self.0.len() / 2);
    debug_assert_eq!(self.len(), r.len());
    (self, ScalarVector(r))
  }
  res
}
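
A quick illustration of how these helpers compose (a sketch, not part of the diff; it assumes in-crate access to the `ScalarVector` tuple field):

#[cfg(test)]
mod scalar_vector_sketch {
  use curve25519_dalek::scalar::Scalar;
  use super::ScalarVector;

  #[test]
  fn powers_and_inner_product() {
    // powers(x, n) is [1, x, x^2, .., x^(n - 1)]
    let x = Scalar::from(3u8);
    assert_eq!(ScalarVector::powers(x, 4).0[3], Scalar::from(27u8));

    // inner_product is a dot product, so pairing coefficients with powers(x, n)
    // evaluates a polynomial: 2 + 5x at x = 3 is 17
    let coeffs = ScalarVector(vec![Scalar::from(2u8), Scalar::from(5u8)]);
    assert_eq!(coeffs.inner_product(&ScalarVector::powers(x, 2)), Scalar::from(17u8));
  }
}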

@@ -9,50 +9,53 @@ use std_shims::{
use rand_core::{RngCore, CryptoRng};

use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
use subtle::{ConstantTimeEq, Choice, CtOption};
use subtle::{ConstantTimeEq, ConditionallySelectable};

use curve25519_dalek::{
  constants::ED25519_BASEPOINT_TABLE,
  constants::{ED25519_BASEPOINT_TABLE, ED25519_BASEPOINT_POINT},
  scalar::Scalar,
  traits::{IsIdentity, VartimePrecomputedMultiscalarMul},
  traits::{IsIdentity, MultiscalarMul, VartimePrecomputedMultiscalarMul},
  edwards::{EdwardsPoint, VartimeEdwardsPrecomputation},
};

use crate::{
  INV_EIGHT, Commitment, random_scalar, hash_to_scalar, wallet::decoys::Decoys,
  INV_EIGHT, BASEPOINT_PRECOMP, Commitment, random_scalar, hash_to_scalar, wallet::decoys::Decoys,
  ringct::hash_to_point, serialize::*,
};

#[cfg(feature = "multisig")]
mod multisig;
#[cfg(feature = "multisig")]
pub use multisig::{ClsagDetails, ClsagAddendum, ClsagMultisig};
#[cfg(feature = "multisig")]
pub(crate) use multisig::add_key_image_share;
pub(crate) use multisig::{ClsagDetails, ClsagAddendum, ClsagMultisig};

/// Errors returned when CLSAG signing fails.
/// Errors when working with CLSAGs.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "std", derive(thiserror::Error))]
pub enum ClsagError {
  #[cfg_attr(feature = "std", error("internal error ({0})"))]
  InternalError(&'static str),
  /// The ring was invalid (such as being too small or too large).
  #[cfg_attr(feature = "std", error("invalid ring"))]
  InvalidRing,
  /// The specified ring member was invalid (index, ring size).
  #[cfg_attr(feature = "std", error("invalid ring member (member {0}, ring size {1})"))]
  InvalidRingMember(u8, u8),
  /// The commitment opening provided did not match the ring member's.
  #[cfg_attr(feature = "std", error("invalid commitment"))]
  InvalidCommitment,
  /// The key image was invalid (such as being identity or torsioned).
  #[cfg_attr(feature = "std", error("invalid key image"))]
  InvalidImage,
  /// The `D` component was invalid.
  #[cfg_attr(feature = "std", error("invalid D"))]
  InvalidD,
  /// The `s` vector was invalid.
  #[cfg_attr(feature = "std", error("invalid s"))]
  InvalidS,
  /// The `c1` variable was invalid.
  #[cfg_attr(feature = "std", error("invalid c1"))]
  InvalidC1,
}

/// Input being signed for.
/// Context on the ring member being signed for.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
pub struct ClsagInput {
  // The actual commitment for the true spend
@@ -65,7 +68,7 @@ impl ClsagInput {
  pub fn new(commitment: Commitment, decoys: Decoys) -> Result<ClsagInput, ClsagError> {
    let n = decoys.len();
    if n > u8::MAX.into() {
      Err(ClsagError::InternalError("max ring size in this library is u8 max"))?;
      Err(ClsagError::InvalidRing)?;
    }
    let n = u8::try_from(n).unwrap();
    if decoys.i >= n {
@@ -100,8 +103,11 @@ fn core(
) -> ((EdwardsPoint, Scalar, Scalar), Scalar) {
  let n = ring.len();

  let images_precomp = VartimeEdwardsPrecomputation::new([I, D]);
  let D = D * INV_EIGHT();
  let images_precomp = match A_c1 {
    Mode::Sign(..) => None,
    Mode::Verify(..) => Some(VartimeEdwardsPrecomputation::new([I, D])),
  };
  let D_INV_EIGHT = D * INV_EIGHT();

  // Generate the transcript
  // Instead of generating multiple, a single transcript is created and then edited as needed
@@ -130,7 +136,7 @@ fn core(
  }

  to_hash.extend(I.compress().to_bytes());
  to_hash.extend(D.compress().to_bytes());
  to_hash.extend(D_INV_EIGHT.compress().to_bytes());
  to_hash.extend(pseudo_out.compress().to_bytes());
  // mu_P with agg_0
  let mu_P = hash_to_scalar(&to_hash);
@@ -169,37 +175,59 @@ fn core(
  }

  // Perform the core loop
  let mut c1 = CtOption::new(Scalar::ZERO, Choice::from(0));
  let mut c1 = c;
  for i in (start .. end).map(|i| i % n) {
    // This will only execute once and shouldn't need to be constant time. Making it constant time
    // removes the risk of branch prediction creating timing differences depending on ring index
    // however
    c1 = c1.or_else(|| CtOption::new(c, i.ct_eq(&0)));

    let c_p = mu_P * c;
    let c_c = mu_C * c;

    let L = (&s[i] * ED25519_BASEPOINT_TABLE) + (c_p * P[i]) + (c_c * C[i]);
    // (s_i * G) + (c_p * P_i) + (c_c * C_i)
    let L = match A_c1 {
      Mode::Sign(..) => {
        EdwardsPoint::multiscalar_mul([s[i], c_p, c_c], [ED25519_BASEPOINT_POINT, P[i], C[i]])
      }
      Mode::Verify(..) => {
        BASEPOINT_PRECOMP().vartime_mixed_multiscalar_mul([s[i]], [c_p, c_c], [P[i], C[i]])
      }
    };

    let PH = hash_to_point(&P[i]);
    // Shouldn't be an issue as all of the variables in this vartime statement are public
    let R = (s[i] * PH) + images_precomp.vartime_multiscalar_mul([c_p, c_c]);

    // (c_p * I) + (c_c * D) + (s_i * PH)
    let R = match A_c1 {
      Mode::Sign(..) => EdwardsPoint::multiscalar_mul([c_p, c_c, s[i]], [I, D, &PH]),
      Mode::Verify(..) => {
        images_precomp.as_ref().unwrap().vartime_mixed_multiscalar_mul([c_p, c_c], [s[i]], [PH])
      }
    };

    to_hash.truncate(((2 * n) + 3) * 32);
    to_hash.extend(L.compress().to_bytes());
    to_hash.extend(R.compress().to_bytes());
    c = hash_to_scalar(&to_hash);

    // This will only execute once and shouldn't need to be constant time. Making it constant time
    // removes the risk of branch prediction creating timing differences depending on ring index
    // however
    c1.conditional_assign(&c, i.ct_eq(&(n - 1)));
  }

  // This first tuple is needed to continue signing; the latter is the c to be tested/worked with
  ((D, c * mu_P, c * mu_C), c1.unwrap_or(c))
  ((D_INV_EIGHT, c * mu_P, c * mu_C), c1)
}
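
The constant-time `c1` capture above relies on `subtle`'s branchless assignment. A minimal standalone sketch of the pattern (illustrative only; `u64` stands in for `Scalar`):

use subtle::{ConditionallySelectable, ConstantTimeEq};

// Visit every index at a uniform cost, latching the value at `target` without
// a secret-dependent branch: conditional_assign performs the same work whether
// or not the condition holds.
fn latch_at(values: &[u64], target: usize) -> u64 {
  let mut latched = 0;
  for (i, value) in values.iter().enumerate() {
    latched.conditional_assign(value, i.ct_eq(&target));
  }
  latched
}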

/// CLSAG signature, as used in Monero.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Clsag {
  pub D: EdwardsPoint,
  pub s: Vec<Scalar>,
  pub c1: Scalar,
  D: EdwardsPoint,
  pub(crate) s: Vec<Scalar>,
  pub(crate) c1: Scalar,
}

pub(crate) struct ClsagSignCore {
  incomplete_clsag: Clsag,
  pseudo_out: EdwardsPoint,
  key_challenge: Scalar,
  challenged_mask: Scalar,
}

impl Clsag {
@@ -213,28 +241,34 @@ impl Clsag {
    msg: &[u8; 32],
    A: EdwardsPoint,
    AH: EdwardsPoint,
  ) -> (Clsag, EdwardsPoint, Scalar, Scalar) {
  ) -> ClsagSignCore {
    let r: usize = input.decoys.i.into();

    let pseudo_out = Commitment::new(mask, input.commitment.amount).calculate();
    let z = input.commitment.mask - mask;
    let mask_delta = input.commitment.mask - mask;

    let H = hash_to_point(&input.decoys.ring[r][0]);
    let D = H * z;
    let D = H * mask_delta;
    let mut s = Vec::with_capacity(input.decoys.ring.len());
    for _ in 0 .. input.decoys.ring.len() {
      s.push(random_scalar(rng));
    }
    let ((D, p, c), c1) =
    let ((D, c_p, c_c), c1) =
      core(&input.decoys.ring, I, &pseudo_out, msg, &D, &s, &Mode::Sign(r, A, AH));

    (Clsag { D, s, c1 }, pseudo_out, p, c * z)
    ClsagSignCore {
      incomplete_clsag: Clsag { D, s, c1 },
      pseudo_out,
      key_challenge: c_p,
      challenged_mask: c_c * mask_delta,
    }
  }

  /// Generate CLSAG signatures for the given inputs.
  ///
  /// inputs is of the form (private key, key image, input).
  /// sum_outputs is for the sum of the outputs' commitment masks.
  pub fn sign<R: RngCore + CryptoRng>(
  pub(crate) fn sign<R: RngCore + CryptoRng>(
    rng: &mut R,
    mut inputs: Vec<(Zeroizing<Scalar>, EdwardsPoint, ClsagInput)>,
    sum_outputs: Scalar,
@@ -251,7 +285,8 @@ impl Clsag {
      }

      let mut nonce = Zeroizing::new(random_scalar(rng));
      let (mut clsag, pseudo_out, p, c) = Clsag::sign_core(
      let ClsagSignCore { mut incomplete_clsag, pseudo_out, key_challenge, challenged_mask } =
        Clsag::sign_core(
          rng,
          &inputs[i].1,
          &inputs[i].2,
@@ -261,8 +296,14 @@ impl Clsag {
        nonce.deref() *
          hash_to_point(&inputs[i].2.decoys.ring[usize::from(inputs[i].2.decoys.i)][0]),
      );
      clsag.s[usize::from(inputs[i].2.decoys.i)] =
        (-((p * inputs[i].0.deref()) + c)) + nonce.deref();
      // Effectively r - cx, except cx is (c_p x) + (c_c z), where z is the delta between a ring
      // member's commitment and our input commitment (which will only have a known discrete log
      // over G if the amounts cancel out)
      incomplete_clsag.s[usize::from(inputs[i].2.decoys.i)] =
        nonce.deref() - ((key_challenge * inputs[i].0.deref()) + challenged_mask);
      let clsag = incomplete_clsag;

      // Zeroize private keys and nonces.
      inputs[i].0.zeroize();
      nonce.zeroize();

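Why that response closes the ring (a sketch in the hunk's own notation, not text from the diff): at the real index, P_i = x G and the adjusted commitment C_i = z G, so

  s_i G + c_p P_i + c_c C_i = (r - c_p x - c_c z) G + c_p (x G) + c_c (z G) = r G

which reproduces the nonce commitment the next challenge was derived from, letting the hash chain close consistently.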
@@ -292,7 +333,7 @@ impl Clsag {
    if ring.len() != self.s.len() {
      Err(ClsagError::InvalidS)?;
    }
    if I.is_identity() {
    if I.is_identity() || (!I.is_torsion_free()) {
      Err(ClsagError::InvalidImage)?;
    }

@@ -312,12 +353,14 @@ impl Clsag {
    (ring_len * 32) + 32 + 32
  }

  /// Write the CLSAG to a writer.
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    write_raw_vec(write_scalar, &self.s, w)?;
    w.write_all(&self.c1.to_bytes())?;
    write_point(&self.D, w)
  }

  /// Read a CLSAG from a reader.
  pub fn read<R: Read>(decoys: usize, r: &mut R) -> io::Result<Clsag> {
    Ok(Clsag { s: read_raw_vec(read_scalar, decoys, r)?, c1: read_scalar(r)?, D: read_point(r)? })
  }

@@ -1,5 +1,8 @@
use core::{ops::Deref, fmt::Debug};
use std_shims::io::{self, Read, Write};
use std_shims::{
  io::{self, Read, Write},
  collections::HashMap,
};
use std::sync::{Arc, RwLock};

use rand_core::{RngCore, CryptoRng, SeedableRng};
@@ -9,11 +12,13 @@ use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};

use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};

use group::{ff::Field, Group, GroupEncoding};
use group::{
  ff::{Field, PrimeField},
  Group, GroupEncoding,
};

use transcript::{Transcript, RecommendedTranscript};
use dalek_ff_group as dfg;
use dleq::DLEqProof;
use frost::{
  dkg::lagrange,
  curve::Ed25519,
@@ -26,10 +31,6 @@ use crate::ringct::{
  clsag::{ClsagInput, Clsag},
};

fn dleq_transcript() -> RecommendedTranscript {
  RecommendedTranscript::new(b"monero_key_image_dleq")
}

impl ClsagInput {
  fn transcript<T: Transcript>(&self, transcript: &mut T) {
    // Doesn't domain separate as this is considered part of the larger CLSAG proof
@@ -43,6 +44,7 @@ impl ClsagInput {
      // They're just an unreliable reference to this data which will be included in the message
      // if in use
      transcript.append_message(b"member", [u8::try_from(i).expect("ring size exceeded 255")]);
      // This also transcripts the key image generator since it's derived from this key
      transcript.append_message(b"key", pair[0].compress().to_bytes());
      transcript.append_message(b"commitment", pair[1].compress().to_bytes())
    }
@@ -55,13 +57,13 @@ impl ClsagInput {

/// CLSAG input and the mask to use for it.
#[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)]
pub struct ClsagDetails {
pub(crate) struct ClsagDetails {
  input: ClsagInput,
  mask: Scalar,
}

impl ClsagDetails {
  pub fn new(input: ClsagInput, mask: Scalar) -> ClsagDetails {
  pub(crate) fn new(input: ClsagInput, mask: Scalar) -> ClsagDetails {
    ClsagDetails { input, mask }
  }
}
@@ -70,13 +72,11 @@ impl ClsagDetails {
#[derive(Clone, PartialEq, Eq, Zeroize, Debug)]
pub struct ClsagAddendum {
  pub(crate) key_image: dfg::EdwardsPoint,
  dleq: DLEqProof<dfg::EdwardsPoint>,
}

impl WriteAddendum for ClsagAddendum {
  fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(self.key_image.compress().to_bytes().as_ref())?;
    self.dleq.write(writer)
    writer.write_all(self.key_image.compress().to_bytes().as_ref())
  }
}

@@ -93,13 +93,12 @@ struct Interim {
/// FROST algorithm for producing a CLSAG signature.
#[allow(non_snake_case)]
#[derive(Clone, Debug)]
pub struct ClsagMultisig {
pub(crate) struct ClsagMultisig {
  transcript: RecommendedTranscript,

  pub(crate) H: EdwardsPoint,
  // Merged here as CLSAG needs it, passing it would be a mess, yet having it beforehand requires
  // an extra round
  image: EdwardsPoint,
  key_image_shares: HashMap<[u8; 32], dfg::EdwardsPoint>,
  image: Option<dfg::EdwardsPoint>,

  details: Arc<RwLock<Option<ClsagDetails>>>,

@@ -108,7 +107,7 @@ pub struct ClsagMultisig {
}

impl ClsagMultisig {
  pub fn new(
  pub(crate) fn new(
    transcript: RecommendedTranscript,
    output_key: EdwardsPoint,
    details: Arc<RwLock<Option<ClsagDetails>>>,
@@ -117,7 +116,8 @@ impl ClsagMultisig {
      transcript,

      H: hash_to_point(&output_key),
      image: EdwardsPoint::identity(),
      key_image_shares: HashMap::new(),
      image: None,

      details,

@@ -135,20 +135,6 @@ impl ClsagMultisig {
  }
}

pub(crate) fn add_key_image_share(
  image: &mut EdwardsPoint,
  generator: EdwardsPoint,
  offset: Scalar,
  included: &[Participant],
  participant: Participant,
  share: EdwardsPoint,
) {
  if image.is_identity().into() {
    *image = generator * offset;
  }
  *image += share * lagrange::<dfg::Scalar>(participant, included).0;
}

impl Algorithm<Ed25519> for ClsagMultisig {
  type Transcript = RecommendedTranscript;
  type Addendum = ClsagAddendum;
@@ -160,23 +146,10 @@ impl Algorithm<Ed25519> for ClsagMultisig {

  fn preprocess_addendum<R: RngCore + CryptoRng>(
    &mut self,
    rng: &mut R,
    _rng: &mut R,
    keys: &ThresholdKeys<Ed25519>,
  ) -> ClsagAddendum {
    ClsagAddendum {
      key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref(),
      dleq: DLEqProof::prove(
        rng,
        // Doesn't take in a larger transcript object due to the usage of this
        // Every prover would immediately write their own DLEq proof, when they can only do so in
        // the proper order if they want to reach consensus
        // It'd be a poor API to have CLSAG define a new transcript solely to pass here, just to
        // try to merge later in some form, when it should instead just merge xH (as it does)
        &mut dleq_transcript(),
        &[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)],
        keys.secret_share(),
      ),
    }
    ClsagAddendum { key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref() }
  }

  fn read_addendum<R: Read>(&self, reader: &mut R) -> io::Result<ClsagAddendum> {
@@ -190,7 +163,7 @@ impl Algorithm<Ed25519> for ClsagMultisig {
      Err(io::Error::other("non-canonical key image"))?;
    }

    Ok(ClsagAddendum { key_image: xH, dleq: DLEqProof::<dfg::EdwardsPoint>::read(reader)? })
    Ok(ClsagAddendum { key_image: xH })
  }

  fn process_addendum(
@@ -199,32 +172,29 @@ impl Algorithm<Ed25519> for ClsagMultisig {
    l: Participant,
    addendum: ClsagAddendum,
  ) -> Result<(), FrostError> {
    if self.image.is_identity().into() {
    if self.image.is_none() {
      self.transcript.domain_separate(b"CLSAG");
      // Transcript the ring
      self.input().transcript(&mut self.transcript);
      // Transcript the mask
      self.transcript.append_message(b"mask", self.mask().to_bytes());

      // Init the image to the offset
      self.image = Some(dfg::EdwardsPoint(self.H) * view.offset());
    }

    // Transcript this participant's contribution
    self.transcript.append_message(b"participant", l.to_bytes());

    addendum
      .dleq
      .verify(
        &mut dleq_transcript(),
        &[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)],
        &[view.original_verification_share(l), addendum.key_image],
      )
      .map_err(|_| FrostError::InvalidPreprocess(l))?;

    self.transcript.append_message(b"key_image_share", addendum.key_image.compress().to_bytes());
    add_key_image_share(
      &mut self.image,
      self.H,
      view.offset().0,
      view.included(),
      l,
      addendum.key_image.0,
    );

    // Accumulate the interpolated share
    let interpolated_key_image_share =
      addendum.key_image * lagrange::<dfg::Scalar>(l, view.included());
    *self.image.as_mut().unwrap() += interpolated_key_image_share;

    self
      .key_image_shares
      .insert(view.verification_share(l).to_bytes(), interpolated_key_image_share);

    Ok(())
  }
@@ -249,19 +219,24 @@ impl Algorithm<Ed25519> for ClsagMultisig {

    self.msg = Some(msg.try_into().expect("CLSAG message should be 32-bytes"));

    #[allow(non_snake_case)]
    let (clsag, pseudo_out, p, c) = Clsag::sign_core(
    let sign_core = Clsag::sign_core(
      &mut rng,
      &self.image,
      &self.image.expect("verifying a share despite never processing any addendums").0,
      &self.input(),
      self.mask(),
      self.msg.as_ref().unwrap(),
      nonce_sums[0][0].0,
      nonce_sums[0][1].0,
    );
    self.interim = Some(Interim { p, c, clsag, pseudo_out });
    self.interim = Some(Interim {
      p: sign_core.key_challenge,
      c: sign_core.challenged_mask,
      clsag: sign_core.incomplete_clsag,
      pseudo_out: sign_core.pseudo_out,
    });

    (-(dfg::Scalar(p) * view.secret_share().deref())) + nonces[0].deref()
    // r - p x, where p is the challenge for the keys
    *nonces[0] - dfg::Scalar(sign_core.key_challenge) * view.secret_share().deref()
  }

  #[must_use]
@@ -273,11 +248,13 @@ impl Algorithm<Ed25519> for ClsagMultisig {
  ) -> Option<Self::Signature> {
    let interim = self.interim.as_ref().unwrap();
    let mut clsag = interim.clsag.clone();
    // We produced shares as `r - p x`, yet the signature is `r - p x - c x`
    // Subtract `c x` (saved as `c`) now
    clsag.s[usize::from(self.input().decoys.i)] = sum.0 - interim.c;
    if clsag
      .verify(
        &self.input().decoys.ring,
        &self.image,
        &self.image.expect("verifying a signature despite never processing any addendums").0,
        &interim.pseudo_out,
        self.msg.as_ref().unwrap(),
      )
@@ -295,10 +272,61 @@ impl Algorithm<Ed25519> for ClsagMultisig {
    share: dfg::Scalar,
  ) -> Result<Vec<(dfg::Scalar, dfg::EdwardsPoint)>, ()> {
    let interim = self.interim.as_ref().unwrap();
    Ok(vec![

    // For a share `r - p x`, the following two equalities should hold:
    // - `(r - p x)G == R.0 - pV`, where `V = xG`
    // - `(r - p x)H == R.1 - pK`, where `K = xH` (the key image share)
    //
    // This is effectively a discrete log equality proof for:
    // V, K over G, H
    // with nonces
    // R.0, R.1
    // and solution
    // s
    //
    // Which is a batch-verifiable rewrite of the traditional CP93 proof
    // (and also writable as Generalized Schnorr Protocol)
    //
    // That means that given a proper challenge, this alone can be certainly argued to prove the
    // key image share is well-formed and the provided signature so proves for that.

    // This is a bit funky as it doesn't prove the nonces are well-formed however. They're part of
    // the prover data/transcript for a CP93/GSP proof, not part of the statement. This practically
    // is fine, for a variety of reasons (given a consistent `x`, a consistent `r` can be
    // extracted, and the nonces as used in CLSAG are also part of its prover data/transcript).

    let key_image_share = self.key_image_shares[&verification_share.to_bytes()];

    // Hash every variable relevant here, using the hash output as the random weight
    let mut weight_transcript =
      RecommendedTranscript::new(b"monero-serai v0.1 ClsagMultisig::verify_share");
    weight_transcript.append_message(b"G", dfg::EdwardsPoint::generator().to_bytes());
    weight_transcript.append_message(b"H", self.H.to_bytes());
    weight_transcript.append_message(b"xG", verification_share.to_bytes());
    weight_transcript.append_message(b"xH", key_image_share.to_bytes());
    weight_transcript.append_message(b"rG", nonces[0][0].to_bytes());
    weight_transcript.append_message(b"rH", nonces[0][1].to_bytes());
    weight_transcript.append_message(b"c", dfg::Scalar(interim.p).to_repr());
    weight_transcript.append_message(b"s", share.to_repr());
    let weight = weight_transcript.challenge(b"weight");
    let weight = dfg::Scalar(Scalar::from_bytes_mod_order_wide(&weight.into()));

    let part_one = vec![
      (share, dfg::EdwardsPoint::generator()),
      (dfg::Scalar(interim.p), verification_share),
      // -(R.0 - pV) == -R.0 + pV
      (-dfg::Scalar::ONE, nonces[0][0]),
    ])
      (dfg::Scalar(interim.p), verification_share),
    ];

    let mut part_two = vec![
      (weight * share, dfg::EdwardsPoint(self.H)),
      // -(R.1 - pK) == -R.1 + pK
      (-weight, nonces[0][1]),
      (weight * dfg::Scalar(interim.p), key_image_share),
    ];

    let mut all = part_one;
    all.append(&mut part_two);
    Ok(all)
  }
}
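
A standalone sketch of the random-weight batching trick used above (illustrative only; it assumes curve25519-dalek with its `rand_core` feature enabled):

use rand_core::OsRng;
use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint, traits::Identity};

// Each pair claims `claimed == expected`. Weighting every difference by an
// independent random scalar and summing collapses all the checks into one
// identity test: a cheat on any single equation survives only with
// negligible probability.
fn batch_verify(pairs: &[(EdwardsPoint, EdwardsPoint)]) -> bool {
  let mut accum = EdwardsPoint::identity();
  for (claimed, expected) in pairs {
    let weight = Scalar::random(&mut OsRng);
    accum += (claimed - expected) * weight;
  }
  accum == EdwardsPoint::identity()
}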

@@ -11,28 +11,36 @@ use monero_generators::H;

use crate::{hash_to_scalar, ringct::hash_to_point, serialize::*};

/// Errors when working with MLSAGs.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "std", derive(thiserror::Error))]
pub enum MlsagError {
  /// Invalid ring (such as too small or too large).
  #[cfg_attr(feature = "std", error("invalid ring"))]
  InvalidRing,
  /// Invalid amount of key images.
  #[cfg_attr(feature = "std", error("invalid amount of key images"))]
  InvalidAmountOfKeyImages,
  /// Invalid ss matrix.
  #[cfg_attr(feature = "std", error("invalid ss"))]
  InvalidSs,
  #[cfg_attr(feature = "std", error("key image was identity"))]
  IdentityKeyImage,
  /// Invalid key image.
  #[cfg_attr(feature = "std", error("invalid key image"))]
  InvalidKeyImage,
  /// Invalid ci vector.
  #[cfg_attr(feature = "std", error("invalid ci"))]
  InvalidCi,
}

/// A vector of rings, forming a matrix, to verify the MLSAG with.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct RingMatrix {
  matrix: Vec<Vec<EdwardsPoint>>,
}
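
A shape note (mine, not from the diff): the matrix is indexed [member][layer], as sketched below.

// For n ring members signed over a key layer and a commitment layer:
//   matrix[i] = [P_i, C_i]
// Every row must have the same length, and the MLSAG carries one `ss` row of
// scalars per member, matching this shape.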

impl RingMatrix {
  pub fn new(matrix: Vec<Vec<EdwardsPoint>>) -> Result<Self, MlsagError> {
  /// Construct a ring matrix from an already formatted series of points.
  fn new(matrix: Vec<Vec<EdwardsPoint>>) -> Result<Self, MlsagError> {
    // Monero requires that there is more than one ring member for MLSAG signatures:
    // https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/
    // src/ringct/rctSigs.cpp#L462
@@ -60,11 +68,12 @@ impl RingMatrix {
    RingMatrix::new(matrix)
  }

  pub fn iter(&self) -> impl Iterator<Item = &[EdwardsPoint]> {
  /// Iterate the members of the matrix.
  fn iter(&self) -> impl Iterator<Item = &[EdwardsPoint]> {
    self.matrix.iter().map(AsRef::as_ref)
  }

  /// Return the amount of members in the ring.
  /// Returns the amount of members in the ring.
  pub fn members(&self) -> usize {
    self.matrix.len()
  }
@@ -79,13 +88,15 @@ impl RingMatrix {
  }
}

/// The MLSAG linkable ring signature, as used in Monero.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct Mlsag {
  pub ss: Vec<Vec<Scalar>>,
  pub cc: Scalar,
  ss: Vec<Vec<Scalar>>,
  cc: Scalar,
}

impl Mlsag {
  /// Write the MLSAG to a writer.
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    for ss in &self.ss {
      write_raw_vec(write_scalar, ss, w)?;
@@ -93,6 +104,7 @@ impl Mlsag {
    write_scalar(&self.cc, w)
  }

  /// Read the MLSAG from a reader.
  pub fn read<R: Read>(mixins: usize, ss_2_elements: usize, r: &mut R) -> io::Result<Mlsag> {
    Ok(Mlsag {
      ss: (0 .. mixins)
@@ -102,6 +114,7 @@ impl Mlsag {
    })
  }

  /// Verify the MLSAG.
  pub fn verify(
    &self,
    msg: &[u8; 32],
@@ -142,8 +155,8 @@ impl Mlsag {
      // Not all dimensions need to be linkable, e.g. commitments, and only linkable layers need
      // to have key images.
      if let Some(ki) = ki {
        if ki.is_identity() {
          Err(MlsagError::IdentityKeyImage)?;
        if ki.is_identity() || (!ki.is_torsion_free()) {
          Err(MlsagError::InvalidKeyImage)?;
        }

        #[allow(non_snake_case)]
@@ -164,8 +177,9 @@ impl Mlsag {
  }
}

/// An aggregate ring matrix builder, usable to set up the ring matrix to prove/verify an aggregate
/// MLSAG signature.
/// Builder for a RingMatrix when using an aggregate signature.
///
/// This handles the formatting as necessary.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct AggregateRingMatrixBuilder {
  key_ring: Vec<Vec<EdwardsPoint>>,
@@ -206,7 +220,7 @@ impl AggregateRingMatrixBuilder {
    Ok(())
  }

  /// Build and return the [`RingMatrix`]
  /// Build and return the [`RingMatrix`].
  pub fn build(mut self) -> Result<RingMatrix, MlsagError> {
    for (i, amount_commitment) in self.amounts_ring.drain(..).enumerate() {
      self.key_ring[i].push(amount_commitment);

@@ -23,7 +23,7 @@ pub mod bulletproofs;
use crate::{
  Protocol,
  serialize::*,
  ringct::{mlsag::Mlsag, clsag::Clsag, borromean::BorromeanRange, bulletproofs::Bulletproofs},
  ringct::{mlsag::Mlsag, clsag::Clsag, borromean::BorromeanRange, bulletproofs::Bulletproof},
};

/// Generate a key image for a given key. Defined as `x * hash_to_point(xG)`.
@@ -31,6 +31,7 @@ pub fn generate_key_image(secret: &Zeroizing<Scalar>) -> EdwardsPoint {
  hash_to_point(&(ED25519_BASEPOINT_TABLE * secret.deref())) * secret.deref()
}
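
Linkability follows immediately from this definition: the image is a pure function of the key. A trivial sketch (hypothetical in-crate test):

// use zeroize::Zeroizing;
// let secret = Zeroizing::new(random_scalar(&mut OsRng));
// // Same key -> same image, which is what exposes double-spends
// assert_eq!(generate_key_image(&secret), generate_key_image(&secret));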

/// An encrypted amount.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum EncryptedAmount {
  Original { mask: [u8; 32], amount: [u8; 32] },
@@ -38,6 +39,7 @@ pub enum EncryptedAmount {
}

impl EncryptedAmount {
  /// Read an EncryptedAmount from a reader.
  pub fn read<R: Read>(compact: bool, r: &mut R) -> io::Result<EncryptedAmount> {
    Ok(if !compact {
      EncryptedAmount::Original { mask: read_bytes(r)?, amount: read_bytes(r)? }
@@ -46,6 +48,7 @@ impl EncryptedAmount {
    })
  }

  /// Write the EncryptedAmount to a writer.
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    match self {
      EncryptedAmount::Original { mask, amount } => {
@@ -57,6 +60,7 @@ impl EncryptedAmount {
    }
  }

/// The type of the RingCT data.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub enum RctType {
  /// No RCT proofs.
@@ -77,6 +81,18 @@ pub enum RctType {
}

impl RctType {
  /// Convert [`self`] to its byte representation.
  ///
  /// ```rust
  /// # use monero_serai::ringct::*;
  /// assert_eq!(RctType::Null.to_byte(), 0);
  /// assert_eq!(RctType::MlsagAggregate.to_byte(), 1);
  /// assert_eq!(RctType::MlsagIndividual.to_byte(), 2);
  /// assert_eq!(RctType::Bulletproofs.to_byte(), 3);
  /// assert_eq!(RctType::BulletproofsCompactAmount.to_byte(), 4);
  /// assert_eq!(RctType::Clsag.to_byte(), 5);
  /// assert_eq!(RctType::BulletproofsPlus.to_byte(), 6);
  /// ```
  pub fn to_byte(self) -> u8 {
    match self {
      RctType::Null => 0,
@@ -89,6 +105,25 @@ impl RctType {
    }
  }

  /// Create [`Self`] from a byte representation.
  ///
  /// ```rust
  /// # use monero_serai::ringct::*;
  /// assert_eq!(RctType::from_byte(0).unwrap(), RctType::Null);
  /// assert_eq!(RctType::from_byte(1).unwrap(), RctType::MlsagAggregate);
  /// assert_eq!(RctType::from_byte(2).unwrap(), RctType::MlsagIndividual);
  /// assert_eq!(RctType::from_byte(3).unwrap(), RctType::Bulletproofs);
  /// assert_eq!(RctType::from_byte(4).unwrap(), RctType::BulletproofsCompactAmount);
  /// assert_eq!(RctType::from_byte(5).unwrap(), RctType::Clsag);
  /// assert_eq!(RctType::from_byte(6).unwrap(), RctType::BulletproofsPlus);
  /// ```
  ///
  /// # Errors
  /// This function returns [`None`] if the byte representation is invalid.
  /// ```rust
  /// # use monero_serai::ringct::*;
  /// assert_eq!(RctType::from_byte(7), None);
  /// ```
  pub fn from_byte(byte: u8) -> Option<Self> {
    Some(match byte {
      0 => RctType::Null,
@@ -102,22 +137,45 @@ impl RctType {
    })
  }

  /// Returns true if this RctType uses compact encrypted amounts, false otherwise.
  ///
  /// ```rust
  /// # use monero_serai::ringct::*;
  /// assert_eq!(RctType::Null.compact_encrypted_amounts(), false);
  /// assert_eq!(RctType::MlsagAggregate.compact_encrypted_amounts(), false);
  /// assert_eq!(RctType::MlsagIndividual.compact_encrypted_amounts(), false);
  /// assert_eq!(RctType::Bulletproofs.compact_encrypted_amounts(), false);
  /// assert_eq!(RctType::BulletproofsCompactAmount.compact_encrypted_amounts(), true);
  /// assert_eq!(RctType::Clsag.compact_encrypted_amounts(), true);
  /// assert_eq!(RctType::BulletproofsPlus.compact_encrypted_amounts(), true);
  /// ```
  pub fn compact_encrypted_amounts(&self) -> bool {
    match self {
      RctType::Null |
      RctType::MlsagAggregate |
      RctType::MlsagIndividual |
      RctType::Bulletproofs => false,
      RctType::Null
        | RctType::MlsagAggregate
        | RctType::MlsagIndividual
        | RctType::Bulletproofs => false,
      RctType::BulletproofsCompactAmount | RctType::Clsag | RctType::BulletproofsPlus => true,
    }
  }
}

/// The base of the RingCT data.
///
/// This excludes all proofs (which once initially verified do not need to be kept around) and
/// solely keeps data which either impacts the effects of the transactions or is needed to scan it.
///
/// The one exception for this is `pseudo_outs`, which was originally present here yet moved to
/// RctPrunable in a later hard fork (causing it to be present in both).
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct RctBase {
  /// The fee used by this transaction.
  pub fee: u64,
  /// The re-randomized amount commitments used within inputs.
  pub pseudo_outs: Vec<EdwardsPoint>,
  /// The encrypted amounts for the recipient to decrypt.
  pub encrypted_amounts: Vec<EncryptedAmount>,
  /// The output commitments.
  pub commitments: Vec<EdwardsPoint>,
}
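
For orientation (my summary of the write/read pair below, not a doc comment from the diff):

// Wire layout of RctBase (summary, not normative):
//   [rct_type: 1 byte]
//   [fee: VarInt]
//   [pseudo_outs: 32 bytes per input]   -- only for RctType::MlsagIndividual
//   [encrypted_amounts: 8 bytes each if compact_encrypted_amounts(), else 64]
//   [commitments: 32 bytes per output]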

@@ -127,6 +185,7 @@ impl RctBase {
    1 + (outputs * (8 + 32)) + varint_len(fee)
  }

  /// Write the RctBase to a writer.
  pub fn write<W: Write>(&self, w: &mut W, rct_type: RctType) -> io::Result<()> {
    w.write_all(&[rct_type.to_byte()])?;
    match rct_type {
@@ -144,16 +203,17 @@ impl RctBase {
    }
  }

  /// Read a RctBase from a reader.
  pub fn read<R: Read>(inputs: usize, outputs: usize, r: &mut R) -> io::Result<(RctBase, RctType)> {
    let rct_type =
      RctType::from_byte(read_byte(r)?).ok_or_else(|| io::Error::other("invalid RCT type"))?;

    match rct_type {
      RctType::Null | RctType::MlsagAggregate | RctType::MlsagIndividual => {}
      RctType::Bulletproofs |
      RctType::BulletproofsCompactAmount |
      RctType::Clsag |
      RctType::BulletproofsPlus => {
      RctType::Bulletproofs
        | RctType::BulletproofsCompactAmount
        | RctType::Clsag
        | RctType::BulletproofsPlus => {
        if outputs == 0 {
          // Because the Bulletproofs(+) layout must be canonical, there must be 1 Bulletproof if
          // Bulletproofs are in use
@@ -171,12 +231,14 @@ impl RctBase {
    } else {
      RctBase {
        fee: read_varint(r)?,
        // Only read pseudo_outs if they have yet to be moved to RctPrunable
        // TODO: Shouldn't this be any Mlsag*?
        pseudo_outs: if rct_type == RctType::MlsagIndividual {
          read_raw_vec(read_point, inputs, r)?
        } else {
          vec![]
        },
        encrypted_amounts: (0 .. outputs)
        encrypted_amounts: (0..outputs)
          .map(|_| EncryptedAmount::read(rct_type.compact_encrypted_amounts(), r))
          .collect::<Result<_, _>>()?,
        commitments: read_raw_vec(read_point, outputs, r)?,
@@ -187,6 +249,7 @@ impl RctBase {
  }
}

/// The prunable portion of the RingCT data.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum RctPrunable {
  Null,
@@ -199,12 +262,12 @@ pub enum RctPrunable {
    mlsags: Vec<Mlsag>,
  },
  MlsagBulletproofs {
    bulletproofs: Bulletproofs,
    bulletproofs: Bulletproof,
    mlsags: Vec<Mlsag>,
    pseudo_outs: Vec<EdwardsPoint>,
  },
  Clsag {
    bulletproofs: Bulletproofs,
    bulletproofs: Bulletproof,
    clsags: Vec<Clsag>,
    pseudo_outs: Vec<EdwardsPoint>,
  },
@@ -213,10 +276,14 @@ pub enum RctPrunable {
impl RctPrunable {
  pub(crate) fn fee_weight(protocol: Protocol, inputs: usize, outputs: usize) -> usize {
    // 1 byte for number of BPs (technically a VarInt, yet there's always just zero or one)
    1 + Bulletproofs::fee_weight(protocol.bp_plus(), outputs) +
      (inputs * (Clsag::fee_weight(protocol.ring_len()) + 32))
    1 + Bulletproof::fee_weight(protocol.bp_plus(), outputs)
      + (inputs * (Clsag::fee_weight(protocol.ring_len()) + 32))
  }

  /// Serialize [`Self`] into the writer `w`.
  ///
  /// # Errors
  /// This function returns any errors from the writer itself.
  pub fn write<W: Write>(&self, w: &mut W, rct_type: RctType) -> io::Result<()> {
    match self {
      RctPrunable::Null => Ok(()),
@@ -249,12 +316,18 @@ impl RctPrunable {
    }
  }

  /// Serialize [`Self`] into a new byte buffer.
  pub fn serialize(&self, rct_type: RctType) -> Vec<u8> {
    let mut serialized = vec![];
    self.write(&mut serialized, rct_type).unwrap();
    serialized
  }

  /// Create [`Self`] from the reader `r`.
  ///
  /// # Errors
  /// This function returns an error if either the reader failed,
  /// or if the data could not be deserialized into a [`Self`].
  pub fn read<R: Read>(
    rct_type: RctType,
    ring_length: usize,
@@ -281,7 +354,7 @@ impl RctPrunable {
      },
      RctType::MlsagIndividual => RctPrunable::MlsagBorromean {
        borromean: read_raw_vec(BorromeanRange::read, outputs, r)?,
        mlsags: (0 .. inputs).map(|_| Mlsag::read(ring_length, 2, r)).collect::<Result<_, _>>()?,
        mlsags: (0..inputs).map(|_| Mlsag::read(ring_length, 2, r)).collect::<Result<_, _>>()?,
      },
      RctType::Bulletproofs | RctType::BulletproofsCompactAmount => {
        RctPrunable::MlsagBulletproofs {
@@ -294,11 +367,9 @@ impl RctPrunable {
          {
            Err(io::Error::other("n bulletproofs instead of one"))?;
          }
          Bulletproofs::read(r)?
          Bulletproof::read(r)?
        },
        mlsags: (0 .. inputs)
          .map(|_| Mlsag::read(ring_length, 2, r))
          .collect::<Result<_, _>>()?,
        mlsags: (0..inputs).map(|_| Mlsag::read(ring_length, 2, r)).collect::<Result<_, _>>()?,
        pseudo_outs: read_raw_vec(read_point, inputs, r)?,
      }
    }
@@ -307,11 +378,9 @@ impl RctPrunable {
      if read_varint::<_, u64>(r)? != 1 {
        Err(io::Error::other("n bulletproofs instead of one"))?;
      }
      (if rct_type == RctType::Clsag { Bulletproofs::read } else { Bulletproofs::read_plus })(
        r,
      )?
      (if rct_type == RctType::Clsag { Bulletproof::read } else { Bulletproof::read_plus })(r)?
    },
    clsags: (0 .. inputs).map(|_| Clsag::read(ring_length, r)).collect::<Result<_, _>>()?,
    clsags: (0..inputs).map(|_| Clsag::read(ring_length, r)).collect::<Result<_, _>>()?,
    pseudo_outs: read_raw_vec(read_point, inputs, r)?,
  },
})
@@ -320,19 +389,22 @@ impl RctPrunable {
  pub(crate) fn signature_write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    match self {
      RctPrunable::Null => panic!("Serializing RctPrunable::Null for a signature"),
      RctPrunable::AggregateMlsagBorromean { borromean, .. } |
      RctPrunable::MlsagBorromean { borromean, .. } => {
      RctPrunable::AggregateMlsagBorromean { borromean, .. }
        | RctPrunable::MlsagBorromean { borromean, .. } => {
        borromean.iter().try_for_each(|rs| rs.write(w))
      }
      RctPrunable::MlsagBulletproofs { bulletproofs, .. } |
      RctPrunable::Clsag { bulletproofs, .. } => bulletproofs.signature_write(w),
      RctPrunable::MlsagBulletproofs { bulletproofs, .. }
        | RctPrunable::Clsag { bulletproofs, .. } => bulletproofs.signature_write(w),
    }
  }
}

/// RingCT signature data.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct RctSignatures {
  /// The base of the RingCT data.
  pub base: RctBase,
  /// The prunable portion of the RingCT data.
  pub prunable: RctPrunable,
}

@@ -360,7 +432,7 @@ impl RctSignatures {
      }
    }
    RctPrunable::Clsag { bulletproofs, .. } => {
      if matches!(bulletproofs, Bulletproofs::Original { .. }) {
      if matches!(bulletproofs, Bulletproof::Original { .. }) {
        RctType::Clsag
      } else {
        RctType::BulletproofsPlus
@@ -373,18 +445,28 @@ impl RctSignatures {
    RctBase::fee_weight(outputs, fee) + RctPrunable::fee_weight(protocol, inputs, outputs)
  }

  /// Serialize [`Self`] into the writer `w`.
  ///
  /// # Errors
  /// This function returns any errors from the writer itself.
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    let rct_type = self.rct_type();
    self.base.write(w, rct_type)?;
    self.prunable.write(w, rct_type)
  }

  /// Serialize [`Self`] into a new byte buffer.
  pub fn serialize(&self) -> Vec<u8> {
    let mut serialized = vec![];
    self.write(&mut serialized).unwrap();
    serialized
  }

  /// Create [`Self`] from the reader `r` and other data.
  ///
  /// # Errors
  /// This function returns an error if either the reader failed,
  /// or if the data could not be deserialized into a [`Self`].
  pub fn read<R: Read>(
    ring_length: usize,
    inputs: usize,

@@ -34,8 +34,10 @@ pub use http::*;
// src/wallet/wallet2.cpp#L121
const GRACE_BLOCKS_FOR_FEE_ESTIMATE: u64 = 10;

/// An empty marker struct representing an empty response.
#[derive(Deserialize, Debug)]
pub struct EmptyResponse {}
/// A generic JSON-RPC response.
#[derive(Deserialize, Debug)]
pub struct JsonRpcResponse<T> {
  result: T,
@@ -54,6 +56,7 @@ struct TransactionsResponse {
  txs: Vec<TransactionResponse>,
}

/// The response data from an [`Rpc::get_outs`] call.
#[derive(Deserialize, Debug)]
pub struct OutputResponse {
  pub height: usize,
@@ -63,27 +66,41 @@ pub struct OutputResponse {
  txid: String,
}

/// Possible errors that can occur from an RPC call.
///
/// This represents errors on the client side, as well
/// as valid error responses from the server.
#[derive(Clone, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "std", derive(thiserror::Error))]
pub enum RpcError {
  /// There was an internal error.
  #[cfg_attr(feature = "std", error("internal error ({0})"))]
  InternalError(&'static str),
  /// There was a connection error.
  #[cfg_attr(feature = "std", error("connection error ({0})"))]
  ConnectionError(String),
  /// The data response received from the node was invalid.
  #[cfg_attr(feature = "std", error("invalid node ({0})"))]
  InvalidNode(String),
  /// The Monero [`Protocol`] version was invalid.
  #[cfg_attr(feature = "std", error("unsupported protocol version ({0})"))]
  UnsupportedProtocol(usize),
  /// Requested transaction hashes were not found.
  #[cfg_attr(feature = "std", error("transactions not found"))]
  TransactionsNotFound(Vec<[u8; 32]>),
  /// A curve point received from the node was invalid.
  #[cfg_attr(feature = "std", error("invalid point ({0})"))]
  InvalidPoint(String),
  /// The transaction(s) requested were pruned from the node.
  #[cfg_attr(feature = "std", error("pruned transaction"))]
  PrunedTransaction,
  /// An invalid transaction was sent to, or received from, the node.
  #[cfg_attr(feature = "std", error("invalid transaction ({0:?})"))]
  InvalidTransaction([u8; 32]),
  /// The node failed to return a fee.
  #[cfg_attr(feature = "std", error("unexpected fee response"))]
  InvalidFee,
  /// The transaction priority level given was invalid.
  #[cfg_attr(feature = "std", error("invalid priority"))]
  InvalidPriority,
}
@@ -114,12 +131,15 @@ fn read_epee_vi<R: io::Read>(reader: &mut R) -> io::Result<u64> {
    _ => unreachable!(),
  };
  let mut vi = u64::from(vi_start >> 2);
  for i in 1 .. len {
  for i in 1..len {
    vi |= u64::from(read_byte(reader)?) << (((i - 1) * 8) + 6);
  }
  Ok(vi)
}
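
For reference, epee varints store the value shifted left two bits, with the low two bits of the first byte selecting a 1/2/4/8-byte little-endian width. A sketch of the matching writer (my reconstruction; no such helper appears in this diff):

fn write_epee_vi(value: u64) -> Vec<u8> {
  // The encoding can't represent values needing more than 62 bits
  debug_assert!(value < (1 << 62));
  // Pick the smallest width whose payload fits alongside the 2-bit tag
  let (tag, width) = match value {
    v if v < (1 << 6) => (0, 1),
    v if v < (1 << 14) => (1, 2),
    v if v < (1 << 30) => (2, 4),
    _ => (3, 8),
  };
  let packed = (value << 2) | tag;
  packed.to_le_bytes()[.. width].to_vec()
}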

/// A trait representing an RPC connection.
///
/// Note that [`HttpRpc`] already implements this trait.
#[async_trait]
pub trait RpcConnection: Clone + Debug {
  /// Perform a POST request to the specified route with the specified body.
@@ -128,6 +148,7 @@ pub trait RpcConnection: Clone + Debug {
  async fn post(&self, route: &str, body: Vec<u8>) -> Result<Vec<u8>, RpcError>;
}

/// A generic RPC client.
// TODO: Make this provided methods for RpcConnection?
#[derive(Clone, Debug)]
pub struct Rpc<R: RpcConnection>(R);
@@ -202,6 +223,7 @@ impl<R: RpcConnection> Rpc<R> {
    )
  }

  /// Get the node's current block height.
  pub async fn get_height(&self) -> Result<usize, RpcError> {
    #[derive(Deserialize, Debug)]
    struct HeightResponse {
@@ -210,6 +232,7 @@ impl<R: RpcConnection> Rpc<R> {
    Ok(self.rpc_call::<Option<()>, HeightResponse>("get_height", None).await?.height)
  }

  /// Get [`Transaction`]s by their `hashes`.
  pub async fn get_transactions(&self, hashes: &[[u8; 32]]) -> Result<Vec<Transaction>, RpcError> {
    if hashes.is_empty() {
      return Ok(vec![]);
@@ -274,6 +297,7 @@ impl<R: RpcConnection> Rpc<R> {
      .collect()
  }

  /// Get a single [`Transaction`] by its hash.
  pub async fn get_transaction(&self, tx: [u8; 32]) -> Result<Transaction, RpcError> {
    self.get_transactions(&[tx]).await.map(|mut txs| txs.swap_remove(0))
  }
@@ -314,6 +338,7 @@ impl<R: RpcConnection> Rpc<R> {
    Ok(block)
  }

  /// Get a [`Block`] by its height number.
  pub async fn get_block_by_number(&self, number: usize) -> Result<Block, RpcError> {
    #[derive(Deserialize, Debug)]
    struct BlockResponse {
@@ -341,6 +366,7 @@ impl<R: RpcConnection> Rpc<R> {
    }
  }

  /// Get all the [`Transaction`]s belonging to the corresponding block `hash`.
  pub async fn get_block_transactions(&self, hash: [u8; 32]) -> Result<Vec<Transaction>, RpcError> {
    let block = self.get_block(hash).await?;
    let mut res = vec![block.miner_tx];
@@ -405,7 +431,7 @@ impl<R: RpcConnection> Rpc<R> {
    let read_object = |reader: &mut &[u8]| -> io::Result<Vec<u64>> {
      let fields = read_byte(reader)? >> 2;

      for _ in 0 .. fields {
      for _ in 0..fields {
        let name_len = read_byte(reader)?;
        let name = read_raw_vec(read_byte, name_len.into(), reader)?;

@@ -458,7 +484,7 @@ impl<R: RpcConnection> Rpc<R> {
    };

    let mut bytes_res = vec![];
    for _ in 0 .. iters {
    for _ in 0..iters {
      bytes_res.push(f(reader)?);
    }

@@ -478,8 +504,8 @@ impl<R: RpcConnection> Rpc<R> {
    if bytes_res
      .first()
      .ok_or_else(|| io::Error::other("status wasn't a string"))?
      .as_slice() !=
        b"OK"
      .as_slice()
      != b"OK"
    {
      // TODO: Better handle non-OK responses
      Err(io::Error::other("response wasn't OK"))?;
@@ -623,6 +649,7 @@ impl<R: RpcConnection> Rpc<R> {
      .collect()
  }

  /// Get a [`Fee`] based on [`Protocol::v14`] rules.
  async fn get_fee_v14(&self, priority: FeePriority) -> Result<Fee, RpcError> {
    #[derive(Deserialize, Debug)]
    struct FeeResponseV14 {
@@ -702,6 +729,7 @@ impl<R: RpcConnection> Rpc<R> {
    }
  }

  /// Broadcast a [`Transaction`] to the network.
  pub async fn publish_transaction(&self, tx: &Transaction) -> Result<(), RpcError> {
    #[allow(dead_code)]
    #[derive(Deserialize, Debug)]
@@ -730,6 +758,13 @@ impl<R: RpcConnection> Rpc<R> {
    Ok(())
  }

  /// Generate blocks.
  ///
  /// - `address` is the address that will receive the coinbase reward
  /// - `block_count` is the number of blocks that will be generated
  ///
  /// Note this is only for testing with nodes started with `--regtest`, see:
  /// <https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#generateblocks>.
  // TODO: Take &Address, not &str?
  pub async fn generate_blocks(
    &self,

@@ -7,7 +7,8 @@ use multiexp::BatchVerifier;

use crate::{
  Commitment, random_scalar,
  ringct::bulletproofs::{Bulletproofs, original::OriginalStruct},
  ringct::bulletproofs::{Bulletproof, original::OriginalStruct},
  wallet::TransactionError,
};

mod plus;
@@ -18,7 +19,7 @@ fn bulletproofs_vector() {
  let point = |point| decompress_point(point).unwrap();

  // Generated from Monero
  assert!(Bulletproofs::Original(OriginalStruct {
  assert!(Bulletproof::Original(OriginalStruct {
    A: point(hex!("ef32c0b9551b804decdcb107eb22aa715b7ce259bf3c5cac20e24dfa6b28ac71")),
    S: point(hex!("e1285960861783574ee2b689ae53622834eb0b035d6943103f960cd23e063fa0")),
    T1: point(hex!("4ea07735f184ba159d0e0eb662bac8cde3eb7d39f31e567b0fbda3aa23fe5620")),
@@ -70,7 +71,11 @@ macro_rules! bulletproofs_tests {
        .map(|i| Commitment::new(random_scalar(&mut OsRng), u64::try_from(i).unwrap()))
        .collect::<Vec<_>>();

      let bp = Bulletproofs::prove(&mut OsRng, &commitments, $plus).unwrap();
      let bp = if $plus {
        Bulletproof::prove_plus(&mut OsRng, commitments.clone()).unwrap()
      } else {
        Bulletproof::prove(&mut OsRng, &commitments).unwrap()
      };

      let commitments = commitments.iter().map(Commitment::calculate).collect::<Vec<_>>();
      assert!(bp.verify(&mut OsRng, &commitments));
@@ -86,7 +91,15 @@ macro_rules! bulletproofs_tests {
      for _ in 0 .. 17 {
        commitments.push(Commitment::new(Scalar::ZERO, 0));
      }
      assert!(Bulletproofs::prove(&mut OsRng, &commitments, $plus).is_err());
      assert_eq!(
        (if $plus {
          Bulletproof::prove_plus(&mut OsRng, commitments)
        } else {
          Bulletproof::prove(&mut OsRng, &commitments)
        })
        .unwrap_err(),
        TransactionError::TooManyOutputs,
      );
    }
  };
}

@@ -2,7 +2,7 @@ use rand_core::{RngCore, OsRng};

use multiexp::BatchVerifier;
use group::ff::Field;
use dalek_ff_group::{Scalar, EdwardsPoint};
use dalek_ff_group::Scalar;

use crate::{
  Commitment,
@@ -19,9 +19,9 @@ fn test_aggregate_range_proof() {
  for _ in 0 .. m {
    commitments.push(Commitment::new(*Scalar::random(&mut OsRng), OsRng.next_u64()));
  }
  let commitment_points = commitments.iter().map(|com| EdwardsPoint(com.calculate())).collect();
  let commitment_points = commitments.iter().map(Commitment::calculate).collect();
  let statement = AggregateRangeStatement::new(commitment_points).unwrap();
  let witness = AggregateRangeWitness::new(&commitments).unwrap();
  let witness = AggregateRangeWitness::new(commitments).unwrap();

  let proof = statement.clone().prove(&mut OsRng, &witness).unwrap();
  statement.verify(&mut OsRng, &mut verifier, (), proof);

@@ -9,7 +9,6 @@ use dalek_ff_group::{Scalar, EdwardsPoint};
use crate::ringct::bulletproofs::plus::{
  ScalarVector, PointVector, GeneratorsList, Generators,
  weighted_inner_product::{WipStatement, WipWitness},
  weighted_inner_product,
};

#[test]
@@ -68,7 +67,7 @@ fn test_weighted_inner_product() {
  #[allow(non_snake_case)]
  let P = g_bold.multiexp(&a) +
    h_bold.multiexp(&b) +
    (g * weighted_inner_product(&a, &b, &y_vec)) +
    (g * a.clone().weighted_inner_product(&b, &y_vec)) +
    (h * alpha);

  let statement = WipStatement::new(generators, P, y);

@@ -57,7 +57,7 @@ fn clsag() {
  }

  let image = generate_key_image(&secrets.0);
  let (clsag, pseudo_out) = Clsag::sign(
  let (mut clsag, pseudo_out) = Clsag::sign(
    &mut OsRng,
    vec![(
      secrets.0,
@@ -76,7 +76,12 @@ fn clsag() {
    msg,
  )
  .swap_remove(0);

  clsag.verify(&ring, &image, &pseudo_out, &msg).unwrap();

  // make sure verification fails if we throw a random `c1` at it.
  clsag.c1 = random_scalar(&mut OsRng);
  assert!(clsag.verify(&ring, &image, &pseudo_out, &msg).is_err());
}
}

@@ -12,7 +12,7 @@ use crate::{
  Protocol, hash,
  serialize::*,
  ring_signatures::RingSignature,
  ringct::{bulletproofs::Bulletproofs, RctType, RctBase, RctPrunable, RctSignatures},
  ringct::{bulletproofs::Bulletproof, RctType, RctBase, RctPrunable, RctSignatures},
};

#[derive(Clone, PartialEq, Eq, Debug)]
@@ -426,7 +426,7 @@ impl Transaction {
    if !(bp || bp_plus) {
      blob_size
    } else {
      blob_size + Bulletproofs::calculate_bp_clawback(bp_plus, self.prefix.outputs.len()).0
      blob_size + Bulletproof::calculate_bp_clawback(bp_plus, self.prefix.outputs.len()).0
    }
  }
}
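
A note on the clawback term above (my gloss, not from the diff): Bulletproofs(+) aggregate the per-output range proofs into one logarithmically-sized proof, padded to a power of two, so Monero "claws back" most of the size such a transaction avoided when computing its weight, keeping the effective fee roughly proportional to what separate proofs would have cost.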

@@ -34,10 +34,6 @@ impl UnreducedScalar {
    Ok(UnreducedScalar(read_bytes(r)?))
  }

  pub fn as_bytes(&self) -> &[u8; 32] {
    &self.0
  }

  fn as_bits(&self) -> [u8; 256] {
    let mut bits = [0; 256];
    for (i, bit) in bits.iter_mut().enumerate() {

@@ -1,13 +1,5 @@
use std_shims::{vec::Vec, collections::HashSet};

#[cfg(feature = "cache-distribution")]
use std_shims::sync::OnceLock;

#[cfg(all(feature = "cache-distribution", not(feature = "std")))]
use std_shims::sync::Mutex;
#[cfg(all(feature = "cache-distribution", feature = "std"))]
use async_lock::Mutex;

use zeroize::{Zeroize, ZeroizeOnDrop};

use rand_core::{RngCore, CryptoRng};
@@ -29,16 +21,6 @@ const BLOCKS_PER_YEAR: usize = 365 * 24 * 60 * 60 / BLOCK_TIME;
#[allow(clippy::cast_precision_loss)]
const TIP_APPLICATION: f64 = (DEFAULT_LOCK_WINDOW * BLOCK_TIME) as f64;

// TODO: Resolve safety of this in case a reorg occurs/the network changes
// TODO: Update this when scanning a block, as possible
#[cfg(feature = "cache-distribution")]
static DISTRIBUTION_CELL: OnceLock<Mutex<Vec<u64>>> = OnceLock::new();
#[cfg(feature = "cache-distribution")]
#[allow(non_snake_case)]
fn DISTRIBUTION() -> &'static Mutex<Vec<u64>> {
  DISTRIBUTION_CELL.get_or_init(|| Mutex::new(Vec::with_capacity(3000000)))
}
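
The removed cache relies on the OnceLock-guarded-Mutex idiom; a minimal std-only version of the same pattern (illustrative, not from the diff):

use std::sync::{Mutex, OnceLock};

static CACHE: OnceLock<Mutex<Vec<u64>>> = OnceLock::new();

fn cache() -> &'static Mutex<Vec<u64>> {
  // The first caller initializes the cell; every later call returns the same Mutex
  CACHE.get_or_init(|| Mutex::new(Vec::new()))
}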

#[allow(clippy::too_many_arguments)]
async fn select_n<'a, R: RngCore + CryptoRng, RPC: RpcConnection>(
  rng: &mut R,
@@ -158,14 +140,6 @@ async fn select_decoys<R: RngCore + CryptoRng, RPC: RpcConnection>(
  inputs: &[SpendableOutput],
  fingerprintable_canonical: bool,
) -> Result<Vec<Decoys>, RpcError> {
  #[cfg(feature = "cache-distribution")]
  #[cfg(not(feature = "std"))]
  let mut distribution = DISTRIBUTION().lock();
  #[cfg(feature = "cache-distribution")]
  #[cfg(feature = "std")]
  let mut distribution = DISTRIBUTION().lock().await;

  #[cfg(not(feature = "cache-distribution"))]
  let mut distribution = vec![];

  let decoy_count = ring_len - 1;

@@ -10,7 +10,7 @@ use curve25519_dalek::{
};

use crate::{
  hash, hash_to_scalar, serialize::write_varint, ringct::EncryptedAmount, transaction::Input,
  hash, hash_to_scalar, serialize::write_varint, Commitment, ringct::EncryptedAmount, transaction::Input,
};

pub mod extra;
@@ -94,18 +94,21 @@ pub(crate) fn commitment_mask(shared_key: Scalar) -> Scalar {
  hash_to_scalar(&mask)
}

pub(crate) fn amount_encryption(amount: u64, key: Scalar) -> [u8; 8] {
pub(crate) fn compact_amount_encryption(amount: u64, key: Scalar) -> [u8; 8] {
  let mut amount_mask = b"amount".to_vec();
  amount_mask.extend(key.to_bytes());
  (amount ^ u64::from_le_bytes(hash(&amount_mask)[.. 8].try_into().unwrap())).to_le_bytes()
}
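
Because this is a XOR against a key-derived pad, the function is its own inverse; a sketch of the round trip (a hypothetical in-crate test, reusing the names above):

#[test]
fn compact_amount_encryption_round_trips() {
  // Any key works; encryption and decryption are the same XOR
  let key = Scalar::from(42u64);
  let amount = 1337;
  let ciphertext = compact_amount_encryption(amount, key);
  let plaintext = compact_amount_encryption(u64::from_le_bytes(ciphertext), key);
  assert_eq!(u64::from_le_bytes(plaintext), amount);
}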
|
||||
|
||||
// TODO: Move this under EncryptedAmount?
|
||||
fn amount_decryption(amount: &EncryptedAmount, key: Scalar) -> (Scalar, u64) {
|
||||
match amount {
|
||||
impl EncryptedAmount {
|
||||
/// Decrypt an EncryptedAmount into the Commitment it encrypts.
|
||||
///
|
||||
/// The caller must verify the decrypted Commitment matches with the actual Commitment used
|
||||
/// within the Monero protocol.
pub fn decrypt(&self, key: Scalar) -> Commitment {
match self {
// TODO: Add a test vector for this
EncryptedAmount::Original { mask, amount } => {
#[cfg(feature = "experimental")]
{
let mask_shared_sec = hash(key.as_bytes());
let mask =
Scalar::from_bytes_mod_order(*mask) - Scalar::from_bytes_mod_order(mask_shared_sec);
@@ -116,21 +119,14 @@ fn amount_decryption(amount: &EncryptedAmount, key: Scalar) -> (Scalar, u64) {
// d2b from rctTypes.cpp
let amount = u64::from_le_bytes(amount_scalar.to_bytes()[0 .. 8].try_into().unwrap());

(mask, amount)
Commitment::new(mask, amount)
}

#[cfg(not(feature = "experimental"))]
{
let _ = mask;
let _ = amount;
todo!("decrypting a legacy monero transaction's amount")
}
}
EncryptedAmount::Compact { amount } => (
EncryptedAmount::Compact { amount } => Commitment::new(
commitment_mask(key),
u64::from_le_bytes(amount_encryption(u64::from_le_bytes(*amount), key)),
u64::from_le_bytes(compact_amount_encryption(u64::from_le_bytes(*amount), key)),
),
}
}
}

/// The private view key and public spend key, enabling scanning transactions.
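A minimal sketch (not from the diff; the function names and the Keccak-256 choice for `hash` are assumptions) of why the renamed `compact_amount_encryption` serves as both encryption and decryption: the amount is XORed with an 8-byte keystream derived solely from the shared key, and XOR is an involution.

use sha3::{Digest, Keccak256};

fn keccak256(data: &[u8]) -> [u8; 32] {
  Keccak256::digest(data).into()
}

// Derive the 8-byte keystream, domain-separated by the "amount" prefix
fn compact_amount_keystream(key_bytes: [u8; 32]) -> u64 {
  let mut amount_mask = b"amount".to_vec();
  amount_mask.extend(key_bytes);
  u64::from_le_bytes(keccak256(&amount_mask)[.. 8].try_into().unwrap())
}

// XOR with the keystream; applying this twice with the same key is the identity
fn xor_amount(amount: u64, key_bytes: [u8; 32]) -> u64 {
  amount ^ compact_amount_keystream(key_bytes)
}

#[test]
fn compact_amount_roundtrip() {
  let (amount, key_bytes) = (1_000_000_000_000u64, [3; 32]);
  assert_eq!(xor_amount(xor_amount(amount, key_bytes), key_bytes), amount);
}

This is why `EncryptedAmount::Compact` decryption above simply re-applies `compact_amount_encryption` to the ciphertext.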

@@ -17,9 +17,7 @@ use crate::{
transaction::{Input, Timelock, Transaction},
block::Block,
rpc::{RpcError, RpcConnection, Rpc},
wallet::{
PaymentId, Extra, address::SubaddressIndex, Scanner, uniqueness, shared_key, amount_decryption,
},
wallet::{PaymentId, Extra, address::SubaddressIndex, Scanner, uniqueness, shared_key},
};

/// An absolute output ID, defined as its transaction hash and output index.
@@ -427,15 +425,13 @@ impl Scanner {
commitment.amount = amount;
// Regular transaction
} else {
let (mask, amount) = match tx.rct_signatures.base.encrypted_amounts.get(o) {
Some(amount) => amount_decryption(amount, shared_key),
commitment = match tx.rct_signatures.base.encrypted_amounts.get(o) {
Some(amount) => amount.decrypt(shared_key),
// This should never happen, yet it may be possible with miner transactions?
// Using get just decreases the possibility of a panic and lets us move on in that case
None => break,
};

// Rebuild the commitment to verify it
commitment = Commitment::new(mask, amount);
// If this is a malicious commitment, move to the next output
// Any other R value will calculate to a different spend key and are therefore ignorable
if Some(&commitment.calculate()) != tx.rct_signatures.base.commitments.get(o) {

@@ -31,7 +31,7 @@ use crate::{
ringct::{
generate_key_image,
clsag::{ClsagError, ClsagInput, Clsag},
bulletproofs::{MAX_OUTPUTS, Bulletproofs},
bulletproofs::{MAX_OUTPUTS, Bulletproof},
RctBase, RctPrunable, RctSignatures,
},
transaction::{Input, Output, Timelock, TransactionPrefix, Transaction},
@@ -39,7 +39,7 @@ use crate::{
wallet::{
address::{Network, AddressSpec, MoneroAddress},
ViewPair, SpendableOutput, Decoys, PaymentId, ExtraField, Extra, key_image_sort, uniqueness,
shared_key, commitment_mask, amount_encryption,
shared_key, commitment_mask, compact_amount_encryption,
extra::{ARBITRARY_DATA_MARKER, MAX_ARBITRARY_DATA_SIZE},
},
};
@@ -92,7 +92,7 @@ impl SendOutput {
view_tag,
dest: ((&shared_key * ED25519_BASEPOINT_TABLE) + output.0.spend),
commitment: Commitment::new(commitment_mask(shared_key), output.1),
amount: amount_encryption(output.1, shared_key),
amount: compact_amount_encryption(output.1, shared_key),
},
payment_id,
)
@@ -783,7 +783,11 @@ impl SignableTransaction {
let sum = commitments.iter().map(|commitment| commitment.mask).sum();

// Safe due to the constructor checking MAX_OUTPUTS
let bp = Bulletproofs::prove(rng, &commitments, self.protocol.bp_plus()).unwrap();
let bp = if self.protocol.bp_plus() {
Bulletproof::prove_plus(rng, commitments.clone()).unwrap()
} else {
Bulletproof::prove(rng, &commitments).unwrap()
};

// Create the TX extra
let extra = Self::extra(
@@ -932,7 +936,7 @@ impl Eventuality {
return false;
}

// TODO: Remove this when the following for loop is updated
// TODO: Remove this if/when the following for loop is updated to support older TXs
assert!(
rct_type.compact_encrypted_amounts(),
"created an Eventuality for a very old RctType we don't support proving for"

@@ -18,6 +18,7 @@ use transcript::{Transcript, RecommendedTranscript};
use frost::{
curve::Ed25519,
Participant, FrostError, ThresholdKeys,
dkg::lagrange,
sign::{
Writable, Preprocess, CachedPreprocess, SignatureShare, PreprocessMachine, SignMachine,
SignatureMachine, AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine,
@@ -27,7 +28,7 @@ use frost::{
use crate::{
random_scalar,
ringct::{
clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig, add_key_image_share},
clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig},
RctPrunable,
},
transaction::{Input, Transaction},
@@ -261,8 +262,13 @@ impl SignMachine<Transaction> for TransactionSignMachine {
included.push(self.i);
included.sort_unstable();

// Convert the unified commitments to a Vec of the individual commitments
// Start calculating the key images, as needed on the TX level
let mut images = vec![EdwardsPoint::identity(); self.clsags.len()];
for (image, (generator, offset)) in images.iter_mut().zip(&self.key_images) {
*image = generator * offset;
}

// Convert the serialized nonces commitments to a parallelized Vec
let mut commitments = (0 .. self.clsags.len())
.map(|c| {
included
@@ -291,14 +297,7 @@ impl SignMachine<Transaction> for TransactionSignMachine {
// provides the easiest API overall, as this is where the TX is (which needs the key
// images in its message), along with where the outputs are determined (where our
// outputs may need these in order to guarantee uniqueness)
add_key_image_share(
&mut images[c],
self.key_images[c].0,
self.key_images[c].1,
&included,
*l,
preprocess.addendum.key_image.0,
);
images[c] += preprocess.addendum.key_image.0 * lagrange::<dfg::Scalar>(*l, &included).0;

Ok((*l, preprocess))
})
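The one-liner which replaces `add_key_image_share` above is plain Lagrange interpolation at zero. Assuming standard threshold-signing notation (not spelled out in the diff): if participant $l$ holds key share $d_l$ and the addendum carries the key image share $I_l = d_l \cdot H_p(K)$, then for signing set $S$

$$I = \sum_{l \in S} \lambda_l(S) \cdot I_l, \qquad \lambda_l(S) = \prod_{j \in S,\ j \neq l} \frac{j}{j - l},$$

reconstructs the full key image $I = d \cdot H_p(K)$, with `lagrange::<dfg::Scalar>(*l, &included)` supplying $\lambda_l(S)$.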

@@ -88,7 +88,7 @@ async fn from_wallet_rpc_to_self(spec: AddressSpec) {
.unwrap();
let tx_hash = hex::decode(tx.tx_hash).unwrap().try_into().unwrap();

// TODO: Needs https://github.com/monero-project/monero/pull/8882
// TODO: Needs https://github.com/monero-project/monero/pull/9260
// let fee_rate = daemon_rpc
// .get_fee(daemon_rpc.get_protocol().await.unwrap(), FeePriority::Unimportant)
// .await
@@ -107,7 +107,7 @@ async fn from_wallet_rpc_to_self(spec: AddressSpec) {
let tx = daemon_rpc.get_transaction(tx_hash).await.unwrap();
let output = scanner.scan_transaction(&tx).not_locked().swap_remove(0);

// TODO: Needs https://github.com/monero-project/monero/pull/8882
// TODO: Needs https://github.com/monero-project/monero/pull/9260
// runner::check_weight_and_fee(&tx, fee_rate);

match spec {

@@ -18,7 +18,7 @@ workspace = true

[dependencies]
parity-db = { version = "0.4", default-features = false, optional = true }
rocksdb = { version = "0.21", default-features = false, features = ["lz4"], optional = true }
rocksdb = { version = "0.21", default-features = false, features = ["zstd"], optional = true }

[features]
parity-db = ["dep:parity-db"]

@@ -1,42 +1,65 @@
use std::sync::Arc;

use rocksdb::{DBCompressionType, ThreadMode, SingleThreaded, Options, Transaction, TransactionDB};
use rocksdb::{
DBCompressionType, ThreadMode, SingleThreaded, LogLevel, WriteOptions,
Transaction as RocksTransaction, Options, OptimisticTransactionDB,
};

use crate::*;

impl<T: ThreadMode> Get for Transaction<'_, TransactionDB<T>> {
pub struct Transaction<'a, T: ThreadMode>(
RocksTransaction<'a, OptimisticTransactionDB<T>>,
&'a OptimisticTransactionDB<T>,
);

impl<T: ThreadMode> Get for Transaction<'_, T> {
fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
self.get(key).expect("couldn't read from RocksDB via transaction")
self.0.get(key).expect("couldn't read from RocksDB via transaction")
}
}
impl<T: ThreadMode> DbTxn for Transaction<'_, TransactionDB<T>> {
impl<T: ThreadMode> DbTxn for Transaction<'_, T> {
fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
Transaction::put(self, key, value).expect("couldn't write to RocksDB via transaction")
self.0.put(key, value).expect("couldn't write to RocksDB via transaction")
}
fn del(&mut self, key: impl AsRef<[u8]>) {
self.delete(key).expect("couldn't delete from RocksDB via transaction")
self.0.delete(key).expect("couldn't delete from RocksDB via transaction")
}
fn commit(self) {
Transaction::commit(self).expect("couldn't commit to RocksDB via transaction")
self.0.commit().expect("couldn't commit to RocksDB via transaction");
self.1.flush_wal(true).expect("couldn't flush RocksDB WAL");
self.1.flush().expect("couldn't flush RocksDB");
}
}

impl<T: ThreadMode> Get for Arc<TransactionDB<T>> {
impl<T: ThreadMode> Get for Arc<OptimisticTransactionDB<T>> {
fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
TransactionDB::get(self, key).expect("couldn't read from RocksDB")
OptimisticTransactionDB::get(self, key).expect("couldn't read from RocksDB")
}
}
impl<T: ThreadMode + 'static> Db for Arc<TransactionDB<T>> {
type Transaction<'a> = Transaction<'a, TransactionDB<T>>;
impl<T: Send + ThreadMode + 'static> Db for Arc<OptimisticTransactionDB<T>> {
type Transaction<'a> = Transaction<'a, T>;
fn txn(&mut self) -> Self::Transaction<'_> {
self.transaction()
let mut opts = WriteOptions::default();
opts.set_sync(true);
Transaction(self.transaction_opt(&opts, &Default::default()), &**self)
}
}

pub type RocksDB = Arc<TransactionDB<SingleThreaded>>;
pub type RocksDB = Arc<OptimisticTransactionDB<SingleThreaded>>;
pub fn new_rocksdb(path: &str) -> RocksDB {
let mut options = Options::default();
options.create_if_missing(true);
options.set_compression_type(DBCompressionType::Lz4);
Arc::new(TransactionDB::open(&options, &Default::default(), path).unwrap())
options.set_compression_type(DBCompressionType::Zstd);

options.set_wal_compression_type(DBCompressionType::Zstd);
// 10 MB
options.set_max_total_wal_size(10 * 1024 * 1024);
options.set_wal_size_limit_mb(10);

options.set_log_level(LogLevel::Warn);
// 1 MB
options.set_max_log_file_size(1024 * 1024);
options.set_recycle_log_file_num(1);

Arc::new(OptimisticTransactionDB::open(&options, path).unwrap())
}
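A hypothetical usage sketch of the rewritten backend (the path and key/value are illustrative, and it assumes the crate's `Db`, `DbTxn`, and `Get` traits are in scope). Every commit now performs a synchronous write plus WAL and memtable flushes, trading write throughput for durability:

fn rocksdb_example() {
  let mut db = new_rocksdb("./serai-db");

  let mut txn = db.txn();
  txn.put(b"key", b"value");
  // Reads through the transaction observe its own uncommitted writes
  assert_eq!(txn.get(b"key"), Some(b"value".to_vec()));
  // commit() runs the synchronous write, then flush_wal(true) and flush()
  txn.commit();

  // Reads through the DB handle observe the committed value
  assert_eq!(db.get(b"key"), Some(b"value".to_vec()));
}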

@@ -23,7 +23,7 @@ hyper-util = { version = "0.1", default-features = false, features = ["http1", "
http-body-util = { version = "0.1", default-features = false }
tokio = { version = "1", default-features = false }

hyper-rustls = { version = "0.26", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }
hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }

zeroize = { version = "1", optional = true }
base64ct = { version = "1", features = ["alloc"], optional = true }

@@ -836,8 +836,8 @@ async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
) {
let mut tributaries = HashMap::new();
'outer: loop {
// TODO: Create a better async flow for this, as this does still hammer this task
tokio::task::yield_now().await;
// TODO: Create a better async flow for this
tokio::time::sleep(core::time::Duration::from_millis(100)).await;

match tributary_event.try_recv() {
Ok(event) => match event {
@@ -1292,6 +1292,7 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
p2p.clone(),
cosign_channel.clone(),
tributary_event_listener_4,
<Ristretto as Ciphersuite>::generator() * key.deref(),
));

// Handle all messages from processors

@@ -2,13 +2,15 @@ use core::{time::Duration, fmt};
use std::{
sync::Arc,
io::Read,
collections::HashMap,
collections::{HashSet, HashMap},
time::{SystemTime, Instant},
};

use async_trait::async_trait;
use rand_core::{RngCore, OsRng};

use ciphersuite::{Ciphersuite, Ristretto};

use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};
use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet, Serai};
@@ -290,6 +292,145 @@ impl LibP2p {
IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode())))
}

// The addrs we're currently dialing, and the networks associated with them
let dialing_peers = Arc::new(RwLock::new(HashMap::new()));
// The peers we're currently connected to, and the networks associated with them
let connected_peers = Arc::new(RwLock::new(HashMap::<Multiaddr, HashSet<NetworkId>>::new()));

// Find and connect to peers
let (connect_to_network_send, mut connect_to_network_recv) =
tokio::sync::mpsc::unbounded_channel();
let (to_dial_send, mut to_dial_recv) = tokio::sync::mpsc::unbounded_channel();
tokio::spawn({
let dialing_peers = dialing_peers.clone();
let connected_peers = connected_peers.clone();

let connect_to_network_send = connect_to_network_send.clone();
async move {
loop {
let connect = |network: NetworkId, addr: Multiaddr| {
let dialing_peers = dialing_peers.clone();
let connected_peers = connected_peers.clone();
let to_dial_send = to_dial_send.clone();
let connect_to_network_send = connect_to_network_send.clone();
async move {
log::info!("found peer from substrate: {addr}");

let protocols = addr.iter().filter_map(|piece| match piece {
// Drop PeerIds from the Substrate P2p network
Protocol::P2p(_) => None,
// Use our own TCP port
Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)),
other => Some(other),
});

let mut new_addr = Multiaddr::empty();
for protocol in protocols {
new_addr.push(protocol);
}
let addr = new_addr;
log::debug!("transformed found peer: {addr}");

let (is_fresh_dial, nets) = {
let mut dialing_peers = dialing_peers.write().await;
let is_fresh_dial = !dialing_peers.contains_key(&addr);
if is_fresh_dial {
dialing_peers.insert(addr.clone(), HashSet::new());
}
// Associate this network with this peer
dialing_peers.get_mut(&addr).unwrap().insert(network);

let nets = dialing_peers.get(&addr).unwrap().clone();
(is_fresh_dial, nets)
};

// Spawn a task to remove this peer from 'dialing' in sixty seconds, in case dialing
// fails
// This performs cleanup and bounds the size of the map to whatever growth occurs
// within a temporal window
tokio::spawn({
let dialing_peers = dialing_peers.clone();
let connected_peers = connected_peers.clone();
let connect_to_network_send = connect_to_network_send.clone();
let addr = addr.clone();
async move {
tokio::time::sleep(core::time::Duration::from_secs(60)).await;
let mut dialing_peers = dialing_peers.write().await;
if let Some(expected_nets) = dialing_peers.remove(&addr) {
log::debug!("removed addr from dialing upon timeout: {addr}");

// TODO: De-duplicate this below instance
// If we failed to dial and haven't gotten enough actual connections, retry
let connected_peers = connected_peers.read().await;
for net in expected_nets {
let mut remaining_peers = 0;
for nets in connected_peers.values() {
if nets.contains(&net) {
remaining_peers += 1;
}
}
// If we do not, start connecting to this network again
if remaining_peers < 3 {
connect_to_network_send.send(net).expect(
"couldn't send net to connect to due to disconnects (receiver dropped?)",
);
}
}
}
}
});

if is_fresh_dial {
to_dial_send.send((addr, nets)).unwrap();
}
}
};

// TODO: We should also connect to random peers from random nets as needed for
// cosigning

// Define a buffer, `to_retry`, so we can exhaust this channel before sending more down
// it
let mut to_retry = vec![];
while let Some(network) = connect_to_network_recv.recv().await {
if let Ok(mut nodes) = serai.p2p_validators(network).await {
// If there's an insufficient amount of nodes known, connect to all yet add it
// back and break
if nodes.len() < 3 {
log::warn!(
"insufficient amount of P2P nodes known for {:?}: {}",
network,
nodes.len()
);
to_retry.push(network);
for node in nodes {
connect(network, node).await;
}
continue;
}

// Randomly select up to 5
for _ in 0 .. 5 {
if !nodes.is_empty() {
let to_connect = nodes.swap_remove(
usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap())
.unwrap(),
);
connect(network, to_connect).await;
}
}
}
}
for to_retry in to_retry {
connect_to_network_send.send(to_retry).unwrap();
}
// Sleep 60 seconds before moving to the next iteration
tokio::time::sleep(core::time::Duration::from_secs(60)).await;
}
}
});
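The random selection above draws peers without replacement by `swap_remove`ing a uniformly chosen index; a self-contained sketch of the same pattern (the function name is illustrative):

use rand_core::{RngCore, OsRng};

// Draw up to 5 distinct peers, mirroring the loop above
// (the modulo bias is negligible for small lists)
fn sample_up_to_five<T>(mut nodes: Vec<T>) -> Vec<T> {
  let mut selected = Vec::new();
  for _ in 0 .. 5 {
    if nodes.is_empty() {
      break;
    }
    let i = usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap()).unwrap();
    selected.push(nodes.swap_remove(i));
  }
  selected
}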

// Manage the actual swarm
tokio::spawn({
let mut time_of_last_p2p_message = Instant::now();

@@ -320,67 +461,10 @@ impl LibP2p {
}

async move {
let connected_peers = connected_peers.clone();

let mut set_for_genesis = HashMap::new();
let mut pending_p2p_connections = vec![];
// Run this task ad-infinitum
loop {
// Handle pending P2P connections
// TODO: Break this out onto its own task with better peer management logic?
{
let mut connect = |addr: Multiaddr| {
log::info!("found peer from substrate: {addr}");

let protocols = addr.iter().filter_map(|piece| match piece {
// Drop PeerIds from the Substrate P2p network
Protocol::P2p(_) => None,
// Use our own TCP port
Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)),
other => Some(other),
});

let mut new_addr = Multiaddr::empty();
for protocol in protocols {
new_addr.push(protocol);
}
let addr = new_addr;
log::debug!("transformed found peer: {addr}");

if let Err(e) = swarm.dial(addr) {
log::warn!("dialing peer failed: {e:?}");
}
};

while let Some(network) = pending_p2p_connections.pop() {
if let Ok(mut nodes) = serai.p2p_validators(network).await {
// If there's an insufficient amount of nodes known, connect to all yet add it back
// and break
if nodes.len() < 3 {
log::warn!(
"insufficient amount of P2P nodes known for {:?}: {}",
network,
nodes.len()
);
pending_p2p_connections.push(network);
for node in nodes {
connect(node);
}
break;
}

// Randomly select up to 5
for _ in 0 .. 5 {
if !nodes.is_empty() {
let to_connect = nodes.swap_remove(
usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap())
.unwrap(),
);
connect(to_connect);
}
}
}
}
}

let time_since_last = Instant::now().duration_since(time_of_last_p2p_message);
tokio::select! {
biased;
@@ -392,7 +476,7 @@ impl LibP2p {
let topic = topic_for_set(set);
if subscribe {
log::info!("subscribing to p2p messages for {set:?}");
pending_p2p_connections.push(set.network);
connect_to_network_send.send(set.network).unwrap();
set_for_genesis.insert(genesis, set);
swarm.behaviour_mut().gossipsub.subscribe(&topic).unwrap();
} else {
@@ -421,13 +505,72 @@ impl LibP2p {
Some(SwarmEvent::Dialing { connection_id, .. }) => {
log::debug!("dialing to peer in connection ID {}", &connection_id);
}
Some(SwarmEvent::ConnectionEstablished { peer_id, connection_id, .. }) => {
Some(SwarmEvent::ConnectionEstablished {
peer_id,
connection_id,
endpoint,
..
}) => {
if &peer_id == swarm.local_peer_id() {
log::warn!("established a libp2p connection to ourselves");
swarm.close_connection(connection_id);
continue;
}

let addr = endpoint.get_remote_address();
let nets = {
let mut dialing_peers = dialing_peers.write().await;
if let Some(nets) = dialing_peers.remove(addr) {
nets
} else {
log::debug!("connected to a peer who we didn't have within dialing");
HashSet::new()
}
};
{
let mut connected_peers = connected_peers.write().await;
connected_peers.insert(addr.clone(), nets);

log::debug!(
"connection established to peer {} in connection ID {}",
"connection established to peer {} in connection ID {}, connected peers: {}",
&peer_id,
&connection_id,
connected_peers.len(),
);
}
}
Some(SwarmEvent::ConnectionClosed { peer_id, endpoint, .. }) => {
let mut connected_peers = connected_peers.write().await;
let Some(nets) = connected_peers.remove(endpoint.get_remote_address()) else {
log::debug!("closed connection to peer which wasn't in connected_peers");
continue;
};
// Downgrade to a read lock
let connected_peers = connected_peers.downgrade();

// For each net we lost a peer for, check if we still have sufficient peers
// overall
for net in nets {
let mut remaining_peers = 0;
for nets in connected_peers.values() {
if nets.contains(&net) {
remaining_peers += 1;
}
}
// If we do not, start connecting to this network again
if remaining_peers < 3 {
connect_to_network_send
.send(net)
.expect(
"couldn't send net to connect to due to disconnects (receiver dropped?)"
);
}
}

log::debug!(
"connection with peer {peer_id} closed, connected peers: {}",
connected_peers.len(),
);
swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer_id)
}
Some(SwarmEvent::Behaviour(BehaviorEvent::Gossipsub(
GsEvent::Message { propagation_source, message, .. },
@@ -440,6 +583,24 @@ impl LibP2p {
}
}

// Handle peers to dial
addr_and_nets = to_dial_recv.recv() => {
let (addr, nets) =
addr_and_nets.expect("received address was None (sender dropped?)");
// If we've already dialed and connected to this address, don't further dial them
// Just associate these networks with them
if let Some(existing_nets) = connected_peers.write().await.get_mut(&addr) {
for net in nets {
existing_nets.insert(net);
}
continue;
}

if let Err(e) = swarm.dial(addr) {
log::warn!("dialing peer failed: {e:?}");
}
}

// If it's been >80s since we've published a message, publish a KeepAlive since we're
// still an active service
// This is useful when we have no active tributaries and accordingly aren't sending
@@ -517,6 +678,17 @@ impl TributaryP2p for LibP2p {
}
}

fn heartbeat_time_unit<D: Db, P: P2p>() -> u64 {
// Also include the timestamp so LibP2p doesn't flag this as an old message re-circulating
let timestamp = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.expect("system clock is wrong")
.as_secs();
// Divide by the block time so if multiple parties send a Heartbeat, they're more likely to
// overlap
timestamp / u64::from(Tributary::<D, Transaction, P>::block_time())
}
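As a worked example: with the 6-second tributary block time (see the `assert_target_block_time` test later in this diff), Unix timestamps 600 through 605 all divide to time unit 100, so heartbeats sent by different validators within the same block window carry identical time units and deduplicate at the gossip layer.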

pub async fn heartbeat_tributaries_task<D: Db, P: P2p>(
p2p: P,
mut tributary_event: broadcast::Receiver<TributaryEvent<D, P>>,
@@ -551,14 +723,7 @@ pub async fn heartbeat_tributaries_task<D: Db, P: P2p>(
if SystemTime::now() > (block_time + Duration::from_secs(60)) {
log::warn!("last known tributary block was over a minute ago");
let mut msg = tip.to_vec();
// Also include the timestamp so LibP2p doesn't flag this as an old message re-circulating
let timestamp = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.expect("system clock is wrong")
.as_secs();
// Divide by the block time so if multiple parties send a Heartbeat, they're more likely to
// overlap
let time_unit = timestamp / u64::from(Tributary::<D, Transaction, P>::block_time());
let time_unit = heartbeat_time_unit::<D, P>();
msg.extend(time_unit.to_le_bytes());
P2p::broadcast(&p2p, P2pMessageKind::Heartbeat(tributary.genesis()), msg).await;
}
@@ -573,6 +738,7 @@ pub async fn handle_p2p_task<D: Db, P: P2p>(
p2p: P,
cosign_channel: mpsc::UnboundedSender<CosignedBlock>,
mut tributary_event: broadcast::Receiver<TributaryEvent<D, P>>,
our_key: <Ristretto as Ciphersuite>::G,
) {
let channels = Arc::new(RwLock::new(HashMap::<_, mpsc::UnboundedSender<Message<P>>>::new()));
tokio::spawn({
@@ -592,10 +758,13 @@ pub async fn handle_p2p_task<D: Db, P: P2p>(
// Subscribe to the topic for this tributary
p2p.subscribe(tributary.spec.set(), genesis).await;

let spec_set = tributary.spec.set();

// Per-Tributary P2P message handler
tokio::spawn({
let p2p = p2p.clone();
async move {
let mut last_replied_to_heartbeat = 0;
loop {
let Some(mut msg) = recv.recv().await else {
// Channel closure happens when the tributary retires
@@ -606,7 +775,7 @@ pub async fn handle_p2p_task<D: Db, P: P2p>(

P2pMessageKind::Tributary(msg_genesis) => {
assert_eq!(msg_genesis, genesis);
log::trace!("handling message for tributary {:?}", tributary.spec.set());
log::trace!("handling message for tributary {:?}", spec_set);
if tributary.tributary.handle_message(&msg.msg).await {
P2p::broadcast(&p2p, msg.kind, msg.msg).await;
}
@@ -618,29 +787,36 @@ pub async fn handle_p2p_task<D: Db, P: P2p>(
// them?
P2pMessageKind::Heartbeat(msg_genesis) => {
assert_eq!(msg_genesis, genesis);

let current_time_unit = heartbeat_time_unit::<D, P>();
if current_time_unit.saturating_sub(last_replied_to_heartbeat) < 10 {
continue;
}

if msg.msg.len() != 40 {
log::error!("validator sent invalid heartbeat");
continue;
}
// Only respond to recent heartbeats
let msg_time_unit = u64::from_le_bytes(msg.msg[32 .. 40].try_into().expect(
"length-checked heartbeat message didn't have 8 bytes for the u64",
));
if current_time_unit.saturating_sub(msg_time_unit) > 1 {
continue;
}

// This is the network's last replied to, not ours specifically
last_replied_to_heartbeat = current_time_unit;

let p2p = p2p.clone();
let spec = tributary.spec.clone();
let reader = tributary.tributary.reader();
// Spawn a dedicated task as this may require loading large amounts of data
// from disk and take a notable amount of time
tokio::spawn(async move {
/*

// Have sqrt(n) nodes reply with the blocks
let mut responders = (tributary.spec.n() as f32).sqrt().floor() as u64;
#[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)]
let mut responders = f32::from(tributary.spec.n(&[])).sqrt().floor() as u64;
// Try to have at least 3 responders
if responders < 3 {
responders = tributary.spec.n().min(3).into();
responders = tributary.spec.n(&[]).min(3).into();
}
*/

/*
// Have up to three nodes respond
let responders = u64::from(spec.n().min(3));

// Decide which nodes will respond by using the latest block's hash as a
// mutually agreed upon entropy source
@@ -649,39 +825,46 @@ pub async fn handle_p2p_task<D: Db, P: P2p>(
// If n = 10, responders = 3, we want `start` to be 0 ..= 7
// (so the highest is 7, 8, 9)
// entropy % (10 + 1) - 3 = entropy % 8 = 0 ..= 7
let start =
usize::try_from(entropy % (u64::from(spec.n() + 1) - responders))
let start = usize::try_from(
entropy % (u64::from(tributary.spec.n(&[]) + 1) - responders),
)
.unwrap();
let mut selected = false;
for validator in &spec.validators()
for validator in &tributary.spec.validators()
[start .. (start + usize::try_from(responders).unwrap())]
{
if our_key == validator.0 {
selected = true;
break;
continue;
}
}
if !selected {
log::debug!("received heartbeat and not selected to respond");
return;
continue;
}

log::debug!("received heartbeat and selected to respond");
*/

// Have every node respond
// While we could only have a subset respond, LibP2P will sync all messages
// it isn't aware of
// It's cheaper to be aware from our disk than from over the network
let p2p = p2p.clone();
// Spawn a dedicated task as this may require loading large amounts of data
// from disk and take a notable amount of time
tokio::spawn(async move {
// Have the selected nodes respond
// TODO: Spawn a dedicated topic for this heartbeat response?
let mut latest = msg.msg[.. 32].try_into().unwrap();
let mut to_send = vec![];
while let Some(next) = reader.block_after(&latest) {
to_send.push(next);
latest = next;
}
if to_send.len() > 3 {
for next in to_send {
let mut res = reader.block(&next).unwrap().serialize();
res.extend(reader.commit(&next).unwrap());
// Also include the timestamp used within the Heartbeat
res.extend(&msg.msg[32 .. 40]);
p2p.send(msg.sender, P2pMessageKind::Block(spec.genesis()), res).await;
latest = next;
p2p.send(msg.sender, P2pMessageKind::Block(genesis), res).await;
}
}
});
}

@@ -41,8 +41,9 @@ enum HasEvents {

create_db!(
SubstrateCosignDb {
ScanCosignFrom: () -> u64,
IntendedCosign: () -> (u64, Option<u64>),
BlockHasEvents: (block: u64) -> HasEvents,
BlockHasEventsCache: (block: u64) -> HasEvents,
LatestCosignedBlock: () -> u64,
}
);
@@ -85,7 +86,7 @@ async fn block_has_events(
serai: &Serai,
block: u64,
) -> Result<HasEvents, SeraiError> {
let cached = BlockHasEvents::get(txn, block);
let cached = BlockHasEventsCache::get(txn, block);
match cached {
None => {
let serai = serai.as_of(
@@ -107,8 +108,8 @@ async fn block_has_events(

let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes };

BlockHasEvents::set(txn, block, &has_events);
Ok(HasEvents::Yes)
BlockHasEventsCache::set(txn, block, &has_events);
Ok(has_events)
}
Some(code) => Ok(code),
}
@@ -135,6 +136,7 @@ async fn potentially_cosign_block(
if (block_has_events == HasEvents::No) &&
(LatestCosignedBlock::latest_cosigned_block(txn) == (block - 1))
{
log::debug!("automatically co-signing next block ({block}) since it has no events");
LatestCosignedBlock::set(txn, &block);
}

@@ -178,7 +180,7 @@ async fn potentially_cosign_block(
which should be cosigned). Accordingly, it is necessary to call multiple times even if
`latest_number` doesn't change.
*/
pub async fn advance_cosign_protocol(
async fn advance_cosign_protocol_inner(
db: &mut impl Db,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
serai: &Serai,
@@ -203,16 +205,23 @@ pub async fn advance_cosign_protocol(
let mut window_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE;
// If we've never triggered a cosign, don't skip any cosigns based on proximity
if last_intended_to_cosign_block == INITIAL_INTENDED_COSIGN {
window_end_exclusive = 0;
window_end_exclusive = 1;
}

// The consensus rules for this are `last_intended_to_cosign_block + 1`
let scan_start_block = last_intended_to_cosign_block + 1;
// As a practical optimization, we don't re-scan old blocks since old blocks are independent to
// new state
let scan_start_block = scan_start_block.max(ScanCosignFrom::get(&txn).unwrap_or(1));

// Check all blocks within the window to see if they should be cosigned
// If so, we're skipping them and need to flag them as skipped so that once the window closes, we
// do cosign them
// We only perform this check if we haven't already marked a block as skipped since the cosign
// the skipped block will cause will cosign all other blocks within this window
if skipped_block.is_none() {
for b in (last_intended_to_cosign_block + 1) .. window_end_exclusive.min(latest_number) {
let window_end_inclusive = window_end_exclusive - 1;
for b in scan_start_block ..= window_end_inclusive.min(latest_number) {
if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes {
skipped_block = Some(b);
log::debug!("skipping cosigning {b} due to proximity to prior cosign");
@@ -227,7 +236,7 @@ pub async fn advance_cosign_protocol(
// A list of sets which are cosigning, along with a boolean of if we're in the set
let mut cosigning = vec![];

for block in (last_intended_to_cosign_block + 1) ..= latest_number {
for block in scan_start_block ..= latest_number {
let actual_block = serai
.finalized_block_by_number(block)
.await?
@@ -276,6 +285,11 @@ pub async fn advance_cosign_protocol(

break;
}

// If this TX is committed, always start future scanning from the next block
ScanCosignFrom::set(&mut txn, &(block + 1));
// Since we're scanning *from* the next block, tidy the cache
BlockHasEventsCache::del(&mut txn, block);
}

if let Some((number, hash)) = to_cosign {
@@ -297,3 +311,22 @@ pub async fn advance_cosign_protocol(

Ok(())
}

pub async fn advance_cosign_protocol(
db: &mut impl Db,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
serai: &Serai,
latest_number: u64,
) -> Result<(), SeraiError> {
loop {
let scan_from = ScanCosignFrom::get(db).unwrap_or(1);
// Only scan 1000 blocks at a time to limit a massive txn from forming
let scan_to = latest_number.min(scan_from + 1000);
advance_cosign_protocol_inner(db, key, serai, scan_to).await?;
// If we didn't limit the scan_to, break
if scan_to == latest_number {
break;
}
}
Ok(())
}
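A worked pass through the batching, assuming each inner call scans its full range: with `ScanCosignFrom` unset and a `latest_number` of 2500, the wrapper invokes the inner function with `scan_to` of 1001, then 2002, then 2500, breaking once `scan_to` equals `latest_number`. No single pass, and hence no single txn, covers more than roughly 1000 blocks.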

@@ -11,10 +11,7 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use serai_client::{
SeraiError, Block, Serai, TemporalSerai,
primitives::{BlockHash, NetworkId},
validator_sets::{
primitives::{ValidatorSet, amortize_excess_key_shares},
ValidatorSetsEvent,
},
validator_sets::{primitives::ValidatorSet, ValidatorSetsEvent},
in_instructions::InInstructionsEvent,
coins::CoinsEvent,
};
@@ -69,12 +66,7 @@ async fn handle_new_set<D: Db>(
let set_participants =
serai.participants(set.network).await?.expect("NewSet for set which doesn't exist");

let mut set_data = set_participants
.into_iter()
.map(|(k, w)| (k, u16::try_from(w).unwrap()))
.collect::<Vec<_>>();
amortize_excess_key_shares(&mut set_data);
set_data
set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::<Vec<_>>()
};

let time = if let Ok(time) = block.time() {

@@ -3,6 +3,8 @@ use std::sync::Arc;

use rand_core::OsRng;

use ciphersuite::{Ciphersuite, Ristretto};

use tokio::{
sync::{mpsc, broadcast},
time::sleep,
@@ -35,12 +37,17 @@ async fn handle_p2p_test() {

let mut tributary_senders = vec![];
let mut tributary_arcs = vec![];
for (p2p, tributary) in tributaries.drain(..) {
for (i, (p2p, tributary)) in tributaries.drain(..).enumerate() {
let tributary = Arc::new(tributary);
tributary_arcs.push(tributary.clone());
let (new_tributary_send, new_tributary_recv) = broadcast::channel(5);
let (cosign_send, _) = mpsc::unbounded_channel();
tokio::spawn(handle_p2p_task(p2p, cosign_send, new_tributary_recv));
tokio::spawn(handle_p2p_task(
p2p,
cosign_send,
new_tributary_recv,
<Ristretto as Ciphersuite>::generator() * *keys[i],
));
new_tributary_send
.send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary }))
.map_err(|_| "failed to send ActiveTributary")

@@ -45,12 +45,17 @@ async fn sync_test() {
let mut tributary_senders = vec![];
let mut tributary_arcs = vec![];
let mut p2p_threads = vec![];
for (p2p, tributary) in tributaries.drain(..) {
for (i, (p2p, tributary)) in tributaries.drain(..).enumerate() {
let tributary = Arc::new(tributary);
tributary_arcs.push(tributary.clone());
let (new_tributary_send, new_tributary_recv) = broadcast::channel(5);
let (cosign_send, _) = mpsc::unbounded_channel();
let thread = tokio::spawn(handle_p2p_task(p2p, cosign_send, new_tributary_recv));
let thread = tokio::spawn(handle_p2p_task(
p2p,
cosign_send,
new_tributary_recv,
<Ristretto as Ciphersuite>::generator() * *keys[i],
));
new_tributary_send
.send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary }))
.map_err(|_| "failed to send ActiveTributary")
@@ -86,7 +91,7 @@ async fn sync_test() {
let syncer_tributary = Arc::new(syncer_tributary);
let (syncer_tributary_send, syncer_tributary_recv) = broadcast::channel(5);
let (cosign_send, _) = mpsc::unbounded_channel();
tokio::spawn(handle_p2p_task(syncer_p2p.clone(), cosign_send, syncer_tributary_recv));
tokio::spawn(handle_p2p_task(syncer_p2p.clone(), cosign_send, syncer_tributary_recv, syncer_key));
syncer_tributary_send
.send(TributaryEvent::NewTributary(ActiveTributary {
spec: spec.clone(),
@@ -116,8 +121,8 @@ async fn sync_test() {
.map_err(|_| "failed to send ActiveTributary to heartbeat")
.unwrap();

// The heartbeat is once every 10 blocks
sleep(Duration::from_secs(10 * block_time)).await;
// The heartbeat is once every 10 blocks, with some limitations
sleep(Duration::from_secs(20 * block_time)).await;
assert!(syncer_tributary.tip().await != spec.genesis());

// Verify it synced to the tip

@@ -1,5 +1,5 @@
use core::{marker::PhantomData, fmt::Debug};
use std::{sync::Arc, io};
use std::{sync::Arc, io, collections::VecDeque};

use async_trait::async_trait;

@@ -59,8 +59,7 @@ pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
pub const BLOCK_SIZE_LIMIT: usize = 3_001_000;

pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
pub(crate) const BLOCK_MESSAGE: u8 = 1;
pub(crate) const TRANSACTION_MESSAGE: u8 = 2;
pub(crate) const TRANSACTION_MESSAGE: u8 = 2; // TODO: Normalize to 1

#[allow(clippy::large_enum_variant)]
#[derive(Clone, PartialEq, Eq, Debug)]
@@ -194,7 +193,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
);
let blockchain = Arc::new(RwLock::new(blockchain));

let to_rebroadcast = Arc::new(RwLock::new(vec![]));
let to_rebroadcast = Arc::new(RwLock::new(VecDeque::new()));
// Actively rebroadcast consensus messages to ensure they aren't prematurely dropped from the
// P2P layer
let p2p_meta_task_handle = Arc::new(
@@ -207,7 +206,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
for msg in to_rebroadcast {
p2p.broadcast(genesis, msg).await;
}
tokio::time::sleep(core::time::Duration::from_secs(1)).await;
tokio::time::sleep(core::time::Duration::from_secs(60)).await;
}
}
})
@@ -218,7 +217,15 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
TendermintNetwork { genesis, signer, validators, blockchain, to_rebroadcast, p2p };

let TendermintHandle { synced_block, synced_block_result, messages, machine } =
TendermintMachine::new(network.clone(), block_number, start_time, proposal).await;
TendermintMachine::new(
db.clone(),
network.clone(),
genesis,
block_number,
start_time,
proposal,
)
.await;
tokio::spawn(machine.run());

Some(Self {
@@ -328,9 +335,6 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {

// Return true if the message should be rebroadcasted.
pub async fn handle_message(&self, msg: &[u8]) -> bool {
// Acquire the lock now to prevent sync_block from being run at the same time
let mut sync_block = self.synced_block_result.write().await;

match msg.first() {
Some(&TRANSACTION_MESSAGE) => {
let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else {
@@ -362,19 +366,6 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
false
}

Some(&BLOCK_MESSAGE) => {
let mut msg_ref = &msg[1 ..];
let Ok(block) = Block::<T>::read(&mut msg_ref) else {
log::error!("received invalid block message");
return false;
};
let commit = msg[(msg.len() - msg_ref.len()) ..].to_vec();
if self.sync_block_internal(block, commit, &mut sync_block).await {
log::debug!("synced block over p2p net instead of building the commit ourselves");
}
false
}

_ => false,
}
}

@@ -1,5 +1,8 @@
use core::ops::Deref;
use std::{sync::Arc, collections::HashMap};
use std::{
sync::Arc,
collections::{VecDeque, HashMap},
};

use async_trait::async_trait;

@@ -38,9 +41,8 @@ use tendermint::{
use tokio::sync::RwLock;

use crate::{
TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, BLOCK_MESSAGE, ReadWrite,
transaction::Transaction as TransactionTrait, Transaction, BlockHeader, Block, BlockError,
Blockchain, P2p,
TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, ReadWrite, transaction::Transaction as TransactionTrait,
Transaction, BlockHeader, Block, BlockError, Blockchain, P2p,
};

pub mod tx;
@@ -268,7 +270,7 @@ pub struct TendermintNetwork<D: Db, T: TransactionTrait, P: P2p> {
pub(crate) validators: Arc<Validators>,
pub(crate) blockchain: Arc<RwLock<Blockchain<D, T>>>,

pub(crate) to_rebroadcast: Arc<RwLock<Vec<Vec<u8>>>>,
pub(crate) to_rebroadcast: Arc<RwLock<VecDeque<Vec<u8>>>>,

pub(crate) p2p: P,
}
@@ -277,31 +279,10 @@ pub const BLOCK_PROCESSING_TIME: u32 = 999;
pub const LATENCY_TIME: u32 = 1667;
pub const TARGET_BLOCK_TIME: u32 = BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME);

#[test]
fn assert_target_block_time() {
use serai_db::MemDb;

#[derive(Clone, Debug)]
pub struct DummyP2p;

#[async_trait::async_trait]
impl P2p for DummyP2p {
async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {
unimplemented!()
}
}

// Type parameters don't matter here since we only need to call block_time()
// and it only relies on the constants of the trait implementation. block_time() is in seconds,
// TARGET_BLOCK_TIME is in milliseconds.
assert_eq!(
<TendermintNetwork<MemDb, TendermintTx, DummyP2p> as Network>::block_time(),
TARGET_BLOCK_TIME / 1000
)
}

#[async_trait]
impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P> {
type Db = D;

type ValidatorId = [u8; 32];
type SignatureScheme = Arc<Validators>;
type Weights = Arc<Validators>;
@@ -325,19 +306,28 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
}

async fn broadcast(&mut self, msg: SignedMessageFor<Self>) {
let mut to_broadcast = vec![TENDERMINT_MESSAGE];
to_broadcast.extend(msg.encode());

// Since we're broadcasting a Tendermint message, set it to be re-broadcasted every second
// until the block it's trying to build is complete
// If the P2P layer drops a message before all nodes obtained access, or a node had an
// intermittent failure, this will ensure reconciliation
// Resolves halts caused by timing discrepancies, which technically are violations of
// Tendermint as a BFT protocol, and shouldn't occur yet have in low-powered testing
// environments
// This is atrocious if there's no content-based deduplication protocol for messages actively
// being gossiped
// LibP2p, as used by Serai, is configured to content-based deduplicate
let mut to_broadcast = vec![TENDERMINT_MESSAGE];
to_broadcast.extend(msg.encode());
self.to_rebroadcast.write().await.push(to_broadcast.clone());
{
let mut to_rebroadcast_lock = self.to_rebroadcast.write().await;
to_rebroadcast_lock.push_back(to_broadcast.clone());
// We should have, ideally, 3 * validators messages within a round
// Therefore, this should keep the most recent 2-rounds
// TODO: This isn't perfect. Each participant should just rebroadcast their latest round of
// messages
while to_rebroadcast_lock.len() > (6 * self.validators.weights.len()) {
to_rebroadcast_lock.pop_front();
}
}

self.p2p.broadcast(self.genesis, to_broadcast).await
}
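Checking the bound's arithmetic: one Propose, one Prevote, and one Precommit per validator yields 3 * validators messages per round, so a cap of 6 * validators retains roughly the two most recent rounds. With 5 validators, at most 30 messages are kept for rebroadcast.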

@@ -423,12 +413,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
);
match block_res {
Ok(()) => {
// If we successfully added this block, broadcast it
// TODO: Move this under the coordinator once we set up on new block notifications?
let mut msg = serialized_block.0;
msg.insert(0, BLOCK_MESSAGE);
msg.extend(encoded_commit);
self.p2p.broadcast(self.genesis, msg).await;
// If we successfully added this block, break
break;
}
Err(BlockError::NonLocalProvided(hash)) => {
@@ -437,13 +422,14 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
hex::encode(hash),
hex::encode(self.genesis)
);
tokio::time::sleep(core::time::Duration::from_secs(5)).await;
}
_ => return invalid_block(),
}
}

// Since we've added a valid block, clear to_rebroadcast
*self.to_rebroadcast.write().await = vec![];
*self.to_rebroadcast.write().await = VecDeque::new();

Some(TendermintBlock(
self.blockchain.write().await.build_block::<Self>(&self.signature_scheme()).serialize(),

@@ -1,3 +1,6 @@
#[cfg(test)]
mod tendermint;

mod transaction;
pub use transaction::*;


28
coordinator/tributary/src/tests/tendermint.rs
Normal file
@@ -0,0 +1,28 @@
use tendermint::ext::Network;
use crate::{
P2p, TendermintTx,
tendermint::{TARGET_BLOCK_TIME, TendermintNetwork},
};

#[test]
fn assert_target_block_time() {
use serai_db::MemDb;

#[derive(Clone, Debug)]
pub struct DummyP2p;

#[async_trait::async_trait]
impl P2p for DummyP2p {
async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {
unimplemented!()
}
}

// Type parameters don't matter here since we only need to call block_time()
// and it only relies on the constants of the trait implementation. block_time() is in seconds,
// TARGET_BLOCK_TIME is in milliseconds.
assert_eq!(
<TendermintNetwork<MemDb, TendermintTx, DummyP2p> as Network>::block_time(),
TARGET_BLOCK_TIME / 1000
)
}
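The arithmetic behind the assertion: TARGET_BLOCK_TIME = 999 + (3 * 1667) = 6000 milliseconds, and block_time() reports seconds, so the test requires block_time() == 6000 / 1000 = 6.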
@@ -27,5 +27,7 @@ futures-util = { version = "0.3", default-features = false, features = ["std", "
futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }
tokio = { version = "1", default-features = false, features = ["time"] }

serai-db = { path = "../../../common/db", version = "0.1", default-features = false }

[dev-dependencies]
tokio = { version = "1", features = ["sync", "rt-multi-thread", "macros"] }

@@ -3,6 +3,9 @@ use std::{
collections::{HashSet, HashMap},
};

use parity_scale_codec::Encode;
use serai_db::{Get, DbTxn, Db};

use crate::{
time::CanonicalInstant,
ext::{RoundNumber, BlockNumber, Block, Network},
@@ -12,6 +15,9 @@ use crate::{
};

pub(crate) struct BlockData<N: Network> {
db: N::Db,
genesis: [u8; 32],

pub(crate) number: BlockNumber,
pub(crate) validator_id: Option<N::ValidatorId>,
pub(crate) proposal: Option<N::Block>,
@@ -32,12 +38,17 @@ pub(crate) struct BlockData<N: Network> {

impl<N: Network> BlockData<N> {
pub(crate) fn new(
db: N::Db,
genesis: [u8; 32],
weights: Arc<N::Weights>,
number: BlockNumber,
validator_id: Option<N::ValidatorId>,
proposal: Option<N::Block>,
) -> BlockData<N> {
BlockData {
db,
genesis,

number,
validator_id,
proposal,
@@ -129,11 +140,70 @@ impl<N: Network> BlockData<N> {
self.round_mut().step = data.step();

// Only return a message if we're actually a current validator
self.validator_id.map(|validator_id| Message {
let round_number = self.round().number;
let res = self.validator_id.map(|validator_id| Message {
sender: validator_id,
block: self.number,
round: self.round().number,
round: round_number,
data,
})
});

if let Some(res) = res.as_ref() {
const LATEST_BLOCK_KEY: &[u8] = b"tendermint-machine-sent_block";
const LATEST_ROUND_KEY: &[u8] = b"tendermint-machine-sent_round";
const PROPOSE_KEY: &[u8] = b"tendermint-machine-sent_propose";
const PEVOTE_KEY: &[u8] = b"tendermint-machine-sent_prevote";
const PRECOMMIT_KEY: &[u8] = b"tendermint-machine-sent_commit";

let genesis = self.genesis;
let key = |prefix: &[u8]| [prefix, &genesis].concat();

let mut txn = self.db.txn();

// Ensure we haven't prior sent a message for a future block/round
let last_block_or_round = |txn: &mut <N::Db as Db>::Transaction<'_>, prefix, current| {
let key = key(prefix);
let latest =
u64::from_le_bytes(txn.get(key.as_slice()).unwrap_or(vec![0; 8]).try_into().unwrap());
if latest > current {
None?;
}
if current > latest {
txn.put(&key, current.to_le_bytes());
return Some(true);
}
Some(false)
};
let new_block = last_block_or_round(&mut txn, LATEST_BLOCK_KEY, self.number.0)?;
if new_block {
// Delete the latest round key
txn.del(&key(LATEST_ROUND_KEY));
}
let new_round = last_block_or_round(&mut txn, LATEST_ROUND_KEY, round_number.0.into())?;
if new_block || new_round {
// Delete the messages for the old round
txn.del(&key(PROPOSE_KEY));
txn.del(&key(PEVOTE_KEY));
txn.del(&key(PRECOMMIT_KEY));
}

// Check we haven't sent this message within this round
let msg_key = key(match res.data.step() {
Step::Propose => PROPOSE_KEY,
Step::Prevote => PEVOTE_KEY,
Step::Precommit => PRECOMMIT_KEY,
});
if txn.get(&msg_key).is_some() {
assert!(!new_block);
assert!(!new_round);
None?;
}
// Put this message to the DB
txn.put(&msg_key, res.encode());

txn.commit();
}

res
}
}
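Tracing the guard through a hypothetical replay: if the machine attempts a second Precommit for block 5, round 2 (say, after a crash and restart with the same DB), `last_block_or_round` returns Some(false) for both the block and round keys, the `txn.get(&msg_key).is_some()` check finds the stored message, and `None?` aborts the send. At most one message per (block, round, step) ever leaves this function, which is exactly the equivocation this persistence exists to prevent.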

@@ -212,6 +212,9 @@ pub trait Block: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode

/// Trait representing the distributed system Tendermint is providing consensus over.
#[async_trait]
pub trait Network: Sized + Send + Sync {
  /// The database used to back this.
  type Db: serai_db::Db;

  /// Type used to identify validators.
  type ValidatorId: ValidatorId;
  /// Signature scheme used by validators.

@@ -231,6 +231,9 @@ pub enum SlashEvent {

/// A machine executing the Tendermint protocol.
pub struct TendermintMachine<N: Network> {
  db: N::Db,
  genesis: [u8; 32],

  network: N,
  signer: <N::SignatureScheme as SignatureScheme>::Signer,
  validators: N::SignatureScheme,
@@ -310,11 +313,16 @@ impl<N: Network + 'static> TendermintMachine<N> {
    let time_until_round_end = round_end.instant().saturating_duration_since(Instant::now());
    if time_until_round_end == Duration::ZERO {
      log::trace!(
        target: "tendermint",
        "resetting when prior round ended {}ms ago",
        Instant::now().saturating_duration_since(round_end.instant()).as_millis(),
      );
    }
    log::trace!("sleeping until round ends in {}ms", time_until_round_end.as_millis());
    log::trace!(
      target: "tendermint",
      "sleeping until round ends in {}ms",
      time_until_round_end.as_millis(),
    );
    sleep(time_until_round_end).await;

    // Clear our outbound message queue
@@ -322,6 +330,8 @@ impl<N: Network + 'static> TendermintMachine<N> {

    // Create the new block
    self.block = BlockData::new(
      self.db.clone(),
      self.genesis,
      self.weights.clone(),
      BlockNumber(self.block.number.0 + 1),
      self.signer.validator_id().await,
@@ -370,7 +380,9 @@ impl<N: Network + 'static> TendermintMachine<N> {
  /// the machine itself. The machine should have `run` called from an asynchronous task.
  #[allow(clippy::new_ret_no_self)]
  pub async fn new(
    db: N::Db,
    network: N,
    genesis: [u8; 32],
    last_block: BlockNumber,
    last_time: u64,
    proposal: N::Block,
@@ -409,6 +421,9 @@ impl<N: Network + 'static> TendermintMachine<N> {
    let validator_id = signer.validator_id().await;
    // 01-10
    let mut machine = TendermintMachine {
      db: db.clone(),
      genesis,

      network,
      signer,
      validators,
@@ -420,6 +435,8 @@ impl<N: Network + 'static> TendermintMachine<N> {
      synced_block_result_send,

      block: BlockData::new(
        db,
        genesis,
        weights,
        BlockNumber(last_block.0 + 1),
        validator_id,
@@ -497,7 +514,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
        match step {
          Step::Propose => {
            // Slash the validator for not proposing when they should've
            log::debug!(target: "tendermint", "Validator didn't propose when they should have");
            log::debug!(target: "tendermint", "validator didn't propose when they should have");
            // This slash will be voted on.
            self.slash(
              self.weights.proposer(self.block.number, self.block.round().number),
@@ -586,7 +603,11 @@ impl<N: Network + 'static> TendermintMachine<N> {
          );
          let id = block.id();
          let proposal = self.network.add_block(block, commit).await;
          log::trace!("added block {} (produced by machine)", hex::encode(id.as_ref()));
          log::trace!(
            target: "tendermint",
            "added block {} (produced by machine)",
            hex::encode(id.as_ref()),
          );
          self.reset(msg.round, proposal).await;
        }
        Err(TendermintError::Malicious(sender, evidence)) => {
@@ -680,7 +701,12 @@ impl<N: Network + 'static> TendermintMachine<N> {
      (msg.round == self.block.round().number) &&
      (msg.data.step() == Step::Propose)
    {
      log::trace!("received Propose for block {}, round {}", msg.block.0, msg.round.0);
      log::trace!(
        target: "tendermint",
        "received Propose for block {}, round {}",
        msg.block.0,
        msg.round.0,
      );
    }

    // If this is a precommit, verify its signature
@@ -698,7 +724,13 @@ impl<N: Network + 'static> TendermintMachine<N> {
    if !self.block.log.log(signed.clone())? {
      return Err(TendermintError::AlreadyHandled);
    }
    log::debug!(target: "tendermint", "received new tendermint message");
    log::trace!(
      target: "tendermint",
      "received new tendermint message (block: {}, round: {}, step: {:?})",
      msg.block.0,
      msg.round.0,
      msg.data.step(),
    );

    // All functions, except for the finalizer and the jump, are locked to the current round

@@ -745,6 +777,13 @@ impl<N: Network + 'static> TendermintMachine<N> {
        // 55-56
        // Jump, enabling processing by the below code
        if self.block.log.round_participation(msg.round) > self.weights.fault_threshold() {
          log::debug!(
            target: "tendermint",
            "jumping from round {} to round {}",
            self.block.round().number.0,
            msg.round.0,
          );

          // Jump to the new round.
          let proposer = self.round(msg.round, None);

@@ -802,13 +841,26 @@ impl<N: Network + 'static> TendermintMachine<N> {
    if (self.block.round().step == Step::Prevote) && matches!(msg.data, Data::Prevote(_)) {
      let (participation, weight) =
        self.block.log.message_instances(self.block.round().number, &Data::Prevote(None));
      let threshold_weight = self.weights.threshold();
      if participation < threshold_weight {
        log::trace!(
          target: "tendermint",
          "progress towards setting prevote timeout, participation: {}, needed: {}",
          participation,
          threshold_weight,
        );
      }
      // 34-35
      if participation >= self.weights.threshold() {
      if participation >= threshold_weight {
        log::trace!(
          target: "tendermint",
          "setting timeout for prevote due to sufficient participation",
        );
        self.block.round_mut().set_timeout(Step::Prevote);
      }

      // 44-46
      if weight >= self.weights.threshold() {
      if weight >= threshold_weight {
        self.broadcast(Data::Precommit(None));
        return Ok(None);
      }
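For a sense of the numbers behind these checks: with four equal-weight validators, the usual BFT bounds work out as below. A toy sketch; the exact formulas live in the crate's Weights implementation and may differ.

fn main() {
  let total_weight = 4u64; // four equal-weight validators

  // Weight needed before timeouts are scheduled / nil is precommitted (more than 2/3)
  let threshold = ((total_weight * 2) / 3) + 1;
  // Weight proving at least one honest validator moved on (more than 1/3), used for round jumps
  let fault_threshold = (total_weight / 3) + 1;

  assert_eq!(threshold, 3);
  assert_eq!(fault_threshold, 2);

  // With only two prevotes seen, the machine logs its progress rather than setting the timeout
  let participation = 2u64;
  assert!(participation < threshold);
}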

@@ -818,6 +870,10 @@ impl<N: Network + 'static> TendermintMachine<N> {
    if matches!(msg.data, Data::Precommit(_)) &&
      self.block.log.has_participation(self.block.round().number, Step::Precommit)
    {
      log::trace!(
        target: "tendermint",
        "setting timeout for precommit due to sufficient participation",
      );
      self.block.round_mut().set_timeout(Step::Precommit);
    }


@@ -1,6 +1,5 @@
use std::{sync::Arc, collections::HashMap};

use log::debug;
use parity_scale_codec::Encode;

use crate::{ext::*, RoundNumber, Step, DataFor, TendermintError, SignedMessageFor, Evidence};
@@ -27,7 +26,7 @@ impl<N: Network> MessageLog<N> {
    let step = msg.data.step();
    if let Some(existing) = msgs.get(&step) {
      if existing.msg.data != msg.data {
        debug!(
        log::debug!(
          target: "tendermint",
          "Validator sent multiple messages for the same block + round + step"
        );

@@ -10,6 +10,8 @@ use parity_scale_codec::{Encode, Decode};
use futures_util::sink::SinkExt;
use tokio::{sync::RwLock, time::sleep};

use serai_db::MemDb;

use tendermint_machine::{
  ext::*, SignedMessageFor, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender,
  SlashEvent, TendermintMachine, TendermintHandle,
@@ -111,6 +113,8 @@ struct TestNetwork(

#[async_trait]
impl Network for TestNetwork {
  type Db = MemDb;

  type ValidatorId = TestValidatorId;
  type SignatureScheme = TestSignatureScheme;
  type Weights = TestWeights;
@@ -170,7 +174,9 @@ impl TestNetwork {
      let i = u16::try_from(i).unwrap();
      let TendermintHandle { messages, synced_block, synced_block_result, machine } =
        TendermintMachine::new(
          MemDb::new(),
          TestNetwork(i, arc.clone()),
          [0; 32],
          BlockNumber(1),
          start_time,
          TestBlock { id: 1u32.to_le_bytes(), valid: Ok(()) },

@@ -38,7 +38,6 @@ ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features =
multiexp = { path = "../multiexp", version = "0.4", default-features = false, features = ["std", "batch"] }

schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false, features = ["std"] }
dleq = { path = "../dleq", version = "^0.4.1", default-features = false, features = ["std", "serialize"] }

dkg = { path = "../dkg", version = "^0.5.1", default-features = false, features = ["std"] }


@@ -39,6 +39,13 @@ pub trait Algorithm<C: Curve>: Send + Sync + Clone {

  /// Obtain the list of nonces to generate, as specified by the generators to create commitments
  /// against per-nonce.
  ///
  /// The Algorithm is responsible for all transcripting of these nonce specifications/generators.
  ///
  /// The prover will be passed the commitments, and the commitments will be sent to all other
  /// participants. No guarantees are made that the commitments are internally consistent (have
  /// the same discrete logarithm across generators). Any Algorithm which specifies multiple
  /// generators for a single nonce must handle that itself.
  fn nonces(&self) -> Vec<Vec<C::G>>;

  /// Generate an addendum to FROST's preprocessing stage.
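As a hedged illustration of the nonces() contract documented above: an Algorithm wanting one nonce over the standard generator alone, and one nonce whose commitments span a second generator (such as Monero's alternate basepoint), might plan its nonces as below. The helper and the `h` parameter are hypothetical, not taken from the crate.

use ciphersuite::Ciphersuite;

// Hypothetical helper mirroring an Algorithm::nonces implementation. The second nonce spans two
// generators, so the Algorithm itself must prove both commitments share a discrete logarithm.
fn planned_nonces<C: Ciphersuite>(h: C::G) -> Vec<Vec<C::G>> {
  vec![
    // One nonce committed to solely against the standard generator
    vec![C::generator()],
    // One nonce committed to against both the standard generator and `h`
    vec![C::generator(), h],
  ]
}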

@@ -1,13 +1,9 @@
// FROST defines its nonce as sum(Di, Ei * bi)
// Monero, however, needs the nonce not just over G, but also over H
// Then there is a signature (a modified Chaum-Pedersen proof) using multiple nonces at once
//
// Accordingly, in order for this library to be robust, it supports generating an arbitrary amount
// of nonces, each against an arbitrary list of generators
// In order for this library to be robust, it supports generating an arbitrary amount of nonces,
// each against an arbitrary list of generators
//
// Each nonce remains of the form (d, e) and is made into a proper nonce with d + (e * b)
// When representations across multiple generators are provided, a DLEq proof is also provided to
// confirm their integrity

use core::ops::Deref;
use std::{
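The header comment above compresses a fair bit of algebra. A self-contained toy, with integers mod a small prime standing in for a real group (purely illustrative, not the library's types), showing that the commitment to r = d + b*e is recoverable from the commitments D and E alone:

fn main() {
  const P: u64 = 101; // toy prime, the order of our stand-in additive group
  const G: u64 = 3; // toy "generator"; scalar multiplication is just multiplication mod P

  let (d, e) = (17, 42); // the two random scalars forming one nonce
  let (big_d, big_e) = ((d * G) % P, (e * G) % P); // commitments D = dG, E = eG

  let b = 29; // binding factor, derived from the transcript in the real protocol

  // The effective nonce r = d + b * e
  let r = (d + (b * e)) % P;
  // Its commitment needs no further communication: rG = D + bE
  assert_eq!((r * G) % P, (big_d + (b * big_e)) % P);
}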
@@ -24,32 +20,8 @@ use transcript::Transcript;
use ciphersuite::group::{ff::PrimeField, Group, GroupEncoding};
use multiexp::multiexp_vartime;

use dleq::MultiDLEqProof;

use crate::{curve::Curve, Participant};

// Transcript used to aggregate binomial nonces for usage within a single DLEq proof.
fn aggregation_transcript<T: Transcript>(context: &[u8]) -> T {
  let mut transcript = T::new(b"FROST DLEq Aggregation v0.5");
  transcript.append_message(b"context", context);
  transcript
}

// Every participant proves for their commitments at the start of the protocol
// These proofs are verified sequentially, requiring independent transcripts
// In order to make these transcripts more robust, the FROST transcript (at time of preprocess) is
// challenged in order to create a commitment to it, carried in each independent transcript
// (effectively forking the original transcript)
//
// For FROST, as defined by the IETF, this will do nothing (and this transcript will never even be
// constructed). For higher level protocols, the transcript may have contextual info these proofs
// will then be bound to
fn dleq_transcript<T: Transcript>(context: &[u8]) -> T {
  let mut transcript = T::new(b"FROST Commitments DLEq v0.5");
  transcript.append_message(b"context", context);
  transcript
}

// Each nonce is actually a pair of random scalars, notated as d, e under the FROST paper
// This is considered a single nonce as r = d + be
#[derive(Clone, Zeroize)]
@@ -69,7 +41,7 @@ impl<C: Curve> GeneratorCommitments<C> {
  }
}

// A single nonce's commitments and relevant proofs
// A single nonce's commitments
#[derive(Clone, PartialEq, Eq)]
pub(crate) struct NonceCommitments<C: Curve> {
  // Called generators as these commitments are indexed by generator later on
@@ -121,12 +93,6 @@ impl<C: Curve> NonceCommitments<C> {
      t.append_message(b"commitment_E", commitments.0[1].to_bytes());
    }
  }

  fn aggregation_factor<T: Transcript>(&self, context: &[u8]) -> C::F {
    let mut transcript = aggregation_transcript::<T>(context);
    self.transcript(&mut transcript);
    <C as Curve>::hash_to_F(b"dleq_aggregation", transcript.challenge(b"binding").as_ref())
  }
}
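In symbols, the relation the (now-removed) aggregation_factor supported, with notation mine rather than the library's: the binding factor b is hashed from the context and the commitments, both sides derive the same aggregated point per generator, and the single MultiDLEq proof then covers those aggregates.

\[
  b = \mathcal{H}(\text{context},\, D_1, E_1, D_2, E_2, \dots), \qquad
  \hat{N}_i = D_i + b\,E_i = (d + b\,e)\,G_i,
\]
\[
  \text{the DLEq proof shows } \log_{G_1}\hat{N}_1 = \log_{G_2}\hat{N}_2 = d + b\,e .
\]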

/// Commitments for all the nonces across all their generators.
@@ -135,51 +101,26 @@ pub(crate) struct Commitments<C: Curve> {
  // Called nonces as these commitments are indexed by nonce
  // So to get the commitments for the first nonce, it'd be commitments.nonces[0]
  pub(crate) nonces: Vec<NonceCommitments<C>>,
  // DLEq Proof proving that each set of commitments was generated using a single pair of discrete
  // logarithms
  pub(crate) dleq: Option<MultiDLEqProof<C::G>>,
}

impl<C: Curve> Commitments<C> {
  pub(crate) fn new<R: RngCore + CryptoRng, T: Transcript>(
  pub(crate) fn new<R: RngCore + CryptoRng>(
    rng: &mut R,
    secret_share: &Zeroizing<C::F>,
    planned_nonces: &[Vec<C::G>],
    context: &[u8],
  ) -> (Vec<Nonce<C>>, Commitments<C>) {
    let mut nonces = vec![];
    let mut commitments = vec![];

    let mut dleq_generators = vec![];
    let mut dleq_nonces = vec![];
    for generators in planned_nonces {
      let (nonce, these_commitments): (Nonce<C>, _) =
        NonceCommitments::new(&mut *rng, secret_share, generators);

      if generators.len() > 1 {
        dleq_generators.push(generators.clone());
        dleq_nonces.push(Zeroizing::new(
          (these_commitments.aggregation_factor::<T>(context) * nonce.0[1].deref()) +
            nonce.0[0].deref(),
        ));
      }

      nonces.push(nonce);
      commitments.push(these_commitments);
    }

    let dleq = if !dleq_generators.is_empty() {
      Some(MultiDLEqProof::prove(
        rng,
        &mut dleq_transcript::<T>(context),
        &dleq_generators,
        &dleq_nonces,
      ))
    } else {
      None
    };

    (nonces, Commitments { nonces: commitments, dleq })
    (nonces, Commitments { nonces: commitments })
  }
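Net effect of the hunk above: Commitments::new no longer takes a Transcript type parameter or context, and the returned Commitments carries only the per-nonce commitments. A toy sketch of the resulting wire framing, matching the write() method further down (the 32-byte size is assumed for illustration, not taken from the crate):

fn main() {
  // Two nonces' worth of commitments, each serialized to 32 bytes here for illustration
  let nonce_commitments: Vec<[u8; 32]> = vec![[1; 32], [2; 32]];

  let mut encoding = Vec::new();
  for commitments in &nonce_commitments {
    encoding.extend_from_slice(commitments);
  }
  // With the dleq field gone, nothing further is appended after the commitments
  assert_eq!(encoding.len(), 64);
}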

  pub(crate) fn transcript<T: Transcript>(&self, t: &mut T) {
@@ -187,58 +128,20 @@ impl<C: Curve> Commitments<C> {
    for nonce in &self.nonces {
      nonce.transcript(t);
    }

    // Transcripting the DLEqs implicitly transcripts the exact generators used for the nonces in
    // an exact order
    // This means it shouldn't be possible for variadic generators to cause conflicts
    if let Some(dleq) = &self.dleq {
      t.append_message(b"dleq", dleq.serialize());
    }
  }

  pub(crate) fn read<R: Read, T: Transcript>(
    reader: &mut R,
    generators: &[Vec<C::G>],
    context: &[u8],
  ) -> io::Result<Self> {
  pub(crate) fn read<R: Read>(reader: &mut R, generators: &[Vec<C::G>]) -> io::Result<Self> {
    let nonces = (0 .. generators.len())
      .map(|i| NonceCommitments::read(reader, &generators[i]))
      .collect::<Result<Vec<NonceCommitments<C>>, _>>()?;

    let mut dleq_generators = vec![];
    let mut dleq_nonces = vec![];
    for (generators, nonce) in generators.iter().cloned().zip(&nonces) {
      if generators.len() > 1 {
        let binding = nonce.aggregation_factor::<T>(context);
        let mut aggregated = vec![];
        for commitments in &nonce.generators {
          aggregated.push(commitments.0[0] + (commitments.0[1] * binding));
        }
        dleq_generators.push(generators);
        dleq_nonces.push(aggregated);
      }
    }

    let dleq = if !dleq_generators.is_empty() {
      let dleq = MultiDLEqProof::read(reader, dleq_generators.len())?;
      dleq
        .verify(&mut dleq_transcript::<T>(context), &dleq_generators, &dleq_nonces)
        .map_err(|_| io::Error::other("invalid DLEq proof"))?;
      Some(dleq)
    } else {
      None
    };

    Ok(Commitments { nonces, dleq })
    Ok(Commitments { nonces })
  }

  pub(crate) fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    for nonce in &self.nonces {
      nonce.write(writer)?;
    }
    if let Some(dleq) = &self.dleq {
      dleq.write(writer)?;
    }
    Ok(())
  }
}