Merge branch 'emissions' of https://github.com/akildemir/serai into block-emissions

.github/actions/bitcoin/action.yml (vendored): 2 lines changed
@@ -5,7 +5,7 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: 24.0.1
+    default: "27.0"
 
 runs:
   using: "composite"

.github/actions/test-dependencies/action.yml (vendored): 6 lines changed
@@ -10,7 +10,7 @@ inputs:
   bitcoin-version:
     description: "Bitcoin version to download and run as a regtest node"
     required: false
-    default: 24.0.1
+    default: "27.0"
 
 runs:
   using: "composite"
@@ -19,9 +19,9 @@ runs:
     uses: ./.github/actions/build-dependencies
 
   - name: Install Foundry
-    uses: foundry-rs/foundry-toolchain@cb603ca0abb544f301eaed59ac0baf579aa6aecf
+    uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
     with:
-      version: nightly-09fe3e041369a816365a020f715ad6f94dbce9f2
+      version: nightly-f625d0fa7c51e65b4bf1e8f7931cd1c6e2e285e9
       cache: false
 
   - name: Run a Monero Regtest Node

.github/nightly-version (vendored): 2 lines changed
@@ -1 +1 @@
-nightly-2024-02-07
+nightly-2024-05-01

.github/workflows/coins-tests.yml (vendored): 1 line changed
@@ -30,6 +30,7 @@ jobs:
         run: |
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
             -p bitcoin-serai \
+            -p alloy-simple-request-transport \
             -p ethereum-serai \
             -p monero-generators \
             -p monero-serai

.github/workflows/common-tests.yml (vendored): 3 lines changed
@@ -28,4 +28,5 @@ jobs:
             -p std-shims \
             -p zalloc \
             -p serai-db \
-            -p serai-env
+            -p serai-env \
+            -p simple-request

.github/workflows/coordinator-tests.yml (vendored): 2 lines changed
@@ -37,4 +37,4 @@ jobs:
         uses: ./.github/actions/build-dependencies
 
       - name: Run coordinator Docker tests
-        run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features

.github/workflows/full-stack-tests.yml (vendored): 2 lines changed
@@ -19,4 +19,4 @@ jobs:
         uses: ./.github/actions/build-dependencies
 
       - name: Run Full Stack Docker tests
-        run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features

.github/workflows/message-queue-tests.yml (vendored): 2 lines changed
@@ -33,4 +33,4 @@ jobs:
         uses: ./.github/actions/build-dependencies
 
       - name: Run message-queue Docker tests
-        run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features

.github/workflows/processor-tests.yml (vendored): 2 lines changed
@@ -37,4 +37,4 @@ jobs:
         uses: ./.github/actions/build-dependencies
 
       - name: Run processor Docker tests
-        run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features

.github/workflows/reproducible-runtime.yml (vendored): 2 lines changed
@@ -33,4 +33,4 @@ jobs:
         uses: ./.github/actions/build-dependencies
 
       - name: Run Reproducible Runtime tests
-        run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features

.github/workflows/tests.yml (vendored): 3 lines changed
@@ -43,6 +43,7 @@ jobs:
             -p tendermint-machine \
             -p tributary-chain \
             -p serai-coordinator \
+            -p serai-orchestrator \
             -p serai-docker-tests
 
   test-substrate:
@@ -64,7 +65,9 @@ jobs:
             -p serai-validator-sets-pallet \
             -p serai-in-instructions-primitives \
             -p serai-in-instructions-pallet \
+            -p serai-signals-primitives \
             -p serai-signals-pallet \
+            -p serai-abi \
             -p serai-runtime \
             -p serai-node

Cargo.lock (generated): 1,633 lines changed; diff suppressed because it is too large

Cargo.toml (workspace members):
@@ -36,6 +36,7 @@ members = [
   "crypto/schnorrkel",
 
   "coins/bitcoin",
+  "coins/ethereum/alloy-simple-request-transport",
   "coins/ethereum",
   "coins/monero/generators",
   "coins/monero",

@@ -375,7 +375,7 @@ impl SignMachine<Transaction> for TransactionSignMachine {
     msg: &[u8],
   ) -> Result<(TransactionSignatureMachine, Self::SignatureShare), FrostError> {
     if !msg.is_empty() {
-      panic!("message was passed to the TransactionMachine when it generates its own");
+      panic!("message was passed to the TransactionSignMachine when it generates its own");
     }
 
     let commitments = (0 .. self.sigs.len())

coins/ethereum/.gitignore (vendored): 4 lines changed
@@ -1,7 +1,3 @@
 # Solidity build outputs
 cache
 artifacts
-
-# Auto-generated ABI files
-src/abi/schnorr.rs
-src/abi/router.rs

coins/ethereum/Cargo.toml:
@@ -18,28 +18,29 @@ workspace = true
 
 [dependencies]
 thiserror = { version = "1", default-features = false }
-eyre = { version = "0.6", default-features = false }
-
-sha3 = { version = "0.10", default-features = false, features = ["std"] }
-
-group = { version = "0.13", default-features = false }
-k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa"] }
-frost = { package = "modular-frost", path = "../../crypto/frost", features = ["secp256k1", "tests"] }
-
-ethers-core = { version = "2", default-features = false }
-ethers-providers = { version = "2", default-features = false }
-ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] }
-
-[build-dependencies]
-ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] }
-
-[dev-dependencies]
 
 rand_core = { version = "0.6", default-features = false, features = ["std"] }
 
-hex = { version = "0.4", default-features = false, features = ["std"] }
-serde = { version = "1", default-features = false, features = ["std"] }
-serde_json = { version = "1", default-features = false, features = ["std"] }
-
-sha2 = { version = "0.10", default-features = false, features = ["std"] }
+transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["recommended"] }
+
+group = { version = "0.13", default-features = false }
+k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa", "arithmetic"] }
+frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["secp256k1"] }
+
+alloy-core = { version = "0.7", default-features = false }
+alloy-sol-types = { version = "0.7", default-features = false, features = ["json"] }
+alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false, features = ["k256"] }
+alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
+alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
+alloy-simple-request-transport = { path = "./alloy-simple-request-transport", default-features = false }
+alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
+
+[dev-dependencies]
+frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["tests"] }
 
 tokio = { version = "1", features = ["macros"] }
 
+alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
+
+[features]
+tests = []

coins/ethereum/README.md:
@@ -3,6 +3,12 @@
 This package contains Ethereum-related functionality, specifically deploying and
 interacting with Serai contracts.
 
+While `monero-serai` and `bitcoin-serai` are general purpose libraries,
+`ethereum-serai` is Serai specific. If any of the utilities are generally
+desired, please fork and maintain your own copy to ensure the desired
+functionality is preserved, or open an issue to request we make this library
+general purpose.
+
 ### Dependencies
 
 - solc

coins/ethereum/alloy-simple-request-transport/Cargo.toml (new file, 29 lines)
[package]
name = "alloy-simple-request-transport"
version = "0.1.0"
description = "A transport for alloy based off simple-request"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/coins/ethereum/alloy-simple-request-transport"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.74"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
tower = "0.4"

serde_json = { version = "1", default-features = false }
simple-request = { path = "../../../common/request", default-features = false }

alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }

[features]
default = ["tls"]
tls = ["simple-request/tls"]

coins/ethereum/alloy-simple-request-transport/LICENSE (new file, 21 lines)
MIT License

Copyright (c) 2024 Luke Parker

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

coins/ethereum/alloy-simple-request-transport/README.md (new file, 4 lines)
# Alloy Simple Request Transport

A transport for alloy based on simple-request, a small HTTP client built around
hyper.

coins/ethereum/alloy-simple-request-transport/src/lib.rs (new file, 60 lines)
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]

use core::task;
use std::io;

use alloy_json_rpc::{RequestPacket, ResponsePacket};
use alloy_transport::{TransportError, TransportErrorKind, TransportFut};

use simple_request::{hyper, Request, Client};

use tower::Service;

#[derive(Clone, Debug)]
pub struct SimpleRequest {
  client: Client,
  url: String,
}

impl SimpleRequest {
  pub fn new(url: String) -> Self {
    Self { client: Client::with_connection_pool(), url }
  }
}

impl Service<RequestPacket> for SimpleRequest {
  type Response = ResponsePacket;
  type Error = TransportError;
  type Future = TransportFut<'static>;

  #[inline]
  fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> task::Poll<Result<(), Self::Error>> {
    task::Poll::Ready(Ok(()))
  }

  #[inline]
  fn call(&mut self, req: RequestPacket) -> Self::Future {
    let inner = self.clone();
    Box::pin(async move {
      let packet = req.serialize().map_err(TransportError::SerError)?;
      let request = Request::from(
        hyper::Request::post(&inner.url)
          .header("Content-Type", "application/json")
          .body(serde_json::to_vec(&packet).map_err(TransportError::SerError)?.into())
          .unwrap(),
      );

      let mut res = inner
        .client
        .request(request)
        .await
        .map_err(|e| TransportErrorKind::custom(io::Error::other(format!("{e:?}"))))?
        .body()
        .await
        .map_err(|e| TransportErrorKind::custom(io::Error::other(format!("{e:?}"))))?;

      serde_json::from_reader(&mut res).map_err(|e| TransportError::deser_err(e, ""))
    })
  }
}
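
For orientation, a minimal usage sketch of this transport follows. It assumes the `RpcClient::new(transport, is_local)` constructor and string-method `request` API of `alloy-rpc-client` at the pinned revision, plus a node at a placeholder URL, so treat it as illustrative rather than part of this commit:

  use alloy_rpc_client::RpcClient;
  use alloy_simple_request_transport::SimpleRequest;

  #[tokio::main]
  async fn main() {
    // Wrap an HTTP JSON-RPC endpoint in the tower Service defined above
    let transport = SimpleRequest::new("http://127.0.0.1:8545".to_string());
    // `true` marks the endpoint as local, an assumption of this sketch
    let client = RpcClient::new(transport, true);
    // Issue a raw JSON-RPC call; the result deserializes via serde_json
    let block_number: String = client.request("eth_blockNumber", ()).await.unwrap();
    println!("latest block: {block_number}");
  }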

coins/ethereum/build.rs:
@@ -1,7 +1,5 @@
 use std::process::Command;
 
-use ethers_contract::Abigen;
-
 fn main() {
   println!("cargo:rerun-if-changed=contracts/*");
   println!("cargo:rerun-if-changed=artifacts/*");
@@ -21,22 +19,23 @@ fn main() {
     "--base-path", ".",
     "-o", "./artifacts", "--overwrite",
     "--bin", "--abi",
-    "--optimize",
-    "./contracts/Schnorr.sol", "./contracts/Router.sol",
+    "--via-ir", "--optimize",
+
+    "./contracts/IERC20.sol",
+
+    "./contracts/Schnorr.sol",
+    "./contracts/Deployer.sol",
+    "./contracts/Sandbox.sol",
+    "./contracts/Router.sol",
+
+    "./src/tests/contracts/Schnorr.sol",
+    "./src/tests/contracts/ERC20.sol",
+
+    "--no-color",
   ];
-  assert!(Command::new("solc").args(args).status().unwrap().success());
-
-  Abigen::new("Schnorr", "./artifacts/Schnorr.abi")
-    .unwrap()
-    .generate()
-    .unwrap()
-    .write_to_file("./src/abi/schnorr.rs")
-    .unwrap();
-
-  Abigen::new("Router", "./artifacts/Router.abi")
-    .unwrap()
-    .generate()
-    .unwrap()
-    .write_to_file("./src/abi/router.rs")
-    .unwrap();
+  let solc = Command::new("solc").args(args).output().unwrap();
+  assert!(solc.status.success());
+  for line in String::from_utf8(solc.stderr).unwrap().lines() {
+    assert!(!line.starts_with("Error:"));
+  }
 }

coins/ethereum/contracts/Deployer.sol (new file, 52 lines)
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;

/*
  The expected deployment process of the Router is as follows:

  1) A transaction deploying Deployer is made. Then, a deterministic signature
     is created such that an account with an unknown private key is the creator
     of the contract. Anyone can fund this address, and once anyone does, the
     transaction deploying Deployer can be published by anyone. No other
     transaction may be made from that account.

  2) Anyone deploys the Router through the Deployer. This uses a sequential
     nonce such that meet-in-the-middle attacks, with complexity 2**80, aren't
     feasible. While such attacks would still be feasible if the Deployer's
     address was controllable, the usage of a deterministic signature with a
     NUMS method prevents that.

  This doesn't have any denial-of-service risks and will resolve once anyone
  steps forward as deployer. This does fail to guarantee an identical address
  across every chain, though it enables letting anyone efficiently ask the
  Deployer for the address (with the Deployer having an identical address on
  every chain).

  Unfortunately, guaranteeing identical addresses isn't feasible. We'd need the
  Deployer contract to use a consistent salt for the Router, yet the Router
  must be deployed with a specific public key for Serai. Since Ethereum isn't
  able to determine a valid public key (one which is the result of a Serai DKG)
  from a dishonest public key, we have to allow multiple deployments, with
  Serai being the one to determine which to use.

  The alternative would be to have a council publish the Serai key on Ethereum,
  with Serai verifying the published result. This would introduce a DoS risk in
  the council not publishing the correct key/not publishing any key.
*/

contract Deployer {
  event Deployment(bytes32 indexed init_code_hash, address created);

  error DeploymentFailed();

  function deploy(bytes memory init_code) external {
    address created;
    assembly {
      created := create(0, add(init_code, 0x20), mload(init_code))
    }
    if (created == address(0)) {
      revert DeploymentFailed();
    }
    // These may be emitted out of order upon re-entrancy
    emit Deployment(keccak256(init_code), created);
  }
}
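
Because the Deployer creates the Router with a plain CREATE, the Router's address is determined entirely by the Deployer's address and its sequential nonce, via keccak256(rlp([sender, nonce])). A sketch of that computation follows; the sha3 crate and the single-byte-nonce restriction are assumptions of this illustration, not part of the commit:

  use sha3::{Digest, Keccak256};

  // Address of a contract made via CREATE: the last 20 bytes of
  // keccak256(rlp([sender, nonce])). RLP is hand-rolled for the only case
  // needed here: a 20-byte address plus a nonce in 1 ..= 0x7f.
  fn create_address(sender: [u8; 20], nonce: u8) -> [u8; 20] {
    assert!((1 ..= 0x7f).contains(&nonce), "nonce must RLP-encode as itself");
    let mut rlp = Vec::with_capacity(23);
    rlp.push(0xc0 + 22); // list header: 21-byte address item + 1-byte nonce
    rlp.push(0x80 + 20); // string header for the 20-byte address
    rlp.extend(sender);
    rlp.push(nonce); // 0x01 ..= 0x7f encodes as a single byte
    Keccak256::digest(&rlp)[12 ..].try_into().unwrap()
  }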

coins/ethereum/contracts/IERC20.sol (new file, 20 lines)
// SPDX-License-Identifier: CC0
pragma solidity ^0.8.0;

interface IERC20 {
  event Transfer(address indexed from, address indexed to, uint256 value);
  event Approval(address indexed owner, address indexed spender, uint256 value);

  function name() external view returns (string memory);
  function symbol() external view returns (string memory);
  function decimals() external view returns (uint8);

  function totalSupply() external view returns (uint256);

  function balanceOf(address owner) external view returns (uint256);
  function transfer(address to, uint256 value) external returns (bool);
  function transferFrom(address from, address to, uint256 value) external returns (bool);

  function approve(address spender, uint256 value) external returns (bool);
  function allowance(address owner, address spender) external view returns (uint256);
}

coins/ethereum/contracts/Router.sol:
@@ -1,27 +1,24 @@
 // SPDX-License-Identifier: AGPLv3
 pragma solidity ^0.8.0;
 
+import "./IERC20.sol";
+
 import "./Schnorr.sol";
+import "./Sandbox.sol";
 
-contract Router is Schnorr {
-  // Contract initializer
-  // TODO: Replace with a MuSig of the genesis validators
-  address public initializer;
-
-  // Nonce is incremented for each batch of transactions executed
+contract Router {
+  // Nonce is incremented for each batch of transactions executed/key update
   uint256 public nonce;
 
-  // fixed parity for the public keys used in this contract
-  uint8 constant public KEY_PARITY = 27;
-
-  // current public key's x-coordinate
-  // note: this key must always use the fixed parity defined above
+  // Current public key's x-coordinate
+  // This key must always have the parity defined within the Schnorr contract
  bytes32 public seraiKey;
 
   struct OutInstruction {
     address to;
+    Call[] calls;
+
     uint256 value;
-    bytes data;
   }
 
   struct Signature {
@@ -29,62 +26,197 @@ contract Router is Schnorr {
     bytes32 s;
   }
 
+  event SeraiKeyUpdated(
+    uint256 indexed nonce,
+    bytes32 indexed key,
+    Signature signature
+  );
+  event InInstruction(
+    address indexed from,
+    address indexed coin,
+    uint256 amount,
+    bytes instruction
+  );
   // success is a uint256 representing a bitfield of transaction successes
-  event Executed(uint256 nonce, bytes32 batch, uint256 success);
+  event Executed(
+    uint256 indexed nonce,
+    bytes32 indexed batch,
+    uint256 success,
+    Signature signature
+  );
 
   // error types
-  error NotInitializer();
-  error AlreadyInitialized();
   error InvalidKey();
+  error InvalidSignature();
+  error InvalidAmount();
+  error FailedTransfer();
   error TooManyTransactions();
 
-  constructor() {
-    initializer = msg.sender;
+  modifier _updateSeraiKeyAtEndOfFn(
+    uint256 _nonce,
+    bytes32 key,
+    Signature memory sig
+  ) {
+    if (
+      (key == bytes32(0)) ||
+      ((bytes32(uint256(key) % Schnorr.Q)) != key)
+    ) {
+      revert InvalidKey();
+    }
+
+    _;
+
+    seraiKey = key;
+    emit SeraiKeyUpdated(_nonce, key, sig);
   }
 
-  // initSeraiKey can be called by the contract initializer to set the first
-  // public key, only if the public key has yet to be set.
-  function initSeraiKey(bytes32 _seraiKey) external {
-    if (msg.sender != initializer) revert NotInitializer();
-    if (seraiKey != 0) revert AlreadyInitialized();
-    if (_seraiKey == bytes32(0)) revert InvalidKey();
-    seraiKey = _seraiKey;
+  constructor(bytes32 _seraiKey) _updateSeraiKeyAtEndOfFn(
+    0,
+    _seraiKey,
+    Signature({ c: bytes32(0), s: bytes32(0) })
+  ) {
+    nonce = 1;
   }
 
-  // updateSeraiKey validates the given Schnorr signature against the current public key,
-  // and if successful, updates the contract's public key to the given one.
+  // updateSeraiKey validates the given Schnorr signature against the current
+  // public key, and if successful, updates the contract's public key to the
+  // given one.
   function updateSeraiKey(
     bytes32 _seraiKey,
-    Signature memory sig
-  ) public {
-    if (_seraiKey == bytes32(0)) revert InvalidKey();
-    bytes32 message = keccak256(abi.encodePacked("updateSeraiKey", _seraiKey));
-    if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature();
-    seraiKey = _seraiKey;
+    Signature calldata sig
+  ) external _updateSeraiKeyAtEndOfFn(nonce, _seraiKey, sig) {
+    bytes memory message =
+      abi.encodePacked("updateSeraiKey", block.chainid, nonce, _seraiKey);
+    nonce++;
+
+    if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) {
+      revert InvalidSignature();
+    }
   }
 
-  // execute accepts a list of transactions to execute as well as a Schnorr signature.
+  function inInstruction(
+    address coin,
+    uint256 amount,
+    bytes memory instruction
+  ) external payable {
+    if (coin == address(0)) {
+      if (amount != msg.value) {
+        revert InvalidAmount();
+      }
+    } else {
+      (bool success, bytes memory res) =
+        address(coin).call(
+          abi.encodeWithSelector(
+            IERC20.transferFrom.selector,
+            msg.sender,
+            address(this),
+            amount
+          )
+        );
+
+      // Require there was nothing returned, which is done by some non-standard
+      // tokens, or that the ERC20 contract did in fact return true
+      bool nonStandardResOrTrue =
+        (res.length == 0) || abi.decode(res, (bool));
+      if (!(success && nonStandardResOrTrue)) {
+        revert FailedTransfer();
+      }
+    }
+
+    /*
+      Due to fee-on-transfer tokens, emitting the amount directly is frowned
+      upon. The amount instructed to be transferred may not actually be the
+      amount transferred.
+
+      If we add nonReentrant to every single function which can affect the
+      balance, we can check the amount exactly matches. This prevents transfers
+      of less value than expected occurring, at least, not without an
+      additional transfer to top up the difference (which isn't routed through
+      this contract and accordingly isn't trying to artificially create
+      events).
+
+      If we don't add nonReentrant, a transfer can be started, and then a new
+      transfer for the difference can follow it up (again and again until a
+      rounding error is reached). This contract would believe all transfers
+      were done in full, despite each only being done in part (except for the
+      last one).
+
+      Given that fee-on-transfer tokens aren't intended to be supported (the
+      only token planned to be supported is Dai, which doesn't have any
+      fee-on-transfer logic), and that fee-on-transfer tokens aren't even able
+      to be supported at this time, we simply classify this entire class of
+      tokens as non-standard implementations which induce undefined behavior.
+      It is the Serai network's role not to add support for any non-standard
+      implementations.
+    */
+    emit InInstruction(msg.sender, coin, amount, instruction);
+  }
+
+  // execute accepts a list of transactions to execute as well as a signature.
   // if signature verification passes, the given transactions are executed.
   // if signature verification fails, this function will revert.
   function execute(
     OutInstruction[] calldata transactions,
-    Signature memory sig
-  ) public {
-    if (transactions.length > 256) revert TooManyTransactions();
-    bytes32 message = keccak256(abi.encode("execute", nonce, transactions));
+    Signature calldata sig
+  ) external {
+    if (transactions.length > 256) {
+      revert TooManyTransactions();
+    }
+
+    bytes memory message =
+      abi.encode("execute", block.chainid, nonce, transactions);
+    uint256 executed_with_nonce = nonce;
     // This prevents re-entrancy from causing double spends yet does allow
     // out-of-order execution via re-entrancy
     nonce++;
-    if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature();
+
+    if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) {
+      revert InvalidSignature();
+    }
+
     uint256 successes;
-    for(uint256 i = 0; i < transactions.length; i++) {
-      (bool success, ) = transactions[i].to.call{value: transactions[i].value, gas: 200_000}(transactions[i].data);
+    for (uint256 i = 0; i < transactions.length; i++) {
+      bool success;
+
+      // If there are no calls, send to `to` the value
+      if (transactions[i].calls.length == 0) {
+        (success, ) = transactions[i].to.call{
+          value: transactions[i].value,
+          gas: 5_000
+        }("");
+      } else {
+        // If there are calls, ignore `to`. Deploy a new Sandbox and proxy the
+        // calls through that
+        //
+        // We could use a single sandbox in order to reduce gas costs, yet that
+        // risks one person creating an approval that's hooked before another
+        // user's intended action executes, in order to drain their coins
+        //
+        // While technically, that would be a flaw in the sandboxed flow, this
+        // is robust and prevents such flaws from being possible
+        //
+        // We also don't want people to set state via the Sandbox and expect it
+        // to be available in the future, when anyone else could set a distinct
+        // value
+        Sandbox sandbox = new Sandbox();
+        (success, ) = address(sandbox).call{
+          value: transactions[i].value,
+          // TODO: Have the Call specify the gas up front
+          gas: 350_000
+        }(
+          abi.encodeWithSelector(
+            Sandbox.sandbox.selector,
+            transactions[i].calls
+          )
+        );
+      }
+
       assembly {
         successes := or(successes, shl(i, success))
       }
     }
-    emit Executed(nonce, message, successes);
+    emit Executed(
+      executed_with_nonce,
+      keccak256(message),
+      successes,
+      sig
+    );
  }
 }
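
The `Executed` event compresses per-transaction results into one word: bit i of `success` is set exactly when `transactions[i]` succeeded, matching the `shl(i, success)` assembly above. A small decoder sketch, treating the uint256 as a big-endian `[u8; 32]` (an assumption of this illustration):

  // Whether transaction `i` of a batch succeeded, given the `success`
  // bitfield from the Executed event as a big-endian 32-byte word.
  fn tx_succeeded(success: [u8; 32], i: usize) -> bool {
    assert!(i < 256);
    // Bit i counts from the least-significant end of the word
    (success[31 - (i / 8)] >> (i % 8)) & 1 == 1
  }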

coins/ethereum/contracts/Sandbox.sol (new file, 48 lines)
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.24;

struct Call {
  address to;
  uint256 value;
  bytes data;
}

// A minimal sandbox focused on gas efficiency.
//
// The first call is executed if any of the calls fail, making it a fallback.
// All other calls are executed sequentially.
contract Sandbox {
  error AlreadyCalled();
  error CallsFailed();

  function sandbox(Call[] calldata calls) external payable {
    // Prevent re-entrancy due to this executing arbitrary calls from anyone
    // and anywhere
    bool called;
    assembly { called := tload(0) }
    if (called) {
      revert AlreadyCalled();
    }
    assembly { tstore(0, 1) }

    // Execute the calls, starting from 1
    for (uint256 i = 1; i < calls.length; i++) {
      (bool success, ) =
        calls[i].to.call{ value: calls[i].value }(calls[i].data);

      // If this call failed, execute the fallback (call 0)
      if (!success) {
        (success, ) =
          calls[0].to.call{ value: address(this).balance }(calls[0].data);
        // If this call also failed, revert entirely
        if (!success) {
          revert CallsFailed();
        }
        return;
      }
    }

    // We don't clear the re-entrancy guard as this contract should never be
    // called again, so there's no reason to spend the effort
  }
}
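
Restated, the Sandbox's control flow is: calls[1 ..] run in order; the first failure triggers calls[0] as a fallback and ends execution; and if the fallback also fails, everything reverts. A pure-Rust model of those semantics, purely illustrative (each call reduced to a closure reporting success):

  enum Outcome { AllSucceeded, FellBack, Reverted }

  // Model of Sandbox::sandbox: calls[0] is the fallback, the rest execute
  // sequentially until one fails.
  fn sandbox(calls: &[Box<dyn Fn() -> bool>]) -> Outcome {
    for call in calls.iter().skip(1) {
      if !call() {
        return if calls[0]() { Outcome::FellBack } else { Outcome::Reverted };
      }
    }
    Outcome::AllSucceeded
  }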

coins/ethereum/contracts/Schnorr.sol:
@@ -2,38 +2,43 @@
 pragma solidity ^0.8.0;
 
 // see https://github.com/noot/schnorr-verify for implementation details
-contract Schnorr {
+library Schnorr {
   // secp256k1 group order
   uint256 constant public Q =
     0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141;
 
-  error InvalidSOrA();
-  error InvalidSignature();
+  // Fixed parity for the public keys used in this contract
+  // This avoids spending a word passing the parity in a similar style to
+  // Bitcoin's Taproot
+  uint8 constant public KEY_PARITY = 27;
 
-  // parity := public key y-coord parity (27 or 28)
-  // px := public key x-coord
+  error InvalidSOrA();
+  error MalformedSignature();
+
+  // px := public key x-coord, where the public key has a parity of KEY_PARITY
   // message := 32-byte hash of the message
   // c := schnorr signature challenge
   // s := schnorr signature
   function verify(
-    uint8 parity,
     bytes32 px,
-    bytes32 message,
+    bytes memory message,
     bytes32 c,
     bytes32 s
-  ) public view returns (bool) {
-    // ecrecover = (m, v, r, s);
+  ) internal pure returns (bool) {
+    // ecrecover = (m, v, r, s) -> key
+    // We instead pass the following to obtain the nonce (not the key)
+    // Then we hash it and verify it matches the challenge
    bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
    bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q));
 
+    // For safety, we want each input to ecrecover (sa, px, ca) to be non-zero
+    // The ecrecover precompile checks `r` and `s` (`px` and `ca`) are non-zero
+    // That leaves us to check `sa` is non-zero
     if (sa == 0) revert InvalidSOrA();
-    // the ecrecover precompile implementation checks that the `r` and `s`
-    // inputs are non-zero (in this case, `px` and `ca`), thus we don't need to
-    // check if they're zero.
-    address R = ecrecover(sa, parity, px, ca);
-    if (R == address(0)) revert InvalidSignature();
-    return c == keccak256(
-      abi.encodePacked(R, uint8(parity), px, block.chainid, message)
-    );
+    address R = ecrecover(sa, KEY_PARITY, px, ca);
+    if (R == address(0)) revert MalformedSignature();
+
+    // Check the signature is correct by rebuilding the challenge
+    return c == keccak256(abi.encodePacked(R, px, message));
   }
 }
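
A line of algebra shows why this works. ecrecover(h, v, r, s) returns the address of the key $r^{-1}(sR - hG)$, where $R$ is the curve point with x-coordinate $r$ and parity $v$. The contract passes $h = -s \cdot p_x$, $r = p_x$, and $s = -c \cdot p_x$ (all mod $Q$), with $P$ the public key whose x-coordinate is $p_x$ and whose parity is KEY_PARITY, so the recovered value is

$$ p_x^{-1} \left( (-c\, p_x) P - (-s\, p_x) G \right) = sG - cP. $$

Assuming the signer uses the $s = k + c x$ Schnorr convention with nonce $R = kG$ (the convention the surrounding crate's challenge function implies), this equals $kG + cxG - cxG = R$, so rebuilding the challenge as keccak256(R_address, px, message) and comparing it against $c$ verifies the signature.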

coins/ethereum/src/abi/mod.rs:
@@ -1,6 +1,37 @@
+use alloy_sol_types::sol;
+
 #[rustfmt::skip]
+#[allow(warnings)]
+#[allow(needless_pass_by_value)]
 #[allow(clippy::all)]
-pub(crate) mod schnorr;
+#[allow(clippy::ignored_unit_patterns)]
+#[allow(clippy::redundant_closure_for_method_calls)]
+mod erc20_container {
+  use super::*;
+  sol!("contracts/IERC20.sol");
+}
+pub use erc20_container::IERC20 as erc20;
 
 #[rustfmt::skip]
+#[allow(warnings)]
+#[allow(needless_pass_by_value)]
 #[allow(clippy::all)]
-pub(crate) mod router;
+#[allow(clippy::ignored_unit_patterns)]
+#[allow(clippy::redundant_closure_for_method_calls)]
+mod deployer_container {
+  use super::*;
+  sol!("contracts/Deployer.sol");
+}
+pub use deployer_container::Deployer as deployer;
+
+#[rustfmt::skip]
+#[allow(warnings)]
+#[allow(needless_pass_by_value)]
+#[allow(clippy::all)]
+#[allow(clippy::ignored_unit_patterns)]
+#[allow(clippy::redundant_closure_for_method_calls)]
+mod router_container {
+  use super::*;
+  sol!(Router, "artifacts/Router.abi");
+}
+pub use router_container::Router as router;
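
Each sol! invocation is wrapped in its own module because the macro expands items with fixed names that would collide if two contracts were expanded side by side; the pub use then re-exports each under a short alias. As a sketch of how the generated bindings get used, assuming alloy-sol-types' SolCall::abi_encode and crate-internal paths (the argument values are placeholders):

  use alloy_sol_types::SolCall;
  use crate::abi::erc20;

  // Build the calldata for an ERC20 transferFrom: sol! generates one struct
  // per function, and abi_encode prepends the 4-byte selector before
  // ABI-encoding the arguments
  fn transfer_from_calldata(from: [u8; 20], to: [u8; 20], value: u64) -> Vec<u8> {
    erc20::transferFromCall {
      from: from.into(),
      to: to.into(),
      value: alloy_core::primitives::U256::from(value),
    }
    .abi_encode()
  }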

coins/ethereum/src/abi/router.rs (new file, 1,164 lines): diff suppressed because it is too large

coins/ethereum/src/abi/schnorr.rs (new file, 410 lines)
pub use schnorr::*;
/// This module was auto-generated with ethers-rs Abigen.
/// More information at: <https://github.com/gakonst/ethers-rs>
#[allow(
  clippy::enum_variant_names,
  clippy::too_many_arguments,
  clippy::upper_case_acronyms,
  clippy::type_complexity,
  dead_code,
  non_camel_case_types,
)]
pub mod schnorr {
  #[allow(deprecated)]
  fn __abi() -> ::ethers_core::abi::Abi {
    ::ethers_core::abi::ethabi::Contract {
      constructor: ::core::option::Option::None,
      functions: ::core::convert::From::from([
        (
          ::std::borrow::ToOwned::to_owned("Q"),
          ::std::vec![
            ::ethers_core::abi::ethabi::Function {
              name: ::std::borrow::ToOwned::to_owned("Q"),
              inputs: ::std::vec![],
              outputs: ::std::vec![
                ::ethers_core::abi::ethabi::Param {
                  name: ::std::string::String::new(),
                  kind: ::ethers_core::abi::ethabi::ParamType::Uint(256usize),
                  internal_type: ::core::option::Option::Some(
                    ::std::borrow::ToOwned::to_owned("uint256"),
                  ),
                },
              ],
              constant: ::core::option::Option::None,
              state_mutability: ::ethers_core::abi::ethabi::StateMutability::View,
            },
          ],
        ),
        (
          ::std::borrow::ToOwned::to_owned("verify"),
          ::std::vec![
            ::ethers_core::abi::ethabi::Function {
              name: ::std::borrow::ToOwned::to_owned("verify"),
              inputs: ::std::vec![
                ::ethers_core::abi::ethabi::Param {
                  name: ::std::borrow::ToOwned::to_owned("parity"),
                  kind: ::ethers_core::abi::ethabi::ParamType::Uint(8usize),
                  internal_type: ::core::option::Option::Some(
                    ::std::borrow::ToOwned::to_owned("uint8"),
                  ),
                },
                ::ethers_core::abi::ethabi::Param {
                  name: ::std::borrow::ToOwned::to_owned("px"),
                  kind: ::ethers_core::abi::ethabi::ParamType::FixedBytes(
                    32usize,
                  ),
                  internal_type: ::core::option::Option::Some(
                    ::std::borrow::ToOwned::to_owned("bytes32"),
                  ),
                },
                ::ethers_core::abi::ethabi::Param {
                  name: ::std::borrow::ToOwned::to_owned("message"),
                  kind: ::ethers_core::abi::ethabi::ParamType::FixedBytes(
                    32usize,
                  ),
                  internal_type: ::core::option::Option::Some(
                    ::std::borrow::ToOwned::to_owned("bytes32"),
                  ),
                },
                ::ethers_core::abi::ethabi::Param {
                  name: ::std::borrow::ToOwned::to_owned("c"),
                  kind: ::ethers_core::abi::ethabi::ParamType::FixedBytes(
                    32usize,
                  ),
                  internal_type: ::core::option::Option::Some(
                    ::std::borrow::ToOwned::to_owned("bytes32"),
                  ),
                },
                ::ethers_core::abi::ethabi::Param {
                  name: ::std::borrow::ToOwned::to_owned("s"),
                  kind: ::ethers_core::abi::ethabi::ParamType::FixedBytes(
                    32usize,
                  ),
                  internal_type: ::core::option::Option::Some(
                    ::std::borrow::ToOwned::to_owned("bytes32"),
                  ),
                },
              ],
              outputs: ::std::vec![
                ::ethers_core::abi::ethabi::Param {
                  name: ::std::string::String::new(),
                  kind: ::ethers_core::abi::ethabi::ParamType::Bool,
                  internal_type: ::core::option::Option::Some(
                    ::std::borrow::ToOwned::to_owned("bool"),
                  ),
                },
              ],
              constant: ::core::option::Option::None,
              state_mutability: ::ethers_core::abi::ethabi::StateMutability::View,
            },
          ],
        ),
      ]),
      events: ::std::collections::BTreeMap::new(),
      errors: ::core::convert::From::from([
        (
          ::std::borrow::ToOwned::to_owned("InvalidSOrA"),
          ::std::vec![
            ::ethers_core::abi::ethabi::AbiError {
              name: ::std::borrow::ToOwned::to_owned("InvalidSOrA"),
              inputs: ::std::vec![],
            },
          ],
        ),
        (
          ::std::borrow::ToOwned::to_owned("InvalidSignature"),
          ::std::vec![
            ::ethers_core::abi::ethabi::AbiError {
              name: ::std::borrow::ToOwned::to_owned("InvalidSignature"),
              inputs: ::std::vec![],
            },
          ],
        ),
      ]),
      receive: false,
      fallback: false,
    }
  }
  ///The parsed JSON ABI of the contract.
  pub static SCHNORR_ABI: ::ethers_contract::Lazy<::ethers_core::abi::Abi> = ::ethers_contract::Lazy::new(
    __abi,
  );
  pub struct Schnorr<M>(::ethers_contract::Contract<M>);
  impl<M> ::core::clone::Clone for Schnorr<M> {
    fn clone(&self) -> Self {
      Self(::core::clone::Clone::clone(&self.0))
    }
  }
  impl<M> ::core::ops::Deref for Schnorr<M> {
    type Target = ::ethers_contract::Contract<M>;
    fn deref(&self) -> &Self::Target {
      &self.0
    }
  }
  impl<M> ::core::ops::DerefMut for Schnorr<M> {
    fn deref_mut(&mut self) -> &mut Self::Target {
      &mut self.0
    }
  }
  impl<M> ::core::fmt::Debug for Schnorr<M> {
    fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
      f.debug_tuple(::core::stringify!(Schnorr)).field(&self.address()).finish()
    }
  }
  impl<M: ::ethers_providers::Middleware> Schnorr<M> {
    /// Creates a new contract instance with the specified `ethers` client at
    /// `address`. The contract derefs to a `ethers::Contract` object.
    pub fn new<T: Into<::ethers_core::types::Address>>(
      address: T,
      client: ::std::sync::Arc<M>,
    ) -> Self {
      Self(
        ::ethers_contract::Contract::new(
          address.into(),
          SCHNORR_ABI.clone(),
          client,
        ),
      )
    }
    ///Calls the contract's `Q` (0xe493ef8c) function
    pub fn q(
      &self,
    ) -> ::ethers_contract::builders::ContractCall<M, ::ethers_core::types::U256> {
      self.0
        .method_hash([228, 147, 239, 140], ())
        .expect("method not found (this should never happen)")
    }
    ///Calls the contract's `verify` (0x9186da4c) function
    pub fn verify(
      &self,
      parity: u8,
      px: [u8; 32],
      message: [u8; 32],
      c: [u8; 32],
      s: [u8; 32],
    ) -> ::ethers_contract::builders::ContractCall<M, bool> {
      self.0
        .method_hash([145, 134, 218, 76], (parity, px, message, c, s))
        .expect("method not found (this should never happen)")
    }
  }
  impl<M: ::ethers_providers::Middleware> From<::ethers_contract::Contract<M>>
  for Schnorr<M> {
    fn from(contract: ::ethers_contract::Contract<M>) -> Self {
      Self::new(contract.address(), contract.client())
    }
  }
  ///Custom Error type `InvalidSOrA` with signature `InvalidSOrA()` and selector `0x4e99a12e`
  #[derive(
    Clone,
    ::ethers_contract::EthError,
    ::ethers_contract::EthDisplay,
    Default,
    Debug,
    PartialEq,
    Eq,
    Hash
  )]
  #[etherror(name = "InvalidSOrA", abi = "InvalidSOrA()")]
  pub struct InvalidSOrA;
  ///Custom Error type `InvalidSignature` with signature `InvalidSignature()` and selector `0x8baa579f`
  #[derive(
    Clone,
    ::ethers_contract::EthError,
    ::ethers_contract::EthDisplay,
    Default,
    Debug,
    PartialEq,
    Eq,
    Hash
  )]
  #[etherror(name = "InvalidSignature", abi = "InvalidSignature()")]
  pub struct InvalidSignature;
  ///Container type for all of the contract's custom errors
  #[derive(Clone, ::ethers_contract::EthAbiType, Debug, PartialEq, Eq, Hash)]
  pub enum SchnorrErrors {
    InvalidSOrA(InvalidSOrA),
    InvalidSignature(InvalidSignature),
    /// The standard solidity revert string, with selector
    /// Error(string) -- 0x08c379a0
    RevertString(::std::string::String),
  }
  impl ::ethers_core::abi::AbiDecode for SchnorrErrors {
    fn decode(
      data: impl AsRef<[u8]>,
    ) -> ::core::result::Result<Self, ::ethers_core::abi::AbiError> {
      let data = data.as_ref();
      if let Ok(decoded) = <::std::string::String as ::ethers_core::abi::AbiDecode>::decode(
        data,
      ) {
        return Ok(Self::RevertString(decoded));
      }
      if let Ok(decoded) = <InvalidSOrA as ::ethers_core::abi::AbiDecode>::decode(
        data,
      ) {
        return Ok(Self::InvalidSOrA(decoded));
      }
      if let Ok(decoded) = <InvalidSignature as ::ethers_core::abi::AbiDecode>::decode(
        data,
      ) {
        return Ok(Self::InvalidSignature(decoded));
      }
      Err(::ethers_core::abi::Error::InvalidData.into())
    }
  }
  impl ::ethers_core::abi::AbiEncode for SchnorrErrors {
    fn encode(self) -> ::std::vec::Vec<u8> {
      match self {
        Self::InvalidSOrA(element) => {
          ::ethers_core::abi::AbiEncode::encode(element)
        }
        Self::InvalidSignature(element) => {
          ::ethers_core::abi::AbiEncode::encode(element)
        }
        Self::RevertString(s) => ::ethers_core::abi::AbiEncode::encode(s),
      }
    }
  }
  impl ::ethers_contract::ContractRevert for SchnorrErrors {
    fn valid_selector(selector: [u8; 4]) -> bool {
      match selector {
        [0x08, 0xc3, 0x79, 0xa0] => true,
        _ if selector
          == <InvalidSOrA as ::ethers_contract::EthError>::selector() => true,
        _ if selector
          == <InvalidSignature as ::ethers_contract::EthError>::selector() => {
          true
        }
        _ => false,
      }
    }
  }
  impl ::core::fmt::Display for SchnorrErrors {
    fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
      match self {
        Self::InvalidSOrA(element) => ::core::fmt::Display::fmt(element, f),
        Self::InvalidSignature(element) => ::core::fmt::Display::fmt(element, f),
        Self::RevertString(s) => ::core::fmt::Display::fmt(s, f),
      }
    }
  }
  impl ::core::convert::From<::std::string::String> for SchnorrErrors {
    fn from(value: String) -> Self {
      Self::RevertString(value)
    }
  }
  impl ::core::convert::From<InvalidSOrA> for SchnorrErrors {
    fn from(value: InvalidSOrA) -> Self {
      Self::InvalidSOrA(value)
    }
  }
  impl ::core::convert::From<InvalidSignature> for SchnorrErrors {
    fn from(value: InvalidSignature) -> Self {
      Self::InvalidSignature(value)
    }
  }
  ///Container type for all input parameters for the `Q` function with signature `Q()` and selector `0xe493ef8c`
  #[derive(
    Clone,
    ::ethers_contract::EthCall,
    ::ethers_contract::EthDisplay,
    Default,
    Debug,
    PartialEq,
    Eq,
    Hash
  )]
  #[ethcall(name = "Q", abi = "Q()")]
  pub struct QCall;
  ///Container type for all input parameters for the `verify` function with signature `verify(uint8,bytes32,bytes32,bytes32,bytes32)` and selector `0x9186da4c`
  #[derive(
    Clone,
    ::ethers_contract::EthCall,
    ::ethers_contract::EthDisplay,
    Default,
    Debug,
    PartialEq,
    Eq,
    Hash
  )]
  #[ethcall(name = "verify", abi = "verify(uint8,bytes32,bytes32,bytes32,bytes32)")]
  pub struct VerifyCall {
    pub parity: u8,
    pub px: [u8; 32],
    pub message: [u8; 32],
    pub c: [u8; 32],
    pub s: [u8; 32],
  }
  ///Container type for all of the contract's call
  #[derive(Clone, ::ethers_contract::EthAbiType, Debug, PartialEq, Eq, Hash)]
  pub enum SchnorrCalls {
    Q(QCall),
    Verify(VerifyCall),
  }
  impl ::ethers_core::abi::AbiDecode for SchnorrCalls {
    fn decode(
      data: impl AsRef<[u8]>,
    ) -> ::core::result::Result<Self, ::ethers_core::abi::AbiError> {
      let data = data.as_ref();
      if let Ok(decoded) = <QCall as ::ethers_core::abi::AbiDecode>::decode(data) {
        return Ok(Self::Q(decoded));
      }
      if let Ok(decoded) = <VerifyCall as ::ethers_core::abi::AbiDecode>::decode(
        data,
      ) {
        return Ok(Self::Verify(decoded));
      }
      Err(::ethers_core::abi::Error::InvalidData.into())
    }
  }
  impl ::ethers_core::abi::AbiEncode for SchnorrCalls {
    fn encode(self) -> Vec<u8> {
      match self {
        Self::Q(element) => ::ethers_core::abi::AbiEncode::encode(element),
        Self::Verify(element) => ::ethers_core::abi::AbiEncode::encode(element),
      }
    }
  }
  impl ::core::fmt::Display for SchnorrCalls {
    fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
      match self {
        Self::Q(element) => ::core::fmt::Display::fmt(element, f),
        Self::Verify(element) => ::core::fmt::Display::fmt(element, f),
      }
    }
  }
  impl ::core::convert::From<QCall> for SchnorrCalls {
    fn from(value: QCall) -> Self {
      Self::Q(value)
    }
  }
  impl ::core::convert::From<VerifyCall> for SchnorrCalls {
    fn from(value: VerifyCall) -> Self {
      Self::Verify(value)
    }
  }
  ///Container type for all return fields from the `Q` function with signature `Q()` and selector `0xe493ef8c`
  #[derive(
    Clone,
    ::ethers_contract::EthAbiType,
    ::ethers_contract::EthAbiCodec,
    Default,
    Debug,
    PartialEq,
    Eq,
    Hash
  )]
  pub struct QReturn(pub ::ethers_core::types::U256);
  ///Container type for all return fields from the `verify` function with signature `verify(uint8,bytes32,bytes32,bytes32,bytes32)` and selector `0x9186da4c`
  #[derive(
    Clone,
    ::ethers_contract::EthAbiType,
    ::ethers_contract::EthAbiCodec,
    Default,
    Debug,
    PartialEq,
    Eq,
    Hash
  )]
  pub struct VerifyReturn(pub bool);
}
@@ -1,91 +1,185 @@
-use sha3::{Digest, Keccak256};
-
 use group::ff::PrimeField;
 use k256::{
-  elliptic_curve::{
-    bigint::ArrayEncoding, ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint,
-  },
-  ProjectivePoint, Scalar, U256,
+  elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint},
+  ProjectivePoint, Scalar, U256 as KU256,
 };
+#[cfg(test)]
+use k256::{elliptic_curve::point::DecompressPoint, AffinePoint};

 use frost::{
   algorithm::{Hram, SchnorrSignature},
-  curve::Secp256k1,
+  curve::{Ciphersuite, Secp256k1},
 };
+
+use alloy_core::primitives::{Parity, Signature as AlloySignature};
+use alloy_consensus::{SignableTransaction, Signed, TxLegacy};
+
+use crate::abi::router::{Signature as AbiSignature};

 pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] {
-  Keccak256::digest(data).into()
+  alloy_core::primitives::keccak256(data).into()
 }

-pub(crate) fn address(point: &ProjectivePoint) -> [u8; 20] {
+pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar {
+  <Scalar as Reduce<KU256>>::reduce_bytes(&keccak256(data).into())
+}
+
+pub fn address(point: &ProjectivePoint) -> [u8; 20] {
   let encoded_point = point.to_encoded_point(false);
   // Last 20 bytes of the hash of the concatenated x and y coordinates
   // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point
   keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap()
 }

+pub(crate) fn deterministically_sign(tx: &TxLegacy) -> Signed<TxLegacy> {
+  assert!(
+    tx.chain_id.is_none(),
+    "chain ID was Some when deterministically signing a TX (causing a non-deterministic signer)"
+  );
+
+  let sig_hash = tx.signature_hash().0;
+  let mut r = hash_to_scalar(&[sig_hash.as_slice(), b"r"].concat());
+  let mut s = hash_to_scalar(&[sig_hash.as_slice(), b"s"].concat());
+  loop {
+    let r_bytes: [u8; 32] = r.to_repr().into();
+    let s_bytes: [u8; 32] = s.to_repr().into();
+    let v = Parity::NonEip155(false);
+    let signature =
+      AlloySignature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), v).unwrap();
+    let tx = tx.clone().into_signed(signature);
+    if tx.recover_signer().is_ok() {
+      return tx;
+    }
+
+    // Re-hash until valid
+    r = hash_to_scalar(r_bytes.as_ref());
+    s = hash_to_scalar(s_bytes.as_ref());
+  }
+}
+
+/// The public key for a Schnorr-signing account.
 #[allow(non_snake_case)]
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
 pub struct PublicKey {
-  pub A: ProjectivePoint,
-  pub px: Scalar,
-  pub parity: u8,
+  pub(crate) A: ProjectivePoint,
+  pub(crate) px: Scalar,
 }

 impl PublicKey {
+  /// Construct a new `PublicKey`.
+  ///
+  /// This will return None if the provided point isn't eligible to be a public key (due to
+  /// bounds such as parity).
   #[allow(non_snake_case)]
   pub fn new(A: ProjectivePoint) -> Option<PublicKey> {
     let affine = A.to_affine();
-    let parity = u8::from(bool::from(affine.y_is_odd())) + 27;
-    if parity != 27 {
+    // Only allow even keys to save a word within Ethereum
+    let is_odd = bool::from(affine.y_is_odd());
+    if is_odd {
       None?;
     }

     let x_coord = affine.x();
-    let x_coord_scalar = <Scalar as Reduce<U256>>::reduce_bytes(&x_coord);
+    let x_coord_scalar = <Scalar as Reduce<KU256>>::reduce_bytes(&x_coord);
     // Return None if a reduction would occur
+    // Reductions would be incredibly unlikely and shouldn't be an issue, yet it's one less
+    // headache/concern to have
+    // This does ban a trivial amount of public keys
     if x_coord_scalar.to_repr() != x_coord {
       None?;
     }

-    Some(PublicKey { A, px: x_coord_scalar, parity })
+    Some(PublicKey { A, px: x_coord_scalar })
   }
+
+  pub fn point(&self) -> ProjectivePoint {
+    self.A
+  }
+
+  pub(crate) fn eth_repr(&self) -> [u8; 32] {
+    self.px.to_repr().into()
+  }
+
+  #[cfg(test)]
+  pub(crate) fn from_eth_repr(repr: [u8; 32]) -> Option<Self> {
+    #[allow(non_snake_case)]
+    let A = Option::<AffinePoint>::from(AffinePoint::decompress(&repr.into(), 0.into()))?.into();
+    Option::from(Scalar::from_repr(repr.into())).map(|px| PublicKey { A, px })
+  }
 }

+/// The HRAm to use for the Schnorr contract.
 #[derive(Clone, Default)]
 pub struct EthereumHram {}
 impl Hram<Secp256k1> for EthereumHram {
   #[allow(non_snake_case)]
   fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar {
-    let a_encoded_point = A.to_encoded_point(true);
-    let mut a_encoded = a_encoded_point.as_ref().to_owned();
-    a_encoded[0] += 25; // Ethereum uses 27/28 for point parity
-    assert!((a_encoded[0] == 27) || (a_encoded[0] == 28));
+    let x_coord = A.to_affine().x();
+
     let mut data = address(R).to_vec();
-    data.append(&mut a_encoded);
+    data.extend(x_coord.as_slice());
     data.extend(m);
-    Scalar::reduce(U256::from_be_slice(&keccak256(&data)))
+
+    <Scalar as Reduce<KU256>>::reduce_bytes(&keccak256(&data).into())
   }
 }

+/// A signature for the Schnorr contract.
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
 pub struct Signature {
   pub(crate) c: Scalar,
   pub(crate) s: Scalar,
 }
 impl Signature {
+  pub fn verify(&self, public_key: &PublicKey, message: &[u8]) -> bool {
+    #[allow(non_snake_case)]
+    let R = (Secp256k1::generator() * self.s) - (public_key.A * self.c);
+    EthereumHram::hram(&R, &public_key.A, message) == self.c
+  }
+
+  /// Construct a new `Signature`.
+  ///
+  /// This will return None if the signature is invalid.
   pub fn new(
     public_key: &PublicKey,
-    chain_id: U256,
-    m: &[u8],
+    message: &[u8],
     signature: SchnorrSignature<Secp256k1>,
   ) -> Option<Signature> {
-    let c = EthereumHram::hram(
-      &signature.R,
-      &public_key.A,
-      &[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(),
-    );
+    let c = EthereumHram::hram(&signature.R, &public_key.A, message);
     if !signature.verify(public_key.A, c) {
       None?;
     }
-    Some(Signature { c, s: signature.s })
+
+    let res = Signature { c, s: signature.s };
+    assert!(res.verify(public_key, message));
+    Some(res)
+  }
+
+  pub fn c(&self) -> Scalar {
+    self.c
+  }
+  pub fn s(&self) -> Scalar {
+    self.s
+  }
+
+  pub fn to_bytes(&self) -> [u8; 64] {
+    let mut res = [0; 64];
+    res[.. 32].copy_from_slice(self.c.to_repr().as_ref());
+    res[32 ..].copy_from_slice(self.s.to_repr().as_ref());
+    res
+  }
+
+  pub fn from_bytes(bytes: [u8; 64]) -> std::io::Result<Self> {
+    let mut reader = bytes.as_slice();
+    let c = Secp256k1::read_F(&mut reader)?;
+    let s = Secp256k1::read_F(&mut reader)?;
+    Ok(Signature { c, s })
+  }
+}
+impl From<&Signature> for AbiSignature {
+  fn from(sig: &Signature) -> AbiSignature {
+    let c: [u8; 32] = sig.c.to_repr().into();
+    let s: [u8; 32] = sig.s.to_repr().into();
+    AbiSignature { c: c.into(), s: s.into() }
   }
 }
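A minimal standalone sketch of the verification identity the new `EthereumHram` enables (assuming only the `k256` and `sha3` crates; `hram` mirrors the hunk above, binding the challenge to the nonce's Ethereum address and the key's x-coordinate rather than a full SEC1 encoding):

use k256::{
  elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint},
  ProjectivePoint, Scalar, U256,
};
use sha3::{Digest, Keccak256};

fn keccak256(data: &[u8]) -> [u8; 32] {
  Keccak256::digest(data).into()
}

// Last 20 bytes of the hash of the uncompressed x || y coordinates, as above
fn address(point: &ProjectivePoint) -> [u8; 20] {
  let encoded = point.to_encoded_point(false);
  keccak256(&encoded.as_ref()[1 .. 65])[12 ..].try_into().unwrap()
}

// c = keccak256(address(R) || A.x || m), reduced into a scalar
fn hram(r: &ProjectivePoint, a: &ProjectivePoint, m: &[u8]) -> Scalar {
  let mut data = address(r).to_vec();
  data.extend(a.to_affine().x().as_slice());
  data.extend(m);
  <Scalar as Reduce<U256>>::reduce_bytes(&keccak256(&data).into())
}

// Verifying (c, s) over m for an even-y key A: recompute R = sG - cA and
// check the challenge matches, exactly as `Signature::verify` does
fn verify(a: &ProjectivePoint, m: &[u8], c: Scalar, s: Scalar) -> bool {
  let r = (ProjectivePoint::GENERATOR * s) - (*a * c);
  hram(&r, a, m) == c
}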
120 coins/ethereum/src/deployer.rs Normal file
@@ -0,0 +1,120 @@
use std::sync::Arc;

use alloy_core::primitives::{hex::FromHex, Address, B256, U256, Bytes, TxKind};
use alloy_consensus::{Signed, TxLegacy};

use alloy_sol_types::{SolCall, SolEvent};

use alloy_rpc_types::{BlockNumberOrTag, Filter};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};

use crate::{
  Error,
  crypto::{self, keccak256, PublicKey},
  router::Router,
};
pub use crate::abi::deployer as abi;

/// The Deployer contract for the Router contract.
///
/// This Deployer has a deterministic address, letting it be immediately identified on any
/// compatible chain. It then supports retrieving the Router contract's address (which isn't
/// deterministic) using a single log query.
#[derive(Clone, Debug)]
pub struct Deployer;
impl Deployer {
  /// Obtain the transaction to deploy this contract, already signed.
  ///
  /// The account this transaction is sent from (which is populated in `from`) must be sufficiently
  /// funded for this transaction to be submitted. No one knows this account's private key, so ETH
  /// sent to it can be neither misappropriated nor returned.
  pub fn deployment_tx() -> Signed<TxLegacy> {
    let bytecode = include_str!("../artifacts/Deployer.bin");
    let bytecode =
      Bytes::from_hex(bytecode).expect("compiled-in Deployer bytecode wasn't valid hex");

    let tx = TxLegacy {
      chain_id: None,
      nonce: 0,
      gas_price: 100_000_000_000u128,
      // TODO: Use a more accurate gas limit
      gas_limit: 1_000_000u128,
      to: TxKind::Create,
      value: U256::ZERO,
      input: bytecode,
    };

    crypto::deterministically_sign(&tx)
  }

  /// Obtain the deterministic address for this contract.
  pub fn address() -> [u8; 20] {
    let deployer_deployer =
      Self::deployment_tx().recover_signer().expect("deployment_tx didn't have a valid signature");
    **Address::create(&deployer_deployer, 0)
  }

  /// Construct a new view of the `Deployer`.
  pub async fn new(provider: Arc<RootProvider<SimpleRequest>>) -> Result<Option<Self>, Error> {
    let address = Self::address();
    #[cfg(not(test))]
    let required_block = BlockNumberOrTag::Finalized;
    #[cfg(test)]
    let required_block = BlockNumberOrTag::Latest;
    let code = provider
      .get_code_at(address.into(), required_block.into())
      .await
      .map_err(|_| Error::ConnectionError)?;
    // Contract has yet to be deployed
    if code.is_empty() {
      return Ok(None);
    }
    Ok(Some(Self))
  }

  /// Yield the `ContractCall` necessary to deploy the Router.
  pub fn deploy_router(&self, key: &PublicKey) -> TxLegacy {
    TxLegacy {
      to: TxKind::Call(Self::address().into()),
      input: abi::deployCall::new((Router::init_code(key).into(),)).abi_encode().into(),
      gas_limit: 1_000_000,
      ..Default::default()
    }
  }

  /// Find the first Router deployed with the specified key as its first key.
  ///
  /// This is the Router Serai will use, and is the only way to construct a `Router`.
  pub async fn find_router(
    &self,
    provider: Arc<RootProvider<SimpleRequest>>,
    key: &PublicKey,
  ) -> Result<Option<Router>, Error> {
    let init_code = Router::init_code(key);
    let init_code_hash = keccak256(&init_code);

    #[cfg(not(test))]
    let to_block = BlockNumberOrTag::Finalized;
    #[cfg(test)]
    let to_block = BlockNumberOrTag::Latest;

    // Find the first log using this init code (where the init code is binding to the key)
    // TODO: Make an abstraction for event filtering (de-duplicating common code)
    let filter =
      Filter::new().from_block(0).to_block(to_block).address(Address::from(Self::address()));
    let filter = filter.event_signature(abi::Deployment::SIGNATURE_HASH);
    let filter = filter.topic1(B256::from(init_code_hash));
    let logs = provider.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

    let Some(first_log) = logs.first() else { return Ok(None) };
    let router = first_log
      .log_decode::<abi::Deployment>()
      .map_err(|_| Error::ConnectionError)?
      .inner
      .data
      .created;

    Ok(Some(Router::new(provider, router)))
  }
}
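A usage sketch (the funding step is hypothetical; every name is from this file). The one pre-signed deployment TX yields the same signer, and thus the same nonce-0 CREATE address, on every chain it's broadcast to:

use ethereum_serai::{alloy_core::primitives::Address, deployer::Deployer};

fn main() {
  let tx = Deployer::deployment_tx();
  // The signer falls out of the deterministic signature; no one holds its key
  let signer = tx.recover_signer().expect("deployment TX had an invalid signature");
  // The Deployer lives at that signer's first (nonce 0) CREATE address
  assert_eq!(Deployer::address(), **Address::create(&signer, 0));
  // Fund `signer` with gas_price * gas_limit, then broadcast `tx` as-is
}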
118 coins/ethereum/src/erc20.rs Normal file
@@ -0,0 +1,118 @@
use std::{sync::Arc, collections::HashSet};

use alloy_core::primitives::{Address, B256, U256};

use alloy_sol_types::{SolInterface, SolEvent};

use alloy_rpc_types::{BlockNumberOrTag, Filter};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};

use crate::Error;
pub use crate::abi::erc20 as abi;
use abi::{IERC20Calls, Transfer, transferCall, transferFromCall};

#[derive(Clone, Debug)]
pub struct TopLevelErc20Transfer {
  pub id: [u8; 32],
  pub from: [u8; 20],
  pub amount: U256,
  pub data: Vec<u8>,
}

/// A view for an ERC20 contract.
#[derive(Clone, Debug)]
pub struct Erc20(Arc<RootProvider<SimpleRequest>>, Address);
impl Erc20 {
  /// Construct a new view of the specified ERC20 contract.
  ///
  /// This checks a contract is deployed at that address yet does not check the contract is
  /// actually an ERC20.
  pub async fn new(
    provider: Arc<RootProvider<SimpleRequest>>,
    address: [u8; 20],
  ) -> Result<Option<Self>, Error> {
    let code = provider
      .get_code_at(address.into(), BlockNumberOrTag::Finalized.into())
      .await
      .map_err(|_| Error::ConnectionError)?;
    // Contract has yet to be deployed
    if code.is_empty() {
      return Ok(None);
    }
    Ok(Some(Self(provider.clone(), Address::from(&address))))
  }

  pub async fn top_level_transfers(
    &self,
    block: u64,
    to: [u8; 20],
  ) -> Result<Vec<TopLevelErc20Transfer>, Error> {
    let filter = Filter::new().from_block(block).to_block(block).address(self.1);
    let filter = filter.event_signature(Transfer::SIGNATURE_HASH);
    let mut to_topic = [0; 32];
    to_topic[12 ..].copy_from_slice(&to);
    let filter = filter.topic2(B256::from(to_topic));
    let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

    let mut handled = HashSet::new();

    let mut top_level_transfers = vec![];
    for log in logs {
      // Double check the address which emitted this log
      if log.address() != self.1 {
        Err(Error::ConnectionError)?;
      }

      let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?;
      let tx = self.0.get_transaction_by_hash(tx_id).await.map_err(|_| Error::ConnectionError)?;

      // If this is a top-level call...
      if tx.to == Some(self.1) {
        // And we recognize the call...
        // Don't validate the encoding as this can't be re-encoded to an identical bytestring due
        // to the InInstruction appended
        if let Ok(call) = IERC20Calls::abi_decode(&tx.input, false) {
          // Extract the top-level call's from/to/value
          let (from, call_to, value) = match call {
            IERC20Calls::transfer(transferCall { to: call_to, value }) => (tx.from, call_to, value),
            IERC20Calls::transferFrom(transferFromCall { from, to: call_to, value }) => {
              (from, call_to, value)
            }
            // Treat any other function selectors as unrecognized
            _ => continue,
          };

          let log = log.log_decode::<Transfer>().map_err(|_| Error::ConnectionError)?.inner.data;

          // Ensure the top-level transfer is equivalent, and this presumably isn't a log for an
          // internal transfer
          if (log.from != from) || (call_to != to) || (value != log.value) {
            continue;
          }

          // Now that the top-level transfer is confirmed to be equivalent to the log, ensure it's
          // the only log we handle
          if handled.contains(&tx_id) {
            continue;
          }
          handled.insert(tx_id);

          // Read the data appended after
          let encoded = call.abi_encode();
          let data = tx.input.as_ref()[encoded.len() ..].to_vec();

          // Push the transfer
          top_level_transfers.push(TopLevelErc20Transfer {
            // Since we'll only handle one log for this TX, set the ID to the TX ID
            id: *tx_id,
            from: *log.from.0,
            amount: log.value,
            data,
          });
        }
      }
    }

    Ok(top_level_transfers)
  }
}
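A usage sketch (the provider wiring and addresses are hypothetical; `ethereum-serai` is the crate these files belong to, and the `alloy_*` paths are its re-exports from the lib.rs hunk below): scan one block for transfers sent directly to us, with any appended InInstruction bytes surfaced in `data`:

use std::sync::Arc;
use ethereum_serai::{
  Error,
  erc20::Erc20,
  alloy_provider::RootProvider,
  alloy_simple_request_transport::SimpleRequest,
};

async fn scan(
  provider: Arc<RootProvider<SimpleRequest>>,
  token: [u8; 20],
  to: [u8; 20],
  block: u64,
) -> Result<(), Error> {
  // None means no contract exists at `token` yet
  let erc20 = Erc20::new(provider, token).await?.ok_or(Error::ConnectionError)?;
  for transfer in erc20.top_level_transfers(block, to).await? {
    // At most one transfer is yielded per TX, so the TX hash serves as the ID
    println!("{:?} sent {} ({} bytes of data)", transfer.from, transfer.amount, transfer.data.len());
  }
  Ok(())
}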
@@ -1,16 +1,30 @@
 use thiserror::Error;

+pub use alloy_core;
+pub use alloy_consensus;
+
+pub use alloy_rpc_types;
+pub use alloy_simple_request_transport;
+pub use alloy_rpc_client;
+pub use alloy_provider;
+
 pub mod crypto;

 pub(crate) mod abi;
-pub mod schnorr;
+
+pub mod erc20;
+pub mod deployer;
 pub mod router;
+
+pub mod machine;

 #[cfg(test)]
 mod tests;

-#[derive(Error, Debug)]
+#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]
 pub enum Error {
   #[error("failed to verify Schnorr signature")]
   InvalidSignature,
+  #[error("couldn't make call/send TX")]
+  ConnectionError,
 }
414 coins/ethereum/src/machine.rs Normal file
@@ -0,0 +1,414 @@
use std::{
  io::{self, Read},
  collections::HashMap,
};

use rand_core::{RngCore, CryptoRng};

use transcript::{Transcript, RecommendedTranscript};

use group::GroupEncoding;
use frost::{
  curve::{Ciphersuite, Secp256k1},
  Participant, ThresholdKeys, FrostError,
  algorithm::Schnorr,
  sign::*,
};

use alloy_core::primitives::U256;

use crate::{
  crypto::{PublicKey, EthereumHram, Signature},
  router::{
    abi::{Call as AbiCall, OutInstruction as AbiOutInstruction},
    Router,
  },
};

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Call {
  pub to: [u8; 20],
  pub value: U256,
  pub data: Vec<u8>,
}
impl Call {
  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut to = [0; 20];
    reader.read_exact(&mut to)?;

    let value = {
      let mut value_bytes = [0; 32];
      reader.read_exact(&mut value_bytes)?;
      U256::from_le_slice(&value_bytes)
    };

    let mut data_len = {
      let mut data_len = [0; 4];
      reader.read_exact(&mut data_len)?;
      usize::try_from(u32::from_le_bytes(data_len)).expect("u32 couldn't fit within a usize")
    };

    // A valid DoS would be to claim 4 GB of data is present when only 4 bytes are
    // We read this in 1 KB chunks to only read data actually present (with a max DoS of 1 KB)
    let mut data = vec![];
    while data_len > 0 {
      let chunk_len = data_len.min(1024);
      let mut chunk = vec![0; chunk_len];
      reader.read_exact(&mut chunk)?;
      data.extend(&chunk);
      data_len -= chunk_len;
    }

    Ok(Call { to, value, data })
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(&self.to)?;
    writer.write_all(&self.value.as_le_bytes())?;

    let data_len = u32::try_from(self.data.len())
      .map_err(|_| io::Error::other("call data length exceeded 2**32"))?;
    writer.write_all(&data_len.to_le_bytes())?;
    writer.write_all(&self.data)
  }
}
impl From<Call> for AbiCall {
  fn from(call: Call) -> AbiCall {
    AbiCall { to: call.to.into(), value: call.value, data: call.data.into() }
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum OutInstructionTarget {
  Direct([u8; 20]),
  Calls(Vec<Call>),
}
impl OutInstructionTarget {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0xff];
    reader.read_exact(&mut kind)?;

    match kind[0] {
      0 => {
        let mut addr = [0; 20];
        reader.read_exact(&mut addr)?;
        Ok(OutInstructionTarget::Direct(addr))
      }
      1 => {
        let mut calls_len = [0; 4];
        reader.read_exact(&mut calls_len)?;
        let calls_len = u32::from_le_bytes(calls_len);

        let mut calls = vec![];
        for _ in 0 .. calls_len {
          calls.push(Call::read(reader)?);
        }
        Ok(OutInstructionTarget::Calls(calls))
      }
      _ => Err(io::Error::other("unrecognized OutInstructionTarget"))?,
    }
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      OutInstructionTarget::Direct(addr) => {
        writer.write_all(&[0])?;
        writer.write_all(addr)?;
      }
      OutInstructionTarget::Calls(calls) => {
        writer.write_all(&[1])?;
        let call_len = u32::try_from(calls.len())
          .map_err(|_| io::Error::other("amount of calls exceeded 2**32"))?;
        writer.write_all(&call_len.to_le_bytes())?;
        for call in calls {
          call.write(writer)?;
        }
      }
    }
    Ok(())
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct OutInstruction {
  pub target: OutInstructionTarget,
  pub value: U256,
}
impl OutInstruction {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let target = OutInstructionTarget::read(reader)?;

    let value = {
      let mut value_bytes = [0; 32];
      reader.read_exact(&mut value_bytes)?;
      U256::from_le_slice(&value_bytes)
    };

    Ok(OutInstruction { target, value })
  }
  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    self.target.write(writer)?;
    writer.write_all(&self.value.as_le_bytes())
  }
}
impl From<OutInstruction> for AbiOutInstruction {
  fn from(instruction: OutInstruction) -> AbiOutInstruction {
    match instruction.target {
      OutInstructionTarget::Direct(addr) => {
        AbiOutInstruction { to: addr.into(), calls: vec![], value: instruction.value }
      }
      OutInstructionTarget::Calls(calls) => AbiOutInstruction {
        to: [0; 20].into(),
        calls: calls.into_iter().map(Into::into).collect(),
        value: instruction.value,
      },
    }
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum RouterCommand {
  UpdateSeraiKey { chain_id: U256, nonce: U256, key: PublicKey },
  Execute { chain_id: U256, nonce: U256, outs: Vec<OutInstruction> },
}

impl RouterCommand {
  pub fn msg(&self) -> Vec<u8> {
    match self {
      RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {
        Router::update_serai_key_message(*chain_id, *nonce, key)
      }
      RouterCommand::Execute { chain_id, nonce, outs } => Router::execute_message(
        *chain_id,
        *nonce,
        outs.iter().map(|out| out.clone().into()).collect(),
      ),
    }
  }

  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0xff];
    reader.read_exact(&mut kind)?;

    match kind[0] {
      0 => {
        let mut chain_id = [0; 32];
        reader.read_exact(&mut chain_id)?;

        let mut nonce = [0; 32];
        reader.read_exact(&mut nonce)?;

        let key = PublicKey::new(Secp256k1::read_G(reader)?)
          .ok_or(io::Error::other("key for RouterCommand doesn't have an eth representation"))?;
        Ok(RouterCommand::UpdateSeraiKey {
          chain_id: U256::from_le_slice(&chain_id),
          nonce: U256::from_le_slice(&nonce),
          key,
        })
      }
      1 => {
        let mut chain_id = [0; 32];
        reader.read_exact(&mut chain_id)?;
        let chain_id = U256::from_le_slice(&chain_id);

        let mut nonce = [0; 32];
        reader.read_exact(&mut nonce)?;
        let nonce = U256::from_le_slice(&nonce);

        let mut outs_len = [0; 4];
        reader.read_exact(&mut outs_len)?;
        let outs_len = u32::from_le_bytes(outs_len);

        let mut outs = vec![];
        for _ in 0 .. outs_len {
          outs.push(OutInstruction::read(reader)?);
        }

        Ok(RouterCommand::Execute { chain_id, nonce, outs })
      }
      _ => Err(io::Error::other("reading unknown type of RouterCommand"))?,
    }
  }

  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {
        writer.write_all(&[0])?;
        writer.write_all(&chain_id.as_le_bytes())?;
        writer.write_all(&nonce.as_le_bytes())?;
        writer.write_all(&key.A.to_bytes())
      }
      RouterCommand::Execute { chain_id, nonce, outs } => {
        writer.write_all(&[1])?;
        writer.write_all(&chain_id.as_le_bytes())?;
        writer.write_all(&nonce.as_le_bytes())?;
        writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?;
        for out in outs {
          out.write(writer)?;
        }
        Ok(())
      }
    }
  }

  pub fn serialize(&self) -> Vec<u8> {
    let mut res = vec![];
    self.write(&mut res).unwrap();
    res
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct SignedRouterCommand {
  command: RouterCommand,
  signature: Signature,
}

impl SignedRouterCommand {
  pub fn new(key: &PublicKey, command: RouterCommand, signature: &[u8; 64]) -> Option<Self> {
    let c = Secp256k1::read_F(&mut &signature[.. 32]).ok()?;
    let s = Secp256k1::read_F(&mut &signature[32 ..]).ok()?;
    let signature = Signature { c, s };

    if !signature.verify(key, &command.msg()) {
      None?
    }
    Some(SignedRouterCommand { command, signature })
  }

  pub fn command(&self) -> &RouterCommand {
    &self.command
  }

  pub fn signature(&self) -> &Signature {
    &self.signature
  }

  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let command = RouterCommand::read(reader)?;

    let mut sig = [0; 64];
    reader.read_exact(&mut sig)?;
    let signature = Signature::from_bytes(sig)?;

    Ok(SignedRouterCommand { command, signature })
  }

  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    self.command.write(writer)?;
    writer.write_all(&self.signature.to_bytes())
  }
}

pub struct RouterCommandMachine {
  key: PublicKey,
  command: RouterCommand,
  machine: AlgorithmMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}

impl RouterCommandMachine {
  pub fn new(keys: ThresholdKeys<Secp256k1>, command: RouterCommand) -> Option<Self> {
    // The Schnorr algorithm should be fine without this, even when using the IETF variant
    // If this is better and more comprehensive, we should do it, even if not necessary
    let mut transcript = RecommendedTranscript::new(b"ethereum-serai RouterCommandMachine v0.1");
    let key = keys.group_key();
    transcript.append_message(b"key", key.to_bytes());
    transcript.append_message(b"command", command.serialize());

    Some(Self {
      key: PublicKey::new(key)?,
      command,
      machine: AlgorithmMachine::new(Schnorr::new(transcript), keys),
    })
  }
}

impl PreprocessMachine for RouterCommandMachine {
  type Preprocess = Preprocess<Secp256k1, ()>;
  type Signature = SignedRouterCommand;
  type SignMachine = RouterCommandSignMachine;

  fn preprocess<R: RngCore + CryptoRng>(
    self,
    rng: &mut R,
  ) -> (Self::SignMachine, Self::Preprocess) {
    let (machine, preprocess) = self.machine.preprocess(rng);

    (RouterCommandSignMachine { key: self.key, command: self.command, machine }, preprocess)
  }
}

pub struct RouterCommandSignMachine {
  key: PublicKey,
  command: RouterCommand,
  machine: AlgorithmSignMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}

impl SignMachine<SignedRouterCommand> for RouterCommandSignMachine {
  type Params = ();
  type Keys = ThresholdKeys<Secp256k1>;
  type Preprocess = Preprocess<Secp256k1, ()>;
  type SignatureShare = SignatureShare<Secp256k1>;
  type SignatureMachine = RouterCommandSignatureMachine;

  fn cache(self) -> CachedPreprocess {
    unimplemented!(
      "RouterCommand machines don't support caching their preprocesses due to {}",
      "being already bound to a specific command"
    );
  }

  fn from_cache(
    (): (),
    _: ThresholdKeys<Secp256k1>,
    _: CachedPreprocess,
  ) -> (Self, Self::Preprocess) {
    unimplemented!(
      "RouterCommand machines don't support caching their preprocesses due to {}",
      "being already bound to a specific command"
    );
  }

  fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
    self.machine.read_preprocess(reader)
  }

  fn sign(
    self,
    commitments: HashMap<Participant, Self::Preprocess>,
    msg: &[u8],
  ) -> Result<(RouterCommandSignatureMachine, Self::SignatureShare), FrostError> {
    if !msg.is_empty() {
      panic!("message was passed to a RouterCommand machine when it generates its own");
    }

    let (machine, share) = self.machine.sign(commitments, &self.command.msg())?;

    Ok((RouterCommandSignatureMachine { key: self.key, command: self.command, machine }, share))
  }
}

pub struct RouterCommandSignatureMachine {
  key: PublicKey,
  command: RouterCommand,
  machine:
    AlgorithmSignatureMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}

impl SignatureMachine<SignedRouterCommand> for RouterCommandSignatureMachine {
  type SignatureShare = SignatureShare<Secp256k1>;

  fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::SignatureShare> {
    self.machine.read_share(reader)
  }

  fn complete(
    self,
    shares: HashMap<Participant, Self::SignatureShare>,
  ) -> Result<SignedRouterCommand, FrostError> {
    let sig = self.machine.complete(shares)?;
    let signature = Signature::new(&self.key, &self.command.msg(), sig)
      .expect("machine produced an invalid signature");
    Ok(SignedRouterCommand { command: self.command, signature })
  }
}
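A sketch of how these machines compose in a t-of-n run (hedged: only the single-party shape is shown, with the multi-party message exchange elided as comments; `keys` would come from the `frost` key-generation protocol):

use rand_core::OsRng;
use frost::{ThresholdKeys, curve::Secp256k1, sign::PreprocessMachine};
use ethereum_serai::machine::{RouterCommand, RouterCommandMachine};

fn start_signing(keys: ThresholdKeys<Secp256k1>, command: RouterCommand) {
  // Fails if the group key isn't an even-y key accepted by PublicKey::new
  let machine =
    RouterCommandMachine::new(keys, command).expect("group key wasn't a valid PublicKey");
  // Round 1: broadcast this preprocess to every other participant
  let (_machine, _preprocess) = machine.preprocess(&mut OsRng);
  // Round 2: call `sign(commitments, &[])` (the message must be empty, as the
  // machine signs the command it was built with), then `complete(shares)`
  // yields a SignedRouterCommand
}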
@@ -1,30 +1,428 @@
-pub use crate::abi::router::*;
-
-/*
-use crate::crypto::{ProcessedSignature, PublicKey};
-use ethers::{contract::ContractFactory, prelude::*, solc::artifacts::contract::ContractBytecode};
-use eyre::Result;
-use std::{convert::From, fs::File, sync::Arc};
-
-pub async fn router_update_public_key<M: Middleware + 'static>(
-  contract: &Router<M>,
-  public_key: &PublicKey,
-  signature: &ProcessedSignature,
-) -> std::result::Result<Option<TransactionReceipt>, eyre::ErrReport> {
-  let tx = contract.update_public_key(public_key.px.to_bytes().into(), signature.into());
-  let pending_tx = tx.send().await?;
-  let receipt = pending_tx.await?;
-  Ok(receipt)
-}
-
-pub async fn router_execute<M: Middleware + 'static>(
-  contract: &Router<M>,
-  txs: Vec<Rtransaction>,
-  signature: &ProcessedSignature,
-) -> std::result::Result<Option<TransactionReceipt>, eyre::ErrReport> {
-  let tx = contract.execute(txs, signature.into()).send();
-  let pending_tx = tx.send().await?;
-  let receipt = pending_tx.await?;
-  Ok(receipt)
-}
-*/
+use std::{sync::Arc, io, collections::HashSet};
+
+use k256::{
+  elliptic_curve::{group::GroupEncoding, sec1},
+  ProjectivePoint,
+};
+
+use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind};
+#[cfg(test)]
+use alloy_core::primitives::B256;
+use alloy_consensus::TxLegacy;
+
+use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent};
+
+use alloy_rpc_types::Filter;
+#[cfg(test)]
+use alloy_rpc_types::{BlockId, TransactionRequest, TransactionInput};
+use alloy_simple_request_transport::SimpleRequest;
+use alloy_provider::{Provider, RootProvider};
+
+pub use crate::{
+  Error,
+  crypto::{PublicKey, Signature},
+  abi::{erc20::Transfer, router as abi},
+};
+use abi::{SeraiKeyUpdated, InInstruction as InInstructionEvent, Executed as ExecutedEvent};
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub enum Coin {
+  Ether,
+  Erc20([u8; 20]),
+}
+
+impl Coin {
+  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
+    let mut kind = [0xff];
+    reader.read_exact(&mut kind)?;
+    Ok(match kind[0] {
+      0 => Coin::Ether,
+      1 => {
+        let mut address = [0; 20];
+        reader.read_exact(&mut address)?;
+        Coin::Erc20(address)
+      }
+      _ => Err(io::Error::other("unrecognized Coin type"))?,
+    })
+  }
+
+  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
+    match self {
+      Coin::Ether => writer.write_all(&[0]),
+      Coin::Erc20(token) => {
+        writer.write_all(&[1])?;
+        writer.write_all(token)
+      }
+    }
+  }
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct InInstruction {
+  pub id: ([u8; 32], u64),
+  pub from: [u8; 20],
+  pub coin: Coin,
+  pub amount: U256,
+  pub data: Vec<u8>,
+  pub key_at_end_of_block: ProjectivePoint,
+}
+
+impl InInstruction {
+  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
+    let id = {
+      let mut id_hash = [0; 32];
+      reader.read_exact(&mut id_hash)?;
+      let mut id_pos = [0; 8];
+      reader.read_exact(&mut id_pos)?;
+      let id_pos = u64::from_le_bytes(id_pos);
+      (id_hash, id_pos)
+    };
+
+    let mut from = [0; 20];
+    reader.read_exact(&mut from)?;
+
+    let coin = Coin::read(reader)?;
+    let mut amount = [0; 32];
+    reader.read_exact(&mut amount)?;
+    let amount = U256::from_le_slice(&amount);
+
+    let mut data_len = [0; 4];
+    reader.read_exact(&mut data_len)?;
+    let data_len = usize::try_from(u32::from_le_bytes(data_len))
+      .map_err(|_| io::Error::other("InInstruction data exceeded 2**32 in length"))?;
+    let mut data = vec![0; data_len];
+    reader.read_exact(&mut data)?;
+
+    let mut key_at_end_of_block = <ProjectivePoint as GroupEncoding>::Repr::default();
+    reader.read_exact(&mut key_at_end_of_block)?;
+    let key_at_end_of_block = Option::from(ProjectivePoint::from_bytes(&key_at_end_of_block))
+      .ok_or(io::Error::other("InInstruction had key at end of block which wasn't valid"))?;
+
+    Ok(InInstruction { id, from, coin, amount, data, key_at_end_of_block })
+  }
+
+  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
+    writer.write_all(&self.id.0)?;
+    writer.write_all(&self.id.1.to_le_bytes())?;
+
+    writer.write_all(&self.from)?;
+
+    self.coin.write(writer)?;
+    writer.write_all(&self.amount.as_le_bytes())?;
+
+    writer.write_all(
+      &u32::try_from(self.data.len())
+        .map_err(|_| {
+          io::Error::other("InInstruction being written had data exceeding 2**32 in length")
+        })?
+        .to_le_bytes(),
+    )?;
+    writer.write_all(&self.data)?;
+
+    writer.write_all(&self.key_at_end_of_block.to_bytes())
+  }
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct Executed {
+  pub tx_id: [u8; 32],
+  pub nonce: u64,
+  pub signature: [u8; 64],
+}
+
+/// The contract Serai uses to manage its state.
+#[derive(Clone, Debug)]
+pub struct Router(Arc<RootProvider<SimpleRequest>>, Address);
+impl Router {
+  pub(crate) fn code() -> Vec<u8> {
+    let bytecode = include_str!("../artifacts/Router.bin");
+    Bytes::from_hex(bytecode).expect("compiled-in Router bytecode wasn't valid hex").to_vec()
+  }
+
+  pub(crate) fn init_code(key: &PublicKey) -> Vec<u8> {
+    let mut bytecode = Self::code();
+    // Append the constructor arguments
+    bytecode.extend((abi::constructorCall { _seraiKey: key.eth_repr().into() }).abi_encode());
+    bytecode
+  }
+
+  // This isn't pub in order to force users to use `Deployer::find_router`.
+  pub(crate) fn new(provider: Arc<RootProvider<SimpleRequest>>, address: Address) -> Self {
+    Self(provider, address)
+  }
+
+  pub fn address(&self) -> [u8; 20] {
+    **self.1
+  }
+
+  /// Get the key for Serai at the specified block.
+  #[cfg(test)]
+  pub async fn serai_key(&self, at: [u8; 32]) -> Result<PublicKey, Error> {
+    let call = TransactionRequest::default()
+      .to(Some(self.1))
+      .input(TransactionInput::new(abi::seraiKeyCall::new(()).abi_encode().into()));
+    let bytes = self
+      .0
+      .call(&call, Some(BlockId::Hash(B256::from(at).into())))
+      .await
+      .map_err(|_| Error::ConnectionError)?;
+    let res =
+      abi::seraiKeyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
+    PublicKey::from_eth_repr(res._0.0).ok_or(Error::ConnectionError)
+  }
+
+  /// Get the message to be signed in order to update the key for Serai.
+  pub(crate) fn update_serai_key_message(chain_id: U256, nonce: U256, key: &PublicKey) -> Vec<u8> {
+    let mut buffer = b"updateSeraiKey".to_vec();
+    buffer.extend(&chain_id.to_be_bytes::<32>());
+    buffer.extend(&nonce.to_be_bytes::<32>());
+    buffer.extend(&key.eth_repr());
+    buffer
+  }
+
+  /// Update the key representing Serai.
+  pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy {
+    // TODO: Set a more accurate gas
+    TxLegacy {
+      to: TxKind::Call(self.1),
+      input: abi::updateSeraiKeyCall::new((public_key.eth_repr().into(), sig.into()))
+        .abi_encode()
+        .into(),
+      gas_limit: 100_000,
+      ..Default::default()
+    }
+  }
+
+  /// Get the current nonce for the published batches.
+  #[cfg(test)]
+  pub async fn nonce(&self, at: [u8; 32]) -> Result<U256, Error> {
+    let call = TransactionRequest::default()
+      .to(Some(self.1))
+      .input(TransactionInput::new(abi::nonceCall::new(()).abi_encode().into()));
+    let bytes = self
+      .0
+      .call(&call, Some(BlockId::Hash(B256::from(at).into())))
+      .await
+      .map_err(|_| Error::ConnectionError)?;
+    let res =
+      abi::nonceCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
+    Ok(res._0)
+  }
+
+  /// Get the message to be signed in order to execute a batch of `OutInstruction`s.
+  pub(crate) fn execute_message(
+    chain_id: U256,
+    nonce: U256,
+    outs: Vec<abi::OutInstruction>,
+  ) -> Vec<u8> {
+    ("execute".to_string(), chain_id, nonce, outs).abi_encode_params()
+  }
+
+  /// Execute a batch of `OutInstruction`s.
+  pub fn execute(&self, outs: &[abi::OutInstruction], sig: &Signature) -> TxLegacy {
+    TxLegacy {
+      to: TxKind::Call(self.1),
+      input: abi::executeCall::new((outs.to_vec(), sig.into())).abi_encode().into(),
+      // TODO
+      gas_limit: 100_000 + ((200_000 + 10_000) * u128::try_from(outs.len()).unwrap()),
+      ..Default::default()
+    }
+  }
+
+  pub async fn key_at_end_of_block(&self, block: u64) -> Result<ProjectivePoint, Error> {
+    let filter = Filter::new().from_block(0).to_block(block).address(self.1);
+    let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH);
+    let all_keys = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
+
+    let last_key_x_coordinate_log = all_keys.last().ok_or(Error::ConnectionError)?;
+    let last_key_x_coordinate = last_key_x_coordinate_log
+      .log_decode::<SeraiKeyUpdated>()
+      .map_err(|_| Error::ConnectionError)?
+      .inner
+      .data
+      .key;
+
+    let mut compressed_point = <ProjectivePoint as GroupEncoding>::Repr::default();
+    compressed_point[0] = u8::from(sec1::Tag::CompressedEvenY);
+    compressed_point[1 ..].copy_from_slice(last_key_x_coordinate.as_slice());
+
+    Option::from(ProjectivePoint::from_bytes(&compressed_point)).ok_or(Error::ConnectionError)
+  }
+
+  pub async fn in_instructions(
+    &self,
+    block: u64,
+    allowed_tokens: &HashSet<[u8; 20]>,
+  ) -> Result<Vec<InInstruction>, Error> {
+    let key_at_end_of_block = self.key_at_end_of_block(block).await?;
+
+    let filter = Filter::new().from_block(block).to_block(block).address(self.1);
+    let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH);
+    let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
+
+    let mut transfer_check = HashSet::new();
+    let mut in_instructions = vec![];
+    for log in logs {
+      // Double check the address which emitted this log
+      if log.address() != self.1 {
+        Err(Error::ConnectionError)?;
+      }
+
+      let id = (
+        log.block_hash.ok_or(Error::ConnectionError)?.into(),
+        log.log_index.ok_or(Error::ConnectionError)?,
+      );
+
+      let tx_hash = log.transaction_hash.ok_or(Error::ConnectionError)?;
+      let tx = self.0.get_transaction_by_hash(tx_hash).await.map_err(|_| Error::ConnectionError)?;
+
+      let log =
+        log.log_decode::<InInstructionEvent>().map_err(|_| Error::ConnectionError)?.inner.data;
+
+      let coin = if log.coin.0 == [0; 20] {
+        Coin::Ether
+      } else {
+        let token = *log.coin.0;
+
+        if !allowed_tokens.contains(&token) {
+          continue;
+        }
+
+        // If this also counts as a top-level transfer via the token, drop it
+        //
+        // Necessary in order to handle a potential edge case with some theoretical token
+        // implementations
+        //
+        // This will either let it be handled by the top-level transfer hook or will drop it
+        // entirely on the side of caution
+        if tx.to == Some(token.into()) {
+          continue;
+        }
+
+        // Get all logs for this TX
+        let receipt = self
+          .0
+          .get_transaction_receipt(tx_hash)
+          .await
+          .map_err(|_| Error::ConnectionError)?
+          .ok_or(Error::ConnectionError)?;
+        let tx_logs = receipt.inner.logs();
+
+        // Find a matching transfer log
+        let mut found_transfer = false;
+        for tx_log in tx_logs {
+          let log_index = tx_log.log_index.ok_or(Error::ConnectionError)?;
+          // Ensure we didn't already use this transfer to check a distinct InInstruction event
+          if transfer_check.contains(&log_index) {
+            continue;
+          }
+
+          // Check if this log is from the token we expected to be transferred
+          if tx_log.address().0 != token {
+            continue;
+          }
+          // Check if this is a transfer log
+          // https://github.com/alloy-rs/core/issues/589
+          if tx_log.topics()[0] != Transfer::SIGNATURE_HASH {
+            continue;
+          }
+          let Ok(transfer) = Transfer::decode_log(&tx_log.inner.clone(), true) else { continue };
+          // Check if this is a transfer to us for the expected amount
+          if (transfer.to == self.1) && (transfer.value == log.amount) {
+            transfer_check.insert(log_index);
+            found_transfer = true;
+            break;
+          }
+        }
+        if !found_transfer {
+          // This shouldn't be a ConnectionError
+          // This is an exploit, a non-conforming ERC20, or an invalid connection
+          // This should halt the process which is sufficient, yet this is sub-optimal
+          // TODO
+          Err(Error::ConnectionError)?;
+        }
+
+        Coin::Erc20(token)
+      };
+
+      in_instructions.push(InInstruction {
+        id,
+        from: *log.from.0,
+        coin,
+        amount: log.amount,
+        data: log.instruction.as_ref().to_vec(),
+        key_at_end_of_block,
+      });
+    }
+
+    Ok(in_instructions)
+  }
+
+  pub async fn executed_commands(&self, block: u64) -> Result<Vec<Executed>, Error> {
+    let mut res = vec![];
+
+    {
+      let filter = Filter::new().from_block(block).to_block(block).address(self.1);
+      let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH);
+      let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
+
+      for log in logs {
+        // Double check the address which emitted this log
+        if log.address() != self.1 {
+          Err(Error::ConnectionError)?;
+        }
+
+        let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into();
+
+        let log =
+          log.log_decode::<SeraiKeyUpdated>().map_err(|_| Error::ConnectionError)?.inner.data;
+
+        let mut signature = [0; 64];
+        signature[.. 32].copy_from_slice(log.signature.c.as_ref());
+        signature[32 ..].copy_from_slice(log.signature.s.as_ref());
+        res.push(Executed {
+          tx_id,
+          nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,
+          signature,
+        });
+      }
+    }
+
+    {
+      let filter = Filter::new().from_block(block).to_block(block).address(self.1);
+      let filter = filter.event_signature(ExecutedEvent::SIGNATURE_HASH);
+      let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
+
+      for log in logs {
+        // Double check the address which emitted this log
+        if log.address() != self.1 {
+          Err(Error::ConnectionError)?;
+        }
+
+        let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into();
+
+        let log = log.log_decode::<ExecutedEvent>().map_err(|_| Error::ConnectionError)?.inner.data;
+
+        let mut signature = [0; 64];
+        signature[.. 32].copy_from_slice(log.signature.c.as_ref());
+        signature[32 ..].copy_from_slice(log.signature.s.as_ref());
+        res.push(Executed {
+          tx_id,
+          nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,
+          signature,
+        });
+      }
+    }
+
+    Ok(res)
+  }
+
+  #[cfg(feature = "tests")]
+  pub fn key_updated_filter(&self) -> Filter {
+    Filter::new().address(self.1).event_signature(SeraiKeyUpdated::SIGNATURE_HASH)
+  }
+  #[cfg(feature = "tests")]
+  pub fn executed_filter(&self) -> Filter {
+    Filter::new().address(self.1).event_signature(ExecutedEvent::SIGNATURE_HASH)
+  }
+}
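A sketch tying the Router to the signing machinery above (hypothetical values; every name is from this diff). The Schnorr signature must cover `execute_message(chain_id, nonce, outs)`, after which `execute` yields an unsigned `TxLegacy` anyone may submit:

use ethereum_serai::{
  alloy_core::primitives::U256,
  alloy_consensus::TxLegacy,
  crypto::Signature,
  router::{Router, abi},
};

// One direct payout; `calls` stays empty for a plain value transfer
fn execute_tx(router: &Router, sig: &Signature, to: [u8; 20], value: U256) -> TxLegacy {
  let out = abi::OutInstruction { to: to.into(), calls: vec![], value };
  // Gas is budgeted as 100_000 plus (200_000 + 10_000) per instruction
  router.execute(&[out], sig)
}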
@@ -1,34 +0,0 @@
use eyre::{eyre, Result};

use group::ff::PrimeField;

use ethers_providers::{Provider, Http};

use crate::{
  Error,
  crypto::{keccak256, PublicKey, Signature},
};
pub use crate::abi::schnorr::*;

pub async fn call_verify(
  contract: &Schnorr<Provider<Http>>,
  public_key: &PublicKey,
  message: &[u8],
  signature: &Signature,
) -> Result<()> {
  if contract
    .verify(
      public_key.parity,
      public_key.px.to_repr().into(),
      keccak256(message),
      signature.c.to_repr().into(),
      signature.s.to_repr().into(),
    )
    .call()
    .await?
  {
    Ok(())
  } else {
    Err(eyre!(Error::InvalidSignature))
  }
}
13 coins/ethereum/src/tests/abi/mod.rs Normal file
@@ -0,0 +1,13 @@
use alloy_sol_types::sol;

#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod schnorr_container {
  use super::*;
  sol!("src/tests/contracts/Schnorr.sol");
}
pub(crate) use schnorr_container::TestSchnorr as schnorr;
51 coins/ethereum/src/tests/contracts/ERC20.sol Normal file
@@ -0,0 +1,51 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;

contract TestERC20 {
  event Transfer(address indexed from, address indexed to, uint256 value);
  event Approval(address indexed owner, address indexed spender, uint256 value);

  function name() public pure returns (string memory) {
    return "Test ERC20";
  }
  function symbol() public pure returns (string memory) {
    return "TEST";
  }
  function decimals() public pure returns (uint8) {
    return 18;
  }

  function totalSupply() public pure returns (uint256) {
    return 1_000_000 * 10e18;
  }

  mapping(address => uint256) balances;
  mapping(address => mapping(address => uint256)) allowances;

  constructor() {
    balances[msg.sender] = totalSupply();
  }

  function balanceOf(address owner) public view returns (uint256) {
    return balances[owner];
  }
  function transfer(address to, uint256 value) public returns (bool) {
    balances[msg.sender] -= value;
    balances[to] += value;
    return true;
  }
  function transferFrom(address from, address to, uint256 value) public returns (bool) {
    allowances[from][msg.sender] -= value;
    balances[from] -= value;
    balances[to] += value;
    return true;
  }

  function approve(address spender, uint256 value) public returns (bool) {
    allowances[msg.sender][spender] = value;
    return true;
  }
  function allowance(address owner, address spender) public view returns (uint256) {
    return allowances[owner][spender];
  }
}
coins/ethereum/src/tests/contracts/Schnorr.sol (new file, 15 lines)
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: AGPLv3
+pragma solidity ^0.8.0;
+
+import "../../../contracts/Schnorr.sol";
+
+contract TestSchnorr {
+  function verify(
+    bytes32 px,
+    bytes calldata message,
+    bytes32 c,
+    bytes32 s
+  ) external pure returns (bool) {
+    return Schnorr.verify(px, message, c, s);
+  }
+}
@@ -1,49 +1,33 @@
 use rand_core::OsRng;
 
-use sha2::Sha256;
-use sha3::{Digest, Keccak256};
-
-use group::Group;
+use group::ff::{Field, PrimeField};
+
 use k256::{
-  ecdsa::{hazmat::SignPrimitive, signature::DigestVerifier, SigningKey, VerifyingKey},
-  elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, point::DecompressPoint},
-  U256, Scalar, AffinePoint, ProjectivePoint,
+  ecdsa::{
+    self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey,
+  },
+  Scalar, ProjectivePoint,
 };
 
 use frost::{
-  curve::Secp256k1,
+  curve::{Ciphersuite, Secp256k1},
   algorithm::{Hram, IetfSchnorr},
   tests::{algorithm_machines, sign},
 };
 
 use crate::{crypto::*, tests::key_gen};
 
-pub fn hash_to_scalar(data: &[u8]) -> Scalar {
-  Scalar::reduce(U256::from_be_slice(&keccak256(data)))
-}
-
-pub(crate) fn ecrecover(message: Scalar, v: u8, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
-  if r.is_zero().into() || s.is_zero().into() || !((v == 27) || (v == 28)) {
-    return None;
-  }
-
-  #[allow(non_snake_case)]
-  let R = AffinePoint::decompress(&r.to_bytes(), (v - 27).into());
-  #[allow(non_snake_case)]
-  if let Some(R) = Option::<AffinePoint>::from(R) {
-    #[allow(non_snake_case)]
-    let R = ProjectivePoint::from(R);
-
-    let r = r.invert().unwrap();
-    let u1 = ProjectivePoint::GENERATOR * (-message * r);
-    let u2 = R * (s * r);
-    let key: ProjectivePoint = u1 + u2;
-    if !bool::from(key.is_identity()) {
-      return Some(address(&key));
-    }
-  }
-
-  None
+// The ecrecover opcode, yet with parity replacing v
+pub(crate) fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
+  let sig = ecdsa::Signature::from_scalars(r, s).ok()?;
+  let message: [u8; 32] = message.to_repr().into();
+  alloy_core::primitives::Signature::from_signature_and_parity(
+    sig,
+    alloy_core::primitives::Parity::Parity(odd_y),
+  )
+  .ok()?
+  .recover_address_from_prehash(&alloy_core::primitives::B256::from(message))
+  .ok()
+  .map(Into::into)
 }
 
 #[test]
@@ -55,20 +39,23 @@ fn test_ecrecover() {
   const MESSAGE: &[u8] = b"Hello, World!";
   let (sig, recovery_id) = private
     .as_nonzero_scalar()
-    .try_sign_prehashed_rfc6979::<Sha256>(&Keccak256::digest(MESSAGE), b"")
+    .try_sign_prehashed(
+      <Secp256k1 as Ciphersuite>::F::random(&mut OsRng),
+      &keccak256(MESSAGE).into(),
+    )
     .unwrap();
 
   // Sanity check the signature verifies
   #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
   {
-    assert_eq!(public.verify_digest(Keccak256::new_with_prefix(MESSAGE), &sig).unwrap(), ());
+    assert_eq!(public.verify_prehash(&keccak256(MESSAGE), &sig).unwrap(), ());
   }
 
   // Perform the ecrecover
   assert_eq!(
     ecrecover(
       hash_to_scalar(MESSAGE),
-      u8::from(recovery_id.unwrap().is_y_odd()) + 27,
+      u8::from(recovery_id.unwrap().is_y_odd()) == 1,
       *sig.r(),
       *sig.s()
     )
@@ -93,18 +80,13 @@ fn test_signing() {
 pub fn preprocess_signature_for_ecrecover(
   R: ProjectivePoint,
   public_key: &PublicKey,
-  chain_id: U256,
   m: &[u8],
   s: Scalar,
-) -> (u8, Scalar, Scalar) {
-  let c = EthereumHram::hram(
-    &R,
-    &public_key.A,
-    &[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(),
-  );
+) -> (Scalar, Scalar) {
+  let c = EthereumHram::hram(&R, &public_key.A, m);
   let sa = -(s * public_key.px);
   let ca = -(c * public_key.px);
-  (public_key.parity, sa, ca)
+  (sa, ca)
 }
 
 #[test]
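For reference, the algebra behind this preprocessing (a sketch, using the IETF Schnorr relation $sG = R + cA$ that the FROST code in these tests produces): `ecrecover`, given a message scalar $z$, an r-value $r$ with parity $v$, and an s-value $t$, returns the address of

$$Q = r^{-1}\,(t R' - z G),$$

where $R'$ is the point recovered from $(r, v)$. Substituting $z = -s\,p_x$, $(r, v) = (p_x, \mathrm{parity}(A))$ so that $R' = A$, and $t = -c\,p_x$ gives

$$Q = p_x^{-1}\big((-c\,p_x)A - (-s\,p_x)G\big) = sG - cA = R,$$

so comparing `ecrecover(sa, parity, px, ca)` against `address(R)` checks the entire Schnorr equation with a single precompile call, which is what the following test exercises.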
@@ -112,21 +94,12 @@ fn test_ecrecover_hack() {
   let (keys, public_key) = key_gen();
 
   const MESSAGE: &[u8] = b"Hello, World!";
-  let hashed_message = keccak256(MESSAGE);
-  let chain_id = U256::ONE;
-  let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
 
   let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
-  let sig = sign(
-    &mut OsRng,
-    &algo,
-    keys.clone(),
-    algorithm_machines(&mut OsRng, &algo, &keys),
-    full_message,
-  );
+  let sig =
+    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
 
-  let (parity, sa, ca) =
-    preprocess_signature_for_ecrecover(sig.R, &public_key, chain_id, MESSAGE, sig.s);
-  let q = ecrecover(sa, parity, public_key.px, ca).unwrap();
+  let (sa, ca) = preprocess_signature_for_ecrecover(sig.R, &public_key, MESSAGE, sig.s);
+  let q = ecrecover(sa, false, public_key.px, ca).unwrap();
   assert_eq!(q, address(&sig.R));
 }
@@ -1,21 +1,25 @@
-use std::{sync::Arc, time::Duration, fs::File, collections::HashMap};
+use std::{sync::Arc, collections::HashMap};
 
 use rand_core::OsRng;
 
-use group::ff::PrimeField;
 use k256::{Scalar, ProjectivePoint};
 use frost::{curve::Secp256k1, Participant, ThresholdKeys, tests::key_gen as frost_key_gen};
 
-use ethers_core::{
-  types::{H160, Signature as EthersSignature},
-  abi::Abi,
+use alloy_core::{
+  primitives::{Address, U256, Bytes, TxKind},
+  hex::FromHex,
 };
-use ethers_contract::ContractFactory;
-use ethers_providers::{Middleware, Provider, Http};
+use alloy_consensus::{SignableTransaction, TxLegacy};
 
-use crate::crypto::PublicKey;
+use alloy_rpc_types::TransactionReceipt;
+use alloy_simple_request_transport::SimpleRequest;
+use alloy_provider::{Provider, RootProvider};
+
+use crate::crypto::{address, deterministically_sign, PublicKey};
 
 mod crypto;
+
+mod abi;
 mod schnorr;
 mod router;
@@ -36,57 +40,88 @@ pub fn key_gen() -> (HashMap<Participant, ThresholdKeys<Secp256k1>>, PublicKey)
   (keys, public_key)
 }
 
-// TODO: Replace with a contract deployment from an unknown account, so the environment solely has
-// to fund the deployer, not create/pass a wallet
-// TODO: Deterministic deployments across chains
+// TODO: Use a proper error here
+pub async fn send(
+  provider: &RootProvider<SimpleRequest>,
+  wallet: &k256::ecdsa::SigningKey,
+  mut tx: TxLegacy,
+) -> Option<TransactionReceipt> {
+  let verifying_key = *wallet.verifying_key().as_affine();
+  let address = Address::from(address(&verifying_key.into()));
+
+  // https://github.com/alloy-rs/alloy/issues/539
+  // let chain_id = provider.get_chain_id().await.unwrap();
+  // tx.chain_id = Some(chain_id);
+  tx.chain_id = None;
+  tx.nonce = provider.get_transaction_count(address, None).await.unwrap();
+  // 100 gwei
+  tx.gas_price = 100_000_000_000u128;
+
+  let sig = wallet.sign_prehash_recoverable(tx.signature_hash().as_ref()).unwrap();
+  assert_eq!(address, tx.clone().into_signed(sig.into()).recover_signer().unwrap());
+  assert!(
+    provider.get_balance(address, None).await.unwrap() >
+      ((U256::from(tx.gas_price) * U256::from(tx.gas_limit)) + tx.value)
+  );
+
+  let mut bytes = vec![];
+  tx.encode_with_signature_fields(&sig.into(), &mut bytes);
+  let pending_tx = provider.send_raw_transaction(&bytes).await.ok()?;
+  pending_tx.get_receipt().await.ok()
+}
+
+pub async fn fund_account(
+  provider: &RootProvider<SimpleRequest>,
+  wallet: &k256::ecdsa::SigningKey,
+  to_fund: Address,
+  value: U256,
+) -> Option<()> {
+  let funding_tx =
+    TxLegacy { to: TxKind::Call(to_fund), gas_limit: 21_000, value, ..Default::default() };
+  assert!(send(provider, wallet, funding_tx).await.unwrap().status());
+
+  Some(())
+}
+
+// TODO: Use a proper error here
 pub async fn deploy_contract(
-  chain_id: u32,
-  client: Arc<Provider<Http>>,
+  client: Arc<RootProvider<SimpleRequest>>,
   wallet: &k256::ecdsa::SigningKey,
   name: &str,
-) -> eyre::Result<H160> {
-  let abi: Abi =
-    serde_json::from_reader(File::open(format!("./artifacts/{name}.abi")).unwrap()).unwrap();
-
+) -> Option<Address> {
   let hex_bin_buf = std::fs::read_to_string(format!("./artifacts/{name}.bin")).unwrap();
   let hex_bin =
     if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf };
-  let bin = hex::decode(hex_bin).unwrap();
-  let factory = ContractFactory::new(abi, bin.into(), client.clone());
+  let bin = Bytes::from_hex(hex_bin).unwrap();
 
-  let mut deployment_tx = factory.deploy(())?.tx;
-  deployment_tx.set_chain_id(chain_id);
-  deployment_tx.set_gas(1_000_000);
-  let (max_fee_per_gas, max_priority_fee_per_gas) = client.estimate_eip1559_fees(None).await?;
-  deployment_tx.as_eip1559_mut().unwrap().max_fee_per_gas = Some(max_fee_per_gas);
-  deployment_tx.as_eip1559_mut().unwrap().max_priority_fee_per_gas = Some(max_priority_fee_per_gas);
+  let deployment_tx = TxLegacy {
+    chain_id: None,
+    nonce: 0,
+    // 100 gwei
+    gas_price: 100_000_000_000u128,
+    gas_limit: 1_000_000,
+    to: TxKind::Create,
+    value: U256::ZERO,
+    input: bin,
+  };
 
-  let sig_hash = deployment_tx.sighash();
-  let (sig, rid) = wallet.sign_prehash_recoverable(sig_hash.as_ref()).unwrap();
+  let deployment_tx = deterministically_sign(&deployment_tx);
 
-  // EIP-155 v
-  let mut v = u64::from(rid.to_byte());
-  assert!((v == 0) || (v == 1));
-  v += u64::from((chain_id * 2) + 35);
+  // Fund the deployer address
+  fund_account(
+    &client,
+    wallet,
+    deployment_tx.recover_signer().unwrap(),
+    U256::from(deployment_tx.tx().gas_limit) * U256::from(deployment_tx.tx().gas_price),
+  )
+  .await?;
 
-  let r = sig.r().to_repr();
-  let r_ref: &[u8] = r.as_ref();
-  let s = sig.s().to_repr();
-  let s_ref: &[u8] = s.as_ref();
-  let deployment_tx =
-    deployment_tx.rlp_signed(&EthersSignature { r: r_ref.into(), s: s_ref.into(), v });
-
-  let pending_tx = client.send_raw_transaction(deployment_tx).await?;
-
-  let mut receipt;
-  while {
-    receipt = client.get_transaction_receipt(pending_tx.tx_hash()).await?;
-    receipt.is_none()
-  } {
-    tokio::time::sleep(Duration::from_secs(6)).await;
-  }
-  let receipt = receipt.unwrap();
-  assert!(receipt.status == Some(1.into()));
-
-  Ok(receipt.contract_address.unwrap())
+  let (deployment_tx, sig, _) = deployment_tx.into_parts();
+  let mut bytes = vec![];
+  deployment_tx.encode_with_signature_fields(&sig, &mut bytes);
+  let pending_tx = client.send_raw_transaction(&bytes).await.ok()?;
+  let receipt = pending_tx.get_receipt().await.ok()?;
+  assert!(receipt.status());
+
+  Some(receipt.contract_address.unwrap())
 }
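The rewritten `deploy_contract` leans on `deterministically_sign`: a transaction with a fixed signature always recovers to the same sender, and a CREATE from that sender at nonce 0 always yields the same contract address on every chain. A minimal, self-contained sketch of that address derivation (assumption: nonce 0; this helper is illustrative, not part of the crate):

```rust
use sha3::{Digest, Keccak256};

// CREATE address = keccak256(rlp([sender, nonce]))[12 ..]; for nonce 0 the RLP is
// a fixed 23-byte encoding, so the address depends only on the deployer
fn create_address_nonce_0(deployer: [u8; 20]) -> [u8; 20] {
  let mut rlp = Vec::with_capacity(23);
  rlp.push(0xd6); // list header: 22 bytes of payload follow
  rlp.push(0x94); // string header: a 20-byte item (the sender)
  rlp.extend_from_slice(&deployer);
  rlp.push(0x80); // nonce 0 encodes as the empty string
  let hash = Keccak256::digest(&rlp);
  let mut address = [0; 20];
  address.copy_from_slice(&hash[12 ..]);
  address
}

fn main() {
  // The same deployer always produces the same contract address, on any chain
  assert_eq!(create_address_nonce_0([0x11; 20]), create_address_nonce_0([0x11; 20]));
}
```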
@@ -2,7 +2,8 @@ use std::{convert::TryFrom, sync::Arc, collections::HashMap};
 
 use rand_core::OsRng;
 
-use group::ff::PrimeField;
+use group::Group;
+use k256::ProjectivePoint;
 use frost::{
   curve::Secp256k1,
   Participant, ThresholdKeys,
@@ -10,100 +11,173 @@ use frost::{
   tests::{algorithm_machines, sign},
 };
 
-use ethers_core::{
-  types::{H160, U256, Bytes},
-  abi::AbiEncode,
-  utils::{Anvil, AnvilInstance},
-};
-use ethers_providers::{Middleware, Provider, Http};
+use alloy_core::primitives::{Address, U256};
+
+use alloy_simple_request_transport::SimpleRequest;
+use alloy_rpc_client::ClientBuilder;
+use alloy_provider::{Provider, RootProvider};
+
+use alloy_node_bindings::{Anvil, AnvilInstance};
 
 use crate::{
-  crypto::{keccak256, PublicKey, EthereumHram, Signature},
-  router::{self, *},
-  tests::{key_gen, deploy_contract},
+  crypto::*,
+  deployer::Deployer,
+  router::{Router, abi as router},
+  tests::{key_gen, send, fund_account},
 };
 
 async fn setup_test() -> (
-  u32,
   AnvilInstance,
-  Router<Provider<Http>>,
+  Arc<RootProvider<SimpleRequest>>,
+  u64,
+  Router,
   HashMap<Participant, ThresholdKeys<Secp256k1>>,
   PublicKey,
 ) {
   let anvil = Anvil::new().spawn();
 
-  let provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
-  let chain_id = provider.get_chainid().await.unwrap().as_u32();
+  let provider = RootProvider::new(
+    ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
+  );
+  let chain_id = provider.get_chain_id().await.unwrap();
   let wallet = anvil.keys()[0].clone().into();
   let client = Arc::new(provider);
 
-  let contract_address =
-    deploy_contract(chain_id, client.clone(), &wallet, "Router").await.unwrap();
-  let contract = Router::new(contract_address, client.clone());
+  // Make sure the Deployer constructor returns None, as it doesn't exist yet
+  assert!(Deployer::new(client.clone()).await.unwrap().is_none());
+
+  // Deploy the Deployer
+  let tx = Deployer::deployment_tx();
+  fund_account(
+    &client,
+    &wallet,
+    tx.recover_signer().unwrap(),
+    U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price),
+  )
+  .await
+  .unwrap();
+
+  let (tx, sig, _) = tx.into_parts();
+  let mut bytes = vec![];
+  tx.encode_with_signature_fields(&sig, &mut bytes);
+
+  let pending_tx = client.send_raw_transaction(&bytes).await.unwrap();
+  let receipt = pending_tx.get_receipt().await.unwrap();
+  assert!(receipt.status());
+  let deployer =
+    Deployer::new(client.clone()).await.expect("network error").expect("deployer wasn't deployed");
 
   let (keys, public_key) = key_gen();
 
-  // Set the key to the threshold keys
-  let tx = contract.init_serai_key(public_key.px.to_repr().into()).gas(100_000);
-  let pending_tx = tx.send().await.unwrap();
-  let receipt = pending_tx.await.unwrap().unwrap();
-  assert!(receipt.status == Some(1.into()));
+  // Verify the Router constructor returns None, as it doesn't exist yet
+  assert!(deployer.find_router(client.clone(), &public_key).await.unwrap().is_none());
 
-  (chain_id, anvil, contract, keys, public_key)
+  // Deploy the router
+  let receipt = send(&client, &anvil.keys()[0].clone().into(), deployer.deploy_router(&public_key))
+    .await
+    .unwrap();
+  assert!(receipt.status());
+  let contract = deployer.find_router(client.clone(), &public_key).await.unwrap().unwrap();
+
+  (anvil, client, chain_id, contract, keys, public_key)
+}
+
+async fn latest_block_hash(client: &RootProvider<SimpleRequest>) -> [u8; 32] {
+  client
+    .get_block(client.get_block_number().await.unwrap().into(), false)
+    .await
+    .unwrap()
+    .unwrap()
+    .header
+    .hash
+    .unwrap()
+    .0
 }
 
 #[tokio::test]
 async fn test_deploy_contract() {
-  setup_test().await;
+  let (_anvil, client, _, router, _, public_key) = setup_test().await;
+
+  let block_hash = latest_block_hash(&client).await;
+  assert_eq!(router.serai_key(block_hash).await.unwrap(), public_key);
+  assert_eq!(router.nonce(block_hash).await.unwrap(), U256::try_from(1u64).unwrap());
+  // TODO: Check it emitted SeraiKeyUpdated(public_key) at its genesis
 }
 
 pub fn hash_and_sign(
   keys: &HashMap<Participant, ThresholdKeys<Secp256k1>>,
   public_key: &PublicKey,
-  chain_id: U256,
   message: &[u8],
 ) -> Signature {
-  let hashed_message = keccak256(message);
-
-  let mut chain_id_bytes = [0; 32];
-  chain_id.to_big_endian(&mut chain_id_bytes);
-  let full_message = &[chain_id_bytes.as_slice(), &hashed_message].concat();
-
   let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
-  let sig = sign(
-    &mut OsRng,
-    &algo,
-    keys.clone(),
-    algorithm_machines(&mut OsRng, &algo, keys),
-    full_message,
-  );
+  let sig =
+    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, keys), message);
 
-  Signature::new(public_key, k256::U256::from_words(chain_id.0), message, sig).unwrap()
+  Signature::new(public_key, message, sig).unwrap()
+}
+
+#[tokio::test]
+async fn test_router_update_serai_key() {
+  let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await;
+
+  let next_key = loop {
+    let point = ProjectivePoint::random(&mut OsRng);
+    let Some(next_key) = PublicKey::new(point) else { continue };
+    break next_key;
+  };
+
+  let message = Router::update_serai_key_message(
+    U256::try_from(chain_id).unwrap(),
+    U256::try_from(1u64).unwrap(),
+    &next_key,
+  );
+  let sig = hash_and_sign(&keys, &public_key, &message);
+
+  let first_block_hash = latest_block_hash(&client).await;
+  assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key);
+
+  let receipt =
+    send(&client, &anvil.keys()[0].clone().into(), contract.update_serai_key(&next_key, &sig))
+      .await
+      .unwrap();
+  assert!(receipt.status());
+
+  let second_block_hash = latest_block_hash(&client).await;
+  assert_eq!(contract.serai_key(second_block_hash).await.unwrap(), next_key);
+  // Check this does still offer the historical state
+  assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key);
+  // TODO: Check logs
+
+  println!("gas used: {:?}", receipt.gas_used);
+  // println!("logs: {:?}", receipt.logs);
 }
 
 #[tokio::test]
 async fn test_router_execute() {
-  let (chain_id, _anvil, contract, keys, public_key) = setup_test().await;
+  let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await;
 
-  let to = H160([0u8; 20]);
-  let value = U256([0u64; 4]);
-  let data = Bytes::from([0]);
-  let tx = OutInstruction { to, value, data: data.clone() };
+  let to = Address::from([0; 20]);
+  let value = U256::ZERO;
+  let tx = router::OutInstruction { to, value, calls: vec![] };
+  let txs = vec![tx];
 
-  let nonce_call = contract.nonce();
-  let nonce = nonce_call.call().await.unwrap();
+  let first_block_hash = latest_block_hash(&client).await;
+  let nonce = contract.nonce(first_block_hash).await.unwrap();
+  assert_eq!(nonce, U256::try_from(1u64).unwrap());
 
-  let encoded =
-    ("execute".to_string(), nonce, vec![router::OutInstruction { to, value, data }]).encode();
-  let sig = hash_and_sign(&keys, &public_key, chain_id.into(), &encoded);
+  let message = Router::execute_message(U256::try_from(chain_id).unwrap(), nonce, txs.clone());
+  let sig = hash_and_sign(&keys, &public_key, &message);
 
-  let tx = contract
-    .execute(vec![tx], router::Signature { c: sig.c.to_repr().into(), s: sig.s.to_repr().into() })
-    .gas(300_000);
-  let pending_tx = tx.send().await.unwrap();
-  let receipt = dbg!(pending_tx.await.unwrap().unwrap());
-  assert!(receipt.status == Some(1.into()));
+  let receipt =
+    send(&client, &anvil.keys()[0].clone().into(), contract.execute(&txs, &sig)).await.unwrap();
+  assert!(receipt.status());
 
-  println!("gas used: {:?}", receipt.cumulative_gas_used);
-  println!("logs: {:?}", receipt.logs);
+  let second_block_hash = latest_block_hash(&client).await;
+  assert_eq!(contract.nonce(second_block_hash).await.unwrap(), U256::try_from(2u64).unwrap());
+  // Check this does still offer the historical state
+  assert_eq!(contract.nonce(first_block_hash).await.unwrap(), U256::try_from(1u64).unwrap());
+  // TODO: Check logs
+
+  println!("gas used: {:?}", receipt.gas_used);
+  // println!("logs: {:?}", receipt.logs);
}
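`test_router_update_serai_key` above draws candidate keys in a loop because (by assumption here) `PublicKey::new` only accepts points meeting the crate's canonicity rule, such as an even y coordinate for ecrecover's parity handling. The same rejection-sampling shape, standalone with k256 (illustrative only):

```rust
use rand_core::OsRng;
use k256::{
  elliptic_curve::group::{Group, GroupEncoding},
  ProjectivePoint,
};

fn main() {
  // Draw random points until one compresses with the 0x02 (even-y) SEC1 tag
  let key = loop {
    let point = ProjectivePoint::random(&mut OsRng);
    if point.to_bytes()[0] == 0x02 {
      break point;
    }
  };
  println!("sampled an even-y key: {:?}", key.to_bytes());
}
```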
@@ -1,11 +1,9 @@
-use std::{convert::TryFrom, sync::Arc};
+use std::sync::Arc;
 
 use rand_core::OsRng;
 
-use ::k256::{elliptic_curve::bigint::ArrayEncoding, U256, Scalar};
-use ethers_core::utils::{keccak256, Anvil, AnvilInstance};
-use ethers_providers::{Middleware, Provider, Http};
+use group::ff::PrimeField;
+use k256::Scalar;
 
 use frost::{
   curve::Secp256k1,
@@ -13,24 +11,34 @@ use frost::{
   tests::{algorithm_machines, sign},
 };
 
+use alloy_core::primitives::Address;
+
+use alloy_sol_types::SolCall;
+
+use alloy_rpc_types::{TransactionInput, TransactionRequest};
+use alloy_simple_request_transport::SimpleRequest;
+use alloy_rpc_client::ClientBuilder;
+use alloy_provider::{Provider, RootProvider};
+
+use alloy_node_bindings::{Anvil, AnvilInstance};
+
 use crate::{
+  Error,
   crypto::*,
-  schnorr::*,
-  tests::{key_gen, deploy_contract},
+  tests::{key_gen, deploy_contract, abi::schnorr as abi},
 };
 
-async fn setup_test() -> (u32, AnvilInstance, Schnorr<Provider<Http>>) {
+async fn setup_test() -> (AnvilInstance, Arc<RootProvider<SimpleRequest>>, Address) {
   let anvil = Anvil::new().spawn();
 
-  let provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
-  let chain_id = provider.get_chainid().await.unwrap().as_u32();
+  let provider = RootProvider::new(
+    ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
+  );
   let wallet = anvil.keys()[0].clone().into();
   let client = Arc::new(provider);
 
-  let contract_address =
-    deploy_contract(chain_id, client.clone(), &wallet, "Schnorr").await.unwrap();
-  let contract = Schnorr::new(contract_address, client.clone());
-
-  (chain_id, anvil, contract)
+  let address = deploy_contract(client.clone(), &wallet, "TestSchnorr").await.unwrap();
+  (anvil, client, address)
 }
 
 #[tokio::test]
@@ -38,30 +46,48 @@ async fn test_deploy_contract() {
   setup_test().await;
 }
 
+pub async fn call_verify(
+  provider: &RootProvider<SimpleRequest>,
+  contract: Address,
+  public_key: &PublicKey,
+  message: &[u8],
+  signature: &Signature,
+) -> Result<(), Error> {
+  let px: [u8; 32] = public_key.px.to_repr().into();
+  let c_bytes: [u8; 32] = signature.c.to_repr().into();
+  let s_bytes: [u8; 32] = signature.s.to_repr().into();
+  let call = TransactionRequest::default().to(Some(contract)).input(TransactionInput::new(
+    abi::verifyCall::new((px.into(), message.to_vec().into(), c_bytes.into(), s_bytes.into()))
+      .abi_encode()
+      .into(),
+  ));
+  let bytes = provider.call(&call, None).await.map_err(|_| Error::ConnectionError)?;
+  let res =
+    abi::verifyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
+
+  if res._0 {
+    Ok(())
+  } else {
+    Err(Error::InvalidSignature)
+  }
+}
+
 #[tokio::test]
 async fn test_ecrecover_hack() {
-  let (chain_id, _anvil, contract) = setup_test().await;
-  let chain_id = U256::from(chain_id);
+  let (_anvil, client, contract) = setup_test().await;
 
   let (keys, public_key) = key_gen();
 
   const MESSAGE: &[u8] = b"Hello, World!";
-  let hashed_message = keccak256(MESSAGE);
-  let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
 
   let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
-  let sig = sign(
-    &mut OsRng,
-    &algo,
-    keys.clone(),
-    algorithm_machines(&mut OsRng, &algo, &keys),
-    full_message,
-  );
-  let sig = Signature::new(&public_key, chain_id, MESSAGE, sig).unwrap();
+  let sig =
+    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
+  let sig = Signature::new(&public_key, MESSAGE, sig).unwrap();
 
-  call_verify(&contract, &public_key, MESSAGE, &sig).await.unwrap();
+  call_verify(&client, contract, &public_key, MESSAGE, &sig).await.unwrap();
   // Test an invalid signature fails
   let mut sig = sig;
   sig.s += Scalar::ONE;
-  assert!(call_verify(&contract, &public_key, MESSAGE, &sig).await.is_err());
+  assert!(call_verify(&client, contract, &public_key, MESSAGE, &sig).await.is_err());
 }
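`call_verify` above drives the test contract through a raw `eth_call`, ABI-encoding the arguments with the `sol!`-generated `verifyCall` type. A self-contained sketch of that encode step (the interface is redeclared inline here for illustration rather than loaded from the repository's Schnorr.sol):

```rust
use alloy_sol_types::{sol, SolCall};

sol! {
  contract TestSchnorr {
    function verify(bytes32 px, bytes calldata message, bytes32 c, bytes32 s)
      external pure returns (bool);
  }
}

fn main() {
  let call = TestSchnorr::verifyCall {
    px: [0x11; 32].into(),
    message: b"Hello, World!".to_vec().into(),
    c: [0x22; 32].into(),
    s: [0x33; 32].into(),
  };
  // This is the calldata a TransactionRequest would carry in its input field
  let calldata = call.abi_encode();
  // The first four bytes are the function selector
  assert_eq!(&calldata[.. 4], TestSchnorr::verifyCall::SELECTOR.as_slice());
}
```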
@@ -43,7 +43,6 @@ multiexp = { path = "../../crypto/multiexp", version = "0.4", default-features =
 
 # Needed for multisig
 transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["recommended"], optional = true }
-dleq = { path = "../../crypto/dleq", version = "0.4", default-features = false, features = ["serialize"], optional = true }
 frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.8", default-features = false, features = ["ed25519"], optional = true }
 
 monero-generators = { path = "generators", version = "0.4", default-features = false }
@@ -91,7 +90,6 @@ std = [
   "multiexp/std",
 
   "transcript/std",
-  "dleq/std",
 
   "monero-generators/std",
 
@@ -106,7 +104,7 @@ std = [
 
 cache-distribution = ["async-lock"]
 http-rpc = ["digest_auth", "simple-request", "tokio"]
-multisig = ["transcript", "frost", "dleq", "std"]
+multisig = ["transcript", "frost", "std"]
 binaries = ["tokio/rt-multi-thread", "tokio/macros", "http-rpc"]
 experimental = []
 
@@ -14,7 +14,12 @@ use zeroize::{Zeroize, ZeroizeOnDrop};
 
 use sha3::{Digest, Keccak256};
 
-use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint};
+use curve25519_dalek::{
+  constants::{ED25519_BASEPOINT_TABLE, ED25519_BASEPOINT_POINT},
+  scalar::Scalar,
+  edwards::{EdwardsPoint, VartimeEdwardsPrecomputation},
+  traits::VartimePrecomputedMultiscalarMul,
+};
 
 pub use monero_generators::{H, decompress_point};
 
@@ -56,6 +61,13 @@ pub(crate) fn INV_EIGHT() -> Scalar {
   *INV_EIGHT_CELL.get_or_init(|| Scalar::from(8u8).invert())
 }
 
+static BASEPOINT_PRECOMP_CELL: OnceLock<VartimeEdwardsPrecomputation> = OnceLock::new();
+#[allow(non_snake_case)]
+pub(crate) fn BASEPOINT_PRECOMP() -> &'static VartimeEdwardsPrecomputation {
+  BASEPOINT_PRECOMP_CELL
+    .get_or_init(|| VartimeEdwardsPrecomputation::new([ED25519_BASEPOINT_POINT]))
+}
+
 /// Monero protocol version.
 ///
 /// v15 is omitted as v15 was simply v14 and v16 being active at the same time, with regards to the
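The new `BASEPOINT_PRECOMP` follows the same `OnceLock` pattern as `INV_EIGHT`: build the variable-time basepoint tables once, then share them across every verification. A minimal standalone sketch of the pattern, using the same crates:

```rust
use std::sync::OnceLock;

use curve25519_dalek::{
  constants::ED25519_BASEPOINT_POINT,
  edwards::VartimeEdwardsPrecomputation,
  scalar::Scalar,
  traits::VartimePrecomputedMultiscalarMul,
};

static PRECOMP: OnceLock<VartimeEdwardsPrecomputation> = OnceLock::new();

fn precomp() -> &'static VartimeEdwardsPrecomputation {
  // Built on first use, then shared; the tables make repeated sG cheap
  PRECOMP.get_or_init(|| VartimeEdwardsPrecomputation::new([ED25519_BASEPOINT_POINT]))
}

fn main() {
  let s = Scalar::from(7u8);
  // Variable-time, so only ever feed it public scalars
  assert_eq!(precomp().vartime_multiscalar_mul([s]), s * ED25519_BASEPOINT_POINT);
}
```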
@@ -91,7 +91,7 @@ impl Bulletproofs {
       Bulletproofs::Plus(
         AggregateRangeStatement::new(outputs.iter().map(|com| DfgPoint(com.calculate())).collect())
           .unwrap()
-          .prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs).unwrap()))
+          .prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs.to_vec()).unwrap()))
           .unwrap(),
       )
     })
@@ -24,7 +24,7 @@ use crate::{
   },
 };
 
-// Figure 3
+// Figure 3 of the Bulletproofs+ Paper
 #[derive(Clone, Debug)]
 pub(crate) struct AggregateRangeStatement {
   generators: Generators,
@@ -38,24 +38,15 @@ impl Zeroize for AggregateRangeStatement {
 }
 
 #[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)]
-pub(crate) struct AggregateRangeWitness {
-  values: Vec<u64>,
-  gammas: Vec<Scalar>,
-}
+pub(crate) struct AggregateRangeWitness(Vec<Commitment>);
 
 impl AggregateRangeWitness {
-  pub(crate) fn new(commitments: &[Commitment]) -> Option<Self> {
+  pub(crate) fn new(commitments: Vec<Commitment>) -> Option<Self> {
     if commitments.is_empty() || (commitments.len() > MAX_M) {
       return None;
     }
 
-    let mut values = Vec::with_capacity(commitments.len());
-    let mut gammas = Vec::with_capacity(commitments.len());
-    for commitment in commitments {
-      values.push(commitment.amount);
-      gammas.push(Scalar(commitment.mask));
-    }
-    Some(AggregateRangeWitness { values, gammas })
+    Some(AggregateRangeWitness(commitments))
   }
 }
 
@@ -162,13 +153,11 @@ impl AggregateRangeStatement {
     witness: &AggregateRangeWitness,
   ) -> Option<AggregateRangeProof> {
     // Check for consistency with the witness
-    if self.V.len() != witness.values.len() {
+    if self.V.len() != witness.0.len() {
       return None;
     }
-    for (commitment, (value, gamma)) in
-      self.V.iter().zip(witness.values.iter().zip(witness.gammas.iter()))
-    {
-      if Commitment::new(**gamma, *value).calculate() != **commitment {
+    for (commitment, witness) in self.V.iter().zip(witness.0.iter()) {
+      if witness.calculate() != **commitment {
         return None;
       }
     }
@@ -196,7 +185,13 @@ impl AggregateRangeStatement {
     let mut a_l = ScalarVector(Vec::with_capacity(V.len() * N));
     for j in 1 ..= V.len() {
       d_js.push(Self::d_j(j, V.len()));
-      a_l.0.append(&mut u64_decompose(*witness.values.get(j - 1).unwrap_or(&0)).0);
+      #[allow(clippy::map_unwrap_or)]
+      a_l.0.append(
+        &mut u64_decompose(
+          *witness.0.get(j - 1).map(|commitment| &commitment.amount).unwrap_or(&0),
+        )
+        .0,
+      );
     }
 
     let a_r = a_l.clone() - Scalar::ONE;
@@ -223,8 +218,8 @@ impl AggregateRangeStatement {
     let a_l = a_l - z;
     let a_r = a_r + &d_descending_y_plus_z;
     let mut alpha = alpha;
-    for j in 1 ..= witness.gammas.len() {
-      alpha += z_pow[j - 1] * witness.gammas[j - 1] * y_mn_plus_one;
+    for j in 1 ..= witness.0.len() {
+      alpha += z_pow[j - 1] * Scalar(witness.0[j - 1].mask) * y_mn_plus_one;
    }
 
     Some(AggregateRangeProof {
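The consistency check in `prove` recomputes each Pedersen commitment from its witness. As a worked equation (a sketch; $G$ the basepoint, $\mathcal{H}$ Monero's amount generator, so a `Commitment { mask, amount }` is):

$$V_j = \gamma_j G + v_j \mathcal{H},$$

and the mask folding in the last hunk accumulates $\alpha \mathrel{+}= z_j\,\gamma_j\,y^{mn+1}$, with `z_pow` holding the appropriate powers of $z$ — the same blinding term the verifier later reconstructs from $V_j$ alone, which is why the prover must confirm the witness actually opens the statement's commitments before proceeding.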
@@ -15,7 +15,7 @@ use crate::ringct::bulletproofs::plus::{
   ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, transcript::*,
 };
 
-// Figure 1
+// Figure 1 of the Bulletproofs+ paper
 #[derive(Clone, Debug)]
 pub(crate) struct WipStatement {
   generators: Generators,
@@ -9,17 +9,17 @@ use std_shims::{
 use rand_core::{RngCore, CryptoRng};
 
 use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
-use subtle::{ConstantTimeEq, Choice, CtOption};
+use subtle::{ConstantTimeEq, ConditionallySelectable};
 
 use curve25519_dalek::{
-  constants::ED25519_BASEPOINT_TABLE,
+  constants::{ED25519_BASEPOINT_TABLE, ED25519_BASEPOINT_POINT},
   scalar::Scalar,
-  traits::{IsIdentity, VartimePrecomputedMultiscalarMul},
+  traits::{IsIdentity, MultiscalarMul, VartimePrecomputedMultiscalarMul},
   edwards::{EdwardsPoint, VartimeEdwardsPrecomputation},
 };
 
 use crate::{
-  INV_EIGHT, Commitment, random_scalar, hash_to_scalar, wallet::decoys::Decoys,
+  INV_EIGHT, BASEPOINT_PRECOMP, Commitment, random_scalar, hash_to_scalar, wallet::decoys::Decoys,
   ringct::hash_to_point, serialize::*,
 };
 
@@ -27,8 +27,6 @@ use crate::{
 mod multisig;
 #[cfg(feature = "multisig")]
 pub use multisig::{ClsagDetails, ClsagAddendum, ClsagMultisig};
-#[cfg(feature = "multisig")]
-pub(crate) use multisig::add_key_image_share;
 
 /// Errors returned when CLSAG signing fails.
 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
@@ -100,8 +98,11 @@ fn core(
 ) -> ((EdwardsPoint, Scalar, Scalar), Scalar) {
   let n = ring.len();
 
-  let images_precomp = VartimeEdwardsPrecomputation::new([I, D]);
-  let D = D * INV_EIGHT();
+  let images_precomp = match A_c1 {
+    Mode::Sign(..) => None,
+    Mode::Verify(..) => Some(VartimeEdwardsPrecomputation::new([I, D])),
+  };
+  let D_INV_EIGHT = D * INV_EIGHT();
 
   // Generate the transcript
   // Instead of generating multiple, a single transcript is created and then edited as needed
@@ -130,7 +131,7 @@ fn core(
   }
 
   to_hash.extend(I.compress().to_bytes());
-  to_hash.extend(D.compress().to_bytes());
+  to_hash.extend(D_INV_EIGHT.compress().to_bytes());
   to_hash.extend(pseudo_out.compress().to_bytes());
   // mu_P with agg_0
   let mu_P = hash_to_scalar(&to_hash);
@@ -169,29 +170,44 @@ fn core(
   }
 
   // Perform the core loop
-  let mut c1 = CtOption::new(Scalar::ZERO, Choice::from(0));
+  let mut c1 = c;
   for i in (start .. end).map(|i| i % n) {
-    // This will only execute once and shouldn't need to be constant time. Making it constant time
-    // removes the risk of branch prediction creating timing differences depending on ring index
-    // however
-    c1 = c1.or_else(|| CtOption::new(c, i.ct_eq(&0)));
-
     let c_p = mu_P * c;
     let c_c = mu_C * c;
 
-    let L = (&s[i] * ED25519_BASEPOINT_TABLE) + (c_p * P[i]) + (c_c * C[i]);
+    // (s_i * G) + (c_p * P_i) + (c_c * C_i)
+    let L = match A_c1 {
+      Mode::Sign(..) => {
+        EdwardsPoint::multiscalar_mul([s[i], c_p, c_c], [ED25519_BASEPOINT_POINT, P[i], C[i]])
+      }
+      Mode::Verify(..) => {
+        BASEPOINT_PRECOMP().vartime_mixed_multiscalar_mul([s[i]], [c_p, c_c], [P[i], C[i]])
+      }
+    };
+
     let PH = hash_to_point(&P[i]);
-    // Shouldn't be an issue as all of the variables in this vartime statement are public
-    let R = (s[i] * PH) + images_precomp.vartime_multiscalar_mul([c_p, c_c]);
+    // (c_p * I) + (c_c * D) + (s_i * PH)
+    let R = match A_c1 {
+      Mode::Sign(..) => EdwardsPoint::multiscalar_mul([c_p, c_c, s[i]], [I, D, &PH]),
+      Mode::Verify(..) => {
+        images_precomp.as_ref().unwrap().vartime_mixed_multiscalar_mul([c_p, c_c], [s[i]], [PH])
+      }
+    };
 
     to_hash.truncate(((2 * n) + 3) * 32);
     to_hash.extend(L.compress().to_bytes());
     to_hash.extend(R.compress().to_bytes());
     c = hash_to_scalar(&to_hash);
+
+    // This will only execute once and shouldn't need to be constant time. Making it constant time
+    // removes the risk of branch prediction creating timing differences depending on ring index
+    // however
+    c1.conditional_assign(&c, i.ct_eq(&(n - 1)));
   }
 
   // This first tuple is needed to continue signing, the latter is the c to be tested/worked with
-  ((D, c * mu_P, c * mu_C), c1.unwrap_or(c))
+  ((D_INV_EIGHT, c * mu_P, c * mu_C), c1)
 }
 
 /// CLSAG signature, as used in Monero.
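The `Mode` split above picks between a constant-time multiscalar multiplication while signing (the scalars involve secrets) and variable-time precomputation while verifying (everything is public). A minimal sketch of the two APIs agreeing, assuming curve25519-dalek 4:

```rust
use curve25519_dalek::{
  constants::ED25519_BASEPOINT_POINT,
  edwards::{EdwardsPoint, VartimeEdwardsPrecomputation},
  scalar::Scalar,
  traits::{MultiscalarMul, VartimePrecomputedMultiscalarMul},
};

fn main() {
  let (a, b) = (Scalar::from(3u8), Scalar::from(5u8));
  let p = EdwardsPoint::mul_base(&Scalar::from(9u8));

  // Constant-time: safe when the scalars are secret (the signing path)
  let ct = EdwardsPoint::multiscalar_mul([a, b], [ED25519_BASEPOINT_POINT, p]);

  // Variable-time with precomputed basepoint tables: faster, for public inputs
  // (the verifying path)
  let precomp = VartimeEdwardsPrecomputation::new([ED25519_BASEPOINT_POINT]);
  let vt = precomp.vartime_mixed_multiscalar_mul([a], [b], [p]);

  assert_eq!(ct, vt);
}
```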
@@ -261,8 +277,10 @@ impl Clsag {
         nonce.deref() *
           hash_to_point(&inputs[i].2.decoys.ring[usize::from(inputs[i].2.decoys.i)][0]),
       );
-      clsag.s[usize::from(inputs[i].2.decoys.i)] =
-        (-((p * inputs[i].0.deref()) + c)) + nonce.deref();
+      // Effectively r - cx, except cx is (c_p x) + (c_c z), where z is the delta between a ring
+      // member's commitment and our input commitment (which will only have a known discrete log
+      // over G if the amounts cancel out)
+      clsag.s[usize::from(inputs[i].2.decoys.i)] = nonce.deref() - ((p * inputs[i].0.deref()) + c);
       inputs[i].0.zeroize();
       nonce.zeroize();
 
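As a worked equation for the comment above (a sketch): with aggregation coefficients $\mu_P, \mu_C$ and challenge $c$, write $c_p = \mu_P c$ and $c_c = \mu_C c$. For the real ring index, $P_i = xG$ and the commitment delta $C_i = zG$, so the response

$$s_i = r - (c_p x + c_c z)$$

makes the verifier's recomputation collapse back to the nonce commitment:

$$L_i = s_i G + c_p P_i + c_c C_i = rG.$$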
@@ -1,5 +1,8 @@
 use core::{ops::Deref, fmt::Debug};
-use std_shims::io::{self, Read, Write};
+use std_shims::{
+  io::{self, Read, Write},
+  collections::HashMap,
+};
 use std::sync::{Arc, RwLock};
 
 use rand_core::{RngCore, CryptoRng, SeedableRng};
@@ -9,11 +12,13 @@ use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
 
 use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};
 
-use group::{ff::Field, Group, GroupEncoding};
+use group::{
+  ff::{Field, PrimeField},
+  Group, GroupEncoding,
+};
 
 use transcript::{Transcript, RecommendedTranscript};
 use dalek_ff_group as dfg;
-use dleq::DLEqProof;
 use frost::{
   dkg::lagrange,
   curve::Ed25519,
@@ -26,10 +31,6 @@ use crate::ringct::{
   clsag::{ClsagInput, Clsag},
 };
 
-fn dleq_transcript() -> RecommendedTranscript {
-  RecommendedTranscript::new(b"monero_key_image_dleq")
-}
-
 impl ClsagInput {
   fn transcript<T: Transcript>(&self, transcript: &mut T) {
     // Doesn't domain separate as this is considered part of the larger CLSAG proof
@@ -43,6 +44,7 @@ impl ClsagInput {
       // They're just a unreliable reference to this data which will be included in the message
       // if in use
       transcript.append_message(b"member", [u8::try_from(i).expect("ring size exceeded 255")]);
+      // This also transcripts the key image generator since it's derived from this key
       transcript.append_message(b"key", pair[0].compress().to_bytes());
       transcript.append_message(b"commitment", pair[1].compress().to_bytes())
     }
@@ -70,13 +72,11 @@ impl ClsagDetails {
 #[derive(Clone, PartialEq, Eq, Zeroize, Debug)]
 pub struct ClsagAddendum {
   pub(crate) key_image: dfg::EdwardsPoint,
-  dleq: DLEqProof<dfg::EdwardsPoint>,
 }
 
 impl WriteAddendum for ClsagAddendum {
   fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
-    writer.write_all(self.key_image.compress().to_bytes().as_ref())?;
-    self.dleq.write(writer)
+    writer.write_all(self.key_image.compress().to_bytes().as_ref())
   }
 }
 
@@ -97,9 +97,8 @@ pub struct ClsagMultisig {
   transcript: RecommendedTranscript,
 
   pub(crate) H: EdwardsPoint,
-  // Merged here as CLSAG needs it, passing it would be a mess, yet having it beforehand requires
-  // an extra round
-  image: EdwardsPoint,
+  key_image_shares: HashMap<[u8; 32], dfg::EdwardsPoint>,
+  image: Option<dfg::EdwardsPoint>,
 
   details: Arc<RwLock<Option<ClsagDetails>>>,
 
@@ -117,7 +116,8 @@ impl ClsagMultisig {
       transcript,
 
       H: hash_to_point(&output_key),
-      image: EdwardsPoint::identity(),
+      key_image_shares: HashMap::new(),
+      image: None,
 
       details,
 
@@ -135,20 +135,6 @@ impl ClsagMultisig {
   }
 }
 
-pub(crate) fn add_key_image_share(
-  image: &mut EdwardsPoint,
-  generator: EdwardsPoint,
-  offset: Scalar,
-  included: &[Participant],
-  participant: Participant,
-  share: EdwardsPoint,
-) {
-  if image.is_identity().into() {
-    *image = generator * offset;
-  }
-  *image += share * lagrange::<dfg::Scalar>(participant, included).0;
-}
-
 impl Algorithm<Ed25519> for ClsagMultisig {
   type Transcript = RecommendedTranscript;
   type Addendum = ClsagAddendum;
@@ -160,23 +146,10 @@ impl Algorithm<Ed25519> for ClsagMultisig {
 
   fn preprocess_addendum<R: RngCore + CryptoRng>(
     &mut self,
-    rng: &mut R,
+    _rng: &mut R,
     keys: &ThresholdKeys<Ed25519>,
   ) -> ClsagAddendum {
-    ClsagAddendum {
-      key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref(),
-      dleq: DLEqProof::prove(
-        rng,
-        // Doesn't take in a larger transcript object due to the usage of this
-        // Every prover would immediately write their own DLEq proof, when they can only do so in
-        // the proper order if they want to reach consensus
-        // It'd be a poor API to have CLSAG define a new transcript solely to pass here, just to
-        // try to merge later in some form, when it should instead just merge xH (as it does)
-        &mut dleq_transcript(),
-        &[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)],
-        keys.secret_share(),
-      ),
-    }
+    ClsagAddendum { key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref() }
   }
 
   fn read_addendum<R: Read>(&self, reader: &mut R) -> io::Result<ClsagAddendum> {
@@ -190,7 +163,7 @@ impl Algorithm<Ed25519> for ClsagMultisig {
       Err(io::Error::other("non-canonical key image"))?;
     }
 
-    Ok(ClsagAddendum { key_image: xH, dleq: DLEqProof::<dfg::EdwardsPoint>::read(reader)? })
+    Ok(ClsagAddendum { key_image: xH })
   }
 
   fn process_addendum(
@@ -199,33 +172,29 @@ impl Algorithm<Ed25519> for ClsagMultisig {
     l: Participant,
     addendum: ClsagAddendum,
   ) -> Result<(), FrostError> {
-    // TODO: This check is faulty if two shares are additive inverses of each other
-    if self.image.is_identity().into() {
+    if self.image.is_none() {
       self.transcript.domain_separate(b"CLSAG");
+      // Transcript the ring
       self.input().transcript(&mut self.transcript);
+      // Transcript the mask
       self.transcript.append_message(b"mask", self.mask().to_bytes());
+
+      // Init the image to the offset
+      self.image = Some(dfg::EdwardsPoint(self.H) * view.offset());
     }
 
+    // Transcript this participant's contribution
     self.transcript.append_message(b"participant", l.to_bytes());
-
-    addendum
-      .dleq
-      .verify(
-        &mut dleq_transcript(),
-        &[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)],
-        &[view.original_verification_share(l), addendum.key_image],
-      )
-      .map_err(|_| FrostError::InvalidPreprocess(l))?;
-
     self.transcript.append_message(b"key_image_share", addendum.key_image.compress().to_bytes());
-    add_key_image_share(
-      &mut self.image,
-      self.H,
-      view.offset().0,
-      view.included(),
-      l,
-      addendum.key_image.0,
-    );
+
+    // Accumulate the interpolated share
+    let interpolated_key_image_share =
+      addendum.key_image * lagrange::<dfg::Scalar>(l, view.included());
+    *self.image.as_mut().unwrap() += interpolated_key_image_share;
+
+    self
+      .key_image_shares
+      .insert(view.verification_share(l).to_bytes(), interpolated_key_image_share);
 
     Ok(())
   }
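The accumulation above is plain Lagrange interpolation in the exponent (a sketch of the equation): each addendum carries $x_\ell \mathcal{H}_p(P)$, and with $S$ the included participants, $\lambda_\ell$ their Lagrange coefficients at zero, and $o$ the key offset, the full key image is

$$I = \Big(o + \sum_{\ell \in S} \lambda_\ell\, x_\ell\Big)\,\mathcal{H}_p(P),$$

which is exactly the `offset term + Σ interpolated shares` built incrementally here, with each interpolated share also retained for the per-participant check in `verify_share` below.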
@@ -253,7 +222,7 @@ impl Algorithm<Ed25519> for ClsagMultisig {
     #[allow(non_snake_case)]
     let (clsag, pseudo_out, p, c) = Clsag::sign_core(
       &mut rng,
-      &self.image,
+      &self.image.expect("verifying a share despite never processing any addendums").0,
       &self.input(),
       self.mask(),
       self.msg.as_ref().unwrap(),
@@ -262,7 +231,8 @@ impl Algorithm<Ed25519> for ClsagMultisig {
     );
     self.interim = Some(Interim { p, c, clsag, pseudo_out });
 
-    (-(dfg::Scalar(p) * view.secret_share().deref())) + nonces[0].deref()
+    // r - p x, where p is the challenge for the keys
+    *nonces[0] - dfg::Scalar(p) * view.secret_share().deref()
   }
 
   #[must_use]
@@ -274,11 +244,13 @@ impl Algorithm<Ed25519> for ClsagMultisig {
|
|||||||
) -> Option<Self::Signature> {
|
) -> Option<Self::Signature> {
|
||||||
let interim = self.interim.as_ref().unwrap();
|
let interim = self.interim.as_ref().unwrap();
|
||||||
let mut clsag = interim.clsag.clone();
|
let mut clsag = interim.clsag.clone();
|
||||||
|
// We produced shares as `r - p x`, yet the signature is `r - p x - c x`
|
||||||
|
// Subtract `c x` (saved as `c`) now
|
||||||
clsag.s[usize::from(self.input().decoys.i)] = sum.0 - interim.c;
|
clsag.s[usize::from(self.input().decoys.i)] = sum.0 - interim.c;
|
||||||
if clsag
|
if clsag
|
||||||
.verify(
|
.verify(
|
||||||
&self.input().decoys.ring,
|
&self.input().decoys.ring,
|
||||||
&self.image,
|
&self.image.expect("verifying a signature despite never processing any addendums").0,
|
||||||
&interim.pseudo_out,
|
&interim.pseudo_out,
|
||||||
self.msg.as_ref().unwrap(),
|
self.msg.as_ref().unwrap(),
|
||||||
)
|
)
|
||||||
@@ -296,10 +268,61 @@ impl Algorithm<Ed25519> for ClsagMultisig {
|
|||||||
share: dfg::Scalar,
|
share: dfg::Scalar,
|
||||||
) -> Result<Vec<(dfg::Scalar, dfg::EdwardsPoint)>, ()> {
|
) -> Result<Vec<(dfg::Scalar, dfg::EdwardsPoint)>, ()> {
|
||||||
let interim = self.interim.as_ref().unwrap();
|
let interim = self.interim.as_ref().unwrap();
|
||||||
Ok(vec![
|
|
||||||
|
// For a share `r - p x`, the following two equalities should hold:
|
||||||
|
// - `(r - p x)G == R.0 - pV`, where `V = xG`
|
||||||
|
// - `(r - p x)H == R.1 - pK`, where `K = xH` (the key image share)
|
||||||
|
//
|
||||||
|
// This is effectively a discrete log equality proof for:
|
||||||
|
// V, K over G, H
|
||||||
|
// with nonces
|
||||||
|
// R.0, R.1
|
||||||
|
// and solution
|
||||||
|
// s
|
||||||
|
//
|
||||||
|
// Which is a batch-verifiable rewrite of the traditional CP93 proof
|
||||||
|
// (and also writable as Generalized Schnorr Protocol)
|
||||||
|
//
|
||||||
|
// That means that, given a proper challenge, this alone can be argued to prove the
|
||||||
|
// key image share is well-formed and that the provided signature proves as much.
|
||||||
|
|
||||||
|
// This is a bit funky, however, as it doesn't prove the nonces are well-formed. They're part of
|
||||||
|
// the prover data/transcript for a CP93/GSP proof, not part of the statement. In practice, this
|
||||||
|
// is fine, for a variety of reasons (given a consistent `x`, a consistent `r` can be
|
||||||
|
// extracted, and the nonces as used in CLSAG are also part of its prover data/transcript).
|
||||||
|
|
||||||
|
let key_image_share = self.key_image_shares[&verification_share.to_bytes()];
|
||||||
|
|
||||||
|
// Hash every variable relevant here, using the hash output as the random weight
|
||||||
|
let mut weight_transcript =
|
||||||
|
RecommendedTranscript::new(b"monero-serai v0.1 ClsagMultisig::verify_share");
|
||||||
|
weight_transcript.append_message(b"G", dfg::EdwardsPoint::generator().to_bytes());
|
||||||
|
weight_transcript.append_message(b"H", self.H.to_bytes());
|
||||||
|
weight_transcript.append_message(b"xG", verification_share.to_bytes());
|
||||||
|
weight_transcript.append_message(b"xH", key_image_share.to_bytes());
|
||||||
|
weight_transcript.append_message(b"rG", nonces[0][0].to_bytes());
|
||||||
|
weight_transcript.append_message(b"rH", nonces[0][1].to_bytes());
|
||||||
|
weight_transcript.append_message(b"c", dfg::Scalar(interim.p).to_repr());
|
||||||
|
weight_transcript.append_message(b"s", share.to_repr());
|
||||||
|
let weight = weight_transcript.challenge(b"weight");
|
||||||
|
let weight = dfg::Scalar(Scalar::from_bytes_mod_order_wide(&weight.into()));
|
||||||
|
|
||||||
|
let part_one = vec![
|
||||||
(share, dfg::EdwardsPoint::generator()),
|
(share, dfg::EdwardsPoint::generator()),
|
||||||
(dfg::Scalar(interim.p), verification_share),
|
// -(R.0 - pV) == -R.0 + pV
|
||||||
(-dfg::Scalar::ONE, nonces[0][0]),
|
(-dfg::Scalar::ONE, nonces[0][0]),
|
||||||
])
|
(dfg::Scalar(interim.p), verification_share),
|
||||||
|
];
|
||||||
|
|
||||||
|
let mut part_two = vec![
|
||||||
|
(weight * share, dfg::EdwardsPoint(self.H)),
|
||||||
|
// -(R.1 - pK) == -R.1 + pK
|
||||||
|
(-weight, nonces[0][1]),
|
||||||
|
(weight * dfg::Scalar(interim.p), key_image_share),
|
||||||
|
];
|
||||||
|
|
||||||
|
let mut all = part_one;
|
||||||
|
all.append(&mut part_two);
|
||||||
|
Ok(all)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
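Written out, the two equalities from the comment above (with s = r - p x being the share) batch into the single multiexp the returned pairs encode, w being the weight drawn from weight_transcript:

```latex
s G - R_0 + p V = 0, \qquad s H - R_1 + p K = 0
\left( s G - R_0 + p V \right) + w \left( s H - R_1 + p K \right) = 0
```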
|
|||||||
@@ -21,7 +21,7 @@ fn test_aggregate_range_proof() {
|
|||||||
}
|
}
|
||||||
let commitment_points = commitments.iter().map(|com| EdwardsPoint(com.calculate())).collect();
|
let commitment_points = commitments.iter().map(|com| EdwardsPoint(com.calculate())).collect();
|
||||||
let statement = AggregateRangeStatement::new(commitment_points).unwrap();
|
let statement = AggregateRangeStatement::new(commitment_points).unwrap();
|
||||||
let witness = AggregateRangeWitness::new(&commitments).unwrap();
|
let witness = AggregateRangeWitness::new(commitments).unwrap();
|
||||||
|
|
||||||
let proof = statement.clone().prove(&mut OsRng, &witness).unwrap();
|
let proof = statement.clone().prove(&mut OsRng, &witness).unwrap();
|
||||||
statement.verify(&mut OsRng, &mut verifier, (), proof);
|
statement.verify(&mut OsRng, &mut verifier, (), proof);
|
||||||
|
|||||||
@@ -57,7 +57,7 @@ fn clsag() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let image = generate_key_image(&secrets.0);
|
let image = generate_key_image(&secrets.0);
|
||||||
let (clsag, pseudo_out) = Clsag::sign(
|
let (mut clsag, pseudo_out) = Clsag::sign(
|
||||||
&mut OsRng,
|
&mut OsRng,
|
||||||
vec![(
|
vec![(
|
||||||
secrets.0,
|
secrets.0,
|
||||||
@@ -76,7 +76,12 @@ fn clsag() {
|
|||||||
msg,
|
msg,
|
||||||
)
|
)
|
||||||
.swap_remove(0);
|
.swap_remove(0);
|
||||||
|
|
||||||
clsag.verify(&ring, &image, &pseudo_out, &msg).unwrap();
|
clsag.verify(&ring, &image, &pseudo_out, &msg).unwrap();
|
||||||
|
|
||||||
|
// Make sure verification fails if we throw a random `c1` at it.
|
||||||
|
clsag.c1 = random_scalar(&mut OsRng);
|
||||||
|
assert!(clsag.verify(&ring, &image, &pseudo_out, &msg).is_err());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
use core::{marker::PhantomData, fmt::Debug};
|
use core::{marker::PhantomData, fmt};
|
||||||
use std_shims::string::{String, ToString};
|
use std_shims::string::ToString;
|
||||||
|
|
||||||
use zeroize::Zeroize;
|
use zeroize::Zeroize;
|
||||||
|
|
||||||
@@ -81,7 +81,7 @@ impl AddressType {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// A type which returns the byte for a given address.
|
/// A type which returns the byte for a given address.
|
||||||
pub trait AddressBytes: Clone + Copy + PartialEq + Eq + Debug {
|
pub trait AddressBytes: Clone + Copy + PartialEq + Eq + fmt::Debug {
|
||||||
fn network_bytes(network: Network) -> (u8, u8, u8, u8);
|
fn network_bytes(network: Network) -> (u8, u8, u8, u8);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -191,8 +191,8 @@ pub struct Address<B: AddressBytes> {
|
|||||||
pub view: EdwardsPoint,
|
pub view: EdwardsPoint,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<B: AddressBytes> core::fmt::Debug for Address<B> {
|
impl<B: AddressBytes> fmt::Debug for Address<B> {
|
||||||
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
|
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
||||||
fmt
|
fmt
|
||||||
.debug_struct("Address")
|
.debug_struct("Address")
|
||||||
.field("meta", &self.meta)
|
.field("meta", &self.meta)
|
||||||
@@ -212,8 +212,8 @@ impl<B: AddressBytes> Zeroize for Address<B> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<B: AddressBytes> ToString for Address<B> {
|
impl<B: AddressBytes> fmt::Display for Address<B> {
|
||||||
fn to_string(&self) -> String {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
let mut data = vec![self.meta.to_byte()];
|
let mut data = vec![self.meta.to_byte()];
|
||||||
data.extend(self.spend.compress().to_bytes());
|
data.extend(self.spend.compress().to_bytes());
|
||||||
data.extend(self.view.compress().to_bytes());
|
data.extend(self.view.compress().to_bytes());
|
||||||
@@ -226,7 +226,7 @@ impl<B: AddressBytes> ToString for Address<B> {
|
|||||||
if let Some(id) = self.meta.kind.payment_id() {
|
if let Some(id) = self.meta.kind.payment_id() {
|
||||||
data.extend(id);
|
data.extend(id);
|
||||||
}
|
}
|
||||||
encode_check(&data).unwrap()
|
write!(f, "{}", encode_check(&data).unwrap())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
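Swapping the manual `ToString` impl for `fmt::Display` is non-breaking: the blanket `impl<T: fmt::Display> ToString for T` keeps `.to_string()` available, while the address also becomes usable in format strings. A minimal sketch, `addr` being a hypothetical `Address<B>`:

```rust
// `to_string` now comes via the blanket impl over `Display`.
let encoded: String = addr.to_string();
// The address can also be formatted directly.
println!("refund address: {addr}");
```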
|
|||||||
@@ -18,6 +18,7 @@ use transcript::{Transcript, RecommendedTranscript};
|
|||||||
use frost::{
|
use frost::{
|
||||||
curve::Ed25519,
|
curve::Ed25519,
|
||||||
Participant, FrostError, ThresholdKeys,
|
Participant, FrostError, ThresholdKeys,
|
||||||
|
dkg::lagrange,
|
||||||
sign::{
|
sign::{
|
||||||
Writable, Preprocess, CachedPreprocess, SignatureShare, PreprocessMachine, SignMachine,
|
Writable, Preprocess, CachedPreprocess, SignatureShare, PreprocessMachine, SignMachine,
|
||||||
SignatureMachine, AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine,
|
SignatureMachine, AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine,
|
||||||
@@ -27,7 +28,7 @@ use frost::{
|
|||||||
use crate::{
|
use crate::{
|
||||||
random_scalar,
|
random_scalar,
|
||||||
ringct::{
|
ringct::{
|
||||||
clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig, add_key_image_share},
|
clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig},
|
||||||
RctPrunable,
|
RctPrunable,
|
||||||
},
|
},
|
||||||
transaction::{Input, Transaction},
|
transaction::{Input, Transaction},
|
||||||
@@ -261,8 +262,13 @@ impl SignMachine<Transaction> for TransactionSignMachine {
|
|||||||
included.push(self.i);
|
included.push(self.i);
|
||||||
included.sort_unstable();
|
included.sort_unstable();
|
||||||
|
|
||||||
// Convert the unified commitments to a Vec of the individual commitments
|
// Start calculating the key images, as needed on the TX level
|
||||||
let mut images = vec![EdwardsPoint::identity(); self.clsags.len()];
|
let mut images = vec![EdwardsPoint::identity(); self.clsags.len()];
|
||||||
|
for (image, (generator, offset)) in images.iter_mut().zip(&self.key_images) {
|
||||||
|
*image = generator * offset;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert the serialized nonce commitments to a parallelized Vec
|
||||||
let mut commitments = (0 .. self.clsags.len())
|
let mut commitments = (0 .. self.clsags.len())
|
||||||
.map(|c| {
|
.map(|c| {
|
||||||
included
|
included
|
||||||
@@ -291,14 +297,7 @@ impl SignMachine<Transaction> for TransactionSignMachine {
|
|||||||
// provides the easiest API overall, as this is where the TX is (which needs the key
|
// provides the easiest API overall, as this is where the TX is (which needs the key
|
||||||
// images in its message), along with where the outputs are determined (where our
|
// images in its message), along with where the outputs are determined (where our
|
||||||
// outputs may need these in order to guarantee uniqueness)
|
// outputs may need these in order to guarantee uniqueness)
|
||||||
add_key_image_share(
|
images[c] += preprocess.addendum.key_image.0 * lagrange::<dfg::Scalar>(*l, &included).0;
|
||||||
&mut images[c],
|
|
||||||
self.key_images[c].0,
|
|
||||||
self.key_images[c].1,
|
|
||||||
&included,
|
|
||||||
*l,
|
|
||||||
preprocess.addendum.key_image.0,
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok((*l, preprocess))
|
Ok((*l, preprocess))
|
||||||
})
|
})
|
||||||
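The loop above seeds each image with its offset term; the hunk below then folds in every participant's interpolated key image share. A hedged sketch of the underlying identity (the helper is illustrative, not from this commit, and assumes `lagrange` is `frost::dkg::lagrange` and `Participant` is hashable):

```rust
use std::collections::HashMap;

use frost::{Participant, dkg::lagrange};
use dalek_ff_group as dfg;
use ciphersuite::group::Group;

// With threshold shares x_l of x, interpolating the shares x_l * H over
// the included set recovers the full key image x * H.
fn interpolate_image(
  shares: &HashMap<Participant, dfg::EdwardsPoint>,
  included: &[Participant],
) -> dfg::EdwardsPoint {
  let mut image = dfg::EdwardsPoint::identity();
  for l in included {
    image += shares[l] * lagrange::<dfg::Scalar>(*l, included);
  }
  image
}
```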
|
|||||||
@@ -11,7 +11,7 @@ impl Get for Transaction<'_> {
|
|||||||
let mut res = self.0.get(&key);
|
let mut res = self.0.get(&key);
|
||||||
for change in &self.1 {
|
for change in &self.1 {
|
||||||
if change.1 == key.as_ref() {
|
if change.1 == key.as_ref() {
|
||||||
res = change.2.clone();
|
res.clone_from(&change.2);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
res
|
res
|
||||||
|
|||||||
@@ -51,7 +51,7 @@ env_logger = { version = "0.10", default-features = false, features = ["humantim
|
|||||||
|
|
||||||
futures-util = { version = "0.3", default-features = false, features = ["std"] }
|
futures-util = { version = "0.3", default-features = false, features = ["std"] }
|
||||||
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }
|
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }
|
||||||
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "gossipsub", "macros"] }
|
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "request-response", "gossipsub", "macros"] }
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }
|
tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }
|
||||||
|
|||||||
@@ -22,7 +22,7 @@ use serai_db::{Get, DbTxn, Db, create_db};
|
|||||||
use processor_messages::coordinator::cosign_block_msg;
|
use processor_messages::coordinator::cosign_block_msg;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
p2p::{CosignedBlock, P2pMessageKind, P2p},
|
p2p::{CosignedBlock, GossipMessageKind, P2p},
|
||||||
substrate::LatestCosignedBlock,
|
substrate::LatestCosignedBlock,
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -323,7 +323,7 @@ impl<D: Db> CosignEvaluator<D> {
|
|||||||
for cosign in cosigns {
|
for cosign in cosigns {
|
||||||
let mut buf = vec![];
|
let mut buf = vec![];
|
||||||
cosign.serialize(&mut buf).unwrap();
|
cosign.serialize(&mut buf).unwrap();
|
||||||
P2p::broadcast(&p2p, P2pMessageKind::CosignedBlock, buf).await;
|
P2p::broadcast(&p2p, GossipMessageKind::CosignedBlock, buf).await;
|
||||||
}
|
}
|
||||||
sleep(Duration::from_secs(60)).await;
|
sleep(Duration::from_secs(60)).await;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -260,7 +260,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
|
|||||||
cosign_channel.send(cosigned_block).unwrap();
|
cosign_channel.send(cosigned_block).unwrap();
|
||||||
let mut buf = vec![];
|
let mut buf = vec![];
|
||||||
cosigned_block.serialize(&mut buf).unwrap();
|
cosigned_block.serialize(&mut buf).unwrap();
|
||||||
P2p::broadcast(p2p, P2pMessageKind::CosignedBlock, buf).await;
|
P2p::broadcast(p2p, GossipMessageKind::CosignedBlock, buf).await;
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
// This causes an action on Substrate yet not on any Tributary
|
// This causes an action on Substrate yet not on any Tributary
|
||||||
|
|||||||
File diff suppressed because it is too large
@@ -14,7 +14,7 @@ use tokio::sync::RwLock;
|
|||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
processors::{Message, Processors},
|
processors::{Message, Processors},
|
||||||
TributaryP2p, P2pMessageKind, P2p,
|
TributaryP2p, ReqResMessageKind, GossipMessageKind, P2pMessageKind, Message as P2pMessage, P2p,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub mod tributary;
|
pub mod tributary;
|
||||||
@@ -45,7 +45,10 @@ impl Processors for MemProcessors {
|
|||||||
|
|
||||||
#[allow(clippy::type_complexity)]
|
#[allow(clippy::type_complexity)]
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
pub struct LocalP2p(usize, pub Arc<RwLock<(HashSet<Vec<u8>>, Vec<VecDeque<(usize, Vec<u8>)>>)>>);
|
pub struct LocalP2p(
|
||||||
|
usize,
|
||||||
|
pub Arc<RwLock<(HashSet<Vec<u8>>, Vec<VecDeque<(usize, P2pMessageKind, Vec<u8>)>>)>>,
|
||||||
|
);
|
||||||
|
|
||||||
impl LocalP2p {
|
impl LocalP2p {
|
||||||
pub fn new(validators: usize) -> Vec<LocalP2p> {
|
pub fn new(validators: usize) -> Vec<LocalP2p> {
|
||||||
@@ -65,11 +68,13 @@ impl P2p for LocalP2p {
|
|||||||
async fn subscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
|
async fn subscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
|
||||||
async fn unsubscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
|
async fn unsubscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
|
||||||
|
|
||||||
async fn send_raw(&self, to: Self::Id, _genesis: Option<[u8; 32]>, msg: Vec<u8>) {
|
async fn send_raw(&self, to: Self::Id, msg: Vec<u8>) {
|
||||||
self.1.write().await.1[to].push_back((self.0, msg));
|
let mut msg_ref = msg.as_slice();
|
||||||
|
let kind = ReqResMessageKind::read(&mut msg_ref).unwrap();
|
||||||
|
self.1.write().await.1[to].push_back((self.0, P2pMessageKind::ReqRes(kind), msg_ref.to_vec()));
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn broadcast_raw(&self, _genesis: Option<[u8; 32]>, msg: Vec<u8>) {
|
async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>) {
|
||||||
// Content-based deduplication
|
// Content-based deduplication
|
||||||
let mut lock = self.1.write().await;
|
let mut lock = self.1.write().await;
|
||||||
{
|
{
|
||||||
@@ -81,19 +86,26 @@ impl P2p for LocalP2p {
|
|||||||
}
|
}
|
||||||
let queues = &mut lock.1;
|
let queues = &mut lock.1;
|
||||||
|
|
||||||
|
let kind_len = (match kind {
|
||||||
|
P2pMessageKind::ReqRes(kind) => kind.serialize(),
|
||||||
|
P2pMessageKind::Gossip(kind) => kind.serialize(),
|
||||||
|
})
|
||||||
|
.len();
|
||||||
|
let msg = msg[kind_len ..].to_vec();
|
||||||
|
|
||||||
for (i, msg_queue) in queues.iter_mut().enumerate() {
|
for (i, msg_queue) in queues.iter_mut().enumerate() {
|
||||||
if i == self.0 {
|
if i == self.0 {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
msg_queue.push_back((self.0, msg.clone()));
|
msg_queue.push_back((self.0, kind, msg.clone()));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn receive_raw(&self) -> (Self::Id, Vec<u8>) {
|
async fn receive(&self) -> P2pMessage<Self> {
|
||||||
// This is a cursed way to implement an async read from a Vec
|
// This is a cursed way to implement an async read from a Vec
|
||||||
loop {
|
loop {
|
||||||
if let Some(res) = self.1.write().await.1[self.0].pop_front() {
|
if let Some((sender, kind, msg)) = self.1.write().await.1[self.0].pop_front() {
|
||||||
return res;
|
return P2pMessage { sender, kind, msg };
|
||||||
}
|
}
|
||||||
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
|
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
|
||||||
}
|
}
|
||||||
@@ -103,6 +115,11 @@ impl P2p for LocalP2p {
|
|||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl TributaryP2p for LocalP2p {
|
impl TributaryP2p for LocalP2p {
|
||||||
async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) {
|
async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) {
|
||||||
<Self as P2p>::broadcast(self, P2pMessageKind::Tributary(genesis), msg).await
|
<Self as P2p>::broadcast(
|
||||||
|
self,
|
||||||
|
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)),
|
||||||
|
msg,
|
||||||
|
)
|
||||||
|
.await
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -26,7 +26,7 @@ use serai_db::MemDb;
|
|||||||
use tributary::Tributary;
|
use tributary::Tributary;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
P2pMessageKind, P2p,
|
GossipMessageKind, P2pMessageKind, P2p,
|
||||||
tributary::{Transaction, TributarySpec},
|
tributary::{Transaction, TributarySpec},
|
||||||
tests::LocalP2p,
|
tests::LocalP2p,
|
||||||
};
|
};
|
||||||
@@ -98,7 +98,7 @@ pub async fn run_tributaries(
|
|||||||
for (p2p, tributary) in &mut tributaries {
|
for (p2p, tributary) in &mut tributaries {
|
||||||
while let Poll::Ready(msg) = poll!(p2p.receive()) {
|
while let Poll::Ready(msg) = poll!(p2p.receive()) {
|
||||||
match msg.kind {
|
match msg.kind {
|
||||||
P2pMessageKind::Tributary(genesis) => {
|
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
|
||||||
assert_eq!(genesis, tributary.genesis());
|
assert_eq!(genesis, tributary.genesis());
|
||||||
if tributary.handle_message(&msg.msg).await {
|
if tributary.handle_message(&msg.msg).await {
|
||||||
p2p.broadcast(msg.kind, msg.msg).await;
|
p2p.broadcast(msg.kind, msg.msg).await;
|
||||||
@@ -173,7 +173,7 @@ async fn tributary_test() {
|
|||||||
for (p2p, tributary) in &mut tributaries {
|
for (p2p, tributary) in &mut tributaries {
|
||||||
while let Poll::Ready(msg) = poll!(p2p.receive()) {
|
while let Poll::Ready(msg) = poll!(p2p.receive()) {
|
||||||
match msg.kind {
|
match msg.kind {
|
||||||
P2pMessageKind::Tributary(genesis) => {
|
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
|
||||||
assert_eq!(genesis, tributary.genesis());
|
assert_eq!(genesis, tributary.genesis());
|
||||||
tributary.handle_message(&msg.msg).await;
|
tributary.handle_message(&msg.msg).await;
|
||||||
}
|
}
|
||||||
@@ -199,7 +199,7 @@ async fn tributary_test() {
|
|||||||
for (p2p, tributary) in &mut tributaries {
|
for (p2p, tributary) in &mut tributaries {
|
||||||
while let Poll::Ready(msg) = poll!(p2p.receive()) {
|
while let Poll::Ready(msg) = poll!(p2p.receive()) {
|
||||||
match msg.kind {
|
match msg.kind {
|
||||||
P2pMessageKind::Tributary(genesis) => {
|
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
|
||||||
assert_eq!(genesis, tributary.genesis());
|
assert_eq!(genesis, tributary.genesis());
|
||||||
tributary.handle_message(&msg.msg).await;
|
tributary.handle_message(&msg.msg).await;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -116,8 +116,8 @@ async fn sync_test() {
|
|||||||
.map_err(|_| "failed to send ActiveTributary to heartbeat")
|
.map_err(|_| "failed to send ActiveTributary to heartbeat")
|
||||||
.unwrap();
|
.unwrap();
|
||||||
|
|
||||||
// The heartbeat is once every 10 blocks
|
// The heartbeat is once every 10 blocks, with some limitations
|
||||||
sleep(Duration::from_secs(10 * block_time)).await;
|
sleep(Duration::from_secs(20 * block_time)).await;
|
||||||
assert!(syncer_tributary.tip().await != spec.genesis());
|
assert!(syncer_tributary.tip().await != spec.genesis());
|
||||||
|
|
||||||
// Verify it synced to the tip
|
// Verify it synced to the tip
|
||||||
|
|||||||
@@ -59,8 +59,7 @@ pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
|
|||||||
pub const BLOCK_SIZE_LIMIT: usize = 3_001_000;
|
pub const BLOCK_SIZE_LIMIT: usize = 3_001_000;
|
||||||
|
|
||||||
pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
|
pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
|
||||||
pub(crate) const BLOCK_MESSAGE: u8 = 1;
|
pub(crate) const TRANSACTION_MESSAGE: u8 = 1;
|
||||||
pub(crate) const TRANSACTION_MESSAGE: u8 = 2;
|
|
||||||
|
|
||||||
#[allow(clippy::large_enum_variant)]
|
#[allow(clippy::large_enum_variant)]
|
||||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||||
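With BLOCK_MESSAGE gone, the tag space compacts: byte 0 stays Tendermint traffic and transactions drop from tag 2 to tag 1. A sketch of the framing these constants drive, `tx_bytes` being a hypothetical serialized transaction:

```rust
// One tag byte, then the payload.
let mut msg = Vec::with_capacity(1 + tx_bytes.len());
msg.push(TRANSACTION_MESSAGE); // now 1, previously 2
msg.extend_from_slice(&tx_bytes);
```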
@@ -336,9 +335,6 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
|
|||||||
|
|
||||||
// Return true if the message should be rebroadcasted.
|
// Return true if the message should be rebroadcasted.
|
||||||
pub async fn handle_message(&self, msg: &[u8]) -> bool {
|
pub async fn handle_message(&self, msg: &[u8]) -> bool {
|
||||||
// Acquire the lock now to prevent sync_block from being run at the same time
|
|
||||||
let mut sync_block = self.synced_block_result.write().await;
|
|
||||||
|
|
||||||
match msg.first() {
|
match msg.first() {
|
||||||
Some(&TRANSACTION_MESSAGE) => {
|
Some(&TRANSACTION_MESSAGE) => {
|
||||||
let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else {
|
let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else {
|
||||||
@@ -370,19 +366,6 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
|
|||||||
false
|
false
|
||||||
}
|
}
|
||||||
|
|
||||||
Some(&BLOCK_MESSAGE) => {
|
|
||||||
let mut msg_ref = &msg[1 ..];
|
|
||||||
let Ok(block) = Block::<T>::read(&mut msg_ref) else {
|
|
||||||
log::error!("received invalid block message");
|
|
||||||
return false;
|
|
||||||
};
|
|
||||||
let commit = msg[(msg.len() - msg_ref.len()) ..].to_vec();
|
|
||||||
if self.sync_block_internal(block, commit, &mut sync_block).await {
|
|
||||||
log::debug!("synced block over p2p net instead of building the commit ourselves");
|
|
||||||
}
|
|
||||||
false
|
|
||||||
}
|
|
||||||
|
|
||||||
_ => false,
|
_ => false,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -74,7 +74,7 @@ impl<D: Db, T: Transaction> ProvidedTransactions<D, T> {
|
|||||||
panic!("provided transaction saved to disk wasn't provided");
|
panic!("provided transaction saved to disk wasn't provided");
|
||||||
};
|
};
|
||||||
|
|
||||||
if res.transactions.get(order).is_none() {
|
if !res.transactions.contains_key(order) {
|
||||||
res.transactions.insert(order, VecDeque::new());
|
res.transactions.insert(order, VecDeque::new());
|
||||||
}
|
}
|
||||||
res.transactions.get_mut(order).unwrap().push_back(tx);
|
res.transactions.get_mut(order).unwrap().push_back(tx);
|
||||||
@@ -135,7 +135,7 @@ impl<D: Db, T: Transaction> ProvidedTransactions<D, T> {
|
|||||||
txn.put(current_provided_key, currently_provided);
|
txn.put(current_provided_key, currently_provided);
|
||||||
txn.commit();
|
txn.commit();
|
||||||
|
|
||||||
if self.transactions.get(order).is_none() {
|
if !self.transactions.contains_key(order) {
|
||||||
self.transactions.insert(order, VecDeque::new());
|
self.transactions.insert(order, VecDeque::new());
|
||||||
}
|
}
|
||||||
self.transactions.get_mut(order).unwrap().push_back(tx);
|
self.transactions.get_mut(order).unwrap().push_back(tx);
|
||||||
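Both `contains_key` hunks are behavior-preserving clippy fixes. The insert-if-absent-then-push pattern could collapse further via the entry API, a possible cleanup this commit doesn't make (assuming the key type is owned or `Copy`):

```rust
// Insert an empty queue if absent, then push, in one expression.
self.transactions.entry(order).or_insert_with(VecDeque::new).push_back(tx);
```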
|
|||||||
@@ -41,9 +41,8 @@ use tendermint::{
|
|||||||
use tokio::sync::RwLock;
|
use tokio::sync::RwLock;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, BLOCK_MESSAGE, ReadWrite,
|
TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, ReadWrite, transaction::Transaction as TransactionTrait,
|
||||||
transaction::Transaction as TransactionTrait, Transaction, BlockHeader, Block, BlockError,
|
Transaction, BlockHeader, Block, BlockError, Blockchain, P2p,
|
||||||
Blockchain, P2p,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
pub mod tx;
|
pub mod tx;
|
||||||
@@ -414,12 +413,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
|
|||||||
);
|
);
|
||||||
match block_res {
|
match block_res {
|
||||||
Ok(()) => {
|
Ok(()) => {
|
||||||
// If we successfully added this block, broadcast it
|
// If we successfully added this block, break
|
||||||
// TODO: Move this under the coordinator once we set up on new block notifications?
|
|
||||||
let mut msg = serialized_block.0;
|
|
||||||
msg.insert(0, BLOCK_MESSAGE);
|
|
||||||
msg.extend(encoded_commit);
|
|
||||||
self.p2p.broadcast(self.genesis, msg).await;
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
Err(BlockError::NonLocalProvided(hash)) => {
|
Err(BlockError::NonLocalProvided(hash)) => {
|
||||||
@@ -428,6 +422,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
|
|||||||
hex::encode(hash),
|
hex::encode(hash),
|
||||||
hex::encode(self.genesis)
|
hex::encode(self.genesis)
|
||||||
);
|
);
|
||||||
|
tokio::time::sleep(core::time::Duration::from_secs(5)).await;
|
||||||
}
|
}
|
||||||
_ => return invalid_block(),
|
_ => return invalid_block(),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -139,10 +139,8 @@ impl<N: Network> BlockData<N> {
|
|||||||
// 27, 33, 41, 46, 60, 64
|
// 27, 33, 41, 46, 60, 64
|
||||||
self.round_mut().step = data.step();
|
self.round_mut().step = data.step();
|
||||||
|
|
||||||
// Only return a message if we're actually a current validator and haven't prior posted a
|
// Only return a message if we're actually a current validator
|
||||||
// message
|
|
||||||
let round_number = self.round().number;
|
let round_number = self.round().number;
|
||||||
let step = data.step();
|
|
||||||
let res = self.validator_id.map(|validator_id| Message {
|
let res = self.validator_id.map(|validator_id| Message {
|
||||||
sender: validator_id,
|
sender: validator_id,
|
||||||
block: self.number,
|
block: self.number,
|
||||||
@@ -150,21 +148,59 @@ impl<N: Network> BlockData<N> {
|
|||||||
data,
|
data,
|
||||||
});
|
});
|
||||||
|
|
||||||
if res.is_some() {
|
if let Some(res) = res.as_ref() {
|
||||||
|
const LATEST_BLOCK_KEY: &[u8] = b"tendermint-machine-sent_block";
|
||||||
|
const LATEST_ROUND_KEY: &[u8] = b"tendermint-machine-sent_round";
|
||||||
|
const PROPOSE_KEY: &[u8] = b"tendermint-machine-sent_propose";
|
||||||
|
const PEVOTE_KEY: &[u8] = b"tendermint-machine-sent_prevote";
|
||||||
|
const PRECOMMIT_KEY: &[u8] = b"tendermint-machine-sent_commit";
|
||||||
|
|
||||||
|
let genesis = self.genesis;
|
||||||
|
let key = |prefix: &[u8]| [prefix, &genesis].concat();
|
||||||
|
|
||||||
let mut txn = self.db.txn();
|
let mut txn = self.db.txn();
|
||||||
let key = [
|
|
||||||
b"tendermint-machine_already_sent_message".as_ref(),
|
// Ensure we haven't prior sent a message for a future block/round
|
||||||
&self.genesis,
|
let last_block_or_round = |txn: &mut <N::Db as Db>::Transaction<'_>, prefix, current| {
|
||||||
&self.number.0.to_le_bytes(),
|
let key = key(prefix);
|
||||||
&round_number.0.to_le_bytes(),
|
let latest =
|
||||||
&step.encode(),
|
u64::from_le_bytes(txn.get(key.as_slice()).unwrap_or(vec![0; 8]).try_into().unwrap());
|
||||||
]
|
if latest > current {
|
||||||
.concat();
|
None?;
|
||||||
// If we've already sent a message, return
|
}
|
||||||
if txn.get(&key).is_some() {
|
if current > latest {
|
||||||
|
txn.put(&key, current.to_le_bytes());
|
||||||
|
return Some(true);
|
||||||
|
}
|
||||||
|
Some(false)
|
||||||
|
};
|
||||||
|
let new_block = last_block_or_round(&mut txn, LATEST_BLOCK_KEY, self.number.0)?;
|
||||||
|
if new_block {
|
||||||
|
// Delete the latest round key
|
||||||
|
txn.del(&key(LATEST_ROUND_KEY));
|
||||||
|
}
|
||||||
|
let new_round = last_block_or_round(&mut txn, LATEST_ROUND_KEY, round_number.0.into())?;
|
||||||
|
if new_block || new_round {
|
||||||
|
// Delete the messages for the old round
|
||||||
|
txn.del(&key(PROPOSE_KEY));
|
||||||
|
txn.del(&key(PEVOTE_KEY));
|
||||||
|
txn.del(&key(PRECOMMIT_KEY));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check we haven't sent this message within this round
|
||||||
|
let msg_key = key(match res.data.step() {
|
||||||
|
Step::Propose => PROPOSE_KEY,
|
||||||
|
Step::Prevote => PEVOTE_KEY,
|
||||||
|
Step::Precommit => PRECOMMIT_KEY,
|
||||||
|
});
|
||||||
|
if txn.get(&msg_key).is_some() {
|
||||||
|
assert!(!new_block);
|
||||||
|
assert!(!new_round);
|
||||||
None?;
|
None?;
|
||||||
}
|
}
|
||||||
txn.put(&key, []);
|
// Put this message into the DB
|
||||||
|
txn.put(&msg_key, res.encode());
|
||||||
|
|
||||||
txn.commit();
|
txn.commit();
|
||||||
}
|
}
|
||||||
|
|
||||||
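In sum, the rewrite replaces a single per-(block, round, step) flag with a per-genesis schema which also persists the sent messages themselves. A sketch of the layout, using the constants defined above:

```rust
// Every key is prefix || genesis:
//   LATEST_BLOCK_KEY -> u64 LE, the latest block we've sent a message for
//   LATEST_ROUND_KEY -> u64 LE, the latest round within that block
//   PROPOSE_KEY / PEVOTE_KEY / PRECOMMIT_KEY -> the encoded message sent
// Advancing the block deletes the round key; advancing either deletes the
// per-step slots, so each step's message is sent at most once per round.
let key = |prefix: &[u8]| [prefix, &genesis].concat();
```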
|
|||||||
@@ -313,11 +313,16 @@ impl<N: Network + 'static> TendermintMachine<N> {
|
|||||||
let time_until_round_end = round_end.instant().saturating_duration_since(Instant::now());
|
let time_until_round_end = round_end.instant().saturating_duration_since(Instant::now());
|
||||||
if time_until_round_end == Duration::ZERO {
|
if time_until_round_end == Duration::ZERO {
|
||||||
log::trace!(
|
log::trace!(
|
||||||
|
target: "tendermint",
|
||||||
"resetting when prior round ended {}ms ago",
|
"resetting when prior round ended {}ms ago",
|
||||||
Instant::now().saturating_duration_since(round_end.instant()).as_millis(),
|
Instant::now().saturating_duration_since(round_end.instant()).as_millis(),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
log::trace!("sleeping until round ends in {}ms", time_until_round_end.as_millis());
|
log::trace!(
|
||||||
|
target: "tendermint",
|
||||||
|
"sleeping until round ends in {}ms",
|
||||||
|
time_until_round_end.as_millis(),
|
||||||
|
);
|
||||||
sleep(time_until_round_end).await;
|
sleep(time_until_round_end).await;
|
||||||
|
|
||||||
// Clear our outbound message queue
|
// Clear our outbound message queue
|
||||||
@@ -509,7 +514,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
|
|||||||
match step {
|
match step {
|
||||||
Step::Propose => {
|
Step::Propose => {
|
||||||
// Slash the validator for not proposing when they should've
|
// Slash the validator for not proposing when they should've
|
||||||
log::debug!(target: "tendermint", "Validator didn't propose when they should have");
|
log::debug!(target: "tendermint", "validator didn't propose when they should have");
|
||||||
// this slash will be voted on.
|
// this slash will be voted on.
|
||||||
self.slash(
|
self.slash(
|
||||||
self.weights.proposer(self.block.number, self.block.round().number),
|
self.weights.proposer(self.block.number, self.block.round().number),
|
||||||
@@ -598,7 +603,11 @@ impl<N: Network + 'static> TendermintMachine<N> {
|
|||||||
);
|
);
|
||||||
let id = block.id();
|
let id = block.id();
|
||||||
let proposal = self.network.add_block(block, commit).await;
|
let proposal = self.network.add_block(block, commit).await;
|
||||||
log::trace!("added block {} (produced by machine)", hex::encode(id.as_ref()));
|
log::trace!(
|
||||||
|
target: "tendermint",
|
||||||
|
"added block {} (produced by machine)",
|
||||||
|
hex::encode(id.as_ref()),
|
||||||
|
);
|
||||||
self.reset(msg.round, proposal).await;
|
self.reset(msg.round, proposal).await;
|
||||||
}
|
}
|
||||||
Err(TendermintError::Malicious(sender, evidence)) => {
|
Err(TendermintError::Malicious(sender, evidence)) => {
|
||||||
@@ -692,7 +701,12 @@ impl<N: Network + 'static> TendermintMachine<N> {
|
|||||||
(msg.round == self.block.round().number) &&
|
(msg.round == self.block.round().number) &&
|
||||||
(msg.data.step() == Step::Propose)
|
(msg.data.step() == Step::Propose)
|
||||||
{
|
{
|
||||||
log::trace!("received Propose for block {}, round {}", msg.block.0, msg.round.0);
|
log::trace!(
|
||||||
|
target: "tendermint",
|
||||||
|
"received Propose for block {}, round {}",
|
||||||
|
msg.block.0,
|
||||||
|
msg.round.0,
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// If this is a precommit, verify its signature
|
// If this is a precommit, verify its signature
|
||||||
@@ -710,7 +724,13 @@ impl<N: Network + 'static> TendermintMachine<N> {
|
|||||||
if !self.block.log.log(signed.clone())? {
|
if !self.block.log.log(signed.clone())? {
|
||||||
return Err(TendermintError::AlreadyHandled);
|
return Err(TendermintError::AlreadyHandled);
|
||||||
}
|
}
|
||||||
log::debug!(target: "tendermint", "received new tendermint message");
|
log::trace!(
|
||||||
|
target: "tendermint",
|
||||||
|
"received new tendermint message (block: {}, round: {}, step: {:?})",
|
||||||
|
msg.block.0,
|
||||||
|
msg.round.0,
|
||||||
|
msg.data.step(),
|
||||||
|
);
|
||||||
|
|
||||||
// All functions, except for the finalizer and the jump, are locked to the current round
|
// All functions, except for the finalizer and the jump, are locked to the current round
|
||||||
|
|
||||||
@@ -757,6 +777,13 @@ impl<N: Network + 'static> TendermintMachine<N> {
|
|||||||
// 55-56
|
// 55-56
|
||||||
// Jump, enabling processing by the below code
|
// Jump, enabling processing by the below code
|
||||||
if self.block.log.round_participation(msg.round) > self.weights.fault_threshold() {
|
if self.block.log.round_participation(msg.round) > self.weights.fault_threshold() {
|
||||||
|
log::debug!(
|
||||||
|
target: "tendermint",
|
||||||
|
"jumping from round {} to round {}",
|
||||||
|
self.block.round().number.0,
|
||||||
|
msg.round.0,
|
||||||
|
);
|
||||||
|
|
||||||
// Jump to the new round.
|
// Jump to the new round.
|
||||||
let proposer = self.round(msg.round, None);
|
let proposer = self.round(msg.round, None);
|
||||||
|
|
||||||
@@ -814,13 +841,26 @@ impl<N: Network + 'static> TendermintMachine<N> {
|
|||||||
if (self.block.round().step == Step::Prevote) && matches!(msg.data, Data::Prevote(_)) {
|
if (self.block.round().step == Step::Prevote) && matches!(msg.data, Data::Prevote(_)) {
|
||||||
let (participation, weight) =
|
let (participation, weight) =
|
||||||
self.block.log.message_instances(self.block.round().number, &Data::Prevote(None));
|
self.block.log.message_instances(self.block.round().number, &Data::Prevote(None));
|
||||||
|
let threshold_weight = self.weights.threshold();
|
||||||
|
if participation < threshold_weight {
|
||||||
|
log::trace!(
|
||||||
|
target: "tendermint",
|
||||||
|
"progess towards setting prevote timeout, participation: {}, needed: {}",
|
||||||
|
participation,
|
||||||
|
threshold_weight,
|
||||||
|
);
|
||||||
|
}
|
||||||
// 34-35
|
// 34-35
|
||||||
if participation >= self.weights.threshold() {
|
if participation >= threshold_weight {
|
||||||
|
log::trace!(
|
||||||
|
target: "tendermint",
|
||||||
|
"setting timeout for prevote due to sufficient participation",
|
||||||
|
);
|
||||||
self.block.round_mut().set_timeout(Step::Prevote);
|
self.block.round_mut().set_timeout(Step::Prevote);
|
||||||
}
|
}
|
||||||
|
|
||||||
// 44-46
|
// 44-46
|
||||||
if weight >= self.weights.threshold() {
|
if weight >= threshold_weight {
|
||||||
self.broadcast(Data::Precommit(None));
|
self.broadcast(Data::Precommit(None));
|
||||||
return Ok(None);
|
return Ok(None);
|
||||||
}
|
}
|
||||||
@@ -830,6 +870,10 @@ impl<N: Network + 'static> TendermintMachine<N> {
|
|||||||
if matches!(msg.data, Data::Precommit(_)) &&
|
if matches!(msg.data, Data::Precommit(_)) &&
|
||||||
self.block.log.has_participation(self.block.round().number, Step::Precommit)
|
self.block.log.has_participation(self.block.round().number, Step::Precommit)
|
||||||
{
|
{
|
||||||
|
log::trace!(
|
||||||
|
target: "tendermint",
|
||||||
|
"setting timeout for precommit due to sufficient participation",
|
||||||
|
);
|
||||||
self.block.round_mut().set_timeout(Step::Precommit);
|
self.block.round_mut().set_timeout(Step::Precommit);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
use std::{sync::Arc, collections::HashMap};
|
use std::{sync::Arc, collections::HashMap};
|
||||||
|
|
||||||
use log::debug;
|
|
||||||
use parity_scale_codec::Encode;
|
use parity_scale_codec::Encode;
|
||||||
|
|
||||||
use crate::{ext::*, RoundNumber, Step, DataFor, TendermintError, SignedMessageFor, Evidence};
|
use crate::{ext::*, RoundNumber, Step, DataFor, TendermintError, SignedMessageFor, Evidence};
|
||||||
@@ -27,7 +26,7 @@ impl<N: Network> MessageLog<N> {
|
|||||||
let step = msg.data.step();
|
let step = msg.data.step();
|
||||||
if let Some(existing) = msgs.get(&step) {
|
if let Some(existing) = msgs.get(&step) {
|
||||||
if existing.msg.data != msg.data {
|
if existing.msg.data != msg.data {
|
||||||
debug!(
|
log::debug!(
|
||||||
target: "tendermint",
|
target: "tendermint",
|
||||||
"Validator sent multiple messages for the same block + round + step"
|
"Validator sent multiple messages for the same block + round + step"
|
||||||
);
|
);
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dalek-ff-gr
|
|||||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||||
keywords = ["curve25519", "ed25519", "ristretto", "dalek", "group"]
|
keywords = ["curve25519", "ed25519", "ristretto", "dalek", "group"]
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
rust-version = "1.65"
|
rust-version = "1.66"
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
all-features = true
|
all-features = true
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg"
|
|||||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||||
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
|
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
rust-version = "1.70"
|
rust-version = "1.74"
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
all-features = true
|
all-features = true
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ license = "MIT"
|
|||||||
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dleq"
|
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dleq"
|
||||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
rust-version = "1.73"
|
rust-version = "1.74"
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
all-features = true
|
all-features = true
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ed448"
|
|||||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||||
keywords = ["ed448", "ff", "group"]
|
keywords = ["ed448", "ff", "group"]
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
rust-version = "1.65"
|
rust-version = "1.66"
|
||||||
|
|
||||||
[package.metadata.docs.rs]
|
[package.metadata.docs.rs]
|
||||||
all-features = true
|
all-features = true
|
||||||
|
|||||||
@@ -38,7 +38,6 @@ ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features =
|
|||||||
multiexp = { path = "../multiexp", version = "0.4", default-features = false, features = ["std", "batch"] }
|
multiexp = { path = "../multiexp", version = "0.4", default-features = false, features = ["std", "batch"] }
|
||||||
|
|
||||||
schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false, features = ["std"] }
|
schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false, features = ["std"] }
|
||||||
dleq = { path = "../dleq", version = "^0.4.1", default-features = false, features = ["std", "serialize"] }
|
|
||||||
|
|
||||||
dkg = { path = "../dkg", version = "^0.5.1", default-features = false, features = ["std"] }
|
dkg = { path = "../dkg", version = "^0.5.1", default-features = false, features = ["std"] }
|
||||||
|
|
||||||
|
|||||||
@@ -39,6 +39,13 @@ pub trait Algorithm<C: Curve>: Send + Sync + Clone {
|
|||||||
|
|
||||||
/// Obtain the list of nonces to generate, as specified by the generators to create commitments
|
/// Obtain the list of nonces to generate, as specified by the generators to create commitments
|
||||||
/// against per-nonce.
|
/// against per-nonce.
|
||||||
|
///
|
||||||
|
/// The Algorithm is responsible for all transcripting of these nonce specifications/generators.
|
||||||
|
///
|
||||||
|
/// The prover will be passed the commitments, and the commitments will be sent to all other
|
||||||
|
/// participants. No guarantees the commitments are internally consistent (have the same discrete
|
||||||
|
/// logarithm across generators) are made. Any Algorithm which specifies multiple generators for
|
||||||
|
/// a single nonce must handle that itself.
|
||||||
fn nonces(&self) -> Vec<Vec<C::G>>;
|
fn nonces(&self) -> Vec<Vec<C::G>>;
|
||||||
|
|
||||||
/// Generate an addendum to FROST"s preprocessing stage.
|
/// Generate an addendum to FROST"s preprocessing stage.
|
||||||
|
|||||||
@@ -1,13 +1,9 @@
|
|||||||
// FROST defines its nonce as sum(Di, Ei * bi)
|
// FROST defines its nonce as sum(Di, Ei * bi)
|
||||||
// Monero needs not just the nonce over G however, yet also over H
|
|
||||||
// Then there is a signature (a modified Chaum Pedersen proof) using multiple nonces at once
|
|
||||||
//
|
//
|
||||||
// Accordingly, in order for this library to be robust, it supports generating an arbitrary amount
|
// In order for this library to be robust, it supports generating an arbitrary amount of nonces,
|
||||||
// of nonces, each against an arbitrary list of generators
|
// each against an arbitrary list of generators
|
||||||
//
|
//
|
||||||
// Each nonce remains of the form (d, e) and made into a proper nonce with d + (e * b)
|
// Each nonce remains of the form (d, e) and made into a proper nonce with d + (e * b)
|
||||||
// When representations across multiple generators are provided, a DLEq proof is also provided to
|
|
||||||
// confirm their integrity
|
|
||||||
|
|
||||||
use core::ops::Deref;
|
use core::ops::Deref;
|
||||||
use std::{
|
use std::{
|
||||||
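The surviving comments compress the nonce design into prose; as a formula, each nonce is a scalar pair (d, e) bound into one effective nonce by the binding factor b:

```latex
D = d G, \quad E = e G, \qquad r = d + b e, \quad R = D + b E
```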
@@ -24,32 +20,8 @@ use transcript::Transcript;
|
|||||||
use ciphersuite::group::{ff::PrimeField, Group, GroupEncoding};
|
use ciphersuite::group::{ff::PrimeField, Group, GroupEncoding};
|
||||||
use multiexp::multiexp_vartime;
|
use multiexp::multiexp_vartime;
|
||||||
|
|
||||||
use dleq::MultiDLEqProof;
|
|
||||||
|
|
||||||
use crate::{curve::Curve, Participant};
|
use crate::{curve::Curve, Participant};
|
||||||
|
|
||||||
// Transcript used to aggregate binomial nonces for usage within a single DLEq proof.
|
|
||||||
fn aggregation_transcript<T: Transcript>(context: &[u8]) -> T {
|
|
||||||
let mut transcript = T::new(b"FROST DLEq Aggregation v0.5");
|
|
||||||
transcript.append_message(b"context", context);
|
|
||||||
transcript
|
|
||||||
}
|
|
||||||
|
|
||||||
// Every participant proves for their commitments at the start of the protocol
|
|
||||||
// These proofs are verified sequentially, requiring independent transcripts
|
|
||||||
// In order to make these transcripts more robust, the FROST transcript (at time of preprocess) is
|
|
||||||
// challenged in order to create a commitment to it, carried in each independent transcript
|
|
||||||
// (effectively forking the original transcript)
|
|
||||||
//
|
|
||||||
// For FROST, as defined by the IETF, this will do nothing (and this transcript will never even be
|
|
||||||
// constructed). For higher level protocols, the transcript may have contextual info these proofs
|
|
||||||
// will then be bound to
|
|
||||||
fn dleq_transcript<T: Transcript>(context: &[u8]) -> T {
|
|
||||||
let mut transcript = T::new(b"FROST Commitments DLEq v0.5");
|
|
||||||
transcript.append_message(b"context", context);
|
|
||||||
transcript
|
|
||||||
}
|
|
||||||
|
|
||||||
// Each nonce is actually a pair of random scalars, notated as d, e under the FROST paper
|
// Each nonce is actually a pair of random scalars, notated as d, e under the FROST paper
|
||||||
// This is considered a single nonce as r = d + be
|
// This is considered a single nonce as r = d + be
|
||||||
#[derive(Clone, Zeroize)]
|
#[derive(Clone, Zeroize)]
|
||||||
@@ -69,7 +41,7 @@ impl<C: Curve> GeneratorCommitments<C> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// A single nonce's commitments and relevant proofs
|
// A single nonce's commitments
|
||||||
#[derive(Clone, PartialEq, Eq)]
|
#[derive(Clone, PartialEq, Eq)]
|
||||||
pub(crate) struct NonceCommitments<C: Curve> {
|
pub(crate) struct NonceCommitments<C: Curve> {
|
||||||
// Called generators as these commitments are indexed by generator later on
|
// Called generators as these commitments are indexed by generator later on
|
||||||
@@ -121,12 +93,6 @@ impl<C: Curve> NonceCommitments<C> {
|
|||||||
t.append_message(b"commitment_E", commitments.0[1].to_bytes());
|
t.append_message(b"commitment_E", commitments.0[1].to_bytes());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn aggregation_factor<T: Transcript>(&self, context: &[u8]) -> C::F {
|
|
||||||
let mut transcript = aggregation_transcript::<T>(context);
|
|
||||||
self.transcript(&mut transcript);
|
|
||||||
<C as Curve>::hash_to_F(b"dleq_aggregation", transcript.challenge(b"binding").as_ref())
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Commitments for all the nonces across all their generators.
|
/// Commitments for all the nonces across all their generators.
|
||||||
@@ -135,51 +101,26 @@ pub(crate) struct Commitments<C: Curve> {
|
|||||||
// Called nonces as these commitments are indexed by nonce
|
// Called nonces as these commitments are indexed by nonce
|
||||||
// So to get the commitments for the first nonce, it'd be commitments.nonces[0]
|
// So to get the commitments for the first nonce, it'd be commitments.nonces[0]
|
||||||
pub(crate) nonces: Vec<NonceCommitments<C>>,
|
pub(crate) nonces: Vec<NonceCommitments<C>>,
|
||||||
// DLEq Proof proving that each set of commitments were generated using a single pair of discrete
|
|
||||||
// logarithms
|
|
||||||
pub(crate) dleq: Option<MultiDLEqProof<C::G>>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<C: Curve> Commitments<C> {
|
impl<C: Curve> Commitments<C> {
|
||||||
pub(crate) fn new<R: RngCore + CryptoRng, T: Transcript>(
|
pub(crate) fn new<R: RngCore + CryptoRng>(
|
||||||
rng: &mut R,
|
rng: &mut R,
|
||||||
secret_share: &Zeroizing<C::F>,
|
secret_share: &Zeroizing<C::F>,
|
||||||
planned_nonces: &[Vec<C::G>],
|
planned_nonces: &[Vec<C::G>],
|
||||||
context: &[u8],
|
|
||||||
) -> (Vec<Nonce<C>>, Commitments<C>) {
|
) -> (Vec<Nonce<C>>, Commitments<C>) {
|
||||||
let mut nonces = vec![];
|
let mut nonces = vec![];
|
||||||
let mut commitments = vec![];
|
let mut commitments = vec![];
|
||||||
|
|
||||||
let mut dleq_generators = vec![];
|
|
||||||
let mut dleq_nonces = vec![];
|
|
||||||
for generators in planned_nonces {
|
for generators in planned_nonces {
|
||||||
let (nonce, these_commitments): (Nonce<C>, _) =
|
let (nonce, these_commitments): (Nonce<C>, _) =
|
||||||
NonceCommitments::new(&mut *rng, secret_share, generators);
|
NonceCommitments::new(&mut *rng, secret_share, generators);
|
||||||
|
|
||||||
if generators.len() > 1 {
|
|
||||||
dleq_generators.push(generators.clone());
|
|
||||||
dleq_nonces.push(Zeroizing::new(
|
|
||||||
(these_commitments.aggregation_factor::<T>(context) * nonce.0[1].deref()) +
|
|
||||||
nonce.0[0].deref(),
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
nonces.push(nonce);
|
nonces.push(nonce);
|
||||||
commitments.push(these_commitments);
|
commitments.push(these_commitments);
|
||||||
}
|
}
|
||||||
|
|
||||||
let dleq = if !dleq_generators.is_empty() {
|
(nonces, Commitments { nonces: commitments })
|
||||||
Some(MultiDLEqProof::prove(
|
|
||||||
rng,
|
|
||||||
&mut dleq_transcript::<T>(context),
|
|
||||||
&dleq_generators,
|
|
||||||
&dleq_nonces,
|
|
||||||
))
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
(nonces, Commitments { nonces: commitments, dleq })
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn transcript<T: Transcript>(&self, t: &mut T) {
|
pub(crate) fn transcript<T: Transcript>(&self, t: &mut T) {
|
||||||
@@ -187,58 +128,20 @@ impl<C: Curve> Commitments<C> {
|
|||||||
for nonce in &self.nonces {
|
for nonce in &self.nonces {
|
||||||
nonce.transcript(t);
|
nonce.transcript(t);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Transcripting the DLEqs implicitly transcripts the exact generators used for the nonces in
|
|
||||||
// an exact order
|
|
||||||
// This means it shouldn't be possible for variadic generators to cause conflicts
|
|
||||||
if let Some(dleq) = &self.dleq {
|
|
||||||
t.append_message(b"dleq", dleq.serialize());
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn read<R: Read, T: Transcript>(
|
pub(crate) fn read<R: Read>(reader: &mut R, generators: &[Vec<C::G>]) -> io::Result<Self> {
|
||||||
reader: &mut R,
|
|
||||||
generators: &[Vec<C::G>],
|
|
||||||
context: &[u8],
|
|
||||||
) -> io::Result<Self> {
|
|
||||||
let nonces = (0 .. generators.len())
|
let nonces = (0 .. generators.len())
|
||||||
.map(|i| NonceCommitments::read(reader, &generators[i]))
|
.map(|i| NonceCommitments::read(reader, &generators[i]))
|
||||||
.collect::<Result<Vec<NonceCommitments<C>>, _>>()?;
|
.collect::<Result<Vec<NonceCommitments<C>>, _>>()?;
|
||||||
|
|
||||||
let mut dleq_generators = vec![];
|
Ok(Commitments { nonces })
|
||||||
let mut dleq_nonces = vec![];
|
|
||||||
for (generators, nonce) in generators.iter().cloned().zip(&nonces) {
|
|
||||||
if generators.len() > 1 {
|
|
||||||
let binding = nonce.aggregation_factor::<T>(context);
|
|
||||||
let mut aggregated = vec![];
|
|
||||||
for commitments in &nonce.generators {
|
|
||||||
aggregated.push(commitments.0[0] + (commitments.0[1] * binding));
|
|
||||||
}
|
|
||||||
dleq_generators.push(generators);
|
|
||||||
dleq_nonces.push(aggregated);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let dleq = if !dleq_generators.is_empty() {
|
|
||||||
let dleq = MultiDLEqProof::read(reader, dleq_generators.len())?;
|
|
||||||
dleq
|
|
||||||
.verify(&mut dleq_transcript::<T>(context), &dleq_generators, &dleq_nonces)
|
|
||||||
.map_err(|_| io::Error::other("invalid DLEq proof"))?;
|
|
||||||
Some(dleq)
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(Commitments { nonces, dleq })
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
pub(crate) fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||||
for nonce in &self.nonces {
|
for nonce in &self.nonces {
|
||||||
nonce.write(writer)?;
|
nonce.write(writer)?;
|
||||||
}
|
}
|
||||||
if let Some(dleq) = &self.dleq {
|
|
||||||
dleq.write(writer)?;
|
|
||||||
}
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
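For context on what this hunk deletes: the old preprocess carried a MultiDLEq proof showing, for every nonce with multiple generators, that the binding-aggregated commitments shared one discrete logarithm. Sketched, with a being the aggregation factor:

```latex
\log_{G_1}\left(D_1 + a E_1\right) = \dots = \log_{G_n}\left(D_n + a E_n\right)
```

Per the expanded `Algorithm::nonces` documentation earlier in this diff, algorithms which need that consistency (such as CLSAG's `verify_share`) now enforce it themselves.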
|
|||||||
@@ -125,14 +125,8 @@ impl<C: Curve, A: Algorithm<C>> AlgorithmMachine<C, A> {
|
|||||||
let mut params = self.params;
|
let mut params = self.params;
|
||||||
|
|
||||||
let mut rng = ChaCha20Rng::from_seed(*seed.0);
|
let mut rng = ChaCha20Rng::from_seed(*seed.0);
|
||||||
// Get a challenge to the existing transcript for use when proving for the commitments
|
let (nonces, commitments) =
|
||||||
let commitments_challenge = params.algorithm.transcript().challenge(b"commitments");
|
Commitments::new::<_>(&mut rng, params.keys.secret_share(), ¶ms.algorithm.nonces());
|
||||||
let (nonces, commitments) = Commitments::new::<_, A::Transcript>(
|
|
||||||
&mut rng,
|
|
||||||
params.keys.secret_share(),
|
|
||||||
¶ms.algorithm.nonces(),
|
|
||||||
commitments_challenge.as_ref(),
|
|
||||||
);
|
|
||||||
let addendum = params.algorithm.preprocess_addendum(&mut rng, ¶ms.keys);
|
let addendum = params.algorithm.preprocess_addendum(&mut rng, ¶ms.keys);
|
||||||
|
|
||||||
let preprocess = Preprocess { commitments, addendum };
|
let preprocess = Preprocess { commitments, addendum };
|
||||||
@@ -141,27 +135,18 @@ impl<C: Curve, A: Algorithm<C>> AlgorithmMachine<C, A> {
|
|||||||
let mut blame_entropy = [0; 32];
|
let mut blame_entropy = [0; 32];
|
||||||
rng.fill_bytes(&mut blame_entropy);
|
rng.fill_bytes(&mut blame_entropy);
|
||||||
(
|
(
|
||||||
AlgorithmSignMachine {
|
AlgorithmSignMachine { params, seed, nonces, preprocess: preprocess.clone(), blame_entropy },
|
||||||
params,
|
|
||||||
seed,
|
|
||||||
commitments_challenge,
|
|
||||||
nonces,
|
|
||||||
preprocess: preprocess.clone(),
|
|
||||||
blame_entropy,
|
|
||||||
},
|
|
||||||
preprocess,
|
preprocess,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(any(test, feature = "tests"))]
|
#[cfg(any(test, feature = "tests"))]
|
||||||
pub(crate) fn unsafe_override_preprocess(
|
pub(crate) fn unsafe_override_preprocess(
|
||||||
mut self,
|
self,
|
||||||
nonces: Vec<Nonce<C>>,
|
nonces: Vec<Nonce<C>>,
|
||||||
preprocess: Preprocess<C, A::Addendum>,
|
preprocess: Preprocess<C, A::Addendum>,
|
||||||
) -> AlgorithmSignMachine<C, A> {
|
) -> AlgorithmSignMachine<C, A> {
|
||||||
AlgorithmSignMachine {
|
AlgorithmSignMachine {
|
||||||
commitments_challenge: self.params.algorithm.transcript().challenge(b"commitments"),
|
|
||||||
|
|
||||||
params: self.params,
|
params: self.params,
|
||||||
seed: CachedPreprocess(Zeroizing::new([0; 32])),
|
seed: CachedPreprocess(Zeroizing::new([0; 32])),
|
||||||
|
|
||||||
@@ -255,8 +240,6 @@ pub struct AlgorithmSignMachine<C: Curve, A: Algorithm<C>> {
|
|||||||
params: Params<C, A>,
|
params: Params<C, A>,
|
||||||
seed: CachedPreprocess,
|
seed: CachedPreprocess,
|
||||||
|
|
||||||
#[zeroize(skip)]
|
|
||||||
commitments_challenge: <A::Transcript as Transcript>::Challenge,
|
|
||||||
pub(crate) nonces: Vec<Nonce<C>>,
|
pub(crate) nonces: Vec<Nonce<C>>,
|
||||||
// Skips the preprocess due to being too large a bound to feasibly enforce on users
|
// Skips the preprocess due to being too large a bound to feasibly enforce on users
|
||||||
#[zeroize(skip)]
|
#[zeroize(skip)]
|
||||||
@@ -285,11 +268,7 @@ impl<C: Curve, A: Algorithm<C>> SignMachine<A::Signature> for AlgorithmSignMachi
|
|||||||
|
|
||||||
fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
|
fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
|
||||||
Ok(Preprocess {
|
Ok(Preprocess {
|
||||||
commitments: Commitments::read::<_, A::Transcript>(
|
commitments: Commitments::read::<_>(reader, &self.params.algorithm.nonces())?,
|
||||||
reader,
|
|
||||||
&self.params.algorithm.nonces(),
|
|
||||||
self.commitments_challenge.as_ref(),
|
|
||||||
)?,
|
|
||||||
addendum: self.params.algorithm.read_addendum(reader)?,
|
addendum: self.params.algorithm.read_addendum(reader)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -12,7 +12,7 @@ use crate::{

 /// Tests for the nonce handling code.
 pub mod nonces;
-use nonces::{test_multi_nonce, test_invalid_commitment, test_invalid_dleq_proof};
+use nonces::test_multi_nonce;

 /// Vectorized test suite to ensure consistency.
 pub mod vectors;

@@ -267,6 +267,4 @@ pub fn test_ciphersuite<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(rng: &mut
   test_schnorr_blame::<R, C, H>(rng);

   test_multi_nonce::<R, C>(rng);
-  test_invalid_commitment::<R, C>(rng);
-  test_invalid_dleq_proof::<R, C>(rng);
 }
@@ -9,14 +9,12 @@ use transcript::{Transcript, RecommendedTranscript};

 use ciphersuite::group::{ff::Field, Group, GroupEncoding};

-use dleq::MultiDLEqProof;
 pub use dkg::tests::{key_gen, recover_key};

 use crate::{
   Curve, Participant, ThresholdView, ThresholdKeys, FrostError,
   algorithm::Algorithm,
-  sign::{Writable, SignMachine},
-  tests::{algorithm_machines, preprocess, sign},
+  tests::{algorithm_machines, sign},
 };

 #[derive(Clone)]

@@ -157,75 +155,3 @@ pub fn test_multi_nonce<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
   let machines = algorithm_machines(&mut *rng, &MultiNonce::<C>::new(), &keys);
   sign(&mut *rng, &MultiNonce::<C>::new(), keys.clone(), machines, &[]);
 }
-
-/// Test malleating a commitment for a nonce across generators causes the preprocess to error.
-pub fn test_invalid_commitment<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
-  let keys = key_gen::<R, C>(&mut *rng);
-  let machines = algorithm_machines(&mut *rng, &MultiNonce::<C>::new(), &keys);
-  let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {});
-
-  // Select a random participant to give an invalid commitment
-  let participants = preprocesses.keys().collect::<Vec<_>>();
-  let faulty = *participants
-    [usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()];
-
-  // Grab their preprocess
-  let mut preprocess = preprocesses.remove(&faulty).unwrap();
-
-  // Mutate one of the commitments
-  let nonce =
-    preprocess.commitments.nonces.get_mut(usize::try_from(rng.next_u64()).unwrap() % 2).unwrap();
-  let generators_len = nonce.generators.len();
-  nonce.generators[usize::try_from(rng.next_u64()).unwrap() % generators_len].0
-    [usize::try_from(rng.next_u64()).unwrap() % 2] = C::G::random(&mut *rng);
-
-  // The commitments are validated at time of deserialization (read_preprocess)
-  // Accordingly, serialize it and read it again to make sure that errors
-  assert!(machines
-    .iter()
-    .next()
-    .unwrap()
-    .1
-    .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref())
-    .is_err());
-}
-
-/// Test malleating the DLEq proof for a preprocess causes it to error.
-pub fn test_invalid_dleq_proof<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
-  let keys = key_gen::<R, C>(&mut *rng);
-  let machines = algorithm_machines(&mut *rng, &MultiNonce::<C>::new(), &keys);
-  let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {});
-
-  // Select a random participant to give an invalid DLEq proof
-  let participants = preprocesses.keys().collect::<Vec<_>>();
-  let faulty = *participants
-    [usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()];
-
-  // Invalidate it by replacing it with a completely different proof
-  let dlogs = [Zeroizing::new(C::F::random(&mut *rng)), Zeroizing::new(C::F::random(&mut *rng))];
-  let mut preprocess = preprocesses.remove(&faulty).unwrap();
-  preprocess.commitments.dleq = Some(MultiDLEqProof::prove(
-    &mut *rng,
-    &mut RecommendedTranscript::new(b"Invalid DLEq Proof"),
-    &nonces::<C>(),
-    &dlogs,
-  ));
-
-  assert!(machines
-    .iter()
-    .next()
-    .unwrap()
-    .1
-    .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref())
-    .is_err());
-
-  // Also test None for a proof will cause an error
-  preprocess.commitments.dleq = None;
-  assert!(machines
-    .iter()
-    .next()
-    .unwrap()
-    .1
-    .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref())
-    .is_err());
-}
@@ -14,7 +14,7 @@ use ciphersuite::group::{ff::PrimeField, GroupEncoding};
 use crate::{
   curve::Curve,
   Participant, ThresholdCore, ThresholdKeys,
-  algorithm::{IetfTranscript, Hram, IetfSchnorr},
+  algorithm::{Hram, IetfSchnorr},
   sign::{
     Writable, Nonce, GeneratorCommitments, NonceCommitments, Commitments, Preprocess,
     PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine,

@@ -191,7 +191,6 @@ pub fn test_with_vectors<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(
       nonces: vec![NonceCommitments {
         generators: vec![GeneratorCommitments(these_commitments)],
       }],
-      dleq: None,
     },
     addendum: (),
   };

@@ -301,12 +300,8 @@ pub fn test_with_vectors<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(
   }

   // Also test it at the Commitments level
-  let (generated_nonces, commitments) = Commitments::<C>::new::<_, IetfTranscript>(
-    &mut TransparentRng(randomness),
-    &share,
-    &[vec![C::generator()]],
-    &[],
-  );
+  let (generated_nonces, commitments) =
+    Commitments::<C>::new::<_>(&mut TransparentRng(randomness), &share, &[vec![C::generator()]]);

   assert_eq!(generated_nonces.len(), 1);
   assert_eq!(generated_nonces[0].0, [nonces[0].clone(), nonces[1].clone()]);
@@ -101,6 +101,7 @@ allow-git = [
   "https://github.com/rust-lang-nursery/lazy-static.rs",
   "https://github.com/serai-dex/substrate-bip39",
   "https://github.com/serai-dex/substrate",
+  "https://github.com/alloy-rs/alloy",
   "https://github.com/monero-rs/base58-monero",
   "https://github.com/kayabaNerve/dockertest-rs",
 ]
@@ -1,6 +1,3 @@
 #!/bin/sh

-geth --dev --networkid 5208 --datadir "eth-devnet" \
-  --http --http.api "web3,net,eth,miner" \
-  --http.addr 0.0.0.0 --http.port 8545 \
-  --http.vhosts="*" --http.corsdomain "*"
+~/.foundry/bin/anvil --no-mining --slots-in-an-epoch 32
@@ -1,4 +1,4 @@
-use std::{path::Path};
+use std::path::Path;

 use crate::{Network, Os, mimalloc, os, write_dockerfile};

@@ -7,7 +7,7 @@ pub fn bitcoin(orchestration_path: &Path, network: Network) {
   const DOWNLOAD_BITCOIN: &str = r#"
 FROM alpine:latest as bitcoin

-ENV BITCOIN_VERSION=26.0
+ENV BITCOIN_VERSION=27.0

 RUN apk --no-cache add git gnupg
@@ -1,5 +0,0 @@
-use std::path::Path;
-
-pub fn ethereum(_orchestration_path: &Path) {
-  // TODO
-}
36	orchestration/src/coins/ethereum/consensus/lighthouse.rs	Normal file
@@ -0,0 +1,36 @@
+use crate::Network;
+
+pub fn lighthouse(network: Network) -> (String, String, String) {
+  assert_ne!(network, Network::Dev);
+
+  #[rustfmt::skip]
+  const DOWNLOAD_LIGHTHOUSE: &str = r#"
+FROM alpine:latest as lighthouse
+
+ENV LIGHTHOUSE_VERSION=5.1.3
+
+RUN apk --no-cache add git gnupg
+
+# Download lighthouse
+RUN wget https://github.com/sigp/lighthouse/releases/download/v${LIGHTHOUSE_VERSION}/lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz
+RUN wget https://github.com/sigp/lighthouse/releases/download/v${LIGHTHOUSE_VERSION}/lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc
+
+# Verify the signature
+RUN gpg --keyserver keyserver.ubuntu.com --recv-keys 15E66D941F697E28F49381F426416DC3F30674B0
+RUN gpg --verify lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz
+
+# Extract lighthouse
+RUN tar xvf lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz
+"#;
+
+  let run_lighthouse = format!(
+    r#"
+COPY --from=lighthouse --chown=ethereum lighthouse /bin
+
+ADD /orchestration/{}/coins/ethereum/consensus/lighthouse/run.sh /consensus_layer.sh
+"#,
+    network.label()
+  );
+
+  (DOWNLOAD_LIGHTHOUSE.to_string(), String::new(), run_lighthouse)
+}
6	orchestration/src/coins/ethereum/consensus/mod.rs	Normal file
@@ -0,0 +1,6 @@
+mod lighthouse;
+#[allow(unused)]
+pub use lighthouse::lighthouse;
+
+mod nimbus;
+pub use nimbus::nimbus;
49	orchestration/src/coins/ethereum/consensus/nimbus.rs	Normal file
@@ -0,0 +1,49 @@
+use crate::Network;
+
+pub fn nimbus(network: Network) -> (String, String, String) {
+  assert_ne!(network, Network::Dev);
+
+  let platform = match std::env::consts::ARCH {
+    "x86_64" => "amd64",
+    "arm" => "arm32v7",
+    "aarch64" => "arm64v8",
+    _ => panic!("unsupported platform"),
+  };
+
+  #[rustfmt::skip]
+  let checksum = match platform {
+    "amd64" => "5da10222cfb555ce2e3820ece12e8e30318945e3ed4b2b88d295963c879daeee071623c47926f880f3db89ce537fd47c6b26fe37e47aafbae3222b58bcec2fba",
+    "arm32v7" => "7055da77bfa1186ee2e7ce2a48b923d45ccb039592f529c58d93d55a62bca46566ada451bd7497c3ae691260544f0faf303602afd85ccc18388fdfdac0bb2b45",
+    "arm64v8" => "1a68f44598462abfade0dbeb6adf10b52614ba03605a8bf487b99493deb41468317926ef2d657479fcc26fce640aeebdbd880956beec3fb110b5abc97bd83556",
+    _ => panic!("unsupported platform"),
+  };
+
+  #[rustfmt::skip]
+  let download_nimbus = format!(r#"
+FROM alpine:latest as nimbus
+
+ENV NIMBUS_VERSION=24.3.0
+ENV NIMBUS_COMMIT=dc19b082
+
+# Download nimbus
+RUN wget https://github.com/status-im/nimbus-eth2/releases/download/v${{NIMBUS_VERSION}}/nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}.tar.gz
+
+# Extract nimbus
+RUN tar xvf nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}.tar.gz
+RUN mv nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}/build/nimbus_beacon_node ./nimbus
+
+# Verify the checksum
+RUN sha512sum nimbus | grep {checksum}
+"#);
+
+  let run_nimbus = format!(
+    r#"
+COPY --from=nimbus --chown=ethereum nimbus /bin
+
+ADD /orchestration/{}/coins/ethereum/consensus/nimbus/run.sh /consensus_layer.sh
+"#,
+    network.label()
+  );
+
+  (download_nimbus, String::new(), run_nimbus)
+}
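One Rust detail in download_nimbus above: because the Dockerfile text is produced with format!, literal shell expansions must double their braces while Rust interpolations keep single braces. A standalone sketch of that convention, with a hypothetical shortened file name:

fn main() {
  let platform = "amd64";
  // {{ and }} emit literal braces, so ${{NIMBUS_VERSION}} survives as a shell
  // expansion in the generated Dockerfile, while {platform} is filled in by Rust
  let line = format!(r#"RUN tar xvf nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}.tar.gz"#);
  assert_eq!(line, r#"RUN tar xvf nimbus-eth2_Linux_amd64_${NIMBUS_VERSION}.tar.gz"#);
}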
14	orchestration/src/coins/ethereum/execution/anvil.rs	Normal file
@@ -0,0 +1,14 @@
+use crate::Network;
+
+pub fn anvil(network: Network) -> (String, String, String) {
+  assert_eq!(network, Network::Dev);
+
+  const ANVIL_SETUP: &str = r#"
+RUN curl -L https://foundry.paradigm.xyz | bash || exit 0
+RUN ~/.foundry/bin/foundryup
+
+EXPOSE 8545
+"#;
+
+  (String::new(), "RUN apt install git curl -y".to_string(), ANVIL_SETUP.to_string())
+}
5	orchestration/src/coins/ethereum/execution/mod.rs	Normal file
@@ -0,0 +1,5 @@
+mod reth;
+pub use reth::reth;
+
+mod anvil;
+pub use anvil::anvil;
38	orchestration/src/coins/ethereum/execution/reth.rs	Normal file
@@ -0,0 +1,38 @@
+use crate::Network;
+
+pub fn reth(network: Network) -> (String, String, String) {
+  assert_ne!(network, Network::Dev);
+
+  #[rustfmt::skip]
+  const DOWNLOAD_RETH: &str = r#"
+FROM alpine:latest as reth
+
+ENV RETH_VERSION=0.2.0-beta.6
+
+RUN apk --no-cache add git gnupg
+
+# Download reth
+RUN wget https://github.com/paradigmxyz/reth/releases/download/v${RETH_VERSION}/reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz
+RUN wget https://github.com/paradigmxyz/reth/releases/download/v${RETH_VERSION}/reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc
+
+# Verify the signature
+RUN gpg --keyserver keyserver.ubuntu.com --recv-keys A3AE097C89093A124049DF1F5391A3C4100530B4
+RUN gpg --verify reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz
+
+# Extract reth
+RUN tar xvf reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz
+"#;
+
+  let run_reth = format!(
+    r#"
+COPY --from=reth --chown=ethereum reth /bin
+
+EXPOSE 30303 9001 8545
+
+ADD /orchestration/{}/coins/ethereum/execution/reth/run.sh /execution_layer.sh
+"#,
+    network.label()
+  );
+
+  (DOWNLOAD_RETH.to_string(), String::new(), run_reth)
+}
43	orchestration/src/coins/ethereum/mod.rs	Normal file
@@ -0,0 +1,43 @@
+use std::path::Path;
+
+use crate::{Network, Os, mimalloc, os, write_dockerfile};
+
+mod execution;
+use execution::*;
+
+mod consensus;
+use consensus::*;
+
+pub fn ethereum(orchestration_path: &Path, network: Network) {
+  let ((el_download, el_run_as_root, el_run), (cl_download, cl_run_as_root, cl_run)) =
+    if network == Network::Dev {
+      (anvil(network), (String::new(), String::new(), String::new()))
+    } else {
+      // TODO: Select an EL/CL based off a RNG seeded from the public key
+      (reth(network), nimbus(network))
+    };
+
+  let download = mimalloc(Os::Alpine).to_string() + &el_download + &cl_download;
+
+  let run = format!(
+    r#"
+ADD /orchestration/{}/coins/ethereum/run.sh /run.sh
+CMD ["/run.sh"]
+"#,
+    network.label()
+  );
+  let run = mimalloc(Os::Debian).to_string() +
+    &os(Os::Debian, &(el_run_as_root + "\r\n" + &cl_run_as_root), "ethereum") +
+    &el_run +
+    &cl_run +
+    &run;
+
+  let res = download + &run;
+
+  let mut ethereum_path = orchestration_path.to_path_buf();
+  ethereum_path.push("coins");
+  ethereum_path.push("ethereum");
+  ethereum_path.push("Dockerfile");
+
+  write_dockerfile(ethereum_path, &res);
+}
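For orientation: anvil, reth, nimbus, and lighthouse each return a (download stage, run-as-root commands, run commands) triple, and ethereum above stitches the chosen pair into one Dockerfile. A reduced sketch of that composition, mirroring the code above, with hypothetical stub values:

// Download stages are concatenated first; root-level setup lines are joined,
// then each layer's run commands, then the shared entrypoint
fn compose(
  (el_download, el_run_as_root, el_run): (String, String, String),
  (cl_download, cl_run_as_root, cl_run): (String, String, String),
  entrypoint: &str,
) -> String {
  let run_as_root = el_run_as_root + "\r\n" + &cl_run_as_root;
  el_download + &cl_download + &run_as_root + &el_run + &cl_run + entrypoint
}

fn main() {
  let el = ("FROM alpine:latest as reth\n".to_string(), String::new(), "COPY --from=reth reth /bin\n".to_string());
  let cl = ("FROM alpine:latest as nimbus\n".to_string(), String::new(), "COPY --from=nimbus nimbus /bin\n".to_string());
  println!("{}", compose(el, cl, "CMD [\"/run.sh\"]\n"));
}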
@@ -1,4 +1,4 @@
-use std::{path::Path};
+use std::path::Path;

 use crate::{Network, Os, mimalloc, write_dockerfile};

@@ -1,4 +1,4 @@
-use std::{path::Path};
+use std::path::Path;

 use zeroize::Zeroizing;
@@ -266,7 +266,7 @@ fn dockerfiles(network: Network) {
   let orchestration_path = orchestration_path(network);

   bitcoin(&orchestration_path, network);
-  ethereum(&orchestration_path);
+  ethereum(&orchestration_path, network);
   monero(&orchestration_path, network);
   if network == Network::Dev {
     monero_wallet_rpc(&orchestration_path);
@@ -1,4 +1,4 @@
-use std::{path::Path};
+use std::path::Path;

 use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};

@@ -1,4 +1,4 @@
-use std::{path::Path};
+use std::path::Path;

 use zeroize::Zeroizing;

@@ -1,4 +1,4 @@
-use std::{path::Path};
+use std::path::Path;

 use zeroize::Zeroizing;
 use ciphersuite::{group::ff::PrimeField, Ciphersuite, Ristretto};
3	orchestration/testnet/coins/ethereum/consensus/lighthouse/run.sh	Executable file
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+RUST_LOG=info lighthouse bn --execution-endpoint http://localhost:8551 --execution-jwt /home/ethereum/.jwt
3	orchestration/testnet/coins/ethereum/consensus/nimbus/run.sh	Executable file
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exit 1
8	orchestration/testnet/coins/ethereum/execution/geth/run.sh	Executable file
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+#geth --dev --networkid 5208 \
+#  --http --http.api "web3,net,eth,miner" \
+#  --http.addr 0.0.0.0 --http.port 8545 \
+#  --http.vhosts="*" --http.corsdomain "*"
+
+exit 1
Some files were not shown because too many files have changed in this diff.